# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from .input_output_evaluator import InputOutputEvaluator
from ...code_runner import RUN_ROOT_DIR
from ....preprocessing.lang_processors.go_processor import GoProcessor
from ... import compile_go
go_processor = GoProcessor()
class GoInputOutputEvaluator(InputOutputEvaluator):
def __init__(
self,
tmp_folder=Path(RUN_ROOT_DIR.joinpath("automatic_tests/tmp_tests_folder/go")),
timeout: float = 15,
compilation_timeout: float = 30,
num_subfolders=100,
rand_char_filename=6,
run_go_imports=True,
):
super().__init__(
"go",
tmp_folder=tmp_folder,
timeout=timeout,
num_subfolders=num_subfolders,
rand_char_filename=rand_char_filename,
)
self.run_go_imports = run_go_imports
self.compilation_timeout = compilation_timeout
def _compile_program(self, program_path):
bin_path = program_path.with_suffix("")
compile_go(
Path(program_path), self.compilation_timeout, bin_path, self.run_go_imports
)
return bin_path
@staticmethod
def _process(code):
return go_processor.detokenize_code(code)
# ---- end of file: codegen_sources/code_runners/test_runners/input_output_runners/go_input_output_evaluator.py (repo: CodeGen-main) ----
# ---- file: codegen_sources/code_runners/test_runners/input_output_runners/__init__.py (repo: CodeGen-main) is empty ----
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import typing as tp
from pathlib import Path
from ...code_runner import CodeRunner, RUN_ROOT_DIR
from ...runner_errors import MissingTest, CompilationError, Timeout, TestRuntimeError
from ...utils import clean_err_output
from ...utils import FIREJAIL_COMMAND, MAX_VIRTUAL_MEMORY, limit_virtual_memory
import codegen_sources.preprocessing.lang_processors.java_processor
class InputOutputEvaluator(CodeRunner):
def __init__(
self,
lang,
tmp_folder=Path(RUN_ROOT_DIR.joinpath("automatic_tests/tmp_tests_folder")),
timeout: float = 15,
num_subfolders=100,
rand_char_filename=6,
):
super().__init__(lang, tmp_folder, timeout, num_subfolders, rand_char_filename)
def check_outputs(
self,
program: str,
inputs: tp.List[str],
outputs: tp.List[str],
truncate_errors=150,
):
"""Runs programs and checks how often the output is as expected"""
assert len(inputs) == len(outputs)
tmp_path = self._get_tmp_folder()
classname = None
if self.lang == "java":
classname = codegen_sources.preprocessing.lang_processors.java_processor.JavaProcessor.get_class_name(
tokenized_java=program
)
program_path = self._write_file(
self._process(program), tmp_path, filename=classname
)
res = None
executable_path: tp.Optional[Path] = None
try:
executable_path = self._compile_program(program_path)
except MissingTest:
res = "missing_test"
except CompilationError as e:
e = clean_err_output(e)
res = f"compilation: {clean_err_output(str(e))[:truncate_errors]}"
except Timeout as e:
e = clean_err_output(e)
res = f"compilation timeout: {clean_err_output(str(e))[:truncate_errors]}"
if res is not None:
self._clean(tmp_path)
return res, len(inputs), len(inputs), [res]
results = []
assert executable_path is not None
assert executable_path.is_file()
for _input, _output in zip(inputs, outputs):
out, err, ret_code = None, None, None
res = None
try:
out, err, ret_code = self._run_program(executable_path, _input)
except Timeout as e:
res = f"timeout: {clean_err_output(str(e))[:truncate_errors]}"
if res is None:
try:
res = self._eval_output(out, err, ret_code, _output)
except TestRuntimeError:
res = "runtime"
results.append(res)
self._clean(tmp_path)
num_failures = len([res for res in results if not res.startswith("success")])
if len([r for r in results if r.startswith("runtime")]) > 0:
short_result = [r for r in results if r.startswith("runtime")][0]
elif len([r for r in results if r.startswith("failure")]) > 0:
short_result = [r for r in results if r.startswith("failure")][0]
elif len([r for r in results if r.startswith("timeout")]) > 0:
short_result = [r for r in results if r.startswith("timeout")][0]
else:
assert num_failures == 0, results
short_result = "success"
return short_result, len(inputs), num_failures, results
def _run_program(
self, executable_path: Path, input_val: str,
):
test_cmd = str(executable_path)
return self._run_command(test_cmd, input_val)
def _eval_output(self, out, err, ret_code, output: str):
actual = out.strip()
if actual == output.strip():
return "success"
else:
if ret_code != 0:
return f"runtime: {err.decode('utf8')}"
else:
return f"failure: actual {actual} vs expected {output.strip()}"
@staticmethod
def _process(code: str):
raise NotImplementedError(
"_process_code should be implemented in inheriting class"
)
def _compile_program(self, program_path):
raise NotImplementedError(
"_compile_program should be implemented in inheriting class"
)
# ---- end of file: codegen_sources/code_runners/test_runners/input_output_runners/input_output_evaluator.py (repo: CodeGen-main) ----
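For reference, a minimal usage sketch of the check_outputs API defined above, called through one of the concrete subclasses (the C++ evaluator shown a little further down). This is only a sketch: it assumes the package is installed and that a C++ compiler and firejail are available locally, exactly as the tests at the end of this section do.

from codegen_sources.code_runners.test_runners import CppInputOutputEvaluator
from codegen_sources.preprocessing.lang_processors import LangProcessor

cpp_processor = LangProcessor.processors["cpp"]()
# tokenized program, as produced by the C++ language processor
program = " ".join(cpp_processor.tokenize_code(
    """#include <iostream>
    using namespace std;
    int main() { int a, b; cin >> a >> b; cout << a + b << endl; }"""
))
evaluator = CppInputOutputEvaluator()
# check_outputs returns (short_result, number of inputs, number of failures, per-input results)
short_result, n_inputs, n_failures, results = evaluator.check_outputs(
    program, inputs=["1 2", "0 0"], outputs=["3\n", "0\n"]
)
print(short_result, n_inputs, n_failures, results)  # expected: "success", 2, 0, ["success", "success"]
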
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from pathlib import Path
from ...code_runner import RUN_ROOT_DIR
from ....model.src.utils import get_java_bin_path
from ....preprocessing.lang_processors import JavaProcessor
from .input_output_evaluator import InputOutputEvaluator
from ...utils import FIREJAIL_COMMAND, MAX_VIRTUAL_MEMORY, limit_virtual_memory
from ...utils import compile_java
java_processor = JavaProcessor()
class JavaInputOutputEvaluator(InputOutputEvaluator):
def __init__(
self,
tmp_folder=Path(RUN_ROOT_DIR.joinpath("automatic_tests/tmp_tests_folder/java")),
timeout: float = 15,
compilation_timeout: float = 30,
num_subfolders=100,
rand_char_filename=6,
):
super().__init__(
"java",
tmp_folder=tmp_folder,
timeout=timeout,
num_subfolders=num_subfolders,
rand_char_filename=rand_char_filename,
)
self.compilation_timeout = compilation_timeout
self.env = os.environ.copy()
def init_env(self):
super().init_env()
self.env["PATH"] = f"{get_java_bin_path()}:{self.env['PATH']}"
def _compile_program(self, program_path):
bin_path = program_path.with_suffix(".class")
compile_java(Path(program_path), self.compilation_timeout)
return bin_path
@staticmethod
def _process(code):
return java_processor.detokenize_code(code)
def _run_program(
self, executable_path: Path, input_val: str,
):
test_cmd = f"{os.path.join(get_java_bin_path(), 'java')} {executable_path.name.replace('.class', '')}"
return self._run_command(
test_cmd, input_val, env_preparation=f"cd {executable_path.parent}"
)
# ---- end of file: codegen_sources/code_runners/test_runners/input_output_runners/java_input_output_evaluator.py (repo: CodeGen-main) ----
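The Java evaluator depends on the class name because javac requires the public class and the .java file name to match: check_outputs (in the base class above) extracts it with JavaProcessor.get_class_name and uses it as the file name, and _run_program then invokes `java <ClassName>` from the compiled folder. A small sketch of that step, assuming the Java language-processor dependencies are installed; the snippet and variable names are illustrative only.

from codegen_sources.preprocessing.lang_processors import JavaProcessor

java_processor = JavaProcessor()
tokenized = " ".join(java_processor.tokenize_code(
    "public class Addition { public static void main(String[] args) {} }"
))
classname = JavaProcessor.get_class_name(tokenized_java=tokenized)
print(classname)  # expected "Addition": the program is written to Addition.java and run as `java Addition`
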
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from .input_output_evaluator import InputOutputEvaluator
from ...code_runner import RUN_ROOT_DIR
from ....preprocessing.lang_processors.cpp_processor import CppProcessor
from ...utils import compile_cpp
cpp_processor = CppProcessor()
class CppInputOutputEvaluator(InputOutputEvaluator):
def __init__(
self,
tmp_folder=Path(RUN_ROOT_DIR.joinpath("automatic_tests/tmp_tests_folder/cpp")),
timeout: float = 15,
compilation_timeout: float = 30,
num_subfolders=100,
rand_char_filename=6,
):
super().__init__(
"cpp",
tmp_folder=tmp_folder,
timeout=timeout,
num_subfolders=num_subfolders,
rand_char_filename=rand_char_filename,
)
self.compilation_timeout = compilation_timeout
def _compile_program(self, program_path):
bin_path = program_path.with_suffix("")
compile_cpp(Path(program_path), self.compilation_timeout, bin_path)
return bin_path
@staticmethod
def _process(code):
return cpp_processor.detokenize_code(code)
# ---- end of file: codegen_sources/code_runners/test_runners/input_output_runners/cpp_input_output_evaluator.py (repo: CodeGen-main) ----
# ---- file: codegen_sources/code_runners/test_runners/evosuite_test_runners/__init__.py (repo: CodeGen-main) is empty ----
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
from pathlib import Path
from codegen_sources.preprocessing.lang_processors import LangProcessor
from ...code_runner import RUN_ROOT_DIR
from ...runner_errors import (
TestRuntimeError,
CompilationError,
InvalidTest,
)
from ..unittest_runner import UnitTestRunner
python_processor = LangProcessor.processors["python"]()
class PythonEvosuiteTestRunner(UnitTestRunner):
def __init__(
self,
tmp_folder=Path(
RUN_ROOT_DIR.joinpath("automatic_tests/tmp_tests_folder/python")
),
timeout=15,
num_subfolders=100,
rand_char_filename=6,
):
super().__init__(
"python",
tmp_folder=tmp_folder,
timeout=timeout,
num_subfolders=num_subfolders,
rand_char_filename=rand_char_filename,
)
def init_env(self):
super().init_env()
self.env["PATH"] = f"{sys.executable.rstrip('python')}:{self.env['PATH']}"
def _run_tests(
self, function: str, test: str, tmp_path: Path,
):
if "#TOFILL" not in test:
raise InvalidTest("Missing #TOFILL")
try:
f_name = python_processor.get_function_name(function)
except (ValueError, IndexError):
raise CompilationError("No function definition")
function = python_processor.detokenize_code(
function.replace(f" {f_name.strip()} ", " f_filled ")
)
filled_test = test.replace("#TOFILL", function)
test_path = self._write_file(filled_test, tmp_path)
assert test_path.is_file()
out, err, code = self._run_command(f"python {test_path}")
return out, err, code
def _eval_proc_state(self, out, err):
stderr = self.clean_firejail(err)
res_line = stderr.splitlines()
if len(res_line) <= 2 or not (
res_line[-1].startswith("OK") or res_line[-1].startswith("FAILED")
):
raise TestRuntimeError(stderr)
assert res_line[-3].startswith("Ran ")
number_of_tests = int(res_line[-3].replace("Ran ", "").split(" ")[0])
res_line = res_line[-1]
if res_line.startswith("OK"):
return "success", number_of_tests, 0
else:
assert res_line.startswith("FAILED (errors=") or res_line.startswith(
"FAILED (failures="
)
number_failures = int(res_line.split("=")[-1].replace(")", ""))
return "failure", number_of_tests, number_failures
# ---- end of file: codegen_sources/code_runners/test_runners/evosuite_test_runners/python_evosuite_test_runner.py (repo: CodeGen-main) ----
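For context, _eval_proc_state above parses the stderr produced by Python's unittest runner. Below is a hedged illustration of the two output shapes it expects and how the counts fall out; the strings are abbreviated (a real failure also prints a traceback block before the summary), and the parsing lines simply mirror the method above.

passed_stderr = (
    "..\n"
    "----------------------------------------------------------------------\n"
    "Ran 2 tests in 0.001s\n"
    "\n"
    "OK"
)
failed_stderr = (
    ".F\n"
    "----------------------------------------------------------------------\n"
    "Ran 2 tests in 0.001s\n"
    "\n"
    "FAILED (failures=1)"
)
for stderr in (passed_stderr, failed_stderr):
    lines = stderr.splitlines()
    number_of_tests = int(lines[-3].replace("Ran ", "").split(" ")[0])
    if lines[-1].startswith("OK"):
        print("success", number_of_tests, 0)
    else:
        print("failure", number_of_tests, int(lines[-1].split("=")[-1].replace(")", "")))
# prints: success 2 0, then failure 2 1
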
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from codegen_sources.preprocessing.lang_processors import LangProcessor
from ...code_runner import RUN_ROOT_DIR
from ...utils import compile_cpp
from ...runner_errors import (
TestRuntimeError,
CompilationError,
InvalidTest,
)
from ..unittest_runner import UnitTestRunner
import typing as tp
NB_TESTS_STRING = "[==========] "
FAILED_STRING = "FAILED TEST"
PASSED_STRING = "[  PASSED  ]"
TOFILL = "//TOFILL"
cpp_processor = LangProcessor.processors["cpp"]()
class CppEvosuiteTestRunner(UnitTestRunner):
def __init__(
self,
tmp_folder=Path(RUN_ROOT_DIR.joinpath("automatic_tests/tmp_tests_folder/cpp")),
timeout=15,
compilation_timeout=30,
num_subfolders=100,
rand_char_filename=6,
):
super().__init__(
"cpp",
tmp_folder=tmp_folder,
timeout=timeout,
num_subfolders=num_subfolders,
rand_char_filename=rand_char_filename,
)
self.compilation_timeout = compilation_timeout
def _run_tests(
self, function: str, test: str, tmp_path: Path,
):
if TOFILL not in test:
raise InvalidTest(f"Missing {TOFILL}")
try:
f_name = cpp_processor.get_function_name(function)
except (ValueError, IndexError):
raise CompilationError("No function definition")
function = cpp_processor.detokenize_code(
function.replace(f" {f_name.strip()} ", " f_filled ")
)
filled_test = test.replace(TOFILL, function)
test_path = self._write_file(filled_test, tmp_path)
assert test_path.is_file()
bin_path = test_path.with_suffix("")
compile_cpp(
code_path=test_path,
compilation_timeout=self.compilation_timeout,
output_path=bin_path,
)
test_cmd = str(bin_path)
return self._run_command(test_cmd)
def _eval_proc_state(self, out: str, err: str) -> tp.Tuple[str, int, int]:
"""
        Takes the stdout and stderr of the test binary and
        returns (status, total number of tests, number of failures)
"""
res_line = out.splitlines()
if len(res_line) <= 2 or not (
res_line[-1].startswith(PASSED_STRING) or FAILED_STRING in res_line[-1]
):
raise TestRuntimeError("\n".join(res_line))
nb_tests_lines = [l for l in res_line if l.startswith(NB_TESTS_STRING)]
assert len(nb_tests_lines) > 0
nb_tests_line = nb_tests_lines[-1]
number_of_tests = int(
nb_tests_line.replace(NB_TESTS_STRING, "").split(" ")[0].strip()
)
res_last_line = res_line[-1]
if res_last_line.startswith(PASSED_STRING):
return "success", number_of_tests, 0
else:
assert FAILED_STRING in res_last_line
number_failures = int(res_last_line.split()[0])
return "failure", number_of_tests, number_failures
# ---- end of file: codegen_sources/code_runners/test_runners/evosuite_test_runners/cpp_evosuite_test_runner.py (repo: CodeGen-main) ----
# ---- file: codegen_sources/code_runners/test_runners/tests/__init__.py (repo: CodeGen-main) is empty ----
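Similarly, the C++ runner's _eval_proc_state keys off googletest's summary lines on stdout (the same markers as the NB_TESTS_STRING / PASSED_STRING / FAILED_STRING constants at the top of the file above). A hedged sketch of the output shape it expects and how the counts are extracted; the strings are abbreviated samples of gtest output, not captured from a real run.

passing_out = (
    "[==========] Running 2 tests from 1 test suite.\n"
    "[==========] 2 tests from 1 test suite ran. (0 ms total)\n"
    "[  PASSED  ] 2 tests."
)
failing_out = (
    "[==========] 2 tests from 1 test suite ran. (0 ms total)\n"
    "[  PASSED  ] 1 test.\n"
    "[  FAILED  ] 1 test, listed below:\n"
    "[  FAILED  ] EvoSuiteTest.test1\n"
    "\n"
    " 1 FAILED TEST"
)
for out in (passing_out, failing_out):
    lines = out.splitlines()
    nb_tests_line = [l for l in lines if l.startswith("[==========] ")][-1]
    number_of_tests = int(nb_tests_line.replace("[==========] ", "").split(" ")[0])
    last = lines[-1]
    if last.startswith("[  PASSED  ]"):
        print("success", number_of_tests, 0)
    else:
        print("failure", number_of_tests, int(last.split()[0]))
# prints: success 2 0, then failure 2 1
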
from pathlib import Path
import os
from codegen_sources.code_runners.test_runners import PythonEvosuiteTestRunner
from codegen_sources.preprocessing.lang_processors import LangProcessor
python_processor = LangProcessor.processors["python"]()
TEST_SIGMOID = """import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import unittest
#TOFILL
class CLASS_f72b28220d38d38dc0dd570d2e44b3e4f4bc0dbe6db07624d40924a60e481f65(unittest.TestCase):
def test0(self):
double0 = f_filled(0.0)
assert abs(0.5 - double0) <= 1.0E-4
def test1(self):
double0 = f_filled((-49379.6829442))
print(double0)
assert abs(0.0 - double0) <= 1.0E-4
if __name__ == '__main__':
unittest.main()"""
def test_runner_on_sigmoid_math_fails():
python_runner = PythonEvosuiteTestRunner()
sigmoid = """def sigmoid ( input ) :
return 1.0 / ( 1.0 + ( math.exp ( - input ) ) )"""
sigmoid = " ".join(python_processor.tokenize_code(sigmoid))
res, tests, failures = python_runner.get_tests_results(sigmoid, TEST_SIGMOID)
assert res == "failure"
assert tests == 2
assert failures == 1
def test_runner_on_sigmoid_np_success():
python_runner = PythonEvosuiteTestRunner()
sigmoid = """def sigmoid ( input ) :
return 1.0 / ( 1.0 + ( np.exp ( - input ) ) )"""
sigmoid = " ".join(python_processor.tokenize_code(sigmoid))
res, tests, failures = python_runner.get_tests_results(sigmoid, TEST_SIGMOID)
assert res == "success"
assert tests == 2
assert failures == 0
def test_runner_on_sigmoid_np_timeout():
python_runner = PythonEvosuiteTestRunner(timeout=1)
sigmoid = """def sigmoid ( input ) :
import time
time.sleep(10)
"""
sigmoid = " ".join(python_processor.tokenize_code(sigmoid))
res, tests, failures = python_runner.get_tests_results(sigmoid, TEST_SIGMOID)
assert res == "timeout"
assert tests == 0
assert failures == ""
def test_firejail_keeps_from_writing():
if os.environ.get("CI", False):
return
python_runner = PythonEvosuiteTestRunner(timeout=20)
test_out_path = Path(__file__).parent.joinpath(
"test_output_should_not_be_written.out"
)
if test_out_path.exists():
os.remove(test_out_path)
sigmoid = f"""def write_in_file ( input ) :
with open("{test_out_path}", "w") as out_file:
out_file.write("hello")
"""
sigmoid = " ".join(python_processor.tokenize_code(sigmoid))
res, tests, failures = python_runner.get_tests_results(sigmoid, TEST_SIGMOID)
assert not test_out_path.is_file(), f"{test_out_path} should not have been written"
assert res == "failure"
assert tests == 2
assert failures == 2
def test_failures():
python_runner = PythonEvosuiteTestRunner()
test = """import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import sys
import unittest
#TOFILL
class CLASS_1143a612514aceab440e7ae2afc4dcdddb4332091f8c971668711956db699122(unittest.TestCase):
def test0(self):
intArray0 = [0] * 8;
int0 = f_filled(intArray0, 9185)
assert (-1) == int0
def test1(self):
intArray0 = [0] * 8;
intArray0[0] = 45539;
int0 = f_filled(intArray0, 0)
assert 1 == int0
def test2(self):
intArray0 = [0] * 1;
intArray0[0] = 1;
int0 = f_filled(intArray0, 1)
assert 0 == int0
if __name__ == '__main__':
unittest.main()"""
function = "def findinlist ( list , value ) : NEW_LINE INDENT for i in range ( len ( list ) ) : NEW_LINE INDENT if list [ i ] == value : NEW_LINE INDENT return i NEW_LINE DEDENT DEDENT return None NEW_LINE DEDENT"
res, tests, failures = python_runner.get_tests_results(function, test)
assert res == "failure"
assert tests == 3
assert failures == 1
# ---- end of file: codegen_sources/code_runners/test_runners/tests/test_python_test_runner.py (repo: CodeGen-main) ----
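The space-separated token strings used throughout these tests (including the NEW_LINE / INDENT / DEDENT markers visible in test_failures above) are produced by the language processors. A small round-trip sketch, assuming the preprocessing dependencies (tree-sitter grammars etc.) are installed; the exact token sequence shown in the comment is illustrative.

from codegen_sources.preprocessing.lang_processors import LangProcessor

python_processor = LangProcessor.processors["python"]()
tokens = python_processor.tokenize_code("def add(a, b):\n    return a + b")
tokenized = " ".join(tokens)
print(tokenized)                                    # e.g. "def add ( a , b ) : NEW_LINE INDENT return a + b NEW_LINE DEDENT"
print(python_processor.detokenize_code(tokenized))  # approximately the original source (formatting may be normalized)
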
from pathlib import Path
import os
from codegen_sources.code_runners.test_runners import CppEvosuiteTestRunner
from codegen_sources.preprocessing.lang_processors import LangProcessor
cpp_processor = LangProcessor.processors["cpp"]()
TEST_SIGMOID = """#include <iostream>
#include <cstdlib>
#include <string>
#include <vector>
#include <fstream>
#include <iomanip>
#include <bits/stdc++.h>
#include "gtest/gtest.h"
using namespace std;
//TOFILL
TEST(EvoSuiteTest, test0){
double double0 = f_filled(0.0);
ASSERT_NEAR (0.5, double0, 1.0E-4);
}
TEST(EvoSuiteTest, test1){
double double0 = f_filled((-49379.6829442));
ASSERT_NEAR (0.0, double0, 1.0E-4);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}"""
def test_runner_on_sigmoid_success():
cpp_runner = CppEvosuiteTestRunner()
sigmoid = """double sigmoid ( double input ){
return 1.0 / ( 1.0 + ( exp ( - input ) ) );
}"""
sigmoid = " ".join(cpp_processor.tokenize_code(sigmoid))
res, tests, failures = cpp_runner.get_tests_results(sigmoid, TEST_SIGMOID)
assert res == "success", (res, tests, failures)
assert tests == 2
assert failures == 0
def test_runner_on_bad_sigmoid_fails():
cpp_runner = CppEvosuiteTestRunner()
sigmoid = """double sigmoid ( double input ){
return 0.5;
}"""
sigmoid = " ".join(cpp_processor.tokenize_code(sigmoid))
res, tests, failures = cpp_runner.get_tests_results(sigmoid, TEST_SIGMOID)
assert res == "failure", (res, tests, failures)
assert tests == 2
assert failures == 1
def test_runner_on_bad_sigmoid_compilation_error():
cpp_runner = CppEvosuiteTestRunner()
sigmoid = """double sigmoid ( double input ){
return 1.0 / ( 1.0 + ( exp ( - input ) ) )
}"""
sigmoid = " ".join(cpp_processor.tokenize_code(sigmoid))
res, tests, failures = cpp_runner.get_tests_results(sigmoid, TEST_SIGMOID)
assert res == "compilation", (res, tests, failures)
assert tests == 0
assert isinstance(failures, str)
def test_runner_on_sigmoid_np_timeout():
cpp_runner = CppEvosuiteTestRunner(timeout=1)
sigmoid = """double sigmoid ( double input ){
sleep(10);
return 1;
}
"""
sigmoid = " ".join(cpp_processor.tokenize_code(sigmoid))
res, tests, failures = cpp_runner.get_tests_results(sigmoid, TEST_SIGMOID)
assert res == "timeout", (res, tests, failures)
assert tests == 0
assert isinstance(failures, str)
def test_firejail_keeps_from_writing():
if os.environ.get("CI", False):
return
cpp_runner = CppEvosuiteTestRunner(timeout=20)
test_out_path = Path(__file__).parent.joinpath(
"test_output_should_not_be_written_cpp.out"
)
if test_out_path.exists():
os.remove(test_out_path)
sigmoid = f"""double write_in_file ( double input ){{
ofstream myfile;
myfile.open ("{test_out_path}");
myfile << "hello" << endl;
return 1;
}}
"""
sigmoid = " ".join(cpp_processor.tokenize_code(sigmoid))
res, tests, failures = cpp_runner.get_tests_results(sigmoid, TEST_SIGMOID)
assert not test_out_path.is_file(), f"{test_out_path} should not have been written"
assert res == "failure", (res, tests, failures)
assert tests == 2
assert failures == 2
TEST_LATTITUDE = """#include <iostream>
#include <cstdlib>
#include <string>
#include <vector>
#include <fstream>
#include <iomanip>
#include <bits/stdc++.h>
#include "gtest/gtest.h"
using namespace std;
//TOFILL
TEST(EvoSuiteTest, test0){
double double0 = f_filled(0.5);
ASSERT_NEAR (0.0, double0, 1.0E-4);
}
TEST(EvoSuiteTest, test1){
double double0 = f_filled(0.0);
ASSERT_NEAR (85.0511287798066, double0, 1.0E-4);
}
TEST(EvoSuiteTest, test2){
double double0 = f_filled(21003.854);
ASSERT_NEAR ((-90.0), double0, 1.0E-4);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}"""
def test_runner_on_lattitude_success():
cpp_runner = CppEvosuiteTestRunner()
lattitude = "double getLatitudeFromY ( double inY ) { double n = M_PI * ( 1 - 2 * inY ) ; return 180 / M_PI * atan ( 0.5 * ( exp ( n ) - exp ( - n ) ) ) ; }"
lattitude = " ".join(cpp_processor.tokenize_code(lattitude))
res, tests, failures = cpp_runner.get_tests_results(lattitude, TEST_LATTITUDE)
assert res == "success", (res, tests, failures)
assert tests == 3
assert failures == 0
# ---- end of file: codegen_sources/code_runners/test_runners/tests/test_cpp_test_runner.py (repo: CodeGen-main) ----
# ---- file: codegen_sources/code_runners/test_runners/tests/test_input_output_runners/__init__.py (repo: CodeGen-main) is empty ----
from pathlib import Path
import os
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.code_runners.test_runners import JavaInputOutputEvaluator
java_processor = LangProcessor.processors["java"]()
ADDITION_PROGRAM = " ".join(
java_processor.tokenize_code(
r"""
import java.util.*;
import java.io.*;
public class Addition {
public static void main(String[] args) throws IOException {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
String[] inputs = bufferedReader.readLine().replaceAll("\\s+$", "").split(" ");
Integer a = Integer.parseInt(inputs[0]);
Integer b = Integer.parseInt(inputs[1]);
System.out.println(a + b);
}
}
"""
)
)
def test_runner_on_addition_success():
    java_runner = JavaInputOutputEvaluator()
    res, tests, failures, res_list = java_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res == "success", (res, tests, failures)
assert tests == 2
assert failures == 0
def test_runner_on_addition_wrong_output_failure():
    java_runner = JavaInputOutputEvaluator()
    res, tests, failures, res_list = java_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "1\n"],
truncate_errors=None,
)
assert res == "failure: actual 0 vs expected 1", (res, tests, failures, res_list)
assert tests == 2
assert failures == 1
assert res_list[-1] == "failure: actual 0 vs expected 1"
def test_compilation_timeout():
    java_runner = JavaInputOutputEvaluator(compilation_timeout=0.1)
    res, tests, failures, res_list = java_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res == "compilation timeout: Compilation Timeout", (res, tests, failures)
assert tests == 2
assert failures == 2
def test_runtime_timeout():
    java_runner = JavaInputOutputEvaluator(timeout=1)
    res, tests, failures, res_list = java_runner.check_outputs(
"import java.util.concurrent.TimeUnit ;"
+ ADDITION_PROGRAM.replace(
"throws IOException {",
"throws IOException, InterruptedException { \n TimeUnit.SECONDS.sleep(5);\n",
),
inputs=["1 2"],
outputs=["3\n"],
truncate_errors=None,
)
assert res == "timeout: ", (res, tests, failures)
assert tests == 1
assert failures == 1
def test_firejail_keeps_from_writing():
if os.environ.get("CI", False):
return
    java_runner = JavaInputOutputEvaluator(timeout=20)
    test_out_path = Path(__file__).parent.joinpath(
        "test_output_should_not_be_written_java.out"
)
if test_out_path.exists():
os.remove(test_out_path)
write_to_file = (
"""
import java.io.File;
import java.io.IOException;
public class WriteTest{
public static void main(String[] args) throws IOException {
File myObj = new File("%s");
myObj.createNewFile();
}
}
"""
% test_out_path
)
write_to_file = " ".join(java_processor.tokenize_code(write_to_file))
    res, tests, failures, res_list = java_runner.check_outputs(
write_to_file, inputs=[""], outputs=[""], truncate_errors=None
)
assert not test_out_path.is_file(), f"{test_out_path} should not have been written"
assert res == "success", (res, tests, failures)
assert tests == 1
assert failures == 0
# ---- end of file: codegen_sources/code_runners/test_runners/tests/test_input_output_runners/test_java_io_evaluators.py (repo: CodeGen-main) ----
from pathlib import Path
import os
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.code_runners.test_runners import CppInputOutputEvaluator
cpp_processor = LangProcessor.processors["cpp"]()
ADDITION_PROGRAM = " ".join(
cpp_processor.tokenize_code(
"""#include <iostream>
using namespace std;
int main() {
int a, b;
cin >> a >> b;
cout << a + b << endl;
}"""
)
)
def test_runner_on_addition_success():
cpp_runner = CppInputOutputEvaluator()
res, tests, failures, res_list = cpp_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res == "success", (res, tests, failures)
assert tests == 2
assert failures == 0
def test_compilation_error():
cpp_runner = CppInputOutputEvaluator()
res, tests, failures, res_list = cpp_runner.check_outputs(
ADDITION_PROGRAM.replace("int a", "a"),
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res.startswith("compilation"), (res, tests, failures)
assert tests == 2
assert failures == 2
def test_runner_on_addition_wrong_output_failure():
cpp_runner = CppInputOutputEvaluator()
res, tests, failures, res_list = cpp_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "1\n"],
truncate_errors=None,
)
assert res == "failure: actual 0 vs expected 1", (res, tests, failures, res_list)
assert tests == 2
assert failures == 1
assert res_list[-1] == "failure: actual 0 vs expected 1"
def test_compilation_timeout():
cpp_runner = CppInputOutputEvaluator(compilation_timeout=0.1)
res, tests, failures, res_list = cpp_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res == "compilation timeout: Compilation Timeout", (res, tests, failures)
assert tests == 2
assert failures == 2
def test_runtime_timeout():
cpp_runner = CppInputOutputEvaluator(timeout=1)
res, tests, failures, res_list = cpp_runner.check_outputs(
"#include <unistd.h>\n"
+ ADDITION_PROGRAM.replace("main ( ) {", "main ( ) { sleep( 10 ) ;"),
inputs=["1 2"],
outputs=["3\n"],
truncate_errors=None,
)
assert res == "timeout: ", (res, tests, failures)
assert tests == 1
assert failures == 1
def test_firejail_keeps_from_writing():
if os.environ.get("CI", False):
return
cpp_runner = CppInputOutputEvaluator(timeout=20)
test_out_path = Path(__file__).parent.joinpath(
"test_output_should_not_be_written_cpp.out"
)
if test_out_path.exists():
os.remove(test_out_path)
write_to_file = (
"""#include <iostream>
#include <fstream>
using namespace std;
int main() {
ofstream myfile;
myfile.open ("%s");
myfile << "hello" << endl;
return 1;
}
"""
% test_out_path
)
write_to_file = " ".join(cpp_processor.tokenize_code(write_to_file))
res, tests, failures, res_list = cpp_runner.check_outputs(
write_to_file, inputs=[""], outputs=[""], truncate_errors=None
)
assert not test_out_path.is_file(), f"{test_out_path} should not have been written"
assert res == "success", (res, tests, failures)
assert tests == 1
assert failures == 0
# ---- end of file: codegen_sources/code_runners/test_runners/tests/test_input_output_runners/test_cpp_io_evaluators.py (repo: CodeGen-main) ----
from pathlib import Path
import os
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.code_runners.test_runners import RustInputOutputEvaluator
rust_processor = LangProcessor.processors["rust"]()
ADDITION_PROGRAM = " ".join(
rust_processor.tokenize_code(
"""
fn main() {
let cin = std::io::stdin();
let mut s = String::new();
cin.read_line(&mut s).unwrap();
let values = s
.split_whitespace()
.map(|x| x.parse::<i32>())
.collect::<Result<Vec<i32>, _>>()
.unwrap();
assert!(values.len() == 2);
let var1 = values[0];
let var2 = values[1];
println!("{}", var1 + var2);
}"""
)
)
def test_runner_on_addition_success():
rust_runner = RustInputOutputEvaluator()
res, tests, failures, res_list = rust_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res == "success", (res, tests, failures)
assert tests == 2
assert failures == 0
def test_runner_on_addition_wrong_output_failure():
rust_runner = RustInputOutputEvaluator()
res, tests, failures, res_list = rust_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "1\n"],
truncate_errors=None,
)
assert res == "failure: actual 0 vs expected 1", (res, tests, failures, res_list)
assert tests == 2
assert failures == 1
assert res_list[-1] == "failure: actual 0 vs expected 1"
def test_compilation_timeout():
rust_runner = RustInputOutputEvaluator(compilation_timeout=0.1)
res, tests, failures, res_list = rust_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res == "compilation timeout: Compilation Timeout", (res, tests, failures)
assert tests == 2
assert failures == 2
def test_firejail_keeps_from_writing():
if os.environ.get("CI", False):
return
rust_runner = RustInputOutputEvaluator(timeout=20)
test_out_path = Path(__file__).parent.joinpath(
"test_output_should_not_be_written_rust.out"
)
if test_out_path.exists():
os.remove(test_out_path)
write_to_file = (
"""use std::fs::File;
use std::io::prelude::*;
fn main() -> std::io::Result<()> {
let mut file = File::create("%s")?;
file.write_all(b"Hello, world!")?;
Ok(())
}
"""
% test_out_path
)
write_to_file = " ".join(rust_processor.tokenize_code(write_to_file))
res, tests, failures, res_list = rust_runner.check_outputs(
write_to_file, inputs=[""], outputs=[""], truncate_errors=None
)
assert not test_out_path.is_file(), f"{test_out_path} should not have been written"
assert res == "success", (res, tests, failures)
assert tests == 1
assert failures == 0
# ---- end of file: codegen_sources/code_runners/test_runners/tests/test_input_output_runners/test_rust_io_evaluators.py (repo: CodeGen-main) ----
from pathlib import Path
import os
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.code_runners.test_runners import GoInputOutputEvaluator
go_processor = LangProcessor.processors["go"]()
ADDITION_PROGRAM = " ".join(
go_processor.tokenize_code(
"""
func main() {
var a, b int
fmt.Scanf("%d %d", &a, &b)
fmt.Print(a + b)
}"""
)
)
def test_runner_on_addition_success():
go_runner = GoInputOutputEvaluator()
res, tests, failures, res_list = go_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res == "success", (res, tests, failures)
assert tests == 2
assert failures == 0
def test_runner_on_addition_without_go_imports_fails():
go_runner = GoInputOutputEvaluator(run_go_imports=False)
res, tests, failures, res_list = go_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res.startswith("compilation:"), (res, tests, failures)
assert tests == 2
assert failures == 2
def test_runner_on_addition_wrong_output_failure():
go_runner = GoInputOutputEvaluator()
res, tests, failures, res_list = go_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "1\n"],
truncate_errors=None,
)
assert res == "failure: actual 0 vs expected 1", (res, tests, failures, res_list)
assert tests == 2
assert failures == 1
assert res_list[-1] == "failure: actual 0 vs expected 1"
def test_compilation_timeout():
go_runner = GoInputOutputEvaluator(compilation_timeout=0.1)
res, tests, failures, res_list = go_runner.check_outputs(
ADDITION_PROGRAM,
inputs=["1 2", "0 0"],
outputs=["3\n", "0\n"],
truncate_errors=None,
)
assert res == "compilation timeout: Compilation Timeout", (res, tests, failures)
assert tests == 2
assert failures == 2
def test_firejail_keeps_from_writing():
if os.environ.get("CI", False):
return
go_runner = GoInputOutputEvaluator(timeout=20)
test_out_path = Path(__file__).parent.joinpath(
"test_output_should_not_be_written_go.out"
)
if test_out_path.exists():
os.remove(test_out_path)
write_to_file = (
"""
package main
import (
"os"
)
func main() {
f, _ := os.Create("%s")
f.WriteString("Hello World")
}
"""
% test_out_path
)
write_to_file = " ".join(go_processor.tokenize_code(write_to_file))
res, tests, failures, res_list = go_runner.check_outputs(
write_to_file, inputs=[""], outputs=[""], truncate_errors=None
)
assert not test_out_path.is_file(), f"{test_out_path} should not have been written"
assert res == "success", (res, tests, failures)
assert tests == 1
assert failures == 0
# ---- end of file: codegen_sources/code_runners/test_runners/tests/test_input_output_runners/test_go_io_evaluators.py (repo: CodeGen-main) ----
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import itertools
from pathlib import Path
import torch
import fastBPE
from transformers import RobertaTokenizer
import codegen_sources
import typing as tp
import codegen_sources.preprocessing.lang_processors as langp
import codegen_sources.preprocessing.lang_processors.utils as lputils
import codegen_sources.model.src.data.dictionary as dic
from codegen_sources.preprocessing.bpe_modes import FastBPEMode, RobertaBPEMode
import codegen_sources.preprocessing.obfuscation.utils_deobfuscation as deobf
from codegen_sources.model.src.utils import restore_roberta_segmentation_sentence
from . import utils
# pylint: disable=arguments-differ
# code -> updated code (= obfuscated, formatted, etc.)
# updated code -> tokenized
# tokenized -> bpe
#
# bpe -> tokenized -> code
import sentencepiece as spm # type: ignore
PathLike = tp.Union[str, Path]
X = tp.TypeVar("X")
Y = tp.TypeVar("Y")
Z = tp.TypeVar("Z")
BPE_FOLDER = (
Path(codegen_sources.__file__).resolve().parents[1]
/ "data"
/ "bpe"
/ "cpp-java-python"
)
# TokCode = tp.NewType('TokCode', str) # can help making sure we don't mix tok and untok
TokCode = tp.List[str]
# TODO: when it's clearer what we want tokenize code to be, let's port it
# to the language processors (starting from the base one)
class Transform(tp.Generic[X, Y]):
def apply(self, data: X) -> Y:
"""Apply the transform"""
raise NotImplementedError
def revert(self, data: Y) -> X:
"""Revert the transform"""
raise NotImplementedError
def pipe(self, other: "Transform[Y,Z]") -> "Composition[X,Z]":
"""Create a new transform composing this transform followed by another
transform.
Parameter
----------
other: Transform
another transform taking as input the output of the current
transform
"""
return Composition(self, other)
def __repr__(self) -> str:
cls = self.__class__.__name__
params = sorted(
f"{x}={y!r}" for x, y in self.__dict__.items() if not x.startswith("_")
)
return f"{cls}({','.join(params)})"
def inverted(self) -> "Transform[Y, X]":
"""Creates the transform which is the inverted version
from this one
"""
return Inverted(self)
class Inverted(Transform[X, Y]):
"""Transform thant inverts another transform,
aka apply->revert and revert->apply
"""
def __init__(self, transform: Transform[Y, X]) -> None:
self.transform = transform
def apply(self, data: X) -> Y:
return self.transform.revert(data)
def revert(self, data: Y) -> X:
return self.transform.apply(data)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.transform})"
class _InitialSpaces(Transform[str, str]):
"""Converts initial spaces into a token and a number"""
def __init__(self, space_char: str = "<special8>") -> None:
self.space_char = space_char
self.pat = re.compile(f"{space_char} [0-9]+ (.)?")
def apply(self, string: str) -> str:
lines = string.splitlines()
new_lines = []
for line in lines:
length = len(line)
stripped = line.lstrip(" ")
count = length - len(stripped)
if count:
stripped = f"{self.space_char} {count} {stripped}"
new_lines.append(stripped)
return "\n".join(new_lines)
def revert(self, string: str) -> str:
lines = string.splitlines()
new_lines = []
for line in lines:
if self.pat.match(line) is not None:
_, num, rest = line.split(" ", maxsplit=2)
line = int(num) * " " + rest
new_lines.append(line)
return "\n".join(new_lines)
class Composition(Transform[X, Z]):
"""Composition of several transforms
    Parameters
    ----------
transform_1: Transform
the first transform to be applied
transform_2: Transform
the second transform to be applied
"""
def __init__(
self, transform_1: Transform[X, Y], transform_2: Transform[Y, Z]
) -> None:
self.transforms: tp.List[Transform[tp.Any, tp.Any]] = []
for t in (transform_1, transform_2):
self.transforms.extend(
t.transforms if isinstance(t, Composition) else [t] # type: ignore
)
def apply(self, data: X) -> Z:
out: tp.Any = data
for t in self.transforms:
out = t.apply(out)
return out # type: ignore
def revert(self, data: Z) -> X:
out: tp.Any = data
for t in reversed(self.transforms):
out = t.revert(out)
return out # type: ignore
# pylint: disable=arguments-renamed
class CodeTokenizer(Transform[str, TokCode]):
"""Tokenize code
Parameters
----------
language: str
        language of the code to be tokenized; the corresponding
        language processor will be used
keep_comments: bool
whether to keep the comments in the tokenized code
process_strings: bool
TODO
"""
def __init__(
self, language: str, keep_comments: bool = False, process_strings: bool = True
) -> None:
self.language = language
self.keep_comments = keep_comments
self.process_strings = process_strings
self._tokenizer: tp.Any = None
@property
def tokenizer(self) -> langp.LangProcessor:
if self._tokenizer is None:
self._tokenizer = langp.LangProcessor.processors[self.language]()
return self._tokenizer
def apply(self, code: str) -> TokCode:
return self.tokenizer.tokenize_code(
code, keep_comments=self.keep_comments, process_strings=self.process_strings
)
def revert(self, tok_code: TokCode) -> str:
return self.tokenizer.detokenize_code(tok_code)
    # the following is needed for picklability
    # (because the language processor may not be picklable)
def __getstate__(self) -> tp.Dict[str, tp.Any]:
return utils.extract_dict(self, reset_keys=["_tokenizer"])
class BpeBase(Transform[TokCode, str]):
"""Applies a BPE model to a tokenized code
Parameter
---------
code_path: str / Path
path to the codes file to use
"""
def __init__(self) -> None:
# delayed init because can be slow/heavy/non-picklable
self._bpe_model: tp.Any = None
def _init_model(self) -> tp.Any:
raise NotImplementedError
@property
def bpe_model(self) -> tp.Any:
if self._bpe_model is None:
self._bpe_model = self._init_model()
return self._bpe_model
# the following is needed for picklability
# (because fast_bpe is not picklable)
def __getstate__(self) -> tp.Dict[str, tp.Any]:
return utils.extract_dict(self, reset_keys=["_bpe_model"])
class FastBpe(BpeBase):
def __init__(self, code_path: tp.Optional[PathLike] = None) -> None:
super().__init__()
if code_path is None:
code_path = BPE_FOLDER / "codes"
assert Path(code_path).is_file(), f"{code_path} is not a file"
self.code_path = str(code_path)
def _init_model(self) -> tp.Any:
# pylint: disable=not-callable
return fastBPE.fastBPE(self.code_path) # type: ignore
def apply(self, tok_code: TokCode) -> str:
if not isinstance(tok_code, list):
raise TypeError("Tokenized code must be provided as a list")
tokens: tp.List[str] = self.bpe_model.apply(tok_code)
out = FastBPEMode.repair_bpe_for_obfuscation_line(" ".join(tokens))
return out
def revert(self, subtokens: str) -> TokCode:
return subtokens.replace("@@ ", "").split()
class StrSplit(Transform[str, TokCode]):
"""Splits a string on spaces"""
def apply(self, code: str) -> TokCode:
return code.split(" ")
def revert(self, tok_code: TokCode) -> str:
return " ".join(tok_code)
class SentencePieceTokenizer(Transform[str, str]):
"""Computes tokenization + BPE in one step"""
def __init__(self, code_path: tp.Optional[PathLike] = None) -> None:
# delayed init because can be slow/heavy/non-picklable
if code_path is None:
code_path = BPE_FOLDER.parent / "sentencepiece/sentencepiece_32k_v2/model"
self._bpe_model: tp.Any = None
self.code_path = Path(code_path)
assert self.code_path.is_file(), f"{self.code_path} doesn't exist"
def _init_model(self) -> tp.Any:
return spm.SentencePieceProcessor(model_file=str(self.code_path))
@property
def bpe_model(self) -> tp.Any:
if self._bpe_model is None:
self._bpe_model = self._init_model()
return self._bpe_model
@staticmethod
def repl(match: tp.Match[str]) -> str:
string = match.group(0)
out = string.replace(" ", "")
if match.group("next") is not None:
out = out[:-1] + " " + match.group("next")
if match.group("prev") is not None:
out = match.group("prev") + " " + out[1:]
out = out.replace("><", "> <")
return out
def apply(self, data: str) -> str:
out = " ".join(self.bpe_model.encode_as_pieces(data))
final = r"(?P<next>\S)?"
start = r"(?P<prev>\S)?"
for prefix in deobf.OBFUSCATED_PREFIXES:
pattern = start + f'{"( )?".join(prefix)}(( )?[0-9]+)+' + final
out = re.sub(pattern, self.repl, out)
pattern = start + "(< special [0-9]+ >)+" + final
out = re.sub(pattern, self.repl, out)
return out
def revert(self, subtokens: str) -> str:
return self.bpe_model.decode_pieces(subtokens.split(" "))
class RobertaBpe(BpeBase):
def __init__(
self, new_line: str = "<special9>", space_char: str = "<special8>"
) -> None:
super().__init__()
self.new_line = f" {new_line} "
self.spaces = _InitialSpaces(space_char=space_char)
def _init_model(self) -> tp.Any:
return RobertaTokenizer.from_pretrained("roberta-base")
def apply(self, tok_code: TokCode) -> str:
# TODO splitting on lines? so we should not use TokCode?
lines = self.spaces.apply(" ".join(tok_code)).split("\n")
lines = [self.bpe_model._tokenize(line.strip()) for line in lines]
repair = RobertaBPEMode.repair_bpe_for_obfuscation_line
lines = [repair(" ".join(line)) for line in lines]
out = self.new_line.join(lines)
return out
def revert(self, subtokens: str) -> TokCode:
out: str = restore_roberta_segmentation_sentence(subtokens) # type: ignore
out = out.replace(self.new_line.strip(), "\n")
return self.spaces.revert(out).split()
class BpeTensorizer(Transform[str, torch.Tensor]):
"""Converts from BPE to tensor
    Parameters
    ----------
vocab_path: str / Path
path to the vocab file to use
no_unk: bool
disallow unknown tokens
"""
def __init__(
self, vocab_path: tp.Optional[PathLike] = None, no_unk: bool = False
) -> None:
# check model preprocess.py
if vocab_path is None:
vocab_path = BPE_FOLDER / "vocab"
self.vocab_path = Path(vocab_path) # for representation
self.no_unk = no_unk
self.dico = dic.Dictionary.read_vocab(vocab_path)
def apply(self, subtokens: str) -> torch.Tensor:
tag = [dic.EOS_WORD] # </s>
sublist = tag + subtokens.split(" ") + tag
word2id = self.dico.word2id
unk = self.dico.unk_index
if self.no_unk:
data = [word2id[tok] for tok in sublist]
else:
data = [word2id.get(tok, unk) for tok in sublist]
return torch.LongTensor(data)
# simpler solution is slightly slower:
# return torch.LongTensor(
# [self.dico.index(tok, no_unk=self.no_unk) for tok in sublist]
# )
def revert(self, tensor: torch.Tensor) -> str:
tensor = tensor.squeeze()
if not tensor.ndim == 1:
raise ValueError(
f"Only 1-dimensional tensors can be processed (got {tensor.shape})."
)
# wid = [self.dico[val.item()] for val in tensor]
# return wid[: wid.index(dic.EOS_WORD)] if dic.EOS_WORD in wid else wid
# skip initial eos if present
start = 1 if tensor[0].item() == self.dico.eos_index else 0
iterator: tp.Iterator[str] = (self.dico[val.item()] for val in tensor[start:])
# with takewhile, we'll only fetch in the dict until we reach EOS
iterator = itertools.takewhile(lambda x: x != dic.EOS_WORD, iterator)
return " ".join(iterator)
class Tensorizer(Composition[TokCode, torch.Tensor]):
"""Converts from a tokenized string to a tensor, through BPE
Parameter
---------
bpe_folder: str / Path
folder containing a codes file for bpe and a vocab file for
tensorization.
no_unk: bool
disallow unknown tokens
"""
def __init__(
self, bpe_folder: tp.Optional[PathLike] = None, no_unk: bool = False
) -> None:
if bpe_folder is None:
bpe_folder = BPE_FOLDER
bpe_folder = Path(bpe_folder)
super().__init__(
FastBpe(bpe_folder / "codes"),
BpeTensorizer(bpe_folder / "vocab", no_unk=no_unk),
)
@property
def dico(self) -> dic.Dictionary:
return self.transforms[1].dico # type: ignore
class SentencePieceTensorizer(Composition[str, torch.Tensor]):
"""Converts from a tokenized string to a tensor, with sentencepiece
Parameter
---------
folder: str / Path
folder containing a codes file for model and a vocab file for
tensorization.
no_unk: bool
disallow unknown tokens
"""
def __init__(
self, folder: tp.Optional[PathLike] = None, no_unk: bool = False
) -> None:
tokenizer = SentencePieceTokenizer(
Path(folder) / "model" if folder is not None else None
)
super().__init__(
tokenizer,
BpeTensorizer(tokenizer.code_path.with_name("vocab"), no_unk=no_unk),
)
@property
def dico(self) -> dic.Dictionary:
return self.transforms[1].dico # type: ignore
class Dictifier(Transform[TokCode, tp.Dict[str, str]]):
"""Converts a list of tokens to the dict mapping mask_token -> result
Parameters
----------
separator: str
separator used for encoding the dict as a string
robust: bool
        if True, returns all entries that do parse instead of raising
Note
----
The order is different from the legacy one, to avoid VAR_10 following
VAR_1
"""
def __init__(self, separator: str = deobf.SEPARATOR, robust: bool = False) -> None:
super().__init__()
self.robust = robust
self.separator = separator
self._tok_order = {
n: k for k, n in enumerate(lputils.obfuscation_tokens(raise_finished=False))
}
def apply(self, code_tokens: TokCode) -> tp.Dict[str, str]:
if not isinstance(code_tokens, list):
raise TypeError("Only lists are accepted")
out = {}
for entry in " ".join(code_tokens).split(self.separator):
entry = entry.strip()
if not entry:
continue
try:
name, val = entry.split(" ", maxsplit=1)
except ValueError as e:
if not self.robust:
raise ValueError(f"Cannot split dict entry {entry!r}") from e
else:
out[name] = val
return out
def revert(self, dico: tp.Dict[str, str]) -> TokCode:
keys = list(dico)
keys.sort(key=lambda k: self._tok_order.get(k, -1))
return self.separator.join(f"{k} {dico[k]}" for k in keys).split(" ")
# ---- end of file: codegen_sources/dataloaders/transforms.py (repo: CodeGen-main) ----
# ---- file: codegen_sources/dataloaders/__init__.py (repo: CodeGen-main) is empty ----
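A usage sketch for the transform pipeline defined above. It assumes the default BPE codes and vocab files are present under data/bpe/cpp-java-python (the defaults FastBpe, BpeTensorizer and Tensorizer point at), which is why this is a sketch rather than something guaranteed to run out of the box; the variable names are illustrative.

from codegen_sources.dataloaders.transforms import (
    BpeTensorizer,
    CodeTokenizer,
    FastBpe,
    Tensorizer,
)

code = "def add(a, b):\n    return a + b"
# code -> tokens -> BPE string -> LongTensor (with </s> markers added by BpeTensorizer)
pipeline = CodeTokenizer("python").pipe(FastBpe()).pipe(BpeTensorizer())
tensor = pipeline.apply(code)
# revert walks the composition backwards: tensor -> BPE string -> tokens -> detokenized code
roundtrip = pipeline.revert(tensor)
print(roundtrip)  # approximately the original source (detokenization may normalize formatting)

# Tensorizer bundles the same FastBpe + BpeTensorizer composition, starting from tokenized code
tokens = CodeTokenizer("python").apply(code)
tensor2 = Tensorizer().apply(tokens)
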
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import time
import json
import gzip
import dataclasses
import contextlib
import typing as tp
from pathlib import Path
import submitit
import numpy as np
import torch
B = tp.TypeVar("B", bound="Batch")
D = tp.TypeVar("D", bound="DelayedReader")
M = tp.TypeVar("M", bound="Modulo")
X = tp.TypeVar("X")
PAD_INDEX = 2 # TODO CHECK
@dataclasses.dataclass
class Batch:
"""Batch instance with fields x, y of size [B, N] and x_len, y_len of size[B]
batch dimension and lengths can be omitted at instantiation, they will be
filled autmatically.
"""
x: torch.Tensor
y: torch.Tensor
x_len: torch.LongTensor = dataclasses.field(default_factory=torch.LongTensor)
y_len: torch.LongTensor = dataclasses.field(default_factory=torch.LongTensor)
# extra field for debugging
_ids: np.ndarray = dataclasses.field(
default_factory=lambda: np.array([], dtype=str)
)
    # it seems important to keep numpy arrays rather than Python lists: lists of
    # Python objects can cause copy-on-write memory blow-ups in DataLoader workers,
    # see https://github.com/pytorch/pytorch/issues/13246#issuecomment-445446603
def __post_init__(self) -> None:
for name in ["x", "y"]:
val = getattr(self, name)
# make sure to add a batch dimension if not there
if val.ndim == 1:
val = val.unsqueeze(0)
setattr(self, name, val)
# fill in sequence length
key = name + "_len"
if not getattr(self, key).numel():
setattr(self, key, torch.LongTensor([val.shape[1]]))
if not isinstance(self._ids, np.ndarray):
self._ids = np.array(self._ids)
def to(self, device: str) -> "Batch":
"""Creates a new instance on the appropriate device"""
out: tp.Dict[str, tp.Any] = {}
for field in dataclasses.fields(self):
data = getattr(self, field.name)
out[field.name] = data
if isinstance(data, torch.Tensor):
out[field.name] = data.to(device)
return self.__class__(**out)
def pin_memory(self: B) -> B:
for field in dataclasses.fields(self):
data = getattr(self, field.name)
if isinstance(data, torch.Tensor):
data.pin_memory()
return self
@classmethod
def collate_fn(cls, batches: tp.List["Batch"]) -> "Batch":
"""Creates a new instance from several by stacking in a new first dimension
for all attributes
"""
if not batches:
raise ValueError("No data to collate")
out: tp.Dict[str, tp.Any] = {}
for name in ["x", "y"]:
# concat lengths
key = name + "_len"
data = [getattr(mf, key) for mf in batches]
out[key] = torch.cat(data, dim=0) # type: ignore
# pad and concatenate data
data = [getattr(mf, name) for mf in batches]
max_len = max(d.shape[-1] for d in data)
batch_size = sum(d.shape[0] for d in data)
out[name] = torch.LongTensor(batch_size, max_len).fill_(PAD_INDEX)
start = 0
for d in data:
end = start + d.shape[0]
out[name][start:end, : d.shape[1]] = d
start = end
out["_ids"] = np.concatenate([b._ids for b in batches], axis=0)
return cls(**out)
def split(self) -> tp.Iterable["Batch"]:
if self.x.shape[0] == 1:
yield self # avoid recreating the object if it's already split
return
for k in range(self.x.shape[0]):
xl = self.x_len[k]
yl = self.y_len[k]
sl = slice(k, k + 1)
# prefill as much as possible to make it fast
out: tp.Dict[str, tp.Any] = dict(x_len=self.x_len[sl], y_len=self.y_len[sl])
if self._ids.size:
out["_ids"] = np.array([self._ids[k]])
yield Batch(x=self.x[sl, : int(xl)], y=self.y[sl, : int(yl)], **out)
class BatchOptimizer:
"""Batch iterable optimizing the batches to hold as many tokens as possible below the
maximum number,
This is done by pre-fetching a buffer of batches, sorting the sequences, choosing one
starting point then greedily grow the Batch with samples smaller and longer.
Samples which are beyond the maximum lenght are removed.
iterable: Iterable of Batch
iterable of Batch instances to pull from
max_num_tokens: int
maximum number of tokens allowed in a batch
buffer_size: int
size of the number of samples to use for creating new batches
"""
def __init__(
self,
iterable: tp.Iterable[Batch],
max_num_tokens: int,
max_sequence_length: int = 2048,
buffer_size: int = 100,
seed: int = 12,
) -> None:
self.iterable = iterable
self.rng = np.random.RandomState(seed)
self.max_sequence_length = min(max_sequence_length, max_num_tokens)
self.max_num_tokens = max_num_tokens
self._batches: tp.List[Batch] = []
self._buffer_size = buffer_size
self.removed = 0
self.timer = Timer()
@staticmethod
def _sort_key(batch: Batch) -> int:
return int(batch.x_len[0])
@staticmethod
def _get_num_tokens(batches: tp.List[Batch]) -> int:
max_len = max(b.x.shape[1] + b.y.shape[1] for b in batches)
num_batches = sum(b.x.shape[0] for b in batches)
return int(max_len) * num_batches
def _get_length(self, ind: int) -> int:
"""lengths of a sequence (x+y)
returns 0 if index is out of bond
"""
if ind < 0 or ind >= len(self._batches):
return 0
b = self._batches[ind]
        return b.x.shape[1] + b.y.shape[1]
def _extract(self) -> Batch:
# start = self.rng.choice(len(self._batches))
p = np.array([b.x_len[0] for b in self._batches], dtype=float)
p /= sum(p)
        # favor longer sequences: batches containing them hold fewer samples,
        # so with uniform sampling they would not be selected as often
start = self.rng.choice(len(self._batches), p=p)
bounds = [start, start + 1]
        # loop until we can no longer extend the window, either towards
        # smaller sequences or towards larger ones
lengths = [self._get_length(ind) for ind in [bounds[0] - 1, bounds[1]]]
tentatives = 0
while any(lengths):
tentatives += 1
if tentatives > len(self._batches):
raise RuntimeError(
"Batch creation failed to converge\n"
f"{lengths=} {bounds=} {len(self._batches)=}"
)
# either increase by smaller seq (ind=0)
# or larger (ind=1)
# give more weights to larger seq since they are less likely to get
# selected
p = np.array(lengths, dtype=float) / sum(lengths)
ind = self.rng.choice(2, p=p)
tentative = list(bounds)
tentative[ind] += -1 if not ind else 1
num_tokens = self._get_num_tokens(
self._batches[tentative[0] : tentative[1]]
)
if num_tokens < self.max_num_tokens:
bounds = tentative
new = bounds[0] - 1 if not ind else bounds[1]
lengths[ind] = self._get_length(new)
else:
lengths[ind] = 0
out = Batch.collate_fn(self._batches[bounds[0] : bounds[1]])
self._batches = self._batches[: bounds[0]] + self._batches[bounds[1] :]
return out
def __iter__(self) -> tp.Iterator[Batch]:
self._batches = []
for batch in self.timer.iter(self.iterable, inner="fetch"):
splitted = [
b
for b in batch.split()
if max(b.x.shape[-1], b.y.shape[-1]) <= self.max_sequence_length
and b.y_len[0]
]
# print("lenghts", [b.x_len for b in splitted])
self.removed += len(splitted) - int(batch.x.shape[0])
self._batches.extend(splitted)
self._batches.sort(key=self._sort_key)
while len(self._batches) > self._buffer_size:
with self.timer.timed("extract"):
out = self._extract()
yield out
while self._batches:
yield self._extract()
def extract_dict(obj: tp.Any, reset_keys: tp.Iterable[str]) -> tp.Dict[str, tp.Any]:
"""Extract the dict of a object and reset to None
some of the keys (after checking that they do exist).
This is useful for delayed instanciation of attributes which may
not support pickling.
"""
attributes = dict(obj.__dict__)
for key in reset_keys:
assert key in attributes
attributes[key] = None
return attributes
class DelayedReader:
"""Lazily opens files or process json lines.
DelayedReader instances have a code property and id property
which are filled on demand only to spare useless computation.
"""
def __init__(self, value: tp.Any, reader: str) -> None:
self._reader = reader
self._value = value
self._code: tp.Optional[str] = None
self._id: tp.Optional[str] = None
self._info: tp.Dict[str, tp.Any] = {}
@classmethod
def from_file(cls: tp.Type[D], filepath: tp.Union[str, Path]) -> D:
"""Load lazily from a path"""
return cls(value=filepath, reader="file")
@classmethod
def from_json_string(cls: tp.Type[D], string: bytes) -> D:
"""Load lazily from a json string with fields
repo_name, path and content
"""
if not isinstance(string, bytes):
raise TypeError("String must be provided as bytes")
return cls(value=string, reader="json_string")
def _read(self) -> None:
if self._code is not None:
return
if self._reader == "file":
self._code = Path(self._value).read_text("utf8")
self._id = "filepath:" + str(self._value)
return
if self._reader == "json_string":
data = json.loads(self._value)
try:
self._code = data["content"]
except KeyError as e:
raise ValueError(
f"Missing content field in the json_string: {data}"
) from e
if all(x in data for x in ["repo_name", "path"]):
self._id = data["repo_name"] + ":" + data["path"]
elif all(x in data for x in ["max_stars_repo_name", "max_stars_repo_path"]):
self._id = (
data["max_stars_repo_name"] + ":" + data["max_stars_repo_path"]
)
self._info = data
return
raise ValueError(f"Unknown specified reader {self._reader}")
@property
def licenses(self) -> tp.Optional[tp.List[str]]:
"""Return the licenses if available"""
out = self.info.get("license", None) # only one in big query
if out is None:
return self.info.get( # type: ignore
"max_stars_repo_licenses", self.info.get("licenses", None)
)
return [out]
@property
def code(self) -> str:
self._read()
return self._code # type: ignore
@property
def id(self) -> str:
self._read()
return self._id # type: ignore
@property
def info(self) -> tp.Dict[str, str]:
self._read()
return self._info # type: ignore
def __repr__(self) -> str:
try:
return f"DelayedReader<id={self.id},code={self.code}>"
except Exception: # pylint: disable=broad-except
return f"DelayedReader<UNREADABLE VALUE={self._value}>)"
class CodeWalker:
"""Utility for walking code files randomly while avoiding
loading too much into memory at once
Parameters
---------
extensions: list of str
extensions to read data from. Currently supports .py and .json.gz
buffer_size: int
        maximum buffer size for storing json.gz lines, so that they can be yielded
        from it in a random order
rng: RandomState or int
random state or seed for a random state
Returns
-------
DelayedReader
an object with fields code and id which loads the code lazily
"""
SUPPORTED_CODE_EXTENSIONS = (".py",)
def __init__(
self,
extensions: tp.List[str],
buffer_size: int = 1000,
rng: tp.Optional[tp.Union[int, np.random.RandomState]] = None,
) -> None:
if isinstance(rng, np.random.RandomState):
self.rng = rng
else:
self.rng = np.random.RandomState(rng)
self.extensions = tuple(extensions)
self.buffer_size = buffer_size
for ext in self.extensions:
if ext not in (".json.gz",) + self.SUPPORTED_CODE_EXTENSIONS:
raise ValueError(f"Extension {ext} is not supported")
def walk(self, input_path: tp.Union[str, Path]) -> tp.Iterator[DelayedReader]:
"""Walks a folder or a file and yields DelayedReader from it, in
a random order. Delayed reader have code and id fields that are fetched
on demand.
Parameter
---------
input_path: str or Path
path to the file to read or folder to walk
Yields
------
        DelayedReader
            a reader whose id and code fields are loaded lazily
"""
input_path = Path(input_path)
if not input_path.exists():
raise ValueError(f"Missing input path: {input_path}")
if input_path.is_file():
if not any(input_path.name.endswith(x) for x in self.extensions):
return
if input_path.is_dir():
if input_path.name[0] in (".", "_"):
return
if self.extensions == (".json.gz",):
# dive into sub-directories for json.gz files
# this is especially useful when a folder is a language
sub_paths = list(input_path.rglob(f"*{self.extensions[0]}"))
else:
sub_paths = list(input_path.iterdir())
self.rng.shuffle(sub_paths) # type: ignore
for sub_path in sub_paths:
yield from self.walk(sub_path)
elif input_path.suffix in self.SUPPORTED_CODE_EXTENSIONS:
yield DelayedReader.from_file(input_path)
elif input_path.name.endswith(".json.gz"):
yield from self._gzip_walk(input_path)
else:
raise ValueError(f"Unsupported extension {input_path.suffix}")
def _gzip_walk(self, input_path: Path) -> tp.Iterator[DelayedReader]:
"""Reads a json.gz code data and returns lines in a
random order (although not uniformly).
Lines are first added to a buffer until reaching a given
buffer size, and then it randomly yields from it while
replacing yielded lines with the new read lines.
"""
if not input_path.name.endswith("json.gz"):
raise RuntimeError("gzip_walk only works on json.gz files")
lines: tp.List[bytes] = []
with gzip.open(input_path, "rb") as f:
for line in f: # readlines() would load all data -> bad idea
line = line.strip()
if not line:
continue
if len(lines) < self.buffer_size:
lines.append(line)
else:
ind = self.rng.choice(len(lines))
yield DelayedReader.from_json_string(lines[ind])
lines[ind] = line
while lines:
ind = self.rng.choice(len(lines))
content = lines.pop(ind)
yield DelayedReader.from_json_string(content)
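# Minimal usage sketch (illustrative only, not part of the original API). The folder path
# is hypothetical; walk() yields DelayedReader objects in a (non-uniform) random order.
def _example_code_walker_usage() -> None:
    walker = CodeWalker(extensions=[".py"], buffer_size=100, rng=0)
    for reader in walker.walk(Path("path/to/some/python/repo")):
        print(reader.id)  # the code itself is only read when reader.code is accessed
        break  # only peek at the first randomly-selected file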
class Modulo:
"""Modulo-like object for identifying a task, with the index of the task,
and the mod (number of total tasks).
Calling this object on an index will return the modulo, and lets you select
what this task should work on.
Parameters
----------
index: int
index of the task
mod: int
total number of tasks
Note
----
Instances of this object can be created through from_env which checks slurm
and pytorch context
"""
def __init__(self, index: int, mod: int) -> None:
if index >= mod:
raise ValueError(f"Index {index} must be stricly lower than {mod}")
if index < 0:
raise ValueError("Index must be positive or null")
self.mod = mod
self.index = index
def __repr__(self) -> str:
name = self.__class__.__name__
return f"{name}(index={self.index}, mod={self.mod})"
def __call__(self, ind: int) -> bool:
out = ind % self.mod == self.index
return out
def __mul__(self: M, other: M) -> M:
mod = self.mod * other.mod
ind = self.mod * other.index + self.index
return self.__class__(ind, mod)
@classmethod
def from_env(cls: tp.Type[M]) -> M:
"""Creates a Modulo instance according to
slurm task and pytorch worker info
"""
out = cls(0, 1)
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
out *= cls(worker_info.id, worker_info.num_workers)
try:
env = submitit.JobEnvironment()
except RuntimeError:
pass
else:
out *= cls(env.global_rank, env.num_tasks)
return out
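# Minimal usage sketch (illustrative only, not part of the original API): it shows how a
# Modulo instance shards work across tasks, and how two sharding levels compose.
def _example_modulo_usage() -> None:
    shard = Modulo(index=1, mod=4)
    my_items = [i for i in range(20) if shard(i)]
    assert my_items == [1, 5, 9, 13, 17]
    # Composing dataloader-worker sharding with slurm-task sharding gives a finer split.
    combined = Modulo(0, 2) * Modulo(1, 4)
    assert combined.mod == 8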
class Timer:
def __init__(self) -> None:
self._starts: tp.Dict[str, float] = {}
self._durations = dict(self._starts)
def __contains__(self, key: str) -> bool:
return key in self._starts
def iter(
self, iterable: tp.Iterable[X], *, inner: str = "", outer: str = "",
) -> tp.Generator[X, None, None]:
iterator = iter(iterable)
while True:
if inner:
self.start(inner)
if outer:
                # can't rely on the standard approach because extract can happen in between
outer_start = time.time()
try:
val = next(iterator)
except StopIteration:
for key in (inner, outer):
if key:
self._starts.pop(key, None)
break
else:
if inner:
self.stop(inner)
yield val
if outer:
if outer in self._starts:
raise ValueError(f"Key {outer} already in use")
self._starts[outer] = outer_start
self.stop(outer)
@contextlib.contextmanager
def timed(self, key: str) -> tp.Iterator[None]:
self.start(key)
try:
yield
finally:
self.stop(key)
def start(self, *keys: str) -> "Timer":
for key in keys:
if key in self._starts:
raise ValueError(f"Key {key} already in use")
self._starts[key] = time.time()
return self
def stop(self, *keys: str) -> "Timer":
now = time.time()
for key in keys:
self._durations[key] = self._durations.get(key, 0) + now - self._starts[key]
del self._starts[key]
return self
def extract(self) -> tp.Dict[str, float]:
durations = self._durations
self._durations = {}
self._starts = {}
return durations
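# Minimal usage sketch (illustrative only, not part of the original API): the key names
# are arbitrary; extract() returns the accumulated durations and resets the timer.
def _example_timer_usage() -> None:
    timer = Timer()
    with timer.timed("sleep"):
        time.sleep(0.01)
    for _ in timer.iter(range(3), inner="loop_body", outer="between_items"):
        pass
    durations = timer.extract()
    assert "sleep" in durations and "loop_body" in durations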
def split_python_code(code: str) -> tp.Tuple[str, tp.List[str]]:
"""Split code between base-level def and class definitions
Parameter
---------
code: str
code to analyze
Returns
-------
str, List[str]
the header string, and the list of parts for the rest of the code
"""
lines = code.splitlines()
header: tp.Optional[str] = None
out = []
current: tp.List[str] = []
ready = False
for line in lines:
if line and line[0] not in (" ", "\t"):
if ready:
ready = False
out.append("\n".join(current))
current = []
if line.startswith(("def ", "class ", "@")):
if line.startswith(("def ", "class ")):
ready = True
if header is None:
header = "\n".join(current)
current = []
current.append(line)
out.append("\n".join(current))
if header is None:
header = ""
return header, out
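# Minimal worked example (illustrative only, not part of the original API) of the split
# performed above: the header keeps everything before the first top-level def/class.
def _example_split_python_code() -> None:
    code = "import math\ndef f(x):\n    return math.sqrt(x)\nclass A:\n    pass\n"
    header, parts = split_python_code(code)
    assert header == "import math"
    assert len(parts) == 2  # one part for f(), one for class A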
class PyAugmenter:
"""Modify the code by removing some parts and reordering base level class/functions
This can serve as data augmentation
Parameters
----------
keep_origin: float
probability to keep origin file unchanged
rng: np.random.RandomState
random state for reproducibility
"""
def __init__(
self, keep_origin: float = 0.3, rng: tp.Optional[np.random.RandomState] = None
) -> None:
if rng is None:
rng = np.random.RandomState()
self.p = keep_origin
self.rng = rng
def __call__(self, code: str) -> str:
"""Modify the code by reordering/removing base def/class definitions
Parameter
---------
code: str
code to analyze
Returns
-------
str
the updated code
"""
if self.rng.rand() < self.p:
return code
header, parts = split_python_code(code)
if len(parts) == 1:
return code
self.rng.shuffle(parts)
weights = np.array([float(k ** 2) for k in range(len(parts))])
weights /= weights.sum()
selected = int(self.rng.choice(range(len(parts)), p=weights))
return "\n".join([header] + parts[:selected])
|
CodeGen-main
|
codegen_sources/dataloaders/utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import errno
import math
import os
import re
import signal
import subprocess
import time
from functools import partial, wraps
from logging import getLogger
from pathlib import Path
import numpy as np
import uuid
import shutil
from codegen_sources.code_runners.code_runner import RUN_ROOT_DIR
from codegen_sources.code_runners.utils import GO_IMPORTS_PATH
from codegen_sources.model.src.utils import get_java_bin_path
from codegen_sources.preprocessing.lang_processors import LangProcessor
DEFAULT_IR_WORKDIR = RUN_ROOT_DIR.joinpath("ir_generation/tmp_tests_folder")
ERROR_MESSAGE = "subprocess error:"
CPP_TO_IR_COMMAND = "clang++ -c -emit-llvm -S -g1 -O0 {} -o {} -std=c++17 -Xclang -disable-O0-optnone -Wno-narrowing"
RUST_TO_IR_COMMAND = "rustc -C target-feature=-crt-static -C opt-level=z {} --crate-type={} --emit=llvm-ir -C debuginfo=1 -o {}"
JAVA_TO_IR_COMMAND = (
'export PATH="{}:$PATH"; {}bin/jlangc -cp {}jdk/out/classes {} -d {}'
)
GO_TO_IR_COMMAND = "llvm-goc -g -O0 -S -emit-llvm {} -o {}"
LANG_IMPORTS = {
"cpp": """#include <iostream>
#include <cstdlib>
#include <string>
#include <vector>
#include <fstream>
#include <iomanip>
#include <bits/stdc++.h>
using namespace std;""",
"java": """import java.util. *;
import java.lang.*;
""",
"python": r"import numpy as np \nimport math\nfrom math import *\nimport collections\nfrom collections import *\nimport heapq\nimport itertools\nimport random\nimport sys\n\n",
"go": """package main
func min(x int, y int) int {
if x < y {
return x
}
return y
}
func max(x int, y int) int {
if x > y {
return x
}
return y
}""",
"rust": """use std :: io :: { stdin , BufRead , BufReader } ;
use std::cmp::max;
use std::cmp::min;
""",
}
logger = getLogger()
class TimeoutError(BaseException):
pass
# From https://github.com/glample/ASMR/blob/master/src/utils.py
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
def decorator(func):
def _handle_timeout(repeat_id, signum, frame):
signal.signal(signal.SIGALRM, partial(_handle_timeout, repeat_id + 1))
signal.alarm(seconds)
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
old_signal = signal.signal(signal.SIGALRM, partial(_handle_timeout, 0))
old_time_left = signal.alarm(seconds)
assert type(old_time_left) is int and old_time_left >= 0
if 0 < old_time_left < seconds: # do not exceed previous timer
signal.alarm(old_time_left)
start_time = time.time()
try:
result = func(*args, **kwargs)
finally:
if old_time_left == 0:
signal.alarm(0)
else:
sub = time.time() - start_time
signal.signal(signal.SIGALRM, old_signal)
signal.alarm(max(0, math.ceil(old_time_left - sub)))
return result
return wraps(func)(wrapper)
return decorator
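# Minimal usage sketch (illustrative only, not part of the original API). The decorated
# function below is hypothetical; it shows the SIGALRM-based timeout aborting a call that
# exceeds its budget (Unix, main thread only).
@timeout(seconds=1)
def _example_slow_function(duration):
    time.sleep(duration)
    return "done"
def _example_timeout_usage():
    assert _example_slow_function(0.01) == "done"
    try:
        _example_slow_function(5)
    except TimeoutError:
        pass  # the alarm fired before the sleep completed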
def get_lang_processor(lang):
return LangProcessor.processors[lang]()
@timeout(120)
def demangle_names_cpp(full_file, verbose=False, timeout=120):
from codegen_sources.external_paths import LLVM_13_PATH
names_to_demangle = np.unique(re.findall(r"(?<=@)_\w+", full_file))
demangled = [
subprocess.check_output(
LLVM_13_PATH + f"llvm-cxxfilt {el}", shell=True, stderr=subprocess.DEVNULL
)
.decode()
.strip()
for el in names_to_demangle
]
demangled = [re.sub(r"\(.*\)", "", el) for el in demangled] # Remove types
pairs = list(zip(names_to_demangle, demangled))
for old, new in pairs:
full_file = re.sub(rf"(?<=@){old}(?=\W)", f'"{new}"', full_file)
return full_file
@timeout(120)
def demangle_names_rust(full_file):
from codegen_sources.external_paths import CARGO_PATH
names_to_demangle = np.unique(re.findall(r"(?<=@)_\w+", full_file))
demangled = [
subprocess.check_output(CARGO_PATH + f"rustfilt {el}", shell=True)
.decode()
.strip()
for el in names_to_demangle
]
pairs = list(zip(names_to_demangle, demangled))
for old, new in pairs:
new = new.split("::", 1)[-1]
full_file = re.sub(rf"(?<=@){old}(?=\W)", f'"{new}"', full_file)
return full_file
def demangle_names_jlang_name_only(mangled_name):
# From https://github.com/polyglot-compiler/JLang/blob/4cc09d966e2bdae814f21792811c34af589c8f77/compiler/src/jlang/util/JLangMangler.java
POLYGLOT_PREFIX = "Polyglot_"
UNDERSCORE_ESCAPE = "_1"
if not mangled_name.startswith(POLYGLOT_PREFIX):
return mangled_name
mangled_name = mangled_name.replace(POLYGLOT_PREFIX, "", 1)
mangled_name = mangled_name.replace(UNDERSCORE_ESCAPE, "~")
class_name, func_name, _, args_mangled = mangled_name.split("_", 3)
class_name = class_name.replace("~", "_")
func_name = func_name.replace("~", "_")
return f"{class_name}.{func_name}"
@timeout(120)
def demangle_names_java(full_file):
POLYGLOT_PREFIX = "Polyglot_"
names_to_demangle = np.unique(
[
el.rsplit(" ", 1)[1][1:]
for el in re.findall(
rf"(?<=\n)define .*? @{POLYGLOT_PREFIX}\w+", "\n" + full_file
)
]
)
demangled = [demangle_names_jlang_name_only(el).strip() for el in names_to_demangle]
pairs = list(zip(names_to_demangle, demangled))
for old, new in pairs:
full_file = re.sub(rf"(?<=@){old}(?=\W)", f'"{new}"', full_file)
# return full_file
return trim_java_file(full_file)
@timeout(120)
def demangle_names_go(full_file):
GO_PREFIX = "go_0"
names_to_demangle = set(
[
el.rsplit(" ", 1)[1][1:]
for el in re.findall(
rf"(?<=\n)define .*? @{GO_PREFIX}[\w.]+", "\n" + full_file
)
]
)
    demangled = [
        name.replace("go_0", "", 1).replace("__", "_") for name in names_to_demangle
    ]  # this form would return package.func
    # Keep only the bare function name (this overrides the list above)
    demangled = [name.rsplit(".", 1)[1] for name in names_to_demangle]
pairs = list(zip(names_to_demangle, demangled))
for old, new in pairs:
full_file = re.sub(rf"(?<=@){old}(?=\W)", f'"{new}"', full_file)
return full_file
def get_demangle_func(lang):
if lang == "rust":
return demangle_names_rust
elif lang == "java":
return demangle_names_java
elif lang == "go":
return demangle_names_go
else:
assert lang == "cpp"
return demangle_names_cpp
def multiple_replace(substitutions, string):
match_dict = {
re.sub(r"\(.*?\)", "", k, flags=re.DOTALL): v for k, v in substitutions.items()
}
def rep(match):
what_matched = match.group()
return match_dict.get(what_matched, what_matched)
return re.sub("|".join(substitutions.keys()), rep, string)
def remove_semicolumn_lines(full_file):
return re.sub("\n;.*(?=\n)", "", "\n" + full_file).strip()
@timeout(120)
def clean_file(full_file):
# Remove !tbaa ...
full_file = re.sub(", !tbaa ![0-9]+", "", full_file)
# Remove beginning of file
full_file = full_file.split("\n\n", 1)[1]
# Remove end of file (attributes etc)
full_file = full_file.rsplit("\n\n", 3)[0]
# Rename blocks
full_file = rename_blocks(full_file)
    # Remove semicolon comment lines
full_file = remove_semicolumn_lines(full_file)
return full_file
def rename_blocks(full_file):
    # Rename all blocks (entry, start, if.then, ...) to bb1, bb2, ...
all_blocks = [
el[:-1] for el in re.findall(r"(?<=\n)\w\S+", full_file) if el.endswith(":")
]
replace_dict = {
**{rf"(?<=\n){el}(?=\W)": f"bb{i + 1}" for i, el in enumerate(all_blocks)},
**{rf"%{el}(?=\W)": f"%bb{i + 1}" for i, el in enumerate(all_blocks)},
}
full_file = multiple_replace(replace_dict, full_file)
return full_file
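# Minimal worked example (illustrative only, not part of the original API) of the block
# renaming above, on a tiny hand-written IR snippet.
def _example_rename_blocks() -> None:
    ir = "define i32 @f() {\nentry:\n  br label %exit\nexit:\n  ret i32 0\n}"
    renamed = rename_blocks(ir)
    assert "bb1:" in renamed and "%bb2" in renamed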
def extract_lang_IR(lang_file, output_path, lang, verbose=False, timeout=120):
lang_dict = {
"cpp": extract_cpp_IR,
"go": extract_go_IR,
"java": extract_java_IR,
"rust": extract_rust_IR,
}
return lang_dict[lang](lang_file, output_path, verbose=verbose, timeout=timeout)
def extract_cpp_IR(cpp_file, output_path, verbose=False, timeout=120):
from codegen_sources.external_paths import LLVM_13_PATH
cmd = LLVM_13_PATH + "/" + CPP_TO_IR_COMMAND.format(cpp_file, output_path)
subprocess.check_call(
cmd,
shell=True,
timeout=timeout,
stderr=None if verbose else subprocess.DEVNULL,
stdout=None if verbose else subprocess.DEVNULL,
)
def extract_rust_IR_base(rust_file, output_path, crate, verbose=False, timeout=120):
from codegen_sources.external_paths import CARGO_PATH, LLVM_13_PATH
EXPORT_PATH = f"export PATH={LLVM_13_PATH}:$PATH; "
cmd = (
EXPORT_PATH
+ CARGO_PATH
+ "/"
+ RUST_TO_IR_COMMAND.format(rust_file, crate, output_path)
)
subprocess.check_call(
cmd,
shell=True,
timeout=timeout,
stderr=None if verbose else subprocess.DEVNULL,
stdout=None if verbose else subprocess.DEVNULL,
)
def extract_rust_IR(rust_file, output_path, verbose=False, timeout=120):
try:
# Timeout in seconds
extract_rust_IR_base(
rust_file, output_path, crate="bin", verbose=verbose, timeout=timeout
)
if verbose:
print(f"{rust_file} extracted with bin", flush=True)
except subprocess.CalledProcessError:
# Timeout in seconds
extract_rust_IR_base(
rust_file, output_path, crate="dylib", verbose=verbose, timeout=timeout
)
if verbose:
print(f"{rust_file} extracted with dylib", flush=True)
def extract_java_IR(java_file, output_path, verbose=False, timeout=120):
from codegen_sources.external_paths import JLANG_PATH, LLVM_5_PATH, LLVM_13_PATH
# We remove the "package" lines...
full_java_file = open(java_file).read()
full_java_file = re.sub(r"(\n|^)package \S+;(\n|$)", "", full_java_file)
with open(java_file, "w") as f:
f.write(full_java_file)
cmd = JAVA_TO_IR_COMMAND.format(
get_java_bin_path(),
JLANG_PATH,
JLANG_PATH,
java_file,
os.path.dirname(output_path),
)
output_channel = None if verbose else subprocess.DEVNULL
subprocess.check_call(
cmd, shell=True, timeout=timeout, stderr=output_channel, stdout=output_channel
)
    # JLang produces LLVM v5, which is not compatible with LLVM v14
# To make the conversion, we do LLVM v5 -> Bitcode -> LLVM v14, as Bitcode
# doesn't change between versions of LLVM
subprocess.check_call(
f"{LLVM_5_PATH}llvm-as {output_path}; rm {output_path}",
shell=True,
timeout=timeout,
stderr=output_channel,
stdout=output_channel,
)
file_wo_ext = str(output_path)[:-3]
subprocess.check_call(
f"{LLVM_13_PATH}llvm-dis {file_wo_ext}.bc; rm {file_wo_ext}.bc",
shell=True,
timeout=timeout,
stderr=output_channel,
stdout=output_channel,
)
def extract_go_IR(go_file, output_path, verbose=False, timeout=120):
from codegen_sources.external_paths import GOLLVM_PATH
output = None if verbose else subprocess.DEVNULL
_ = subprocess.check_call(
f"{GO_IMPORTS_PATH} -w {go_file}",
stdout=output,
stderr=output,
shell=True,
executable="/bin/bash",
)
subprocess.check_call(
GOLLVM_PATH + GO_TO_IR_COMMAND.format(go_file, output_path),
shell=True,
timeout=timeout,
stderr=output,
stdout=output,
)
def find_globals(ll_file):
with open(ll_file) as f:
globs = re.findall("(?<=\n@).*?(?= =)", "\n" + f.read())
return globs
@timeout(120)
def extract_function(
funcname, input_ll_file, output_ll_file, verbose=False, timeout=120
):
from codegen_sources.external_paths import LLVM_13_PATH
# --keep-const-init would keep ALL the globals, not only the ones we need
extract_command = ( # This says which globs we need but does not extract them
LLVM_13_PATH + "llvm-extract --func={} {} -S -o {}"
)
subprocess.check_call(
extract_command.format(funcname, input_ll_file, output_ll_file),
shell=True,
stderr=None if verbose else subprocess.DEVNULL,
stdout=None if verbose else subprocess.DEVNULL,
timeout=timeout,
)
globs = find_globals(output_ll_file)
init_globs = find_globals(input_ll_file)
globs = [g for g in globs if g in init_globs]
extract_w_globs_command = LLVM_13_PATH + "llvm-extract {} --func={} {} -S -o {}"
subprocess.check_call(
extract_w_globs_command.format(
" ".join([f"--glob={el}" for el in globs]),
funcname,
input_ll_file,
output_ll_file,
),
shell=True,
stderr=None if verbose else subprocess.DEVNULL,
stdout=None if verbose else subprocess.DEVNULL,
timeout=timeout,
)
def clean_mangled_funcs(mangled_funcs, full_file):
file_glob_variables = re.findall("(?<=@).*(?= =)", full_file)
mangled_funcs = np.setdiff1d(
mangled_funcs, file_glob_variables
) # The funcs that are global variables are aliases
mangled_funcs = [
el.split("@@")[0] for el in mangled_funcs
] # sometimes things like @@0 get added to the func names ?
mangled_funcs = [el.replace("$", r"\$") for el in mangled_funcs]
mangled_funcs = [el for el in mangled_funcs if "\\" not in el]
return mangled_funcs
def trim_java_file(full_java_file):
full_java_file = re.sub('(^|\n)%"?class.java_lang.*', "", full_java_file)
full_java_file = re.sub("(^|\n)%cdv_ty.*", "", full_java_file)
words_to_remove = [
"jni_JNIEnv",
"getGlobalMutexObject",
"jni_MonitorEnter",
"jni_MonitorExit",
]
for w in words_to_remove:
full_java_file = re.sub(f".*{w}.*\n?", "", full_java_file)
return full_java_file.strip()
def get_funcnames(code, lang_processor):
sa, cl = lang_processor.extract_functions(lang_processor.tokenize_code(code))
funcs = sa + cl
return [lang_processor.get_function_name(f) for f in funcs]
@timeout(120)
def _split_names(name, lang):
split = name.split('name: "')[1].split('"')[0].split("(")[0]
if lang == "java":
split = split.split("#")[-1]
elif lang == "go":
split = split.rsplit(".")[-1]
return split
def extract_relevant_functions(llfile, original_file_name, lang_processor, lang):
"""
Extract only functions defined in the original cpp file from the debug info
original_file_name example: `cpp_from_json_0_line_1096_Oz.cpp`
"""
debug = llfile.rsplit("\n\n", 1)[1]
files = re.findall("(?<=!).*!DIFile.*filename:.*", debug)
complete_file_names = [
re.search('(?<=filename: ").*?(?=")', el).group() for el in files
]
directory_names = [
re.search('(?<=directory: ").*?(?=")', el).group() for el in files
]
complete_file_names = [
os.path.join(directory_names[i], complete_file_names[i])
if directory_names[i]
else complete_file_names[i]
for i in range(len(complete_file_names))
]
file_names = [os.path.basename(el) for el in complete_file_names]
original_fn_noext = os.path.splitext(os.path.basename(original_file_name))[0]
relevant_files = [os.path.splitext(el)[0] == original_fn_noext for el in file_names]
# The exact original file name is the longest one because it contains the full path
complete_original_file_name = sorted(
[el for i, el in enumerate(complete_file_names) if relevant_files[i]], key=len
)[-1]
original_file = open(complete_original_file_name, "r", errors="ignore").read()
original_functions = set(
get_funcnames(original_file, lang_processor=lang_processor)
)
file_ids = [el.split(" ", 1)[0] for i, el in enumerate(files) if relevant_files[i]]
subprograms = re.findall(".*!DISubprogram.*", debug)
relevant_subprograms = [
el
for fid in file_ids
for el in subprograms
if f"file: !{fid}," in el
and 'name: "' in el
and _split_names(el, lang) in original_functions
]
indices_to_keep = [el.split(" ", 1)[0] for el in relevant_subprograms]
relevant_defines = sum(
[re.findall(f".* !dbg {itk} " + "{", llfile) for itk in indices_to_keep], []
)
mangled_funcs = extract_all_ll_funcnames("\n".join([""] + relevant_defines))
return clean_mangled_funcs(mangled_funcs, llfile)
def extract_all_ll_funcnames(full_file):
return [
el.split()[-1][1:]
for el in re.findall(r"(?<=\ndefine ).*? @.*?(?=\()", "\n" + full_file)
]
def source_file_to_cleaned_IR(
source_path,
lang,
work_dir=DEFAULT_IR_WORKDIR,
verbose=False,
timeout=120,
clean_dir=True,
):
hash_value = uuid.uuid4()
work_dir_full = Path(work_dir).joinpath(f"{str(int(time.time()))}_{hash_value}")
work_dir_full.mkdir(parents=True, exist_ok=True)
if verbose:
print("Work dir:", work_dir_full)
ir_path_file = work_dir_full.joinpath(
os.path.splitext(os.path.basename(source_path))[0]
).with_suffix(".ll")
extract_lang_IR(source_path, ir_path_file, lang, verbose, timeout)
if verbose:
with open(ir_path_file, "r") as f:
print(f"Full IR:\n{f.read()}")
lang_processor = get_lang_processor(lang)
full_file = open(ir_path_file, "r").read()
mangled_funcs = extract_relevant_functions(
full_file, source_path, lang_processor, lang
)
original_file_funcs_dir = os.path.join(work_dir_full, "ll_functions")
os.makedirs(original_file_funcs_dir, exist_ok=True)
all_output_funcs = []
for n_func, func in enumerate(mangled_funcs):
output_ll_file = os.path.join(original_file_funcs_dir, f"{n_func}.ll")
extract_function(func, ir_path_file, output_ll_file)
func_names = extract_all_ll_funcnames(open(output_ll_file, "r").read())
assert len(func_names) == 1 # Should be only one
full_file = open(output_ll_file, "r").read()
demangle_fn = get_demangle_func(lang)
all_output_funcs.append(demangle_fn(clean_file(full_file)))
if clean_dir and work_dir_full.is_dir():
shutil.rmtree(work_dir_full)
return all_output_funcs
def adapt_func_level(code: str, lang: str):
if lang == "java":
code = "public class UniqueFunc\n{" + code + "}"
if lang == "cpp":
code = re.sub("^inline ", "", code)
code = code.replace("public : ", "")
if lang == "rust":
code = re.sub("(?<!pub )fn ", "pub unsafe fn ", code)
if lang == "go":
assert not code.startswith(
"package "
), f"Code contains package, not at function level:\n{code}\n"
return "\n".join([LANG_IMPORTS[lang], code])
def code_to_ir(
input_code,
lang,
func_level=False,
verbose=False,
timeout=120,
work_dir=DEFAULT_IR_WORKDIR,
clean_dir=True,
):
work_dir = Path(work_dir)
hash_value = uuid.uuid4()
time_str = str(int(time.time()))
tmp_work_dir = work_dir.joinpath(f"{time_str}_{hash_value}")
output_file = tmp_work_dir.joinpath(f"tmp_code.{lang}")
if func_level:
input_code = adapt_func_level(input_code, lang)
tmp_work_dir.mkdir(parents=True, exist_ok=True)
with open(output_file, "w") as f:
f.write(input_code)
try:
out = source_file_to_cleaned_IR(
output_file, lang, verbose=verbose, timeout=timeout, clean_dir=clean_dir,
)
if clean_dir:
shutil.rmtree(tmp_work_dir)
if lang == "go" and func_level:
out = [
s
for s in out
if not re.search("^define (hidden )?i64 @main.(min|max)\(", s)
]
if len(out) != 1:
            logger.warning(
f"Language {lang}, found {len(out)} functions. Input was:\n {input_code}"
)
return out
except subprocess.CalledProcessError as e:
return [f"{ERROR_MESSAGE} {e}"]
except subprocess.TimeoutExpired:
return [f"{ERROR_MESSAGE} timeout"]
except TimeoutError:
return [f"{ERROR_MESSAGE} timeout"]
finally:
if clean_dir:
if tmp_work_dir.is_dir():
shutil.rmtree(tmp_work_dir)
def ir_had_errors(ir):
return ir.startswith(ERROR_MESSAGE)
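# Minimal usage sketch (illustrative only, not part of the original API). It assumes the
# external toolchains referenced above (clang/LLVM, gollvm, JLang, rustc) are installed
# and configured in codegen_sources.external_paths.
def _example_code_to_ir_usage() -> None:
    cpp_function = "int add(int a, int b) { return a + b; }"
    irs = code_to_ir(cpp_function, lang="cpp", func_level=True)
    if irs and not ir_had_errors(irs[0]):
        print(irs[0])  # cleaned, demangled LLVM IR of the single extracted function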
|
CodeGen-main
|
codegen_sources/IR_tools/utils_ir.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Translate sentences from the input stream.
# The model will be faster if sentences are sorted by length.
# Input sentences must have the same tokenization and BPE codes as the ones used in the model.
#
import os
import argparse
import typing as tp
from pathlib import Path
import sys
import torch
from codegen_sources.model.src.logger import create_logger
from codegen_sources.model.src.data.dictionary import (
Dictionary,
BOS_WORD,
EOS_WORD,
PAD_WORD,
UNK_WORD,
MASK_WORD,
)
from codegen_sources.model.src.utils import bool_flag
from codegen_sources.model.src.constants import SUPPORTED_LANGUAGES_FOR_TESTS
from codegen_sources.model.src.model import build_model
from codegen_sources.model.src.utils import AttrDict
import codegen_sources.dataloaders.transforms as transf
SUPPORTED_LANGUAGES = list(SUPPORTED_LANGUAGES_FOR_TESTS) + ["ir"]
logger = create_logger(None, 0)
def get_params():
"""
Generate a parameters parser.
"""
# parse parameters
parser = argparse.ArgumentParser(description="Translate sentences")
# model
parser.add_argument("--model_path", type=str, default="", help="Model path")
parser.add_argument(
"--src_lang",
type=str,
default="",
help=f"Source language, should be either {', '.join(SUPPORTED_LANGUAGES[:-1])} or {SUPPORTED_LANGUAGES[-1]}",
)
parser.add_argument(
"--tgt_lang",
type=str,
default="",
help=f"Target language, should be either {', '.join(SUPPORTED_LANGUAGES[:-1])} or {SUPPORTED_LANGUAGES[-1]}",
)
parser.add_argument(
"--BPE_path",
type=str,
default=str(
Path(__file__).parents[2].joinpath("data/bpe/cpp-java-python/codes")
),
help="Path to BPE codes.",
)
parser.add_argument(
"--beam_size",
type=int,
default=1,
help="Beam size. The beams will be printed in order of decreasing likelihood.",
)
parser.add_argument(
"--input", type=str, default=None, help="input path",
)
parser.add_argument(
"--gpu", type=bool_flag, default=True, help="input path",
)
parser.add_argument(
"--efficient_attn",
type=str,
default=None,
choices=["None", "flash", "cutlass", "fctls_bflsh", "auto"],
help="If set, uses efficient attention from xformers.",
)
parameters = parser.parse_args()
if parameters.efficient_attn == "None":
parameters.efficient_attn = None
return parameters
class Translator:
def __init__(self, model_path, BPE_path, gpu=True, efficient_attn=None) -> None:
self.gpu = gpu
# reload model
reloaded = torch.load(model_path, map_location="cpu")
        # change params of the reloaded model so that it will
        # reload its own weights and not the MLM or DOBF pretrained model
reloaded["params"]["reload_model"] = ",".join([str(model_path)] * 2)
reloaded["params"]["lgs_mapping"] = ""
reloaded["params"]["reload_encoder_for_decoder"] = False
self.reloaded_params = AttrDict(reloaded["params"])
self.reloaded_params["efficient_attn"] = efficient_attn
# build dictionary / update parameters
self.dico = Dictionary(
reloaded["dico_id2word"], reloaded["dico_word2id"], reloaded["dico_counts"]
)
assert self.reloaded_params.n_words == len(self.dico)
assert self.reloaded_params.bos_index == self.dico.index(BOS_WORD)
assert self.reloaded_params.eos_index == self.dico.index(EOS_WORD)
assert self.reloaded_params.pad_index == self.dico.index(PAD_WORD)
assert self.reloaded_params.unk_index == self.dico.index(UNK_WORD)
assert self.reloaded_params.mask_index == self.dico.index(MASK_WORD)
# build model / reload weights (in the build_model method)
encoder, decoder = build_model(self.reloaded_params, self.dico, self.gpu)
self.encoder = encoder[0]
self.decoder = decoder[0]
if gpu:
self.encoder.cuda()
self.decoder.cuda()
self.encoder.eval()
self.decoder.eval()
# reload bpe
if (
self.reloaded_params.get("roberta_mode", False)
or self.reloaded_params.get("tokenization_mode", "") == "roberta"
):
self.bpe_transf: transf.BpeBase = transf.RobertaBpe()
raise ValueError("This part has not be tested thoroughly yet")
else:
self.bpe_transf = transf.FastBpe(code_path=Path(BPE_path).absolute())
def translate(
self,
input_code,
lang1: str,
lang2: str,
suffix1: str = "_sa",
suffix2: str = "_sa",
n: int = 1,
beam_size: int = 1,
sample_temperature=None,
device=None,
tokenized=False,
detokenize: bool = True,
max_tokens: tp.Optional[int] = None,
length_penalty: float = 0.5,
max_len: tp.Optional[int] = None,
):
if device is None:
device = "cuda:0" if self.gpu else "cpu"
# Build language processors
assert lang1 in SUPPORTED_LANGUAGES, lang1
assert lang2 in SUPPORTED_LANGUAGES, lang2
bpetensorizer = transf.BpeTensorizer()
bpetensorizer.dico = self.dico # TODO: hacky
in_pipe: transf.Transform[tp.Any, torch.Tensor] = self.bpe_transf.pipe(
bpetensorizer
)
out_pipe = in_pipe
if not tokenized:
in_pipe = transf.CodeTokenizer(lang1).pipe(in_pipe)
if detokenize:
out_pipe = transf.CodeTokenizer(lang2).pipe(out_pipe)
lang1 += suffix1
lang2 += suffix2
avail_langs = list(self.reloaded_params.lang2id.keys())
for lang in [lang1, lang2]:
if lang not in avail_langs:
raise ValueError(f"{lang} should be in {avail_langs}")
with torch.no_grad():
lang1_id = self.reloaded_params.lang2id[lang1]
lang2_id = self.reloaded_params.lang2id[lang2]
# Create torch batch
x1 = in_pipe.apply(input_code).to(device)[:, None]
size = x1.shape[0]
len1 = torch.LongTensor(1).fill_(size).to(device)
if max_tokens is not None and size > max_tokens:
logger.info(f"Ignoring long input sentence of size {size}")
return [f"Error: input too long: {size}"] * max(n, beam_size)
langs1 = x1.clone().fill_(lang1_id)
# Encode
enc1 = self.encoder("fwd", x=x1, lengths=len1, langs=langs1, causal=False)
enc1 = enc1.transpose(0, 1)
if n > 1:
enc1 = enc1.repeat(n, 1, 1)
len1 = len1.expand(n)
# Decode
if max_len is None:
max_len = int(
min(self.reloaded_params.max_len, 3 * len1.max().item() + 10)
)
if beam_size == 1:
x2, len2 = self.decoder.generate(
enc1,
len1,
lang2_id,
max_len=max_len,
sample_temperature=sample_temperature,
)
else:
x2, len2, _ = self.decoder.generate_beam(
enc1,
len1,
lang2_id,
max_len=max_len,
early_stopping=False,
length_penalty=length_penalty,
beam_size=beam_size,
)
# Convert out ids to text
tok = []
for i in range(x2.shape[1]):
tok.append(out_pipe.revert(x2[:, i]))
return tok
if __name__ == "__main__":
# generate parser / parse parameters
params = get_params()
# check parameters
assert os.path.isfile(
params.model_path
), f"The path to the model checkpoint is incorrect: {params.model_path}"
assert params.input is None or os.path.isfile(
params.input
), f"The path to the input file is incorrect: {params.input}"
assert os.path.isfile(
params.BPE_path
), f"The path to the BPE tokens is incorrect: {params.BPE_path}"
assert (
params.src_lang in SUPPORTED_LANGUAGES
), f"The source language should be in {SUPPORTED_LANGUAGES}."
assert (
params.tgt_lang in SUPPORTED_LANGUAGES
), f"The target language should be in {SUPPORTED_LANGUAGES}."
# Initialize translator
translator = Translator(
params.model_path, params.BPE_path, params.gpu, params.efficient_attn
)
# read input code from stdin
input = (
open(params.input).read().strip()
if params.input is not None
else sys.stdin.read().strip()
)
print(f"Input {params.src_lang} function:")
print(input)
with torch.no_grad():
output = translator.translate(
input,
lang1=params.src_lang,
lang2=params.tgt_lang,
beam_size=params.beam_size,
)
print(f"Translated {params.tgt_lang} function:")
for out in output:
print("=" * 20)
print(out)
|
CodeGen-main
|
codegen_sources/model/translate.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Example: python preprocess.py data/vocab.txt data/train.txt
vocab.txt: one word and its count per line
"""
import os
import sys
from codegen_sources.model.src.data.dictionary import Dictionary
from codegen_sources.model.src.logger import create_logger
logger = create_logger(None, 0)
def XLM_preprocess(voc_path, txt_path, bin_path):
assert os.path.isfile(voc_path)
assert os.path.isfile(txt_path)
dico = Dictionary.read_vocab(voc_path)
logger.info("")
data = Dictionary.index_data(txt_path, bin_path, dico)
logger.info(
"%i words (%i unique) in %i sentences."
% (
len(data["sentences"]) - len(data["positions"]),
len(data["dico"]),
len(data["positions"]),
)
)
if len(data["unk_words"]) > 0:
logger.info(
"%i unknown words (%i unique), covering %.2f%% of the data."
% (
sum(data["unk_words"].values()),
len(data["unk_words"]),
sum(data["unk_words"].values())
* 100.0
/ (len(data["sentences"]) - len(data["positions"])),
)
)
if len(data["unk_words"]) < 30000:
for w, c in sorted(data["unk_words"].items(), key=lambda x: x[1])[::-1][
:30
]:
logger.info("%s: %i" % (w, c))
if __name__ == "__main__":
voc_path_arg = sys.argv[1]
txt_path_arg = sys.argv[2]
bin_path_arg = sys.argv[2] + ".pth"
XLM_preprocess(voc_path_arg, txt_path_arg, bin_path_arg)
|
CodeGen-main
|
codegen_sources/model/preprocess.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
from pathlib import Path
import sys
import fastBPE
import torch
from codegen_sources.model.src.logger import create_logger
from codegen_sources.model.src.utils import restore_roberta_segmentation_sentence
from codegen_sources.preprocessing import bpe_modes as modes
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.model.src.data.dictionary import (
Dictionary,
BOS_WORD,
EOS_WORD,
PAD_WORD,
UNK_WORD,
MASK_WORD,
)
from codegen_sources.model.src.model import build_model
from codegen_sources.model.src.utils import AttrDict
SUPPORTED_LANGUAGES = ["java", "python"]
logger = create_logger(None, 0)
def get_parser():
"""
Generate a parameters parser.
"""
# parse parameters
parser = argparse.ArgumentParser(description="Translate sentences")
# model
parser.add_argument("--model_path", type=str, default="", help="Model path")
parser.add_argument(
"--lang",
type=str,
default="",
help=f"Code language, should be either {', '.join(SUPPORTED_LANGUAGES[:-1])} or {SUPPORTED_LANGUAGES[-1]}",
)
parser.add_argument(
"--BPE_path",
type=str,
default=str(
Path(__file__).parents[2].joinpath("data/bpe/cpp-java-python/codes")
),
help="Path to BPE codes.",
)
parser.add_argument(
"--beam_size",
type=int,
default=1,
help="Beam size. The beams will be printed in order of decreasing likelihood.",
)
return parser
class Deobfuscator:
def __init__(self, model_path, BPE_path) -> None:
# reload model
reloaded = torch.load(model_path, map_location="cpu")
        # change params of the reloaded model so that it will
        # reload its own weights and not the MLM or DOBF pretrained model
reloaded["params"]["reload_model"] = ",".join([model_path] * 2)
reloaded["params"]["lgs_mapping"] = ""
reloaded["params"]["reload_encoder_for_decoder"] = False
self.reloaded_params = AttrDict(reloaded["params"])
# build dictionary / update parameters
self.dico = Dictionary(
reloaded["dico_id2word"], reloaded["dico_word2id"], reloaded["dico_counts"]
)
assert self.reloaded_params.n_words == len(self.dico)
assert self.reloaded_params.bos_index == self.dico.index(BOS_WORD)
assert self.reloaded_params.eos_index == self.dico.index(EOS_WORD)
assert self.reloaded_params.pad_index == self.dico.index(PAD_WORD)
assert self.reloaded_params.unk_index == self.dico.index(UNK_WORD)
assert self.reloaded_params.mask_index == self.dico.index(MASK_WORD)
# build model / reload weights (in the build_model method)
encoder, decoder = build_model(self.reloaded_params, self.dico)
self.encoder = encoder[0]
self.decoder = decoder[0]
self.encoder.cuda()
self.decoder.cuda()
self.encoder.eval()
self.decoder.eval()
# reload bpe
if (
getattr(self.reloaded_params, "roberta_mode", False)
or getattr(self.reloaded_params, "tokenization_mode", "") == "roberta"
):
self.bpe_model: modes.BPEMode = modes.RobertaBPEMode()
else:
self.bpe_model = modes.FastBPEMode(
codes=os.path.abspath(BPE_path), vocab_path=None
)
def deobfuscate(
self, input, lang, n=1, beam_size=1, sample_temperature=None, device="cuda:0",
):
# Build language processors
        assert lang in SUPPORTED_LANGUAGES, lang
lang_processor = LangProcessor.processors[lang](
root_folder=Path(__file__).parents[2].joinpath("tree-sitter")
)
obfuscator = lang_processor.obfuscate_code
tokenizer = lang_processor.tokenize_code
lang1 = lang + "_obfuscated"
lang2 = lang + "_dictionary"
lang1_id = self.reloaded_params.lang2id[lang1]
lang2_id = self.reloaded_params.lang2id[lang2]
assert (
lang1 in self.reloaded_params.lang2id.keys()
), f"{lang1} should be in {self.reloaded_params.lang2id.keys()}"
assert (
lang2 in self.reloaded_params.lang2id.keys()
), f"{lang2} should be in {self.reloaded_params.lang2id.keys()}"
print("Original Code:")
print(input)
input = obfuscator(input)[0]
print("Obfuscated Code:")
print(input)
with torch.no_grad():
# Convert source code to ids
tokens = [t for t in tokenizer(input)]
print(f"Tokenized {lang} function:")
print(tokens)
tokens = self.bpe_model.apply_bpe(" ".join(tokens))
tokens = self.bpe_model.repair_bpe_for_obfuscation_line(tokens)
print(f"BPE {params.lang} function:")
print(tokens)
tokens = ["</s>"] + tokens.split() + ["</s>"]
input = " ".join(tokens)
# Create torch batch
len1 = len(input.split())
len1 = torch.LongTensor(1).fill_(len1).to(device)
x1 = torch.LongTensor([self.dico.index(w) for w in input.split()]).to(
device
)[:, None]
langs1 = x1.clone().fill_(lang1_id)
# Encode
enc1 = self.encoder("fwd", x=x1, lengths=len1, langs=langs1, causal=False)
enc1 = enc1.transpose(0, 1)
if n > 1:
enc1 = enc1.repeat(n, 1, 1)
len1 = len1.expand(n)
# Decode
if beam_size == 1:
x2, len2 = self.decoder.generate(
enc1,
len1,
lang2_id,
max_len=int(
min(self.reloaded_params.max_len, 3 * len1.max().item() + 10)
),
sample_temperature=sample_temperature,
)
else:
x2, len2, _ = self.decoder.generate_beam(
enc1,
len1,
lang2_id,
max_len=int(
min(self.reloaded_params.max_len, 3 * len1.max().item() + 10)
),
early_stopping=False,
length_penalty=1.0,
beam_size=beam_size,
)
# Convert out ids to text
tok = []
for i in range(x2.shape[1]):
wid = [self.dico[x2[j, i].item()] for j in range(len(x2))][1:]
wid = wid[: wid.index(EOS_WORD)] if EOS_WORD in wid else wid
if (
getattr(self.reloaded_params, "roberta_mode", False)
or getattr(self.reloaded_params, "tokenization_mode", "")
== "roberta"
):
tok.append(restore_roberta_segmentation_sentence(" ".join(wid)))
else:
tok.append(" ".join(wid).replace("@@ ", ""))
results = []
for t in tok:
results.append(t)
return results
if __name__ == "__main__":
# generate parser / parse parameters
parser = get_parser()
params = parser.parse_args()
# check parameters
assert os.path.isfile(
params.model_path
), f"The path to the model checkpoint is incorrect: {params.model_path}"
assert os.path.isfile(
params.BPE_path
), f"The path to the BPE tokens is incorrect: {params.BPE_path}"
assert (
params.lang in SUPPORTED_LANGUAGES
), f"The source language should be in {SUPPORTED_LANGUAGES}."
# Initialize translator
deobfuscator = Deobfuscator(params.model_path, params.BPE_path)
# read input code from stdin
input = sys.stdin.read().strip()
with torch.no_grad():
output = deobfuscator.deobfuscate(
input, lang=params.lang, beam_size=params.beam_size,
)
for out in output:
print("=" * 20)
print(out)
|
CodeGen-main
|
codegen_sources/model/deobfuscate.py
|
CodeGen-main
|
codegen_sources/model/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import random
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parents[2]))
import codegen_sources
from codegen_sources.model.src.constants import TOKENIZATION_MODES
from codegen_sources.model.src.data.loader import check_data_params, load_data
from codegen_sources.model.src.evaluation.evaluator import (
EncDecEvaluator,
SingleEvaluator,
)
from codegen_sources.model.src.model import (
build_classifier,
build_model,
check_model_params,
)
from codegen_sources.model.src.slurm import init_distributed_mode, init_signal_handler
from codegen_sources.model.src.trainer import EncDecTrainer, SingleTrainer
from codegen_sources.model.src.utils import (
bool_flag,
initialize_exp,
print_memory,
set_sampling_probs,
shuf_order,
)
def get_parser():
"""
Generate a parameters parser.
"""
# parse parameters
parser = argparse.ArgumentParser(description="Language transfer")
# main parameters
parser.add_argument(
"--dump_path", type=str, default="./dumped/", help="Experiment dump path"
)
parser.add_argument("--exp_name", type=str, default="", help="Experiment name")
parser.add_argument(
"--save_periodic",
type=int,
default=0,
help="Save the model periodically (0 to disable)",
)
parser.add_argument("--exp_id", type=str, default="", help="Experiment ID")
# float16 / AMP API
parser.add_argument(
"--fp16", type=bool_flag, default=False, help="Run model with float16"
)
parser.add_argument(
"--efficient_attn",
type=str,
default=None,
choices=["flash", "cutlass", "fctls_bflsh", "auto"],
help="If set, uses efficient attention from xformers. Flash attention only works on A100 GPUs.",
)
parser.add_argument(
"--amp",
type=int,
default=-1,
help="Use AMP wrapper for float16 / distributed / gradient accumulation. Level of optimization. -1 to disable.",
)
parser.add_argument(
"--apex",
type=bool_flag,
default=False,
help="Whether to use apex for fp16 computation. By default, torch amp.",
)
# only use an encoder (use a specific decoder for machine translation)
parser.add_argument(
"--encoder_only", type=bool_flag, default=True, help="Only use an encoder"
)
# model parameters
parser.add_argument("--emb_dim", type=int, default=512, help="Embedding layer size")
parser.add_argument(
"--emb_dim_encoder", type=int, default=0, help="Embedding layer size"
)
parser.add_argument(
"--emb_dim_decoder", type=int, default=0, help="Embedding layer size"
)
parser.add_argument(
"--n_layers", type=int, default=4, help="Number of Transformer layers"
)
parser.add_argument(
"--n_layers_encoder",
type=int,
default=0,
help="Number of Transformer layers for the encoder",
)
parser.add_argument(
"--n_layers_decoder",
type=int,
default=0,
help="Number of Transformer layers for the decoder",
)
parser.add_argument(
"--n_heads", type=int, default=8, help="Number of Transformer heads"
)
parser.add_argument("--dropout", type=float, default=0, help="Dropout")
parser.add_argument(
"--attention_dropout",
type=float,
default=0,
help="Dropout in the attention layer",
)
parser.add_argument(
"--gelu_activation",
type=bool_flag,
default=False,
help="Use a GELU activation instead of ReLU",
)
parser.add_argument(
"--share_inout_emb",
type=bool_flag,
default=True,
help="Share input and output embeddings",
)
parser.add_argument(
"--sinusoidal_embeddings",
type=bool_flag,
default=False,
help="Use sinusoidal embeddings",
)
parser.add_argument(
"--layer_dropout", type=float, default=0.0, help="Layer dropout ratio"
)
parser.add_argument(
"--min_layers",
type=int,
default=2,
help="Minimum number of layers remaining after layer dropout",
)
# CAPE relative embeddings
parser.add_argument(
"--cape_embeddings",
type=bool_flag,
default=False,
help="Use CAPE embeddings https://arxiv.org/pdf/2106.03143.pdf",
)
parser.add_argument(
"--cape_global_shift",
type=float,
default=5.0,
help="CAPE global shift parameter",
)
parser.add_argument(
"--cape_local_shift",
type=float,
default=0.5,
help="CAPE local shift parameter, above 0.5 token ordering is not preserved",
)
parser.add_argument(
"--cape_global_scaling",
type=float,
default=1.0,
help="CAPE max global scaling parameter. At 1, no scaling",
)
parser.add_argument(
"--discrete_cape_max",
type=int,
default=0,
help="Discrete cape global shift maximum. At 0, no shift.",
)
parser.add_argument(
"--use_lang_emb", type=bool_flag, default=True, help="Use language embedding"
)
# causal language modeling task parameters
parser.add_argument(
"--context_size",
type=int,
default=0,
help="Context size (0 means that the first elements in sequences won't have any context)",
)
# masked language modeling task parameters
parser.add_argument(
"--word_pred",
type=float,
default=0.15,
help="Fraction of words for which we need to make a prediction in MLM.",
)
parser.add_argument(
"--sample_alpha",
type=float,
default=0,
help="Exponent for transforming word counts to probabilities (~word2vec sampling)",
)
parser.add_argument(
"--word_mask_keep_rand",
type=str,
default="0.8,0.1,0.1",
help="Fraction of words to mask out / keep / randomize, among the words to predict",
)
parser.add_argument(
"--mask_length",
type=str,
default="",
help="Length distribution of the masked spans. "
"No span masking if kept empty. Constant if integer. Poisson if 'poisson'",
)
parser.add_argument(
"--poisson_lambda",
type=float,
default=3.0,
help="Parameter of the poisson distribution for span length",
)
# input sentence noise
parser.add_argument(
"--word_shuffle",
type=float,
default=0,
help="Randomly shuffle input words (0 to disable)",
)
parser.add_argument(
"--word_dropout",
type=float,
default=0,
help="Randomly dropout input words (0 to disable)",
)
parser.add_argument(
"--word_blank",
type=float,
default=0,
help="Randomly blank input words (0 to disable)",
)
# data
parser.add_argument("--data_path", type=str, default="", help="Data path")
parser.add_argument(
"--lgs", type=str, default="", help="Languages (lg1-lg2-lg3 .. ex: en-fr-es-de)"
)
parser.add_argument(
"--lgs_mapping",
type=str,
default="",
help="Map the lngs to pretrained lgs, java_sa:java_obfuscated"
"then the emb of java_sa in this XP will be mapped to the emb of java_obfuscated in pretrained model",
)
parser.add_argument(
"--lgs_id_mapping",
type=str,
default="",
help="Map the in or out language id of some languages to others for mt_steps "
"for instance 'java_np:java_buggy-java_resolved' means java_np gets the "
"same language embeddings as java_buggy for input sentences and java_resolved "
"for output sentences. Different mappings separated by commas",
)
parser.add_argument(
"--max_vocab",
type=int,
default=-1,
help="Maximum vocabulary size (-1 to disable)",
)
parser.add_argument(
"--min_count", type=int, default=0, help="Minimum vocabulary count"
)
parser.add_argument(
"--lg_sampling_factor", type=float, default=-1, help="Language sampling factor"
)
parser.add_argument(
"--has_sentence_ids",
type=str,
default="",
help="Datasets with parallel sentence ids. Datasets separated by ,. "
"Example 'valid|para,train|lang1 if all parallel valid datasets and train lang1 datasets have ids",
)
# batch parameters
parser.add_argument(
"--bptt", type=int, default=256, help="Sequence length for stream dataset"
)
parser.add_argument(
"--max_len",
type=int,
default=100,
help="Maximum length of sentences (after BPE)",
)
parser.add_argument(
"--group_by_size",
type=bool_flag,
default=True,
help="Sort sentences by size during the training",
)
parser.add_argument(
"--batch_size", type=int, default=32, help="Number of sentences per batch"
)
parser.add_argument(
"--max_batch_size",
type=int,
default=0,
help="Maximum number of sentences per batch (used in combination with tokens_per_batch, 0 to disable)",
)
parser.add_argument(
"--tokens_per_batch", type=int, default=-1, help="Number of tokens per batch"
)
parser.add_argument(
"--eval_tokens_per_batch",
type=int,
default=None,
help="Number of tokens per batch for evaluation. By default, same as for training.",
)
parser.add_argument(
"--gen_tpb_multiplier",
type=int,
default=1,
help="Multiplier of token per batch during generation when doing back translation. Typically 4",
)
# training parameters
parser.add_argument(
"--split_data",
type=bool_flag,
default=False,
help="Split data across workers of a same node",
)
parser.add_argument(
"--split_data_accross_gpu",
type=str,
default="local",
help="Split data across GPU locally or globally. Set 'local' or 'global'",
)
parser.add_argument(
"--optimizer",
type=str,
default="adam,lr=0.0001",
help="Optimizer (SGD / RMSprop / Adam, etc.)",
)
parser.add_argument(
"--clip_grad_norm",
type=float,
default=1,
help="Clip gradients norm (0 to disable)",
)
parser.add_argument(
"--epoch_size",
type=int,
default=100000,
help="Epoch size / evaluation frequency (-1 for parallel data size)",
)
parser.add_argument(
"--max_epoch", type=int, default=100000, help="Maximum epoch size"
)
parser.add_argument(
"--stopping_criterion",
type=str,
default="",
help="Stopping criterion, and number of non-increase before stopping the experiment",
)
parser.add_argument(
"--validation_metrics", type=str, default="", help="Validation metrics"
)
parser.add_argument(
"--accumulate_gradients",
type=int,
default=1,
help="Accumulate model gradients over N iterations (N times larger batch sizes)",
)
parser.add_argument(
"--add_eof_to_stream",
type=bool_flag,
default=False,
help="Whether to add </s> at the beginning "
"of every sentence in steam datasets."
"It matters for MLM.",
)
# training coefficients
parser.add_argument(
"--lambda_mlm", type=str, default="1", help="Prediction coefficient (MLM)"
)
parser.add_argument(
"--lambda_clm", type=str, default="1", help="Causal coefficient (LM)"
)
parser.add_argument("--lambda_ae", type=str, default="1", help="AE coefficient")
parser.add_argument("--lambda_tae", type=str, default="1", help="TAE coefficient")
parser.add_argument("--lambda_mt", type=str, default="1", help="MT coefficient")
parser.add_argument(
"--lambda_do", type=str, default="1", help="Deobfuscation coefficient"
)
parser.add_argument("--lambda_bt", type=str, default="1", help="BT coefficient")
parser.add_argument(
"--lambda_st", type=str, default="1", help="Self-training coefficient"
)
parser.add_argument(
"--lambda_classif",
type=str,
default="1",
help="Classificationlambda coefficient - can have one per pair of lang/label - format 'lang1-label1::lambda / lang2-label2::lambda / lambda' or 'lang1-label1::lambda / lang2-label2::lambda' or 'lambda'",
)
# training steps
parser.add_argument(
"--clm_steps", type=str, default="", help="Causal prediction steps (CLM)"
)
parser.add_argument(
"--mlm_steps", type=str, default="", help="Masked prediction steps (MLM / TLM)"
)
parser.add_argument(
"--mt_steps", type=str, default="", help="Machine translation steps"
)
parser.add_argument(
"--cmt_steps",
type=str,
default="",
help="Conditioned machine translation steps",
)
parser.add_argument(
"--disc_steps", type=str, default="", help="Discriminator training steps"
)
parser.add_argument("--do_steps", type=str, default="", help="Deobfuscation steps")
parser.add_argument(
"--obf_proba",
type=float,
default=0.5,
help="For Deobfuscation steps, probability of obsfuscation. If = 1 everything is obfuscated, 0 only one variable.",
)
parser.add_argument(
"--st_steps", type=str, default="", help="Self trainings teps using unit tests"
)
parser.add_argument(
"--ae_steps", type=str, default="", help="Denoising auto-encoder steps"
)
parser.add_argument(
"--tae_steps",
type=str,
default="",
help="Concatenated denoising auto-encoding steps",
)
parser.add_argument(
"--bt_steps", type=str, default="", help="Back-translation steps"
)
parser.add_argument(
"--mt_spans_steps",
type=str,
default="",
help="Machine translation steps. Format for one step is lang1-lang2-span. Steps are separated by commas.",
)
parser.add_argument(
"--spans_emb_encoder",
type=bool_flag,
default=False,
help="Whether to use span embeddings in the encoder",
)
parser.add_argument(
"--classif_steps", type=str, default="", help="Classification steps"
)
# reload pretrained embeddings / pretrained model / checkpoint
parser.add_argument(
"--reload_emb", type=str, default="", help="Reload pretrained word embeddings"
)
parser.add_argument(
"--reload_model", type=str, default="", help="Reload a pretrained model"
)
parser.add_argument(
"--reload_encoder_attn_on_decoder",
type=bool_flag,
default=False,
help="If true, reload encoder attention on decoder if there is no pre-trained decoder.",
)
parser.add_argument(
"--reload_encoder_for_decoder",
type=bool_flag,
default=False,
help="Reload a the encoder of the pretrained model for the decoder.",
)
parser.add_argument(
"--tokenization_mode",
type=str,
default="fastbpe",
choices=TOKENIZATION_MODES,
help="Type of tokenization, can be fastbpe, roberta or sentencepiece"
"If we reload a pretrained roberta, need to put this params to True that positions idx are computed in the roberta way and use gelu.",
)
parser.add_argument(
"--sentencepiece_model_path",
type=Path,
default=Path(codegen_sources.__file__).resolve().parents[1]
/ "data"
/ "bpe"
/ "sentencepiece"
/ "sentencepiece_32k_v2"
/ "model",
help="Path to sentencepiece model. Only used if tokenization_mode is set to 'sentencepiece'",
)
parser.add_argument(
"--reload_checkpoint", type=str, default="", help="Reload a checkpoint"
)
# beam search (for MT only)
parser.add_argument(
"--beam_size",
type=int,
default=1,
help="Beam size, default = 1 (greedy decoding)",
)
parser.add_argument(
"--length_penalty",
type=float,
default=1,
help="Length penalty, values < 1.0 favor shorter sentences, while values > 1.0 favor longer ones.",
)
parser.add_argument(
"--early_stopping",
type=bool_flag,
default=False,
help="Early stopping, stop as soon as we have `beam_size` hypotheses, although longer ones may have better scores.",
)
# sampling at eval time
parser.add_argument(
"--number_samples",
type=int,
default=1,
help="Number of examples to sample (default = 1)",
)
parser.add_argument(
"--eval_temperature",
type=float,
default=None,
help="Evaluation temperature when using several samples",
)
# BT parameters
parser.add_argument(
"--bt_sample_temperature",
type=str,
default="0",
help="At BT training, sample temperature for generation",
)
parser.add_argument(
"--bt_max_len",
type=int,
default=None,
help="At BT max length of generations. Will be max_len by default.",
)
# ST parameters
parser.add_argument(
"--st_sample_temperature",
type=str,
default="0",
help="At ST training, sample temperature for generation",
)
parser.add_argument(
"--st_sample_cache_ratio",
type=str,
default="2",
help="At ST training, probability to sample from cache. If integer, sampling deterministically n times for each creation step",
)
parser.add_argument(
"--st_limit_tokens_per_batch",
type=bool_flag,
default=True,
help="At ST training, whether to limit batch size based on tokens per batch",
)
parser.add_argument(
"--st_sample_size",
type=int,
default=1,
help="Batch size for data sampled from cache",
)
parser.add_argument(
"--st_remove_proba",
type=float,
default=0.0,
help="Proba to remove sampled elements from cache",
)
parser.add_argument(
"--cache_warmup",
type=int,
default=500,
help="Batch size for data sampled from cache",
)
parser.add_argument(
"--robin_cache",
type=bool_flag,
default=False,
help="Whether to use the round robin cache",
)
parser.add_argument(
"--st_min_asserts",
type=str,
default="2",
help="Minimum number of asserts for the unit tests",
)
parser.add_argument(
"--st_show_stats",
type=bool,
default=False,
help="Whether to show stats about the created tests",
)
parser.add_argument(
"--st_min_mutation_score",
type=str,
default="0.9",
help="Minimum mutation score for the unit tests",
)
parser.add_argument(
"--st_refresh_iterator_rate",
type=int,
default=-1,
help="rate for refreshing the iterator taking new cutoff rate into account",
)
parser.add_argument(
"--unit_tests_path",
type=str,
default="",
help="path to the json file containing the unit tests and scores",
)
parser.add_argument(
"--cache_size",
type=int,
default=20000,
help="Size of the cache for round robin cache",
)
parser.add_argument(
"--cache_init_path",
type=str,
default="",
help="path to files to use to initialize the cache",
)
# ST beam size
parser.add_argument(
"--st_beam_size", type=str, default="1", help="At ST training: beam size",
)
    # ST length penalty
parser.add_argument(
"--st_length_penalty",
type=float,
default=0.5,
help="Length penalty for generating elements",
)
# ST test timeout
parser.add_argument(
"--st_test_timeout",
type=int,
default=15,
help="Timeout for the test runner running the unit tests",
)
# Classification parameters
parser.add_argument(
"--n_classes_classif",
type=int,
default=0,
help="Number of classes for classification steps.",
)
parser.add_argument(
"--reload_classifier",
type=str,
default="",
help="Reload pretrained classifier.",
)
# evaluation
parser.add_argument(
"--eval_bleu",
type=bool_flag,
default=False,
help="Evaluate BLEU score during MT training",
)
parser.add_argument(
"--eval_bt_pairs",
type=bool_flag,
default=True,
help="Whether to evaluate on BT language pairs",
)
parser.add_argument(
"--eval_denoising",
type=bool_flag,
default=False,
help="Whether to evaluate the model for denoising",
)
parser.add_argument(
"--eval_subtoken_score",
type=bool_flag,
default=False,
help="Evaluate subtoken score during MT training",
)
parser.add_argument(
"--eval_bleu_test_only",
type=bool_flag,
default=False,
help="Evaluate BLEU score during MT training",
)
parser.add_argument(
"--eval_computation",
type=str,
default="",
help="Check if the generated function is compilable, and if it returns the same output as ground truth.",
)
parser.add_argument(
"--eval_ir_similarity",
type=str,
default="",
help="Check BLEU similarity on the recomputed IR",
)
parser.add_argument(
"--eval_computation_pivot",
type=str,
default="",
help="Check if the generated function is compilable, and if it returns the same output as ground truth.",
)
parser.add_argument(
"--pivot_bpe_model",
type=str,
default="",
help="Check if the generated function is compilable, and if it returns the same output as ground truth.",
)
parser.add_argument(
"--eval_st",
type=bool_flag,
default=False,
help="Whether to evaluate on self-generated tests with evosuite.",
)
parser.add_argument(
"--translation_eval_set",
type=str,
default="GfG",
choices=["GfG", "CodeNet"],
help="Evaluation set for translation. Supported eval sets are GfG and CodeNet right now.",
)
parser.add_argument(
"--generate_hypothesis",
type=bool_flag,
default=False,
help="generate hypothesis for test/valid mono dataset",
)
parser.add_argument(
"--eval_only", type=bool_flag, default=False, help="Only run evaluations"
)
parser.add_argument(
"--eval_beginning",
type=bool_flag,
default=False,
help="Eval at the beginning of training",
)
parser.add_argument(
"--train_only", type=bool_flag, default=False, help="Run no evaluation"
)
parser.add_argument(
"--retry_mistmatching_types",
type=bool_flag,
default=False,
help="Retry with wrapper at eval time when the types do not match",
)
parser.add_argument(
"--n_sentences_eval",
type=int,
default=1500,
help="Number of sentences for evaluation",
)
# debug
parser.add_argument(
"--debug_train",
type=bool_flag,
default=False,
help="Use valid sets for train sets (faster loading)",
)
parser.add_argument(
"--debug_slurm",
type=bool_flag,
default=False,
help="Debug multi-GPU / multi-node within a SLURM job",
)
parser.add_argument("--debug", help="Enable all debug flags", action="store_true")
# multi-gpu / multi-node
parser.add_argument(
"--local_rank", type=int, default=-1, help="Multi-GPU - Local rank"
)
parser.add_argument(
"--master_port",
type=int,
default=-1,
help="Master port (for multi-node SLURM jobs)",
)
parser.add_argument(
"--separate_decoders",
type=bool_flag,
default=False,
help="Use a separate decoder for each language",
)
parser.add_argument(
"--n_share_dec", type=int, default=0, help="Number of decoder layers to share"
)
return parser
def main(params):
# initialize the multi-GPU / multi-node training
init_distributed_mode(params)
# initialize the experiment
logger = initialize_exp(params)
# initialize SLURM signal handler for time limit / pre-emption
init_signal_handler()
# load data
data = load_data(params)
# build model
print_memory(logger, "before build modules")
if params.encoder_only:
model = build_model(params, data["dico"])
else:
encoder, decoder = build_model(params, data["dico"])
print_memory(logger, "before build classifier")
if params.use_classifier:
classifier = build_classifier(params)
else:
classifier = None
# build trainer, reload potential checkpoints / build evaluator
if params.encoder_only:
trainer = SingleTrainer(model, data, params, classifier)
if not params.train_only:
evaluator = SingleEvaluator(trainer, data, params)
else:
trainer = EncDecTrainer(encoder, decoder, data, params)
if not params.train_only:
evaluator = EncDecEvaluator(trainer, data, params)
print_memory(logger, "after building all models")
# evaluation
if params.eval_only or params.eval_beginning:
scores = evaluator.run_all_evals(trainer)
for k, v in scores.items():
if isinstance(v, list):
logger.info("%s -> %s" % (k, json.dumps(["%.2f" % el for el in v])))
else:
logger.info("%s -> %.6f" % (k, v))
logger.info("__log__:%s" % json.dumps(scores))
if params.eval_only:
exit()
# set sampling probabilities for training
set_sampling_probs(data, params)
# language model training
for _ in range(params.max_epoch):
logger.info("============ Starting epoch %i ... ============" % trainer.epoch)
trainer.n_sentences = 0
while trainer.n_sentences < trainer.epoch_size:
            show_example = trainer.n_sentences == 0
# CLM steps
for lang1, lang2 in shuf_order(params.clm_steps, params):
trainer.clm_step(
lang1, lang2, params.lambda_clm, show_example=show_example
)
# MLM steps (also includes TLM if lang2 is not None)
for lang1, lang2 in shuf_order(params.mlm_steps, params):
trainer.mlm_step(
lang1, lang2, params.lambda_mlm, show_example=show_example
)
# denoising auto-encoder steps
for lang in shuf_order(params.ae_steps):
trainer.ae_step(
lang, None, params.lambda_ae, show_example=show_example,
)
# concatenated denoising auto-encoder steps
for lang1, lang2 in shuf_order(params.tae_steps):
trainer.ae_step(
lang1, lang2, params.lambda_tae, show_example=show_example,
)
# machine translation steps
for lang1, lang2 in shuf_order(params.mt_steps, params):
trainer.mt_step(
lang1, lang2, params.lambda_mt, show_example=show_example,
)
# machine translation using spans steps
for lang1, lang2, span in shuf_order(params.mt_spans_steps, params):
trainer.mt_step(
lang1,
lang2,
params.lambda_mt,
span=span,
show_example=show_example,
)
# deobfuscation step
for lang1, lang2 in shuf_order(params.do_steps):
trainer.dobf_step(
lang1,
lang2,
params.lambda_do,
deobfuscate_p=1 - params.obf_proba,
show_example=show_example,
)
# back-translation steps
for lang1, lang2, lang3 in shuf_order(params.bt_steps):
trainer.bt_step(
lang1,
lang2,
lang3,
params.lambda_bt,
params.bt_sample_temperature,
show_example=show_example,
)
# Classification
for lang1, lang2 in shuf_order(params.classif_steps, params):
trainer.classif_step(
lang1,
lang2,
getattr(params, "lambda_classif_" + "_".join((lang1, lang2))),
)
# Self-Labelling
for lang1, langs2 in shuf_order(params.st_steps):
trainer.st_step(
lang1, langs2, params.lambda_st, show_example=show_example,
)
trainer.iter()
logger.info("============ End of epoch %i ============" % trainer.epoch)
# evaluate perplexity
scores = None
if not params.train_only:
scores = evaluator.run_all_evals(trainer)
# print / JSON log
for k, v in scores.items():
if isinstance(v, list):
logger.info("%s -> %s" % (k, json.dumps(["%.2f" % el for el in v])))
else:
logger.info("%s -> %.6f" % (k, v))
if params.is_master:
logger.info("__log__:%s" % json.dumps(scores))
# end of epoch
if params.validation_metrics != "":
trainer.save_best_model(scores)
trainer.save_periodic()
trainer.end_epoch(scores)
if __name__ == "__main__":
# generate parser / parse parameters
parser = get_parser()
params = parser.parse_args()
# debug mode
if params.debug:
params.exp_name = "debug"
params.exp_id = "debug_%08i" % random.randint(0, 100000000)
params.debug_slurm = True
params.debug_train = True
# check parameters
check_data_params(params)
check_model_params(params)
# run experiment
main(params)
|
CodeGen-main
|
codegen_sources/model/train.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
from pythainlp.tokenize import word_tokenize
for line in sys.stdin.readlines():
line = line.rstrip("\n")
print(" ".join(word_tokenize(line)))
|
CodeGen-main
|
codegen_sources/model/tools/segment_th.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import unicodedata
import six
def convert_to_unicode(text):
"""
Converts `text` to Unicode (if it's not already), assuming UTF-8 input.
"""
# six_ensure_text is copied from https://github.com/benjaminp/six
def six_ensure_text(s, encoding="utf-8", errors="strict"):
if isinstance(s, six.binary_type):
return s.decode(encoding, errors)
elif isinstance(s, six.text_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
return six_ensure_text(text, encoding="utf-8", errors="ignore")
def run_strip_accents(text):
"""
Strips accents from a piece of text.
"""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
for line in sys.stdin:
line = convert_to_unicode(line.rstrip().lower())
line = run_strip_accents(line)
print("%s" % line.lower())
|
CodeGen-main
|
codegen_sources/model/tools/lowercase_and_remove_accent.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import typing as tp
from pathlib import Path
import numpy
import pytest
import requests
import torch
from ..src.data.dictionary import (
BOS_WORD,
EOS_WORD,
PAD_WORD,
UNK_WORD,
MASK_WORD,
Dictionary,
)
from ..src.model import build_model
from ..src.utils import AttrDict, batch_sentences
import codegen_sources.dataloaders.transforms as transf
import codegen_sources
TOLERANCE = 1e-5
OUTPUT_DELIMITER = """Translated %s function:
===================="""
TRANSCODER_MODEL_1_URL = "https://dl.fbaipublicfiles.com/transcoder/pre_trained_models/TransCoder_model_1.pth"
ROOT_FOLDER = Path(__file__).parents[3]
model_folder = ROOT_FOLDER.joinpath("data", "sample_model")
model_folder.mkdir(exist_ok=True)
MODEL_PATH = model_folder.joinpath("TransCoder_model_1.pth")
if not MODEL_PATH.exists():
r = requests.get(TRANSCODER_MODEL_1_URL, allow_redirects=True)
open(MODEL_PATH, "wb").write(r.content)
@pytest.mark.parametrize(
"efficient_attn", (None, "cutlass", "fctls_bflsh", "auto")
) # flash attention only works on A100
def test_reload_and_run(efficient_attn) -> None:
BPE_path: Path = Path(codegen_sources.__file__).parents[1].resolve().joinpath(
"data/bpe/cpp-java-python/codes"
)
gpu = torch.cuda.device_count() > 0
if not gpu and efficient_attn is not None:
print("Skipping test: xformers does not run on CPU")
return
device = "cuda:0" if gpu else "cpu"
decoder, encoder, in_pipe, lang2id = reload_model(BPE_path, efficient_attn, gpu)
lang1 = "cpp"
input_code = """int factorial ( int n ) {
if ( n > 1 ) return n * factorial ( n - 1 ) ;
else return 1 ;
}"""
in_pipe = transf.CodeTokenizer(lang1).pipe(in_pipe)
x1 = in_pipe.apply(input_code).to(device)[:, None]
size = x1.shape[0]
len1 = torch.LongTensor(1).fill_(size).to(device)
lang1_id = lang2id[lang1 + "_sa"]
langs1 = x1.clone().fill_(lang1_id)
# Encode
enc1 = encoder("fwd", x=x1, lengths=len1, langs=langs1, causal=False)
assert abs(enc1.mean().item() + 0.0388823039829731) < TOLERANCE, enc1.mean().item()
enc1 = enc1.transpose(0, 1)
print(enc1)
lang2 = "python"
output_code = """def factorial ( n ) :
if n > 1 :
return n * factorial ( n - 1 )
else :
return 1
"""
dec2, x2, len2 = decode(
output_code, enc1, len1, lang2, lang2id, in_pipe, decoder, device
)
assert (
abs(dec2.mean().item() + 0.05856532230973244) < TOLERANCE
), "Decoder values changed"
assert abs(dec2[0, 0, 0].item() + 0.5106964111328125) < TOLERANCE * 10
assert abs(dec2[-1, -1, -1].item() + 0.2060123234987259) < TOLERANCE * 10
loss = get_loss(x2, len2, dec2, decoder)
assert abs(loss.item() - 2.666358232498169) < TOLERANCE, loss.item()
output_code_2 = """def sum(a, b):
return a + b
"""
dec2_2, x2_2, len2_2 = decode(
output_code_2, enc1, len1, lang2, lang2id, in_pipe, decoder, device
)
loss = get_loss(x2_2, len2_2, dec2_2, decoder)
assert abs(loss.item() - 4.038794040679932) < TOLERANCE
@pytest.mark.parametrize(
"efficient_attn", (None,)
) # flash attention only works on A100
# Custom attention bias and padding are not supported by xformers right now
def test_reload_and_run_with_padding(efficient_attn) -> None:
BPE_path: Path = Path(codegen_sources.__file__).parents[1].resolve().joinpath(
"data/bpe/cpp-java-python/codes"
)
gpu = torch.cuda.device_count() > 0
if not gpu and efficient_attn is not None:
print("Skipping test: xformers does not run on CPU")
return
device = "cuda:0" if gpu else "cpu"
decoder, encoder, in_pipe, lang2id = reload_model(BPE_path, efficient_attn, gpu)
lang1 = "cpp"
input_code = """int factorial ( int n ) {
if ( n > 1 ) return n * factorial ( n - 1 ) ;
else return 1 ;
}"""
longer_code = """// This is an implementation of the factorial function using the int type
int longer_factorial_function ( int input_integer ) {
if ( input_integer > 1 ) return input_integer * factorial ( input_integer - 1 ) ;
else return 1 ;
}"""
in_pipe = transf.CodeTokenizer(lang1).pipe(in_pipe)
x1, len1 = batch_sentences(
[
numpy.array(in_pipe.apply(input_code))[1:-1],
numpy.array(in_pipe.apply(longer_code))[1:-1],
],
eos_index=encoder.dico.eos_index,
pad_index=encoder.dico.pad_index,
)
x1, len1 = x1.to(device), len1.to(device)
lang1_id = lang2id[lang1 + "_sa"]
langs1 = x1.clone().fill_(lang1_id)
# Encode
enc1 = encoder("fwd", x=x1, lengths=len1, langs=langs1, causal=False)
# check first element did not change
assert (
abs(enc1[: len1[0].item(), 0, :].mean().item() + 0.0388823039829731) < TOLERANCE
), enc1.mean().item()
assert abs(enc1.mean().item() + 0.0337064266204834) < TOLERANCE, enc1.mean().item()
enc1 = enc1.transpose(0, 1)
print(enc1)
lang2 = "python"
output_code = """def factorial ( n ) :
if n > 1 :
return n * factorial ( n - 1 )
else :
return 1
"""
output_code_longer = """def longer_factorial_function ( input_integer ) :
if input_integer > 1 :
return input_integer * factorial ( input_integer - 1 )
else :
return 1
"""
dec2, x2, len2 = decode(
[output_code, output_code_longer],
enc1,
len1,
lang2,
lang2id,
in_pipe,
decoder,
device,
)
# Check the output is still the same for first element
assert (
abs(dec2[: len2[0].item(), 0, :].mean().item() + 0.05856532230973244)
< TOLERANCE
), f"Decoder values changed: was {dec2[:len2[0].item(), 0, :].mean().item()}"
assert abs(dec2[0, 0, 0].item() + 0.5106940865516663) < TOLERANCE * 10
assert abs(dec2[len2[0] - 1, 0, -1].item() + 0.2060139924287796) < TOLERANCE * 10
loss = get_loss(x2[:, :1], len2[:1], dec2[:, :1, :], decoder)
assert abs(loss.item() - 2.666358232498169) < TOLERANCE, loss.item()
def reload_model(BPE_path, efficient_attn, gpu) -> tp.Tuple:
model_path = MODEL_PATH
reloaded = torch.load(model_path, map_location="cpu")
    # change params of the reloaded model so that it will
    # reload its own weights and not the MLM or DOBF pretrained model
reloaded["params"]["reload_model"] = ",".join([str(model_path)] * 2)
reloaded["params"]["lgs_mapping"] = ""
reloaded["params"]["reload_encoder_for_decoder"] = False
reloaded["params"]["fp16"] = False
reloaded_params = AttrDict(reloaded["params"])
reloaded_params["efficient_attn"] = efficient_attn
# build dictionary / update parameters
dico = Dictionary(
reloaded["dico_id2word"], reloaded["dico_word2id"], reloaded["dico_counts"]
)
assert reloaded_params.n_words == len(dico)
assert reloaded_params.bos_index == dico.index(BOS_WORD)
assert reloaded_params.eos_index == dico.index(EOS_WORD)
assert reloaded_params.pad_index == dico.index(PAD_WORD)
assert reloaded_params.unk_index == dico.index(UNK_WORD)
assert reloaded_params.mask_index == dico.index(MASK_WORD)
lang2id = reloaded_params.lang2id
# build model / reload weights (in the build_model method)
encoder, decoder = build_model(reloaded_params, dico, gpu)
encoder = encoder[0]
decoder = decoder[0]
encoder.eval()
decoder.eval()
assert (
abs(
sum([x.mean().item() for x in encoder.state_dict().values()])
- 7.67796907491811
)
< 1e-6
), "Encoder badly reloaded"
assert (
abs(
sum([x.mean().item() for x in decoder.state_dict().values()])
- 13.814257268892561
)
< 1e-6
), "Encoder badly reloaded"
# reload bpe
if (
reloaded_params.get("roberta_mode", False)
or reloaded_params.get("tokenization_mode", "") == "roberta"
):
bpe_transf: transf.BpeBase = transf.RobertaBpe()
raise ValueError("This part has not beem tested thoroughly yet")
else:
bpe_transf = transf.FastBpe(code_path=Path(BPE_path).absolute())
bpetensorizer = transf.BpeTensorizer()
bpetensorizer.dico = dico # TODO: hacky
in_pipe = bpe_transf.pipe(bpetensorizer)
return decoder, encoder, in_pipe, lang2id
def get_loss(x2, len2, dec2, decoder):
# loss
alen = torch.arange(x2.shape[0], dtype=torch.long, device=len2.device)
# do not predict anything given the last target word
pred_mask = alen[:, None] < len2[None] - 1
y = x2[1:].masked_select(pred_mask[:-1])
_, loss = decoder(
"predict", tensor=dec2, pred_mask=pred_mask, y=y, get_scores=False
)
return loss
def decode(
codes_input: tp.Union[str, tp.List[str]],
enc1,
len1,
lang2,
lang2id,
in_pipe,
decoder,
device,
):
if isinstance(codes_input, str):
codes = [codes_input]
else:
codes = codes_input
lang2_id = lang2id[lang2 + "_sa"]
x2, len2 = batch_sentences(
[numpy.array(in_pipe.apply(code))[1:-1] for code in codes],
eos_index=decoder.dico.eos_index,
pad_index=decoder.dico.pad_index,
)
x2, len2 = x2.to(device), len2.to(device)
langs2 = x2.clone().fill_(lang2_id)
dec2 = decoder(
"fwd",
x=x2,
lengths=len2,
langs=langs2,
causal=True,
src_enc=enc1,
src_len=len1,
)
return dec2, x2, len2
|
CodeGen-main
|
codegen_sources/model/model_tests/test_forward_pass.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
import typing as tp
from codegen_sources.model.src.evaluation.comp_acc_computation import (
submit_functions,
init_eval_scripts_folder,
EVAL_SCRIPT_FOLDER,
)
from codegen_sources.preprocessing.lang_processors import LangProcessor
EVAL_SCRIPTS_ = "/tmp/eval_scripts/"
Path(EVAL_SCRIPTS_).mkdir(parents=True, exist_ok=True)
class Params:
def __init__(self) -> None:
self.eval_scripts_root = EVAL_SCRIPTS_
self.eval_scripts_folders: tp.Dict[tp.Tuple[str, str, str], str] = {}
params = Params()
def test_submit_correct_function():
lang1 = "cpp"
lang2 = "cpp"
data_set = "valid"
hyp = """int numberOfTriangle ( int n ) {
return 2 * pow ( 3 , n ) - 1 ;
}"""
ref = """int numberOfTriangles ( int n ) {
int ans = 2 * ( pow ( 3 , n ) ) - 1 ;
return ans ;
}"""
init_eval_scripts_folder(data_set, lang1, lang2, params)
id = "NUMBER_TRIANGLES_N_MOVES_1"
results_list, i = submit_functions(
[hyp],
id,
ref,
lang="cpp",
outfolder=params.eval_scripts_folders[(lang1, lang2, data_set)],
script_folder=EVAL_SCRIPT_FOLDER[data_set],
retry_mismatching_types=False,
)
assert results_list == [("success", None)], results_list
assert i == id, f"{i} != {id}"
def test_submit_correct_function_bug():
lang1 = "cpp"
lang2 = "cpp"
data_set = "valid"
ref = "int numberOfTriangles ( int n ) { int ans = 2 * ( pow ( 3 , n ) ) - 1 ; return ans ; }"
hyp = "int numberOfTriangle( int n ) { return 2 * pow ( 3 , n ) - 1 ; }"
# hyp = " ".join(lang_processor.tokenize_code(hyp))
# ref = " ".join(lang_processor.tokenize_code(ref))
init_eval_scripts_folder(data_set, lang1, lang2, params)
id = "NUMBER_TRIANGLES_N_MOVES_1"
results_list, i = submit_functions(
[hyp],
id,
ref,
lang="cpp",
outfolder=params.eval_scripts_folders[(lang1, lang2, data_set)],
script_folder=EVAL_SCRIPT_FOLDER[data_set],
retry_mismatching_types=False,
)
assert results_list == [("success", None)], results_list
assert i == id, f"{i} != {id}"
|
CodeGen-main
|
codegen_sources/model/model_tests/test_comp_acc_computation.py
|
CodeGen-main
|
codegen_sources/model/model_tests/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from codegen_sources.model.src.evaluation.subtoken_score import (
subtoken_counts,
subtoken_score_on_lines,
subtoken_score_on_lines_subtoken_level,
)
def test_same_strings_perfect_match():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts(
"linesCount", "linesCount"
)
assert precise_tokens == proposed_tokens == gt_tokens == 2
def test_inverted_tokens_perfect_match():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts(
"countLines", "linesCount"
)
assert precise_tokens == proposed_tokens == gt_tokens == 2
def test_different_cases_perfect_match():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts(
"lines_count", "linesCount"
)
assert precise_tokens == proposed_tokens == gt_tokens == 2
def test_extra_token_perfect_recall():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts(
"emptyLinesCount", "linesCount"
)
assert precise_tokens == gt_tokens == 2
assert proposed_tokens == 3
def test_missing_token_perfect_precision():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts("count", "linesCount")
assert precise_tokens == proposed_tokens == 1
assert gt_tokens == 2
def test_empty_proposed_low_recall():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts("", "linesCount")
assert precise_tokens == proposed_tokens == 0
assert gt_tokens == 2
def test_full_subtoken_score():
res_dict = subtoken_score_on_lines(
[["VAR_1 linesCount | VAR_2 words"]], ["VAR_1 countLines | VAR_2 uniqueWords"]
)
assert res_dict["precision"] == 1.0, res_dict
assert abs(res_dict["recall"] - 0.75) < 0.0001, res_dict
assert abs(res_dict["F1"] - 0.83333333) < 0.0001, res_dict
def test_extra_tokens():
res_dict = subtoken_score_on_lines(
[["VAR_1 linesCount | VAR_2 words | VA RandomStuff"]],
["VAR_1 countLines | VAR_2 uniqueWords"],
)
assert res_dict["precision"] == 1.0, res_dict
assert abs(res_dict["recall"] - 0.75) < 0.0001, res_dict
assert abs(res_dict["F1"] - 0.83333333) < 0.0001, res_dict
def test_full_subtoken_score_subtoken_level():
res_dict = subtoken_score_on_lines_subtoken_level(
["VAR_1 linesCount | VAR_2 words"], ["VAR_1 countLines | VAR_2 uniqueWords"]
)
assert res_dict["precision"] == 1.0, res_dict
assert abs(res_dict["recall"] - 0.75) < 0.0001, res_dict
assert abs(res_dict["F1"] - 0.85714285) < 0.0001, res_dict
def test_full_subtoken_score_low_precision():
res_dict = subtoken_score_on_lines(
[["VAR_1 linesCount | VAR_2 sentences"]],
["VAR_1 countLines | VAR_2 uniqueWords"],
)
assert (
res_dict["precision"] == res_dict["recall"] == res_dict["F1"] == 0.5
), res_dict
assert res_dict["exact_match"] == 0, res_dict
def test_full_subtoken_score_snakecase_vs_camlcase():
res_dict = subtoken_score_on_lines(
[["VAR_1 lines_count | VAR_2 sentences"]],
["VAR_1 countLines | VAR_2 uniqueWords"],
)
assert (
res_dict["precision"] == res_dict["recall"] == res_dict["F1"] == 0.5
), res_dict
assert res_dict["exact_match"] == 0, res_dict
def test_exact_match():
res_dict = subtoken_score_on_lines(
[["VAR_1 lines_count | VAR_2 sentences"]],
["VAR_1 countLines | VAR_2 sentences"],
)
assert (
res_dict["precision"] == res_dict["recall"] == res_dict["F1"] == 1.0
), res_dict
assert res_dict["exact_match"] == 0.5, res_dict
def test_full_subtoken_score_case_insensitive():
res_dict = subtoken_score_on_lines([["VAR_1 Lines_count"]], ["VAR_1 CountLines"])
assert (
res_dict["precision"] == res_dict["recall"] == res_dict["F1"] == 1.0
), res_dict
def test_full_subtoken_score_takes_best_beam():
res_dict = subtoken_score_on_lines(
[["VAR_1 linesCount | VAR_2 sentences", "VAR_1 linesCount | VAR_2 words"]],
["VAR_1 countLines | VAR_2 uniqueWords"],
)
assert res_dict["precision"] == 1.0, res_dict
assert abs(res_dict["recall"] - 0.75) < 0.0001, res_dict
assert abs(res_dict["F1"] - 0.83333333) < 0.0001, res_dict
def test_subtoken_score_with_spaces():
res_dict = subtoken_score_on_lines(
[["VAR_1 Optional [ float ] | VAR_2 float"]],
["VAR_1 Optional [ int ] | VAR_2 float"],
)
assert res_dict["exact_match"] == 0.5, res_dict
|
CodeGen-main
|
codegen_sources/model/model_tests/test_subtoken_score.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import subprocess
import uuid
from pathlib import Path
import pytest
import requests
import torch
import codegen_sources
from ..src.constants import EXT
from ..translate import Translator
from ...code_runners.python_code_runner import PYTHON_ENV
from ...preprocessing.tests.obfuscation.utils import diff_tester
DELIMITER = "=" * 20
OUTPUT_DELIMITER = f"""Translated %s function:
{DELIMITER}"""
TRANSCODER_MODEL_1_URL = "https://dl.fbaipublicfiles.com/transcoder/pre_trained_models/TransCoder_model_1.pth"
ROOT_FOLDER = Path(codegen_sources.__file__).parents[1]
model_folder = ROOT_FOLDER.joinpath("data", "sample_model")
model_folder.mkdir(exist_ok=True)
MODEL_PATH = model_folder.joinpath("TransCoder_model_1.pth")
BPE_PATH = ROOT_FOLDER / "data/bpe/cpp-java-python/codes"
if not MODEL_PATH.exists():
r = requests.get(TRANSCODER_MODEL_1_URL, allow_redirects=True)
open(MODEL_PATH, "wb").write(r.content)
def translation_generic_tester(
input_function: str,
src_lang: str,
tgt_lang: str,
expected: str,
model_path: Path = MODEL_PATH,
beam_size: int = 1,
):
if os.environ.get("CI", False):
        # This test doesn't work on the CI although it works over ssh, for unknown reasons
return
hash_value = uuid.uuid4()
code_path = f"/tmp/{hash_value}{EXT[tgt_lang]}"
with open(code_path, "w") as f:
f.write(input_function)
cmd = f"cd {ROOT_FOLDER};NPY_MKL_FORCE_INTEL=1 python -m codegen_sources.model.translate --input {code_path} --src_lang {src_lang} --tgt_lang {tgt_lang} --model_path {model_path} --gpu false --beam_size {beam_size}"
print(f"Running: {cmd}")
try:
proc = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
timeout=180,
shell=True,
env=PYTHON_ENV,
)
except:
Path(code_path).unlink()
raise
assert proc.returncode == 0, f"Translation failed with error {proc.stderr.decode()}"
output = proc.stdout.decode()
delimiter = OUTPUT_DELIMITER % tgt_lang
assert (
delimiter in output
), f"was successful but couldn't find translation in {output}"
output = output.split(delimiter)[-1]
diff_tester(expected.strip(), output.strip())
def translation_class_tester(
input_function: str,
src_lang: str,
tgt_lang: str,
expected: str,
model_path: Path = MODEL_PATH,
beam_size: int = 1,
):
hash_value = uuid.uuid4()
code_path = f"/tmp/{hash_value}{EXT[tgt_lang]}"
with open(code_path, "w") as f:
f.write(input_function)
cmd = f"cd {ROOT_FOLDER};NPY_MKL_FORCE_INTEL=1 python -m codegen_sources.model.translate --input {code_path} --src_lang {src_lang} --tgt_lang {tgt_lang} --model_path {model_path} --gpu false --beam_size {beam_size}"
print(f"Running: {cmd}")
# Initialize translator
translator = Translator(MODEL_PATH, BPE_PATH, gpu=False, efficient_attn=None,)
    # print the input code
print(input_function)
with torch.no_grad():
output = translator.translate(
input_function, lang1=src_lang, lang2=tgt_lang, beam_size=beam_size,
)
output = DELIMITER.join([x.strip() for x in output])
expected = DELIMITER.join([x.strip() for x in expected.split(DELIMITER)])
diff_tester(expected, output)
CPP_FACTORIAL = """int factorial ( int n ) {
if ( n > 1 ) return n * factorial ( n - 1 ) ;
else return 1 ;
}"""
JAVA_FACTORIAL = """public static int factorial ( int n ) {
if ( n > 1 ) {
return n * factorial ( n - 1 ) ;
}
else {
return 1 ;
}
}
"""
PYTHON_FACTORIAL = """def factorial ( n ) :
if n > 1 :
return n * factorial ( n - 1 )
else :
return 1
"""
@pytest.mark.parametrize("from_class", (False, True))
def test_cpp_to_python_translation(from_class: bool):
input_function = CPP_FACTORIAL
expected_output = PYTHON_FACTORIAL
if from_class:
translation_class_tester(input_function, "cpp", "python", expected_output)
else:
translation_generic_tester(input_function, "cpp", "python", expected_output)
@pytest.mark.parametrize("from_class", (False, True))
def test_cpp_to_java_translation(from_class: bool):
expected_output = JAVA_FACTORIAL
if from_class:
translation_class_tester(CPP_FACTORIAL, "cpp", "java", expected_output)
else:
translation_generic_tester(CPP_FACTORIAL, "cpp", "java", expected_output)
@pytest.mark.parametrize("from_class", (False, True))
def test_java_to_python_translation(from_class: bool):
if from_class:
translation_class_tester(JAVA_FACTORIAL, "java", "python", PYTHON_FACTORIAL)
else:
translation_generic_tester(JAVA_FACTORIAL, "java", "python", PYTHON_FACTORIAL)
@pytest.mark.parametrize("from_class", (False, True))
def test_java_to_cpp_translation(from_class: bool):
if from_class:
translation_class_tester(JAVA_FACTORIAL, "java", "cpp", CPP_FACTORIAL)
else:
translation_generic_tester(JAVA_FACTORIAL, "java", "cpp", CPP_FACTORIAL)
@pytest.mark.parametrize("from_class", (False, True))
def test_python_to_java_translation(from_class: bool):
expected = """public static int factorial ( int n ) {
if ( n > 1 ) return n * factorial ( n - 1 ) ;
else return 1 ;
}"""
if from_class:
translation_class_tester(PYTHON_FACTORIAL, "python", "java", expected)
else:
translation_generic_tester(PYTHON_FACTORIAL, "python", "java", expected)
@pytest.mark.parametrize("from_class", (False, True))
def test_python_to_cpp_translation(from_class: bool):
if from_class:
translation_class_tester(PYTHON_FACTORIAL, "python", "cpp", CPP_FACTORIAL)
else:
translation_generic_tester(PYTHON_FACTORIAL, "python", "cpp", CPP_FACTORIAL)
@pytest.mark.parametrize("from_class", (False, True))
def test_translation_with_beam_decoding(from_class: bool):
expected = """int factorial ( int n ) {
if ( n > 1 ) return n * factorial ( n - 1 ) ;
else return 1 ;
}
====================
public : int factorial ( int n ) {
if ( n > 1 ) return n * factorial ( n - 1 ) ;
else return 1 ;
}
====================
int factorial ( int n ) {
if ( n > 1 ) {
return n * factorial ( n - 1 ) ;
}
else {
return 1 ;
}
}
====================
inline int factorial ( int n ) {
if ( n > 1 ) return n * factorial ( n - 1 ) ;
else return 1 ;
}
====================
long long factorial ( long long n ) {
if ( n > 1 ) return n * factorial ( n - 1 ) ;
else return 1 ;
}"""
if from_class:
translation_class_tester(
PYTHON_FACTORIAL, "python", "cpp", expected, beam_size=5
)
else:
translation_generic_tester(
PYTHON_FACTORIAL, "python", "cpp", expected, beam_size=5
)
|
CodeGen-main
|
codegen_sources/model/model_tests/test_translation.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import signal
import socket
import subprocess
import sys
from logging import getLogger
import torch
logger = getLogger()
def sig_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
prod_id = int(os.environ["SLURM_PROCID"])
logger.warning("Host: %s - Global rank: %i" % (socket.gethostname(), prod_id))
if prod_id == 0:
logger.warning("Requeuing job " + os.environ["SLURM_JOB_ID"])
os.system("scontrol requeue " + os.environ["SLURM_JOB_ID"])
sys.exit(-1)
else:
logger.warning("Not the master process, no need to requeue.")
def term_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
logger.warning("Bypassing SIGTERM.")
def init_signal_handler():
"""
Handle signals sent by SLURM for time limit / pre-emption.
"""
signal.signal(signal.SIGUSR1, sig_handler)
signal.signal(signal.SIGTERM, term_handler)
logger.warning("Signal handler installed.")
def init_distributed_mode(params):
"""
Handle single and multi-GPU / multi-node / SLURM jobs.
Initialize the following variables:
- n_nodes
- node_id
- local_rank
- global_rank
- world_size
"""
params.is_slurm_job = "SLURM_JOB_ID" in os.environ and not params.debug_slurm
print("SLURM job: %s" % str(params.is_slurm_job))
# SLURM job
if params.is_slurm_job:
assert params.local_rank == -1 # on the cluster, this is handled by SLURM
SLURM_VARIABLES = [
"SLURM_JOB_ID",
"SLURM_JOB_NODELIST",
"SLURM_JOB_NUM_NODES",
"SLURM_NTASKS",
"SLURM_TASKS_PER_NODE",
"SLURM_MEM_PER_NODE",
"SLURM_MEM_PER_CPU",
"SLURM_NODEID",
"SLURM_PROCID",
"SLURM_LOCALID",
"SLURM_TASK_PID",
]
PREFIX = "%i - " % int(os.environ["SLURM_PROCID"])
for name in SLURM_VARIABLES:
value = os.environ.get(name, None)
print(PREFIX + "%s: %s" % (name, str(value)))
# # job ID
# params.job_id = os.environ['SLURM_JOB_ID']
# number of nodes / node ID
params.n_nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
params.node_id = int(os.environ["SLURM_NODEID"])
# local rank on the current node / global rank
params.local_rank = int(os.environ["SLURM_LOCALID"])
params.global_rank = int(os.environ["SLURM_PROCID"])
# number of processes / GPUs per node
params.world_size = int(os.environ["SLURM_NTASKS"])
params.n_gpu_per_node = params.world_size // params.n_nodes
# define master address and master port
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", os.environ["SLURM_JOB_NODELIST"]]
)
params.master_addr = hostnames.split()[0].decode("utf-8")
assert 10001 <= params.master_port <= 20000 or params.world_size == 1
print(PREFIX + "Master address: %s" % params.master_addr)
print(PREFIX + "Master port : %i" % params.master_port)
# set environment variables for 'env://'
os.environ["MASTER_ADDR"] = params.master_addr
os.environ["MASTER_PORT"] = str(params.master_port)
os.environ["WORLD_SIZE"] = str(params.world_size)
os.environ["RANK"] = str(params.global_rank)
# multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch
elif params.local_rank != -1:
assert params.master_port == -1
# read environment variables
params.global_rank = int(os.environ["RANK"])
params.world_size = int(os.environ["WORLD_SIZE"])
params.n_gpu_per_node = int(os.environ["NGPU"])
# number of nodes / node ID
params.n_nodes = params.world_size // params.n_gpu_per_node
params.node_id = params.global_rank // params.n_gpu_per_node
# local job (single GPU)
else:
assert params.local_rank == -1
assert params.master_port == -1
params.n_nodes = 1
params.node_id = 0
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
params.n_gpu_per_node = 1
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in distributed mode
params.is_master = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
params.multi_gpu = params.world_size > 1
# summary
PREFIX = "%i - " % params.global_rank
print(PREFIX + "Number of nodes: %i" % params.n_nodes)
print(PREFIX + "Node ID : %i" % params.node_id)
print(PREFIX + "Local rank : %i" % params.local_rank)
print(PREFIX + "Global rank : %i" % params.global_rank)
print(PREFIX + "World size : %i" % params.world_size)
print(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
print(PREFIX + "Master : %s" % str(params.is_master))
print(PREFIX + "Multi-node : %s" % str(params.multi_node))
print(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
print(PREFIX + "Hostname : %s" % socket.gethostname())
# set GPU device
torch.cuda.set_device(params.local_rank)
# initialize multi-GPU
if params.multi_gpu:
# http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
# 'env://' will read these environment variables:
# MASTER_PORT - required; has to be a free port on machine with rank 0
# MASTER_ADDR - required (except for rank 0); address of rank 0 node
# WORLD_SIZE - required; can be set either here, or in a call to init function
# RANK - required; can be set either here, or in a call to init function
print("Initializing PyTorch distributed ...")
torch.distributed.init_process_group(
init_method="env://", backend="nccl",
)
|
CodeGen-main
|
codegen_sources/model/src/slurm.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
REF = "REF"
OUT = "OUT"
HYPO = "HYPO"
IR = "IR"
SOURCE = "SOURCE"
SUPPORTED_LANGUAGES_FOR_TESTS = {"java", "python", "cpp", "rust", "go"}
EXT = {"rust": ".rs", "cpp": ".cpp", "java": ".java", "python": ".py", "go": ".go"}
FALSY_STRINGS = {"off", "false", "0"}
TRUTHY_STRINGS = {"on", "true", "1"}
TOKENIZATION_MODES = {"fastbpe", "roberta", "sentencepiece"}
|
CodeGen-main
|
codegen_sources/model/src/constants.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pickle
import random
import typing as tp
from logging import getLogger
from pathlib import Path
import torch
logger = getLogger()
class Cache(object):
def __init__(self, elements=None, params=None) -> None:
self.eos_index = params.eos_index
self.pad_index = params.pad_index
self.elements = elements
self.st_remove_proba = params.st_remove_proba
self.params = params
def sample(self, size):
raise NotImplementedError()
def add(self, new_elements, keys=None):
raise NotImplementedError
def exists(self, element_id):
raise NotImplementedError
def __len__(self):
return len(self.elements)
def sample_batch(self, n):
sampled_elements = self.sample(n)
sent1 = [e[0] for e in sampled_elements]
len1 = [e[1] for e in sampled_elements]
sent2 = [e[2] for e in sampled_elements]
len2 = [e[3] for e in sampled_elements]
return self.batch_sequences(sent1, len1), self.batch_sequences(sent2, len2)
def batch_sequences(self, sequences: list, lengths: list):
"""
Take as input a list of n sequences (torch.LongTensor vectors) and return
a tensor of size (slen, n) where slen is the length of the longest
sentence, and a vector lengths containing the length of each sentence.
"""
assert all(
[
len(s) >= l
and s[0].item() == self.eos_index
and s[l - 1].item() == self.eos_index
for s, l in zip(sequences, lengths)
]
)
lengths_tensor = torch.LongTensor(lengths)
sent = torch.LongTensor(max(lengths_tensor), len(lengths_tensor)).fill_(
self.pad_index
)
assert min(lengths_tensor) > 2
for i, s in enumerate(sequences):
sent[0 : int(lengths_tensor[i]), i].copy_(s[: int(lengths_tensor[i])])
return sent, lengths_tensor
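    # Illustrative sketch (hypothetical indices): with eos_index=1 and pad_index=2,
    # batch_sequences([LongTensor([1, 5, 6, 1]), LongTensor([1, 7, 1])], [4, 3])
    # returns a (4, 2) tensor whose columns are [1, 5, 6, 1] and [1, 7, 1, 2],
    # together with the lengths tensor([4, 3]).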
def limit_tokens_per_batch(self, sampled_elements):
max_len = 0
for i, (s1, l1, s2, l2) in enumerate(sampled_elements):
max_len = max(max_len, l1, l2)
tokens_in_batch = max_len * (i + 1)
if tokens_in_batch > self.params.tokens_per_batch:
return i - 1
return len(sampled_elements)
def save(self, path):
path = Path(path)
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "wb") as f:
pickle.dump(self.elements, f)
@classmethod
def from_file(cls, cache_path, params):
print(cache_path)
with open(cache_path, "rb") as pickle_in:
elements = pickle.load(pickle_in)
return cls(elements, params)
def load(self, path):
"""Loads elements from a path and adds them to the existing elements"""
if not Path(path).exists():
raise ValueError(f"{path} not found")
with open(path, "rb") as pickle_in:
elements = pickle.load(pickle_in)
assert isinstance(elements, list)
self.add(elements)
class ListCache(Cache):
def __init__(self, elements: tp.Optional[tp.List] = None, params=None) -> None:
super().__init__(elements, params)
if elements is None:
self.elements = []
else:
self.elements = elements
self.tokens_per_batch = params.tokens_per_batch
def exists(self, element_id):
        # For ListCache this is always False: element IDs are not stored
return False
def sample(self, n):
indices = random.sample(list(range(len(self.elements))), n)
sampled = [self.elements[i] for i in indices]
if self.params.st_limit_tokens_per_batch:
limit = self.limit_tokens_per_batch(sampled)
indices = indices[:limit]
sampled = sampled[:limit]
if random.random() < self.st_remove_proba:
indices = set(indices)
self.elements = [e for i, e in enumerate(self.elements) if i not in indices]
return sampled
def add(self, new_elements, keys=None):
self.elements.extend(new_elements)
class RoundRobinCache(Cache):
def __init__(self, elements: tp.Optional[tp.List] = None, params=None) -> None:
super().__init__(elements, params)
self.cache_size = params.cache_size
if elements is None:
self.elements = []
else:
if len(elements) > self.cache_size:
logger.info(
f"Taking only the first {self.cache_size} elements from {len(elements)} initial cache elements"
)
self.elements = elements[: self.cache_size]
else:
self.elements = elements
self.tokens_per_batch = params.tokens_per_batch
self.current_index = 0
def exists(self, element_id):
        # For RoundRobinCache this is always False: element IDs are not stored
return False
def sample(self, n):
indices = random.sample(list(range(len(self.elements))), n)
sampled = [self.elements[i] for i in indices]
if self.params.st_limit_tokens_per_batch:
limit = self.limit_tokens_per_batch(sampled)
sampled = sampled[:limit]
return sampled
def add(self, new_elements, keys=None):
if len(new_elements) > self.cache_size:
logger.info(
f"Cannot add {len(new_elements)} in the cache of size {self.cache_size}. Truncating."
)
new_elements = new_elements[: self.cache_size]
if len(self.elements) < self.cache_size:
last_fitting = self.cache_size - len(self.elements)
self.elements.extend(new_elements[:last_fitting])
if last_fitting < len(new_elements):
for i, e in enumerate(new_elements[last_fitting:]):
self.elements[i] = e
self.current_index = len(new_elements) - last_fitting
else:
for i, e in enumerate(new_elements):
self.elements[(self.current_index + i) % self.cache_size] = e
self.current_index = (
self.current_index + len(new_elements)
) % self.cache_size
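# Round-robin behaviour sketch (illustrative values): with cache_size=3, adding
# [a, b, c] fills the cache and leaves current_index at 0; a later add([d, e])
# overwrites the oldest slots in order, giving [d, e, c] with current_index == 2,
# so the next add starts overwriting at position 2.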
|
CodeGen-main
|
codegen_sources/model/src/cache.py
|
CodeGen-main
|
codegen_sources/model/src/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import time
from datetime import timedelta
class LogFormatter:
def __init__(self) -> None:
self.start_time = time.time()
def format(self, record):
elapsed_seconds = round(record.created - self.start_time)
prefix = "%s - %s - %s" % (
record.levelname,
time.strftime("%x %X"),
timedelta(seconds=elapsed_seconds),
)
message = record.getMessage()
message = message.replace("\n", "\n" + " " * (len(prefix) + 3))
return "%s - %s" % (prefix, message) if message else ""
def create_logger(filepath, rank):
"""
Create a logger.
Use a different log file for each process.
"""
# create log formatter
log_formatter = LogFormatter()
# create file handler and set level to debug
if filepath is not None:
if rank > 0:
filepath = "%s-%i" % (filepath, rank)
file_handler = logging.FileHandler(filepath, "a")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_formatter)
# create console handler and set level to info
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
# create logger and set level to debug
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.propagate = False
if filepath is not None:
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# reset logger elapsed time
def reset_time():
log_formatter.start_time = time.time()
logger.reset_time = reset_time
return logger
|
CodeGen-main
|
codegen_sources/model/src/logger.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import getpass
import math
import os
import pickle
import random
import re
import subprocess
import sys
import typing as tp
from pathlib import Path
import numpy as np
import psutil
import sentencepiece # type: ignore
import torch
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
from .constants import (
SUPPORTED_LANGUAGES_FOR_TESTS,
FALSY_STRINGS,
TRUTHY_STRINGS,
TOKENIZATION_MODES,
)
from .data.dictionary import NUM_SPECIAL_TOKENS
TOK_AVOID_NEWLINE = "#NEWLINE"
MAX_VIRTUAL_MEMORY = 2 * 1024 * 1024 * 1024 # 2 GB
LTensor = torch.LongTensor
PathLike = tp.Union[Path, str]
REPO_ROOT = Path(__file__).parents[3].absolute()
TREE_SITTER_ROOT = REPO_ROOT.joinpath("tree-sitter")
sys.path.append(str(REPO_ROOT))
print("adding to path", str(REPO_ROOT))
from .logger import create_logger
DUMP_PATH = "/checkpoint/%s/dumped" % getpass.getuser()
dynamic_coeff = [
"lambda_clm",
"lambda_mlm",
"lambda_ae",
"lambda_tae",
"lambda_mt",
"lambda_bt",
"lambda_st",
"bt_sample_temperature",
"st_sample_temperature",
"st_sample_cache_ratio",
"st_beam_size",
"lambda_classif",
"lambda_do",
"st_min_asserts",
"st_min_mutation_score",
]
def show_batch(
logger,
to_print,
dico,
tokenization_mode,
example_type,
sentencepiece_model_path: tp.Optional[PathLike] = None,
):
"""
log first element of batch.
x1 and x2 should be of size bs x slen
"""
logger.info("")
logger.info(f"========== {example_type} example ==========")
for label, x in to_print:
source_sentence = " ".join(
[dico.id2word[int(w)] for w in x[0] if w != dico.pad_index]
)
logger.info(
f"{label} sent: {restore_segmentation_sentence(source_sentence, tokenization_mode, sentencepiece_model_path)}"
)
logger.info("")
for label, x in to_print:
source_sentence = " ".join(
[dico.id2word[int(w)] for w in x[0] if w != dico.pad_index]
)
logger.info(f"{label} tok: {source_sentence}")
logger.info("")
def print_memory(logger, where):
mem_av_gb = psutil.virtual_memory().available / (1024 ** 3)
logger.info(f"MEMORY ({where}) : {mem_av_gb}")
def batch_sentences(sentences, pad_index, eos_index):
"""
Take as input a list of n sentences (torch.LongTensor vectors) and return
a tensor of size (slen, n) where slen is the length of the longest
sentence, and a vector lengths containing the length of each sentence.
"""
# sentences = sorted(sentences, key=lambda x: len(x), reverse=True)
lengths = torch.LongTensor([len(s) + 2 for s in sentences])
sent = torch.LongTensor(lengths.max().item(), lengths.size(0)).fill_(pad_index)
sent[0] = eos_index
for i, s in enumerate(sentences):
if lengths[i] > 2: # if sentence not empty
sent[1 : lengths[i] - 1, i].copy_(torch.from_numpy(s.astype(np.int64)))
sent[lengths[i] - 1, i] = eos_index
return sent, lengths
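# Minimal usage sketch (hypothetical indices): with pad_index=2 and eos_index=1,
# batch_sentences([np.array([5, 6, 7]), np.array([8])], 2, 1) returns a (5, 2)
# LongTensor whose columns are [1, 5, 6, 7, 1] and [1, 8, 1, 2, 2] (EOS added at
# both ends, then padding), together with the lengths tensor([5, 3]).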
def limit_virtual_memory(max_virtual_memory):
# We do a soft limit in order to be able to change the limit later if needed
return f"ulimit -S -v {max_virtual_memory}"
class AttrDict(dict):
def __init__(self, *args: tp.Any, **kwargs: tp.Any) -> None:
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def __getattr__(self, name: str) -> tp.Any: # deactivates mypy checks
raise RuntimeError
def read_file_lines(hyp_path):
with open(hyp_path, "r", encoding="utf-8") as f:
functions = f.readlines()
return functions
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("Invalid value for a boolean flag!")
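# Illustrative: bool_flag("True") and bool_flag("1") return True, bool_flag("off")
# returns False, and any other string raises argparse.ArgumentTypeError.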
def initialize_exp(params):
"""
    Initialize the experiment:
- dump parameters
- create a logger
"""
# dump parameters
get_dump_path(params)
pickle.dump(params, open(os.path.join(params.dump_path, "params.pkl"), "wb"))
# get running command
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith("--"):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
if re.match("^[a-zA-Z0-9_]+$", x):
command.append("%s" % x)
else:
command.append("'%s'" % x)
command = " ".join(command)
params.command = command + ' --exp_id "%s"' % params.exp_id
# check experiment name
assert len(params.exp_name.strip()) > 0
# create a logger
logger = create_logger(
os.path.join(params.dump_path, "train.log"),
rank=getattr(params, "global_rank", 0),
)
logger.info("============ Initialized logger ============")
logger.info(
"\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(params)).items()))
)
logger.info("The experiment will be stored in %s\n" % params.dump_path)
logger.info("Running command: %s" % command)
logger.info("")
return logger
def get_dump_path(params):
"""
Create a directory to store the experiment.
"""
dump_path = DUMP_PATH if params.dump_path == "" else params.dump_path
assert len(params.exp_name) > 0
# create the sweep path if it does not exist
sweep_path = os.path.join(dump_path, params.exp_name)
if not os.path.exists(sweep_path):
subprocess.Popen("mkdir -p %s" % sweep_path, shell=True).wait()
# create an ID for the job if it is not given in the parameters.
# if we run on the cluster, the job ID is the one of Chronos.
# otherwise, it is randomly generated
if params.exp_id == "":
chronos_job_id = os.environ.get("CHRONOS_JOB_ID")
slurm_job_id = os.environ.get("SLURM_JOB_ID")
assert chronos_job_id is None or slurm_job_id is None
exp_id = chronos_job_id if chronos_job_id is not None else slurm_job_id
if exp_id is None:
chars = "abcdefghijklmnopqrstuvwxyz0123456789"
while True:
exp_id = "".join(random.choice(chars) for _ in range(10))
if not os.path.isdir(os.path.join(sweep_path, exp_id)):
break
else:
assert exp_id.isdigit()
params.exp_id = exp_id
# create the dump folder / update parameters
params.dump_path = os.path.join(sweep_path, params.exp_id)
if not os.path.isdir(params.dump_path):
subprocess.Popen("mkdir -p %s" % params.dump_path, shell=True).wait()
# can't be typed since we can't map inputs to outputs
def to_cuda(*args: tp.Union[None, torch.Tensor, torch.LongTensor]) -> tp.List[tp.Any]:
"""
Move tensors to CUDA.
"""
return [None if x is None else x.cuda() for x in args]
def restore_segmentation_sentence(
sentence: str,
tokenization_mode: str = "fastbpe",
sentencepiece_model_path: tp.Optional[PathLike] = None,
) -> str:
"""
Take a sentence segmented with BPE and restore it to its original segmentation.
"""
assert tokenization_mode in TOKENIZATION_MODES
if tokenization_mode == "fastbpe":
return sentence.replace("@@ ", "")
elif tokenization_mode == "roberta":
return restore_roberta_segmentation_sentence(sentence)
else:
assert sentencepiece_model_path is not None
model = sentencepiece.SentencePieceProcessor(str(sentencepiece_model_path))
return model.decode_pieces(sentence.split(" "))
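# Illustrative example for the fastbpe mode: a BPE-segmented sentence such as
# "def fac@@ torial ( n ) :" is restored to "def factorial ( n ) :" by removing
# the "@@ " continuation markers; the other modes delegate to their tokenizers.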
def restore_segmentation(
path,
tokenization_mode: str = "fastbpe",
single_line=False,
sentencepiece_model_path: tp.Optional[PathLike] = None,
):
"""
Take a file segmented with BPE and restore it to its original segmentation.
"""
assert tokenization_mode in TOKENIZATION_MODES
assert os.path.isfile(path)
if tokenization_mode == "fastbpe":
restore_fastBPE_segmentation(path)
elif tokenization_mode == "roberta":
return restore_roberta_segmentation(path, single_line=single_line)
else:
assert sentencepiece_model_path is not None
return restore_sentencepiece_segmentation(
path, sentencepiece_model_path, single_line=single_line
)
def restore_roberta_segmentation(path: str, single_line: bool = False) -> None:
with open(path, "r", encoding="utf-8", errors="replace") as input_file:
text_inputs = input_file.read().split("\n")
output = restore_roberta_segmentation_string(text_inputs, single_line)
with open(path, "w") as output_path:
output_path.write(output)
def restore_roberta_segmentation_string(
text_inputs: tp.Union[str, tp.List[str]], single_line: bool = False
) -> str:
if isinstance(text_inputs, str):
text_inputs = text_inputs.splitlines()
output_lines = [
restore_roberta_segmentation_sentence(line, single_line=single_line)
for line in text_inputs
]
return "\n".join(output_lines)
def restore_roberta_segmentation_sentence(line: str, single_line: bool = False):
byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for k, v in byte_encoder.items()}
text = "".join(line.replace(" ", ""))
res = bytearray([byte_decoder[c] for c in text]).decode("utf-8", errors="replace")
return res.replace("\n", TOK_AVOID_NEWLINE) if single_line else res
def restore_sentencepiece_segmentation(
path: str, sentencepiece_model_path: PathLike, single_line: bool = False,
) -> None:
model = sentencepiece.SentencePieceProcessor(str(sentencepiece_model_path))
with open(path, "r", encoding="utf-8", errors="replace") as input_file:
text_inputs = input_file.read().split("\n")
output = restore_sentencepiece_segmentation_string(text_inputs, model, single_line)
with open(path, "w") as output_path:
output_path.write(output)
def restore_sentencepiece_segmentation_string(
text_inputs: tp.Union[str, tp.List[str]],
model: sentencepiece.SentencePieceProcessor,
single_line: bool = False,
) -> str:
if isinstance(text_inputs, str):
text_inputs = text_inputs.splitlines()
output_lines = [model.decode_pieces(line.split(" ")) for line in text_inputs]
if single_line:
output_lines = [line.replace("\n", TOK_AVOID_NEWLINE) for line in output_lines]
return "\n".join(output_lines)
def restore_fastBPE_segmentation(path: str) -> None:
restore_cmd = "sed -i -r 's/(@@ )|(@@ ?$)//g' %s"
subprocess.Popen(restore_cmd % path, shell=True).wait()
def parse_lambda_config(params):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 iterations, then will linearly increase to 1 until iteration 2000
"""
    # for lambda_classif it is possible to have one lambda per language pair, so split per language first
global dynamic_coeff
if len(params.classif_steps) > 0:
x = getattr(params, "lambda_classif")
split = [s.split("::") for s in x.split("/")]
assert all(len(s) == 2 or len(s) == 1 for s in split)
assert all(
tuple(s[0].split("-")) in params.classif_steps for s in split if len(s) == 2
)
assert sum([1 if len(s) == 1 else 0 for s in split]) < 2
general_lambda = "1"
for s in split:
if len(s) == 1:
general_lambda = s[0]
break
lambda_by_step = {s[0]: s[1] for s in split if len(s) == 2}
for step in params.classif_steps:
step = "-".join(step)
if step in lambda_by_step:
setattr(
params,
"lambda_classif" + "_" + step.replace("-", "_"),
lambda_by_step[step],
)
else:
setattr(
params,
"lambda_classif" + "_" + step.replace("-", "_"),
general_lambda,
)
dynamic_coeff.append("lambda_classif" + "_" + step.replace("-", "_"))
dynamic_coeff.remove("lambda_classif")
for name in dynamic_coeff:
x = getattr(params, name)
split = x.split(",")
if len(split) == 1:
setattr(params, name, float(x))
setattr(params, name + "_config", None)
else:
split = [s.split(":") for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(
int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1)
)
setattr(params, name, float(split[0][1]))
setattr(params, name + "_config", [(int(k), float(v)) for k, v in split])
def get_lambda_value(config, n_iter: int) -> float:
"""
Compute a lambda value according to its schedule configuration.
"""
ranges = [
i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0]
]
if len(ranges) == 0:
assert n_iter >= config[-1][0]
return config[-1][1]
assert len(ranges) == 1
i = ranges[0]
x_a, y_a = config[i]
x_b, y_b = config[i + 1]
return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
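# Schedule sketch (illustrative config): for the string "0:1,1000:0",
# parse_lambda_config stores [(0, 1.0), (1000, 0.0)] in `<name>_config`, and
# get_lambda_value interpolates linearly between the breakpoints, e.g.
# get_lambda_value([(0, 1.0), (1000, 0.0)], 500) == 0.5, and any n_iter past the
# last breakpoint returns the final value (here 0.0).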
def update_lambdas(params, n_iter):
"""
Update all lambda coefficients.
"""
for name in dynamic_coeff:
config = getattr(params, name + "_config")
if config is not None:
setattr(params, name, get_lambda_value(config, n_iter))
def set_sampling_probs(data, params):
"""
Set the probability of sampling specific languages / language pairs during training.
"""
coeff = params.lg_sampling_factor
if coeff == -1:
return
assert coeff > 0
# monolingual data
params.mono_list = [k for k, v in data["mono_stream"].items() if "train" in v]
if len(params.mono_list) > 0:
probs = np.array(
[1.0 * len(data["mono_stream"][lang]["train"]) for lang in params.mono_list]
)
probs /= probs.sum()
probs = np.array([p ** coeff for p in probs])
probs /= probs.sum()
params.mono_probs = probs
# parallel data
params.para_list = [k for k, v in data["para"].items() if "train" in v]
if len(params.para_list) > 0:
probs = np.array(
[
1.0 * len(data["para"][(l1, l2)]["train"])
for (l1, l2) in params.para_list
]
)
probs /= probs.sum()
probs = np.array([p ** coeff for p in probs])
probs /= probs.sum()
params.para_probs = probs
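# Example (illustrative): with three monolingual training sets of sizes [900, 90, 10]
# and lg_sampling_factor = 0.5, the raw frequencies [0.9, 0.09, 0.01] are raised to the
# power 0.5 and renormalized, giving roughly [0.70, 0.22, 0.07] - a flatter distribution
# that upsamples the low-resource languages relative to pure size-proportional sampling.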
def concat_batches(
x1: LTensor,
len1: LTensor,
lang1_id: int,
x2: LTensor,
len2: LTensor,
lang2_id: int,
pad_idx: int,
eos_idx: int,
reset_positions: bool,
) -> tp.Tuple[LTensor, LTensor, LTensor, LTensor]:
"""
Concat batches with different languages.
"""
assert reset_positions is False or lang1_id != lang2_id
lengths = len1 + len2
if not reset_positions:
lengths -= 1
slen, bs = lengths.max().item(), lengths.size(0)
x = x1.new(slen, bs).fill_(pad_idx)
x[: int(len1.max().item())].copy_(x1)
positions = torch.arange(slen)[:, None].repeat(1, bs).to(x1.device)
langs = x1.new(slen, bs).fill_(lang1_id)
for i in range(bs):
l1 = int(len1[i] if reset_positions else len1[i] - 1)
l2 = int(len2[i])
x[l1 : l1 + l2, i].copy_(x2[:l2, i])
if reset_positions:
positions[l1:, i] -= len1[i]
langs[l1:, i] = lang2_id
assert (x == eos_idx).long().sum().item() == (4 if reset_positions else 3) * bs
return x, lengths, positions, langs # type: ignore
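# Note (illustrative): with reset_positions=False, the trailing eos of the lang1 segment
# and the leading eos of the lang2 segment share a single position, so each concatenated
# sentence has length len1 + len2 - 1 and exactly 3 eos tokens; with reset_positions=True
# the two segments are kept intact (4 eos tokens) and positions restart at 0 for lang2.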
def truncate(
x: LTensor, lengths: LTensor, max_len: int, eos_index: int
) -> tp.Tuple[LTensor, LTensor]:
"""
Truncate long sentences.
"""
if lengths.max().item() > max_len:
x = x[:max_len].clone() # type: ignore
lengths = lengths.clone() # type: ignore
for i in range(len(lengths)):
if lengths[i] > max_len:
lengths[i] = max_len
x[max_len - 1, i] = eos_index
return x, lengths
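# Example (illustrative): with max_len = 3, a column [eos, a, b, c, eos] of length 5 is
# cut to [eos, a, eos] of length 3 - the sequence is sliced to max_len and the last kept
# token is overwritten with eos_index so every sentence still ends with eos.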
def shuf_order(langs, params=None, n=5):
"""
Randomize training order.
"""
if len(langs) == 0:
return []
if params is None:
return [langs[i] for i in np.random.permutation(len(langs))]
# sample monolingual and parallel languages separately
mono = []
para = []
for l in langs:
assert len(l) > 0
if len(l) == 1 or l[1] is None:
mono.append(l[0])
else:
para.append(l)
# uniform / weighted sampling
if params.lg_sampling_factor == -1:
p_mono = None
p_para = None
else:
p_mono = np.array([params.mono_probs[params.mono_list.index(k)] for k in mono])
p_para = np.array(
[params.para_probs[params.para_list.index(tuple(sorted(k)))] for k in para]
)
p_mono = p_mono / p_mono.sum()
p_para = p_para / p_para.sum()
s_mono = (
[
mono[i]
for i in np.random.choice(
len(mono), size=min(n, len(mono)), p=p_mono, replace=True
)
]
if len(mono) > 0
else []
)
s_para = (
[
para[i]
for i in np.random.choice(
len(para), size=min(n, len(para)), p=p_para, replace=True
)
]
if len(para) > 0
else []
)
assert len(s_mono) + len(s_para) > 0
return [(lang, None) for lang in s_mono] + s_para
def set_MKL_env_vars():
for k in ["MKL_THREADING_LAYER", "MKL_SERVICE_FORCE_INTEL"]:
print(f"{k}: {os.environ.get(k)}")
if os.environ.get(k) is None:
print(f"Setting {k} to GNU")
os.environ[k] = "GNU"
def word_shuffle(x, l, params, rng=None):
"""
Randomly shuffle input words.
"""
if params.word_shuffle == 0:
return x, l
# define noise word scores
noise = rng.uniform(0, params.word_shuffle, size=(x.size(0) - 1, x.size(1)))
noise[0] = -1 # do not move start sentence symbol
assert params.word_shuffle > 1
x2 = x.clone()
for i in range(l.size(0)):
# generate a random permutation
scores = np.arange(l[i] - 1) + noise[: l[i] - 1, i]
permutation = scores.argsort()
# shuffle words
x2[: l[i] - 1, i].copy_(x2[: l[i] - 1, i][torch.from_numpy(permutation)])
return x2, l
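# Sketch (illustrative): word_shuffle adds uniform noise in [0, params.word_shuffle) to
# each position index and re-sorts by the noisy scores, so tokens only move within
# roughly params.word_shuffle - 1 positions of their original index; the leading eos
# (noise fixed to -1) never moves.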
def word_dropout(x, l, params, rng):
"""
Randomly drop input words.
"""
if params.word_dropout == 0:
return x, l
assert 0 < params.word_dropout < 1
# define words to drop
eos = params.eos_index
assert (x[0] == eos).sum() == l.size(0)
keep = rng.rand(x.size(0) - 1, x.size(1)) >= params.word_dropout
keep[0] = 1 # do not drop the start sentence symbol
sentences = []
lengths = []
for i in range(l.size(0)):
assert x[l[i] - 1, i] == eos
words = x[: l[i] - 1, i].tolist()
# randomly drop words from the input
new_s = [w for j, w in enumerate(words) if keep[j, i]]
# we need to have at least one word in the sentence (more than the start / end sentence symbols)
if len(new_s) == 1:
new_s.append(words[np.random.randint(1, len(words))])
new_s.append(eos)
assert len(new_s) >= 3 and new_s[0] == eos and new_s[-1] == eos
sentences.append(new_s)
lengths.append(len(new_s))
# re-construct input
l2 = torch.LongTensor(lengths)
x2 = torch.LongTensor(l2.max(), l2.size(0)).fill_(params.pad_index)
for i in range(l2.size(0)):
x2[: l2[i], i].copy_(torch.LongTensor(sentences[i]))
return x2, l2
def word_blank(x, l, params, rng):
"""
Randomly blank input words.
"""
if params.word_blank == 0:
return x, l
assert 0 < params.word_blank < 1
# define words to blank
eos = params.eos_index
assert (x[0] == eos).sum() == l.size(0)
keep = rng.rand(x.size(0) - 1, x.size(1)) >= params.word_blank
keep[0] = 1 # do not blank the start sentence symbol
sentences = []
for i in range(l.size(0)):
assert x[l[i] - 1, i] == eos
words = x[: l[i] - 1, i].tolist()
# randomly blank words from the input
new_s = [w if keep[j, i] else params.mask_index for j, w in enumerate(words)]
new_s.append(eos)
assert len(new_s) == l[i] and new_s[0] == eos and new_s[-1] == eos
sentences.append(new_s)
# re-construct input
x2 = torch.LongTensor(l.max(), l.size(0)).fill_(params.pad_index)
for i in range(l.size(0)):
x2[: l[i], i].copy_(torch.LongTensor(sentences[i]))
return x2, l
def span_masking(x, len, params, max_vocab, rng, torch_rng):
if params.mask_length_dist is None:
return word_blank(x, len, params, rng)
else:
sentences = [
mask_spans(x[:l, i], params, max_vocab, torch_rng)
for i, l in zip(range(x.size(1)), len)
]
newlen = torch.LongTensor([s.size(0) for s in sentences])
sent = torch.LongTensor(newlen.max().item(), newlen.size(0)).fill_(
params.pad_index
)
sent[0] = params.eos_index
for i, s in enumerate(sentences):
if newlen[i] > 2: # if sentence not empty
sent[0 : newlen[i], i] = s
sent[newlen[i] - 1, i] = params.eos_index
return sent, newlen
def mask_spans(x, params, max_vocab, torch_rng):
"""
Randomly masks spans or replaces with random words
"""
assert x[0].item() == x[-1].item() == params.eos_index
assert (x != params.pad_index).all().item()
source_length = len(x)
num_to_mask = math.ceil(source_length * params.word_blank)
lengths = torch.multinomial(
params.mask_length_dist_probas,
num_to_mask,
replacement=True,
generator=torch_rng,
)
if lengths.sum() > num_to_mask:
cum_length = torch.cumsum(lengths, 0)
# Trim to masking budget
i = 0
while cum_length[i] < num_to_mask:
i += 1
lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])
num_to_mask = i + 1
lengths = lengths[:num_to_mask]
# Handle 0-length mask (inserts) separately
lengths = lengths[lengths > 0]
num_inserts = num_to_mask - lengths.size(0)
num_to_mask -= num_inserts
if num_to_mask == 0:
return insert_tokens(x, num_inserts, params, max_vocab, torch_rng)
# indices to mask without start or end symbol
indices = torch.randperm(source_length - 2, generator=torch_rng)[:num_to_mask] + 1
assert source_length - 1 not in indices
assert 0 not in indices
mask_random = (
torch.FloatTensor(num_to_mask).uniform_(generator=torch_rng) < params.word_rand
)
to_keep = torch.ones(source_length, dtype=torch.bool)
# keep first index, but replace it with [MASK] or random token
probs = torch.multinomial(
params.pred_probs, len(indices), replacement=True, generator=torch_rng
)
_x_real = x[indices]
_x_rand = _x_real.clone().random_(params.n_words)
_x_mask = _x_real.clone().fill_(params.mask_index)
_x = (
_x_mask * (probs == 0).long()
+ _x_real * (probs == 1).long()
+ _x_rand * (probs == 2).long()
)
x[indices] = _x
assert len(lengths.size()) == 1
assert lengths.size() == indices.size()
lengths -= 1
while indices.size(0) > 0:
assert lengths.size() == indices.size()
lengths -= 1
uncompleted = (lengths >= 0) & (indices < source_length - 1)
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
lengths = lengths[uncompleted]
# delete token
to_keep[indices] = 0
to_keep[0] = 1
to_keep[-1] = 1
x = x[to_keep]
if num_inserts > 0:
x = insert_tokens(x, num_inserts, params, max_vocab, torch_rng)
assert x[0].item() == x[-1].item() == params.eos_index
return x
def insert_tokens(x, n, params, max_vocab, torch_rng):
num_tokens = len(x)
# insert in a position which is not the first or the last one
noise_indices = torch.randperm(num_tokens + n - 2, generator=torch_rng)[:n] + 1
noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
noise_mask[noise_indices] = 1
num_random = (
torch.FloatTensor(n).uniform_(generator=torch_rng) < params.word_rand
).sum()
result = torch.LongTensor(n + num_tokens).fill_(-1)
result[noise_indices[num_random:]] = params.mask_index
result[noise_indices[:num_random]] = torch.randint(
low=NUM_SPECIAL_TOKENS, high=max_vocab, size=(num_random,), generator=torch_rng
)
result[~noise_mask] = x
assert (result >= 0).all()
return result
def add_noise(words, lengths, params, max_vocab, rng=None, torch_rng=None):
"""
Add noise to the encoder input.
"""
if rng is None:
rng = np.random.RandomState()
words, lengths = word_shuffle(words, lengths, params=params, rng=rng)
words, lengths = word_dropout(words, lengths, params, rng=rng)
words, lengths = span_masking(
words, lengths, params, max_vocab, rng=rng, torch_rng=torch_rng
)
return words, lengths
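# Pipeline note (illustrative): add_noise applies, in order, word_shuffle (local token
# permutation), word_dropout (token deletion with probability params.word_dropout) and
# span_masking (BART-style span masking when params.mask_length_dist is set, otherwise
# per-token blanking via word_blank), returning the noised batch and updated lengths.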
def safe_index(l, elmt):
try:
return l.index(elmt)
except ValueError:
return None
def convert_to_text(batch, lengths, dico, params, generate_several_reps=False):
"""
Convert a batch of sentences to a list of text sentences.
"""
batch = batch.cpu().numpy()
lengths = lengths.cpu().numpy()
assert (
len(batch.shape) == 2 or len(batch.shape) == 3
), f"generated batch shape was {batch.shape} while it should be in dimension 2 or 3"
nb_repetitions = 1
if len(batch.shape) == 2:
slen, bs = batch.shape
assert (batch[0] == params.eos_index).sum() == bs
assert (batch == params.eos_index).sum() == 2 * bs
else:
slen, nb_repetitions, bs = batch.shape
assert (batch == params.eos_index).sum() == 2 * bs * nb_repetitions
assert (batch[0] == params.eos_index).sum() == bs * nb_repetitions, print(
f"The values were {(batch[0] == params.eos_index).sum()} and {bs * nb_repetitions}"
)
assert lengths.max() == slen and lengths.shape[0] == bs, print(
lengths.max(), slen, lengths.shape[0], bs
)
sentences = []
for j in range(bs):
sentences.append([])
for rep in range(nb_repetitions):
words = []
length_j = lengths[j].max() if len(lengths.shape) == 2 else lengths[j]
for k in range(1, length_j):
next_element = (
batch[k, j] if len(batch.shape) == 2 else batch[k, rep, j]
)
if next_element == params.eos_index:
break
words.append(dico[next_element])
sentences[j].append(" ".join(words))
if generate_several_reps:
return sentences
else:
return [s[0] for s in sentences]
def get_programming_language_name(lang):
if "_ir_" in lang:
return "ir"
if lang in SUPPORTED_LANGUAGES_FOR_TESTS:
return lang
elif lang.split("_")[0] in SUPPORTED_LANGUAGES_FOR_TESTS:
return lang.split("_")[0]
else:
raise ValueError(
f"The language {lang} is not supported for unit tests self-training. "
f"The supported languages are {SUPPORTED_LANGUAGES_FOR_TESTS}"
)
def get_java_bin_path():
try:
from codegen_sources.external_paths import JAVA_HOME
return JAVA_HOME
except ImportError:
return ""
|
CodeGen-main
|
codegen_sources/model/src/utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import re
import subprocess
import sys
import textwrap
from pathlib import Path
import torch
from .constants import REF, OUT, HYPO, IR, SOURCE
from .utils import get_programming_language_name, read_file_lines, TOK_AVOID_NEWLINE
LTensor = torch.LongTensor
REPO_ROOT = Path(__file__).parents[3].absolute()
sys.path.append(str(REPO_ROOT))
print("adding to path", str(REPO_ROOT))
from codegen_sources.preprocessing.lang_processors import LangProcessor, IRProcessor
def vizualize_translated_files(
lang1_processor,
lang2_processor,
src_file,
hyp_file,
ids,
ref_file=None,
out_file=None,
irs_file=None,
page_width: int = 250,
tokenization_mode="fastbpe",
):
if tokenization_mode != "fastbpe":
lang1_processor = lang2_processor = None
each_width = page_width // 4 - 4 if irs_file is None else (page_width // 5 - 4)
if isinstance(lang1_processor, str):
lang1_processor = LangProcessor.processors[
get_programming_language_name(lang1_processor)
]()
if isinstance(lang2_processor, str):
lang2_processor = LangProcessor.processors[
get_programming_language_name(lang2_processor)
]()
ir_processor = None
if irs_file:
ir_processor = IRProcessor()
src_viz = str(Path(src_file).with_suffix(".vizualize.txt"))
hyp_viz = str(
Path(re.sub("beam\d", "", hyp_file[0])).with_suffix(".vizualize.txt.tmp")
)
if ref_file is None:
ref_viz = str(Path("ref_tmp").with_suffix(".vizualize.txt"))
else:
ref_viz = str(Path(ref_file).with_suffix(".vizualize.txt"))
if irs_file is not None:
ir_viz = str(Path(irs_file).with_suffix(".vizualize.txt"))
else:
ir_viz = None
if out_file is None:
out_viz = str(Path("out_tmp").with_suffix(".vizualize.txt"))
else:
out_viz = str(
Path(re.sub("beam\d", "", out_file[0])).with_suffix(".vizualize.txt")
)
hyp_lines = list(
zip(*[read_file_lines(path) for path in hyp_file])
) # test_size * beam_size
ids = (
open(ids, "r", encoding="utf-8").readlines()
if ids is not None
else [""] * len(hyp_lines)
)
beam_size = len(hyp_lines[0])
with open(src_file, encoding="utf-8") as f:
src_lines = f.readlines() # test_size
if ref_file is not None:
with open(ref_file, encoding="utf-8") as f:
ref_lines = f.readlines() # test_size
else:
ref_lines = ["" for _ in range(len(src_lines))]
if irs_file is None:
irs_lines = [""] * len(src_lines)
else:
with open(irs_file, encoding="utf-8") as f:
irs_lines = f.readlines() # test_size
if out_file is not None:
out_lines = list(
zip(*[read_file_lines(path) for path in out_file])
) # test_size * beam_size
else:
out_lines = [
["" for _ in range(len(hyp_lines[0]))] for _ in range(len(src_lines))
]
to_show = [
(SOURCE, src_viz),
(IR, ir_viz),
(HYPO, hyp_viz),
(REF, ref_viz),
(OUT, out_viz),
]
to_show = [x for x in to_show if x[1]]
file_writers = {
header: open(str(path), "w", encoding="utf-8") for header, path in to_show
}
try:
for header, writer in file_writers.items():
writer.write(
f"========================{header}============================\n"
)
for src, ir, hyps, ref, outs, i in zip(
src_lines, irs_lines, hyp_lines, ref_lines, out_lines, ids
):
for header, writer in file_writers.items():
writer.write(
"=========================================================\n"
)
writer.write(f"{i}")
writer.write("--\n")
try:
if lang1_processor:
src = overflow_fill(
lang1_processor.detokenize_code(src), each_width
)
src = src.replace(TOK_AVOID_NEWLINE, "\n")
file_writers[SOURCE].write(src)
except KeyboardInterrupt:
raise
except:
src = overflow_fill(src, each_width)
file_writers[SOURCE].write(src)
if IR in file_writers:
try:
if lang1_processor:
assert ir_processor, "ir_processor not defined"
ir = overflow_fill(ir_processor.detokenize_code(ir), each_width)
file_writers[IR].write(ir)
except KeyboardInterrupt:
raise
except:
ir = overflow_fill(ir, each_width)
file_writers[IR].write(ir)
try:
if lang2_processor:
ref = overflow_fill(
lang2_processor.detokenize_code(ref), each_width
)
ref = ref.replace(TOK_AVOID_NEWLINE, "\n")
file_writers[REF].write(ref)
except KeyboardInterrupt:
raise
except:
ref = overflow_fill(ref, each_width)
file_writers[REF].write(ref)
for i in range(beam_size):
hyp = hyps[i]
out = outs[i]
try:
if lang2_processor:
hyp = overflow_fill(
lang2_processor.detokenize_code(hyp), each_width
)
hyp = hyp.replace(TOK_AVOID_NEWLINE, "\n")
file_writers[HYPO].write(hyp)
except KeyboardInterrupt:
raise
except:
hyp = overflow_fill(hyp, each_width)
file_writers[HYPO].write(hyp)
out = overflow_fill(out, each_width - 4)
file_writers[OUT].write(out)
if i == 0:
maximum = max(
len(src.split("\n")),
len(hyp.split("\n")),
len(ref.split("\n")),
len(out.split("\n")),
len(ir.split("\n")),
)
for i in range(len(src.split("\n")), maximum):
file_writers[SOURCE].write("\n")
if IR in file_writers:
for i in range(len(ir.split("\n")), maximum):
file_writers[IR].write("\n")
for i in range(len(hyp.split("\n")), maximum):
file_writers[HYPO].write("\n")
for i in range(len(ref.split("\n")), maximum):
file_writers[REF].write("\n")
for i in range(len(out.split("\n")), maximum):
file_writers[OUT].write("\n")
else:
maximum = max(len(hyp.split("\n")), len(out.split("\n")))
for i in range(maximum - 1):
file_writers[SOURCE].write("\n")
file_writers[REF].write("\n")
if IR in file_writers:
file_writers[IR].write("\n")
for i in range(len(hyp.split("\n")), maximum):
file_writers[HYPO].write("\n")
for i in range(len(out.split("\n")), maximum):
file_writers[OUT].write("\n")
for writer in file_writers.values():
writer.write("-\n")
for writer in file_writers.values():
writer.write("--\n\n")
finally:
for writer in file_writers.values():
writer.close()
command = f"pr -w {page_width} -m -t {src_viz} {ir_viz if ir_viz else ''} {ref_viz} {hyp_viz} {out_viz} > {hyp_viz[:-4]}"
subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
).wait()
os.remove(src_viz)
if src_viz != ref_viz:
os.remove(ref_viz)
if ir_viz is not None and Path(ir_viz).is_file():
os.remove(ir_viz)
os.remove(hyp_viz)
os.remove(out_viz)
def vizualize_do_files(lang1, src_file, ref_file, hyp_file):
lang1_processor = LangProcessor.processors[get_programming_language_name(lang1)]()
src_viz = str(Path(src_file).with_suffix(".vizualize.txt"))
hyp_viz = str(
Path(re.sub("beam\d", "", hyp_file[0])).with_suffix(".vizualize.txt.tmp")
)
ref_viz = str(Path(ref_file).with_suffix(".vizualize.txt"))
hyp_lines = list(
zip(*[read_file_lines(path) for path in hyp_file])
) # test_size * beam_size
beam_size = len(hyp_lines[0])
with open(src_file, encoding="utf-8") as f:
src_lines = f.readlines() # test_size
with open(ref_file, encoding="utf-8") as f:
ref_lines = f.readlines() # test_size
with open(src_viz, "w", encoding="utf-8") as src_vizf:
with open(hyp_viz, "w", encoding="utf-8") as hyp_vizf:
with open(ref_viz, "w", encoding="utf-8") as ref_vizf:
src_vizf.write(
"========================SOURCE============================\n"
)
hyp_vizf.write(
"=========================HYPO=============================\n"
)
ref_vizf.write(
"==========================REF=============================\n"
)
for src, hyps, ref in zip(src_lines, hyp_lines, ref_lines):
src_vizf.write(
"=========================================================\n"
)
hyp_vizf.write(
"=========================================================\n"
)
ref_vizf.write(
"=========================================================\n"
)
try:
src = lang1_processor.detokenize_code(src)
src_vizf.write(src)
except:
src = "".join(
[
c if (i + 1) % 50 != 0 else c + "\n"
for i, c in enumerate(src)
]
)
src_vizf.write(src)
ref = ref.replace("|", "\n").strip()
ref_vizf.write(ref)
for i in range(beam_size):
hyp = hyps[i]
hyp = hyp.replace("|", "\n").strip()
hyp_vizf.write(hyp)
if i == 0:
maximum = max(
len(src.split("\n")),
len(hyp.split("\n")),
len(ref.split("\n")),
)
for i in range(len(src.split("\n")), maximum):
src_vizf.write("\n")
for i in range(len(hyp.split("\n")), maximum):
hyp_vizf.write("\n")
for i in range(len(ref.split("\n")), maximum):
ref_vizf.write("\n")
else:
maximum = max(
len(src.split("\n")),
len(hyp.split("\n")),
len(ref.split("\n")),
)
for i in range(maximum - 1):
src_vizf.write("\n")
for i in range(maximum - 1):
ref_vizf.write("\n")
for i in range(len(hyp.split("\n")), maximum):
hyp_vizf.write("\n")
src_vizf.write("-\n")
hyp_vizf.write("-\n")
ref_vizf.write("-\n")
src_vizf.write("--\n\n")
hyp_vizf.write("--\n\n")
ref_vizf.write("--\n\n")
command = f"pr -w 250 -m -t {src_viz} {ref_viz} {hyp_viz} > {hyp_viz[:-4]}"
subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
).wait()
os.remove(src_viz)
os.remove(ref_viz)
os.remove(hyp_viz)
def overflow_fill(s, max_width):
return "\n".join([textwrap.fill(l, max_width) for l in s.splitlines()])
|
CodeGen-main
|
codegen_sources/model/src/vizualization_utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import os
import random
import time
import typing as tp
from collections import OrderedDict
from concurrent.futures.process import ProcessPoolExecutor
from logging import getLogger
import numpy as np
import torch
from torch.nn.utils import clip_grad_norm_
from .cache import ListCache, RoundRobinCache
from .data.loader import SELF_TRAINED
from .model import CustomDDP
from .optim import get_optimizer
from .utils import (
add_noise,
batch_sentences,
concat_batches,
parse_lambda_config,
show_batch,
to_cuda,
update_lambdas,
convert_to_text,
safe_index,
restore_segmentation_sentence,
get_programming_language_name,
)
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parents[3]))
print("adding to path", str(Path(__file__).parents[3]))
from ...code_runners import test_runners
logger = getLogger()
LTensor = torch.LongTensor
OptLTensor = tp.Optional[torch.LongTensor]
# x, lengths, positions, langs = concat_batches(
SampleInfo = tp.Tuple[LTensor, LTensor, LTensor, LTensor]
class Trainer:
def __init__(self, data, params, model_names: tp.List[str]) -> None:
"""
Initialize trainer.
"""
# epoch / iteration size
self.params = params
self.data = data
self.MODEL_NAMES = model_names
self.epoch_size = params.epoch_size
if self.epoch_size == -1:
self.epoch_size = len(self.data)
assert self.epoch_size > 0
# data iterators
self.iterators: tp.Dict[
tp.Tuple[tp.Optional[str], ...], tp.Iterator[tp.List[SampleInfo]]
] = {}
# set parameters
self.set_parameters()
# float16 / distributed (no AMP)
assert params.amp >= 1 or not params.fp16
assert params.amp >= 0 or params.accumulate_gradients == 1
if params.multi_gpu and not params.apex:
logger.info("Using nn.parallel.DistributedDataParallel ...")
for name in self.MODEL_NAMES:
model_attr = getattr(self, name)
if isinstance(model_attr, list):
setattr(
self,
name,
[
CustomDDP.CustomTorchDDP(
model,
device_ids=[params.local_rank],
output_device=params.local_rank,
broadcast_buffers=True,
find_unused_parameters=True,
)
for model in model_attr
],
)
else:
setattr(
self,
name,
CustomDDP.CustomTorchDDP(
model_attr,
device_ids=[params.local_rank],
output_device=params.local_rank,
broadcast_buffers=True,
find_unused_parameters=True,
),
)
# set optimizers
self.set_optimizers()
# float16 / distributed (AMP)
self.scaler = None
if params.fp16 and not params.apex:
logger.info("Using torch.cuda.amp GradScaler for fp16 optimization")
self.scaler = torch.cuda.amp.GradScaler()
assert params.accumulate_gradients >= 1
else:
assert params.accumulate_gradients == 1
# TODO: accumulation should be possible with:
# https://github.com/pytorch/pytorch/pull/21736
# float16 with apex. A bit faster but issues saving optimizer state
if params.amp >= 0 and params.apex:
self.init_amp_apex()
if params.multi_gpu:
logger.info("Using apex.parallel.DistributedDataParallel ...")
for name in self.MODEL_NAMES:
model_attr = getattr(self, name)
if isinstance(model_attr, list):
setattr(
self,
name,
[
CustomDDP.CustomApexDDP(model, delay_allreduce=True)
for model in model_attr
],
)
else:
setattr(
self,
name,
CustomDDP.CustomApexDDP(model_attr, delay_allreduce=True),
)
# stopping criterion used for early stopping
if params.stopping_criterion != "":
split = params.stopping_criterion.split(",")
assert len(split) == 2 and split[1].isdigit()
self.decrease_counts_max = int(split[1])
self.decrease_counts = 0
if split[0][0] == "_":
self.stopping_criterion: tp.Optional[tp.Tuple[str, bool]] = (
split[0][1:],
False,
)
else:
self.stopping_criterion = (split[0], True)
self.best_stopping_criterion: tp.Optional[float] = (
-1e12 if self.stopping_criterion[1] else 1e12
)
else:
self.stopping_criterion = None
self.best_stopping_criterion = None
if len(params.st_steps) > 0:
self.test_runners = {
"python": test_runners.PythonEvosuiteTestRunner(
timeout=params.st_test_timeout
),
"cpp": test_runners.CppEvosuiteTestRunner(
timeout=params.st_test_timeout
),
}
self.unit_tests = data[f"java_st_unit_tests"]
# probabilities of masking out / randomizing / not modifying the words to predict
params.pred_probs = torch.FloatTensor(
[params.word_mask, params.word_keep, params.word_rand]
)
# probability to predict a word
counts = np.array(list(self.data["dico"].counts.values()))
params.mask_scores = np.maximum(counts, 1) ** -params.sample_alpha
params.mask_scores[params.pad_index] = 0 # do not predict <PAD> index
# do not predict special tokens
params.mask_scores[counts == 0] = 0
# validation metrics
self.metrics = []
metrics = [m for m in params.validation_metrics.split(",") if m != ""]
for m in metrics:
m = (m[1:], False) if m[0] == "_" else (m, True)
self.metrics.append(m)
self.best_metrics = {
metric: (-1e12 if biggest else 1e12) for (metric, biggest) in self.metrics
}
# training statistics
self.epoch = 0
self.n_iter = 0
self.n_total_iter = 0
self.n_sentences = 0
stats: tp.List[tp.Tuple[str, tp.Any]] = [("processed_s", 0), ("processed_w", 0)]
stats.extend(
[("CLM-%s" % l, []) for l in params.langs]
+ [("CLM-%s" % ("-".join(keys)), []) for keys in data["para"].keys()]
+ [("CLM-%s" % "-".join(keys[::-1]), []) for keys in data["para"].keys()]
+ [("MLM-%s" % l, []) for l in params.langs]
+ [("MLM-%s" % ("-".join(keys)), []) for keys in data["para"].keys()]
+ [("MLM-%s" % "-".join(keys[::-1]), []) for keys in data["para"].keys()]
+ [("AE-%s" % lang, []) for lang in params.ae_steps]
+ [("TAE-%s-%s" % (lang1, lang2), []) for lang1, lang2 in params.tae_steps]
+ [("MT-%s-%s" % (l1, l2), []) for l1, l2 in params.mt_steps]
+ [
("MT-%s-%s-%s" % (l1, l2, span), [])
for l1, l2, span in params.mt_spans_steps
]
+ [("DO-%s-%s" % (l1, l2), []) for l1, l2 in params.do_steps]
+ [("Classif-%s-%s" % (l1, l2), []) for l1, l2 in params.classif_steps]
+ [("BT-%s-%s-%s" % (l1, l2, l3), []) for l1, l2, l3 in params.bt_steps]
+ [
("ST-%s:%s-%s" % (l1, l1, l2), [])
for l1, langs2 in params.st_steps
for l2 in langs2
]
+ [
("ST-%s:%s-%s" % (l1, l2, l1), [])
for l1, langs2 in params.st_steps
for l2 in langs2
]
+ [
("ST-%s:%s-%s" % (l1, l2_1, l2_2), [])
for l1, langs2 in params.st_steps
for l2_1 in langs2
for l2_2 in langs2
if l2_1 != l2_2
]
)
self.stats = OrderedDict(stats)
self.last_time = time.time()
self.st_langs = set()
for lang1, langs2 in params.st_steps:
for l1 in [lang1] + list(langs2):
for l2 in [lang1] + list(langs2):
if l1 < l2:
self.st_langs.add((l1, l2))
self.cache_class = RoundRobinCache if params.robin_cache else ListCache
self.st_cache = {
tuple([l1, l2]): self.cache_class(params=params) for l1, l2 in self.st_langs
}
self.number_consecutive_reads = 0
if params.cache_init_path != "":
self.load_initial_cache()
# reload potential checkpoints
self.reload_checkpoint()
# initialize lambda coefficients and their configurations
parse_lambda_config(params)
def load_initial_cache(self) -> None:
for (l1, l2), cache in self.st_cache.items():
cache_path = Path(self.params.cache_init_path).joinpath(
f"cache_{l1}-{l2}.pkl"
)
assert cache_path.is_file(), f"initial cache file {cache_path} is missing"
cache.load(cache_path)
def set_parameters(self) -> None:
"""
Set parameters.
"""
self.parameters = {}
named_params = []
for name in self.MODEL_NAMES:
models = getattr(self, name)
if isinstance(models, list):
for model in models:
named_params.extend(
[(k, p) for k, p in model.named_parameters() if p.requires_grad]
)
else:
named_params.extend(
[(k, p) for k, p in models.named_parameters() if p.requires_grad]
)
# model parameters
self.parameters["model"] = [p for k, p in named_params]
# log
for k, v in self.parameters.items():
logger.info("Found %i parameters in %s." % (len(v), k))
assert len(v) >= 1
def set_optimizers(self) -> None:
"""
Set optimizers.
"""
params = self.params
self.optimizers = {}
# model optimizer
self.optimizers["model"] = get_optimizer(
self.parameters["model"], params.optimizer
)
# log
logger.info("Optimizers: %s" % ", ".join(self.optimizers.keys()))
def init_amp_apex(self) -> None:
"""
Initialize AMP optimizer.
"""
# online import to avoid installing if not used (eg in the CI)
from apex import amp # type: ignore
params = self.params
assert (
params.amp == 0
and params.fp16 is False
or params.amp in [1, 2, 3]
and params.fp16 is True
)
opt_names = self.optimizers.keys()
models = [
model
for name in self.MODEL_NAMES
for model in (
getattr(self, name)
if isinstance(getattr(self, name), list)
else [getattr(self, name)]
)
]
models, optimizers = amp.initialize(
models,
[self.optimizers[k] for k in opt_names],
opt_level=("O%i" % params.amp),
)
current_index = 0
for name in self.MODEL_NAMES:
model_attr = getattr(self, name)
if isinstance(model_attr, list):
models_length = len(model_attr)
setattr(
self, name, models[current_index : current_index + models_length]
)
current_index += models_length
else:
setattr(self, name, models[current_index])
current_index += 1
assert current_index == len(models)
self.optimizers = {
opt_name: optimizer for opt_name, optimizer in zip(opt_names, optimizers)
}
def optimize(self, loss: torch.Tensor) -> None:
"""
Optimize.
"""
# check NaN
if (loss != loss).data.any():
logger.warning("NaN detected")
# exit()
params = self.params
# optimizers
names = self.optimizers.keys()
optimizers = [self.optimizers[k] for k in names]
# regular optimization
if params.fp16 is False:
for optimizer in optimizers:
optimizer.zero_grad()
loss.backward()
if params.clip_grad_norm > 0:
for name in names:
# norm_check_a = (sum([p.grad.norm(p=2).item() ** 2 for p in self.parameters[name]])) ** 0.5
clip_grad_norm_(self.parameters[name], params.clip_grad_norm)
# norm_check_b = (sum([p.grad.norm(p=2).item() ** 2 for p in self.parameters[name]])) ** 0.5
# print(name, norm_check_a, norm_check_b)
for optimizer in optimizers:
optimizer.step()
# AMP optimization
elif params.apex:
from apex import amp # type: ignore
if self.n_iter % params.accumulate_gradients == 0:
with amp.scale_loss(loss, optimizers) as scaled_loss:
scaled_loss.backward()
if params.clip_grad_norm > 0:
for name in names:
# norm_check_a = (sum([p.grad.norm(p=2).item() ** 2 for p in apex.amp.master_params(self.optimizers[name])])) ** 0.5
clip_grad_norm_(
amp.master_params(self.optimizers[name]),
params.clip_grad_norm,
)
# norm_check_b = (sum([p.grad.norm(p=2).item() ** 2 for p in apex.amp.master_params(self.optimizers[name])])) ** 0.5
# print(name, norm_check_a, norm_check_b)
for optimizer in optimizers:
optimizer.step()
optimizer.zero_grad()
else:
with amp.scale_loss(
loss, optimizers, delay_unscale=True
) as scaled_loss:
scaled_loss.backward()
else:
assert self.scaler is not None
self.scaler.scale(loss).backward()
if (self.n_iter + 1) % params.accumulate_gradients == 0:
for name in names:
if params.clip_grad_norm > 0:
# https://pytorch.org/docs/stable/notes/amp_examples.html#gradient-clipping
# Unscales the gradients of optimizer's assigned params in-place
self.scaler.unscale_(self.optimizers[name])
torch.nn.utils.clip_grad_norm_(
self.parameters[name], params.clip_grad_norm
)
self.scaler.step(self.optimizers[name])
self.scaler.update()
self.optimizers[name].zero_grad()
def iter(self) -> None:
"""
End of iteration.
"""
self.n_iter += 1
self.n_total_iter += 1
update_lambdas(self.params, self.n_total_iter)
if self.n_iter % 5 == 0:
self.print_stats()
def print_stats(self) -> None:
"""
Print statistics about the training.
"""
# if self.n_total_iter % 5 != 0:
# return
s_iter = "%7i - " % self.n_total_iter
s_stat = " || ".join(
[
"{}: {:7.4f}".format(k, float(np.mean(v)))
for k, v in self.stats.items()
if type(v) is list and len(v) > 0
]
)
for k in self.stats.keys():
if type(self.stats[k]) is list:
del self.stats[k][:]
# learning rates
s_lr = " - "
for k, v in self.optimizers.items():
s_lr = (
s_lr
+ (" - %s LR: " % k)
+ " / ".join("{:.4e}".format(group["lr"]) for group in v.param_groups)
)
if self.params.bt_sample_temperature > 0:
s_bt_samp = " - BT-sampling-T: " + "{:2.2e}".format(
self.params.bt_sample_temperature
)
else:
s_bt_samp = ""
# processing speed
new_time = time.time()
diff = new_time - self.last_time
s_speed = "{:7.2f} sent/s - {:8.2f} words/s - ".format(
self.stats["processed_s"] * 1.0 / diff,
self.stats["processed_w"] * 1.0 / diff,
)
self.stats["processed_s"] = 0
self.stats["processed_w"] = 0
self.last_time = new_time
# log speed + stats + learning rate
logger.info(s_iter + s_speed + s_stat + s_lr + s_bt_samp)
def get_iterator(
self,
iter_name: str,
lang1: str,
lang2: tp.Optional[str],
stream: bool,
span: tp.Optional[str] = None, # not sure what type it actually is
self_training: bool = False,
st_scores_cutoff=None,
) -> tp.Iterator[SampleInfo]:
"""
Create a new iterator for a dataset.
"""
if st_scores_cutoff is not None:
assert (
self_training
), f"st_scores_cutoff should only be set for self_training"
splt = SELF_TRAINED if self_training else "train"
logger.info(
"Creating new training data iterator (%s) ..."
% ",".join([str(x) for x in [iter_name, lang1, lang2] if x is not None])
)
if lang2 is None:
if stream:
if span is None:
iterator = self.data["mono_stream"][lang1][splt].get_iterator(
shuffle=True
)
else:
iterator = self.data["mono_stream"][(lang1, span)][
splt
].get_iterator(shuffle=True)
else:
assert self.params.gen_tpb_multiplier > 0
it = (
self.data["mono"][lang1][splt]
if span is None
else self.data["mono"][(lang1, span)][splt]
)
iterator = it.get_iterator(
shuffle=True,
group_by_size=self.params.group_by_size,
n_sentences=-1,
tokens_per_batch=self.params.tokens_per_batch
* (self.params.gen_tpb_multiplier if iter_name == "bt" else 1),
st_scores_cutoff=st_scores_cutoff,
)
else:
assert not self_training
assert stream is False
_lang1, _lang2 = (lang1, lang2) if lang1 < lang2 else (lang2, lang1)
it = (
self.data["para"][(_lang1, _lang2)][splt]
if span is None
else self.data["para"][(_lang1, _lang2, span)][splt]
)
iterator = it.get_iterator(
shuffle=True,
group_by_size=self.params.group_by_size,
n_sentences=-1,
tokens_per_batch=self.params.tokens_per_batch,
)
key = (
(iter_name, lang1, lang2)
if span is None
else (iter_name, lang1, lang2, span)
)
self.iterators[key] = iterator
return iterator
def get_batch(
self,
iter_name: str,
lang1: str,
lang2=None,
stream=False,
span=None,
self_training=False,
st_scores_cutoff=None,
) -> tp.List[LTensor]: # this typing is bad, but we cant be more precise :(
"""
Return a batch of sentences from a dataset.
"""
if st_scores_cutoff is not None:
assert (
self_training
), f"st_scores_cutoff should only be set for self_training"
assert lang1 in self.params.langs
assert (
lang2 is None
or lang2 in self.params.langs
or (lang1, lang2) in self.params.classif_steps
)
assert stream is False or lang2 is None
iterator = (
self.iterators.get((iter_name, lang1, lang2, span), None)
if span is not None
else self.iterators.get((iter_name, lang1, lang2), None)
)
if (
st_scores_cutoff
and self.params.st_refresh_iterator_rate > 0
and self.n_iter % self.params.st_refresh_iterator_rate == 0
):
iterator = None
if iterator is None:
iterator = self.get_iterator( # type: ignore
iter_name,
lang1,
lang2,
stream,
span,
self_training=self_training,
st_scores_cutoff=st_scores_cutoff,
)
try:
x = next(iterator) # type: ignore
except StopIteration:
iterator = self.get_iterator( # type: ignore
iter_name,
lang1,
lang2,
stream,
span,
self_training=self_training,
st_scores_cutoff=st_scores_cutoff,
)
x = next(iterator) # type: ignore
return x if lang2 is None or lang1 < lang2 else x[::-1] # type: ignore
def mask_out(
self, x: LTensor, lengths: LTensor
) -> tp.Tuple[LTensor, LTensor, LTensor]:
"""
Decide of random words to mask out, and what target they get assigned.
"""
params = self.params
slen, bs = x.size()
# define target words to predict
if params.sample_alpha == 0:
pred_mask = np.random.rand(slen, bs) <= params.word_pred
pred_mask = torch.from_numpy(pred_mask.astype(np.uint8))
else:
x_prob = params.mask_scores[x.flatten()]
n_tgt = math.ceil(params.word_pred * slen * bs)
tgt_ids = np.random.choice(
len(x_prob), n_tgt, replace=False, p=x_prob / x_prob.sum()
)
pred_mask = torch.zeros(slen * bs, dtype=torch.uint8)
pred_mask[tgt_ids] = 1
pred_mask = pred_mask.view(slen, bs)
# do not predict padding
pred_mask[x == params.pad_index] = 0
pred_mask[0] = 0 # TODO: remove
# mask a number of words == 0 [8] (faster with fp16)
if params.fp16:
pred_mask = pred_mask.view(-1)
n1 = pred_mask.sum().item()
n2 = max(n1 % 8, 8 * (n1 // 8))
if n2 != n1:
pred_mask[torch.nonzero(pred_mask).view(-1)[: n1 - n2]] = 0
pred_mask = pred_mask.view(slen, bs)
assert pred_mask.sum().item() % 8 == 0
# generate possible targets / update x input
pred_mask = pred_mask == 1
_x_real = x[pred_mask]
_x_rand = _x_real.clone().random_(params.n_words)
_x_mask = _x_real.clone().fill_(params.mask_index)
probs = torch.multinomial(params.pred_probs, len(_x_real), replacement=True)
_x = (
_x_mask * (probs == 0).long()
+ _x_real * (probs == 1).long()
+ _x_rand * (probs == 2).long()
)
x = x.masked_scatter(pred_mask, _x) # type: ignore
assert 0 <= x.min() <= x.max() < params.n_words
assert x.size() == (slen, bs)
assert pred_mask.size() == (slen, bs)
return x, _x_real, pred_mask # type: ignore
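# Illustrative note: params.pred_probs is [word_mask, word_keep, word_rand], so for each
# selected position mask_out samples 0 -> replace with the mask token, 1 -> keep the real
# token, 2 -> substitute a random vocabulary word (the usual BERT-style 80/10/10 split
# when word_mask=0.8, word_keep=0.1 and word_rand=0.1).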
def deobfuscate(
self, x: LTensor, y: LTensor, p: float
) -> tp.Tuple[LTensor, LTensor]:
"""
Deobfuscate class, function and variable names with probability p.
For all variables, functions and classes, we pick some occurrences with probability p and deobfuscate them,
i.e. some occurrences of VAR_0 will be deobfuscated while others are kept as VAR_0.
x : tensor slen x bs , x is obfuscated, i.e. variable, function and class names are
replaced by special tokens ( CLASS_X, FUNC_X and VAR_X).
y : ylen x bs contains the dictionary of obfuscated tokens, i.e 'CLASS_0 class_name | VAR_0 variable_name .. '
"""
slen, bs = x.size()
obf_tokens = (x >= self.data["dico"].obf_index["CLASS"]) * (
x < (self.data["dico"].obf_index["CLASS"] + self.data["dico"].n_obf_tokens)
)
dobf_mask = torch.from_numpy(np.random.rand(slen, bs) <= p)
dobf_mask = dobf_mask * obf_tokens
x[dobf_mask] = -x[ # type: ignore
dobf_mask
] # put to negative all the obf_tokens that have to be restored
# convert sentences to strings and dictionary to a python dictionary {obf_token_special : original_name}
x_ = [
" ".join(
[
str(w)
for w in s
if w not in [self.params.pad_index, self.params.eos_index]
]
)
for s in x.transpose(0, 1).tolist()
]
y_ = [
" ".join(
[
str(w)
for w in s
if w not in [self.params.pad_index, self.params.eos_index]
]
)
for s in y.transpose(0, 1).tolist()
]
sep = f" {self.data['dico'].word2id['|']} "
d = [
{
mapping.strip().split()[0]: " ".join(mapping.strip().split()[1:])
for mapping in pred.split(sep)
}
for pred in y_
]
# restore x i.e replace negative numbers by the original name
# TODO check that sentences are < max_len like in deobfuscate_by_variable
for i in range(bs):
for k, v in d[i].items():
x_[i] = x_[i].replace(f"-{k}", v)
x_[i] = np.array([int(id) for id in x_[i].split()]) # type: ignore
x_b, lengths = batch_sentences(x_, self.params.pad_index, self.params.eos_index)
assert sum(sum((x_b < 0).float())) == 0 # type: ignore
return (x_b, lengths)
def deobfuscate_by_variable(
self, x: LTensor, y: LTensor, p: float, roberta_mode: bool, rng=None
) -> tp.Tuple[OptLTensor, OptLTensor, OptLTensor, OptLTensor]:
"""
Deobfuscate class, function and variable names with probability p, blocked by variable.
We choose some variables VAR_N, functions FUNC_N or classes CLASS_N - with probability p - to deobfuscate entirely.
I.e. if VAR_0 is picked, all the occurrences of VAR_0 are deobfuscated.
x : tensor slen x bs , x is obfuscated, i.e. variable, function and class names are
replaced by special tokens ( CLASS_X, FUNC_X and VAR_X).
y : ylen x bs contains the dictionary of obfuscated tokens, i.e 'CLASS_0 class_name | VAR_0 variable_name .. '
"""
slen, bs = x.size()
# put to negative all the obf_tokens, useful for restoration i.e replacement in string later on
obf_tokens = (x >= self.data["dico"].obf_index["CLASS"]) * (
x < (self.data["dico"].obf_index["CLASS"] + self.data["dico"].n_obf_tokens)
)
x[obf_tokens] = -x[obf_tokens]
# convert sentences to strings and dictionary to a python dictionary (obf_token_special , original_name)
x_ = [
" ".join(
[
str(w)
for w in s
if w not in [self.params.pad_index, self.params.eos_index]
]
)
for s in x.transpose(0, 1).tolist()
]
y_ = [
" ".join(
[
str(w)
for w in s
if w not in [self.params.pad_index, self.params.eos_index]
]
)
for s in y.transpose(0, 1).tolist()
]
if roberta_mode:
sep = (
f" {self.data['dico'].word2id['Ġ|']} {self.data['dico'].word2id['Ġ']} "
)
else:
sep = f" {self.data['dico'].word2id['|']} "
# reversed order to have longer obfuscation first, to make replacement in correct order
d = [
list(
reversed(
[
(
mapping.strip().split()[0],
" ".join(mapping.strip().split()[1:]),
)
for mapping in pred.split(sep)
]
)
)
for pred in y_
]
# restore x, i.e. select variables with probability p and restore all occurrences of each selected variable
# keep only unrestored variables in dictionary d_
x2 = []
y2 = []
for i in range(bs):
d_ = []
if rng:
dobf_mask = rng.rand(len(d[i])) <= p
else:
dobf_mask = np.random.rand(len(d[i])) <= p
# make sure at least one variable is picked
if sum(dobf_mask) == len(d[i]):
if rng:
dobf_mask[rng.randint(0, len(d[i]))] = False
else:
dobf_mask[np.random.randint(0, len(d[i]))] = False
for m, (k, v) in enumerate(d[i]):
if dobf_mask[m]:
x_[i] = x_[i].replace(f"-{k}", f"{v}")
else:
d_.append((k, v))
x_[i] = x_[i].replace(f"-{k}", f"{k}")
if roberta_mode:
# we need to remove the double space introduced during deobfuscation, i.e the "Ġ Ġ"
sent_ids = np.array(
[
self.data["dico"].word2id[index]
for index in (
" ".join(
[
self.data["dico"].id2word[int(w)]
for w in x_[i].split()
]
).replace("Ġ Ġ", "Ġ")
).split()
]
)
else:
sent_ids = np.array([int(id) for id in x_[i].split()])
if len(sent_ids) < self.params.max_len:
x2.append(sent_ids[: self.params.max_len - 2])
d_ids = sep.join([" ".join([k, v]) for k, v in reversed(d_)])
d_ids = np.array([int(id) for id in d_ids.split()]) # type: ignore
y2.append(d_ids)
if len(x2) == 0:
return None, None, None, None
x, len_x = batch_sentences(x2, self.params.pad_index, self.params.eos_index)
y, len_y = batch_sentences(y2, self.params.pad_index, self.params.eos_index)
assert sum(sum((x < 0).float())) == 0 # type: ignore
return (x, len_x, y, len_y)
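# Worked example (illustrative): with p = 0.5 and dictionary
# 'CLASS_0 MyClass | VAR_0 counter | VAR_1 total', deobfuscate_by_variable may restore
# every occurrence of VAR_0 to 'counter' in x while keeping CLASS_0 and VAR_1 obfuscated;
# the returned y then only contains the mappings that were not restored
# ('CLASS_0 MyClass | VAR_1 total'), i.e. exactly what the model still has to predict.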
def generate_batch(
self, lang1: str, lang2: str, name: str
) -> tp.Tuple[
LTensor,
LTensor,
LTensor,
LTensor,
tp.Tuple[tp.Optional[LTensor], tp.Optional[LTensor]],
]:
"""
Prepare a batch (for causal or non-causal mode).
"""
params = self.params
lang1_id = params.lang2id[lang1]
lang2_id = params.lang2id[lang2] if lang2 is not None else None
if lang2 is None:
x, lengths = self.get_batch(name, lang1, stream=True)
positions = None
langs = x.clone().fill_(lang1_id) if params.n_langs > 1 else None
elif lang1 == lang2:
(x1, len1, _, _) = self.get_batch(name, lang1)
(x2, len2) = (x1, len1)
(x1, len1) = add_noise(x1, len1, self.params, len(self.data["dico"]) - 1)
x, lengths, positions, langs = concat_batches(
x1,
len1,
lang1_id,
x2,
len2,
lang2_id,
params.pad_index,
params.eos_index,
reset_positions=False,
)
else:
(x1, len1, _, _), (x2, len2, _, _) = self.get_batch(name, lang1, lang2)
x, lengths, positions, langs = concat_batches(
x1,
len1,
lang1_id,
x2,
len2,
lang2_id,
params.pad_index,
params.eos_index,
reset_positions=True,
)
return (
x,
lengths,
positions,
langs,
(None, None) if lang2 is None else (len1, len2),
)
def save_checkpoint(self, name: str, include_optimizers: bool = True) -> None:
"""
Save the model / checkpoints.
"""
for (lang1, lang2), cache in self.st_cache.items():
path = os.path.join(
self.params.dump_path,
f"cache_{name}-{lang1}-{lang2}-{self.params.global_rank}.pkl",
)
cache.save(path)
if not self.params.is_master:
return
path = os.path.join(self.params.dump_path, "%s.pth" % name)
logger.info("Saving %s to %s ..." % (name, path))
data = {
"epoch": self.epoch,
"n_total_iter": self.n_total_iter,
"best_metrics": self.best_metrics,
"best_stopping_criterion": self.best_stopping_criterion,
}
for name in self.MODEL_NAMES:
logger.warning(f"Saving {name} parameters ...")
model_attr = getattr(self, name)
if isinstance(model_attr, list) and len(model_attr) > 1:
for i, model in enumerate(model_attr):
data[f"{name}_{i}"] = model.state_dict()
else:
if isinstance(model_attr, list):
assert len(model_attr) == 1
model_attr = model_attr[0]
data[name] = model_attr.state_dict()
if include_optimizers:
for name in self.optimizers.keys():
logger.warning(f"Saving {name} optimizer ...")
data[f"{name}_optimizer"] = self.optimizers[name].state_dict()
if self.scaler is not None:
data[f"scaler"] = self.scaler.state_dict()
data["dico_id2word"] = self.data["dico"].id2word
data["dico_word2id"] = self.data["dico"].word2id
data["dico_counts"] = self.data["dico"].counts
data["params"] = {k: v for k, v in self.params.__dict__.items()}
torch.save(data, path)
def reload_checkpoint(self) -> None:
"""
Reload a checkpoint if we find one.
"""
checkpoint_path = Path(self.params.dump_path) / "checkpoint.pth"
if not checkpoint_path.is_file():
if self.params.reload_checkpoint == "":
return
else:
checkpoint_path = Path(self.params.reload_checkpoint)
assert checkpoint_path.is_file()
logger.warning(f"Reloading checkpoint from {checkpoint_path} ...")
data = torch.load(checkpoint_path, map_location="cpu")
for (lang1, lang2), cache in self.st_cache.items():
checkpoint_path = Path(checkpoint_path)
cache_path = Path(checkpoint_path).parent.joinpath(
f"cache_{str(checkpoint_path.name).replace('.pth', '')}-{lang1}-{lang2}-{self.params.global_rank}.pkl"
)
logger.warning(f"Reloading cache from {cache_path} ...")
self.st_cache[(lang1, lang2)] = self.cache_class.from_file(
cache_path, self.params
)
# reload model parameters
for name in self.MODEL_NAMES:
model_attr = getattr(self, name)
if isinstance(model_attr, list) and len(model_attr) > 1:
for i, model in enumerate(model_attr):
model.load_state_dict(data[f"{name}_{i}"])
else:
if isinstance(model_attr, list):
assert len(model_attr) == 1
model_attr = model_attr[0]
model_attr.load_state_dict(data[name])
# reload optimizers
for name in self.optimizers.keys():
if (
self.params.apex
): # AMP checkpoint reloading is buggy, we cannot do that - TODO: fix - https://github.com/NVIDIA/apex/issues/250
# instead, we only reload current iterations / learning rates
logger.warning(f"Not reloading checkpoint optimizer {name}.")
for group_id, param_group in enumerate(
self.optimizers[name].param_groups
):
if "num_updates" not in param_group:
logger.warning(f"No 'num_updates' for optimizer {name}.")
continue
logger.warning(
f"Reloading 'num_updates' and 'lr' for optimizer {name}."
)
param_group["num_updates"] = data[f"{name}_optimizer"][
"param_groups"
][group_id]["num_updates"]
param_group["lr"] = self.optimizers[name].get_lr_for_step(
param_group["num_updates"]
)
else:
logger.warning(f"Reloading checkpoint optimizer {name}...")
self.optimizers[name].load_state_dict(data[f"{name}_optimizer"])
for group_id, param_group in enumerate(
self.optimizers[name].param_groups
):
for k in ["num_updates", "lr"]:
if k in param_group:
logger.warning(
f"Optimizer parameter group (ID={group_id})"
f" - {k}: {param_group[k]}"
)
# reload gradient scaler
if self.params.fp16:
logger.warning("Reloading gradient scaler ...")
assert self.scaler is not None
self.scaler.load_state_dict(data["scaler"])
else:
assert self.scaler is None and "scaler" not in data
# reload main metrics
self.epoch = data["epoch"] + 1
self.n_total_iter = data["n_total_iter"]
self.best_metrics = data["best_metrics"]
self.best_stopping_criterion = data["best_stopping_criterion"]
logger.warning(
f"Checkpoint reloaded. Resuming at epoch {self.epoch} / iteration {self.n_total_iter} ..."
)
def save_periodic(self) -> None:
"""
Save the models periodically.
"""
if not self.params.is_master:
return
if (
self.params.save_periodic > 0
and self.epoch % self.params.save_periodic == 0
):
self.save_checkpoint("periodic-%i" % self.epoch, include_optimizers=False)
def save_best_model(self, scores):
"""
Save best models according to given validation metrics.
"""
if not self.params.is_master:
return
for metric, biggest in self.metrics:
if metric not in scores:
logger.warning('Metric "%s" not found in scores!' % metric)
continue
factor = 1 if biggest else -1
if factor * scores[metric] > factor * self.best_metrics[metric]:
self.best_metrics[metric] = scores[metric]
logger.info("New best score for %s: %.6f" % (metric, scores[metric]))
self.save_checkpoint("best-%s" % metric, include_optimizers=False)
def end_epoch(self, scores):
"""
End the epoch.
"""
# stop if the stopping criterion has not improved after a certain number of epochs
if self.stopping_criterion is not None and self.params.is_master:
metric, biggest = self.stopping_criterion
assert metric in scores, metric
factor = 1 if biggest else -1
if factor * scores[metric] > factor * self.best_stopping_criterion:
self.best_stopping_criterion = scores[metric]
logger.info(
"New best validation score: %f" % self.best_stopping_criterion
)
self.decrease_counts = 0
else:
logger.info(
"Not a better validation score (%i / %i)."
% (self.decrease_counts, self.decrease_counts_max)
)
self.decrease_counts += 1
if self.decrease_counts > self.decrease_counts_max:
logger.info(
"Stopping criterion has been below its best value for more "
"than %i epochs. Ending the experiment..."
% self.decrease_counts_max
)
if self.params.multi_gpu and "SLURM_JOB_ID" in os.environ:
os.system("scancel " + os.environ["SLURM_JOB_ID"])
exit()
self.save_checkpoint("checkpoint", include_optimizers=True)
self.st_translation_stats = {}
self.epoch += 1
def round_batch(self, x, lengths, positions, langs):
"""
For float16 only.
Sub-sample sentences in a batch, and add padding,
so that each dimension is a multiple of 8.
"""
params = self.params
if not params.fp16 or len(lengths) < 8:
return x, lengths, positions, langs, None
# number of sentences == 0 [8]
bs1 = len(lengths)
bs2 = 8 * (bs1 // 8)
assert bs2 > 0 and bs2 % 8 == 0
if bs1 != bs2:
idx = torch.randperm(bs1)[:bs2]
lengths = lengths[idx]
slen = lengths.max().item()
x = x[:slen, idx]
positions = None if positions is None else positions[:slen, idx]
langs = None if langs is None else langs[:slen, idx]
else:
idx = None
# sequence length == 0 [8]
ml1 = x.size(0)
if ml1 % 8 != 0:
pad = 8 - (ml1 % 8)
ml2 = ml1 + pad
x = torch.cat([x, torch.LongTensor(pad, bs2).fill_(params.pad_index)], 0)
if positions is not None:
positions = torch.cat(
[positions, torch.arange(pad)[:, None] + positions[-1][None] + 1], 0
)
if langs is not None:
langs = torch.cat([langs, langs[-1][None].expand(pad, bs2)], 0)
assert x.size() == (ml2, bs2)
assert x.size(0) % 8 == 0
assert x.size(1) % 8 == 0
return x, lengths, positions, langs, idx
def clm_step(
self, lang1: str, lang2: str, lambda_coeff: float, show_example: bool = False
) -> None:
"""
Next word prediction step (causal prediction).
CLM objective.
"""
assert lambda_coeff >= 0
if lambda_coeff == 0:
return
params = self.params
name = "model" if params.encoder_only else "decoder"
model = getattr(self, name)[0]
model.train()
# generate batch / select words to predict
x, lengths, positions, langs, _ = self.generate_batch(lang1, lang2, "causal")
x, lengths, positions, langs, _ = self.round_batch(x, lengths, positions, langs)
alen = torch.arange(
lengths.max(), dtype=torch.long, device=lengths.device
) # type: ignore
pred_mask = alen[:, None] < lengths[None] - 1
if params.context_size > 0: # do not predict without context
pred_mask[: params.context_size] = 0
y = x[1:].masked_select(pred_mask[:-1])
assert pred_mask.sum().item() == y.size(0)
if show_example:
show_batch(
logger,
[("Sentence", x.transpose(0, 1))],
self.data["dico"],
self.params.tokenization_mode,
"Training",
self.params.sentencepiece_model_path,
)
# cuda
x, lengths, langs, pred_mask, y = to_cuda(x, lengths, langs, pred_mask, y)
# forward / loss
tensor = model("fwd", x=x, lengths=lengths, langs=langs, causal=True)
_, loss = model(
"predict", tensor=tensor, pred_mask=pred_mask, y=y, get_scores=False
)
self.stats[
("CLM-%s" % lang1) if lang2 is None else ("CLM-%s-%s" % (lang1, lang2))
].append(loss.item())
loss = lambda_coeff * loss
# optimize
self.optimize(loss)
# number of processed sentences / words
self.n_sentences += params.batch_size
self.stats["processed_s"] += lengths.size(0)
self.stats["processed_w"] += pred_mask.sum().item()
def mlm_step(
self, lang1: str, lang2: str, lambda_coeff: float, show_example: bool = False
) -> None:
"""
Masked word prediction step.
MLM objective if lang2 is None, TLM objective otherwise.
"""
assert lambda_coeff >= 0
if lambda_coeff == 0:
return
params = self.params
name = "model" if params.encoder_only else "encoder"
model = getattr(self, name)[0]
model.train()
# generate batch / select words to predict
x, lengths, positions, langs, _ = self.generate_batch(lang1, lang2, "pred")
x, lengths, positions, langs, _ = self.round_batch(x, lengths, positions, langs)
x, y, pred_mask = self.mask_out(x, lengths)
# log first batch of training
if show_example:
show_batch(
logger,
[("masked source", x.transpose(0, 1))],
self.data["dico"],
self.params.tokenization_mode,
"Training",
self.params.sentencepiece_model_path,
)
# cuda
x, y, pred_mask, lengths, positions, langs = to_cuda(
x, y, pred_mask, lengths, positions, langs
)
# forward / loss
tensor = model(
"fwd", x=x, lengths=lengths, positions=positions, langs=langs, causal=False
)
_, loss = model(
"predict", tensor=tensor, pred_mask=pred_mask, y=y, get_scores=False
)
self.stats[
("MLM-%s" % lang1) if lang2 is None else ("MLM-%s-%s" % (lang1, lang2))
].append(loss.item())
loss = lambda_coeff * loss
# optimize
self.optimize(loss)
# number of processed sentences / words
self.n_sentences += params.batch_size
self.stats["processed_s"] += lengths.size(0)
self.stats["processed_w"] += pred_mask.sum().item()
class SingleTrainer(Trainer):
def __init__(self, model, data, params, classifier=None) -> None:
self.MODEL_NAMES = ["model"]
if classifier is not None:
self.MODEL_NAMES.append("classifier")
# model / data / params
self.model = model
self.data = data
self.params = params
if classifier is not None:
self.classifier = [classifier]
super().__init__(data, params, self.MODEL_NAMES)
def classif_step(self, lang1: str, lang2: str, lambda_coeff: float) -> None:
"""
Token classification step.
Encodes lang1 sentences and trains the classifier to predict the per-token labels from the (lang1, lang2) classification data.
"""
assert lambda_coeff >= 0
if lambda_coeff == 0:
return
params = self.params
name = "model" if params.encoder_only else "encoder"
model = getattr(self, name)[0]
model.train()
assert self.classifier is not None
classifier = self.classifier[0].train()
lang1_id = params.lang2id[lang1]
(x1, len1, _, _), (y, len2, _, _) = self.get_batch("classif", lang1, lang2)
pred_mask = (x1 != self.params.eos_index) * (x1 != self.params.pad_index)
assert len1.equal(len2)
langs1 = x1.clone().fill_(lang1_id)
# cuda
x1, len1, langs1, y = to_cuda(x1, len1, langs1, y)
# encode source sentence
enc1 = model("fwd", x=x1, lengths=len1, langs=langs1, causal=False)
if self.params.fp16:
enc1 = enc1.half()
# classification + loss
scores, loss = classifier(enc1, y, pred_mask)
self.stats[("Classif-%s-%s" % (lang1, lang2))].append(loss.item())
loss = lambda_coeff * loss
# optimize
self.optimize(loss)
# number of processed sentences / words
self.n_sentences += params.batch_size
self.stats["processed_s"] += len2.size(0)
self.stats["processed_w"] += (len2 - 1).sum().item()
class EncDecTrainer(Trainer):
def __init__(self, encoder, decoder, data, params, second_decoder=None) -> None:
self.MODEL_NAMES = ["encoder", "decoder"]
# if second_decoder is not None:
# self.MODEL_NAMES.append('decoder2')
# model / data / params
self.encoder = encoder
self.decoder = decoder
self.data = data
self.params = params
self.st_translation_stats = {}
super().__init__(data, params, self.MODEL_NAMES)
def mt_step(
self,
lang1,
lang2,
lambda_coeff,
span=None,
deobfuscate=False,
deobfuscate_p=None,
show_example=False,
):
"""
Machine translation step.
Can also be used for denoising auto-encoding.
"""
assert lambda_coeff >= 0
if lambda_coeff == 0:
return
assert (
deobfuscate_p is not None and 0 <= deobfuscate_p and deobfuscate_p <= 1
) or not deobfuscate
# assert deobfuscate or span is not None
params = self.params
self.train_mode()
spans = None
# generate batch
if lang1 == lang2:
assert not span, "spans not supported for AE steps"
(x1, len1, _, _) = self.get_batch("ae", lang1)
(x2, len2) = (x1, len1)
(x1, len1) = add_noise(x1, len1, self.params, len(self.data["dico"]) - 1)
elif span:
(
(x1, len1, _, _),
(x2, len2, _, _),
(spans, len_spans, _, _),
) = self.get_batch("mt_spans", lang1, lang2, span=span)
elif deobfuscate:
(x1, len1, _, _), (x2, len2, _, _) = self.get_batch("mt", lang1, lang2)
(x1, len1, x2, len2) = self.deobfuscate_by_variable(
x1, x2, deobfuscate_p, params.tokenization_mode == "roberta", rng=None
)
if x1 is None:
return
else:
(x1, len1, _, _), (x2, len2, _, _) = self.get_batch("mt", lang1, lang2)
loss = self.mt_train_step(
x1, len1, lang1, x2, len2, lang2, spans, lambda_coeff, params, show_example
)
if deobfuscate:
self.stats[("DO-%s-%s" % (lang1, lang2))].append(loss.item())
else:
key = (lang1, lang2) if span is None else (lang1, lang2, span)
self.stats[
("AE-%s" % lang1) if lang1 == lang2 else ("MT-%s" % "-".join(key))
].append(loss.item())
def ae_step(
self, lang1, lang2, lambda_coeff, show_example=False,
):
"""
Denoising auto-encoding steps
If lang2 is not None, the lang2 sentences will be concatenated to the lang1 sentences
"""
assert lambda_coeff >= 0
if lambda_coeff == 0:
return
# assert deobfuscate or span is not None
params = self.params
self.train_mode()
# generate batch
if lang2 is None:
(x1, len1, _, _) = self.get_batch("ae", lang1)
(x2, len2) = (x1, len1)
(x1, len1) = add_noise(x1, len1, self.params, len(self.data["dico"]) - 1)
positions_in = positions_out = None
langs_in = langs_out = None
lang2 = lang1
else:
(x1, len1, _, _), (x2, len2, _, _) = self.get_batch("tae", lang1, lang2)
lang1_id = params.lang2id[lang1]
lang2_id = params.lang2id[lang2]
x_out, len_out, positions_out, langs_out = concat_batches(
x1,
len1,
lang1_id,
x2,
len2,
lang2_id,
params.pad_index,
params.eos_index,
reset_positions=False,
)
x1, len1 = add_noise(x1, len1, self.params, len(self.data["dico"]) - 1)
x2, len2 = add_noise(x2, len2, self.params, len(self.data["dico"]) - 1)
x1, len1, positions_in, langs_in = concat_batches(
x1,
len1,
lang1_id,
x2,
len2,
lang2_id,
params.pad_index,
params.eos_index,
reset_positions=False,
)
x2, len2 = x_out, len_out
selection_mask = (len1 < params.max_len) & (len2 < params.max_len)
if not selection_mask.any().item():
logger.info(
f"GPU: {self.params.global_rank}. Nothing matching the mask"
)
x1 = torch.tensor([self.data["dico"].eos_index] * 2)[:, None]
x2 = torch.tensor([self.data["dico"].eos_index] * 2)[:, None]
len1 = torch.ones(1) * 2
len2 = torch.ones(1) * 2
positions_in = None
positions_out = None
langs_in = torch.zeros_like(x1).fill_(lang1_id)
langs_out = torch.zeros_like(x2).fill_(lang2_id)
else:
len1 = len1[selection_mask]
len2 = len2[selection_mask]
x1 = x1[: len1.max(), selection_mask]
positions_in = positions_in[: len1.max(), selection_mask]
langs_in = langs_in[: len1.max(), selection_mask]
x2 = x2[: len2.max(), selection_mask]
positions_out = positions_out[: len2.max(), selection_mask]
langs_out = langs_out[: len2.max(), selection_mask]
loss = self.mt_train_step(
x1,
len1,
lang1,
x2,
len2,
lang2,
spans=None,
lambda_coeff=lambda_coeff,
params=params,
show_example=show_example,
positions_in=positions_in,
positions_out=positions_out,
langs_in=langs_in,
langs_out=langs_out,
)
self.stats[
("AE-%s" % lang1)
if lang2 == lang1
else ("TAE-%s" % "-".join((lang1, lang2)))
].append(loss.item())
def dobf_step(
self, lang1, lang2, lambda_coeff, deobfuscate_p=None, show_example=False,
):
"""
Deobfuscation steps
Obfuscates a ratio deobfuscate_p of identifiers and trains to retrieve the dictionary
"""
assert lambda_coeff >= 0
if lambda_coeff == 0:
return
        assert deobfuscate_p is not None and 0 <= deobfuscate_p <= 1
# assert deobfuscate or span is not None
params = self.params
self.train_mode()
spans = None
# generate batch
(x1, len1, _, _), (x2, len2, _, _) = self.get_batch("mt", lang1, lang2)
(x1, len1, x2, len2) = self.deobfuscate_by_variable(
x1, x2, deobfuscate_p, params.tokenization_mode == "roberta", rng=None
)
if x1 is None:
return
loss = self.mt_train_step(
x1, len1, lang1, x2, len2, lang2, spans, lambda_coeff, params, show_example
)
self.stats[("DO-%s-%s" % (lang1, lang2))].append(loss.item())
def mt_train_step(
self,
x1,
len1,
lang1,
x2,
len2,
lang2,
spans,
lambda_coeff,
params,
show_example,
positions_in=None,
positions_out=None,
langs_in=None,
langs_out=None,
):
"""
Common training steps for all steps doing a form of machine translation
Positions and langs are inferred if not given
"""
lang1_id = params.lang2id[lang1]
lang2_id = params.lang2id[lang2]
decoder = (
self.decoder[lang2_id] if params.separate_decoders else self.decoder[0]
)
# log first batch of training
if show_example:
show_batch(
logger,
[("source", x1.transpose(0, 1)), ("target", x2.transpose(0, 1))],
self.data["dico"],
self.params.tokenization_mode,
f"Train {lang1}-{lang2}",
self.params.sentencepiece_model_path,
)
langs1 = x1.clone().fill_(lang1_id) if langs_in is None else langs_in
langs2 = x2.clone().fill_(lang2_id) if langs_out is None else langs_out
# target words to predict
alen = torch.arange(len2.max(), dtype=torch.long, device=len2.device)
# do not predict anything given the last target word
pred_mask = alen[:, None] < len2[None] - 1
y = x2[1:].masked_select(pred_mask[:-1])
assert len(y) == (len2 - 1).sum().item()
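        # e.g. (illustrative) with len2 = [4, 3]: pred_mask (slen x bs) is
        # [[1, 1], [1, 1], [1, 0], [0, 0]], so y gathers the len2 - 1 tokens
        # x2[1:len2] that the decoder has to predict for each sentence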
# cuda
(
x1,
len1,
langs1,
x2,
len2,
langs2,
y,
spans,
positions_in,
positions_out,
) = to_cuda(
x1, len1, langs1, x2, len2, langs2, y, spans, positions_in, positions_out
)
# encode source sentence
enc1 = self.encoder[0](
"fwd",
x=x1,
lengths=len1,
langs=langs1,
causal=False,
spans=spans,
positions=positions_in,
)
enc1 = enc1.transpose(0, 1)
# decode target sentence
dec2 = decoder(
"fwd",
x=x2,
lengths=len2,
langs=langs2,
causal=True,
src_enc=enc1,
src_len=len1,
spans=spans,
positions=positions_out,
)
# loss
_, loss = decoder(
"predict", tensor=dec2, pred_mask=pred_mask, y=y, get_scores=False
)
loss = lambda_coeff * loss
# optimize
self.optimize(loss)
# number of processed sentences / words
self.n_sentences += params.batch_size
self.stats["processed_s"] += len2.size(0)
self.stats["processed_w"] += (len2 - 1).sum().item()
return loss
    def train_mode(self) -> None:
        for enc in self.encoder:
            enc.train()
        if self.decoder is not None:
            for dec in self.decoder:
                dec.train()
    def eval_mode(self) -> None:
        for enc in self.encoder:
            enc.eval()
        if self.decoder is not None:
            for dec in self.decoder:
                dec.eval()
def bt_step(
self, lang1, lang2, lang3, lambda_coeff, sample_temperature, show_example=False
):
"""
Back-translation step for machine translation.
"""
if sample_temperature == 0:
sample_temperature = None
assert lambda_coeff >= 0
if lambda_coeff == 0:
return
assert lang1 == lang3 and lang1 != lang2 and lang2 is not None
params = self.params
lang1_id = params.lang2id[lang1]
lang2_id = params.lang2id[lang2]
_encoder = self.encoder[0]
_decoder_lang1 = (
self.decoder[lang1_id] if params.separate_decoders else self.decoder[0]
)
_decoder_lang2 = (
self.decoder[lang2_id] if params.separate_decoders else self.decoder[0]
)
# generate source batch
x1, len1, _, _ = self.get_batch("bt", lang1)
langs1 = x1.clone().fill_(lang1_id)
# cuda
x1, len1, langs1 = to_cuda(x1, len1, langs1)
# generate a translation
with torch.no_grad():
# evaluation mode
self.eval_mode()
# encode source sentence and translate it
enc1 = _encoder("fwd", x=x1, lengths=len1, langs=langs1, causal=False)
enc1 = enc1.transpose(0, 1)
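            # heuristic generation budget: at most 3 * source length + 10 tokens, capped at bt_max_len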
len_v = (3 * len1 + 10).clamp(max=params.bt_max_len)
if self.params.fp16:
enc1 = enc1.half()
x2, len2 = _decoder_lang2.generate(
enc1,
len1,
lang2_id,
max_len=len_v,
sample_temperature=sample_temperature,
)
langs2 = x2.clone().fill_(lang2_id)
# free CUDA memory
del enc1
# training mode
self.train_mode()
        # show an example for debugging
if show_example:
show_batch(
logger,
[
("Generated source", x2.transpose(0, 1)),
("Target (x1)", x1.transpose(0, 1)),
],
self.data["dico"],
self.params.tokenization_mode,
f"BT {lang1}-{lang2}",
self.params.sentencepiece_model_path,
)
# encode generated sentence
enc2 = self.encoder[0]("fwd", x=x2, lengths=len2, langs=langs2, causal=False)
enc2 = enc2.transpose(0, 1)
# words to predict
alen = torch.arange(len1.max(), dtype=torch.long, device=len1.device)
# do not predict anything given the last target word
pred_mask = alen[:, None] < len1[None] - 1
y1 = x1[1:].masked_select(pred_mask[:-1])
# decode original sentence
dec3 = _decoder_lang1(
"fwd",
x=x1,
lengths=len1,
langs=langs1,
causal=True,
src_enc=enc2,
src_len=len2,
)
# loss
_, loss = _decoder_lang1(
"predict", tensor=dec3, pred_mask=pred_mask, y=y1, get_scores=False
)
self.stats[("BT-%s-%s-%s" % (lang1, lang2, lang3))].append(loss.item())
loss = lambda_coeff * loss
# optimize
self.optimize(loss)
# number of processed sentences / words
self.n_sentences += params.batch_size
self.stats["processed_s"] += len1.size(0)
self.stats["processed_w"] += (len1 - 1).sum().item()
def st_step(self, lang1, langs2, lambda_coeff, show_example=False):
"""
Training on self-trained examples using unit tests
"""
assert lambda_coeff >= 0
if lambda_coeff == 0:
return
assert all([lang1 != lang2 and lang2 is not None for lang2 in langs2]), (
lang1,
langs2,
)
params = self.params
lang1_id = params.lang2id[lang1]
_encoder = self.encoder[0]
if params.is_master and params.st_show_stats:
for (l1, l2), cache in self.st_cache.items():
logger.info(f"{l1}-{l2} cache size: {len(cache)}")
dico = self.data["dico"]
if 0 <= params.st_sample_cache_ratio < 1:
read_from_cache = random.random() < params.st_sample_cache_ratio and all(
[len(cache) >= params.cache_warmup for cache in self.st_cache.values()]
)
else:
if self.number_consecutive_reads < params.st_sample_cache_ratio and all(
[len(cache) >= params.cache_warmup for cache in self.st_cache.values()]
):
read_from_cache = True
self.number_consecutive_reads += 1
else:
read_from_cache = False
self.number_consecutive_reads = 0
if read_from_cache:
if params.st_show_stats:
logger.info(f"reading {params.st_sample_size} elements from the cache")
            for l1, l2 in self.st_langs:
(x1, len1), (x2, len2) = self.st_cache[(l1, l2)].sample_batch(
params.st_sample_size
)
if params.st_show_stats:
logger.info(f"actual batch size: {len(len2)}")
x1, len1, x2, len2 = to_cuda(x1, len1, x2, len2)
self.train_on_st_data(
x1,
len1,
l1,
x2,
len2,
l2,
dico,
params,
lambda_coeff,
show_example,
lang_src=lang1,
)
# number of processed sentences / words
self.n_sentences += params.batch_size
self.stats["processed_s"] += len1.size(0)
self.stats["processed_w"] += (len1 - 1).sum().item()
del x1, len1, x2, len2
else:
# generate source batch
(x1, len1, id1, lenid1) = self.get_batch(
"st",
lang1,
self_training=True,
st_scores_cutoff=(
params.st_min_mutation_score,
self.params.st_min_asserts,
),
)
assert id1 is not None
assert lenid1 is not None
assert x1.shape[1] == len(len1) == id1.shape[1] == len(lenid1)
sent_ids = convert_to_text(id1, lenid1, dico, params)
sent_ids = [
restore_segmentation_sentence(
i,
tokenization_mode=params.tokenization_mode,
sentencepiece_model_path=params.sentencepiece_model_path,
)
for i in sent_ids
]
langs1 = x1.clone().fill_(lang1_id)
# cuda
x1, len1, langs1 = to_cuda(x1, len1, langs1)
with torch.no_grad():
# evaluation mode
self.eval_mode()
# encode source sentence and translate it
enc1 = _encoder("fwd", x=x1, lengths=len1, langs=langs1, causal=False)
enc1 = enc1.transpose(0, 1)
# We generate data for every language in langs2 from the input in lang1
generated_x2 = {}
generated_x2_len = {}
any_successful = {}
for lang2 in langs2:
(
selected_x1,
selected_len1,
x2,
len2,
any_successful_beam,
) = self.generate_parallel_examples(
x1, len1, enc1, lang1, lang2, sent_ids, params
)
if selected_x1 is None:
continue
generated_x2[lang2] = x2
generated_x2_len[lang2] = len2
any_successful[lang2] = any_successful_beam
self.train_on_st_data(
selected_x1,
selected_len1,
lang1,
generated_x2[lang2],
generated_x2_len[lang2],
lang2,
dico,
params,
lambda_coeff,
show_example,
lang_src=lang1,
)
# if needed, train on pairs of langs2 elements
for lang2_2 in [
lang for lang in any_successful.keys() if lang != lang2
]:
x2, len2, x2_2, len2_2 = self.cross_language_st_selection(
generated_x2,
generated_x2_len,
any_successful,
lang2,
lang2_2,
params,
)
if x2 is None:
continue
self.train_on_st_data(
x2,
len2,
lang2,
x2_2,
len2_2,
lang2_2,
dico,
params,
lambda_coeff,
show_example,
lang_src=lang1,
)
# number of processed sentences / words
self.n_sentences += params.batch_size
self.stats["processed_s"] += len1.size(0)
self.stats["processed_w"] += (len1 - 1).sum().item()
def cross_language_st_selection(
self, generated_x2, generated_x2_len, any_successful, lang2, lang2_2, params
):
both_successful = [
(res2 and res2_2)
for res2, res2_2 in zip(any_successful[lang2], any_successful[lang2_2])
]
assert (
len(any_successful[lang2])
== len(any_successful[lang2_2])
== len(both_successful)
)
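        # both_successful[i] is True when input i produced at least one hypothesis passing
        # the unit tests in both lang2 and lang2_2, so the two generations can be paired up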
if not any(both_successful):
return None, None, None, None
if params.is_master:
self.log_successful_st(both_successful, "-".join([lang2, lang2_2]))
mask_lang2 = [
b for i, b in enumerate(both_successful) if any_successful[lang2][i]
]
len2 = generated_x2_len[lang2][mask_lang2]
x2 = generated_x2[lang2][: len2.max(), mask_lang2]
mask_lang2_2 = [
b for i, b in enumerate(both_successful) if any_successful[lang2_2][i]
]
len2_2 = generated_x2_len[lang2_2][mask_lang2_2]
x2_2 = generated_x2[lang2_2][: len2_2.max(), mask_lang2_2]
assert len(x2.shape) == len(x2_2.shape) == 2, (x2.shape, x2_2.shape)
assert (x2 == self.params.eos_index).sum() == 2 * len(len2)
assert (x2_2 == self.params.eos_index).sum() == 2 * len(len2_2)
assert (
x2.shape[1]
== x2_2.shape[1]
== len(len2)
== len(len2_2)
== sum(both_successful)
), (
x2.shape[1],
x2_2.shape[1],
len(len2),
len(len2_2),
sum(both_successful),
)
new_elements = [
(
x2[:, i].detach().clone().cpu(),
len2[i].detach().clone().cpu(),
x2_2[:, i].detach().clone().cpu(),
len2_2[i].detach().clone().cpu(),
)
if lang2 < lang2_2
else (
x2_2[:, i].detach().clone().cpu(),
len2_2[i].detach().clone().cpu(),
x2[:, i].detach().clone().cpu(),
len2[i].detach().clone().cpu(),
)
for i in range(len(len2))
]
if params.st_show_stats:
logger.info(
f"Adding {len(len2)} elements to the cache for {lang2}-{lang2_2}"
)
self.st_cache[tuple(sorted([lang2, lang2_2]))].add(new_elements)
return x2, len2, x2_2, len2_2
def generate_parallel_examples(
self, x1, len1, enc1, lang1, lang2, sent_ids, params
):
lang2_id = params.lang2id[lang2]
        decoder = (
            self.decoder[lang2_id] if params.separate_decoders else self.decoder[0]
        )
# generate a translation
with torch.no_grad():
# evaluation mode
self.eval_mode()
# generate sentences in lang2
len_v = (3 * len1 + 10).clamp(max=params.max_len)
if self.params.fp16:
enc1 = enc1.half()
x2, len2, _ = decoder.generate_beam(
enc1,
len1,
lang2_id,
beam_size=int(params.st_beam_size),
length_penalty=params.st_length_penalty,
early_stopping=False,
max_len=len_v,
)
assert x2.shape[1] == len2.shape[1] == int(
params.st_beam_size
) and x2.shape[2] == len(len2), (x2.shape, len2.shape)
text_hypotheses = convert_to_text(
x2, len2, self.data["dico"], params, generate_several_reps=True
)
assert len(text_hypotheses) == len(len1), (
len(text_hypotheses),
len(len1),
)
assert len(text_hypotheses[0]) == params.st_beam_size, (
len(text_hypotheses[0]),
                params.st_beam_size,
)
text_hypotheses = [
[
restore_segmentation_sentence(
sent, params.tokenization_mode, params.sentencepiece_model_path
)
for sent in hyps
]
for hyps in text_hypotheses
]
test_outputs = self.get_test_outputs(text_hypotheses, sent_ids, lang=lang2,)
assert len(test_outputs) == len(len1), (len(test_outputs), len(len1))
test_outputs = [
[r[0] == "success" for r in beam_res] for beam_res in test_outputs
]
first_successful_index = [safe_index(l, True) for l in test_outputs]
any_successful = [i is not None for i in first_successful_index]
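        # first_successful_index[i] is the first beam whose hypothesis passed the unit tests
        # for input i (None if no beam passed); any_successful flags the inputs that are kept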
if params.is_master:
self.log_successful_st(any_successful, lang2)
if not any(any_successful):
return None, None, None, None, any_successful
selected_len1 = len1[any_successful]
selected_x1 = x1[: selected_len1.max(), any_successful]
len2 = len2[any_successful]
# gather the lengths of the selected indices
first_successful_index = (
torch.tensor([i for i in first_successful_index if i is not None])
.long()
.to(x2.device)
)
len2 = len2.gather(1, first_successful_index.view(-1, 1)).squeeze(1)
assert len(len2.shape) == 1
x2 = x2[: len2.max(), :, any_successful]
assert first_successful_index.shape[0] == x2.shape[2]
# gather the elements corresponding to the first successful index
x2 = x2.gather(
1,
first_successful_index.view(1, -1).repeat(x2.shape[0], 1).unsqueeze(1),
).squeeze(1)
assert len(x2.shape) == 2, x2.shape
assert (selected_x1 == self.params.eos_index).sum() == 2 * len(
selected_len1
)
assert (x2 == self.params.eos_index).sum() == 2 * len(selected_len1)
assert (
selected_x1.shape[1]
== x2.shape[1]
== len(selected_len1)
== len(len2)
== sum(any_successful)
), (
selected_x1.shape[1],
x2.shape[1],
len(selected_len1),
len(len2),
sum(any_successful),
)
new_elements = [
(
x1[:, i].detach().clone().cpu(),
len1[i].detach().clone().cpu(),
x2[:, i].detach().clone().cpu(),
len2[i].detach().clone().cpu(),
)
if lang1 < lang2
else (
x2[:, i].detach().clone().cpu(),
len2[i].detach().clone().cpu(),
x1[:, i].detach().clone().cpu(),
len1[i].detach().clone().cpu(),
)
for i in range(len(len2))
]
if params.st_show_stats:
logger.info(f"Adding {len(len2)} elements to the cache for {lang1}-{lang2}")
self.st_cache[tuple(sorted([lang1, lang2]))].add(new_elements)
return selected_x1, selected_len1, x2, len2, any_successful
def train_on_st_data(
self,
selected_x1,
selected_len1,
lang1,
x2,
len2,
lang2,
dico,
params,
lambda_coeff,
show_example,
lang_src,
):
lang1_id = params.lang2id[lang1]
lang2_id = params.lang2id[lang2]
lang1_ids = selected_x1.clone().fill_(lang1_id)
lang2_ids = x2.clone().fill_(lang2_id)
_decoder_lang1 = (
self.decoder[lang1_id] if params.separate_decoders else self.decoder[0]
)
_decoder_lang2 = (
self.decoder[lang2_id] if params.separate_decoders else self.decoder[0]
)
# training mode
self.train_mode()
# show an example for debugging
if show_example:
show_batch(
logger,
[
("Source", selected_x1.transpose(0, 1)),
("Generated Target", x2.transpose(0, 1)),
],
dico,
self.params.tokenization_mode,
f"ST {lang1}:{lang1}-{lang2}",
self.params.sentencepiece_model_path,
)
# Train on lang1 -> lang2
loss1 = self.get_st_loss(
_decoder_lang2, selected_x1, selected_len1, lang1_ids, x2, len2, lang2_ids
)
self.stats[("ST-%s:%s-%s" % (lang_src, lang1, lang2))].append(loss1.item())
# Train on lang2 -> lang1
loss2 = self.get_st_loss(
_decoder_lang1, x2, len2, lang2_ids, selected_x1, selected_len1, lang1_ids
)
self.stats[("ST-%s:%s-%s" % (lang_src, lang2, lang1))].append(loss2.item())
loss = lambda_coeff * (loss1 + loss2)
# optimize
self.optimize(loss)
def log_successful_st(self, any_successful, key):
if key not in self.st_translation_stats:
self.st_translation_stats[key] = {"successful": 0, "failed": 0}
self.st_translation_stats[key]["successful"] += sum(any_successful)
self.st_translation_stats[key]["failed"] += len(any_successful)
if (
sum(self.st_translation_stats[key].values()) > 0
and self.params.st_show_stats
):
logger.info(
f"Ratio of successful translations {key}: "
f"{self.st_translation_stats[key]['successful'] / self.st_translation_stats[key]['failed']:.2%}"
f" ({self.st_translation_stats[key]['successful']} / {self.st_translation_stats[key]['failed']})"
)
def get_st_loss(
self,
decoder: torch.nn.Module,
x1: LTensor,
len1: LTensor,
langs1,
x2: LTensor,
len2: LTensor,
langs2,
):
# encode generated sentence
enc1 = self.encoder[0]("fwd", x=x1, lengths=len1, langs=langs1, causal=False)
enc1 = enc1.transpose(0, 1)
# words to predict
alen = torch.arange( # type: ignore
len2.max(), dtype=torch.long, device=len1.device
)
# do not predict anything given the last target word
pred_mask = alen[:, None] < len2[None] - 1
y2 = x2[1:].masked_select(pred_mask[:-1])
# decode original sentence
dec2 = decoder(
"fwd",
x=x2,
lengths=len2,
langs=langs2,
causal=True,
src_enc=enc1,
src_len=len1,
)
# loss
_, loss = decoder(
"predict", tensor=dec2, pred_mask=pred_mask, y=y2, get_scores=False
)
return loss
def get_test_outputs(self, sentences, sent_ids, lang):
lang = get_programming_language_name(lang)
test_runner = self.test_runners[lang]
tests = [self.unit_tests[lang][test_id] for test_id in sent_ids]
assert len(sentences) == len(
tests
), f"tests of length {len(tests)} while functions are of length {len(sentences)}"
        # run the unit tests in a process pool; the context manager makes sure the
        # worker processes are shut down once all results have been collected
        with ProcessPoolExecutor() as executor:
            jobs = [
                [
                    executor.submit(test_runner.get_tests_results, func, test)
                    for func in funcs
                ]
                for funcs, test in zip(sentences, tests)
            ]
            res = [[job.result() for job in beam_jobs] for beam_jobs in jobs]
        return res
|
CodeGen-main
|
codegen_sources/model/src/trainer.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import inspect
import math
import re
import torch
from torch import optim
class Adam(optim.Optimizer):
"""
Same as https://github.com/pytorch/pytorch/blob/master/torch/optim/adam.py,
    without amsgrad, with the states initialized in __init__, and with the step
    counter stored as a plain int rather than in a tensor.
"""
def __init__(
self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0
) -> None:
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults)
for group in self.param_groups:
for p in group["params"]:
state = self.state[p]
state["step"] = 0 # torch.zeros(1)
state["exp_avg"] = torch.zeros_like(p.data)
state["exp_avg_sq"] = torch.zeros_like(p.data)
def __setstate__(self, state):
super().__setstate__(state)
def step(self, closure=None):
"""
Step.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
state = self.state[p]
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# if group['weight_decay'] != 0:
# grad.add_(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
# denom = exp_avg_sq.sqrt().clamp_(min=group['eps'])
bias_correction1 = 1 - beta1 ** state["step"] # .item()
bias_correction2 = 1 - beta2 ** state["step"] # .item()
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
if group["weight_decay"] != 0:
p.data.add_(p.data, alpha=-group["weight_decay"] * group["lr"])
p.data.addcdiv_(exp_avg, denom, value=-step_size)
return loss
class AdamInverseSqrtWithWarmup(Adam):
"""
Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (`warmup-init-lr`) until the configured
learning rate (`lr`). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup:
lrs = torch.linspace(warmup_init_lr, lr, warmup_updates)
lr = lrs[update_num]
After warmup:
lr = decay_factor / sqrt(update_num)
where
decay_factor = lr * sqrt(warmup_updates)
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
warmup_updates=4000,
warmup_init_lr=1e-7,
exp_factor=0.5,
):
super().__init__(
params, lr=warmup_init_lr, betas=betas, eps=eps, weight_decay=weight_decay,
)
# linearly warmup for the first warmup_updates
self.warmup_updates = warmup_updates
self.warmup_init_lr = warmup_init_lr
warmup_end_lr = lr
self.lr_step = (
(warmup_end_lr - warmup_init_lr) / warmup_updates
if warmup_updates > 0
else 1
)
# then, decay prop. to the inverse square root of the update number
self.exp_factor = exp_factor
self.decay_factor = (
warmup_end_lr * warmup_updates ** self.exp_factor
if warmup_updates > 0
else warmup_end_lr
)
# total number of updates
for param_group in self.param_groups:
param_group["num_updates"] = 0
def get_lr_for_step(self, num_updates):
if num_updates < self.warmup_updates:
return self.warmup_init_lr + num_updates * self.lr_step
else:
return self.decay_factor * (num_updates ** -self.exp_factor)
def step(self, closure=None):
super().step(closure)
for param_group in self.param_groups:
param_group["num_updates"] += 1
param_group["lr"] = self.get_lr_for_step(param_group["num_updates"])
class AdamCosineWithWarmup(Adam):
"""
Assign LR based on a cyclical schedule that follows the cosine function.
See https://arxiv.org/pdf/1608.03983.pdf for details.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
learning rate (``--lr``).
During warmup::
lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
lr = lrs[update_num]
After warmup::
        lr = lr_min + 0.5*(lr_max - lr_min)*(1 + cos(pi * t_curr / t_i))
where ``t_curr`` is current percentage of updates within the current period
range and ``t_i`` is the current period range, which is scaled by ``t_mul``
after every iteration.
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
warmup_updates=4000,
warmup_init_lr=1e-7,
min_lr=1e-9,
init_period=1000000,
period_mult=1,
lr_shrink=0.75,
):
super().__init__(
params, lr=warmup_init_lr, betas=betas, eps=eps, weight_decay=weight_decay,
)
# linearly warmup for the first warmup_updates
self.warmup_updates = warmup_updates
self.warmup_init_lr = warmup_init_lr
warmup_end_lr = lr
self.lr_step = (
(warmup_end_lr - warmup_init_lr) / warmup_updates
if warmup_updates > 0
else 1
)
# then, apply cosine scheduler
self.min_lr = min_lr
self.max_lr = lr
self.period = init_period
self.period_mult = period_mult
self.lr_shrink = lr_shrink
# total number of updates
for param_group in self.param_groups:
param_group["num_updates"] = 0
def get_lr_for_step(self, num_updates):
if num_updates < self.warmup_updates:
return self.warmup_init_lr + num_updates * self.lr_step
else:
t = num_updates - self.warmup_updates
if self.period_mult == 1:
pid = math.floor(t / self.period)
t_i = self.period
t_curr = t - (self.period * pid)
else:
pid = math.floor(
math.log(
1 - t / self.period * (1 - self.period_mult), self.period_mult
)
)
t_i = self.period * (self.period_mult ** pid)
t_curr = (
t
- (1 - self.period_mult ** pid)
/ (1 - self.period_mult)
* self.period
)
lr_shrink = self.lr_shrink ** pid
min_lr = self.min_lr * lr_shrink
max_lr = self.max_lr * lr_shrink
return min_lr + 0.5 * (max_lr - min_lr) * (
1 + math.cos(math.pi * t_curr / t_i)
)
def step(self, closure=None):
super().step(closure)
for param_group in self.param_groups:
param_group["num_updates"] += 1
param_group["lr"] = self.get_lr_for_step(param_group["num_updates"])
def get_optimizer(parameters, s):
"""
Parse optimizer parameters.
Input should be of the form:
- "sgd,lr=0.01"
- "adagrad,lr=0.1,lr_decay=0.05"
"""
if "," in s:
method = s[: s.find(",")]
optim_params = {}
for x in s[s.find(",") + 1 :].split(","):
split = x.split("=")
assert len(split) == 2
assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
optim_params[split[0]] = float(split[1])
else:
method = s
optim_params = {}
if method == "adadelta":
optim_fn = optim.Adadelta
elif method == "adagrad":
optim_fn = optim.Adagrad
elif method == "adam":
optim_fn = Adam
optim_params["betas"] = (
optim_params.get("beta1", 0.9),
optim_params.get("beta2", 0.999),
)
optim_params.pop("beta1", None)
optim_params.pop("beta2", None)
elif method == "adam_inverse_sqrt":
optim_fn = AdamInverseSqrtWithWarmup
optim_params["betas"] = (
optim_params.get("beta1", 0.9),
optim_params.get("beta2", 0.999),
)
optim_params.pop("beta1", None)
optim_params.pop("beta2", None)
elif method == "adam_cosine":
optim_fn = AdamCosineWithWarmup
optim_params["betas"] = (
optim_params.get("beta1", 0.9),
optim_params.get("beta2", 0.999),
)
optim_params.pop("beta1", None)
optim_params.pop("beta2", None)
elif method == "adamax":
optim_fn = optim.Adamax
elif method == "asgd":
optim_fn = optim.ASGD
elif method == "rmsprop":
optim_fn = optim.RMSprop
elif method == "rprop":
optim_fn = optim.Rprop
elif method == "sgd":
optim_fn = optim.SGD
assert "lr" in optim_params
else:
raise Exception('Unknown optimization method: "%s"' % method)
# check that we give good parameters to the optimizer
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
assert expected_args[:2] == ["self", "params"]
if not all(k in expected_args[2:] for k in optim_params.keys()):
raise Exception(
'Unexpected parameters: expected "%s", got "%s"'
% (str(expected_args[2:]), str(optim_params.keys()))
)
return optim_fn(parameters, **optim_params)
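# Example usage (illustrative): the specification string below would build an
# AdamInverseSqrtWithWarmup optimizer with lr=1e-4 and 10000 warmup updates:
#   optimizer = get_optimizer(model.parameters(), "adam_inverse_sqrt,lr=0.0001,warmup_updates=10000")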
|
CodeGen-main
|
codegen_sources/model/src/optim.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from logging import getLogger
import numpy as np
import torch
logger = getLogger()
def load_fasttext_model(path):
"""
Load a binarized fastText model.
"""
try:
import fastText
except ImportError:
raise Exception(
"Unable to import fastText. Please install fastText for Python: "
"https://github.com/facebookresearch/fastText"
)
return fastText.load_model(path)
def read_txt_embeddings(path, params):
"""
Reload pretrained embeddings from a text file.
"""
word2id = {}
vectors = []
# load pretrained embeddings
_emb_dim_file = params.emb_dim
with io.open(path, "r", encoding="utf-8", newline="\n", errors="ignore") as f:
for i, line in enumerate(f):
if i == 0:
split = line.split()
assert len(split) == 2
assert _emb_dim_file == int(split[1])
continue
word, vect = line.rstrip().split(" ", 1)
vect = np.fromstring(vect, sep=" ")
if word in word2id:
logger.warning('Word "%s" found twice!' % word)
continue
if not vect.shape == (_emb_dim_file,):
logger.warning(
'Invalid dimension (%i) for word "%s" in line %i.'
% (vect.shape[0], word, i)
)
continue
assert vect.shape == (_emb_dim_file,)
word2id[word] = len(word2id)
vectors.append(vect[None])
assert len(word2id) == len(vectors)
logger.info("Loaded %i pretrained word embeddings from %s" % (len(vectors), path))
# compute new vocabulary / embeddings
embeddings = np.concatenate(vectors, 0)
embeddings = torch.from_numpy(embeddings).float()
assert embeddings.size() == (len(word2id), params.emb_dim)
return word2id, embeddings
def load_bin_embeddings(path, params):
"""
Reload pretrained embeddings from a fastText binary file.
"""
model = load_fasttext_model(path)
assert model.get_dimension() == params.emb_dim
words = model.get_labels()
logger.info("Loaded binary model from %s" % path)
# compute new vocabulary / embeddings
embeddings = np.concatenate([model.get_word_vector(w)[None] for w in words], 0)
embeddings = torch.from_numpy(embeddings).float()
word2id = {w: i for i, w in enumerate(words)}
logger.info("Generated embeddings for %i words." % len(words))
assert embeddings.size() == (len(word2id), params.emb_dim)
return word2id, embeddings
def load_embeddings(path, params):
"""
Reload pretrained embeddings.
"""
if path.endswith(".bin"):
return load_bin_embeddings(path, params)
else:
return read_txt_embeddings(path, params)
|
CodeGen-main
|
codegen_sources/model/src/model/pretrain.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from torch import nn
try:
from apex.parallel import DistributedDataParallel
class CustomApexDDP(DistributedDataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
except ImportError:
pass
class CustomTorchDDP(nn.parallel.DistributedDataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
|
CodeGen-main
|
codegen_sources/model/src/model/CustomDDP.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import typing as tp
from einops import rearrange, repeat
import torch
from torch import nn
from torch import Tensor
class CAPE1d(nn.Module):
def __init__(
self,
d_model: int,
max_global_shift: float = 0.0,
max_local_shift: float = 0.0,
max_global_scaling: float = 1.0,
normalize: bool = False,
freq_scale: float = 1.0,
batch_first: bool = False,
):
super().__init__()
assert (
max_global_shift >= 0
), f"""Max global shift is {max_global_shift},
but should be >= 0."""
assert (
max_local_shift >= 0
), f"""Max local shift is {max_local_shift},
but should be >= 0."""
assert (
max_global_scaling >= 1
), f"""Global scaling is {max_global_scaling},
but should be >= 1."""
self.max_global_shift = max_global_shift
self.max_local_shift = max_local_shift
self.max_global_scaling = max_global_scaling
self.normalize = normalize
self.freq_scale = freq_scale
self.batch_first = batch_first
freq = freq_scale * torch.exp(
-2.0 * torch.floor(torch.arange(d_model) / 2) * (math.log(1e4) / d_model)
)
self.register_buffer("freq", freq)
_sin2cos_phase_shift = torch.tensor(math.pi) / 2.0
cos_shifts = _sin2cos_phase_shift * (torch.arange(d_model) % 2)
self.register_buffer("cos_shifts", cos_shifts)
def forward(
self,
x: Tensor,
x_lengths: tp.Optional[Tensor] = None,
positions_delta: tp.Optional[tp.Union[int, Tensor]] = None,
) -> Tensor:
return x + self.compute_pos_emb(x, x_lengths, positions_delta)
def compute_pos_emb(
self,
x: Tensor,
x_lengths: tp.Optional[Tensor] = None,
positions_delta: tp.Optional[tp.Union[int, Tensor]] = None,
) -> Tensor:
if self.batch_first:
batch_size, n_tokens, _ = x.shape # b, t, c
else:
n_tokens, batch_size, _ = x.shape # t, b, c
positions = repeat(
torch.arange(n_tokens), "t -> new_axis t", new_axis=batch_size
).to(x)
if positions_delta is None:
positions_delta = 1
else:
if (
torch.is_tensor(positions_delta)
and len(positions_delta.shape) == 1 # type:ignore
):
positions_delta = rearrange(positions_delta, "b -> b 1") # type: ignore
positions *= positions_delta
if x_lengths is not None:
padding_mask = positions > x_lengths[:, None]
positions[padding_mask] = float("nan")
if self.normalize:
positions -= torch.nanmean(positions, axis=1, keepdim=True) # type: ignore
positions = self.augment_positions(positions, positions_delta)
positions = rearrange(positions, "b t -> b t 1")
product = positions * self.freq.to(x)
pos_emb = torch.sin(product + self.cos_shifts.to(x))
if not self.batch_first:
pos_emb = rearrange(pos_emb, "b t c -> t b c")
pos_emb[pos_emb != pos_emb] = 0 # torch.nan_to_num(pos_emb, nan=0)
return pos_emb
@tp.no_type_check # TODO reactivate
def augment_positions(
self,
positions: Tensor,
positions_delta: tp.Optional[tp.Union[int, Tensor]] = None,
):
if self.training:
batch_size, n_tokens = positions.shape
if self.max_global_shift:
delta = torch.FloatTensor(batch_size, 1).uniform_(
-self.max_global_shift, self.max_global_shift
)
delta = delta.to(positions.device)
else:
delta = 0
if self.max_local_shift:
epsilon = self.max_local_shift
delta_local = torch.FloatTensor(batch_size, n_tokens)
delta_local = delta_local.uniform_(-epsilon, epsilon)
delta_local = delta_local.to(positions.device)
if positions_delta is not None:
if (
torch.is_tensor(positions_delta)
and len(positions_delta.shape) == 1
):
positions_delta = rearrange(positions_delta, "b -> b 1")
delta_local *= positions_delta
else:
delta_local = 0
if self.max_global_scaling > 1.0:
log_lambdas = torch.FloatTensor(batch_size, 1)
log_lambdas = log_lambdas.uniform_(
-math.log(self.max_global_scaling),
math.log(self.max_global_scaling),
)
log_lambdas = log_lambdas.to(positions.device)
else:
log_lambdas = torch.zeros(1).to(positions.device)
positions = (positions + delta + delta_local) * torch.exp(log_lambdas)
return positions
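# Illustrative usage of CAPE1d (shapes below are assumptions, not taken from this file):
#   cape = CAPE1d(d_model=512, max_global_shift=5.0, normalize=True)
#   x = torch.randn(128, 32, 512)       # (n_tokens, batch, d_model) since batch_first=False
#   lengths = torch.randint(10, 128, (32,))
#   y = cape(x, lengths)                 # same shape as x, with the positional embedding added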
# def cape_positions_1d(
# positions_1d: np.ndarray,
# mean_normalize: bool,
# augment: bool, # True during training
# max_global_shift, # delta max
# max_local_shift, # epsilon max
# max_scale, # lambda max
# rng=np.random.RandomState(42),
# ):
# """
# Takes original positions, returns modified ones.
# Can reuse sin/cos embedding from "Attention is all you need".
# Code handles NaNs is positions_1d input as if those correspond to pad tokens
# """
# assert max_scale >= 1
# batch_size, n_tokens = positions_1d.shape
# if mean_normalize:
# positions_1d -= np.nanmean(positions_1d, axis=1, keepdims=True)
# if augment:
# delta = rng.uniform(-max_global_shift, +max_global_shift, size=[batch_size, 1])
# delta_local = rng.uniform(
# -max_local_shift, +max_local_shift, size=[batch_size, n_tokens]
# )
# log_lambdas = rng.uniform(
# -np.log(max_scale), +np.log(max_scale), size=[batch_size, 1]
# )
# new_positions = (positions_1d + delta + delta_local) * np.exp(log_lambdas)
# return new_positions
# else:
# return positions_1d
|
CodeGen-main
|
codegen_sources/model/src/model/cape_embeddings.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import os
import typing as tp
import dataclasses
import logging
import torch
from ..data.dictionary import UNK_WORD
from .pretrain import load_embeddings as load_embeddings
from .. import utils
# , TRANSFORMER_LAYER_PARAMS
from .transformer import DECODER_ONLY_PARAMS, Classifier, TransformerModel
logger = logging.getLogger(__name__)
def add_missing_parameters(parameters: tp.Any, log: bool = True) -> None:
"""Adds missing default arguments into the parameters object
This only applies to AttrDict instances which mock the parsed args
when reloaded, and may not contain up-to-date parameters
"""
from codegen_sources.model.train import get_parser # avoid circular import :(
parser = get_parser()
# get all defaults (simpler for debugging)
defaults = {}
for action in parser._actions: # pylint: disable=protected-access
if not action.required and action.dest != "help":
defaults[action.dest] = action.default
if isinstance(parameters, utils.AttrDict):
for p, val in defaults.items():
if p not in parameters.__dict__:
if log:
logger.info("Adding default value %s for %s in parameter", val, p)
parameters.__dict__[p] = val
def check_model_params(params):
"""
Check models parameters.
"""
# masked language modeling task parameters
assert params.bptt >= 1
assert 0 <= params.word_pred < 1
assert 0 <= params.sample_alpha < 1
s = params.word_mask_keep_rand.split(",")
assert len(s) == 3
s = [float(x) for x in s]
assert all([0 <= x <= 1 for x in s]) and sum(s) == 1
params.word_mask = s[0]
params.word_keep = s[1]
params.word_rand = s[2]
if params.mask_length == "":
params.mask_length = None
params.mask_length_dist = None
elif params.mask_length == "poisson":
assert (
params.poisson_lambda is not None
), "poisson_lambda is None, it should be set when using poisson mask_length"
_lambda = params.poisson_lambda
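        # build the (truncated) Poisson pmf p(k) = exp(-lambda) * lambda^k / k! iteratively,
        # stopping once the tail probability drops below 1e-7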
lambda_to_the_k = 1
e_to_the_minus_lambda = math.exp(-_lambda)
k_factorial = 1
ps = []
for k in range(0, 128):
ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
lambda_to_the_k *= _lambda
k_factorial *= k + 1
if ps[-1] < 0.0000001:
break
ps = torch.FloatTensor(ps)
params.mask_length_dist_probas = ps
params.mask_length_dist = torch.distributions.Categorical(ps)
else:
params.mask_length = int(params.mask_length)
ps = torch.FloatTensor(params.mask_length + 1).fill_(0.0)
ps[params.mask_length] = 1
params.mask_length_dist = torch.distributions.Categorical(ps)
# input sentence noise for DAE
if len(params.ae_steps) == 0:
assert params.word_shuffle == 0
assert params.word_dropout == 0
assert params.word_blank == 0
else:
assert params.word_shuffle == 0 or params.word_shuffle > 1
assert 0 <= params.word_dropout < 1
assert 0 <= params.word_blank < 1
# model dimensions
if params.emb_dim_encoder == 0 and params.emb_dim_decoder == 0:
assert params.emb_dim > 0
params.emb_dim_encoder = params.emb_dim
params.emb_dim_decoder = params.emb_dim
else:
assert params.emb_dim == 0
assert params.emb_dim_encoder > 0 and params.emb_dim_decoder > 0
if params.emb_dim_encoder == params.emb_dim_decoder:
params.emb_dim = params.emb_dim_decoder
else:
assert params.reload_emb == "", (
"Pre-trained embeddings are not supported when the embedding size of the "
"encoder and the decoder do not match "
)
assert params.emb_dim_encoder % params.n_heads == 0
assert params.emb_dim_decoder % params.n_heads == 0
if params.n_layers_encoder == 0 and params.n_layers_decoder == 0:
assert params.n_layers > 0
params.n_layers_encoder = params.n_layers
params.n_layers_decoder = params.n_layers
else:
assert params.n_layers == 0
assert params.n_layers_encoder > 0 and params.n_layers_decoder > 0
# reload pretrained word embeddings
if params.reload_emb != "":
assert os.path.isfile(params.reload_emb)
# reload a pretrained model
if params.reload_model != "":
if params.encoder_only:
assert os.path.isfile(params.reload_model)
else:
s = params.reload_model.split(",")
assert len(s) == 2
assert all([x == "" or os.path.isfile(x) for x in s]), [
x for x in s if not os.path.isfile(x)
]
if params.use_classifier and params.reload_classifier == "":
params.reload_classifier = params.reload_model
assert not (
params.beam_size > 1 and params.number_samples > 1
), "Cannot sample when already doing beam search"
assert (params.eval_temperature is None) == (
params.number_samples <= 1
), "Eval temperature should be set if and only if taking several samples at eval time"
def set_pretrain_emb(model, dico, word2id, embeddings, gpu):
"""
Pretrain word embeddings.
"""
n_found = 0
with torch.no_grad():
for i in range(len(dico)):
idx = word2id.get(dico[i], None)
if idx is None:
continue
n_found += 1
model.embeddings.weight[i] = (
embeddings[idx].cuda() if gpu else embeddings[idx]
)
model.pred_layer.proj.weight[i] = (
embeddings[idx].cuda() if gpu else embeddings[idx]
)
logger.info(
"Pretrained %i/%i words (%.3f%%)."
% (n_found, len(dico), 100.0 * n_found / len(dico))
)
@torch.no_grad()
def build_model(params, dico, gpu=True):
"""
Build model.
"""
add_missing_parameters(params)
if params.encoder_only:
# build
model = TransformerModel(params, dico, is_encoder=True, with_output=True)
# reload pretrained word embeddings
if params.reload_emb != "":
word2id, embeddings = load_embeddings(params.reload_emb, params)
set_pretrain_emb(model, dico, word2id, embeddings, gpu)
# reload a pretrained model
if params.reload_model != "":
logger.info("============ Model Reloading")
logger.info("Reloading model from %s ..." % params.reload_model)
reload_transformer(
params, params.reload_model, dico, model, "model", gpu=gpu
)
logger.info("Model: {}".format(model))
logger.info(
"Number of parameters (model): %i"
% sum([p.numel() for p in model.parameters() if p.requires_grad])
)
logger.info("")
return [model.cuda() if gpu else model]
else:
# build
# TODO: only output when necessary - len(params.clm_steps + params.mlm_steps) > 0
encoder = TransformerModel(params, dico, is_encoder=True, with_output=True)
if params.separate_decoders:
decoders = [
TransformerModel(params, dico, is_encoder=False, with_output=True)
for _ in params.lang2id.values()
]
else:
decoders = [
TransformerModel(params, dico, is_encoder=False, with_output=True)
]
for layer in range(params.n_layers_decoder):
if layer <= params.n_share_dec - 1:
assert params.amp == -1, "sharing layers is not supported with AMP"
logger.info("Sharing decoder attention parameters for layer %i" % layer)
for i in range(1, len(decoders)):
decoders[i].attentions[layer] = decoders[0].attentions[layer]
# reload pretrained word embeddings
if params.reload_emb != "":
word2id, embeddings = load_embeddings(params.reload_emb, params)
set_pretrain_emb(encoder, dico, word2id, embeddings, gpu)
for decoder in decoders:
set_pretrain_emb(decoder, dico, word2id, embeddings, gpu)
# reload a pretrained model
if params.reload_model != "":
logger.info("============ Model Reloading")
enc_path, dec_path = params.reload_model.split(",")
assert not (enc_path == "" and dec_path == "")
# reload encoder
if enc_path != "":
logger.info("Reloading encoder from %s ..." % enc_path)
reload_transformer(params, enc_path, dico, encoder, "encoder", gpu=gpu)
# reload decoders
if dec_path != "":
for i, dec in enumerate(decoders):
logger.info("Reloading decoders from %s ..." % dec_path)
if params.reload_encoder_for_decoder:
reload_transformer(
params, dec_path, dico, dec, "encoder", gpu=gpu
)
else:
reload_transformer(
params, dec_path, dico, dec, "decoder", gpu, i
)
logger.debug("Encoder: {}".format(encoder))
logger.debug("Decoder: {}".format(decoders))
logger.info(
"Number of parameters (encoder): %i"
% sum([p.numel() for p in encoder.parameters() if p.requires_grad])
)
logger.info(
"Number of parameters (decoders): %i"
% sum([p.numel() for p in decoders[0].parameters() if p.requires_grad])
)
logger.info(f"Number of decoders: {len(decoders)}")
logger.info("")
return (
[encoder.cuda() if gpu else encoder],
[dec.cuda() if gpu else dec for dec in decoders],
)
@torch.no_grad()
def build_classifier(params):
"""
Build classifier.
"""
# build
classifier = Classifier(params)
# reload a pretrained model
if params.reload_classifier != "":
logger.info("Reloading classifier from %s ..." % params.reload_classifier)
reloaded = torch.load(
params.reload_classifier,
map_location=lambda storage, loc: storage.cuda(params.local_rank),
)
if "classifier" not in reloaded:
logger.warning(
f"There is no classifier in {params.reload_classifier}. The classifier weights will be initialized randomly"
)
else:
reloaded = reloaded["classifier"]
if all([k.startswith("module.") for k in reloaded.keys()]):
reloaded = {k[len("module.") :]: v for k, v in reloaded.items()}
classifier.load_state_dict(reloaded)
logger.info("Classifier: {}".format(classifier))
return [classifier.cuda()]
def reload_transformer(
params, path, dico, model, model_type, gpu=True, model_number=None
):
"""
Reload a transformer state dict to current model:
clean 'module.' from state dict,
match the word embeddings comparing dicos,
match lang embedding with params lang mapping,
    extend or truncate position embeddings when sizes don't match,
load state dict.
"""
reloaded = torch.load(
path,
map_location=lambda storage, loc: storage.cuda(params.local_rank)
if gpu
else storage.cpu(),
)
if "state_dicts" in reloaded: # compatibility with new online pipeline
logger.warning("Reloading from multixp checkpoint (skipping safety checks)")
for name in ["encoder", "decoder"]:
reloaded[name] = reloaded["state_dicts"]["models/" + name]
pdict = {f.name: getattr(params, f.name) for f in dataclasses.fields(params)}
reloaded["params"] = pdict
word2id = reloaded.get("word2id", None)
if word2id is None:
logger.warning(
"word2id is missing in reloaded checkpoint, assuming current ones"
)
word2id = model.dico
reloaded["dico_word2id"] = word2id
reloaded["dico_id2word"] = {y: x for x, y in word2id.items()}
clean_model_state_dict(reloaded, model_type, model_number)
reload_word_embeddings(reloaded, dico, model_type)
reload_lang_embeddings(reloaded, params, model_type)
reload_position_embeddings(reloaded, model, model_type)
# if the model is a decoder
if hasattr(model, "encoder_attn"):
for i in range(params.n_layers_decoder):
for name in DECODER_ONLY_PARAMS:
weight_name = name % i
if weight_name not in reloaded[model_type]:
logger.warning("Parameter %s not found." % (weight_name))
encoder_attn_name = weight_name.replace(
"encoder_attn", "attentions"
)
if (
getattr(params, "reload_encoder_attn_on_decoder", False)
and "encoder_attn" in weight_name
and encoder_attn_name in reloaded[model_type]
):
logger.warning(f"Reloading {encoder_attn_name} instead")
reloaded[model_type][weight_name] = (
reloaded[model_type][encoder_attn_name].clone().detach()
)
else:
reloaded[model_type][weight_name] = model.state_dict()[
weight_name
]
model.load_state_dict(reloaded[model_type], strict=not params.spans_emb_encoder)
def clean_model_state_dict(reloaded, model_type, model_number=None):
"""
remove prefix module from the keys of the model state dict.
"""
type_with_number = f"{model_type}_{model_number}"
if model_number is not None and type_with_number in reloaded:
model_reloaded = reloaded[type_with_number]
else:
if model_number is not None:
logger.info(
f"{type_with_number} not in reloaded model, reloading {model_type}"
)
model_reloaded = reloaded[model_type if model_type in reloaded else "model"]
if all([k.startswith("module.") for k in model_reloaded.keys()]):
model_reloaded = {k[len("module.") :]: v for k, v in model_reloaded.items()}
reloaded[model_type] = model_reloaded
def reload_word_embeddings(reloaded, dico, model_type):
"""
    Check, when reloading a model, that the dictionaries are the same. If not, map the word embeddings when possible.
"""
reloaded_word2id = reloaded["dico_word2id"]
reloaded_id2word = reloaded["dico_id2word"]
assert len(reloaded_word2id) == len(reloaded_id2word)
assert all(reloaded_id2word[v] == k for k, v in reloaded_word2id.items())
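    # map every word of the current dictionary to its row in the reloaded embedding matrix;
    # words absent from the reloaded dictionary fall back to the <unk> row (and are logged below)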
matching_indices = []
word_not_found = []
for idx, word in dico.id2word.items():
if word not in reloaded_word2id:
word_not_found += [word]
matching_indices += [reloaded_word2id[UNK_WORD]]
else:
matching_indices += [reloaded_word2id[word]]
assert len(matching_indices) == len(dico)
if len(word_not_found) > 0:
logger.warning(
f"When reloading word embeddings, could not find embeddings for {len(word_not_found)} words: {word_not_found[0:5] + ['...'] + word_not_found[-5:]}... Initializing them to < unk >."
)
reloaded[model_type]["embeddings.weight"] = torch.cat(
[
reloaded[model_type]["embeddings.weight"][index : index + 1]
for index in matching_indices
],
dim=0,
)
if "pred_layer.proj.weight" in reloaded[model_type]:
first_line = reloaded[model_type]["pred_layer.proj.weight"][0:1]
embedding_size = reloaded[model_type]["pred_layer.proj.weight"].shape[1]
reloaded[model_type]["pred_layer.proj.weight"] = torch.cat(
[
reloaded[model_type]["pred_layer.proj.weight"][index : index + 1]
if index is not None
else torch.normal(
torch.zeros_like(first_line),
                    torch.ones_like(first_line) * (embedding_size ** (-0.5)),
)
for index in matching_indices
],
dim=0,
)
reloaded[model_type]["pred_layer.proj.bias"] = torch.cat(
[
reloaded[model_type]["pred_layer.proj.bias"][index].view(1)
if index is not None
else torch.rand_like(
reloaded[model_type]["pred_layer.proj.bias"][0].view(1)
)
for index in matching_indices
]
)
def reload_lang_embeddings(reloaded, params, model_type):
"""
    When the pretrained model was not trained with the same languages,
    remap the lang embedding state dict.
    Otherwise, keep it as is.
"""
model_reloaded = reloaded[model_type]
reloaded_params = reloaded["params"]
if params.lgs_mapping == "":
lang_mapping = {}
else:
lang_mapping = {
mapping.split(":")[0]: mapping.split(":")[1]
for mapping in params.lgs_mapping.split(",")
}
langs_reloaded = reloaded_params["lang2id"]
langs_reloaded_id2lang = reloaded_params["id2lang"]
indices = []
for lang in [l for i, l in sorted(params.id2lang.items())]:
if lang in lang_mapping:
lang_ = lang_mapping[lang]
else:
lang_ = lang
index = [id for l, id in langs_reloaded.items() if l == lang_]
if len(index) == 0:
logger.warning(
f"No match found for lang {lang} {lang_} in {langs_reloaded.keys()}. Initializing randomly."
)
indices.append(None)
continue
else:
assert (
len(index) == 1
), f"matching lang found: {index} in reloaded model for lang {lang} in {langs_reloaded.keys()}"
logger.warning(
f"Lang {lang} matched to pretrained {langs_reloaded_id2lang[index[0]]} lang embedding."
)
indices.append(index[0])
first_line = model_reloaded["lang_embeddings.weight"][0:1]
embedding_size = model_reloaded["lang_embeddings.weight"].shape[1]
model_reloaded["lang_embeddings.weight"] = torch.cat(
[
model_reloaded["lang_embeddings.weight"][index : index + 1]
if index is not None
else torch.normal(
torch.zeros_like(first_line),
            torch.ones_like(first_line) * (embedding_size ** (-0.5)),
)
for index in indices
],
dim=0,
)
reloaded[model_type] = model_reloaded
def reload_position_embeddings(reloaded, encoder, model_type):
"""
    When the pretrained model was not trained with the same position embedding size,
    remove unused positions or add extra ones.
"""
model_reloaded = reloaded[model_type]
current_size = encoder.position_embeddings.weight.size()[0]
reloaded_size = model_reloaded["position_embeddings.weight"].size()[0]
if current_size == reloaded_size:
return model_reloaded
elif current_size < reloaded_size:
logger.warning(
f"The size of position embeddings in current model is {current_size}, the size of reloaded is {reloaded_size}. need to truncate the reloaded position embeddings."
)
model_reloaded["position_embeddings.weight"] = model_reloaded[
"position_embeddings.weight"
][:current_size, :]
else:
logger.warning(
f"The size of position embeddings in current model is {current_size}, the size of reloaded is {reloaded_size}. need to repeat last positions {current_size - reloaded_size} times."
)
model_reloaded["position_embeddings.weight"] = torch.cat(
[
model_reloaded["position_embeddings.weight"],
model_reloaded["position_embeddings.weight"][-1, :].repeat(
current_size - reloaded_size, 1
),
],
dim=0,
)
reloaded[model_type] = model_reloaded
|
CodeGen-main
|
codegen_sources/model/src/model/__init__.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import typing as tp
import itertools
import math
import dataclasses
from logging import getLogger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from codegen_sources.model.src.model.cape_embeddings import CAPE1d
try:
from xformers import ops as xops # type: ignore
except (ImportError, ModuleNotFoundError):
xops = None
print("No efficient attention.")
LAYER_NORM_EPSILON = 1e-5
N_MAX_POSITIONS = 2048 # maximum input sequence length
DECODER_ONLY_PARAMS = [
"layer_norm15.%i.weight",
"layer_norm15.%i.bias",
"encoder_attn.%i.q_lin.weight",
"encoder_attn.%i.q_lin.bias",
"encoder_attn.%i.k_lin.weight",
"encoder_attn.%i.k_lin.bias",
"encoder_attn.%i.v_lin.weight",
"encoder_attn.%i.v_lin.bias",
"encoder_attn.%i.out_lin.weight",
"encoder_attn.%i.out_lin.bias",
]
TRANSFORMER_LAYER_PARAMS = [
"attentions.%i.q_lin.weight",
"attentions.%i.q_lin.bias",
"attentions.%i.k_lin.weight",
"attentions.%i.k_lin.bias",
"attentions.%i.v_lin.weight",
"attentions.%i.v_lin.bias",
"attentions.%i.out_lin.weight",
"attentions.%i.out_lin.bias",
"layer_norm1.%i.weight",
"layer_norm1.%i.bias",
"ffns.%i.lin1.weight",
"ffns.%i.lin1.bias",
"ffns.%i.lin2.weight",
"ffns.%i.lin2.bias",
"layer_norm2.%i.weight",
"layer_norm2.%i.bias",
]
logger = getLogger()
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
# nn.init.normal_(m.weight, mean=0, std=1)
# nn.init.xavier_uniform_(m.weight)
# nn.init.constant_(m.bias, 0.)
return m
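# The helper below fills `out` with the sinusoidal encodings of "Attention Is All You Need":
# out[pos, 2i] = sin(pos / 10000**(2i / dim)) and out[pos, 2i + 1] = cos(pos / 10000**(2i / dim)).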
def create_sinusoidal_embeddings(n_pos, dim, out):
position_enc = np.array(
[
[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
for pos in range(n_pos)
]
)
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
@dataclasses.dataclass
class LayerDropoutSelector:
"""Randomly selects which layers should be activated or not"""
n_layers: int
dropout: float
min_layers: int
def __post_init__(self) -> None:
assert (
0 <= self.min_layers <= self.n_layers
), f"minimum {self.min_layers} out of {self.n_layers}"
def select(self, train: bool) -> tp.List[bool]:
"""Selects a set of activated layers
Parameter
---------
train: bool
whether in train mode or not
during test mode, all layers are activated.
Returns
-------
List[bool]
a list of size n_layers providing whether the corresponding
layer is active or not
"""
if self.dropout <= 0 or not train:
return [True] * self.n_layers
rates = np.random.rand(self.n_layers) # unseeded?
to_keep = rates >= self.dropout
if to_keep.sum() < self.min_layers:
idx = rates.argsort()[::-1][: self.min_layers]
to_keep[idx] = True
assert to_keep.sum() == self.min_layers
return to_keep.tolist()
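# Illustrative usage sketch (editor addition, not part of the original CodeGen file):
# LayerDropoutSelector drops whole layers at training time but never goes below
# min_layers, and keeps every layer at evaluation time.
def _example_layer_dropout_selector() -> None:
    selector = LayerDropoutSelector(n_layers=6, dropout=0.5, min_layers=2)
    kept = selector.select(train=True)
    assert len(kept) == 6 and sum(kept) >= 2  # at least min_layers stay active
    assert selector.select(train=False) == [True] * 6  # no layer dropout at eval time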
def gelu(x):
"""
GELU activation
https://arxiv.org/abs/1606.08415
https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py#L14
https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/modeling.py
"""
# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))
def get_masks(slen, lengths, causal):
"""
Generate hidden states mask, and optionally an attention mask.
"""
assert lengths.max().item() <= slen
bs = lengths.size(0)
alen = torch.arange(slen, dtype=torch.long, device=lengths.device)
mask = alen < lengths[:, None]
# attention mask is the same as mask, or triangular inferior attention (causal)
if causal:
attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]
else:
attn_mask = mask
# sanity check
assert mask.size() == (bs, slen)
assert causal is False or attn_mask.size() == (bs, slen, slen)
return mask, attn_mask
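# Illustrative usage sketch (editor addition, not part of the original CodeGen file):
# for a padded batch, get_masks returns a (bs, slen) padding mask and, when
# causal=True, a (bs, slen, slen) lower-triangular attention mask.
def _example_get_masks() -> None:
    lengths = torch.tensor([3, 5])
    mask, attn_mask = get_masks(slen=5, lengths=lengths, causal=True)
    assert mask.shape == (2, 5)
    assert attn_mask.shape == (2, 5, 5)
    assert mask[0].tolist() == [True, True, True, False, False]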
def create_position_ids_from_input_ids(input_ids, padding_idx):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
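# Illustrative usage sketch (editor addition, not part of the original CodeGen file):
# non-padding tokens get positions padding_idx + 1, padding_idx + 2, ... while
# padding tokens keep padding_idx, matching the RoBERTa convention.
def _example_position_ids_from_input_ids() -> None:
    input_ids = torch.tensor([[5, 6, 7, 1, 1]])  # 1 is the padding index here
    positions = create_position_ids_from_input_ids(input_ids, padding_idx=1)
    assert positions.tolist() == [[2, 3, 4, 1, 1]]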
class PredLayer(nn.Module):
"""
Prediction layer (cross_entropy loss).
"""
def __init__(self, params) -> None:
super().__init__()
self.fp16 = params.fp16
self.n_words = params.n_words
self.pad_index = params.pad_index
dim = params.emb_dim_decoder
self.proj = Linear(dim, params.n_words, bias=True)
def forward(self, x, y, get_scores=False):
"""
Compute the loss, and optionally the scores.
"""
with torch.cuda.amp.autocast(enabled=self.fp16):
assert (y == self.pad_index).sum().item() == 0
scores = self.proj(x).view(-1, self.n_words)
loss = F.cross_entropy(scores.float(), y, reduction="mean").type_as(scores)
return scores, loss
def get_scores(self, x):
"""
Compute scores.
"""
with torch.cuda.amp.autocast(enabled=self.fp16):
assert x.dim() == 2
return self.proj(x)
class Classifier(nn.Module):
"""
Classifier layer (cross_entropy loss).
"""
def __init__(self, params) -> None:
super().__init__()
self.n_classes = params.n_classes_classif
self.emb_dim = params.emb_dim_decoder
self.proj = Linear(self.emb_dim, params.n_classes_classif, bias=True)
def forward(self, x, y, pred_mask, get_scores=False):
"""
Compute the loss, and optionally the scores.
x : len x bs x emb_dim
y : len x bs
"""
x = x[pred_mask.unsqueeze(-1).expand_as(x)].view(-1, self.emb_dim)
scores = self.proj(x).view(-1, self.n_classes)
assert sum(sum(pred_mask.int())).item() == scores.shape[0]
if y is None:
return scores
y = y[pred_mask].view(-1,)
loss = F.cross_entropy(scores.float(), y, reduction="mean").type_as(scores)
return scores, loss
class MultiHeadAttention(nn.Module):
NEW_ID = itertools.count()
def __init__(
self,
n_heads,
dim,
efficient_attn: tp.Optional[str],
dim_encoder=None,
dropout: float = 0.0,
) -> None:
super().__init__()
self.layer_id = next(MultiHeadAttention.NEW_ID)
self.dim = dim
self.dim_encoder = dim if dim_encoder is None else dim_encoder
self.efficient_attn = efficient_attn
self.n_heads = n_heads
self.dropout = dropout
# assert self.dim % self.n_heads == 0
self.q_lin = Linear(dim, dim)
self.k_lin = Linear(self.dim_encoder, dim)
self.v_lin = Linear(self.dim_encoder, dim)
self.out_lin = Linear(dim, dim)
self.cache: tp.Optional[dict] = None
if self.efficient_attn is not None:
self.attn_op = {
"flash": xops.MemoryEfficientAttentionFlashAttentionOp,
"cutlass": xops.MemoryEfficientAttentionCutlassOp,
"fctls_bflsh": xops.MemoryEfficientAttentionCutlassFwdFlashBwOp,
"auto": None,
}[self.efficient_attn]
def forward(
self,
input: torch.Tensor,
mask: torch.Tensor,
kv: tp.Optional[torch.Tensor] = None,
use_cache: bool = False,
) -> torch.Tensor:
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
- input (bs, qlen, dim)
- mask (bs, klen) (non-causal) or (bs, klen, klen)
"""
assert not (use_cache and self.cache is None)
bs, qlen, dim = input.size()
if kv is None:
klen = qlen
if use_cache:
klen += self.cache["slen"] # type: ignore
else:
klen = kv.size(1)
assert dim == self.dim, "Dimensions do not match: %s input vs %s configured" % (
dim,
self.dim,
)
n_heads = self.n_heads
dim_per_head = dim // n_heads
mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
def shape(x):
"""projection"""
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x):
"""compute context"""
return (
x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
)
q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
elif not use_cache or self.layer_id not in self.cache: # type: ignore
k = v = kv
k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
if use_cache:
assert self.cache
if self.layer_id in self.cache:
if kv is None:
k_, v_ = self.cache[self.layer_id]
# (bs, n_heads, klen, dim_per_head)
k = torch.cat([k_, k], dim=2)
# (bs, n_heads, klen, dim_per_head)
v = torch.cat([v_, v], dim=2)
else:
k, v = self.cache[self.layer_id]
self.cache[self.layer_id] = (k, v)
unsupported_efficient_attn = (
self.efficient_attn and mask.shape == (bs, klen) and not mask.all().item()
)
if unsupported_efficient_attn or self.efficient_attn is None:
if unsupported_efficient_attn:
logger.warning(
"xformers does not support padding and custom masks right now. Defaulting to normal attention."
)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)
# (bs, n_heads, qlen, klen)
scores = torch.matmul(q, k.transpose(2, 3))
mask = (
(mask == 0).view(mask_reshape).expand_as(scores)
) # (bs, n_heads, qlen, klen)
# (bs, n_heads, qlen, klen)
scores.masked_fill_(mask, -float("inf"))
# (bs, n_heads, qlen, klen)
weights = F.softmax(scores.float(), dim=-1).type_as(scores)
# (bs, n_heads, qlen, klen)
weights = F.dropout(weights, p=self.dropout, training=self.training)
# (bs, n_heads, qlen, dim_per_head)
context = torch.matmul(weights, v)
# (bs, qlen, dim)
else:
dropout_proba = self.dropout if self.training else 0
assert xops is not None
# Attention Mask
if mask.all().item():
# Encoder without padding
attn_bias = None
elif mask.shape == (bs, klen):
raise ValueError(
"Padding attention masks are not supported by xformers right now"
)
# # encoder
# attn_bias = (
# mask.clone().type_as(q).fill_(0).masked_fill_(~mask, -float("inf"))
# )
# attn_bias = attn_bias[:, None, :].repeat(1, n_heads, 1).flatten(0, 1)
# attn_bias = attn_bias[:, None, :].expand(
# attn_bias.shape[0], qlen, attn_bias.shape[1]
# )
elif q.size(1) == 1 and use_cache:
# Generation
attn_bias = None
# attn_bias: bs * num_heads, qlen, klen, num_heads
else:
# decoder forward
assert mask.shape == (
bs,
klen,
klen,
), f"Should be decoder but mask shape is {mask.shape}"
attn_bias = xops.LowerTriangularMask(device=q.device, dtype=q.dtype)
# Transpose to match format [batch, seqlen, num_heads, K]
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
output = xops.memory_efficient_attention(
q, k, v, p=dropout_proba, attn_bias=attn_bias, op=self.attn_op
)
# Transpose back
context = output.transpose(1, 2)
context = unshape(context)
# output dim: bs, qlen, n_heads * dim_per_head
return self.out_lin(context)
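# Illustrative usage sketch (editor addition, not part of the original CodeGen file):
# standard (non-xformers) self-attention over a small unpadded batch; the numbers
# are random, only the shapes matter.
def _example_self_attention() -> torch.Tensor:
    attn = MultiHeadAttention(n_heads=2, dim=8, efficient_attn=None)
    x = torch.randn(3, 5, 8)  # (bs, qlen, dim)
    mask = torch.ones(3, 5, dtype=torch.bool)  # no padding
    return attn(x, mask)  # (bs, qlen, dim), same shape as the input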
class TransformerFFN(nn.Module):
def __init__(self, in_dim, dim_hidden, out_dim, dropout, gelu_activation) -> None:
super().__init__()
self.dropout = dropout
self.lin1 = Linear(in_dim, dim_hidden)
self.lin2 = Linear(dim_hidden, out_dim)
self.act = gelu if gelu_activation else F.relu
def forward(self, input):
x = self.lin1(input)
x = self.act(x)
x = self.lin2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return x
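# Illustrative usage sketch (editor addition, not part of the original CodeGen file):
# the feed-forward block maps (bs, slen, in_dim) to (bs, slen, out_dim) through a
# single hidden layer; the dimensions below are arbitrary examples.
def _example_transformer_ffn() -> torch.Tensor:
    ffn = TransformerFFN(in_dim=8, dim_hidden=32, out_dim=8, dropout=0.1, gelu_activation=True)
    x = torch.randn(2, 5, 8)
    return ffn(x)  # (2, 5, 8)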
@dataclasses.dataclass
class TransformerConfig:
fp16: bool = False
# model parameters
emb_dim: int = 512
    emb_dim_encoder: int = 512 # those duplicated params are dangerous :s
emb_dim_decoder: int = 512
n_layers: int = 4
n_layers_encoder: int = 4
n_layers_decoder: int = 4
n_heads: int = 8
dropout: float = 0
attention_dropout: float = 0
gelu_activation: bool = False
share_inout_emb: bool = True
sinusoidal_embeddings: bool = False
layer_dropout: float = 0.0
min_layers: int = 2
spans_emb_encoder: bool = False
# CAPE relative embeddings
cape_embeddings: bool = False
cape_global_shift: float = 5.0
cape_local_shift: float = 0.5
cape_global_scaling: float = 1.0
discrete_cape_max: int = 0
use_lang_emb: bool = True
# added later # where is that from?
n_words: int = -1
eos_index: int = -1
pad_index: int = -1
n_classes_classif: int = -1
n_langs: int = -1
langs: tp.List[str] = dataclasses.field(default_factory=list)
id2lang: tp.Dict[int, str] = dataclasses.field(default_factory=dict)
lang2id: tp.Dict[str, int] = dataclasses.field(default_factory=dict)
efficient_attn: tp.Optional[str] = None
class TransformerModel(nn.Module):
ATTRIBUTES = [
"encoder",
"with_output",
"eos_index",
"pad_index",
"n_langs",
"n_words",
"dim",
"n_layers",
"n_heads",
"hidden_dim",
"dropout",
"attention_dropout",
]
def __init__(
self,
# in practice it's not really a config so far, but args for argparse
params: TransformerConfig,
dico: tp.Mapping[str, torch.Tensor], # Dictionary,
is_encoder: bool,
with_output: bool,
# n_layers: tp.Optional[int] = None, # not used?
) -> None:
"""
Transformer model (encoder or decoder).
"""
super().__init__()
# encoder / decoder, output layer
self.fp16 = params.fp16
self.is_encoder = is_encoder
self.is_decoder = not is_encoder
self.with_output = with_output
self.use_span_embeddings = params.spans_emb_encoder and self.is_encoder
# dictionary / languages
self.n_langs = params.n_langs
self.n_words = params.n_words
self.eos_index = params.eos_index
self.pad_index = params.pad_index
self.dico = dico
self.id2lang = params.id2lang
self.lang2id = params.lang2id
self.use_lang_emb = getattr(params, "use_lang_emb", True)
assert len(self.dico) == self.n_words
assert len(self.id2lang) == len(self.lang2id) == self.n_langs
# model parameters
self.dim = (
params.emb_dim_encoder if is_encoder else params.emb_dim_decoder
) # 512 by default
self.hidden_dim = self.dim * 4 # 2048 by default
self.n_heads = params.n_heads # 8 by default
self.n_layers = (
params.n_layers_encoder if is_encoder else params.n_layers_decoder
)
self.dropout = params.dropout
self.attention_dropout = params.attention_dropout
self.roberta_mode = getattr(params, "tokenization_mode", "") == "roberta"
self.gelu_activation = params.gelu_activation
assert self.gelu_activation or not self.roberta_mode
self.layer_dropout_selector = LayerDropoutSelector(
n_layers=self.n_layers,
dropout=params.layer_dropout,
min_layers=params.min_layers,
)
# assert self.dim % self.n_heads == 0, 'transformer dim must be a multiple of n_heads'
# embeddings
self.discrete_cape_max = (
params.discrete_cape_max if hasattr(params, "discrete_cape_max") else 0
)
if hasattr(params, "cape_embeddings") and params.cape_embeddings:
self.position_embeddings = CAPE1d(
d_model=self.dim,
max_global_shift=params.cape_global_shift,
max_local_shift=params.cape_local_shift,
max_global_scaling=params.cape_global_scaling,
normalize=False,
batch_first=True,
)
else:
max_positions = N_MAX_POSITIONS
if self.discrete_cape_max > 0:
max_positions += 2 * self.discrete_cape_max
if self.roberta_mode:
self.position_embeddings = Embedding(
max_positions, self.dim, self.pad_index
)
else:
self.position_embeddings = Embedding(max_positions, self.dim)
if params.sinusoidal_embeddings:
create_sinusoidal_embeddings(
max_positions, self.dim, out=self.position_embeddings.weight
)
if params.n_langs > 0 and self.use_lang_emb:
self.lang_embeddings = Embedding(self.n_langs, self.dim)
self.embeddings = Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
if self.use_span_embeddings:
# self.spans_embeddings = Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
self.spans_embeddings = Embedding(
params.n_classes_classif, self.dim, padding_idx=self.pad_index
)
self.layer_norm_emb = nn.LayerNorm(self.dim, eps=LAYER_NORM_EPSILON)
# transformer layers
self.attentions = nn.ModuleList()
self.layer_norm1 = nn.ModuleList()
self.ffns = nn.ModuleList()
self.layer_norm2 = nn.ModuleList()
if self.is_decoder:
self.layer_norm15 = nn.ModuleList()
self.encoder_attn = nn.ModuleList()
self.cache = None
try:
efficient_attn = getattr(params, "efficient_attn")
except (AttributeError, RuntimeError):
efficient_attn = None
for layer_id in range(self.n_layers):
self.attentions.append(
MultiHeadAttention(
self.n_heads,
self.dim,
efficient_attn,
dropout=self.attention_dropout,
)
)
self.layer_norm1.append(nn.LayerNorm(self.dim, eps=LAYER_NORM_EPSILON))
if self.is_decoder:
self.layer_norm15.append(nn.LayerNorm(self.dim, eps=LAYER_NORM_EPSILON))
self.encoder_attn.append(
MultiHeadAttention(
self.n_heads,
self.dim,
efficient_attn,
dim_encoder=params.emb_dim_encoder,
dropout=self.attention_dropout,
)
)
self.ffns.append(
TransformerFFN(
self.dim,
self.hidden_dim,
self.dim,
dropout=self.dropout,
gelu_activation=self.gelu_activation,
)
)
self.layer_norm2.append(nn.LayerNorm(self.dim, eps=LAYER_NORM_EPSILON))
# output layer
if self.with_output:
self.pred_layer = PredLayer(params)
if params.share_inout_emb:
self.pred_layer.proj.weight = self.embeddings.weight
def empty_cache(self):
if self.cache is not None:
self.cache.clear()
def forward(self, mode, **kwargs):
"""
Forward function with different forward modes.
### Small hack to handle PyTorch distributed.
"""
with torch.cuda.amp.autocast(enabled=self.fp16):
if mode == "fwd":
return self.fwd(**kwargs)
elif mode == "predict":
return self.predict(**kwargs)
else:
raise Exception("Unknown mode: %s" % mode)
def fwd(
self,
x,
lengths,
causal,
src_enc=None,
src_len=None,
positions=None,
langs=None,
use_cache=False,
spans=None,
):
"""
Inputs:
`x` LongTensor(slen, bs), containing word indices
`lengths` LongTensor(bs), containing the length of each sentence
`causal` Boolean, if True, the attention is only done over previous hidden states
`positions` LongTensor(slen, bs), containing word positions
`langs` LongTensor(slen, bs), containing language IDs
`spans` LongTensor(slen, bs), containing the spans if use_spans is set to True
"""
# lengths = (x != self.pad_index).float().sum(dim=1)
# mask = x != self.pad_index
assert not (use_cache and self.cache is None)
if self.use_span_embeddings:
assert spans is not None
# check inputs
slen, bs = x.size()
assert lengths.size(0) == bs
assert lengths.max().item() <= slen, (lengths.max().item(), slen)
x = x.transpose(0, 1) # batch size as dimension 0
assert (src_enc is None) == (src_len is None)
if src_enc is not None:
assert self.is_decoder
assert src_enc.size(0) == bs, (src_enc.size(0), bs)
# generate masks
mask, attn_mask = get_masks(slen, lengths, causal)
if self.is_decoder and src_enc is not None:
src_mask = (
torch.arange(src_enc.shape[1], dtype=torch.long, device=lengths.device)
< src_len[:, None]
)
# positions
if positions is None:
if self.roberta_mode:
positions = create_position_ids_from_input_ids(x, self.pad_index)
else:
positions = x.new(slen).long()
positions = torch.arange(slen, out=positions).unsqueeze(0)
else:
assert positions.size() == (
slen,
bs,
), f"positions sizes: {positions.size()} do not match the size of input {slen, bs}"
positions = positions.transpose(0, 1)
# langs
if langs is not None:
assert langs.size() == (slen, bs)
langs = langs.transpose(0, 1)
# do not recompute cached elements
if use_cache:
assert self.cache is not None
_slen = slen - self.cache["slen"]
x = x[:, -_slen:]
positions = positions[:, -_slen:]
if langs is not None:
langs = langs[:, -_slen:]
mask = mask[:, -_slen:]
attn_mask = attn_mask[:, -_slen:]
# embeddings
tensor = self.embeddings(x)
if self.use_span_embeddings:
tensor = tensor + self.spans_embeddings(spans.T)
if isinstance(self.position_embeddings, CAPE1d):
tensor = self.position_embeddings(tensor, lengths)
else:
if self.discrete_cape_max > 0:
if self.training:
if len(positions) == 1 and bs > 0:
positions = positions.repeat((bs, 1))
positions += torch.randint(
low=0,
high=2 * self.discrete_cape_max,
size=(bs, 1),
device=positions.device,
)
else:
positions += self.discrete_cape_max // 2
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
if langs is not None and self.use_lang_emb:
tensor = tensor + self.lang_embeddings(langs)
tensor = self.layer_norm_emb(tensor)
tensor = F.dropout(tensor, p=self.dropout, training=self.training)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
to_keep = self.layer_dropout_selector.select(
train=self.training
) # always True in test
# transformer layers
for i, keep in enumerate(to_keep):
if not keep:
continue
# self attention
self.attentions[i].cache = self.cache
attn = self.attentions[i](tensor, attn_mask, use_cache=use_cache)
attn = F.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
tensor = self.layer_norm1[i](tensor)
# encoder attention (for decoder only)
if self.is_decoder and src_enc is not None:
assert src_enc.shape[1] == src_mask.shape[-1]
self.encoder_attn[i].cache = self.cache
attn = self.encoder_attn[i](
tensor, src_mask, kv=src_enc, use_cache=use_cache
)
attn = F.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
tensor = self.layer_norm15[i](tensor)
# FFN
tensor = tensor + self.ffns[i](tensor)
tensor = self.layer_norm2[i](tensor)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# update cache length
if use_cache:
assert self.cache is not None
self.cache["slen"] += tensor.size(1)
# move back sequence length to dimension 0
tensor = tensor.transpose(0, 1)
return tensor
def predict(self, tensor, pred_mask, y, get_scores):
"""
Given the last hidden state, compute word scores and/or the loss.
`pred_mask` is a ByteTensor of shape (slen, bs), filled with 1 when
we need to predict a word
`y` is a LongTensor of shape (pred_mask.sum(),)
`get_scores` is a boolean specifying whether we need to return scores
"""
masked_tensor = tensor[pred_mask.unsqueeze(-1).expand_as(tensor)].view(
-1, self.dim
)
scores, loss = self.pred_layer(masked_tensor, y, get_scores)
return scores, loss
def generate(
self,
src_enc,
src_len,
tgt_lang_id,
max_len=200,
sample_temperature=None,
prompt=None,
):
"""
Decode a sentence given initial start.
`x`:
- LongTensor(bs, slen)
<EOS> W1 W2 W3 <EOS> <PAD>
<EOS> W1 W2 W3 W4 <EOS>
`lengths`:
- LongTensor(bs) [5, 6]
`positions`:
- False, for regular "arange" positions (LM)
- True, to reset positions from the new generation (MT)
`langs`:
- must be None if the model only supports one language
- lang_id if only one language is involved (LM)
- (lang_id1, lang_id2) if two languages are involved (MT)
`prompt`: if None, starts generating from prompt = <bos> symbol
            else, considers that the prompt is already generated at the beginning of the output
"""
if isinstance(max_len, int):
max_lengths = src_len.clone().fill_(max_len)
global_max_len = max_len
else:
max_lengths = max_len
global_max_len = int(max_lengths.max())
# input batch
assert (src_len is None) == (src_enc is None)
if src_len is not None and src_len.size:
bs = len(src_len)
assert src_enc.size(0) == bs
else:
assert prompt is not None, "No generation prompt in decoder-only model"
bs = prompt.size(1)
# generated sentences
generated = src_len.new(global_max_len, bs) # upcoming output
        generated.fill_(self.pad_index) # fill upcoming output with <PAD>
generated[0].fill_(self.eos_index) # we use <EOS> for <BOS> everywhere
# positions
positions = src_len.new(global_max_len).long()
positions = (
torch.arange(global_max_len, out=positions)
.unsqueeze(1)
.expand(global_max_len, bs)
)
if self.roberta_mode:
positions = positions + self.pad_index + 1
# language IDs
if tgt_lang_id is None:
langs = None
else:
langs = src_len.new(global_max_len).long().fill_(tgt_lang_id)
langs = langs.unsqueeze(1).expand(global_max_len, bs)
# current position / max lengths / length of generated sentences / unfinished sentences
cur_len = 1
gen_len = src_len.clone().fill_(1)
unfinished_sents = src_len.clone().fill_(1)
# cache compute states
self.cache = {"slen": 0}
previous_unfinished_mask = unfinished_sents.ne(0)
while cur_len < global_max_len:
# compute word scores
unfinished_mask = unfinished_sents.ne(0)
should_modify = unfinished_mask.ne(previous_unfinished_mask).any()
if should_modify and self.cache is not None:
for k, v in self.cache.items():
if isinstance(k, int):
assert len(v) == 2
self.cache[k] = (
cached_tensor[unfinished_mask[previous_unfinished_mask]]
for cached_tensor in v
)
tensor = self.forward(
"fwd",
x=generated[:cur_len, unfinished_mask],
lengths=gen_len[unfinished_mask],
positions=positions[:cur_len, unfinished_mask],
langs=None if langs is None else langs[:cur_len][:, unfinished_mask],
causal=True,
src_enc=src_enc[unfinished_mask],
src_len=src_len[unfinished_mask],
use_cache=True,
)
assert tensor.size() == (1, unfinished_mask.sum().item(), self.dim), (
cur_len,
global_max_len,
src_enc.size(),
tensor.size(),
(1, bs, self.dim),
)
tensor = tensor.data[-1, :, :].type_as(src_enc) # (bs, dim)
scores = self.pred_layer.get_scores(tensor) # (bs, n_words)
# select next words: sample or greedy
if sample_temperature is None:
next_words = torch.topk(scores, 1)[1].squeeze(1)
else:
next_words = torch.multinomial(
F.softmax(scores.float() / sample_temperature, dim=1), 1
).squeeze(1)
assert next_words.size() == (unfinished_mask.sum().item(),)
# update generations / lengths / finished sentences / current length.
# No need to updates the finished sequences since the value is self.pad_index by default
generated[cur_len, unfinished_mask] = next_words
gen_len.add_(unfinished_sents)
generated[cur_len].masked_fill_(
max_lengths.eq(cur_len + 1) & unfinished_sents.eq(1), self.eos_index
)
unfinished_sents[unfinished_mask] = (
unfinished_sents[unfinished_mask]
.mul(next_words.ne(self.eos_index).long())
.mul(max_lengths[unfinished_mask].ne(cur_len + 1).long())
)
cur_len = cur_len + 1
previous_unfinished_mask = unfinished_mask
# stop when there is a </s> in each sentence, or if we exceed the maximal length
if unfinished_sents.max() == 0:
break
# sanity check
assert (generated == self.eos_index).sum() == 2 * bs
# empty cache (saves a lot of GPU memory)
self.empty_cache()
return generated[:cur_len], gen_len
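    # Illustrative usage sketch (editor addition, not part of the original CodeGen class):
    # thin wrapper showing how greedy decoding is typically invoked; `src_enc` and
    # `src_len` are assumed to come from the encoder side, and the names are examples only.
    def _example_greedy_generation(self, src_enc, src_len, tgt_lang_id):
        generated, gen_len = self.generate(
            src_enc, src_len, tgt_lang_id, max_len=128, sample_temperature=None
        )
        # generated: (cur_len, bs) LongTensor starting with <EOS> used as <BOS>
        # gen_len:   (bs,) lengths of the generated sentences
        return generated, gen_len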
def generate_beam(
self,
src_enc,
src_len,
tgt_lang_id,
beam_size,
length_penalty,
early_stopping,
max_len=200,
):
"""
Decode a sentence given initial start.
`x`:
- LongTensor(bs, slen)
<EOS> W1 W2 W3 <EOS> <PAD>
<EOS> W1 W2 W3 W4 <EOS>
`lengths`:
- LongTensor(bs) [5, 6]
`positions`:
- False, for regular "arange" positions (LM)
- True, to reset positions from the new generation (MT)
`langs`:
- must be None if the model only supports one language
- lang_id if only one language is involved (LM)
- (lang_id1, lang_id2) if two languages are involved (MT)
"""
if isinstance(max_len, int):
max_lengths = src_len.clone().fill_(max_len)
global_max_len = max_len
else:
max_lengths = max_len
global_max_len = int(max_lengths.max())
# check inputs
assert src_enc.size(0) == src_len.size(0)
assert beam_size >= 1
# batch size / number of words
bs = len(src_len)
n_words = self.n_words
# expand to beam size the source latent representations / source lengths
src_enc = (
src_enc.unsqueeze(1)
.expand((bs, beam_size) + src_enc.shape[1:])
.contiguous()
.view((bs * beam_size,) + src_enc.shape[1:])
)
src_len = src_len.unsqueeze(1).expand(bs, beam_size).contiguous().view(-1)
# generated sentences (batch with beam current hypotheses)
generated = src_len.new(global_max_len, bs * beam_size) # upcoming output
        # fill upcoming output with <PAD>
generated.fill_(self.pad_index)
# we use <EOS> for <BOS> everywhere
generated[0].fill_(self.eos_index)
# generated hypotheses
generated_hyps = [
BeamHypotheses(beam_size, global_max_len, length_penalty, early_stopping)
for _ in range(bs)
]
# positions
positions = src_len.new(global_max_len).long()
positions = (
torch.arange(global_max_len, out=positions)
.unsqueeze(1)
.expand_as(generated)
)
if self.roberta_mode:
positions = positions + self.pad_index + 1
# language IDs
langs = positions.clone().fill_(tgt_lang_id)
# scores for each sentence in the beam
beam_scores = src_enc.new(bs, beam_size).float().fill_(0)
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1)
# current position
cur_len = 1
# cache compute states
self.cache = {"slen": 0}
# done sentences
done = [False for _ in range(bs)]
while cur_len < global_max_len:
# compute word scores
tensor = self.forward(
"fwd",
x=generated[:cur_len],
lengths=src_len.new(bs * beam_size).fill_(cur_len),
positions=positions[:cur_len],
langs=langs[:cur_len],
causal=True,
src_enc=src_enc,
src_len=src_len,
use_cache=True,
)
assert tensor.size() == (1, bs * beam_size, self.dim)
# (bs * beam_size, dim)
tensor = tensor.data[-1, :, :].type_as(src_enc)
scores = self.pred_layer.get_scores(tensor) # (bs * beam_size, n_words)
# (bs * beam_size, n_words)
scores = F.log_softmax(scores.float(), dim=-1)
assert scores.size() == (bs * beam_size, n_words)
# select next words with scores
# (bs * beam_size, n_words)
_scores = scores + beam_scores[:, None].expand_as(scores)
# (bs, beam_size * n_words)
_scores = _scores.view(bs, beam_size * n_words)
next_scores, next_words = torch.topk(
_scores, 2 * beam_size, dim=1, largest=True, sorted=True
)
assert next_scores.size() == next_words.size() == (bs, 2 * beam_size)
# next batch beam content
# list of (bs * beam_size) tuple(next hypothesis score, next word, current position in the batch)
next_batch_beam = []
# for each sentence
for sent_id in range(bs):
# if we are done with this sentence
done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(
next_scores[sent_id].max().item()
)
if done[sent_id]:
next_batch_beam.extend(
[(0, self.pad_index, 0)] * beam_size
) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next words for this sentence
for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
# get beam and word IDs
beam_id = idx // n_words
word_id = idx % n_words
# end of sentence, or next word
if word_id == self.eos_index or cur_len + 1 == global_max_len:
generated_hyps[sent_id].add(
generated[:cur_len, sent_id * beam_size + beam_id].clone(),
value.item(),
)
else:
next_sent_beam.append(
(value, word_id, sent_id * beam_size + beam_id)
)
# the beam for next step is full
if len(next_sent_beam) == beam_size:
break
# update next beam content
                assert len(next_sent_beam) == (
                    0 if cur_len + 1 == global_max_len else beam_size
                )
if len(next_sent_beam) == 0:
next_sent_beam = [
(0, self.pad_index, 0)
] * beam_size # pad the batch
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == beam_size * (sent_id + 1)
# sanity check / prepare next batch
assert len(next_batch_beam) == bs * beam_size
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_words = generated.new([x[1] for x in next_batch_beam])
beam_idx = src_len.new([x[2] for x in next_batch_beam])
# re-order batch and internal states
generated = generated[:, beam_idx]
generated[cur_len] = beam_words
for k in self.cache.keys():
if k != "slen":
self.cache[k] = (
self.cache[k][0][beam_idx],
self.cache[k][1][beam_idx],
)
# update current length
cur_len = cur_len + 1
# stop when we are done with each sentence
if all(done):
break
# visualize hypotheses
# print([len(x) for x in generated_hyps], cur_len)
# globals().update( locals() );
# !import code; code.interact(local=vars())
# for ii in range(bs):
# for ss, ww in sorted(generated_hyps[ii].hyp, key=lambda x: x[0], reverse=True):
# print("%.3f " % ss + " ".join(self.dico[x] for x in ww.tolist()))
# print("")
# select the best hypotheses
tgt_len = src_len.new(bs, beam_size)
best = []
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = [
h[1] for h in sorted(hypotheses.hyp, key=lambda x: x[0], reverse=True)
]
for j, hyp in enumerate(sorted_hyps):
tgt_len[i, j] = len(hyp) + 1
# +1 for the <EOS> symbol
best.append(sorted_hyps)
# generate target batch
decoded = src_len.new(tgt_len.max().item(), beam_size, bs).fill_(self.pad_index)
for i, hypo_list in enumerate(best):
for hyp_index, hypo in enumerate(hypo_list):
decoded[: len(hypo), hyp_index, i] = hypo
decoded[len(hypo), hyp_index, i] = self.eos_index
# sanity check
assert (decoded == self.eos_index).sum() == 2 * beam_size * bs
# empty cache (saves a lot of GPU memory)
self.empty_cache()
return decoded, tgt_len, sorted([h[0] for h in hypotheses.hyp], reverse=True)
class BeamHypotheses(object):
def __init__(self, n_hyp, max_len, length_penalty, early_stopping) -> None:
"""
Initialize n-best list of hypotheses.
"""
self.max_len = max_len - 1 # ignoring <BOS>
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.n_hyp = n_hyp
        self.hyp: tp.List[tp.Tuple[float, torch.Tensor]] = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.hyp)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
if len(self) < self.n_hyp or score > self.worst_score:
self.hyp.append((score, hyp))
if len(self) > self.n_hyp:
sorted_scores = sorted(
[(s, idx) for idx, (s, _) in enumerate(self.hyp)]
)
del self.hyp[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs):
"""
        If there are enough hypotheses and none of the hypotheses being generated
can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.n_hyp:
return False
elif self.early_stopping:
return True
else:
return (
self.worst_score
>= best_sum_logprobs / self.max_len ** self.length_penalty
)
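# Illustrative usage sketch (editor addition, not part of the original CodeGen file):
# BeamHypotheses keeps the n_hyp best finished hypotheses ranked by
# length-penalised log-probability and evicts the worst one when full.
def _example_beam_hypotheses() -> None:
    hyps = BeamHypotheses(n_hyp=2, max_len=10, length_penalty=1.0, early_stopping=False)
    hyps.add(torch.tensor([5, 6, 7]), sum_logprobs=-1.5)  # score -0.5
    hyps.add(torch.tensor([5, 6]), sum_logprobs=-4.0)     # score -2.0
    hyps.add(torch.tensor([5, 8, 9]), sum_logprobs=-0.9)  # score -0.3, evicts the worst
    assert len(hyps) == 2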
|
CodeGen-main
|
codegen_sources/model/src/model/transformer.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
import torch
from ..data.dictionary import (
BOS_WORD,
EOS_WORD,
MASK_WORD,
PAD_WORD,
UNK_WORD,
Dictionary,
)
from ..utils import AttrDict
from .transformer import TransformerModel
logger = getLogger()
class SentenceEmbedder(object):
@staticmethod
def reload(path, params):
"""
Create a sentence embedder from a pretrained model.
"""
# reload model
reloaded = torch.load(path)
state_dict = reloaded["model"]
# handle models from multi-GPU checkpoints
if "checkpoint" in path:
state_dict = {
(k[7:] if k.startswith("module.") else k): v
for k, v in state_dict.items()
}
# reload dictionary and model parameters
dico = Dictionary(
reloaded["dico_id2word"], reloaded["dico_word2id"], reloaded["dico_counts"]
)
pretrain_params = AttrDict(reloaded["params"])
pretrain_params.n_words = len(dico)
pretrain_params.bos_index = dico.index(BOS_WORD)
pretrain_params.eos_index = dico.index(EOS_WORD)
pretrain_params.pad_index = dico.index(PAD_WORD)
pretrain_params.unk_index = dico.index(UNK_WORD)
pretrain_params.mask_index = dico.index(MASK_WORD)
# build model and reload weights
model = TransformerModel(pretrain_params, dico, True, True)
model.load_state_dict(state_dict)
model.eval()
# adding missing parameters
params.max_batch_size = 0
return SentenceEmbedder(model, dico, pretrain_params)
def __init__(self, model, dico, pretrain_params) -> None:
"""
Wrapper on top of the different sentence embedders.
Returns sequence-wise or single-vector sentence representations.
"""
self.pretrain_params = {k: v for k, v in pretrain_params.__dict__.items()}
self.model = model
self.dico = dico
self.n_layers = model.n_layers
self.out_dim = model.dim
self.n_words = model.n_words
def train(self):
self.model.train()
def eval(self):
self.model.eval()
def cuda(self):
self.model.cuda()
def get_parameters(self, layer_range):
s = layer_range.split(":")
assert len(s) == 2
i, j = int(s[0].replace("_", "-")), int(s[1].replace("_", "-"))
# negative indexing
i = self.n_layers + i + 1 if i < 0 else i
j = self.n_layers + j + 1 if j < 0 else j
# sanity check
assert 0 <= i <= self.n_layers
assert 0 <= j <= self.n_layers
if i > j:
return []
parameters = []
# embeddings
if i == 0:
# embeddings
parameters += self.model.embeddings.parameters()
logger.info("Adding embedding parameters to optimizer")
# positional embeddings
if self.pretrain_params["sinusoidal_embeddings"] is False:
parameters += self.model.position_embeddings.parameters()
logger.info("Adding positional embedding parameters to optimizer")
# language embeddings
if hasattr(self.model, "lang_embeddings"):
parameters += self.model.lang_embeddings.parameters()
logger.info("Adding language embedding parameters to optimizer")
parameters += self.model.layer_norm_emb.parameters()
# layers
for l in range(max(i - 1, 0), j):
parameters += self.model.attentions[l].parameters()
parameters += self.model.layer_norm1[l].parameters()
parameters += self.model.ffns[l].parameters()
parameters += self.model.layer_norm2[l].parameters()
logger.info("Adding layer-%s parameters to optimizer" % (l + 1))
logger.info(
"Optimizing on %i Transformer elements."
% sum([p.nelement() for p in parameters])
)
return parameters
def get_embeddings(self, x, lengths, positions=None, langs=None):
"""
Inputs:
`x` : LongTensor of shape (slen, bs)
`lengths` : LongTensor of shape (bs,)
Outputs:
`sent_emb` : FloatTensor of shape (bs, out_dim)
With out_dim == emb_dim
"""
slen, bs = x.size()
assert lengths.size(0) == bs and lengths.max().item() == slen
# get transformer last hidden layer
tensor = self.model(
"fwd", x=x, lengths=lengths, positions=positions, langs=langs, causal=False
)
assert tensor.size() == (slen, bs, self.out_dim)
# single-vector sentence representation (first column of last layer)
return tensor[0]
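# Illustrative usage sketch (editor addition, not part of the original CodeGen file).
# `path` is assumed to point to a pretrained checkpoint and `params` to the matching
# argparse namespace; `x` is a (slen, bs) LongTensor of word ids with lengths `lengths`.
def _example_sentence_embedding(path, params, x, lengths):
    embedder = SentenceEmbedder.reload(path, params)
    embedder.eval()
    return embedder.get_embeddings(x, lengths)  # (bs, out_dim) sentence vectors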
|
CodeGen-main
|
codegen_sources/model/src/model/embedder.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import typing as tp
from stringcase import snakecase
from ..utils import read_file_lines
def compute_subtokens(token):
return [x for x in snakecase(token).split("_") if len(x) > 0]
def subtoken_counts(proposed, ground_truth):
"""
Compute the number of precise tokens, proposed tokens and ground truth tokens
from two strings representing tokens.
"""
gt_subtokens = set(compute_subtokens(ground_truth))
proposed_subtokens = set(compute_subtokens(proposed))
precise_subtokens = proposed_subtokens.intersection(gt_subtokens)
return len(precise_subtokens), len(proposed_subtokens), len(gt_subtokens)
def subtoken_scores(proposed, ground_truth):
precise, proposed, gt = subtoken_counts(proposed, ground_truth)
precision = precise / proposed if proposed > 0 else 0
recall = precise / gt if gt > 0 else 0
f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0
return precision, recall, f1
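# Illustrative example (editor addition, not part of the original CodeGen file):
# "getLinesCount" and "linesCount" share the subtokens {lines, count}, so the
# proposal has precision 2/3 and perfect recall.
def _example_subtoken_scores() -> None:
    precision, recall, f1 = subtoken_scores("getLinesCount", "linesCount")
    assert (precision, recall) == (2 / 3, 1.0)
    assert abs(f1 - 0.8) < 1e-9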
def run_subtoken_score(ref, hyp, subtoken_average=False, all_beams=False):
"""
    Given hypothesis and reference files, evaluate the subtoken-level
    precision and recall.
"""
if all_beams:
assert not subtoken_average
for h in hyp:
assert os.path.isfile(h), f"file {h} does not exist"
assert os.path.isfile(ref) or os.path.isfile(ref + "0")
refs = read_file_lines(ref)
hyps = list(zip(*[read_file_lines(path) for path in hyp]))
if subtoken_average:
return subtoken_score_on_lines_subtoken_level([h[0] for h in hyps], refs)
else:
if not all_beams:
hyps = [[h[0]] for h in hyps]
assert len(hyps) == len(refs)
return subtoken_score_on_lines(hyps, refs)
def subtoken_score_on_lines(
hyps_list: tp.List[tp.List[str]], refs: tp.List[str]
) -> tp.Dict[str, float]:
precisions, recalls, f1_scores = [], [], []
count_exact_matches = 0
for hyps, ref in zip(hyps_list, refs):
matches = {}
for obfuscated, deobfuscated in [
entry.strip().split(" ", maxsplit=1) for entry in ref.split("|")
]:
assert obfuscated not in matches
matches[obfuscated] = {"ref": deobfuscated}
for hyp_index, hyp in enumerate(hyps):
for entry in hyp.split("|"):
split = entry.strip().split(" ", maxsplit=1)
if len(split) < 2:
continue
else:
obfuscated, deobfuscated = split[0], split[1]
if obfuscated not in matches:
# the model is trying to deobfuscate a variable that does not exist. It can be detected automatically and ignored
continue
else:
matches[obfuscated][f"hyp_{hyp_index}"] = deobfuscated
for match in matches.values():
assert "ref" in match
best_precision, best_recall, best_f1 = 0, 0, 0
exact_match = False
for k, v in match.items():
if k.startswith("hyp"):
if v == match["ref"]:
exact_match = True
precision, recall, f1 = subtoken_scores(v, match["ref"])
if f1 > best_f1:
best_precision, best_recall, best_f1 = precision, recall, f1
precisions.append(best_precision)
recalls.append(best_recall)
f1_scores.append(best_f1)
count_exact_matches += 1 if exact_match else 0
nb_tokens = len(precisions)
assert (
nb_tokens == len(precisions) == len(recalls) == len(f1_scores)
), "all lists should have the same size"
precision = sum(precisions) / nb_tokens if nb_tokens > 0 else 0
recall = sum(recalls) / nb_tokens if nb_tokens > 0 else 0
f1 = sum(f1_scores) / nb_tokens if nb_tokens > 0 else 0
ratio_exact_matches = count_exact_matches / nb_tokens if nb_tokens > 0 else 0
return {
"precision": precision,
"recall": recall,
"F1": f1,
"exact_match": ratio_exact_matches,
}
def subtoken_score_on_lines_subtoken_level(hyps, refs):
precise_subtokens, proposed_subtokens, gt_subtokens = 0, 0, 0
for hyp, ref in zip(hyps, refs):
matches = {}
for obfuscated, deobfuscated in [
(entry.strip().split(" ")[0], entry.strip().split(" ")[1])
for entry in ref.split("|")
]:
assert obfuscated not in matches
matches[obfuscated] = {"ref": deobfuscated}
for entry in hyp.split("|"):
split = entry.strip().split(" ")
if len(split) < 2:
continue
else:
obfuscated, deobfuscated = split[0], split[1]
if obfuscated not in matches:
# the model is trying to deobfuscate a variable that does not exist. It can be detected automatically and ignored
continue
else:
matches[obfuscated]["hyp"] = deobfuscated
for match in matches.values():
assert "ref" in match
precise, proposed, gt = subtoken_counts(match.get("hyp", ""), match["ref"])
precise_subtokens += precise
proposed_subtokens += proposed
gt_subtokens += gt
precision = precise_subtokens / proposed_subtokens if proposed_subtokens > 0 else 0
recall = precise_subtokens / gt_subtokens if gt_subtokens > 0 else 0
return {
"precision": precision,
"recall": recall,
"F1": 2 * precision * recall / (precision + recall)
if precision + recall > 0
else 0,
}
|
CodeGen-main
|
codegen_sources/model/src/evaluation/subtoken_score.py
|
CodeGen-main
|
codegen_sources/model/src/evaluation/__init__.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .subtoken_score import (
subtoken_counts,
subtoken_score_on_lines,
subtoken_score_on_lines_subtoken_level,
)
def test_same_strings_perfect_match():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts(
"linesCount", "linesCount"
)
assert precise_tokens == proposed_tokens == gt_tokens == 2
def test_inverted_tokens_perfect_match():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts(
"countLines", "linesCount"
)
assert precise_tokens == proposed_tokens == gt_tokens == 2
def test_different_cases_perfect_match():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts(
"lines_count", "linesCount"
)
assert precise_tokens == proposed_tokens == gt_tokens == 2
def test_extra_token_perfect_recall():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts(
"emptyLinesCount", "linesCount"
)
assert precise_tokens == gt_tokens == 2
assert proposed_tokens == 3
def test_missing_token_perfect_precision():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts("count", "linesCount")
assert precise_tokens == proposed_tokens == 1
assert gt_tokens == 2
def test_empty_proposed_low_recall():
precise_tokens, proposed_tokens, gt_tokens = subtoken_counts("", "linesCount")
assert precise_tokens == proposed_tokens == 0
assert gt_tokens == 2
def test_full_subtoken_score():
res_dict = subtoken_score_on_lines(
[["VAR_1 linesCount | VAR_2 words"]], ["VAR_1 countLines | VAR_2 uniqueWords"]
)
assert res_dict["precision"] == 1.0, res_dict
assert abs(res_dict["recall"] - 0.75) < 0.0001, res_dict
assert abs(res_dict["F1"] - 0.83333333) < 0.0001, res_dict
def test_extra_tokens():
res_dict = subtoken_score_on_lines(
[["VAR_1 linesCount | VAR_2 words | VA RandomStuff"]],
["VAR_1 countLines | VAR_2 uniqueWords"],
)
assert res_dict["precision"] == 1.0, res_dict
assert abs(res_dict["recall"] - 0.75) < 0.0001, res_dict
assert abs(res_dict["F1"] - 0.83333333) < 0.0001, res_dict
def test_full_subtoken_score_subtoken_level():
res_dict = subtoken_score_on_lines_subtoken_level(
["VAR_1 linesCount | VAR_2 words"], ["VAR_1 countLines | VAR_2 uniqueWords"]
)
assert res_dict["precision"] == 1.0, res_dict
assert abs(res_dict["recall"] - 0.75) < 0.0001, res_dict
assert abs(res_dict["F1"] - 0.85714285) < 0.0001, res_dict
def test_full_subtoken_score_low_precision():
res_dict = subtoken_score_on_lines(
[["VAR_1 linesCount | VAR_2 sentences"]],
["VAR_1 countLines | VAR_2 uniqueWords"],
)
assert (
res_dict["precision"] == res_dict["recall"] == res_dict["F1"] == 0.5
), res_dict
assert res_dict["exact_match"] == 0, res_dict
def test_full_subtoken_score_snakecase_vs_camlcase():
res_dict = subtoken_score_on_lines(
[["VAR_1 lines_count | VAR_2 sentences"]],
["VAR_1 countLines | VAR_2 uniqueWords"],
)
assert (
res_dict["precision"] == res_dict["recall"] == res_dict["F1"] == 0.5
), res_dict
assert res_dict["exact_match"] == 0, res_dict
def test_full_subtoken_score_case_insensitive():
res_dict = subtoken_score_on_lines([["VAR_1 Lines_count"]], ["VAR_1 CountLines"])
assert (
res_dict["precision"] == res_dict["recall"] == res_dict["F1"] == 1.0
), res_dict
def test_full_subtoken_score_takes_best_beam():
res_dict = subtoken_score_on_lines(
[["VAR_1 linesCount | VAR_2 sentences", "VAR_1 linesCount | VAR_2 words"]],
["VAR_1 countLines | VAR_2 uniqueWords"],
)
assert res_dict["precision"] == 1.0, res_dict
assert abs(res_dict["recall"] - 0.75) < 0.0001, res_dict
assert abs(res_dict["F1"] - 0.83333333) < 0.0001, res_dict
|
CodeGen-main
|
codegen_sources/model/src/evaluation/test_subtoken_score.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import subprocess
import json
import typing as tp
from concurrent.futures import ProcessPoolExecutor
import sys
import os
from logging import getLogger
from pathlib import Path
from codegen_sources import code_runners
from codegen_sources.code_runners import test_runners
from ..utils import (
REPO_ROOT,
read_file_lines,
get_java_bin_path,
TOK_AVOID_NEWLINE,
)
from ..constants import EXT
from codegen_sources.code_runners.utils import MAX_VIRTUAL_MEMORY, limit_virtual_memory
COMPILED = "#Compiled"
COMPILATION = "#Compilation"
FAILED_IR_COMP_ = "failed_ir_comp:"
EVOSUITE = "evosuite"
GFG = "GfG"
CODE_NET = "CodeNet"
sys.path.append(str(REPO_ROOT))
print("adding to path", str(REPO_ROOT))
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.test_generation.evosuite_tests_translators.evosuite_to_python import (
EvosuiteToPython,
)
from codegen_sources.test_generation.evosuite_tests_translators.evosuite_to_cpp import (
EvosuiteToCpp,
)
EVAL_SCRIPT_FOLDER = {
"test": REPO_ROOT.joinpath("data/transcoder_evaluation_gfg"),
"valid": REPO_ROOT.joinpath("data/transcoder_evaluation_gfg"),
}
CODENET_EVAL_FOLDER = REPO_ROOT.joinpath("data/CodeNet_eval_dataset/")
TOFILL = {
"python": "#TOFILL",
"java": "//TOFILL",
"cpp": "//TOFILL",
"go": "//TOFILL",
"rust": "//TOFILL",
}
primitive_types = {"short", "int", "long", "float", "double", "boolean", "char"}
EVOSUITE_TESTS_TRANSCODER_PATH = (
REPO_ROOT.joinpath("data")
.joinpath("evosuite_unit_tests")
.joinpath("transcoder_test_set.json")
)
EVALUATORS = {
"cpp": test_runners.CppInputOutputEvaluator(),
"rust": test_runners.RustInputOutputEvaluator(),
"go": test_runners.GoInputOutputEvaluator(),
"java": test_runners.JavaInputOutputEvaluator(),
}
logger = getLogger()
def eval_state(proc: tp.Any, proc_name: str) -> tp.Tuple[str, tp.Optional[str]]:
results = ""
stderr = b""
try:
try:
result, stderr = proc.communicate(timeout=120)
except subprocess.TimeoutExpired:
c = (
"kill `ps aux | grep '"
+ proc_name
+ "' | grep -v jupyter | grep -v grep | awk '{print($2)}'`"
)
subprocess.run(
c, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
return "timeout", None
results = result.decode("utf8", errors="replace")
success, n_test = results.split("#Results:")[-1].split(",")
if int(success) == int(n_test):
return "success", None
else:
return "failure", result.decode("utf-8", errors="replace")
except KeyboardInterrupt:
raise
except:
if COMPILATION not in results or COMPILED in results:
return "error", stderr.decode("utf-8", errors="replace")
else:
return "compilation", stderr.decode("utf-8", errors="replace")
def run_rust_program(
script_path: str, i: int
) -> tp.Tuple[tp.Tuple[str, tp.Optional[str]], int]:
code_path = Path(script_path)
bin_path = str(code_path.with_suffix("")) + "_rust"
try:
code_runners.compile_rust(
code_path, compilation_timeout=30, output_path=Path(bin_path),
)
except code_runners.CompilationError as e:
return ("compilation", str(e)), i
except code_runners.Timeout:
return ("compilation", "timeout"), i
test_cmd = f"{limit_virtual_memory(MAX_VIRTUAL_MEMORY)}; {bin_path}"
proc = subprocess.Popen(
test_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
executable="/bin/bash",
)
res = eval_state(proc, f"{bin_path}")
return res, i
def run_go_program(script_path, i):
code_path = Path(script_path)
bin_path = str(code_path.with_suffix("")) + "_go"
try:
code_runners.compile_go(
code_path,
compilation_timeout=30,
output_path=Path(bin_path),
run_go_imports=True,
)
except code_runners.CompilationError as e:
return ("compilation", str(e)), i
except code_runners.Timeout:
return ("compilation", "timeout"), i
test_cmd = f"{limit_virtual_memory(MAX_VIRTUAL_MEMORY)}; {bin_path}"
proc = subprocess.Popen(
test_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
executable="/bin/bash",
)
res = eval_state(proc, f"{bin_path}")
return res, i
def run_python_program(script_path, i):
proc = subprocess.Popen(
f"{limit_virtual_memory(MAX_VIRTUAL_MEMORY)}; python {script_path}",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
executable="/bin/bash",
)
res = eval_state(proc, f"python {script_path}")
return res, i
def run_java_program(script_path, i):
folder = os.path.dirname(script_path)
name = os.path.basename(script_path).split(".")[0]
proc = subprocess.Popen(
f"{limit_virtual_memory(MAX_VIRTUAL_MEMORY)}; cd {folder} && echo '{COMPILATION}'; {os.path.join(get_java_bin_path(), 'javac')} {name}.java && echo '{COMPILED}' && {os.path.join(get_java_bin_path(), 'java')} {name}",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
executable="/bin/bash",
)
res = eval_state(proc, f"java {name}")
return res, i
def run_cpp_program(script_path, i):
folder = os.path.dirname(script_path)
name = os.path.basename(script_path).split(".")[0]
proc = subprocess.Popen(
f"{limit_virtual_memory(MAX_VIRTUAL_MEMORY)}; cd {folder} && echo '{COMPILATION}'; g++ {name}.cpp -o {name}_cpp && echo '{COMPILED}' && ./{name}_cpp",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
executable="/bin/bash",
)
res = eval_state(proc, f"{name}_cpp")
return res, i
def make_arg_string(argtype, argval):
if "[" not in argtype:
return f"{argtype} {argval}"
dim = argtype.count("[")
argtype = argtype.replace("[", "").replace("]", "")
return f'{argtype} {argval} {"[ ]" * dim}'
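# Illustrative example (editor addition, not part of the original CodeGen file):
# for array types, the brackets are moved after the parameter name so the string
# can be used directly in a generated Java/C++ signature.
def _example_make_arg_string() -> None:
    assert make_arg_string("int", "param0") == "int param0"
    assert make_arg_string("int[]", "param1") == "int param1 [ ]"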
def convert_filled_arguments(script_model, f, lang, lang_processor, f_name=None):
assert lang in {"java", "cpp"}
header = []
arguments_gold = lang_processor.extract_arguments(script_model)
return_type_gold = get_return_type(script_model)
arguments_filled = lang_processor.extract_arguments(f)
return_type_filled = get_return_type(f)
if arguments_gold[0] == arguments_filled[0]:
return None
if f_name is None:
f_name = lang_processor.get_function_name(f)
argument_types_gold = [t.strip() for t in arguments_gold[0]]
arguments_strings = [
make_arg_string(arg_type, f"param{i}")
for i, arg_type in enumerate(argument_types_gold)
]
new_function_lines = [
f'static {return_type_gold} f_filled({", ".join(arguments_strings)})',
"{",
]
new_params_strings = []
for param_index, (param_type_gold, param_type_filled) in enumerate(
zip(argument_types_gold, arguments_filled[0])
):
param_type_filled = param_type_filled.strip()
param_type_gold = param_type_gold.strip()
if param_type_filled == param_type_gold:
new_params_strings.append(f"param{param_index}")
elif lang == "cpp":
if "vector" in param_type_filled:
if "int" not in argument_types_gold:
return None
                ints_indices = [
                    i for i, t in enumerate(argument_types_gold) if t == "int"
                ]
if any([i > param_index for i in ints_indices]):
array_length_arg = min([i for i in ints_indices if i > param_index])
else:
array_length_arg = min(ints_indices)
new_function_lines.append(
f'{param_type_filled.replace("&", "")} vect_param{param_index}(param{param_index}, param{param_index} + param{array_length_arg});'
)
new_params_strings.append(f"vect_param{param_index}")
elif param_type_filled == "string" and "char" in param_type_gold:
new_function_lines.append(
f'{param_type_filled.replace("&", "")} string_param{param_index}(param{param_index});'
)
new_params_strings.append(f"string_param{param_index}")
elif param_type_gold == "string" and "char" in param_type_filled:
new_function_lines.append(
f"char char_arr_param{param_index}[param{param_index}.length() + 1];"
)
new_function_lines.append(
f"strcopy(char_arr_param{param_index}, param{param_index}.c_str());"
)
new_params_strings.append(f"char_arr_param{param_index}")
else:
new_params_strings.append(f"({param_type_filled}) param{param_index}")
elif lang == "java":
if (
param_type_filled == "String" and "char" in param_type_gold
) or param_type_filled == transform_to_java_object_type(param_type_gold):
new_params_strings.append(
f"{param_type_filled}.valueOf(param{param_index})"
)
header.append("#include <cstring>")
elif param_type_gold == "String":
new_params_strings.append(f"param{param_index}.toCharArray()")
else:
new_params_strings.append(f"({param_type_filled}) param{param_index}")
else:
return None
inner_function_name = "f_filled_inner"
outer_f_return_string = f'{inner_function_name}({",".join(new_params_strings)})'
if return_type_filled != return_type_gold:
outer_f_return_string = f"({return_type_gold}) {outer_f_return_string}"
new_function_lines += [f"return {outer_f_return_string};", "}"]
f = lang_processor.detokenize_code(f.replace(f_name, inner_function_name))
return "\n".join(list(set(header))) + script_model.replace(
TOFILL[lang], "\n".join([f, "\n"] + new_function_lines)
)
def submit_codenet_functions(functions_list, id, lang, start_lang, data_folder):
runner = EVALUATORS[lang]
id = id.rstrip()
results_list = []
problem_id = id.split("_")[0]
inputs_path = data_folder.joinpath(
f"{'-'.join(sorted([start_lang, lang]))}_{lang}", "inputs", f"{problem_id}.in"
)
if not inputs_path.exists():
# print(f"{inputs_path} not found")
return [return_script_not_found()], id
try:
input_outputs = eval(open(inputs_path).read())
except KeyboardInterrupt:
raise
except Exception as e:
logger.warning(f"WARN: {type(e)} {e}: could not parse file {inputs_path}")
return [return_script_not_found()], id
full_solution = open(
data_folder.joinpath("full_solutions", lang, f"{problem_id}.{lang}")
).read()
ref_solution = (
open(data_folder.joinpath("reference_functions", lang, f"{id}.{lang}"))
.read()
.strip()
)
if functions_list[0].startswith(FAILED_IR_COMP_):
return [("Failed IR computation", None)], id
if ref_solution not in full_solution:
return (
[
(
"Could not replace",
f"could not find: {ref_solution}\nin \n{full_solution}",
)
],
id,
)
inputs = input_outputs[::2]
outputs = input_outputs[1::2]
for try_id, f_fill in enumerate(functions_list):
replaced_code = full_solution.replace(ref_solution, f_fill)
assert replaced_code != ref_solution
result = runner.check_outputs(replaced_code, inputs, outputs)
results_list.append(
((result[0], None) if ":" not in result[0] else result[0].split(":", 1))
)
if result[0] == "success":
return results_list, id
return results_list, id
def submit_evosuite_functions(
functions_list, id, lang, test_dictionary,
):
assert lang in {"cpp", "python"}, f"{lang} is not supported for evosuite tests"
if lang == "cpp":
test_runner = test_runners.CppEvosuiteTestRunner(
timeout=30, compilation_timeout=30
)
else:
assert lang == "python"
test_runner = test_runners.PythonEvosuiteTestRunner(timeout=30)
lang_processor = LangProcessor.processors[lang]()
id = id.rstrip()
if id not in test_dictionary or test_dictionary[id] == "missing":
return [return_script_not_found()], id
test = test_dictionary[id]
results_list = []
for try_id, f_fill in enumerate(functions_list):
f = f_fill.rstrip()
result = test_runner.get_tests_results(f, test)
results_list.append((result[0], None))
if result[0] == "success":
return results_list, id
return results_list, id
def detokenize_before_running(f, lang_processor, tokenization_mode):
if tokenization_mode == "fastbpe":
f = lang_processor.detokenize_code(f)
else:
f = f.replace(TOK_AVOID_NEWLINE, "\n")
return f
def submit_functions(
functions_list, id, ref, lang, outfolder, script_folder, retry_mismatching_types,
):
lang_processor = LangProcessor.processors[lang]()
results_list = []
i = id.rstrip()
for try_id, f_fill in enumerate(functions_list):
f = f_fill.strip()
script_model_path = os.path.join(script_folder, f"{lang}/{i}{EXT[lang]}")
if not os.path.exists(script_model_path):
return [return_script_not_found()], i
script_model = open(script_model_path, "r", encoding="utf-8").read()
if f.startswith(FAILED_IR_COMP_):
return [("Failed IR computation", None)], i
try:
f_name = lang_processor.get_function_name(lang_processor.tokenize_code(f))
f = f.replace(f_name, "f_filled")
except KeyboardInterrupt:
raise
except:
results_list.append(("error", "Could not replace function name"))
continue
try:
if f_fill.strip() == ref.strip() or lang_processor.tokenize_code(
f_fill
) == lang_processor.tokenize_code(ref):
results_list.append(("success", "identical to gold"))
return results_list, i
except KeyboardInterrupt:
raise
except Exception as e:
logger.info(f"Error {type(e)}: {e} when tokenizing reference {ref}")
script = script_model.replace(TOFILL[lang], f)
if lang == "python":
script = f"import numpy as np \nimport math\nfrom math import *\nimport collections\nfrom collections import *\nimport heapq\nimport itertools\nimport random\nimport sys\n\n{script}"
script_path = f"{outfolder}/{i}{EXT[lang]}"
open(script_path, "w", encoding="utf-8").write(script)
run_pg = globals()[f"run_{lang}_program"]
result, _ = run_pg(script_path, i)
if result[0] == "success":
results_list.append(result)
return results_list, i
elif retry_mismatching_types and lang in {"cpp", "java"}:
try:
script_transform_args = convert_filled_arguments(
script_model, f_fill, lang, lang_processor, f_name=f_name
)
except KeyboardInterrupt:
raise
except:
script_transform_args = None
if script_transform_args is not None:
open(script_path, "w", encoding="utf-8").write(script_transform_args)
run_pg = globals()[f"run_{lang}_program"]
result2, _ = run_pg(script_path, i)
if result2[0] == "success":
results_list.append(result2)
return results_list, i
else:
result = (
result2[0],
"".join(
[
result[1] if result[1] else "",
f"|| second run handling types mismatch: ## function ## {script_transform_args} ## output ## {result2[1]}",
]
),
)
results_list.append(result)
return results_list, i
def eval_function_output(
ref_path,
hyp_paths,
id_path,
lang1,
lang2,
outfolder,
script_folder,
retry_mismatching_types,
tokenization_mode,
tests_type,
evosuite_tests=None,
):
assert tests_type in {
EVOSUITE,
GFG,
CODE_NET,
}, f"Test type {tests_type} not recognized"
functions = list(zip(*[read_file_lines(path) for path in hyp_paths]))
ids = read_file_lines(id_path)
ids_to_num = {id_str.strip(): i for i, id_str in enumerate(ids)}
refs = read_file_lines(ref_path)
assert len(functions) == len(ids), f"{len(functions), len(ids)}"
assert len(functions) == len(refs), f"{len(functions), len(refs)}"
lang = lang2.split("_")[0]
jobs = []
lang_processor = LangProcessor.processors[lang]()
executor = ProcessPoolExecutor()
for hyp_list, i, r in zip(functions, ids, refs):
r = detokenize_before_running(
r, lang_processor=lang_processor, tokenization_mode=tokenization_mode
)
hyp_list = [
detokenize_before_running(
f, lang_processor=lang_processor, tokenization_mode=tokenization_mode
)
for f in hyp_list
]
if tests_type == EVOSUITE:
jobs.append(
executor.submit(
submit_evosuite_functions, hyp_list, i, lang, evosuite_tests[lang],
)
)
elif tests_type == GFG:
jobs.append(
executor.submit(
submit_functions,
hyp_list,
i,
r,
lang,
outfolder,
script_folder,
retry_mismatching_types,
)
)
elif tests_type == CODE_NET:
jobs.append(
executor.submit(
submit_codenet_functions,
hyp_list,
i,
lang,
lang1.split("_")[0],
script_folder,
)
)
results_stats = {
"success": 0,
"failure": 0,
"error": 0,
"timeout": 0,
"script_not_found": 0,
"identical_gold": 0,
}
results = ["" for _ in range(len(ids))]
for job in jobs:
results_list, i = job.result()
# print(results_list)
nb_success = sum([r[0] == "success" for r in results_list])
nb_identical = sum(
[r[0] == "success" and r[1] == "identical to gold" for r in results_list]
)
assert nb_success <= 1, "Should stop after first success"
if nb_success > 0:
results_stats["success"] += 1
if nb_identical > 0:
results_stats["identical_gold"] += 1
else:
results_stats[results_list[0][0]] = (
results_stats.get(results_list[0][0], 0) + 1
)
results[ids_to_num[i.strip()]] = []
for result, stderr in results_list:
if stderr is not None:
stderr = stderr.replace("\n", " ")
else:
stderr = "None"
results[ids_to_num[i.strip()]].append(f"{result} : {stderr}")
results_stats["total"] = len(functions)
results_stats["total_evaluated"] = (
len(functions) - results_stats["script_not_found"]
)
results_stats = {k: results_stats[k] for k in sorted(results_stats.keys())}
return results_stats, results
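# Sketch of the aggregated outputs (illustrative numbers only): the first return
# value counts outcomes over all ids, the second keeps the per-beam result strings
# in the original id order.
#
#     results_stats, results = eval_function_output(...)
#     # results_stats -> {"error": 3, "failure": 10, "identical_gold": 2,
#     #                   "script_not_found": 1, "success": 40, "timeout": 4,
#     #                   "total": 60, "total_evaluated": 59}
#     # results[k]    -> ["success : None", "failure : AssertionError ..."]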
def load_evosuite_transcoder_tests():
cpp_test_translator = EvosuiteToCpp()
python_test_translator = EvosuiteToPython()
tests = {"java": {}, "java_scaffolding": {}, "python": {}, "cpp": {}}
with open(EVOSUITE_TESTS_TRANSCODER_PATH, "r") as f:
for l in f:
json_line = json.loads(l)
if json_line["tests_strings"] == "missing":
continue
tests["java"][json_line["TARGET_CLASS"]] = json_line["tests_strings"]
tests["java_scaffolding"][json_line["TARGET_CLASS"]] = json_line[
"scaffoldings_strings"
]
python_test = python_test_translator.translate(json_line["tests_strings"])
if not python_test_filter(python_test):
continue
tests["python"][json_line["TARGET_CLASS"]] = python_test
cpp_test = cpp_test_translator.translate(json_line["tests_strings"])
tests["cpp"][json_line["TARGET_CLASS"]] = cpp_test
return tests
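# Shape of the returned dictionary (illustrative keys, assuming the jsonl file of
# EvoSuite tests is available):
#
#     tests = load_evosuite_transcoder_tests()
#     # tests["java"]["SOME_TARGET_CLASS"]             -> Java test class source
#     # tests["java_scaffolding"]["SOME_TARGET_CLASS"] -> matching scaffolding
#     # tests["python"]["SOME_TARGET_CLASS"]           -> translated test, kept only
#     #                                                   when python_test_filter passes
#     # tests["cpp"]["SOME_TARGET_CLASS"]              -> translated C++ test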
def python_test_filter(python_test):
return (
python_test.count("try ") == 0
and python_test.count("catch(") == 0
and python_test.count("assert ") > 0
)
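# Illustrative behaviour: keep only translated tests that contain at least one
# "assert " and no leftover "try "/"catch(" constructs.
#     python_test_filter("assert f_filled ( 2 ) == 4")    -> True
#     python_test_filter("try : pass catch( Exception )") -> False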
def return_script_not_found():
return "script_not_found", None
def transform_to_java_object_type(t):
if t not in primitive_types:
return t
if t == "int":
return "Integer"
if t == "char":
return "Character"
return t.capitalize()
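# Illustrative mappings (doctest-style, not executed, assuming the usual
# primitive_types set defined earlier in this file):
#     transform_to_java_object_type("int")    -> "Integer"
#     transform_to_java_object_type("char")   -> "Character"
#     transform_to_java_object_type("long")   -> "Long"
#     transform_to_java_object_type("String") -> "String"   # non-primitives unchanged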
def get_return_type(tokenized_java):
return tokenized_java.split("(")[0].split()[-2]
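# Example (illustrative): for a tokenized signature such as
#     "static int f_filled ( int [ ] arr , int n ) { ..."
# get_return_type returns "int", i.e. the token just before the function name.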
def init_eval_scripts_folder(data_set, lang1, lang2, params):
params.eval_scripts_folders[(lang1, lang2, data_set)] = os.path.join(
params.eval_scripts_root, "{0}-{1}.{2}".format(lang1, lang2, data_set),
)
subprocess.Popen(
"mkdir -p %s" % params.eval_scripts_folders[(lang1, lang2, data_set)],
shell=True,
).wait()
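# Illustrative effect (hypothetical params object): after
#     init_eval_scripts_folder("test", "java", "python", params)
# params.eval_scripts_folders[("java", "python", "test")] points to
# "<eval_scripts_root>/java-python.test" and that directory has been created.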
|
CodeGen-main
|
codegen_sources/model/src/evaluation/comp_acc_computation.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import os
import subprocess
import time
import typing as tp
from collections import OrderedDict, defaultdict
from concurrent.futures.process import ProcessPoolExecutor
from logging import getLogger
from pathlib import Path
import fastBPE
import numpy as np
import torch
from sklearn.metrics import roc_auc_score, average_precision_score
from codegen_sources.IR_tools.utils_ir import code_to_ir, ERROR_MESSAGE
from codegen_sources.preprocessing.lang_processors import LangProcessor, IRProcessor
from .comp_acc_computation import (
load_evosuite_transcoder_tests,
eval_function_output,
GFG,
FAILED_IR_COMP_,
init_eval_scripts_folder,
CODENET_EVAL_FOLDER,
EVAL_SCRIPT_FOLDER,
)
from ..data.loader import DATASET_SPLITS
from ..trainer import get_programming_language_name
from ..utils import (
to_cuda,
restore_segmentation,
concat_batches,
show_batch,
add_noise,
convert_to_text,
REPO_ROOT,
restore_segmentation_sentence,
read_file_lines,
)
from .subtoken_score import run_subtoken_score
import sys
from ..vizualization_utils import vizualize_do_files, vizualize_translated_files
sys.path.append(str(REPO_ROOT))
PathLike = tp.Union[Path, str]
SRC_ST_LANGS = "java"
TARGET_ST_LANG = {"cpp", "python"}
EVAL_OBF_PROBAS: tp.List[float] = []
BLEU_SCRIPT_PATH = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "multi-bleu.perl"
)
EVAL_DATASET_SPLITS = [ds for ds in DATASET_SPLITS if ds != "train"]
assert os.path.isfile(BLEU_SCRIPT_PATH)
ROOT_FOLDER = Path(__file__).parents[4]
logger = getLogger()
class Evaluator(object):
def __init__(self, trainer, data, params) -> None:
"""
Initialize evaluator.
"""
self.trainer = trainer
self.data = data
self.dico = data["dico"]
self.params = params
# create directory to store hypotheses, and reference files for BLEU evaluation
if self.params.is_master:
params.hyp_path = os.path.join(params.dump_path, "hypotheses")
subprocess.Popen("mkdir -p %s" % params.hyp_path, shell=True).wait()
params.eval_scripts_root = os.path.join(params.dump_path, "eval_scripts")
subprocess.Popen(
"mkdir -p %s" % params.eval_scripts_root, shell=True
).wait()
self.params.ref_paths = {}
self.params.id_paths = {}
self.params.eval_scripts_folders = {}
if params.eval_bleu or params.eval_subtoken_score:
self.create_reference_files()
if self.params.eval_st:
logger.info("Loading evosuite tests")
self.evosuite_tests_dico = load_evosuite_transcoder_tests()
else:
self.evosuite_tests_dico = None
def get_iterator(
self, data_set, lang1, lang2=None, stream=False, span=None, subsample=1000,
):
"""
Create a new iterator for a dataset.
"""
assert data_set in EVAL_DATASET_SPLITS
assert lang1 in self.params.langs
assert (
lang2 is None
or lang2 in self.params.langs
or (lang1, lang2) in self.params.classif_steps
)
assert stream is False or lang2 is None
n_sentences = self.params.n_sentences_eval
if lang2 is None or lang2 == lang1:
key = lang1 if span is None else (lang1, span)
if stream and lang2 is None:
iterator = self.data["mono_stream"][key][data_set].get_iterator(
shuffle=False, subsample=subsample
)
else:
iterator = self.data["mono"][key][data_set].get_iterator(
tokens_per_batch=self.params.eval_tokens_per_batch,
max_batch_size=-1,
shuffle=False,
group_by_size=True,
n_sentences=n_sentences,
)
else:
assert stream is False
_lang1, _lang2 = (lang1, lang2) if lang1 < lang2 else (lang2, lang1)
key = (_lang1, _lang2) if span is None else (_lang1, _lang2, span)
iterator = self.data["para"][key][data_set].get_iterator(
shuffle=False,
group_by_size=True,
n_sentences=n_sentences,
tokens_per_batch=self.params.eval_tokens_per_batch,
max_batch_size=-1,
)
for batch in iterator:
yield batch if lang2 is None or lang1 == lang2 or lang1 <= lang2 else batch[
::-1
]
def create_reference_files(self):
"""
Create reference files for BLEU evaluation.
"""
params = self.params
for key in list(self.data["para"].keys()) + [
(l, l) for l in self.params.eval_computation_pivot_self
]:
span = None
if len(key) == 3:
lang1, lang2, span = key
else:
assert len(key) == 2
lang1, lang2 = key
assert lang1 < lang2 or (
lang1 == lang2 and lang1 in self.params.eval_computation_pivot_self
), (lang1, lang2)
for data_set in EVAL_DATASET_SPLITS:
init_eval_scripts_folder(data_set, lang1, lang2, params)
init_eval_scripts_folder(data_set, lang2, lang1, params)
# define data paths
lang1_path = os.path.join(
params.hyp_path,
"ref.{0}-{1}.{2}.txt".format(lang2, lang1, data_set),
)
lang2_path = os.path.join(
params.hyp_path,
"ref.{0}-{1}.{2}.txt".format(lang1, lang2, data_set),
)
spans_path = os.path.join(
params.hyp_path,
"ref.{0}-{1}-{3}.{2}.txt".format(lang1, lang2, span, data_set),
)
id_path = os.path.join(
params.hyp_path,
"ids.{0}-{1}.{2}.txt".format(lang1, lang2, data_set),
)
# store data paths
params.ref_paths[(lang2, lang1, data_set)] = lang1_path
params.ref_paths[(lang1, lang2, data_set)] = lang2_path
params.id_paths[(lang1, lang2, data_set)] = id_path
params.id_paths[(lang2, lang1, data_set)] = id_path
# text sentences
lang1_txt = []
lang2_txt = []
id_txt = []
spans = []
has_sent_ids = None
# convert to text
for i, batch in enumerate(
self.get_iterator(data_set, lang1, lang2, span=span)
):
if len(batch) == 2:
(sent1, len1, id1, lenid1), (sent2, len2, id2, lenid2) = batch
elif len(batch) == 4:
sent1, len1, id1, lenid1 = batch
sent2, len2, id2, lenid2 = batch
else:
(
(sent1, len1, id1, lenid1),
(sent2, len2, id2, lenid2),
(span_batch, len_span, _, _),
) = batch
spans.extend(list(span_batch.T))
lang1_txt.extend(convert_to_text(sent1, len1, self.dico, params))
lang2_txt.extend(convert_to_text(sent2, len2, self.dico, params))
has_sent_ids = id1 is not None and id2 is not None
if has_sent_ids:
assert id1.equal(id2) and lenid1.equal(lenid2)
id_txt.extend(convert_to_text(id1, lenid1, self.dico, params))
# replace <unk> by <<unk>> as these tokens cannot be counted in BLEU
lang1_txt = [x.replace("<unk>", "<<unk>>") for x in lang1_txt]
lang2_txt = [x.replace("<unk>", "<<unk>>") for x in lang2_txt]
# export hypothesis
with open(lang1_path, "w", encoding="utf-8") as f:
f.write("\n".join(lang1_txt) + "\n")
with open(lang2_path, "w", encoding="utf-8") as f:
f.write("\n".join(lang2_txt) + "\n")
if len(spans) > 0:
with open(spans_path, "w", encoding="utf-8") as f:
f.write("\n".join([str(s) for s in spans]) + "\n")
# restore original segmentation
restore_segmentation(
lang1_path,
tokenization_mode=params.tokenization_mode,
single_line=True,
sentencepiece_model_path=params.sentencepiece_model_path,
)
restore_segmentation(
lang2_path,
tokenization_mode=params.tokenization_mode,
single_line=True,
sentencepiece_model_path=params.sentencepiece_model_path,
)
if has_sent_ids:
with open(id_path, "w", encoding="utf-8") as f:
f.write("\n".join(id_txt) + "\n")
restore_segmentation(
id_path,
tokenization_mode=params.tokenization_mode,
single_line=True,
sentencepiece_model_path=params.sentencepiece_model_path,
)
def mask_out(self, x, lengths, rng):
"""
        Decide which random words to mask out.
We specify the random generator to ensure that the test is the same at each epoch.
"""
params = self.params
slen, bs = x.size()
# words to predict - be sure there is at least one word per sentence
to_predict = rng.rand(slen, bs) <= params.word_pred
to_predict[0] = 0
for i in range(bs):
to_predict[lengths[i] - 1 :, i] = 0
if not np.any(to_predict[: lengths[i] - 1, i]):
v = rng.randint(1, lengths[i] - 1)
to_predict[v, i] = 1
pred_mask = torch.from_numpy(to_predict.astype(np.uint8))
pred_mask = pred_mask == 1
# generate possible targets / update x input
_x_real = x[pred_mask]
_x_mask = _x_real.clone().fill_(params.mask_index)
x = x.masked_scatter(pred_mask, _x_mask)
assert 0 <= x.min() <= x.max() < params.n_words
assert x.size() == (slen, bs)
assert pred_mask.size() == (slen, bs)
return x, _x_real, pred_mask
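    # Sketch of mask_out (illustrative shapes): for x of shape (slen, bs) it returns
    # (x_masked, y, pred_mask) where pred_mask is a (slen, bs) boolean mask, y holds
    # the original tokens at the masked positions, and every sentence is guaranteed
    # at least one masked position strictly inside it (never the first position nor
    # anything from the final position onward).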
def run_all_evals(self, trainer):
"""
Run all evaluations.
"""
params = self.params
scores = OrderedDict({"epoch": trainer.epoch})
deobf_probas_to_eval = [1 - x for x in EVAL_OBF_PROBAS]
deobfuscation_proba = 1 - params.obf_proba
if deobfuscation_proba not in deobf_probas_to_eval:
deobf_probas_to_eval.append(deobfuscation_proba)
with torch.no_grad():
for data_set in EVAL_DATASET_SPLITS:
# causal prediction task (evaluate perplexity and accuracy)
for lang1, lang2 in params.clm_steps:
self.evaluate_clm(scores, data_set, lang1, lang2)
# prediction task (evaluate perplexity and accuracy)
for lang1, lang2 in params.mlm_steps:
self.evaluate_mlm(scores, data_set, lang1, lang2)
# machine translation task (evaluate perplexity and accuracy)
for lang1 in sorted(params.eval_computation_pivot_self):
self.evaluate_mt(
scores,
data_set,
lang1,
lang1,
params.eval_bleu,
False,
True,
params.eval_subtoken_score,
span=None,
)
set_keys = set(
params.mt_steps
+ [(l1, l2) for l1, langs2 in params.st_steps for l2 in langs2]
+ [(l2, l1) for l1, langs2 in params.st_steps for l2 in langs2]
+ [
(l2_1, l2_2)
for l1, langs2 in params.st_steps
for l2_1 in langs2
for l2_2 in langs2
if l2_1 != l2_2
]
+ params.mt_spans_steps
+ params.eval_computation
+ params.eval_computation_pivot
)
if params.eval_bt_pairs:
set_keys |= set([(l2, l3) for _, l2, l3 in params.bt_steps])
for i_set, keys in enumerate(sorted(set_keys)):
print(f"Evaluating pair {i_set + 1} / {len(set_keys)}")
spans = None
assert len(keys) == 2 or len(keys) == 3
lang1, lang2 = keys[0], keys[1]
if len(keys) == 3:
spans = keys[2]
self.evaluate_mt(
scores,
data_set,
lang1,
lang2,
params.eval_bleu,
(lang1, lang2) in params.eval_computation,
(lang1, lang2) in params.eval_computation_pivot,
params.eval_subtoken_score,
spans,
eval_ir_similarity=(lang1, lang2) in params.eval_ir_similarity,
)
if self.params.eval_denoising:
for lang in sorted(set(params.ae_steps)):
assert lang in params.langs, lang
self.evaluate_mt(
scores,
data_set,
lang,
lang,
eval_bleu=False,
eval_computation=False,
eval_computation_pivot=False,
eval_subtoken_score=False,
span=None,
)
# machine translation task (evaluate perplexity and accuracy)
for lang1, lang2 in sorted(set(params.do_steps)):
assert len(deobf_probas_to_eval) == len(
set(deobf_probas_to_eval)
), f"deobf_probas_to_eval should have no duplicates, was {deobf_probas_to_eval}"
self.evaluate_mt(
scores,
data_set,
lang1,
lang2,
params.eval_bleu,
eval_computation=False,
eval_computation_pivot=False,
eval_subtoken_score=params.eval_subtoken_score,
span=None,
deobfuscate=True,
deobfuscate_probas=deobf_probas_to_eval,
)
# prediction task (evaluate perplexity and accuracy)
for lang1, lang2 in sorted(params.classif_steps):
self.evaluate_classif(scores, data_set, lang1, lang2)
# report average metrics per language
if len(params.do_steps) > 0 and params.is_master:
for obfuscation_proba in deobf_probas_to_eval:
for score_type in ["precision", "recall", "F1"]:
scores[
"%s_obf_proba_%s_mt_subtoken_%s"
% (data_set, 1 - obfuscation_proba, score_type)
] = np.mean(
[
scores[
"%s_%s_mt_subtoken_%s"
% (
data_set,
get_l1l2_string(
lang1, lang2, obfuscation_proba
),
score_type,
)
]
for lang1, lang2 in params.do_steps
]
)
_clm_mono = [l1 for (l1, l2) in params.clm_steps if l2 is None]
if len(_clm_mono) > 0:
scores["%s_clm_ppl" % data_set] = np.mean(
[
scores["%s_%s_clm_ppl" % (data_set, lang)]
for lang in _clm_mono
]
)
scores["%s_clm_acc" % data_set] = np.mean(
[
scores["%s_%s_clm_acc" % (data_set, lang)]
for lang in _clm_mono
]
)
_mlm_mono = [l1 for (l1, l2) in params.mlm_steps if l2 is None]
if len(_mlm_mono) > 0:
scores["%s_mlm_ppl" % data_set] = np.mean(
[
scores["%s_%s_mlm_ppl" % (data_set, lang)]
for lang in _mlm_mono
]
)
scores["%s_mlm_acc" % data_set] = np.mean(
[
scores["%s_%s_mlm_acc" % (data_set, lang)]
for lang in _mlm_mono
]
)
if params.is_master:
logger.info(f"On GPU {params.global_rank}, scores computed \n\n")
return scores
else:
return {}
def eval_mode(self):
[enc.eval() for enc in self.encoder]
if self.decoder is not None:
[dec.eval() for dec in self.decoder]
def evaluate_clm(self, scores, data_set, lang1, lang2):
"""
Evaluate perplexity and next word prediction accuracy.
"""
params = self.params
assert data_set in EVAL_DATASET_SPLITS
assert lang1 in params.langs
assert lang2 in params.langs or lang2 is None
model = self.model[0] if params.encoder_only else self.decoder[0]
model.eval()
model = model.module if params.multi_gpu else model
lang1_id = params.lang2id[lang1]
lang2_id = params.lang2id[lang2] if lang2 is not None else None
l1l2 = lang1 if lang2 is None else f"{lang1}-{lang2}"
n_words = 0
xe_loss = 0
n_valid = 0
n_bytes = 0
valid_bytes = 0
for batch in self.get_iterator(data_set, lang1, lang2, stream=(lang2 is None)):
# batch
if lang2 is None:
x, lengths = batch
positions = None
langs = x.clone().fill_(lang1_id) if params.n_langs > 1 else None
else:
(sent1, len1), (sent2, len2) = batch
x, lengths, positions, langs = concat_batches(
sent1,
len1,
lang1_id,
sent2,
len2,
lang2_id,
params.pad_index,
params.eos_index,
reset_positions=True,
)
# words to predict
alen = torch.arange(lengths.max(), dtype=torch.long, device=lengths.device)
pred_mask = alen[:, None] < lengths[None] - 1
y = x[1:].masked_select(pred_mask[:-1])
assert pred_mask.sum().item() == y.size(0)
# cuda
x, lengths, positions, langs, pred_mask, y = to_cuda(
x, lengths, positions, langs, pred_mask, y
)
# forward / loss
tensor = model(
"fwd",
x=x,
lengths=lengths,
positions=positions,
langs=langs,
causal=True,
)
word_scores, loss = model(
"predict", tensor=tensor, pred_mask=pred_mask, y=y, get_scores=True
)
# update stats
n_words += y.size(0)
xe_loss += loss.item() * len(y)
predictions = word_scores.max(1)[1]
n_valid += (predictions == y).sum().item()
gt_bytes = [self.dico.id2word[i.item()].encode("utf-8") for i in y]
n_bytes += sum(len(b) for b in gt_bytes)
pred_bytes = [
self.dico.id2word[i.item()].encode("utf-8")
for i in word_scores.max(1)[1]
]
valid_bytes += sum(
sum(1 if pred == gt else 0 for pred, gt in zip(pred_seq, gt_seq))
for pred_seq, gt_seq in zip(pred_bytes, gt_bytes)
)
# log
logger.info(
"Found %i words in %s. %i were predicted correctly."
% (n_words, data_set, n_valid)
)
logger.info(
"Found %i bytes in %s. %i were predicted correctly."
% (n_bytes, data_set, valid_bytes)
)
# compute perplexity and prediction accuracy
ppl_name = "%s_%s_clm_ppl" % (data_set, l1l2)
acc_name = "%s_%s_clm_acc" % (data_set, l1l2)
byte_name = "%s_%s_clm_byte_acc" % (data_set, l1l2)
scores[ppl_name] = np.exp(xe_loss / n_words)
scores[acc_name] = 100.0 * n_valid / n_words
scores[byte_name] = 100.0 * valid_bytes / n_bytes
def evaluate_mlm(self, scores, data_set, lang1, lang2):
"""
Evaluate perplexity and next word prediction accuracy.
"""
params = self.params
assert data_set in EVAL_DATASET_SPLITS
assert lang1 in params.langs
assert lang2 in params.langs or lang2 is None
model = self.model[0] if params.encoder_only else self.encoder[0]
model.eval()
model = model.module if params.multi_gpu else model
rng = np.random.RandomState(0)
lang1_id = params.lang2id[lang1]
lang2_id = params.lang2id[lang2] if lang2 is not None else None
l1l2 = lang1 if lang2 is None else f"{lang1}_{lang2}"
n_words = 0
xe_loss = 0
n_valid = 0
for i, batch in enumerate(
self.get_iterator(data_set, lang1, lang2, stream=(lang2 is None))
):
if i > 50:
break
# batch
if lang2 is None:
x, lengths = batch
positions = None
langs = x.clone().fill_(lang1_id) if params.n_langs > 1 else None
else:
(sent1, len1, _, _), (sent2, len2, _, _) = batch
x, lengths, positions, langs = concat_batches(
sent1,
len1,
lang1_id,
sent2,
len2,
lang2_id,
params.pad_index,
params.eos_index,
reset_positions=True,
)
# words to predict
x, y, pred_mask = self.mask_out(x, lengths, rng)
# log first batch of training
if i < 1:
show_batch(
logger,
[("masked source", x.transpose(0, 1))],
self.data["dico"],
self.params.tokenization_mode,
"Evaluation",
self.params.sentencepiece_model_path,
)
# cuda
x, y, pred_mask, lengths, positions, langs = to_cuda(
x, y, pred_mask, lengths, positions, langs
)
# forward / loss
tensor = model(
"fwd",
x=x,
lengths=lengths,
positions=positions,
langs=langs,
causal=False,
)
word_scores, loss = model(
"predict", tensor=tensor, pred_mask=pred_mask, y=y, get_scores=True
)
# update stats
n_words += len(y)
xe_loss += loss.item() * len(y)
n_valid += (word_scores.max(1)[1] == y).sum().item()
# compute perplexity and prediction accuracy
ppl_name = "%s_%s_mlm_ppl" % (data_set, l1l2)
acc_name = "%s_%s_mlm_acc" % (data_set, l1l2)
scores[ppl_name] = np.exp(xe_loss / n_words) if n_words > 0 else 1e9
scores[acc_name] = 100.0 * n_valid / n_words if n_words > 0 else 0.0
def evaluate_classif(self, scores, data_set, lang1, lang2):
params = self.params
assert data_set in EVAL_DATASET_SPLITS
assert lang1 in params.langs
lang1_id = params.lang2id[lang1]
model = self.model[0] if params.encoder_only else self.encoder[0]
model.eval()
model = model.module if params.multi_gpu else model
assert self.classifier is not None
classifier = self.classifier[0].eval()
n_words = 0
n_valid = 0
labels = []
word_probas = []
n_words_by_cl = [0 for c in range(self.params.n_classes_classif)]
n_valid_by_cl = [0 for c in range(self.params.n_classes_classif)]
n_attribution_by_cl = [0 for c in range(self.params.n_classes_classif)]
for batch in self.get_iterator(data_set, lang1, lang2, stream=False):
(x1, len1, _, _), (y, len2, _, _) = batch
pred_mask = (x1 != self.params.eos_index) * (x1 != self.params.pad_index)
assert len1.equal(len2)
langs1 = x1.clone().fill_(lang1_id)
# cuda
x1, len1, langs1, y = to_cuda(x1, len1, langs1, y)
# encode source sentence
enc1 = model("fwd", x=x1, lengths=len1, langs=langs1, causal=False)
if self.params.fp16:
enc1 = enc1.half()
# classification + loss
word_scores, loss = classifier(enc1, y, pred_mask)
# update stats
y_ = y[pred_mask].view(-1,)
n_words += len(y_)
n_valid += (word_scores.max(1)[1] == y_).sum().item()
labels.extend(y_.cpu().numpy())
word_probas.extend(word_scores.cpu().numpy())
for cl in range(self.params.n_classes_classif):
n_words_by_cl[cl] += (y_ == cl).sum().item()
n_valid_by_cl[cl] += (
((word_scores.max(1)[1] == y_) * (y_ == cl)).sum().item()
)
n_attribution_by_cl[cl] += (word_scores.max(1)[1] == cl).sum().item()
if len(set(labels)) > 1:
for target_label in range(self.params.n_classes_classif):
roc_auc_name = "%s_%s-%s_roc_auc_label_cl%i" % (
data_set,
lang1,
lang2,
target_label,
)
new_labels = [1 if l == target_label else 0 for l in labels]
word_level_scores = [wp[target_label] for wp in word_probas]
scores[roc_auc_name] = roc_auc_score(new_labels, word_level_scores)
pr_auc_name = "%s_%s-%s_pr_auc_cl%i" % (
data_set,
lang1,
lang2,
target_label,
)
scores[pr_auc_name] = average_precision_score(
new_labels, word_level_scores
)
roc_auc_name = "%s_%s-%s_roc_auc_label_all_changes" % (
data_set,
lang1,
lang2,
)
new_labels = [1 if l > 0 else 0 for l in labels]
word_level_scores = [1 - s[0] for s in word_probas]
scores[roc_auc_name] = roc_auc_score(new_labels, word_level_scores)
pr_auc_name = "%s_%s-%s_pr_auc_label_all_changes" % (data_set, lang1, lang2)
scores[pr_auc_name] = average_precision_score(new_labels, word_level_scores)
# compute perplexity and prediction accuracy
class_proportion_name = "%s_%s-%s_class_proportion" % (data_set, lang1, lang2)
acc_name = "%s_%s-%s_classif_acc" % (data_set, lang1, lang2)
recall_name = "%s_%s-%s_classif_recall" % (data_set, lang1, lang2)
precision_name = "%s_%s-%s_classif_precision" % (data_set, lang1, lang2)
scores[class_proportion_name] = [
(100.0 * x / n_words) if n_words > 0 else 0.0 for x in n_words_by_cl
]
scores[acc_name] = (100.0 * n_valid / n_words) if n_words > 0 else 0.0
# scores[recall_name] = [(100. * n_valid_by_cl[cl] / n_words_by_cl[cl]) if n_words_by_cl[cl] > 0 else 0 for cl in range(self.params.n_classes_classif)]
# scores[precision_name] = [(100. * n_valid_by_cl[cl] / n_attribution_by_cl[cl]) if n_attribution_by_cl[cl] > 0 else 0 for cl in range(self.params.n_classes_classif)]
for cl in range(params.n_classes_classif):
scores[f"{recall_name}_{cl}"] = (
100.0 * n_valid_by_cl[cl] / n_words_by_cl[cl]
if n_words_by_cl[cl] > 0
else 0
)
for cl in range(params.n_classes_classif):
scores[f"{precision_name}_{cl}"] = (
100.0 * n_valid_by_cl[cl] / n_attribution_by_cl[cl]
if n_attribution_by_cl[cl] > 0
else 0
)
class SingleEvaluator(Evaluator):
def __init__(self, trainer, data, params) -> None:
"""
Build language model evaluator.
"""
super().__init__(trainer, data, params)
self.model = trainer.model
if params.use_classifier:
self.classifier = trainer.classifier
def gather_model_outputs(model_outputs_list):
model_outputs = {}
for k in ["n_words", "xe_loss", "n_valid"]:
model_outputs[k] = sum(d.get(k, 0) for d in model_outputs_list)
for k in ["hypothesis", "references", "sources", "computed_irs"]:
model_outputs[k] = [] # First element of each list, then second...
for i in range(len(model_outputs_list[0][k])):
for d in model_outputs_list:
if k not in d or len(d[k]) <= i:
continue
model_outputs[k].extend(d[k][i])
return model_outputs
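# Illustrative sketch: scalar counters are summed and list-valued fields are
# interleaved batch by batch across workers, e.g.
#
#     gather_model_outputs([
#         {"n_words": 10, "xe_loss": 1.0, "n_valid": 8,
#          "hypothesis": [["hyp_a0"], ["hyp_a1"]], "references": [], ...},
#         {"n_words": 12, "xe_loss": 2.0, "n_valid": 9,
#          "hypothesis": [["hyp_b0"]], "references": [], ...},
#     ])
#     # -> n_words 22, hypothesis ["hyp_a0", "hyp_b0", "hyp_a1"], ...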
class EncDecEvaluator(Evaluator):
def __init__(self, trainer, data, params) -> None:
"""
Build encoder / decoder evaluator.
"""
super().__init__(trainer, data, params)
self.encoder = trainer.encoder
self.decoder = trainer.decoder
def evaluate_mt(
self,
scores,
data_set: str,
lang1: str,
lang2: str,
eval_bleu: bool,
eval_computation: bool,
eval_computation_pivot: bool,
eval_subtoken_score,
span,
deobfuscate=False,
deobfuscate_probas=None,
eval_ir_similarity=False,
):
"""
        Evaluate machine translation for the (lang1, lang2) pair: perplexity and
        next-word prediction accuracy, plus (optionally) BLEU, subtoken scores
        and computational accuracy on the generated hypotheses.
"""
params = self.params
assert data_set in EVAL_DATASET_SPLITS
assert lang1 in params.langs
assert lang2 in params.langs
rng = np.random.RandomState(0)
torch_rng = torch.Generator().manual_seed(0)
do_eval = {
"bleu": eval_bleu,
"st": params.eval_st,
"computation": eval_computation,
"computation_pivot": eval_computation_pivot,
"subtoken_score": eval_subtoken_score,
"ir_similarity": eval_ir_similarity,
}
bpe_model = None
if do_eval["computation_pivot"]:
bpe_model = fastBPE.fastBPE(params.pivot_bpe_model) # type: ignore
logger.info(f"Computing pivot CA for {lang1} to {lang2}")
# store hypothesis to compute BLEU score
if params.eval_bleu_test_only:
datasets_for_bleu = ["test"]
else:
datasets_for_bleu = [s for s in EVAL_DATASET_SPLITS if s != "train"]
lang2_id = params.lang2id[lang2]
self.eval_mode()
encoder = self.encoder[0].module if params.multi_gpu else self.encoder[0]
decoder = (
self.decoder[lang2_id] if params.separate_decoders else self.decoder[0]
)
decoder = decoder.module if params.multi_gpu else decoder
for deobfuscation_proba in (
deobfuscate_probas if deobfuscate_probas is not None else [None]
):
if deobfuscate:
rng = np.random.RandomState(0)
word_metrics: tp.Mapping[str, float] = defaultdict(float)
text_files: tp.Mapping[str, tp.Any] = defaultdict(list)
logger.info(
f"{params.global_rank}: generating MT hypotheses {lang1} -> {lang2}"
)
time_start_generate = time.perf_counter()
will_compute_bleu = (
any(
do_eval[k]
for k in (
"bleu",
"computation",
"subtoken_score",
"computation_pivot",
)
)
and data_set in datasets_for_bleu
)
for i, batch in enumerate(
self.get_iterator(
data_set, lang1, lang2 if lang2 != lang1 else None, span=span
)
):
if i % params.world_size != params.global_rank:
continue # Distribute batches on all GPUs
computed_irs_upd, ir_creation_errors = [], None
show_example = i == 0
seq1, seq2, spans = self.extract_batch(
lang1,
lang2,
batch,
rng,
torch_rng,
deobfuscate,
deobfuscation_proba,
params,
do_eval["computation_pivot"],
)
if seq1 is None:
continue
if do_eval["computation_pivot"]:
seq1, computed_irs_upd, ir_creation_errors = self.sequence_to_ir(
seq1, lang1, params, bpe_model
)
text_files["computed_irs"].append(computed_irs_upd)
enc1, dec2 = self.do_forward(
encoder, decoder, seq1, seq2, spans, params.fp16
)
self.update_word_metrics(
word_metrics,
seq2,
decoder,
dec2,
do_eval["computation_pivot"],
ir_creation_errors,
)
self.update_text_files(
text_files,
decoder,
seq1,
seq2,
enc1,
params,
lang1,
lang2,
data_set,
will_compute_bleu,
do_eval["computation_pivot"],
ir_creation_errors,
show_example,
)
time_hyp_generated = time.perf_counter()
logger.info(
f"Timing: Generated hypotheses in {time_hyp_generated - time_start_generate:.2f}s"
)
model_outputs = {**word_metrics, **text_files}
if params.world_size > 1:
torch.distributed.barrier()
model_outputs_list = [None for _ in range(params.world_size)]
torch.distributed.all_gather_object(model_outputs_list, model_outputs)
else:
model_outputs_list = [model_outputs] # type: ignore
if not params.is_master:
continue
model_outputs = gather_model_outputs(model_outputs_list)
self.compute_metrics(
model_outputs,
data_set,
lang1,
lang2,
params,
scores,
deobfuscate,
deobfuscation_proba,
do_eval,
datasets_for_bleu,
will_compute_bleu,
)
logger.info(
f"Timing: Computed metrics in {time.perf_counter() - time_hyp_generated:.2f}s"
)
def extract_batch(
self,
lang1,
lang2,
batch,
rng,
torch_rng,
deobfuscate,
deobfuscation_proba,
params,
eval_computation_pivot,
):
spans = None
assert len(batch) >= 2
if len(batch) == 2:
if lang1 == lang2:
x2, len2 = batch
x1, len1 = add_noise(
x2, len2, self.params, len(self.data["dico"]) - 1, rng, torch_rng,
)
else:
(x1, len1, ids1, len_ids1), (x2, len2, ids2, len_ids2) = batch
assert x1 is not None
if deobfuscate:
(x1, len1, x2, len2) = self.trainer.deobfuscate_by_variable(
x1,
x2,
deobfuscation_proba,
params.tokenization_mode == "roberta",
rng,
)
if x1 is None:
return None, None, None
elif len(batch) == 4:
assert lang1 == lang2
if eval_computation_pivot:
x1, len1, _, _ = batch
x2, len2 = x1, len1
else:
x2, len2, _, _ = batch
x1, len1 = add_noise(
x2, len2, self.params, len(self.data["dico"]) - 1, rng, torch_rng,
)
else:
assert len(batch) == 3
(
(x1, len1, ids1, len_ids1),
(x2, len2, ids2, len_ids2),
(spans, len_spans, _, _),
) = batch
lang1_id = params.lang2id[lang1]
lang2_id = params.lang2id[lang2]
langs1 = x1.clone().fill_(lang1_id)
langs2 = x2.clone().fill_(lang2_id)
# cuda
x1, len1, langs1, x2, len2, langs2, spans = to_cuda(
x1, len1, langs1, x2, len2, langs2, spans
)
return (x1, len1, langs1), (x2, len2, langs2), spans
def sequence_to_ir(self, seq1, lang1, params, bpe_model):
x1, len1, langs1 = seq1
assert "ir_sa" in params.lgs
input_sent_irs = self.tokens_to_code(x1, len1, lang1, params)
input_sent_irs, ir_creation_errors = self.batch_to_irs(input_sent_irs, lang1)
computed_irs_upd = [ir for ir in input_sent_irs]
x1, len1, lang1_id, ir_creation_errors = self.create_ir_sent_batch(
input_sent_irs, ir_creation_errors, bpe_model, x1, params
)
langs1 = x1.clone().fill_(lang1_id)
return to_cuda(x1, len1, langs1), computed_irs_upd, ir_creation_errors
def do_forward(self, encoder, decoder, seq1, seq2, spans, is_fp16):
x1, len1, langs1 = seq1
x2, len2, langs2 = seq2
# encode source sentence
enc1 = encoder(
"fwd", x=x1, lengths=len1, langs=langs1, causal=False, spans=spans
)
enc1 = enc1.transpose(0, 1)
enc1 = enc1.half() if is_fp16 else enc1
# decode target sentence
dec2 = decoder(
"fwd",
x=x2,
lengths=len2,
langs=langs2,
causal=True,
src_enc=enc1,
src_len=len1,
spans=spans,
)
return enc1, dec2
def update_word_metrics(
self,
word_metrics,
seq2,
decoder,
dec2,
eval_computation_pivot,
ir_creation_errors,
):
x2, len2, _ = seq2
# target words to predict
alen = torch.arange(len2.max(), dtype=torch.long, device=len2.device)
pred_mask = (
alen[:, None] < len2[None] - 1
) # do not predict anything given the last target word
if eval_computation_pivot:
# dec2: (len, bs, dim)
err_mask = torch.BoolTensor([not err for err in ir_creation_errors]).to(
x2.device
)
dec2 = dec2[:, err_mask]
pred_mask = pred_mask[:, err_mask]
y = x2[1:, err_mask].masked_select(pred_mask[:-1])
else:
y = x2[1:].masked_select(pred_mask[:-1])
assert len(y) == (len2 - 1).sum().item()
# loss
word_scores, loss = decoder(
"predict", tensor=dec2, pred_mask=pred_mask, y=y, get_scores=True
)
word_metrics["n_words"] += y.size(0)
word_metrics["xe_loss"] += loss.item() * len(y)
word_metrics["n_valid"] += (
(word_scores.max(1)[1] == y).sum().item() if y.size(0) else 0
)
def update_text_files(
self,
text_files,
decoder,
seq1,
seq2,
enc1,
params,
lang1,
lang2,
data_set,
will_compute_bleu,
eval_computation_pivot,
ir_creation_errors,
show_example,
):
x1, len1, _ = seq1
x2, len2, _ = seq2
# generate translation - translate / convert to text
text_hyps_upd, ref_upd, sources_upd = [], [], []
if will_compute_bleu:
lang2_id = params.lang2id[lang2]
text_hyps_upd, generated = self.generate_mt_hypotheses(
enc1, len1, lang2_id, decoder, params
)
if eval_computation_pivot:
assert ir_creation_errors is not None
text_hyps_upd = [
[FAILED_IR_COMP_ + h if err else h for h in hyp]
for hyp, err in zip(text_hyps_upd, ir_creation_errors)
]
ref_upd = convert_to_text(x2, len2, self.dico, params)
sources_upd = convert_to_text(x1, len1, self.dico, params)
if show_example:
# show 1 evaluation example and the corresponding model generation
show_batch(
logger,
[
("source", x1.transpose(0, 1)),
("target", x2.transpose(0, 1)),
(
"gen",
generated.transpose(0, 1)
if len(generated.shape) == 2
else generated[:, 0, :].transpose(0, 1),
),
],
self.data["dico"],
self.params.tokenization_mode,
f"{data_set} {lang1}-{lang2}",
self.params.sentencepiece_model_path,
)
text_files["hypothesis"].append(text_hyps_upd)
text_files["references"].append(ref_upd)
text_files["sources"].append(sources_upd)
def compute_metrics(
self,
model_outputs,
data_set,
lang1,
lang2,
params,
scores,
deobfuscate,
deobfuscation_proba,
do_eval,
datasets_for_bleu,
will_compute_bleu,
):
n_words = model_outputs["n_words"]
# compute perplexity and prediction accuracy
l1l2 = get_l1l2_string(lang1, lang2, deobfuscation_proba)
is_pivot = "pivot_" if do_eval["computation_pivot"] else ""
scores["%s_%s_%smt_ppl" % (data_set, l1l2, is_pivot)] = (
np.exp(model_outputs["xe_loss"] / n_words) if n_words > 0 else -1
)
scores["%s_%s_%smt_acc" % (data_set, l1l2, is_pivot)] = (
100.0 * model_outputs["n_valid"] / n_words if n_words > 0 else -1
)
hypothesis = model_outputs["hypothesis"]
if len(hypothesis) == 0:
return
common_variables = { # Variables that are the input to several eval functions
"data_set": data_set,
"hypothesis": hypothesis,
"lang1": lang1,
"lang2": lang2,
"params": params,
"scores": scores,
}
# write hypotheses
hyp_paths = ref_path = src_path = irs_path = None
if will_compute_bleu:
hyp_paths, ref_path, src_path, irs_path = self.write_hypo_ref_src(
**common_variables,
references=model_outputs["references"],
sources=model_outputs["sources"],
deobfuscation_proba=deobfuscation_proba,
computed_irs=model_outputs["computed_irs"],
)
# check how many functions compiles + return same output as GT
if (
do_eval["computation"] or do_eval["computation_pivot"]
) and data_set in datasets_for_bleu:
print(f"compute_comp_acc with {params.translation_eval_set} evaluation set")
self.compute_comp_acc(
**common_variables,
hyp_paths=hyp_paths,
ref_path=ref_path,
tests_type=params.translation_eval_set,
tokenization_mode=params.tokenization_mode,
irs_path=irs_path,
)
if do_eval["ir_similarity"] and data_set in datasets_for_bleu:
assert "ir" in lang1
self.compute_ir_similarity(
hypothesis, hyp_paths, src_path, lang1, lang2, data_set, scores
)
if (
do_eval["st"]
and data_set in datasets_for_bleu
and get_programming_language_name(lang1) == SRC_ST_LANGS
and get_programming_language_name(lang2) in TARGET_ST_LANG
):
logger.info("Computing ST comp acc")
self.compute_comp_acc(
**common_variables,
hyp_paths=hyp_paths,
ref_path=ref_path,
tests_type="evosuite",
tokenization_mode=params.tokenization_mode,
)
if do_eval["subtoken_score"] and data_set in datasets_for_bleu:
subtoken_level_scores = run_subtoken_score(ref_path, hyp_paths)
for score_type, value in subtoken_level_scores.items():
logger.info(
"Subtoken %s score %s %s : %f"
% (score_type, hyp_paths, ref_path, value)
)
scores[
"%s_%s_mt_subtoken_%s"
% (
data_set,
get_l1l2_string(lang1, lang2, deobfuscation_proba),
score_type,
)
] = value
# compute BLEU score
if do_eval["bleu"] and data_set in datasets_for_bleu:
compute_bleu(
hyp_paths[0],
ref_path,
"%s_%s_%smt_bleu"
% (
data_set,
get_l1l2_string(lang1, lang2, deobfuscation_proba),
is_pivot,
),
scores,
filter_failed_irs=do_eval["computation_pivot"],
)
if (
deobfuscate
and do_eval["bleu"]
or do_eval["subtoken_score"]
and data_set in datasets_for_bleu
):
# TODO clean lang1
vizualize_do_files(lang1, src_path, ref_path, hyp_paths)
if hyp_paths:
for hyp_path in hyp_paths:
Path(hyp_path).unlink()
def generate_mt_hypotheses(self, enc1, len1, lang2_id, decoder, params):
len_v = (10 * len1 + 10).clamp(max=params.max_len)
if params.beam_size == 1:
if params.number_samples > 1:
assert params.eval_temperature is not None
generated, lengths = decoder.generate(
enc1.repeat_interleave(params.number_samples, dim=0),
len1.repeat_interleave(params.number_samples, dim=0),
lang2_id,
max_len=len_v.repeat_interleave(params.number_samples, dim=0),
sample_temperature=params.eval_temperature,
)
generated = generated.T.reshape(
-1, params.number_samples, generated.shape[0]
).T
lengths, _ = lengths.reshape(-1, params.number_samples).max(dim=1)
else:
generated, lengths = decoder.generate(
enc1, len1, lang2_id, max_len=len_v
)
# print(f'path 1: {generated.shape}')
else:
assert params.number_samples == 1
generated, lengths, _ = decoder.generate_beam(
enc1,
len1,
lang2_id,
beam_size=params.beam_size,
length_penalty=params.length_penalty,
early_stopping=params.early_stopping,
max_len=len_v,
)
# print(f'path 2: {generated.shape}')
text_hyps = convert_to_text(
generated, lengths, self.dico, params, generate_several_reps=True,
)
return text_hyps, generated
def create_ir_sent_batch(
self, input_sent_irs, ir_creation_errors, bpe_model, x1, params
):
input_irs = [
["</s>"]
+ " ".join(
[
x
for x in bpe_model.apply("" if ir_err else s.split(" "))
# if x.strip() != ""
]
).split(" ")
+ ["</s>"]
for s, ir_err in zip(input_sent_irs, ir_creation_errors)
]
too_long = [len(s) > params.max_len for s in input_irs]
ir_creation_errors = [
err or len(s) > params.max_len
for s, err in zip(input_irs, ir_creation_errors)
]
logger.info(
f"{sum(too_long)} too long failures ({sum(ir_creation_errors)} in total) among {x1.shape[1]} examples"
)
ir_creation_errors = [
err or too_long for err, too_long in zip(ir_creation_errors, too_long)
]
input_irs = [
ir if len(ir) <= params.max_len else ["</s>", "</s>"] for ir in input_irs
]
old_x1_shape = x1.shape
x1_device = x1.device
input_irs = [np.array([self.dico.index(w) for w in ir]) for ir in input_irs]
# Create ir batch
len1 = torch.LongTensor([len(ir) for ir in input_irs]).to(x1_device)
x1 = torch.LongTensor(len1.max().item(), len1.size(0)).fill_(params.pad_index)
for i, s in enumerate(input_irs):
x1[: len1[i], i].copy_(torch.from_numpy(s.astype(np.int64)))
        x1 = x1.to(x1_device)
assert (x1 == params.eos_index).sum() == 2 * x1.size(1), (
x1.shape,
(x1 == params.eos_index).sum(),
)
assert x1.shape[1] == old_x1_shape[1], (x1.shape, old_x1_shape)
assert "ir_sa" in params.lang2id
lang1_id = params.lang2id["ir_sa"]
return x1, len1, lang1_id, ir_creation_errors
def batch_to_irs(self, input_sent_irs, lang1):
number_examples = len(input_sent_irs)
executor = ProcessPoolExecutor()
ir_verbosity = False
jobs = [
executor.submit(
code_to_ir, s, get_programming_language_name(lang1), True, ir_verbosity,
)
for s in input_sent_irs
]
input_sent_irs = [j.result() for j in jobs]
ir_creation_errors = [
len(s) == 0 or s[0].startswith(ERROR_MESSAGE) for s in input_sent_irs
]
logger.info(
f"{len([x for x in ir_creation_errors if x])} failures among {number_examples} examples"
)
input_sent_irs = [
s[0] if s else f"{ERROR_MESSAGE}: no IR" for s in input_sent_irs
]
ir_tokenizer = IRProcessor().tokenize_code
input_sent_irs = [" ".join(ir_tokenizer(c)) for c in input_sent_irs]
return input_sent_irs, ir_creation_errors
def tokens_to_code(self, x1, len1, lang1, params):
input_sent_irs = convert_to_text(x1, len1, self.dico, params)
assert x1.shape[1] == len(input_sent_irs), (
x1.shape[1],
len(input_sent_irs),
input_sent_irs,
)
input_sent_irs = unsegment_and_detokenize(
input_sent_irs,
lang1,
params.tokenization_mode,
sentencepiece_model_path=params.sentencepiece_model_path,
)
return input_sent_irs
@staticmethod
def write_hypo_ref_src(
data_set,
hypothesis,
lang1,
lang2,
params,
references,
scores,
sources=None,
deobfuscation_proba=None,
computed_irs=None,
):
# hypothesis / reference paths
hyp_paths = []
ref_name = "ref.{0}.{1}.txt".format(
get_l1l2_string(lang1, lang2, deobfuscation_proba), data_set
)
ref_path = os.path.join(params.hyp_path, ref_name)
# export sentences to hypothesis file / restore BPE segmentation
for beam_number in range(len(hypothesis[0])):
hyp_name = "hyp{0}.{1}.{2}_beam{3}.txt".format(
scores["epoch"],
get_l1l2_string(lang1, lang2, deobfuscation_proba),
data_set,
beam_number,
)
hyp_path = os.path.join(params.hyp_path, hyp_name)
hyp_paths.append(hyp_path)
print(f"outputing hypotheses in {hyp_path}")
with open(hyp_path, "w", encoding="utf-8") as f:
f.write("\n".join([hyp[beam_number] for hyp in hypothesis]) + "\n")
restore_segmentation(
hyp_path,
tokenization_mode=params.tokenization_mode,
single_line=True,
sentencepiece_model_path=params.sentencepiece_model_path,
)
# export reference to ref file / restore BPE segmentation
EncDecEvaluator.log_eval_outputs(ref_path, references, params)
src_path = None
if sources:
src_path = ref_path.replace("ref.", "src.")
EncDecEvaluator.log_eval_outputs(src_path, sources, params)
irs_path = None
if computed_irs:
irs_path = ref_path.replace("ref.", "irs.")
EncDecEvaluator.log_eval_outputs(
irs_path, computed_irs, params, restore_bpe=False
)
return hyp_paths, ref_path, src_path, irs_path
@staticmethod
def log_eval_outputs(path, values, params, restore_bpe=True):
with open(path, "w", encoding="utf-8") as f:
f.write("\n".join([s for s in values]) + "\n")
if restore_bpe:
restore_segmentation(
path,
tokenization_mode=params.tokenization_mode,
single_line=True,
sentencepiece_model_path=params.sentencepiece_model_path,
)
def compute_ir_similarity(
self,
hypothesis: tp.List[tp.List[str]],
hyp_paths: tp.List[str],
ref_path: str,
lang1: str,
lang2: str,
data_set: str,
scores: tp.Dict[str, float],
):
assert "ir" in lang1
input_sent_irs, ir_creation_errors = self.batch_to_irs(
unsegment_and_detokenize(
[h[0] for h in hypothesis],
lang2,
self.params.tokenization_mode,
self.params.sentencepiece_model_path,
),
lang2,
)
ratio_computed_irs = 1 - np.mean(ir_creation_errors)
logger.info(f"IR correct: {hyp_paths[0]}: {ratio_computed_irs}")
scores["%s_%s-%s_ir_correct" % (data_set, lang1, lang2)] = ratio_computed_irs
with open(ref_path) as f:
refs = f.readlines()
irs_path = hyp_paths[0].replace(".txt", "_recomputed_irs.txt")
refs_ir_path = ref_path.replace(".txt", "_recomputed_irs.txt")
count_equal = 0
assert len(input_sent_irs) == len(refs) == len(ir_creation_errors)
with open(irs_path, "w") as f_irs:
with open(refs_ir_path, "w") as f_refs:
for hyp_ir, ref, error in zip(input_sent_irs, refs, ir_creation_errors):
if error:
continue
if hyp_ir.strip() == ref.strip():
count_equal += 1
f_irs.write(hyp_ir.strip() + "\n")
f_refs.write(ref.strip() + "\n")
scores["%s_%s-%s_mt_ir_equal" % (data_set, lang1, lang2)] = count_equal / len(
input_sent_irs
)
compute_bleu(
irs_path,
refs_ir_path,
"%s_%s-%s_mt_ir_bleu" % (data_set, lang1, lang2),
scores,
)
def compute_comp_acc(
self,
data_set,
hyp_paths,
hypothesis,
lang1,
lang2,
params,
ref_path,
scores,
tests_type,
tokenization_mode="fastbpe",
irs_path=None,
):
prefix = "st" if tests_type == "evosuite" else ""
assert self.evosuite_tests_dico is not None or tests_type != "evosuite"
func_run_stats, func_run_out = eval_function_output(
ref_path,
hyp_paths,
params.id_paths[(lang1, lang2, data_set)],
lang1,
lang2,
params.eval_scripts_folders[(lang1, lang2, data_set)],
EVAL_SCRIPT_FOLDER[data_set] if tests_type == GFG else CODENET_EVAL_FOLDER,
params.retry_mistmatching_types,
tokenization_mode,
tests_type=tests_type,
evosuite_tests=self.evosuite_tests_dico,
)
out_paths = []
success_for_beam_number = [0 for _ in range(len(hypothesis[0]))]
for beam_number in range(len(hypothesis[0])):
out_name = prefix + "hyp{0}.{1}-{2}.{3}_beam{4}.out.txt".format(
scores["epoch"], lang1, lang2, data_set, beam_number
)
out_path = os.path.join(params.hyp_path, out_name)
out_paths.append(out_path)
with open(out_path, "w", encoding="utf-8") as f:
for results_list in func_run_out:
result_for_beam = (
results_list[beam_number]
if beam_number < len(results_list)
else ""
)
if result_for_beam.startswith("success"):
success_for_beam_number[beam_number] += 1
f.write(result_for_beam + "\n")
f.write("\n")
vizualize_translated_files(
lang1,
lang2,
params.ref_paths[(lang2, lang1, data_set)],
hyp_paths,
params.id_paths[(lang1, lang2, data_set)],
ref_path,
out_paths,
irs_path,
tokenization_mode=tokenization_mode,
)
logger.info(
prefix
+ "Computation res %s %s %s : %s"
% (data_set, lang1, lang2, json.dumps(func_run_stats))
)
scores["%s_%s-%s_mt_comp_acc" % (data_set + prefix, lang1, lang2)] = (
100.0
* func_run_stats["success"]
/ (max(func_run_stats["total_evaluated"], 1))
)
successful_irs = func_run_stats["total_evaluated"] - func_run_stats.get(
"Failed IR computation", 0
)
scores["%s_%s-%s_mt_comp_acc_pivot_IR" % (data_set + prefix, lang1, lang2)] = (
100.0
* func_run_stats["success"]
/ (successful_irs if successful_irs else 1)
)
scores["%s_%s-%s_mt_failed_pivot_IR" % (data_set + prefix, lang1, lang2)] = (
100.0
* func_run_stats.get("Failed IR computation", 0)
/ (max(func_run_stats["total_evaluated"], 1) if successful_irs else 1)
)
for beam_number, success_for_beam in enumerate(success_for_beam_number):
scores[
"%s_%s-%smt_comp_acc_contrib_beam_%i"
% (data_set + prefix, lang1, lang2, beam_number)
] = (
100.0
* success_for_beam
/ (
func_run_stats["total_evaluated"]
if func_run_stats["total_evaluated"]
else 1
)
)
for out_path in out_paths:
Path(out_path).unlink()
def get_l1l2_string(lang1, lang2, deobfuscation_proba):
l1l2 = [lang1, lang2]
if deobfuscation_proba is not None:
l1l2.append(f"obf_proba_{1 - deobfuscation_proba}")
l1l2 = "-".join(l1l2)
return l1l2
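# Illustrative outputs:
#     get_l1l2_string("java_sa", "python_sa", None) -> "java_sa-python_sa"
#     get_l1l2_string("java_obfuscated", "java_dictionary", 0.5)
#         -> "java_obfuscated-java_dictionary-obf_proba_0.5"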
def eval_moses_bleu(ref, hyp):
"""
    Given a hypothesis file and a reference file,
    evaluate the BLEU score using the Moses multi-bleu script.
"""
assert os.path.isfile(hyp)
assert os.path.isfile(ref) or os.path.isfile(ref + "0")
assert os.path.isfile(BLEU_SCRIPT_PATH)
command = BLEU_SCRIPT_PATH + " %s < %s"
p = subprocess.Popen(command % (ref, hyp), stdout=subprocess.PIPE, shell=True)
result = p.communicate()[0].decode("utf-8")
if result.startswith("BLEU"):
return float(result[7 : result.index(",")])
else:
logger.warning('Impossible to parse BLEU score! "%s"' % result)
return -1
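# Illustrative call (hypothetical paths): the perl script prints a line such as
# "BLEU = 42.13, ..." which is parsed into a float; -1 is returned when the
# output cannot be parsed.
#
#     bleu = eval_moses_bleu("/tmp/ref.java-python.test.txt",
#                            "/tmp/hyp0.java-python.test.txt")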
def unsegment_and_detokenize(
sentences,
lang,
tokenization_mode: str,
sentencepiece_model_path: tp.Optional[PathLike] = None,
):
sentences = [
restore_segmentation_sentence(
s,
tokenization_mode=tokenization_mode,
sentencepiece_model_path=sentencepiece_model_path,
)
for s in sentences
]
lang1_detokenizer = LangProcessor.processors[
get_programming_language_name(lang)
]().detokenize_code
sentences = [lang1_detokenizer(s) for s in sentences]
return sentences
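# Illustrative sketch (assuming fastBPE-style "@@ " segmentation markers): the BPE
# segmentation is undone first, then the language-specific detokenizer restores
# regular source formatting.
#
#     unsegment_and_detokenize(
#         ["int f_fil@@ led ( int x ) { return x ; }"],
#         "cpp_sa",
#         tokenization_mode="fastbpe",
#     )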
def compute_bleu(
gen_path: str,
ref_path: str,
score_name: str,
scores: tp.Dict[str, float],
filter_failed_irs=False,
):
if filter_failed_irs:
hypotheses = read_file_lines(gen_path)
references = read_file_lines(ref_path)
errors = [h.strip().startswith(FAILED_IR_COMP_) for h in hypotheses]
assert len(hypotheses) == len(references) == len(errors)
hypotheses = [elmt for elmt, err in zip(hypotheses, errors) if not err]
references = [elmt for elmt, err in zip(references, errors) if not err]
ref = ref_path.replace(".txt", "_filtered.txt")
gen = gen_path.replace(".txt", "_filtered.txt")
with open(gen, "w") as f:
f.writelines(hypotheses)
with open(ref, "w") as f:
f.writelines(references)
else:
ref = ref_path
gen = gen_path
bleu = eval_moses_bleu(ref, gen)
logger.info("BLEU %s %s : %f" % (gen, ref, bleu))
scores[score_name] = bleu
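# Illustrative usage (hypothetical paths): the BLEU value is written into the
# `scores` dictionary rather than returned. With filter_failed_irs=True,
# hypotheses starting with FAILED_IR_COMP_ (and their references) are dropped
# into *_filtered.txt files before scoring.
#
#     scores = {}
#     compute_bleu("/tmp/hyp0.ir_sa-python_sa.test.txt",
#                  "/tmp/ref.ir_sa-python_sa.test.txt",
#                  "test_ir_sa-python_sa_mt_bleu", scores,
#                  filter_failed_irs=True)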
|
CodeGen-main
|
codegen_sources/model/src/evaluation/evaluator.py
|
CodeGen-main
|
codegen_sources/model/src/data/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import json
from pathlib import Path
import math
import typing as tp
from logging import getLogger
import numpy as np
import torch
sys.path.append(str(Path(__file__).parents[4]))
print("adding to path", str(Path(__file__).parents[4]))
from codegen_sources.model.src.utils import (
restore_segmentation_sentence,
get_programming_language_name,
batch_sentences,
)
TARGET_CLASS = "TARGET_CLASS"
MUTATION_SCORE = "mutation_score"
ASSERTS_COUNT = "asserts_count"
logger = getLogger()
class StreamDataset(object):
def __init__(self, sent, pos, bs, params) -> None:
"""
Prepare batches for data iterator.
"""
bptt = params.bptt
self.eos = params.eos_index
# checks
assert len(pos) == (sent == self.eos).sum()
assert len(pos) == (sent[pos[:, 1]] == self.eos).sum()
n_tokens = len(sent)
n_batches = math.ceil(n_tokens / (bs * bptt))
t_size = n_batches * bptt * bs
buffer = np.zeros(t_size, dtype=sent.dtype) + self.eos
buffer[t_size - n_tokens :] = sent
buffer = buffer.reshape((bs, n_batches * bptt)).T
self.data = np.zeros((n_batches * bptt + 1, bs), dtype=sent.dtype) + self.eos
self.data[1:] = buffer
self.bptt = bptt
self.n_tokens = n_tokens
self.n_batches = n_batches
self.n_sentences = len(pos)
self.lengths = torch.LongTensor(bs).fill_(bptt)
self.add_eof = params.add_eof_to_stream
def __len__(self):
"""
Number of sentences in the dataset.
"""
return self.n_sentences
def select_data(self, a, b):
"""
Only select a subset of the dataset.
"""
if not (0 <= a < b <= self.n_batches):
logger.warning("Invalid split values: %i %i - %i" % (a, b, self.n_batches))
return
assert 0 <= a < b <= self.n_batches
logger.info("Selecting batches from %i to %i ..." % (a, b))
# sub-select
self.data = self.data[a * self.bptt : b * self.bptt]
self.n_batches = b - a
self.n_sentences = (self.data == self.eos).sum().item()
def get_iterator(self, shuffle, subsample=1):
"""
Return a sentences iterator.
"""
indexes = (np.random.permutation if shuffle else range)(
self.n_batches // subsample
)
for i in indexes:
a = self.bptt * i
b = self.bptt * (i + 1)
batch = self.data[a:b]
if self.add_eof:
batch[0] = self.eos
yield torch.from_numpy(batch.astype(np.int64)), self.lengths
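    # Illustrative sketch: with bs=2, bptt=4 and a stream of 14 tokens, the buffer is
    # front-padded with eos to 16 tokens and reshaped so that self.data has shape
    # (n_batches * bptt + 1, bs) = (9, 2); get_iterator then yields n_batches=2
    # batches of shape (bptt, bs) = (4, 2) together with constant lengths equal to bptt.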
class Dataset(object):
def __init__(
self, sent, pos, params, has_sentence_ids, unit_tests_st=False
) -> None:
self.has_sentence_ids = has_sentence_ids
self.unit_tests_st = unit_tests_st
self.eos_index = params.eos_index
self.pad_index = params.pad_index
self.sep_index = params.sep_index
self.batch_size = params.batch_size
self.max_batch_size = params.max_batch_size
self.sent = sent
self.pos = pos
self.lengths = self.pos[:, 1] - self.pos[:, 0]
self.unit_tests: tp.Dict[str, tp.Dict[str, str]] = {
get_programming_language_name(lang): {} for lang in params.st_tgt_langs
}
self.unit_tests_scores: tp.Dict[str, tp.Dict[str, float]] = dict()
self.st_tests_scores: tp.Optional[tp.List[float]] = None # TODO fix type
# check number of sentences
assert len(self.pos) == (self.sent == self.eos_index).sum()
# # remove empty sentences
self.remove_empty_sentences()
# load unit tests for self training
if unit_tests_st:
assert (
has_sentence_ids
), "Dataset should have sentence IDs for self training"
self.load_unit_test_data(params.unit_tests_path)
# sanity checks
self.check()
def __len__(self):
"""
Number of sentences in the dataset.
"""
return len(self.pos)
def check(self):
"""
Sanity checks.
"""
eos = self.eos_index
# check sentences indices
assert len(self.pos) == (self.sent[self.pos[:, 1]] == eos).sum()
assert self.st_tests_scores is None or len(self.pos) == len(
self.st_tests_scores
)
# assert self.lengths.min() > 0 # check empty sentences
def batch_sentences(self, sentences, split_sentences_ids):
"""
Take as input a list of n sentences (torch.LongTensor vectors) and return
a tensor of size (slen, n) where slen is the length of the longest
sentence, and a vector lengths containing the length of each sentence.
"""
if split_sentences_ids is None:
split_sentences_ids = self.has_sentence_ids
if split_sentences_ids:
ids, lengths_ids, sentences = self.prepare_sent_with_ids(sentences)
else:
ids = None
lengths_ids = None
sent, lengths = batch_sentences(sentences, self.pad_index, self.eos_index)
return sent, lengths, ids, lengths_ids
def prepare_sent_with_ids(self, sentences):
sentences_WITH_IDS = sentences
sentences = []
ids_ = []
for s1 in sentences_WITH_IDS:
id, sent = self.extract_sent_id(s1)
sentences.append(sent)
ids_.append(id)
lengths_ids = torch.LongTensor([len(i) + 2 for i in ids_])
ids = torch.LongTensor(lengths_ids.max().item(), lengths_ids.size(0)).fill_( # type: ignore
self.pad_index
)
ids[0] = self.eos_index
for i, s in enumerate(ids_):
if lengths_ids[i] > 2: # if sentence not empty
ids[1 : lengths_ids[i] - 1, i].copy_(
torch.from_numpy(s.astype(np.int64))
)
ids[lengths_ids[i] - 1, i] = self.eos_index
return ids, lengths_ids, sentences
def extract_sent_id(self, s):
"""
Takes a sentence with ids and returns the id and the sentence
"""
pos = np.where(s == self.sep_index)[0][0]
sentence = s[pos + 1 :]
ids = s[:pos]
return ids, sentence
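    # Illustrative sketch: for an index vector laid out as
    #     s = [id_tok_1, id_tok_2, <sep_index>, w_1, w_2, w_3]
    # extract_sent_id splits on the first separator and returns
    # ([id_tok_1, id_tok_2], [w_1, w_2, w_3]).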
def remove_empty_sentences(self):
"""
Remove empty sentences.
"""
init_size = len(self.pos)
indices = np.arange(len(self.pos))
indices = indices[self.lengths[indices] > 0]
self.pos = self.pos[indices]
self.lengths = self.pos[:, 1] - self.pos[:, 0]
if self.st_tests_scores is not None:
self.st_tests_scores = self.st_tests_scores[indices]
logger.info("Removed %i empty sentences." % (init_size - len(indices)))
self.check()
def remove_long_sentences(self, max_len):
"""
Remove sentences exceeding a certain length.
"""
assert max_len >= 0
if max_len == 0:
return
init_size = len(self.pos)
indices = np.arange(len(self.pos))
indices = indices[self.lengths[indices] <= max_len]
self.pos = self.pos[indices]
self.lengths = self.pos[:, 1] - self.pos[:, 0]
if self.st_tests_scores is not None:
self.st_tests_scores = self.st_tests_scores[indices]
logger.info("Removed %i too long sentences." % (init_size - len(indices)))
self.check()
def load_unit_test_data(self, unit_tests_path):
assert Path(unit_tests_path).is_file(), f"{unit_tests_path} is not a file"
with open(unit_tests_path, "r") as f:
for line in f:
json_line = json.loads(line)
for lang in self.unit_tests.keys():
self.unit_tests[lang][json_line[TARGET_CLASS]] = json_line[
f"{get_programming_language_name(lang)}_translated_tests"
]
self.unit_tests_scores[json_line[TARGET_CLASS]] = {
MUTATION_SCORE: float(json_line[MUTATION_SCORE]),
ASSERTS_COUNT: int(json_line[ASSERTS_COUNT]),
}
def compute_st_scores(self, params, dico):
assert self.unit_tests_st and self.has_sentence_ids
self.st_tests_scores = [
self.get_unit_test_scores(self.sent[a:b], dico, params) for a, b in self.pos
]
def get_unit_test_scores(self, sentence, dico, params):
sent_id, _ = self.extract_sent_id(sentence)
sent_id = " ".join([dico[i] for i in sent_id])
sent_id = restore_segmentation_sentence(
sent_id,
tokenization_mode=params.tokenization_mode,
sentencepiece_model_path=params.sentencepiece_model_path,
)
assert (
sent_id in self.unit_tests_scores
), f"The unit test dataset is missing the element {sent_id}"
return (
self.unit_tests_scores[sent_id][MUTATION_SCORE],
self.unit_tests_scores[sent_id][ASSERTS_COUNT],
)
def select_data(self, a, b):
"""
Only select a subset of the dataset.
"""
assert 0 <= a < b <= len(self.pos)
logger.info("Selecting sentences from %i to %i ..." % (a, b))
# sub-select
self.pos = self.pos[a:b]
self.lengths = self.pos[:, 1] - self.pos[:, 0]
# re-index
min_pos = self.pos.min()
max_pos = self.pos.max()
self.pos -= min_pos
self.sent = self.sent[min_pos : max_pos + 1]
# sanity checks
self.check()
def get_batches_iterator(
self,
batches: tp.List[np.ndarray],
return_indices: bool,
max_batch_size: tp.Optional[int] = None,
):
"""
Return a sentences iterator, given the associated sentence batches.
"""
assert type(return_indices) is bool
if max_batch_size is None:
max_batch_size = self.max_batch_size
for sentence_ids in batches:
if 0 < max_batch_size < len(sentence_ids):
np.random.shuffle(sentence_ids)
sentence_ids = sentence_ids[:max_batch_size]
pos = self.pos[sentence_ids]
sent = [self.sent[a:b] for a, b in pos]
sent = self.batch_sentences(sent, self.has_sentence_ids)
yield (sent, sentence_ids) if return_indices else sent
def get_iterator(
self,
shuffle: bool,
tokens_per_batch: int,
group_by_size: bool = False,
n_sentences: int = -1,
seed: tp.Optional[int] = None,
return_indices: bool = False,
st_scores_cutoff: tp.Optional[tp.Tuple[float, int]] = None,
max_batch_size: tp.Optional[int] = None,
):
"""
Return a sentences iterator.
"""
assert seed is None or shuffle is True and type(seed) is int
rng = np.random.RandomState(seed)
n_sentences = len(self.pos) if n_sentences == -1 else n_sentences
n_sentences = min(len(self.pos), n_sentences)
assert 0 < n_sentences <= len(self.pos)
assert type(shuffle) is bool and type(group_by_size) is bool
# assert group_by_size is False or shuffle is True
# sentence lengths
lengths = self.lengths + 2
# select sentences to iterate over
if shuffle:
indices = rng.permutation(len(self.pos))[:n_sentences]
else:
indices = np.arange(n_sentences)
if st_scores_cutoff is not None:
logger.info(f"st scores cutoff: {st_scores_cutoff}")
assert self.st_tests_scores is not None
            assert len(self.st_tests_scores) == len(
                indices
            ), f"length of scores should match the number of indices, got {len(self.st_tests_scores)} and {len(indices)}"
initial_size = len(indices)
assert (
len(st_scores_cutoff) == 2
), f"st_scores_cutoff should contain min mutation score and asserts, was {st_scores_cutoff}"
min_mutation_score, min_asserts = st_scores_cutoff
indices = np.array(
[
i
for i in indices
if self.st_tests_scores[i][0] >= min_mutation_score
and self.st_tests_scores[i][1] >= min_asserts
]
)
logger.info(
f"st scores cutoff: removed {initial_size - len(indices)} element from the {initial_size} initial elements"
)
# group sentences by lengths
if group_by_size:
indices = indices[np.argsort(lengths[indices], kind="mergesort")]
# create batches - either have a fixed number of sentences, or a similar number of tokens
if tokens_per_batch == -1:
batches = np.array_split(
indices, math.ceil(len(indices) * 1.0 / self.batch_size)
)
else:
batch_ids = np.cumsum(lengths[indices]) // tokens_per_batch
_, bounds = np.unique(batch_ids, return_index=True)
batches = [
indices[bounds[i] : bounds[i + 1]] for i in range(len(bounds) - 1)
]
if bounds[-1] < len(indices):
batches.append(indices[bounds[-1] :])
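            # Worked example (illustrative numbers): with lengths[indices] = [3, 5, 4, 6]
            # and tokens_per_batch = 8, np.cumsum gives [3, 8, 12, 18], so
            # batch_ids = [0, 1, 1, 2] and bounds = [0, 1, 3]; the slicing above yields
            # batches [i0] and [i1, i2], and the trailing [i3] is appended by the
            # bounds[-1] check, keeping every batch close to the token budget.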
# optionally shuffle batches
if shuffle:
rng.shuffle(batches)
# sanity checks
assert len(indices) == sum([len(x) for x in batches])
if st_scores_cutoff is None:
assert len(indices) == n_sentences
assert lengths[indices].sum() == sum([lengths[x].sum() for x in batches])
# assert set.union(*[set(x.tolist()) for x in batches]) == set(range(n_sentences)) # slow
# return the iterator
return self.get_batches_iterator(batches, return_indices, max_batch_size)
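# Usage sketch (hypothetical values): iterate over token-budgeted batches of a Dataset.
# The exact structure of each yielded batch comes from batch_sentences, defined earlier
# on this class and not shown in this excerpt (typically a padded LongTensor of word
# ids together with the sentence lengths).
#
#   iterator = dataset.get_iterator(
#       shuffle=True, tokens_per_batch=3000, group_by_size=True, seed=0
#   )
#   for batch in iterator:
#       ...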
class ParallelDataset(Dataset):
def __init__(
self,
sent_list,
pos_list,
params,
span_prediction=False,
has_sentence_ids=None,
unit_tests_st=False,
) -> None:
"""
:param sent_list: list of sentences tensors. The order is (src, tgt, (optional) span)
:param pos_list: list of positions of each sample
:param span_prediction: whether it predicts spans or sentences
The length of the lists should be 2 when doing translation or span classification
and 3 when doing translation using spans
"""
assert len(pos_list) == 2 or len(pos_list) == 3
self.eos_index = params.eos_index
self.pad_index = params.pad_index
self.sep_index = params.sep_index
self.batch_size = params.batch_size
self.max_batch_size = params.max_batch_size
self.has_sentence_ids = has_sentence_ids
self.unit_tests_st = unit_tests_st
self.sent_list = sent_list
self.pos_list = pos_list
self.lengths_list: tp.List[torch.Tensor] = [
pos[:, 1] - pos[:, 0] for pos in self.pos_list
]
self.span_prediction = span_prediction
self.mt_with_spans = len(self.pos_list) == 3
# check number of sentences
assert all(
[len(pos) == len(self) > 0 for pos in self.pos_list]
), f"number of sentences do not match {[len(pos) for pos in self.pos_list]}"
# remove empty sentences
self.remove_empty_sentences()
# sanity checks
self.check()
def __len__(self):
"""
Number of sentences in the dataset.
"""
return len(self.pos_list[0])
def check(self):
"""
Sanity checks.
"""
eos = self.eos_index
# check number of sentences
assert all([len(pos) == len(self) > 0 for pos in self.pos_list])
# check sentences indices
for i, (pos, sent) in enumerate(zip(self.pos_list, self.sent_list)):
assert (
len(pos) == (sent[pos[:, 1]] == eos).sum()
), f"size of pos: {len(pos)}. num eos:{(sent == self.eos_index).sum()}"
# check dictionary indices
assert eos <= sent.min() < sent.max()
if self.span_prediction:
break
if i == 1:
break
if self.span_prediction:
assert (
len(self.pos_list) == len(self.sent_list) == len(self.lengths_list) == 2
)
assert len(self.sent_list[0]) == len(self.sent_list[1])
if self.has_sentence_ids:
assert all(self.lengths_list[0] > self.lengths_list[1])
else:
assert all(self.lengths_list[0] == self.lengths_list[1])
if self.mt_with_spans:
# the spans are in position 3
assert (
len(self.pos_list) == len(self.sent_list) == len(self.lengths_list) == 3
)
assert len(self.sent_list[0]) == len(self.sent_list[2])
if self.has_sentence_ids:
assert all(self.lengths_list[0] > self.lengths_list[2])
else:
assert all(self.lengths_list[0] == self.lengths_list[2])
# check empty sentences
for lengths in self.lengths_list:
assert lengths.min() > 0
def remove_empty_sentences(self):
"""
Remove empty sentences.
"""
init_size = len(self)
indices = np.arange(len(self))
for lengths in self.lengths_list:
indices = indices[lengths[indices] > 0]
self.pos_list = [pos[indices] for pos in self.pos_list]
self.lengths_list = [pos[:, 1] - pos[:, 0] for pos in self.pos_list]
logger.info("Removed %i empty sentences." % (init_size - len(indices)))
self.check()
def remove_long_sentences(self, max_len):
"""
Remove sentences exceeding a certain length.
"""
assert max_len >= 0
if max_len == 0:
return
init_size = len(self)
indices = np.arange(len(self))
for lengths in self.lengths_list:
indices = indices[lengths[indices] <= max_len]
self.pos_list = [pos[indices] for pos in self.pos_list]
self.lengths_list = [pos[:, 1] - pos[:, 0] for pos in self.pos_list]
logger.info("Removed %i too long sentences." % (init_size - len(indices)))
self.check()
def select_data(self, a, b):
"""
Only select a subset of the dataset.
"""
assert 0 <= a < b <= len(self)
logger.info("Selecting sentences from %i to %i ..." % (a, b))
# sub-select
self.pos_list = [pos[a:b] for pos in self.pos_list]
self.lengths_list = [pos[:, 1] - pos[:, 0] for pos in self.pos_list]
# re-index
min_pos_list = [pos.min() for pos in self.pos_list]
max_pos_list = [pos.max() for pos in self.pos_list]
self.pos_list = [
pos - min_pos for pos, min_pos in zip(self.pos_list, min_pos_list)
]
self.sent_list = [
sent[min_pos : max_pos + 1]
for sent, min_pos, max_pos in zip(
self.sent_list, min_pos_list, max_pos_list
)
]
# sanity checks
self.check()
def get_batches_iterator(
self,
batches: tp.List[np.ndarray],
return_indices: bool,
max_batch_size: tp.Optional[int] = None,
):
"""
Return a sentences iterator, given the associated sentence batches.
"""
assert type(return_indices) is bool
if max_batch_size is None:
max_batch_size = self.max_batch_size
for sentence_ids in batches:
if 0 < max_batch_size < len(sentence_ids):
np.random.shuffle(sentence_ids)
sentence_ids = sentence_ids[:max_batch_size]
pos = [pos[sentence_ids] for pos in self.pos_list]
split_sentences_id = [self.has_sentence_ids] * len(pos)
# Do not split sentence IDs for spans
if self.span_prediction:
assert len(split_sentences_id) == 2
split_sentences_id[1] = False
if self.mt_with_spans:
assert len(split_sentences_id) == 3
split_sentences_id[2] = False
sents = [
self.batch_sentences([sent[a:b] for a, b in pos], split_id)
for split_id, pos, sent in zip(split_sentences_id, pos, self.sent_list)
]
yield (sents, sentence_ids) if return_indices else sents
def get_iterator(
self,
shuffle: bool,
tokens_per_batch: int,
group_by_size: bool = False,
n_sentences: int = -1,
seed: tp.Optional[int] = None,
return_indices: bool = False,
st_scores_cutoff: tp.Optional[tp.Tuple[float, int]] = None,
max_batch_size: tp.Optional[int] = None,
):
"""
Return a sentences iterator.
"""
assert seed is None or shuffle is True and type(seed) is int
rng = np.random.RandomState(seed)
n_sentences = len(self) if n_sentences == -1 else n_sentences
n_sentences = min(len(self), n_sentences)
assert 0 < n_sentences <= len(self)
assert type(shuffle) is bool and type(group_by_size) is bool
# sentence lengths
lengths: tp.Any = sum(self.lengths_list) + 2 * len(self.lengths_list)
# select sentences to iterate over
if shuffle:
indices = rng.permutation(len(self))[:n_sentences]
else:
indices = np.arange(n_sentences)
# group sentences by lengths
if group_by_size:
indices = indices[np.argsort(lengths[indices], kind="mergesort")] # type: ignore
# create batches - either have a fixed number of sentences, or a similar number of tokens
if tokens_per_batch == -1:
batches = np.array_split(
indices, math.ceil(len(indices) * 1.0 / self.batch_size)
)
else:
batch_ids = np.cumsum(lengths[indices]) // tokens_per_batch
_, bounds = np.unique(batch_ids, return_index=True)
batches = [
indices[bounds[i] : bounds[i + 1]] for i in range(len(bounds) - 1)
]
if bounds[-1] < len(indices):
batches.append(indices[bounds[-1] :])
# optionally shuffle batches
if shuffle:
rng.shuffle(batches)
# sanity checks
assert n_sentences == sum([len(x) for x in batches])
assert lengths[indices].sum() == sum([lengths[x].sum() for x in batches])
# assert set.union(*[set(x.tolist()) for x in batches]) == set(range(n_sentences)) # slow
# return the iterator
return self.get_batches_iterator(batches, return_indices, max_batch_size)
|
CodeGen-main
|
codegen_sources/model/src/data/dataset.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from logging import getLogger
import numpy as np
import torch
from .dataset import StreamDataset, Dataset, ParallelDataset
from .dictionary import BOS_WORD, EOS_WORD, PAD_WORD, UNK_WORD, MASK_WORD
SELF_TRAINED = "self_training"
DATASET_SPLITS = ["train", "valid", "test"]
TRAIN_SPLITS = {"train", SELF_TRAINED}
logger = getLogger()
def process_binarized(data, params):
"""
Process a binarized dataset and log main statistics.
"""
dico = data["dico"]
assert (
(data["sentences"].dtype == np.uint16)
and (dico is None or (len(dico) < 1 << 16))
or (data["sentences"].dtype == np.int32)
and (dico is None or (1 << 16 <= len(dico) < 1 << 31))
)
logger.info(
"%i words (%i unique) in %i sentences. %i unknown words (%i unique) covering %.2f%% of the data."
% (
len(data["sentences"]) - len(data["positions"]),
len(dico) if dico else 0,
len(data["positions"]),
sum(data["unk_words"].values()),
len(data["unk_words"]),
100.0
* sum(data["unk_words"].values())
/ (len(data["sentences"]) - len(data["positions"])),
)
)
if params.max_vocab != -1:
assert params.max_vocab > 0
logger.info("Selecting %i most frequent words ..." % params.max_vocab)
if dico is not None:
dico.max_vocab(params.max_vocab)
data["sentences"][data["sentences"] >= params.max_vocab] = dico.index(
UNK_WORD
)
unk_count = (data["sentences"] == dico.index(UNK_WORD)).sum()
logger.info(
"Now %i unknown words covering %.2f%% of the data."
% (
unk_count,
100.0
* unk_count
/ (len(data["sentences"]) - len(data["positions"])),
)
)
if params.min_count > 0:
logger.info("Selecting words with >= %i occurrences ..." % params.min_count)
dico.min_count(params.min_count)
data["sentences"][data["sentences"] >= len(dico)] = dico.index(UNK_WORD)
unk_count = (data["sentences"] == dico.index(UNK_WORD)).sum()
logger.info(
"Now %i unknown words covering %.2f%% of the data."
% (
unk_count,
100.0
* unk_count
/ (len(data["sentences"]) - len(data["positions"])),
)
)
if (data["sentences"].dtype == np.int32) and (len(dico) < 1 << 16):
logger.info("Less than 65536 words. Moving data from int32 to uint16 ...")
data["sentences"] = data["sentences"].astype(np.uint16)
return data
def load_binarized(path, params):
"""
Load a binarized dataset.
"""
assert path.endswith(".pth")
if params.debug_train:
path = path.replace("train", "valid")
if getattr(params, "multi_gpu", False):
assert params.split_data_accross_gpu in ["local", "global"]
if params.split_data_accross_gpu == "local":
split_path = "%s.%i.pth" % (path[:-4], params.local_rank)
else:
split_path = "%s.%i.pth" % (path[:-4], params.global_rank)
if os.path.isfile(split_path):
assert params.split_data is False
path = split_path
if not os.path.isfile(path):
path = "%s.%i.pth" % (path[:-4], 0)
assert os.path.isfile(path), path
logger.info("Loading data from %s ..." % path)
data = torch.load(path)
data = process_binarized(data, params)
return data
def set_dico_parameters(params, data, dico):
"""
Update dictionary parameters.
"""
if dico is None:
return
if "dico" in data:
assert data["dico"] == dico
else:
data["dico"] = dico
n_words = len(dico)
bos_index = dico.index(BOS_WORD)
eos_index = dico.index(EOS_WORD)
pad_index = dico.index(PAD_WORD)
unk_index = dico.index(UNK_WORD)
mask_index = dico.index(MASK_WORD)
if hasattr(params, "bos_index"):
assert params.n_words == n_words
assert params.bos_index == bos_index
assert params.eos_index == eos_index
assert params.pad_index == pad_index
assert params.unk_index == unk_index
assert params.mask_index == mask_index
else:
params.n_words = n_words
params.bos_index = bos_index
params.eos_index = eos_index
params.pad_index = pad_index
params.unk_index = unk_index
params.mask_index = mask_index
params.sep_index = dico.index("|")
def load_mono_data(params, data):
"""
Load monolingual data.
"""
data["mono"] = {}
data["mono_stream"] = {}
for lang in params.mono_dataset.keys():
logger.info("============ Monolingual data (%s)" % lang)
assert lang in params.langs and lang not in data["mono"]
data["mono"][lang] = {}
data["mono_stream"][lang] = {}
for splt, data_path in params.mono_dataset[lang].items():
if splt == SELF_TRAINED and lang not in params.st_src_langs:
# continue if not doing self training for this language
continue
# no need to load training data for evaluation
if splt in TRAIN_SPLITS and params.eval_only:
continue
if splt not in TRAIN_SPLITS and params.train_only:
continue
# load data / update dictionary parameters / update data
mono_data = load_binarized(data_path, params)
set_dico_parameters(params, data, mono_data["dico"])
# create stream dataset
bs = params.batch_size if splt == "train" else 1
data["mono_stream"][lang][splt] = StreamDataset(
mono_data["sentences"], mono_data["positions"], bs, params
)
# if there are several processes on the same machine, we can split the dataset
if (
splt in TRAIN_SPLITS
and params.split_data
and 1
< params.n_gpu_per_node
<= data["mono_stream"][lang][splt].n_batches
):
n_batches = (
data["mono_stream"][lang][splt].n_batches // params.n_gpu_per_node
)
a = n_batches * params.local_rank
b = n_batches * params.local_rank + n_batches
data["mono_stream"][lang][splt].select_data(a, b)
# for denoising auto-encoding and online back-translation, we need a non-stream (batched) dataset
if (
lang in params.ae_steps
or lang in params.bt_src_langs
or lang
in [l1 for l1, l2 in params.cmt_steps]
+ [l1 for l1, l2 in params.disc_steps]
or (lang in params.st_src_langs and splt == SELF_TRAINED)
or lang in params.eval_computation_pivot_self
):
# create batched dataset
dataset = Dataset(
mono_data["sentences"],
mono_data["positions"],
params,
has_sentence_ids=(splt, (lang,)) in params.has_sentence_ids,
unit_tests_st=splt == SELF_TRAINED,
)
# remove empty and too long sentences
# if splt in TRAIN_SPLITS:
dataset.remove_empty_sentences()
dataset.remove_long_sentences(params.max_len)
if splt == SELF_TRAINED:
dataset.compute_st_scores(params, data["dico"])
data[f"java_st_unit_tests"] = dataset.unit_tests
data[f"java_st_tests_scores"] = dataset.st_tests_scores
# if there are several processes on the same machine, we can split the dataset
if (
splt in TRAIN_SPLITS
and params.n_gpu_per_node > 1
and params.split_data
):
n_sent = len(dataset) // params.n_gpu_per_node
a = n_sent * params.local_rank
b = n_sent * params.local_rank + n_sent
dataset.select_data(a, b)
data["mono"][lang][splt] = dataset
logger.info("")
logger.info("")
def load_para_data(params, data):
"""
Load parallel data.
"""
data["para"] = {}
required_para_train = set(
params.clm_steps
+ params.mlm_steps
+ params.mt_steps
+ params.mt_spans_steps
+ params.do_steps
+ params.classif_steps
+ params.tae_steps
)
for key in params.para_dataset.keys():
span = None
if len(key) == 2:
src, tgt = key
else:
src, tgt, span = key
if span is None:
logger.info("============ Parallel data (%s-%s)" % (src, tgt))
else:
logger.info("============ Parallel data (%s/%s-%s)" % (src, span, tgt))
assert key not in data["para"]
data["para"][key] = {}
for splt in DATASET_SPLITS:
# no need to load training data for evaluation
if splt in TRAIN_SPLITS and params.eval_only:
continue
if splt not in TRAIN_SPLITS and params.train_only:
continue
# for back-translation, we can't load training data
if splt == "train" and (
(
span is None
and (src, tgt) not in required_para_train
and (tgt, src) not in required_para_train
)
or (
span is not None
and (src, tgt, span) not in required_para_train
and (tgt, src, span) not in required_para_train
)
):
continue
# load binarized datasets
paths = params.para_dataset[key][splt]
span_path = None
if span is None:
src_path, tgt_path = paths
else:
src_path, tgt_path, span_path = paths
src_data = load_binarized(src_path, params)
tgt_data = load_binarized(tgt_path, params)
span_data = load_binarized(span_path, params) if span_path else None
# update dictionary parameters
set_dico_parameters(params, data, src_data["dico"])
set_dico_parameters(params, data, tgt_data["dico"])
if span_data is not None:
set_dico_parameters(params, data, span_data["dico"])
sent_list = [src_data["sentences"], tgt_data["sentences"]]
pos_list = [src_data["positions"], tgt_data["positions"]]
if span_data is not None:
sent_list.append(span_data["sentences"])
pos_list.append(span_data["positions"])
print(f"loading parallel {splt} {src}, {tgt} data")
dataset = ParallelDataset(
sent_list,
pos_list,
params,
span_prediction=tgt_data["dico"] is None,
has_sentence_ids=(splt, (src, tgt)) in params.has_sentence_ids,
)
# remove empty and too long sentences
# if splt == 'train':
dataset.remove_empty_sentences()
dataset.remove_long_sentences(params.max_len)
# if there are several processes on the same machine, we can split the dataset
if splt in TRAIN_SPLITS and params.n_gpu_per_node > 1 and params.split_data:
n_sent = len(dataset) // params.n_gpu_per_node
a = n_sent * params.local_rank
b = n_sent * params.local_rank + n_sent
dataset.select_data(a, b)
if span is None:
data["para"][(src, tgt)][splt] = dataset
else:
data["para"][(src, tgt, span)][splt] = dataset
logger.info("")
logger.info("")
def check_data_params(params):
"""
Check datasets parameters.
"""
# data path
assert os.path.isdir(params.data_path), f"Not a directory: {params.data_path}"
if params.eval_tokens_per_batch is None:
params.eval_tokens_per_batch = params.tokens_per_batch
# check languages
params.langs = params.lgs.split("-") if params.lgs != "debug" else ["en"]
assert len(params.langs) == len(set(params.langs)) >= 1, [
l for l in params.langs if params.langs.count(l) >= 2
]
# assert sorted(params.langs) == params.langs
params.id2lang = {k: v for k, v in enumerate(sorted(params.langs))}
params.lang2id = {k: v for v, k in params.id2lang.items()}
params.n_langs = len(params.langs)
if params.lgs_id_mapping != "":
mappings = params.lgs_id_mapping.split(",")
for m in mappings:
split = m.split(":")
assert len(split) == 2, f"Cannot parse {m} in {params.lgs_id_mapping}"
source, dest = split
assert (
source in params.langs
), f"unknown source {source} from {m}. Not part of the languages in {params.langs}"
assert (
dest in params.langs
), f"unknown destination language {dest} from {m}. Not part of the languages in {params.langs}"
params.lang2id[source] = params.lang2id[dest]
# CLM steps
clm_steps = [s.split("-") for s in params.clm_steps.split(",") if len(s) > 0]
params.clm_steps = [(s[0], None) if len(s) == 1 else tuple(s) for s in clm_steps]
assert all(
[
(l1 in params.langs) and (l2 in params.langs or l2 is None)
for l1, l2 in params.clm_steps
]
)
assert len(params.clm_steps) == len(set(params.clm_steps))
# MLM / TLM steps
mlm_steps = [s.split("-") for s in params.mlm_steps.split(",") if len(s) > 0]
params.mlm_steps = [(s[0], None) if len(s) == 1 else tuple(s) for s in mlm_steps]
assert all(
[
(l1 in params.langs) and (l2 in params.langs or l2 is None)
for l1, l2 in params.mlm_steps
]
)
assert len(params.mlm_steps) == len(set(params.mlm_steps))
# machine translation steps
params.mt_steps = [
tuple(s.split("-")) for s in params.mt_steps.split(",") if len(s) > 0
]
assert all([len(x) == 2 for x in params.mt_steps])
assert all(
[l1 in params.langs and l2 in params.langs for l1, l2 in params.mt_steps]
)
assert all([l1 != l2 for l1, l2 in params.mt_steps])
assert len(params.mt_steps) == len(set(params.mt_steps))
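    # Illustrative flag format (hypothetical languages): --mt_steps is a comma-separated
    # list of "src-tgt" pairs, e.g. "java-python,python-java"; both languages must be
    # listed in --lgs.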
params.mt_spans_steps = [
tuple(s.split("-")) for s in params.mt_spans_steps.split(",") if len(s) > 0
]
assert all((len(split) == 3 for split in params.mt_spans_steps))
assert all(
[l1 != l2 and l1 != l3 and l2 != l3 for l1, l2, l3 in params.mt_spans_steps]
)
assert len(params.mt_spans_steps) == len(set(params.mt_spans_steps))
assert (
len(params.mt_steps) + len(params.mt_spans_steps) == 0
or not params.encoder_only
)
    assert (
        len(params.mt_spans_steps) > 0
    ) == params.spans_emb_encoder, f"spans_emb_encoder must be set if and only if mt_spans steps are used, got {len(params.mt_spans_steps)} mt_spans steps and spans_emb_encoder={params.spans_emb_encoder}"
# do steps
params.do_steps = [
tuple(s.split("-")) for s in params.do_steps.split(",") if len(s) > 0
]
assert all([len(x) == 2 for x in params.do_steps])
assert all(
[l1 in params.langs and l2 in params.langs for l1, l2 in params.do_steps]
), f"One or more step of {params.do_steps} is not in languages {params.langs}"
assert all([l1 != l2 for l1, l2 in params.do_steps])
assert len(params.do_steps) == len(set(params.do_steps))
# classification steps
params.classif_steps = [
tuple(s.split("-")) for s in params.classif_steps.split(",") if len(s) > 0
]
assert all([len(x) == 2 for x in params.classif_steps])
assert all([l1 in params.langs for l1, l2 in params.classif_steps])
assert all([l1 != l2 for l1, l2 in params.classif_steps])
assert len(params.classif_steps) == len(set(params.classif_steps))
assert (
len(params.classif_steps) + len(params.mt_spans_steps) == 0
or not params.n_classes_classif <= 0
)
params.use_classifier = True if len(params.classif_steps) > 0 else False
# denoising auto-encoder steps
params.ae_steps = [s for s in params.ae_steps.split(",") if len(s) > 0]
assert all([lang in params.langs for lang in params.ae_steps])
assert len(params.ae_steps) == len(set(params.ae_steps))
assert len(params.ae_steps) == 0 or not params.encoder_only
params.tae_steps = [
tuple(s.split("-")) for s in params.tae_steps.split(",") if len(s) > 0
]
assert all([lang in params.langs for langs in params.tae_steps for lang in langs])
assert len(params.tae_steps) == len(
set(params.tae_steps)
), f"non unique elements in {params.tae_steps}"
assert len(params.tae_steps) == 0 or not params.encoder_only
# back-translation steps
params.bt_steps = [
tuple(s.split("-")) for s in params.bt_steps.split(",") if len(s) > 0
]
assert all([len(x) == 3 for x in params.bt_steps])
assert all(
[
l1 in params.langs and l2 in params.langs and l3 in params.langs
for l1, l2, l3 in params.bt_steps
]
)
assert all([l1 == l3 and l1 != l2 for l1, l2, l3 in params.bt_steps])
assert len(params.bt_steps) == len(set(params.bt_steps))
assert len(params.bt_steps) == 0 or not params.encoder_only
params.bt_src_langs = [l1 for l1, _, _ in params.bt_steps]
# self-training steps
params.st_steps = sorted(
[
(s.split("-")[0], tuple(s.split("-")[1].split("|")))
for s in params.st_steps.split(",")
if len(s) > 0
]
)
assert all([len(x) == 2 for x in params.st_steps])
assert all(
[
l1 in params.langs and all([l2 in params.langs for l2 in langs2])
for l1, langs2 in params.st_steps
]
), params.st_steps
assert all([l1 != l2 for l1, langs2 in params.st_steps for l2 in langs2])
assert len(params.st_steps) == len(set(params.st_steps))
assert all([len(langs2) > 0 for l1, langs2 in params.st_steps]), params.st_steps
params.st_src_langs = [l1 for l1, _ in params.st_steps]
params.st_tgt_langs = list(
set([l2 for _, langs2 in params.st_steps for l2 in langs2])
)
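    # Illustrative flag format (hypothetical languages): each self-training step is
    # written "src-tgt1|tgt2|...", e.g. --st_steps "java-python|cpp" uses java as the
    # source language and python and cpp as the target languages.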
if len(params.st_src_langs) > 0:
logger.info(f"st source langs: {params.st_src_langs}")
logger.info(f"st target langs: {params.st_tgt_langs}")
# unit tests path
assert os.path.isfile(params.unit_tests_path), params.unit_tests_path
# pairs for which we should evaluate computationally with pivot
params.eval_computation_pivot = sorted(
[
tuple(s.split("-"))
for s in params.eval_computation_pivot.split(",")
if len(s) > 0
]
)
params.eval_computation_pivot_self = sorted(
[x[0] for x in params.eval_computation_pivot if len(x) == 1]
)
params.eval_computation_pivot = sorted(
[x for x in params.eval_computation_pivot if len(x) != 1]
)
assert all([len(x) == 2 for x in params.eval_computation_pivot])
assert all(
[
l1 in params.langs and l2 in params.langs
for l1, l2 in params.eval_computation_pivot
]
)
assert all(
[l in params.langs for l in params.eval_computation_pivot_self]
), params.eval_computation_pivot_self
assert all([l1 != l2 for l1, l2 in params.eval_computation_pivot])
assert len(params.eval_computation_pivot) == len(set(params.eval_computation_pivot))
# check monolingual datasets
required_mono = set(
[l1 for l1, l2 in (params.mlm_steps + params.clm_steps) if l2 is None]
+ params.ae_steps
+ params.bt_src_langs
)
params.mono_dataset = {
lang: {
splt: os.path.join(params.data_path, "%s.%s.pth" % (splt, lang))
for splt in DATASET_SPLITS
}
for lang in params.langs
if lang in required_mono
}
for lang in params.eval_computation_pivot_self:
if lang not in params.mono_dataset:
params.mono_dataset[lang] = dict()
for splt in [s for s in DATASET_SPLITS if s not in TRAIN_SPLITS]:
params.mono_dataset[lang][splt] = os.path.join(
params.data_path, "%s.%s.pth" % (splt, lang)
)
for lang in params.st_src_langs:
if lang not in params.mono_dataset:
params.mono_dataset[lang] = dict()
params.mono_dataset[lang][SELF_TRAINED] = os.path.join(
params.data_path, "%s.%s.pth" % (SELF_TRAINED, lang)
)
for paths in params.mono_dataset.values():
for p in paths.values():
if not os.path.isfile(p):
logger.error(f"{p} not found")
assert not (params.eval_only and params.train_only)
if params.train_only:
assert params.stopping_criterion == ""
if not (params.eval_only or params.train_only):
assert all(
[
all(
[
os.path.isfile(p) or os.path.isfile(p.replace("pth", "0.pth"))
for p in paths.values()
]
)
for paths in params.mono_dataset.values()
]
), [
[
p
for p in paths.values()
if not (os.path.isfile(p) or os.path.isfile(p.replace("pth", "0.pth")))
]
for paths in params.mono_dataset.values()
]
assert isinstance(
params.n_sentences_eval, int
), f"n_sentences_eval was {params.n_sentences_eval}, it should be an int"
# check parallel datasets
required_para_train = set(
params.clm_steps
+ params.mlm_steps
+ params.mt_steps
+ params.classif_steps
+ params.do_steps
+ params.tae_steps
)
required_para = (
required_para_train
| set([(l2, l3) for _, l2, l3 in params.bt_steps])
| set([(l1, l2) for l1, langs2 in params.st_steps for l2 in langs2])
| set([(l2, l1) for l1, langs2 in params.st_steps for l2 in langs2])
| set(
[
(l2_1, l2_2)
for l1, langs2 in params.st_steps
for l2_1 in langs2
for l2_2 in langs2
if l2_1 != l2_2
]
)
)
# pairs for which we should evaluate computationally
    if params.eval_computation.lower() == "false" or params.eval_computation == "0":
params.eval_computation = ""
if (
params.eval_computation.lower() == "true"
or params.eval_computation == "1"
or params.eval_computation.lower() == "all"
):
params.eval_computation = list(required_para)
else:
params.eval_computation = [
tuple(s.split("-"))
for s in params.eval_computation.split(",")
if len(s) > 0
]
assert all([len(x) == 2 for x in params.eval_computation])
assert all(
[
l1 in params.langs and l2 in params.langs
for l1, l2 in params.eval_computation
]
)
assert all([l1 != l2 for l1, l2 in params.eval_computation])
assert len(params.eval_computation) == len(set(params.eval_computation))
required_para |= set(params.eval_computation)
params.eval_computation.sort()
# pairs for which we should evaluate by recomputing the IR
    if params.eval_ir_similarity.lower() == "false" or params.eval_ir_similarity == "0":
params.eval_ir_similarity = ""
if (
params.eval_ir_similarity.lower() == "true"
or params.eval_ir_similarity == "1"
or params.eval_ir_similarity.lower() == "all"
):
params.eval_ir_similarity = [
(lang1, lang2) for lang1, lang2 in required_para if "ir" in lang1
]
else:
params.eval_ir_similarity = [
tuple(s.split("-"))
for s in params.eval_ir_similarity.split(",")
if len(s) > 0
]
assert all([len(x) == 2 for x in params.eval_ir_similarity])
assert all(
[
l1 in params.langs and l2 in params.langs
for l1, l2 in params.eval_ir_similarity
]
)
assert all([l1 != l2 for l1, l2 in params.eval_ir_similarity])
assert len(params.eval_ir_similarity) == len(set(params.eval_ir_similarity))
required_para |= set(params.eval_ir_similarity)
params.eval_ir_similarity.sort()
required_para |= set(params.eval_computation_pivot)
if len(params.eval_computation_pivot) > 0:
assert os.path.exists(
params.pivot_bpe_model
), f"'{params.pivot_bpe_model}' not found, required for ir pivot"
params.eval_computation_pivot.sort()
params.para_dataset = {
(src, tgt): {
splt: (
os.path.join(
params.data_path, "%s.%s-%s.%s.pth" % (splt, src, tgt, src)
),
os.path.join(
params.data_path, "%s.%s-%s.%s.pth" % (splt, src, tgt, tgt)
),
)
for splt in DATASET_SPLITS
if splt != "train"
or (src, tgt) in required_para_train
or (tgt, src) in required_para_train
}
for src in params.langs
for tgt in params.langs
if src < tgt and ((src, tgt) in required_para or (tgt, src) in required_para)
}
for lang, label in params.classif_steps:
params.para_dataset[(lang, label)] = {
splt: (
os.path.join(
params.data_path, "%s.%s-%s.%s.pth" % (splt, lang, label, lang)
),
os.path.join(
params.data_path, "%s.%s-%s.%s.pth" % (splt, lang, label, label)
),
)
for splt in DATASET_SPLITS
}
for lang1, lang2, span in params.mt_spans_steps:
params.para_dataset[(lang1, lang2, span)] = {
splt: (
os.path.join(
params.data_path, "%s.%s-%s.%s.pth" % (splt, lang1, lang2, lang1)
),
os.path.join(
params.data_path, "%s.%s-%s.%s.pth" % (splt, lang1, lang2, lang2)
),
os.path.join(
params.data_path, "%s.%s-%s.%s.pth" % (splt, lang1, span, span)
),
)
for splt in DATASET_SPLITS
}
for step_paths in params.para_dataset.values():
for paths in step_paths.values():
for p in paths:
if not os.path.isfile(p):
logger.error(f"{p} not found")
params.validation_metrics = params.validation_metrics.replace(
"#obf_proba", str(params.obf_proba)
)
params.stopping_criterion = params.stopping_criterion.replace(
"#obf_proba", str(params.obf_proba)
)
# parse which datasets should have sentence ids
params.has_sentence_ids = (
[s.split("|") for s in params.has_sentence_ids.split(",")]
if params.has_sentence_ids != ""
else []
)
assert all([len(x) == 2 for x in params.has_sentence_ids]), params.has_sentence_ids
params.has_sentence_ids = [
(split, tuple(langs.split("-"))) for split, langs in params.has_sentence_ids
]
assert all(
[len(langs) == 1 or len(langs) == 2 for split, langs in params.has_sentence_ids]
), params.has_sentence_ids
for split, langs in params.has_sentence_ids:
if langs == ("para",) or langs == ("all",):
params.has_sentence_ids += [
(split, langs) for langs in params.para_dataset.keys()
]
if langs == ("mono",) or langs == ("all",):
params.has_sentence_ids += [
(split, (lang,)) for lang in params.mono_dataset.keys()
]
assert all(
[
all([lang in params.langs + ["para", "mono", "all"] for lang in langs])
for split, langs in params.has_sentence_ids
]
), params.has_sentence_ids
print(f"Datasets with IDs: {params.has_sentence_ids}")
assert len(set(params.has_sentence_ids)) == len(params.has_sentence_ids)
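    # Illustrative flag format (hypothetical values): --has_sentence_ids is a
    # comma-separated list of "split|langs" entries, e.g. "valid|java-python" for a
    # parallel pair, "valid|python" for a monolingual set, or "valid|all" / "valid|para"
    # / "valid|mono" to expand to every matching dataset as done in the loop above.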
assert (
len(params.mono_dataset) > 0 or len(params.para_dataset) > 0
), "No dataset to be loaded, you probably forget to set a training step."
for l1, l2 in params.eval_computation + params.eval_computation_pivot:
if l1 < l2:
for splt in DATASET_SPLITS:
if splt not in TRAIN_SPLITS:
assert (
splt,
(l1, l2),
) in params.has_sentence_ids, f"computation eval for {(splt, (l1, l2))} but no sentence ids: {params.has_sentence_ids}"
params.bt_max_len = (
params.max_len if params.bt_max_len is None else params.bt_max_len
)
# assert all([all([os.path.isfile(p1) and os.path.isfile(p2) for p1, p2 in paths.values()]) for paths in params.para_dataset.values()])
# check that we can evaluate on BLEU
# assert params.eval_bleu is False or len(params.mt_steps + params.bt_steps) > 0
def load_data(params):
"""
Load monolingual data.
The returned dictionary contains:
- dico (dictionary)
- vocab (FloatTensor)
- train / valid / test (monolingual datasets)
"""
data = {}
# monolingual datasets
load_mono_data(params, data)
# parallel datasets
load_para_data(params, data)
# monolingual data summary
logger.info("============ Data summary")
for lang, v in data["mono_stream"].items():
for data_set in v.keys():
logger.info(
"{: <18} - {: >5} - {: >12}:{: >10}".format(
"Monolingual data", data_set, lang, len(v[data_set])
)
)
# parallel data summary
for key, v in data["para"].items():
for data_set in v.keys():
logger.info(
"{: <18} - {: >5} - {: >12}:{: >10}".format(
"Parallel data", data_set, "%s" % "-".join(key), len(v[data_set])
)
)
logger.info("")
return data
|
CodeGen-main
|
codegen_sources/model/src/data/loader.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import typing as tp
from pathlib import Path
from logging import getLogger
import numpy as np
import torch
logger = getLogger()
D = tp.TypeVar("D", bound="Dictionary")
BOS_WORD = "<s>"
EOS_WORD = "</s>"
PAD_WORD = "<pad>"
UNK_WORD = "<unk>"
SPECIAL_WORD = "<special%i>"
SPECIAL_WORDS = 10
OBF = {"CLASS": "CLASS_%i", "FUNC": "FUNC_%i", "VAR": "VAR_%i"}
OBFS = {"CLASS": 100, "FUNC": 200, "VAR": 200}
OBFS_TOTAL = sum(OBFS.values())
ENDBLOCK = "#ENDBLOCK"
ENDFUNC = "#ENDFUNC"
ENDCLASS = "#ENDCLASS"
SEP_WORD = SPECIAL_WORD % 0
MASK_WORD = SPECIAL_WORD % 1
NUM_SPECIAL_TOKENS = 4 + SPECIAL_WORDS + OBFS_TOTAL
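# With the sizes above, the reserved obfuscation tokens are CLASS_0..CLASS_99,
# FUNC_0..FUNC_199 and VAR_0..VAR_199 (OBFS_TOTAL = 500), so the special region of the
# vocabulary holds NUM_SPECIAL_TOKENS = 4 + 10 + 500 = 514 entries before regular words.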
class Dictionary:
def __init__(self, id2word, word2id, counts) -> None:
assert len(id2word) == len(word2id) == len(counts), (
len(id2word),
len(word2id),
len(counts),
)
self.id2word = id2word
self.word2id = word2id
self.counts = counts
self.bos_index = word2id[BOS_WORD]
self.eos_index = word2id[EOS_WORD]
self.pad_index = word2id[PAD_WORD]
self.unk_index = word2id[UNK_WORD]
if OBF["CLASS"] % 0 in word2id:
self.obf_index = {
"CLASS": word2id[OBF["CLASS"] % 0],
"FUNC": word2id[OBF["FUNC"] % 0],
"VAR": word2id[OBF["VAR"] % 0],
}
else:
self.obf_index = dict()
self.n_obf_tokens = OBFS_TOTAL
self.check_valid()
def __len__(self) -> int:
"""
Returns the number of words in the dictionary.
"""
return len(self.id2word)
def __getitem__(self, i: int) -> str:
"""
Returns the word of the specified index.
"""
return self.id2word[i]
def __contains__(self, w: str) -> bool:
"""
Returns whether a word is in the dictionary.
"""
return w in self.word2id
def __eq__(self, y):
"""
Compare this dictionary with another one.
"""
self.check_valid()
y.check_valid()
if len(self.id2word) != len(y):
return False
return all(self.id2word[i] == y[i] for i in range(len(y)))
def check_valid(self) -> None:
"""
Check that the dictionary is valid.
"""
assert self.bos_index == 0
assert self.eos_index == 1
assert self.pad_index == 2
assert self.unk_index == 3
assert all(
self.id2word[4 + i] == SPECIAL_WORD % i for i in range(SPECIAL_WORDS)
)
for TYPE in ["CLASS", "FUNC", "VAR"]:
assert all(
len(self.obf_index) == 0
or self.id2word[i + self.obf_index[TYPE]] == OBF[TYPE] % i
for i in range(OBFS[TYPE])
)
assert len(self.id2word) == len(self.word2id) == len(self.counts)
assert set(self.word2id.keys()) == set(self.counts.keys())
for i in range(len(self.id2word)):
assert self.word2id[self.id2word[i]] == i
last_count = 1e18
for i in range(NUM_SPECIAL_TOKENS, len(self.id2word) - 1):
count = self.counts[self.id2word[i]]
assert count <= last_count
last_count = count
def index(self, word: str, no_unk: bool = False) -> int:
"""
Returns the index of the specified word.
"""
        try:  # faster to ask for forgiveness if need be
return self.word2id[word]
except KeyError as e:
if no_unk:
raise e
return self.unk_index
def max_vocab(self, max_vocab):
"""
Limit the vocabulary size.
"""
assert max_vocab >= 1
init_size = len(self)
self.id2word = {k: v for k, v in self.id2word.items() if k < max_vocab}
self.word2id = {v: k for k, v in self.id2word.items()}
self.counts = {k: v for k, v in self.counts.items() if k in self.word2id}
self.check_valid()
logger.info(
"Maximum vocabulary size: %i. Dictionary size: %i -> %i (removed %i words)."
% (max_vocab, init_size, len(self), init_size - len(self))
)
def min_count(self, min_count):
"""
Threshold on the word frequency counts.
"""
assert min_count >= 0
init_size = len(self)
self.id2word = {
k: v
for k, v in self.id2word.items()
if self.counts[self.id2word[k]] >= min_count or k < NUM_SPECIAL_TOKENS
}
self.word2id = {v: k for k, v in self.id2word.items()}
self.counts = {k: v for k, v in self.counts.items() if k in self.word2id}
self.check_valid()
logger.info(
"Minimum frequency count: %i. Dictionary size: %i -> %i (removed %i words)."
% (min_count, init_size, len(self), init_size - len(self))
)
@classmethod
def read_vocab(cls: tp.Type[D], vocab_path: tp.Union[str, Path]) -> D:
"""
Create a dictionary from a vocabulary file.
"""
vocab_path = str(vocab_path)
skipped = 0
assert os.path.isfile(vocab_path), vocab_path
word2id = {BOS_WORD: 0, EOS_WORD: 1, PAD_WORD: 2, UNK_WORD: 3}
for i in range(SPECIAL_WORDS):
word2id[SPECIAL_WORD % i] = 4 + i
for i in range(OBFS_TOTAL):
if i < OBFS["CLASS"]:
word2id[OBF["CLASS"] % i] = 4 + SPECIAL_WORDS + i
elif i < OBFS["CLASS"] + OBFS["FUNC"]:
word2id[OBF["FUNC"] % (i - OBFS["CLASS"])] = 4 + SPECIAL_WORDS + i
else:
word2id[OBF["VAR"] % (i - OBFS["CLASS"] - OBFS["FUNC"])] = (
4 + SPECIAL_WORDS + i
)
counts = {k: 0 for k in word2id.keys()}
f = open(vocab_path, "r", encoding="utf-8")
for i, line_str in enumerate(f):
if "\u2028" in line_str:
skipped += 1
continue
line = line_str.rstrip().split()
if len(line) != 2:
skipped += 1
continue
assert len(line) == 2, (i, line)
# assert line[0] not in word2id and line[1].isdigit(), (i, line)
assert line[1].lstrip("-").isdigit(), (i, line)
if line[0] in word2id:
skipped += 1
logger.info("%s already in vocab" % line[0])
continue
if not line[1].lstrip("-").isdigit():
skipped += 1
logger.info("Empty word at line %s with count %s" % (i, line))
continue
word2id[line[0]] = (
NUM_SPECIAL_TOKENS + i - skipped
) # shift because of extra words
counts[line[0]] = int(line[1])
f.close()
id2word = {v: k for k, v in word2id.items()}
dico = cls(id2word, word2id, counts)
logger.info("Read %i words from the vocabulary file." % len(dico))
if skipped > 0:
logger.warning("Skipped %i empty lines!" % skipped)
return dico
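    # Illustrative vocabulary file format (made-up counts): one "<word> <count>" pair per
    # line with non-increasing counts, e.g.
    #   def 1523412
    #   return 1184371
    # Malformed lines, duplicated words and lines containing \u2028 are skipped.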
@staticmethod
def index_data(path, bin_path, dico):
"""
Index sentences with a dictionary.
        Parameters
        ----------
        path: input text file, one tokenized sentence per line
        bin_path: output path for the binarized .pth file (None to skip caching)
        dico: Dictionary used to index the words
"""
if bin_path is not None and os.path.isfile(bin_path):
logger.info("Loading data from %s ..." % bin_path)
data = torch.load(bin_path)
assert dico == data["dico"]
return data
positions = []
sentences = []
unk_words = {}
# index sentences
f = open(path, "r", encoding="utf-8")
for i, line in enumerate(f):
if i % 1000000 == 0 and i > 0:
print(i)
s = line.rstrip().split()
# skip empty sentences
if len(s) == 0:
print("Empty sentence in line %i." % i)
# index sentence words
count_unk = 0
indexed = []
for w in s:
word_id = dico.index(w, no_unk=False)
# if we find a special word which is not an unknown word, skip the sentence
if 0 <= word_id < 4 + SPECIAL_WORDS and word_id != 3:
logger.warning(
'Found unexpected special word "%s" (%i)!!' % (w, word_id)
)
continue
assert word_id >= 0
indexed.append(word_id)
if word_id == dico.unk_index:
unk_words[w] = unk_words.get(w, 0) + 1
count_unk += 1
# add sentence
positions.append([len(sentences), len(sentences) + len(indexed)])
sentences.extend(indexed)
sentences.append(1) # EOS index
f.close()
# tensorize data
positions = np.int64(positions)
if len(dico) < 1 << 16:
sentences = np.uint16(sentences)
elif len(dico) < 1 << 31:
sentences = np.int32(sentences)
else:
raise Exception("Dictionary is too big.")
assert sentences.min() >= 0
data = {
"dico": dico,
"positions": positions,
"sentences": sentences,
"unk_words": unk_words,
}
if bin_path is not None:
print("Saving the data to %s ..." % bin_path)
torch.save(data, bin_path, pickle_protocol=4)
return data
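# Usage sketch (hypothetical paths): binarize a tokenized corpus with an existing vocab.
#   dico = Dictionary.read_vocab("data/bpe/vocab")
#   data = Dictionary.index_data("train.java.bpe", "train.java.pth", dico)
#   # data["sentences"] is a flat uint16/int32 array of word ids (1 marks EOS) and
#   # data["positions"] holds the [start, end) span of each sentence in that array.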
|
CodeGen-main
|
codegen_sources/model/src/data/dictionary.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import multiprocessing
import os
from pathlib import Path
import argparse
import submitit
from codegen_sources.preprocessing.utils import bool_flag
from codegen_sources.preprocessing import bpe_modes
from codegen_sources.preprocessing import dataset_modes
from codegen_sources.model.src.logger import create_logger
def preprocess(args):
create_logger(filepath=None, rank=0)
logger = logging.getLogger()
logger.info(f"Dataset pipeline for {args.input_path}")
dataset_class = dataset_modes.DatasetMode.modes
if args.mode not in dataset_class:
raise ValueError(
f"No mode {args.mode!r}, available are: {list(dataset_class.keys())}"
) # datasets must be added to dataset_modes/__init__ for auto-inclusion
dataset_mode = dataset_class[args.mode]
# bpe mode
assert args.bpe_mode in ["fast", "roberta"]
if args.bpe_mode == "fast":
BPE_mode = bpe_modes.FastBPEMode(
vocab_path=args.fastbpe_vocab_path,
codes=args.fastbpe_code_path,
use_vocab=args.fastbpe_use_vocab,
)
else:
BPE_mode = bpe_modes.RobertaBPEMode()
inpath = Path(args.input_path)
executors = {
name: submitit.AutoExecutor(
folder=inpath.joinpath("log"), cluster="local" if args.local else None
)
for name in ["tokenization", "train_bpe", "apply_bpe"]
}
timeouts = {
"tokenization": args.tokenization_timeout,
"train_bpe": args.train_bpe_timeout,
"apply_bpe": args.bpe_timeout,
}
for name, executor in executors.items():
executor.update_parameters(timeout_min=timeouts[name])
if not args.local:
executor.update_parameters(
slurm_partition="learnlab",
mem_gb=args.job_mem,
array_parallelism=200,
cpus_per_task=args.cpu_per_task if name == "tokenization" else 1,
)
dataset = dataset_mode(
folder=args.input_path,
languages=args.langs,
bpe=BPE_mode,
nb_train_split=args.train_splits,
keep_comments=args.keep_comments,
repo_split=args.repo_split,
)
dataset.extract_data_and_tokenize(
executor=executors["tokenization"],
local_parallelism=args.local_parallelism,
tokenize_line_timeout=args.tokenize_line_timeout,
)
dataset.get_train_test_valid_splits(
percent_test=args.percent_test_valid,
percent_valid=args.percent_test_valid,
dedupe=True,
)
dataset.learn_bpe(ncodes=args.ncodes, executor=executors["train_bpe"])
dataset.apply_bpe(
executor=executors["apply_bpe"], local_parallelism=args.local_parallelism
)
dataset.get_vocab(executor=executors["train_bpe"])
dataset.binarize(
executor=executors["apply_bpe"], local_parallelism=args.local_parallelism
)
dataset.check_files_and_symlink_for_XLM()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument("input_path", help="root folder")
parser.add_argument(
"--local",
type=bool_flag,
default=True,
help="True if you want to run the processing pipeline locally, false if want to use submitit.",
)
parser.add_argument(
"--local_parallelism",
type=int,
default=None,
help="When running locally, number of files read at the same time.",
)
parser.add_argument(
"--langs",
nargs="+",
default=["python", "java", "cpp"],
help="list of languages to run on",
)
parser.add_argument(
"--mode",
type=str,
default="monolingual_functions",
choices=list(dataset_modes.DatasetMode.modes.keys()),
help="Type of dataset.",
) # datasets must be added to dataset_modes/__init__ for auto-inclusion
parser.add_argument(
"--train_splits", type=int, default=8, help="Number of train splits."
)
parser.add_argument(
"--job_mem",
type=int,
default=250,
help="Memory in GB for jobs run on the cluster",
)
parser.add_argument(
"--tokenization_timeout",
type=int,
default=1000,
help="Timeout for tokenization/obfuscation jobs",
)
parser.add_argument(
"--tokenize_line_timeout",
type=int,
default=240,
help="Timeout for tokenizing and processing a line",
)
parser.add_argument(
"--bpe_timeout", type=int, default=240, help="Timeout for bpe jobs"
)
parser.add_argument(
"--train_bpe_timeout", type=int, default=500, help="Timeout for bpe jobs"
)
parser.add_argument(
"--cpu_per_task",
type=int,
default=10,
help="Number of cpus per job for the tokenization",
)
parser.add_argument(
"--bpe_mode",
type=str,
default="fast",
choices=["fast", "roberta"],
help="Type of BPE, should be roberta or fast.",
)
parser.add_argument(
"--fastbpe_use_vocab",
type=bool_flag,
default=False,
help="Whether to use the vocab when applying BPE",
)
parser.add_argument(
"--fastbpe_vocab_path",
type=str,
default=None,
help="Path to existing fastbpe vocab",
)
parser.add_argument(
"--keep_comments",
type=bool_flag,
default=False,
help="Whether to keep the comments (does not happen with deobfuscation dataset).",
)
parser.add_argument(
"--fastbpe_code_path",
type=str,
default=None,
help="Path to existing fastbpe codes",
)
parser.add_argument(
"--ncodes",
type=int,
default=50000,
help="Number of codes to be learnt with fast bpe if no bpe codes is given.",
)
parser.add_argument(
"--percent_test_valid",
type=int,
default=1,
help="Percentage of data that will be put into test and valid sets.",
)
parser.add_argument(
"--repo_split",
type=bool_flag,
default=True,
help="Percentage of data that will be put into test and valid sets.",
)
args = parser.parse_args()
args.input_path = os.path.abspath(args.input_path)
multiprocessing.set_start_method("fork")
preprocess(args)
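# Example invocation (illustrative paths and values, using the flags defined above):
#   python -m codegen_sources.preprocessing.preprocess /path/to/dataset \
#       --langs java python cpp --mode monolingual_functions --local True \
#       --train_splits 8 --ncodes 50000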
|
CodeGen-main
|
codegen_sources/preprocessing/preprocess.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import errno
import math
import os
import signal
import time
from functools import partial, wraps
class TimeoutError(BaseException):
pass
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
def decorator(func):
def _handle_timeout(repeat_id, signum, frame):
# logger.warning(f"Catched the signal ({repeat_id}) Setting signal handler {repeat_id + 1}")
signal.signal(signal.SIGALRM, partial(_handle_timeout, repeat_id + 1))
signal.alarm(seconds)
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
old_signal = signal.signal(signal.SIGALRM, partial(_handle_timeout, 0))
old_time_left = signal.alarm(seconds)
assert type(old_time_left) is int and old_time_left >= 0
if 0 < old_time_left < seconds: # do not exceed previous timer
signal.alarm(old_time_left)
start_time = time.time()
try:
result = func(*args, **kwargs)
finally:
if old_time_left == 0:
signal.alarm(0)
else:
sub = time.time() - start_time
signal.signal(signal.SIGALRM, old_signal)
signal.alarm(max(0, math.ceil(old_time_left - sub)))
return result
return wraps(func)(wrapper)
return decorator
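# Usage sketch: give a call a 5-second budget. The local TimeoutError defined above
# (not the builtin) is raised if the wrapped function does not return in time, and any
# previously running alarm is restored afterwards.
#
#   @timeout(seconds=5)
#   def tokenize_line(line):
#       ...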
|
CodeGen-main
|
codegen_sources/preprocessing/timeout.py
|
CodeGen-main
|
codegen_sources/preprocessing/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import random
import subprocess
from pathlib import Path
from logging import getLogger
from codegen_sources.model.preprocess import XLM_preprocess
import typing as tp
PathLike = tp.Union[str, Path]
REPO_ROOT = str(Path(__file__).parents[2])
FALSY_STRINGS = {"off", "false", "0"}
TRUTHY_STRINGS = {"on", "true", "1"}
logger = getLogger()
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("Invalid value for a boolean flag!")
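# Usage sketch: plug bool_flag into argparse so that "on"/"true"/"1" and
# "off"/"false"/"0" are both accepted on the command line.
#   parser.add_argument("--keep_comments", type=bool_flag, default=False)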
def is_valid_file(filepath: tp.Optional[PathLike]) -> bool:
if filepath is None:
return False
if isinstance(filepath, str):
filepath = Path(filepath)
else:
assert isinstance(filepath, Path)
return filepath.is_file() and filepath.stat().st_size > 0
def get_nlines(file_path):
assert file_path.is_file(), file_path
process = subprocess.run(
f"wc -l {file_path}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
assert process.returncode == 0
out = process.stdout.decode()
return int(out.lstrip().split(" ")[0])
def check_same_number_of_lines(file_path1, file_path2):
nlines1 = get_nlines(file_path1)
nlines2 = get_nlines(file_path2)
assert (
nlines1 == nlines2
), f"{file_path1} contains {nlines1} examples vs {file_path2}: {nlines2} examples"
def head(file_path, n):
n = int(n)
with file_path.open("r", encoding="utf-8") as f:
h = [next(f) for i in range(n)]
return h
def get_subset_file(file_paths: tp.List[Path], subset_size_gb: int, output_path: Path):
"""
    Return one file containing a subset of the files in file_paths.
    The subset is of size subset_size_gb.
    The subset contains an equal portion of each file.
"""
if output_path.is_file():
return f"{output_path}"
for file_path in file_paths:
size_gb = file_path.stat().st_size / 1024 ** 3
n_lines = get_nlines(file_path)
subset_n_lines = int((subset_size_gb / len(file_paths)) * (n_lines / size_gb))
process = subprocess.run(
f"head -q -n {subset_n_lines} {file_path} >> {output_path}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
executable="/bin/bash",
)
assert process.returncode == 0
logger.info(
f"Subset of {[f.name for f in file_paths]} created at: {output_path.name}. Size=({output_path.stat().st_size / 1024 ** 3:.2f}GB)."
)
shuf_file(output_path)
return f"{output_path}.shuf"
def truncate_files(file_paths):
all_lines = []
for f in file_paths:
with f.open("r", encoding="utf-8") as f:
lines = f.readlines()
all_lines.append(lines)
mini = min([len(lines) for lines in all_lines])
    for i, f in enumerate(file_paths):  # i is the index, f the file path
if len(all_lines[i]) > mini:
with f.open("w", encoding="utf-8") as f:
for j in range(mini):
f.write(all_lines[i][j])
def write_head(file_path, n):
n = int(n)
with file_path.open("r", encoding="utf-8") as f:
h = [next(f) for i in range(n)]
with file_path.open("w", encoding="utf-8") as f:
f.write("".join(h))
return h
def shuf_file(file_path):
process = subprocess.run(
f"shuf {file_path} -o {file_path}.shuf",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert (
process.returncode == 0
), f"failed to shuffle {file_path}\n Error {process.stderr.decode()}"
def get_all_pairs(items):
return [
(items[i], items[j])
for i in range(len(items))
for j in range(i + 1, len(items))
]
def shuf_parallel_files(file_paths: tp.List[PathLike]) -> None:
lines_order: tp.List[int] = []
for input_path in file_paths:
input_path = Path(input_path)
with input_path.open("r", encoding="utf8") as f:
lines = f.readlines()
if not lines_order:
lines_order = list(range(len(lines)))
random.shuffle(lines_order)
random.shuffle(lines_order)
if len(lines_order) != len(lines):
raise RuntimeError(
f"files with different number of lines in {file_paths} "
f"({len(lines_order)} and {len(lines)})"
)
reordered = [lines[i] for i in lines_order]
with open(f"{input_path}.shuf", "w", encoding="utf8") as f:
f.writelines(reordered)
def get_repo_to_group_dict(repo_groups_path):
repo_groups = open(repo_groups_path, "r").read().strip()
repo_groups_dict = json.loads(repo_groups)
repo_to_group = dict()
for k, values in repo_groups_dict.items():
for v in values:
assert v not in repo_to_group
repo_to_group[v] = k
return repo_to_group
def binarize_for_XLM_file(file_path, vocab):
assert get_nlines(file_path) > 0
return XLM_preprocess(str(vocab), str(file_path), str(file_path) + ".pth")
def create_symlink(file_path, symlink):
if isinstance(file_path, str):
file_path = Path(file_path)
if isinstance(symlink, str):
symlink = Path(symlink)
assert (
file_path.is_file() or symlink.parent.joinpath(file_path).resolve().is_file()
), f"{file_path} is not a file: resolved into {symlink.parent.joinpath(file_path).resolve()}"
assert not symlink.is_file(), f"{symlink} already exists"
process = subprocess.run(
f"ln -s {file_path} {symlink}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert (
symlink.is_file() and process.returncode == 0
), f"failed to create symlink {symlink} for file {file_path} "
def matched(s):
count = 0
is_in_string = False
string_char = ""
previous_char = ""
    for i, c in enumerate(s):
if is_in_string:
if c == string_char and (
                previous_char != "\\" or (i >= 2 and s[i - 2] == "\\")
):
is_in_string = False
previous_char = c
continue
if c == "(":
count += 1
elif c == ")":
count -= 1
if count < 0:
return False
if c == '"' or c == "'":
is_in_string = True
string_char = c
return count == 0
def split_arguments(s):
open_parentheses = {"[", "{", "("}
close_parentheses = {"]", "}", ")"}
s = s.strip()
while s.startswith("(") and s.endswith(")") and matched(s[1:-1]):
s = s[1:-1]
parenth_count = 0
arguments = [[]]
is_in_string = False
string_char = ""
previous_char = ""
for i, c in enumerate(s):
if is_in_string:
arguments[-1].append(c)
if c == string_char and (
previous_char != "\\" or (i >= 2 and s[i - 2] == "\\")
):
is_in_string = False
previous_char = c
continue
if c in open_parentheses:
parenth_count += 1
if c in close_parentheses:
parenth_count -= 1
if c == "," and parenth_count == 0:
arguments.append([])
else:
arguments[-1].append(c)
previous_char = c
if c == '"' or c == "'":
is_in_string = True
string_char = c
assert parenth_count == 0, (parenth_count, s)
return ["".join(chars) for chars in arguments]
|
CodeGen-main
|
codegen_sources/preprocessing/utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
import typing as tp
TMP_EXT = ".tmp"
class BPEMode:
"""
the base BPE mode logic for running apply_bpe and repair_bpe
"""
# TODO add restore BPE of XLM utils into that class
def __init__(
self, ext: str, vocab_path: tp.Optional[str], process_strings: bool
) -> None:
self.ext = ext
self.vocab_path = None if vocab_path is None else Path(vocab_path)
self.process_strings = process_strings
def learn_bpe_file(self, file: str, ncodes: int) -> None:
raise NotImplementedError
def apply_bpe(self, code: str) -> str:
raise NotImplementedError
def apply_bpe_file(self, file: str, output: str) -> None:
raise NotImplementedError
@staticmethod
def repair_bpe_for_obfuscation_line(line: str) -> str:
raise NotImplementedError
def repair_bpe_for_obfuscation_file(self, file: str, output: str) -> None:
raise NotImplementedError
|
CodeGen-main
|
codegen_sources/preprocessing/bpe_modes/bpe_mode.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import os
import re
from pathlib import Path
from codegen_sources.preprocessing.bpe_modes.bpe_mode import BPEMode
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import (
OBFUSCATED_PREFIXES,
)
from transformers import RobertaTokenizer
import typing as tp
logger = logging.getLogger()
class RobertaBPEMode(BPEMode):
"""
apply the BPE with the roberta logic
"""
def __init__(self) -> None:
vocab_path = str(
Path(__file__).parents[3].joinpath("data/bpe/roberta-base-vocab")
)
logger.info(
f"Roberta BPE mode use Roberta pretrained codes and vocab {vocab_path}."
)
super().__init__(ext=".bperob", vocab_path=vocab_path, process_strings=False)
def learn_bpe_file(self, file: str, ncodes: int):
logger.warning("Roberta BPE codes don't need to be trained. Use default ones.")
def get_vocab_file(self, file, nvocab=64000):
logger.warning("Roberta BPE vocab doesn't need to be trained. Use default one.")
def apply_bpe(self, code: str):
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
lines = code.split("\n")
return "\n".join(
[" ".join(tokenizer._tokenize(line.strip())) for line in lines]
)
def apply_bpe_file(self, file: str, output: str) -> None:
assert os.path.exists(
file
), f"cannot apply bpe on file {file}, it doesnt exists."
if output is None:
output = file.replace(".tok", ".rob-bpe")
with open(file, encoding="utf-8") as f:
code = f.read()
with open(output, "w", encoding="utf-8") as f:
f.write(self.apply_bpe(code))
@staticmethod
def repair_bpe_for_obfuscation_line(line: str):
line = line.replace("CLASS _ ", "CLASS_")
line = line.replace("FUN C _ ", "FUNC_")
line = line.replace("V AR _ ", "VAR_")
line = re.sub("< special ([0-9]+) >", r"<special\1>", line)
for prefix in OBFUSCATED_PREFIXES + ["<special"]:
n_replacements = 1
line = line.replace(f"Ġ{prefix}", f"Ġ {prefix}")
while n_replacements > 0:
line, n_replacements = re.subn(
f"({prefix}[0-9]+) ([0-9]+)", r"\1\2", line
)
return line
def repair_bpe_for_obfuscation_file(self, file: str, output: str) -> None:
        # use context managers so both files are closed even if repair fails
        with open(output, "w", encoding="utf-8") as output_file, open(
            str(file), "r", encoding="utf-8"
        ) as input_file:
            for line in input_file:
                line = self.repair_bpe_for_obfuscation_line(line)
                output_file.write(line)
|
CodeGen-main
|
codegen_sources/preprocessing/bpe_modes/roberta_bpe_mode.py
|
from .bpe_mode import BPEMode as BPEMode
from .fast_bpe_mode import FastBPEMode as FastBPEMode
from .roberta_bpe_mode import RobertaBPEMode as RobertaBPEMode
|
CodeGen-main
|
codegen_sources/preprocessing/bpe_modes/__init__.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import subprocess
from logging import getLogger
from pathlib import Path
import fastBPE
import typing as tp
from codegen_sources.preprocessing.bpe_modes.bpe_mode import BPEMode
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import (
OBFUSCATED_PREFIXES,
)
FAST = str(Path(__file__).resolve().parents[3].joinpath("fastBPE/fast"))
logger = getLogger()
class FastBPEMode(BPEMode):
"""
    Apply BPE with the fastBPE binary, using learned codes and an optional vocabulary
"""
def __init__(
self,
vocab_path: tp.Optional[str],
codes: tp.Optional[str],
use_vocab: bool = False,
):
super().__init__(ext=".bpe", vocab_path=vocab_path, process_strings=True)
assert vocab_path is None or codes is not None
assert Path(FAST).exists(), f"Missing FastBPE install at {FAST}"
if codes is None or codes == "None":
self.codes: tp.Optional[Path] = None
self.vocab_path = None
else:
self.codes = Path(codes)
self.use_vocab = use_vocab
def learn_bpe_file(self, file: str, ncodes: int):
if ncodes > 50000:
logger.warning(
f"Number of codes is very large: {ncodes}. Usually we chose ncodes < 50000."
)
process = subprocess.run(
f"{FAST} learnbpe {ncodes} {file} > {self.codes} ",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert (
process.returncode == 0 and Path(f"{self.codes}").is_file()
), f"failed to learn bpe on {file}, command: {FAST} learnbpe {ncodes} {file} > {self.codes}"
def get_vocab_file(self, file, nvocab=64000):
process = subprocess.run(
f"{FAST} getvocab {file} > {str(self.vocab_path)}.all",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
process2 = subprocess.run(
f"head -n {nvocab} {str(self.vocab_path)}.all > {str(self.vocab_path)}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert (
            self.vocab_path.is_file()
and process.returncode == 0
and process2.returncode == 0
), f"failed to get vocab for {file}, command: {FAST} getvocab {file} > {str(self.vocab_path)}.all & head -n nvocab {str(self.vocab_path)}.all > {str(self.vocab_path)}"
def apply_bpe(self, code: str):
bpe_model = fastBPE.fastBPE(str(self.codes)) # type: ignore
assert isinstance(code, str)
return " ".join(bpe_model.apply(code.split()))
def apply_bpe_file(self, file: str, output: str) -> None:
if output is None:
output = file + self.ext
vocab = self.vocab_path if self.vocab_path is not None else ""
process = subprocess.run(
f"{FAST} applybpe {output} {file} {self.codes} {vocab if self.use_vocab else ''}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert (
Path(output).is_file() and process.returncode == 0
), f"failed to apply bpe on {file}, command: \n {FAST} applybpe {output} {file} {self.codes} {vocab if self.use_vocab else ''}"
@staticmethod
def repair_bpe_for_obfuscation_line(line: str) -> str:
# special case for separator
line = re.sub("<@@ special@@ ([0-9]+)@@ >", r"<special\1>", line)
for prefix in OBFUSCATED_PREFIXES:
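            # rebuild a prefix that fastBPE split into subwords, e.g.
            # "V@@ AR@@ _@@ 3 " -> "VAR_3 "; group len(prefix)+1 captures the index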
line = re.sub(
f'{"(@@ )?".join(prefix)}(@@ )?([0-9]+($| ))',
f"{prefix}\\{len(prefix)+1}",
line,
)
n_replacements = 1
while n_replacements > 0:
line, n_replacements = re.subn(
f"({prefix}[0-9]+)@@ ([0-9]+)", r"\1\2", line
)
return line
def repair_bpe_for_obfuscation_file(self, file: str, output: str) -> None:
        # use context managers so both files are closed even if repair fails
        with open(output, "w", encoding="utf-8") as output_file, open(
            str(file), "r", encoding="utf-8"
        ) as input_file:
            for line in input_file:
                line = self.repair_bpe_for_obfuscation_line(line)
                output_file.write(line)
|
CodeGen-main
|
codegen_sources/preprocessing/bpe_modes/fast_bpe_mode.py
|
CodeGen-main
|
codegen_sources/preprocessing/tests/__init__.py
|
|
CodeGen-main
|
codegen_sources/preprocessing/tests/pipeline/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import logging
import shutil
import unittest
from pathlib import Path
import pytest
from codegen_sources.preprocessing.preprocess import preprocess
from codegen_sources.model.src.utils import AttrDict
input_path = Path(__file__).parents[4].joinpath("data/test_dataset")
bpe_path = Path(__file__).parents[4].joinpath("data/bpe/cpp-java-python")
logger = logging.getLogger(__name__)
RELATIVE_TOLERANCE = 0.005
def _deactivate_in_ci() -> None:
"""Diminish number of used processors in the CI since it triggers
memory errors (with Roberta mode)
"""
if os.environ.get("CIRCLECI", False):
        # might be related to downloading the model, and/or to loading it in
        # multiple processes
raise unittest.SkipTest("Roberta is deactivated because of OOM in the CI")
DEFAULT_PARAMETERS = AttrDict(
{
"input_path": str(input_path),
"local": "True",
"train_splits": 1,
"ncodes": 100,
"percent_test_valid": 10,
"keep_comments": False,
"local_parallelism": None,
"tokenization_timeout": 2,
"bpe_timeout": 2,
"train_bpe_timeout": 5,
"repo_split": True,
"fastbpe_code_path": None,
"fastbpe_vocab_path": None,
"fastbpe_use_vocab": False,
"tokenize_line_timeout": 60,
}
)
@pytest.fixture(autouse=True)
def setup() -> None:
subdirs = ["log", "XLM-syml"]
for name in subdirs:
subpath = input_path / name
if subpath.is_dir():
try:
shutil.rmtree(subpath)
except OSError as e:
logger.warning(f"Could not delete the folder ({subpath}):\n{e}")
for f in input_path.glob("*"):
if not f.name.endswith(".json.gz") and f.name not in subdirs:
try:
f.unlink()
except OSError as e:
if not f.name.startswith(".nfs"):
raise e
logger.warning(f"Could not delete file:\n{e}")
def check_number_lines_is(nb):
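    # compare the number of BPE lines produced by the pipeline to the expected
    # count, within RELATIVE_TOLERANCE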
assert abs(count_bpe_lines() - nb) / nb < RELATIVE_TOLERANCE, count_bpe_lines()
def count_bpe_lines():
tok_files = list(input_path.glob("*.bpe")) + list(input_path.glob("*.bperob"))
count = 0
for path in tok_files:
with open(path) as f:
count += len(f.readlines())
return count
# Roberta Mode
def test_obfuscation_roberta_pipeline():
args = AttrDict(DEFAULT_PARAMETERS)
args.update(
{"langs": ["java", "python"], "mode": "obfuscation", "bpe_mode": "roberta"}
)
_deactivate_in_ci()
preprocess(args)
shutil.rmtree(input_path.joinpath("XLM-syml"))
def test_obfuscation_functions_roberta_pipeline():
args = AttrDict(DEFAULT_PARAMETERS)
args.update(
{
"langs": ["java", "python"],
"mode": "obfuscation_functions",
"bpe_mode": "roberta",
}
)
_deactivate_in_ci()
preprocess(args)
shutil.rmtree(input_path.joinpath("XLM-syml"))
def test_monolingual_roberta_pipeline():
args = AttrDict(DEFAULT_PARAMETERS)
args.update(
{
"langs": ["java", "python", "cpp"],
"mode": "monolingual",
"bpe_mode": "roberta",
}
)
_deactivate_in_ci()
preprocess(args)
shutil.rmtree(input_path.joinpath("XLM-syml"))
def test_monolingual_functions_roberta_pipeline():
args = AttrDict(DEFAULT_PARAMETERS)
args.update(
{
"langs": ["java", "python", "cpp"],
"mode": "monolingual_functions",
"bpe_mode": "roberta",
}
)
_deactivate_in_ci()
preprocess(args)
shutil.rmtree(input_path.joinpath("XLM-syml"))
# Fast BPE Mode
def test_monolingual_fast_pipeline():
args = AttrDict(DEFAULT_PARAMETERS)
args.update(
{
"langs": ["java", "python", "cpp"],
"mode": "monolingual",
"bpe_mode": "fast",
"fastbpe_code_path": None,
"fastbpe_vocab_path": None,
"fastbpe_use_vocab": False,
}
)
preprocess(args)
shutil.rmtree(input_path.joinpath("XLM-syml"))
def test_monolingual_functions_fast_pipeline():
args = AttrDict(DEFAULT_PARAMETERS)
args.update(
{
"langs": ["java", "python", "cpp"],
"mode": "monolingual_functions",
"bpe_mode": "fast",
"fastbpe_code_path": None,
"fastbpe_vocab_path": None,
"fastbpe_use_vocab": False,
}
)
preprocess(args)
shutil.rmtree(input_path.joinpath("XLM-syml"))
def test_monolingual_functions_fast_pipeline_keep_comments():
args = AttrDict(DEFAULT_PARAMETERS)
args.update(
{
"langs": ["java", "python", "cpp"],
"mode": "monolingual_functions",
"bpe_mode": "fast",
"keep_comments": True,
"fastbpe_code_path": None,
"fastbpe_vocab_path": None,
"fastbpe_use_vocab": False,
}
)
preprocess(args)
shutil.rmtree(input_path.joinpath("XLM-syml"))
def test_obfuscation_fast_pipeline():
args = AttrDict(DEFAULT_PARAMETERS)
args.update(
{
"langs": ["java", "python"],
"mode": "obfuscation",
"bpe_mode": "fast",
"fastbpe_code_path": f"{os.path.abspath(bpe_path.joinpath('codes'))}",
"fastbpe_vocab_path": f"{os.path.abspath(bpe_path.joinpath('vocab'))}",
"fastbpe_use_vocab": False,
"ncodes": 50000,
}
)
preprocess(args)
shutil.rmtree(input_path.joinpath("XLM-syml"))
def test_obfuscation_functions_fast_pipeline():
args = AttrDict(DEFAULT_PARAMETERS)
args.update(
{
"langs": ["java", "python"],
"mode": "obfuscation_functions",
"bpe_mode": "fast",
"fastbpe_code_path": f"{os.path.abspath(bpe_path.joinpath('codes'))}",
"fastbpe_vocab_path": f"{os.path.abspath(bpe_path.joinpath('vocab'))}",
"fastbpe_use_vocab": False,
"ncodes": 50000,
}
)
preprocess(args)
shutil.rmtree(input_path.joinpath("XLM-syml"))
|
CodeGen-main
|
codegen_sources/preprocessing/tests/pipeline/test_pipeline.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from codegen_sources.preprocessing.lang_processors.cpp_processor import CppProcessor
from codegen_sources.preprocessing.tests.obfuscation.utils import diff_tester
from codegen_sources.preprocessing.tests.tokenization.tokenization_tests_utils import (
tokenizer_test,
detokenize_non_invertible,
detokenize_invertible,
tokenize_twice,
compare_funcs,
)
processor = CppProcessor(root_folder=Path(__file__).parents[4].joinpath("tree-sitter"))
TESTS = []
TESTS.append(
(
r"""
// This is a comment
// ------- ******* -------
int main() {
std::cout << "Hello World!";
return 0;
}""",
[
"int",
"main",
"(",
")",
"{",
"std",
"::",
"cout",
"<<",
'" Hello ▁ World ! "',
";",
"return",
"0",
";",
"}",
],
)
)
TESTS.append(
(
r"""
overload((byte)1);
overload(1L);
overload(1.0f);""",
[
"overload",
"(",
"(",
"byte",
")",
"1",
")",
";",
"overload",
"(",
"1L",
")",
";",
"overload",
"(",
"1.0f",
")",
";",
],
)
)
TESTS.append(
(
r"""auto glambda = [](auto a, auto&& b) { return a < b; };""",
[
"auto",
"glambda",
"=",
"[",
"]",
"(",
"auto",
"a",
",",
"auto",
"&&",
"b",
")",
"{",
"return",
"a",
"<",
"b",
";",
"}",
";",
],
)
)
# test references and pointers
TESTS.append(
(
r""" int a = 0;
int * b = new int(0);
int& ref = a;""",
[
"int",
"a",
"=",
"0",
";",
"int",
"*",
"b",
"=",
"new",
"int",
"(",
"0",
")",
";",
"int",
"&",
"ref",
"=",
"a",
";",
],
)
)
# test increment/decrement and equality/inequality operators
TESTS.append(
(
r"""a = 0;
b = 0;
a += 10;
a ++;
a --;
a -= 100;
if (a == b) {
cout<<"yes"<<endl;
}
if (a != b) {
cout << "no" << endl;
}""",
[
"a",
"=",
"0",
";",
"b",
"=",
"0",
";",
"a",
"+=",
"10",
";",
"a",
"++",
";",
"a",
"--",
";",
"a",
"-=",
"100",
";",
"if",
"(",
"a",
"==",
"b",
")",
"{",
"cout",
"<<",
'" yes "',
"<<",
"endl",
";",
"}",
"if",
"(",
"a",
"!=",
"b",
")",
"{",
"cout",
"<<",
'" no "',
"<<",
"endl",
";",
"}",
],
)
)
TESTS.append(
(
"std::unordered_map<MyCustomObject, std::string> hashmap;",
[
"std",
"::",
"unordered_map",
"<",
"MyCustomObject",
",",
"std",
"::",
"string",
">",
"hashmap",
";",
],
)
)
TESTS.append(
(
r"""string s = "Hi I am\nMarie";""",
["string", "s", "=", '" Hi ▁ I ▁ am \\n Marie "', ";"],
)
)
TESTS_KEEP_COMMENTS = [
(
r"""
// This is a comment
// ----------*****
int main() {
std::cout << "Hello World!";
return 0;
}""",
[
"// ▁ This ▁ is ▁ a ▁ comment ENDCOM",
"int",
"main",
"(",
")",
"{",
"std",
"::",
"cout",
"<<",
'" Hello ▁ World ! "',
";",
"return",
"0",
";",
"}",
],
),
(
r"""
/* This is a
multiline comment */
/*----------------this is the docstring */
/* ----*----*-*---- ====== *** */
int main() {
std::cout << "Hello World!";
return 0;
}""",
[
"/* ▁ This ▁ is ▁ a STRNEWLINE multiline ▁ comment ▁ */",
"/* - - - - - this ▁ is ▁ the ▁ docstring ▁ */",
"int",
"main",
"(",
")",
"{",
"std",
"::",
"cout",
"<<",
'" Hello ▁ World ! "',
";",
"return",
"0",
";",
"}",
],
),
]
TESTS_CHARS = [
(
r"""
char a = 'a' ;
""",
["char", "a", "=", "' a '", ";"],
)
]
TESTS_DETOKENIZE_CHARS = [
(
"char a = 'a';",
r"""char a = 'a' ;
""",
)
]
TESTS_STRINGS = [
(
r"""
string s = "Hello !" ;""",
["string", "s", "=", f'" Hello ▁ ! "', ";"],
),
(
r"""
string s = L"Hello !" ;""",
["string", "s", "=", f'L" Hello ▁ ! "', ";"],
),
]
TESTS_MULTILINE_STRINGS = [
(
r"""
string s =
"First line"
"Second line";
""",
["string", "s", "=", f'" First ▁ line "', '" Second ▁ line "', ";"],
)
]
TESTS_DETOKENIZE_MULTILINE_STRINGS = [
(
r"""
string s =
"First line"
"Second line";
""",
r"""string s = "First line" "Second line" ;
""",
)
]
TESTS_DETOKENIZE_SPECIAL_STRINGS = [
(
r'L"Hello world";',
r"""L"Hello world" ;
""",
)
]
DETOKENIZE_WITH_DEFINE_TEST = [
(
r"""
#define sf scanf
#define pf printf
int main ( ) {
int i;
sf ( "%d" , & i ) ;
pf ( "%d\n" , i ) ;
}""",
r"""#define sf scanf
#define pf printf
int main ( ) {
int i ;
sf ( "%d" , & i ) ;
pf ( "%d\n" , i ) ;
}
""",
),
(
r"""#define rep(p, q) for(int i = p; i < q;i++)""",
"""#define rep( p , q ) for(int i = p; i < q;i++)""",
),
]
DETOKENIZE_TESTS = []
DETOKENIZE_TESTS.append(
(
r"""
// This is a comment
int main() {
std::cout << "Hello World!";
return 0;
}""",
r"""int main ( ) {
std :: cout << "Hello World!" ;
return 0 ;
}
""",
)
)
DETOKENIZE_TESTS.append(
(
r"""int a = 0;
int * b = new int(0);
int& ref = a;""",
r"""int a = 0 ;
int * b = new int ( 0 ) ;
int & ref = a ;
""",
)
)
DETOKENIZE_TESTS.append(
(
r"""a = 0;
b = 0
a += 10;
a ++;
a --;
a -= 100;
if (a == b) {
cout<<"yes"<<endl;
}
if (a != b) {
cout << "no" << endl;
}
""",
r"""a = 0 ;
b = 0 a += 10 ;
a ++ ;
a -- ;
a -= 100 ;
if ( a == b ) {
cout << "yes" << endl ;
}
if ( a != b ) {
cout << "no" << endl ;
}
""",
)
)
TESTS_INCLUDES = [
(
"""// basic file operations
#include <iostream>
#include <fstream>
using namespace std;
int main () {
ofstream myfile;
myfile.open ("example.txt");
myfile << "Writing this to a file.\n";
myfile.close();
return 0;
}""",
[
"#include",
"<iostream>",
"NEW_LINE",
"#include",
"<fstream>",
"NEW_LINE",
"using",
"namespace",
"std",
";",
"int",
"main",
"(",
")",
"{",
"ofstream",
"myfile",
";",
"myfile",
".",
"open",
"(",
'" example . txt "',
")",
";",
"myfile",
"<<",
'" Writing ▁ this ▁ to ▁ a ▁ file . STRNEWLINE "',
";",
"myfile",
".",
"close",
"(",
")",
";",
"return",
"0",
";",
"}",
],
),
(
'#include "berryDefaultActivator.h"\n\nnamespace berry {\n\nvoid\nDefaultActivator::Start(IBundleContext::Pointer /*context*/)\n{\n\n}\n\nvoid\nDefaultActivator::Stop(IBundleContext::Pointer /*context*/)\n{\n\n}\n\n}\n',
[
"#include",
'" berryDefaultActivator . h "',
"NEW_LINE",
"namespace",
"berry",
"{",
"void",
"DefaultActivator",
"::",
"Start",
"(",
"IBundleContext",
"::",
"Pointer",
")",
"{",
"}",
"void",
"DefaultActivator",
"::",
"Stop",
"(",
"IBundleContext",
"::",
"Pointer",
")",
"{",
"}",
"}",
],
),
]
TESTS_DETOKENIZE_INCLUDES = [
(
r"""// basic file operations
#include <iostream>
#include <fstream>
using namespace std;
int main () {
ofstream myfile;
myfile.open ("example.txt");
myfile << "Writing this to a file.\n";
myfile.close();
return 0;
}""",
r"""#include <iostream>
#include <fstream>
using namespace std ;
int main ( ) {
ofstream myfile ;
myfile . open ( "example.txt" ) ;
myfile << "Writing this to a file.\n" ;
myfile . close ( ) ;
return 0 ;
}
""",
)
]
def test_cpp_tokenizer_discarding_comments():
tokenizer_test(TESTS, processor, keep_comments=False)
def test_cpp_tokenizer_keep_comments():
tokenizer_test(TESTS_KEEP_COMMENTS, processor, keep_comments=True)
def test_cpp_chars():
tokenizer_test(TESTS_CHARS, processor, keep_comments=False)
def test_detokenize_chars():
detokenize_non_invertible(TESTS_DETOKENIZE_CHARS, processor)
def test_cpp_strings():
tokenizer_test(
TESTS_STRINGS + TESTS_MULTILINE_STRINGS, processor, keep_comments=False
)
def test_cpp_includes():
tokenizer_test(TESTS_INCLUDES, processor, keep_comments=False)
def test_detokenize_includes():
detokenize_non_invertible(TESTS_DETOKENIZE_INCLUDES, processor)
def test_cpp_detokenize():
detokenize_non_invertible(DETOKENIZE_TESTS, processor)
def test_cpp_detokenize_defines():
detokenize_non_invertible(DETOKENIZE_WITH_DEFINE_TEST, processor)
def test_detokenize_cpp_chars():
detokenize_invertible(TESTS_CHARS, processor)
def test_detokenize_string():
detokenize_invertible(TESTS_STRINGS, processor)
def test_detokenize_multiline_string():
detokenize_non_invertible(TESTS_DETOKENIZE_MULTILINE_STRINGS, processor)
def test_detokenize_special_string():
detokenize_non_invertible(TESTS_DETOKENIZE_SPECIAL_STRINGS, processor)
def test_tokenize_twice_equal_tokenize_remove_comments():
tokenize_twice(TESTS + TESTS_STRINGS + TESTS_CHARS, processor)
def test_tokenize_twice_equal_tokenize_keep_comments():
tokenize_twice(
TESTS + TESTS_STRINGS + TESTS_CHARS + TESTS_KEEP_COMMENTS,
processor,
keep_comments=True,
)
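# each TEST_FUNC_EXTRACTION entry: (source code, (expected standalone functions, expected class methods))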
TEST_FUNC_EXTRACTION = [
(
"""class Room {
public:
double length;
double breadth;
double height;
double calculateArea(){
return length * breadth;
}
double calculateVolume(){
return length * breadth * height;
}
};
// sample function
void sampleFunction() {
// create objects
Room room1, room2;
}
""",
[
["void sampleFunction ( ) { Room room1 , room2 ; }"],
[
"double calculateArea ( ) { return length * breadth ; }",
"double calculateVolume ( ) { return length * breadth * height ; }",
],
],
),
(
"""#include<iostream>
int main(){
return 0;
}
""",
(["int main ( ) { return 0 ; }"], []),
),
(
"""#include<cstdio>
#include<cstring>
#include<cstdlib>
#include<algorithm>
#include<set>
using namespace std;
#define mem(Arr,x) memset(Arr,x,sizeof(Arr))
const int maxN=1010000*2;
const int maxM=maxN<<1;
const int Mod=1e9+7;
int n;
pair<int,int> P[maxN];
set<pair<int,int> > S;
int Nxt[maxN],St[maxN],vis[maxN];
int edgecnt=-1,Head[maxN],Next[maxM],V[maxM];
void Add_Edge(int u,int v);
void dfs(int u,int w);
int func0()
{
return 0;
}
int func1(int u,int v)
{
return 1;
}
void func2()
{
return
}""",
(
[
"int func0 ( ) { return 0 ; }",
"int func1 ( int u , int v ) { return 1 ; }",
"void func2 ( ) { return }",
],
[],
),
),
(
"""
question::question(string ques){
this->ques = ques;
};""",
([], ["question :: question ( string ques ) { this -> ques = ques ; }"]),
),
(
"""class Rectangle {
int width, height;
public:
Rectangle ();
Rectangle (int,int);
int area (void) {return (width*height);}
};
Rectangle::Rectangle () {
width = 5;
height = 5;
}""",
(
[],
[
"int area ( void ) { return ( width * height ) ; }",
"Rectangle :: Rectangle ( ) { width = 5 ; height = 5 ; }",
],
),
),
]
def test_extract_cpp_functions():
for input_file, expected_funcs in TEST_FUNC_EXTRACTION:
actual_funcs_sa, actual_funcs_cl = processor.extract_functions(
processor.tokenize_code(input_file)
)
print(actual_funcs_sa, actual_funcs_cl)
expected_sa, expected_cl = expected_funcs
compare_funcs(actual_funcs_sa, expected_sa)
compare_funcs(actual_funcs_cl, expected_cl)
def test_formatter():
input_f = """static int factorial (int n ){ if (n == 0) return 1; return n * factorial(n-1);}"""
expected = """static int factorial(int n) {
if (n == 0)
return 1;
return n * factorial(n - 1);
}"""
actual = processor.format(input_f)
diff_tester(expected, actual)
def test_formatter_partial_code():
input_f = """static int factorial (int n ){ if """
expected = """static int factorial(int n) {
if"""
actual = processor.format(input_f)
diff_tester(expected, actual)
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/test_tokenize_cpp.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from codegen_sources.preprocessing.utils import split_arguments
def test_parentheses_split_args():
input_str = "((1,2,3), (4,5), (1,2,3), new int[] {(-1), 0, 0}, intArray0)"
res = split_arguments(input_str)
expected = [
"(1,2,3)",
" (4,5)",
" (1,2,3)",
" new int[] {(-1), 0, 0}",
" intArray0",
]
assert res == expected, f"got \n{res} instead of \n{expected}"
input_str = "(1,2,3), (4,5), (1,2,3), new int[] {(-1), 0, 0}, intArray0"
res = split_arguments(input_str)
assert res == expected, f"got \n{res} instead of \n{expected}"
input_str = "(1,2,3), (4,5), (1,2,3)"
res = split_arguments(input_str)
expected = [
"(1,2,3)",
" (4,5)",
" (1,2,3)",
]
assert res == expected, f"got \n{res} instead of \n{expected}"
input_str = "((1,2,3), (4,5), (1,2,3))"
res = split_arguments(input_str)
assert res == expected, f"got \n{res} instead of \n{expected}"
def test_strings_split_args():
input_str = '("ni(TvJz:uAhKZ", "ABC")'
res = split_arguments(input_str)
expected = ['"ni(TvJz:uAhKZ"', ' "ABC"']
assert res == expected, f"got \n{res} instead of \n{expected}"
input_str = '("ni(TvJz:uAhKZ\\" ", "ABC")'
res = split_arguments(input_str)
expected = ['"ni(TvJz:uAhKZ\\" "', ' "ABC"']
assert res == expected, f"got \n{res} instead of \n{expected}"
def test_strings_split_escaped_backslash():
input_str = "'\\\\', char0"
res = split_arguments(input_str)
expected = ["'\\\\'", " char0"]
assert res == expected, f"got \n{res} instead of \n{expected}"
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/test_utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
import pytest
from codegen_sources.preprocessing.lang_processors.javascript_processor import (
JavascriptProcessor,
)
processor = JavascriptProcessor(
root_folder=Path(__file__).parents[4].joinpath("tree-sitter")
)
TESTS_COMMENTS_STRINGS = []
TESTS_COMMENTS_STRINGS.append(
(
r"""
function myFunction() { // Declare a function
document.getElementById("demo").innerHTML = "Hello World!";
}
myFunction(); // Call the function""",
[
"function",
"myFunction",
"(",
")",
"{",
"document",
".",
"getElementById",
"(",
'" demo "',
")",
".",
"innerHTML",
"=",
'" Hello ▁ World ! "',
";",
"}",
"myFunction",
"(",
")",
";",
],
)
)
TESTS_COMMENTS_STRINGS.append(
(
r"""
public function getExample(): string {
return static::METADATA['example'];
}""",
[
"public",
"function",
"getExample",
"(",
")",
":",
"string",
"{",
"return",
"static",
":",
":",
"METADATA",
"[",
"' example '",
"]",
";",
"}",
],
)
)
TESTS_COMMENTS_STRINGS.append(
(
r"""
function myFunction(p1, p2) {
return p1 * p2; // The function returns the product of p1 and p2
}""",
[
"function",
"myFunction",
"(",
"p1",
",",
"p2",
")",
"{",
"return",
"p1",
"*",
"p2",
";",
"}",
],
)
)
TESTS2 = []
TESTS2.append(
r"""
BmpDecoder.prototype.parseBGR = function() {
this.pos = this.offset;
try {
var bitn = "bit" + this.bitPP;
var len = this.width * this.height * 4;
this.data = new Uint8Array(len);
this[bitn]();
} catch (e) {
console.log("bit decode error:" + e);
}
};
BmpDecoder.prototype.bit1 = function() {
var xlen = Math.ceil(this.width / 8);
var mode = xlen % 4;
var y;
for (y = this.height - 1; y >= 0; y--) {
var line = this.bottom_up ? y : this.height - 1 - y;
for (var x = 0; x < xlen; x++) {
var b = this.datav.getUint8(this.pos++, true);
var location = line * this.width * 4 + x * 8 * 4;
for (var i = 0; i < 8; i++) {
if (x * 8 + i < this.width) {
var rgb = this.palette[(b >> (7 - i)) & 0x1];
this.data[location + i * 4] = rgb.blue;
this.data[location + i * 4 + 1] = rgb.green;
this.data[location + i * 4 + 2] = rgb.red;
this.data[location + i * 4 + 3] = 0xff;
} else {
break;
}
}
}
"""
)
# The tests below use Java code. We may need to replace them if we find that JavaScript tokenization should differ in similar cases
TESTS2.append(
r"""
private enum Answer {
YES {
@Override public String toString() {
return "yes";
}
},
NO,
MAYBE
}"""
)
TESTS2.append(
r"""
return new MyClass() {
@Override public void method() {
if (condition()) {
try {
something();
} catch (ProblemException e) {
recover();
}
} else if (otherCondition()) {
somethingElse();
} else {
lastThing();
}
}
};"""
)
TESTS2.append(
r"""
public boolean equals(Object o_) {
if ( o_ == null ) {
return false;
}
if ( o_.getClass() != this.getClass() ) {
return false;
}
Pair<?, ?> o = (Pair<?, ?>) o_;
return x.equals(o.x) && y.equals(o.y);
}
}
"""
)
TESTS3 = []
TESTS3.append(
(
r"""/*
This is the docstring !!
*/
/* ---------- */
public class HelloWorld
{
public void main(String[] args) {
System.out.println("Hello \n World!");
}
}""",
[
"/* STRNEWLINE This ▁ is ▁ the ▁ docstring ▁ ! ! STRNEWLINE */",
"public",
"class",
"HelloWorld",
"{",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'" Hello ▁ \\n ▁ World ! "',
")",
";",
"}",
"}",
],
)
)
TESTS3.append(
(
r"""
overload((byte)1);
// this is my comfff
// ----- ***
overload(1); // this is my comfff
""",
[
"overload",
"(",
"(",
"byte",
")",
"1",
")",
";",
"// ▁ this ▁ is ▁ my ▁ comfff ENDCOM",
"overload",
"(",
"1",
")",
";",
"// ▁ this ▁ is ▁ my ▁ comfff ENDCOM",
],
)
)
TESTS_TOKENIZE_DETOKENIZE_STRING = [
r"""public int read ( ) throws IOException {
int current = super . read ( ) ;
if ( current == '\r' || ( current == '\n' && lastChar != '\r' ) ) {
lineCounter ++ ;
}
lastChar = current ;
return lastChar ;
}""",
r"""public int curly_brackets ( ) throws IOException {
System . out . println ( "This } is the output" ) ;
System . out . println ( "This {} is the output" ) ;
System . out . println ( '}' ) ;
}""",
r"""public int commas ( ) throws IOException {
System . out . println ( "This ; is the output" ) ;
System . out . println ( "This , is the output" ) ;
System . out . println ( ';' ) ;
System . out . println ( ',' ) ;
}""",
r"""public void inException ( ) {
throw new IllegalArgumentException ( "Type \'" + typeToEvaluate + "\' is not a Class, " + "ParameterizedType, GenericArrayType or TypeVariable. Can't extract type." ) ;
}
""",
]
def test_javascript_tokenizer_discarding_comments():
for i, (x, y) in enumerate(TESTS_COMMENTS_STRINGS):
y_ = processor.tokenize_code(x)
if y_ != y:
line_diff = [
j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
)
def test_javascript_detokenizer_discarding_comments():
for i, x in enumerate(
[x[0] for x in TESTS_COMMENTS_STRINGS] + [x[0] for x in TESTS3] + TESTS2
):
tokens = processor.tokenize_code(x)
x_ = processor.detokenize_code(tokens)
tokens_ = processor.tokenize_code(x_)
        if tokens != tokens_:
line_diff = [
j
for j, (line, line_) in enumerate(zip(tokens, tokens_))
if line != line_
]
raise Exception(
f"Difference at {line_diff}\n========== Original:\n{x}\n========== Tokenized {tokens} \n Detokenized:\n{x_} \n Retokenized {tokens_}"
)
def test_javascript_tokenizer_keeping_comments():
for i, (x, y) in enumerate(TESTS3):
y_ = processor.tokenize_code(x, keep_comments=True)
if y_ != y:
line_diff = [
j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
)
def test_javascript_detokenizer_keeping_comments():
for i, x in enumerate(
[x[0] for x in TESTS_COMMENTS_STRINGS] + [x[0] for x in TESTS3] + TESTS2
):
tokens = processor.tokenize_code(x, keep_comments=True)
x_ = processor.detokenize_code(tokens)
tokens_ = processor.tokenize_code(x_, keep_comments=True)
if tokens != tokens_:
line_diff = [
j
for j, (line, line_) in enumerate(zip(tokens, tokens_))
if line != line_
]
raise Exception(
f"Difference at {line_diff}\n========== Original:\n{x}\n========== Tokenized {tokens} \n Detokenized:\n{x_} \n Retokenized {tokens_}"
)
def test_tokenize_detokenize():
test_detokenize_invertible(TESTS_TOKENIZE_DETOKENIZE_STRING)
@pytest.mark.skip("Helper function")
def test_detokenize_invertible(test_examples):
for i, x in enumerate(test_examples):
x_ = processor.detokenize_code(processor.tokenize_code(x, keep_comments=True))
if x_.strip() != x.strip():
raise Exception(
f"Expected:\n==========\n{x.strip()}\nbut found:\n==========\n{x_.strip()}"
)
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/test_tokenize_javascript.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from codegen_sources.preprocessing.lang_processors import JavaProcessor
from codegen_sources.preprocessing.tests.obfuscation.utils import diff_tester
from codegen_sources.preprocessing.tests.tokenization.tokenization_tests_utils import (
tokenizer_test,
detokenize_non_invertible,
detokenize_invertible,
compare_funcs,
)
processor = JavaProcessor(root_folder=Path(__file__).parents[4].joinpath("tree-sitter"))
TESTS = []
TESTS.append(
(
r"""
public class HelloWorld
{
public void main(String[] args) {
System.out.println("Hello \n World!");
}
}""",
[
"public",
"class",
"HelloWorld",
"{",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'" Hello ▁ \\n ▁ World ! "',
")",
";",
"}",
"}",
],
)
)
TESTS.append(
(
r"""
overload((byte)1);
overload(1L);
overload(1.0f);""",
[
"overload",
"(",
"(",
"byte",
")",
"1",
")",
";",
"overload",
"(",
"1L",
")",
";",
"overload",
"(",
"1.0f",
")",
";",
],
)
)
TESTS.append(
(
r"""Runnable r = ()-> System.out.print("Run method");""",
[
"Runnable",
"r",
"=",
"(",
")",
"->",
"System",
".",
"out",
".",
"print",
"(",
'" Run ▁ method "',
")",
";",
],
)
)
TESTS.append(
(
r"""String s = "Hi I am\nMarie";""",
["String", "s", "=", '" Hi ▁ I ▁ am \\n Marie "', ";"],
)
)
TESTS2 = []
TESTS2.append(
r"""
import java.util.concurrent.TimeUnit;
public class Mensuration{ //mensuration of a child
private int height;
private int weight;
private String child_name;
public Mensuration(int height, int weight, String name):{
this.height = height;
this.weight = weight;
this.child_name = name;
}
public int get_height(){
return height;
}
public int get_weight(){
return weight;
}
public String get_name(){
String s = "Name:\n" + child_name;
return s;
}
}"""
)
TESTS2.append(
r"""
private enum Answer {
YES {
@Override public String toString() {
return "yes";
}
},
NO,
MAYBE
}"""
)
TESTS2.append(
r"""
return new MyClass() {
@Override public void method() {
if (condition()) {
try {
something();
} catch (ProblemException e) {
recover();
}
} else if (otherCondition()) {
somethingElse();
} else {
lastThing();
}
}
};"""
)
TESTS2.append(
r"""
public boolean equals(Object o_) {
if ( o_ == null ) {
return false;
}
if ( o_.getClass() != this.getClass() ) {
return false;
}
Pair<?, ?> o = (Pair<?, ?>) o_;
return x.equals(o.x) && y.equals(o.y);
}
}
"""
)
TESTS3 = []
TESTS3.append(
(
r"""/*
This is the docstring !!
*/
/* ---------- */
public class HelloWorld
{
public void main(String[] args) {
System.out.println("Hello \n World!");
}
}""",
[
"/* STRNEWLINE This ▁ is ▁ the ▁ docstring ▁ ! ! STRNEWLINE */",
"public",
"class",
"HelloWorld",
"{",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'" Hello ▁ \\n ▁ World ! "',
")",
";",
"}",
"}",
],
)
)
TESTS3.append(
(
r"""
overload((byte)1);
// this is my comfff
// ----- ***
overload(1L); // this is my comfff
overload(1.0f);""",
[
"overload",
"(",
"(",
"byte",
")",
"1",
")",
";",
"// ▁ this ▁ is ▁ my ▁ comfff ENDCOM",
"overload",
"(",
"1L",
")",
";",
"// ▁ this ▁ is ▁ my ▁ comfff ENDCOM",
"overload",
"(",
"1.0f",
")",
";",
],
)
)
TESTS_TOKENIZE_DETOKENIZE_STRING = [
(
r"""public int read ( ) throws IOException {
int current = super . read ( ) ;
if ( current == '\r' || ( current == '\n' && lastChar != '\r' ) ) {
lineCounter ++ ;
}
lastChar = current ;
return lastChar ;
}""",
"""""",
),
(
r"""public int curly_brackets ( ) throws IOException {
System . out . println ( "This } is the output" ) ;
System . out . println ( "This {} is the output" ) ;
System . out . println ( '}' ) ;
}""",
"""""",
),
(
r"""public int commas ( ) throws IOException {
System . out . println ( "This ; is the output" ) ;
System . out . println ( "This , is the output" ) ;
System . out . println ( ';' ) ;
System . out . println ( ',' ) ;
}""",
"""""",
),
(
r"""public void inException ( ) {
throw new IllegalArgumentException ( "Type \'" + typeToEvaluate + "\' is not a Class, " + "ParameterizedType, GenericArrayType or TypeVariable. Can't extract type." ) ;
}
""",
"""""",
),
(r"""s . replaceAll ( "\\s+$" , "" ) . split ( " " ) ;""", ""),
(
r"""import java . util . * ;
import java . io . * ;
public class Addition {
public static void main ( String [ ] args ) throws IOException {
BufferedReader bufferedReader = new BufferedReader ( new InputStreamReader ( System . in ) ) ;
String [ ] inputs = bufferedReader . readLine ( ) . replaceAll ( "\\s+$" , "" ) . split ( " " ) ;
Integer a = Integer . parseInt ( inputs [ 0 ] ) ;
Integer b = Integer . parseInt ( inputs [ 1 ] ) ;
System . out . println ( a + b ) ;
}
}""",
"",
),
]
TESTS_DONT_PROCESS_STRINGS = [
(
r"""
public class HelloWorld
{
// This is a comment
public void main(String[] args) {
System.out.println("Hello \n World!");
}
}""",
[
"public",
"class",
"HelloWorld",
"{",
"// This is a comment ENDCOM",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'"Hello \\n World!"',
")",
";",
"}",
"}",
],
),
(
r"""
public class HelloEarth
{
/* This is a
multiline
comment */
public void main(String[] args) {
System.out.println("Hello \nEarth!");
}
}""",
[
"public",
"class",
"HelloEarth",
"{",
"/* This is a\\n multiline\\n comment */",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'"Hello \\nEarth!"',
")",
";",
"}",
"}",
],
),
]
TESTS_BACK_R_CHAR = [
(
"""
public class HelloWorld
{\r
public void main(String[] args) {
System.out.println("Hello \rWorld!");
}
}""",
[
"public",
"class",
"HelloWorld",
"{",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'"Hello World!"',
")",
";",
"}",
"}",
],
)
]
TESTS_IMPORTS = [
(
(
r"""
import java.lang.*;
import javafx.util.Pair;
public class HelloWorld
{
public void main(String[] args) {
System.out.println("Hello \n World!");
}
}""",
[
"import",
"java",
".",
"lang",
".",
"*",
";",
"import",
"javafx",
".",
"util",
".",
"Pair",
";",
"public",
"class",
"HelloWorld",
"{",
"public",
"void",
"main",
"(",
"String",
"[",
"]",
"args",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
'" Hello ▁ \\n ▁ World ! "',
")",
";",
"}",
"}",
],
)
)
]
TESTS_CHARS = [
(
r"""
char a = 'a' ;
""",
["char", "a", "=", "' a '", ";"],
)
]
TESTS_DETOKENIZE_CHARS = [
(
r"char a='a';",
r"""char a = 'a' ;
""",
)
]
def test_java_tokenizer_discarding_comments():
for i, (x, y) in enumerate(TESTS):
y_ = processor.tokenize_code(x)
if y_ != y:
line_diff = [
j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
)
def test_tokenize_imports():
for i, (x, y) in enumerate(TESTS_IMPORTS):
y_ = processor.tokenize_code(x)
if y_ != y:
line_diff = [
j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
)
def test_java_detokenizer_discarding_comments():
for i, x in enumerate([x[0] for x in TESTS] + [x[0] for x in TESTS3] + TESTS2):
tokens = processor.tokenize_code(x)
x_ = processor.detokenize_code(tokens)
tokens_ = processor.tokenize_code(x_)
        if tokens != tokens_:
line_diff = [
j
for j, (line, line_) in enumerate(zip(tokens, tokens_))
if line != line_
]
raise Exception(
f"Difference at {line_diff}\n========== Original:\n{x}\n========== Tokenized {tokens} \n Detokenized:\n{x_} \n Retokenized {tokens_}"
)
def test_java_tokenizer_keeping_comments():
for i, (x, y) in enumerate(TESTS3):
y_ = processor.tokenize_code(x, keep_comments=True)
if y_ != y:
line_diff = [
j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
)
def test_dont_process_strings():
for i, (x, y) in enumerate(TESTS_DONT_PROCESS_STRINGS):
y_ = processor.tokenize_code(x, keep_comments=True, process_strings=False)
print(y_)
if y_ != y:
line_diff = [
j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
)
def test_backr_chars():
for i, (x, y) in enumerate(TESTS_BACK_R_CHAR):
y_ = processor.tokenize_code(x, keep_comments=True, process_strings=False)
print(y_)
if y_ != y:
line_diff = [
j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
)
def test_java_detokenizer_keeping_comments():
for i, x in enumerate([x[0] for x in TESTS] + [x[0] for x in TESTS3] + TESTS2):
tokens = processor.tokenize_code(x, keep_comments=True)
x_ = processor.detokenize_code(tokens)
tokens_ = processor.tokenize_code(x_, keep_comments=True)
if tokens != tokens_:
line_diff = [
j
for j, (line, line_) in enumerate(zip(tokens, tokens_))
if line != line_
]
raise Exception(
f"Difference at {line_diff}\n========== Original:\n{x}\n========== Tokenized {tokens} \n Detokenized:\n{x_} \n Retokenized {tokens_}"
)
def test_tokenize_detokenize():
detokenize_invertible(TESTS_TOKENIZE_DETOKENIZE_STRING, processor)
def test_java_chars():
tokenizer_test(TESTS_CHARS, processor, keep_comments=False)
def test_detokenize_chars():
detokenize_non_invertible(TESTS_DETOKENIZE_CHARS, processor)
FUNC_EXTRACTION = [
(
"""
@SuppressWarnings("resource")
public class Main {
public static void main(String args[]) {
return 0;
}
}""",
(["public static void main ( String args [ ] ) { return 0 ; }"], []),
),
(
"""
public class Room {
double length;
double breadth;
public static int return_zero() {
return 0;
}
public double area(){
return length * breadth;
}
}""",
[
["public static int return_zero ( ) { return 0 ; }"],
["public double area ( ) { return length * breadth ; }"],
],
),
]
def test_extract_java_functions():
for input_file, expected_funcs in FUNC_EXTRACTION:
actual_funcs_sa, actual_funcs_cl = processor.extract_functions(
processor.tokenize_code(input_file)
)
print(actual_funcs_sa, actual_funcs_cl)
expected_sa, expected_cl = expected_funcs
compare_funcs(actual_funcs_sa, expected_sa)
compare_funcs(actual_funcs_cl, expected_cl)
def test_extract_java_functions_untokenized():
for input_file, expected_funcs in FUNC_EXTRACTION:
actual_funcs_sa, actual_funcs_cl = processor.extract_functions(input_file)
print(actual_funcs_sa, actual_funcs_cl)
expected_sa, expected_cl = expected_funcs
# check equality after tokenization
actual_funcs_sa = [
" ".join(processor.tokenize_code(x)) for x in actual_funcs_sa
]
actual_funcs_cl = [
" ".join(processor.tokenize_code(x)) for x in actual_funcs_cl
]
compare_funcs(actual_funcs_sa, expected_sa)
compare_funcs(actual_funcs_cl, expected_cl)
def test_formatter():
input_f = """public static int factorial (int n ){ if (n == 0) return 1; return n * factorial(n-1);}"""
expected = """public static int factorial(int n) {
if (n == 0)
return 1;
return n * factorial(n - 1);
}"""
actual = processor.format(input_f)
diff_tester(expected, actual)
def test_formatter_partial_code():
input_f = """public static int factorial (int n ){ if """
expected = """public static int factorial(int n) {
if"""
actual = processor.format(input_f)
diff_tester(expected, actual)
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/test_tokenize_java.py
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import difflib
import typing as tp
def tokenizer_test(test_examples, processor, keep_comments):
for i, (x, y) in enumerate(test_examples):
y_ = processor.tokenize_code(x, keep_comments=keep_comments)
if y_ != y:
line_diff = [
j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
)
def detokenize_invertible(test_examples, processor):
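    # round-trip check: detokenize(tokenize(x)) must reproduce x (up to surrounding whitespace)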
for i, (x, _) in enumerate(test_examples):
print(x)
print(processor.tokenize_code(x, keep_comments=False))
x_ = processor.detokenize_code(processor.tokenize_code(x, keep_comments=False))
if x_.strip() != x.strip():
raise Exception(
f"Expected:\n==========\n{x.strip()}\nbut found:\n==========\n{x_.strip()}"
)
def detokenize_non_invertible(test_examples, processor):
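    # check detokenize(tokenize(x)) against an expected normalized form y (the round trip is not exact)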
for i, (x, y) in enumerate(test_examples):
y_ = processor.detokenize_code(processor.tokenize_code(x, keep_comments=False))
if y_ != y:
            length = min(len(y_), len(y))
            char_message = ""
            for j in range(length):
if y_[j] != y[j]:
char_message = (
f"expected character '{y[j]}' at index {j} but found '{y_[j]}'"
)
if char_message == "":
char_message = f"expected length {len(y)}, found {len(y_)}"
raise Exception(
f"Expected:\n==========\n{y}\nbut found:\n==========\n{y_} \n==========\n{char_message}"
)
def tokenize_twice(test_examples, processor, keep_comments=False):
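    # stability check: tokenizing the detokenized output must give back the original tokens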
for i, (x, _) in enumerate(test_examples):
tokenized_once = processor.tokenize_code(x, keep_comments=keep_comments)
tokenized_twice = processor.tokenize_code(
processor.detokenize_code(tokenized_once), keep_comments=keep_comments
)
if tokenized_once != tokenized_twice:
            length = min(len(tokenized_twice), len(tokenized_once))
            char_message = ""
            for j in range(length):
if tokenized_twice[j] != tokenized_once[j]:
char_message = f"expected token '{tokenized_once[j]}' at index {j} but found '{tokenized_twice[j]}'"
if char_message == "":
char_message = f"expected length {len(tokenized_once)}, found {len(tokenized_twice)}"
raise Exception(
f"Expected:\n==========\n{tokenized_once}\nbut found:\n==========\n{tokenized_twice} \n==========\n{char_message}"
)
def compare_funcs(
actual, expected, normalization: tp.Callable = lambda x: x,
):
d = difflib.Differ()
if expected != actual:
print("Expected:")
print(expected)
print("#" * 50)
print("Got:")
print(actual)
print("#" * 50)
diff = d.compare(normalization(expected), normalization(actual))
for line in diff:
print(line)
raise Exception(
f"Differences between\n========== Expected:\n{expected}\n========== \n and actual :\n{actual}"
)
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/tokenization_tests_utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import ast
import difflib
import typing as tp
from pathlib import Path
import pytest
from codegen_sources.preprocessing.lang_processors import (
PythonProcessor,
PythonTreeSitterProcessor,
LangProcessor,
)
from codegen_sources.preprocessing.tests.tokenization.tokenization_tests_utils import (
compare_funcs,
)
processors = (PythonProcessor(), PythonTreeSitterProcessor())
with_both_processors = pytest.mark.parametrize("processor", processors)
def test_python_tree_sitter_on_all_codegen_sources() -> None:
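    # tokenize and detokenize every .py file under codegen_sources with the
    # tree-sitter processor and check that the result still parses with ast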
processor = processors[1]
root = Path(__file__).parents[3]
assert root.name == "codegen_sources"
errors = []
total = 0
fail = 0
for fp in root.rglob("**/*.py"):
if any(f"preprocessing/{x}" in str(fp) for x in ("tests", "lang_processors")):
continue # ignore since it's mostly due to token names making a mess
total += 1
text = fp.read_text()
tokens = processor.tokenize_code(text)
string = processor.detokenize_code(tokens)
try:
ast.parse(string)
except SyntaxError as e:
fail += 1
errors.extend([str(fp), text, " ".join(tokens), str(e), string])
print(fp)
if errors:
Path("errors.txt").write_text(
"\n##################################\n".join(errors), encoding="utf8"
)
raise AssertionError(f"{fail} failures out of {total} files. Check error.txt")
TESTS = []
TESTS.append(("a = [3.14,4]", ["a", "=", "[", "3.14", ",", "4", "]", "NEW_LINE"]))
TESTS.append(
(
(
"""from src.tokenize import _tok
@decorated
def func1(a):
assert isinstance(a, int)
a+=1
return a"""
),
[
"from",
"src",
".",
"tokenize",
"import",
"_tok",
"NEW_LINE",
"@",
"decorated",
"NEW_LINE",
"def",
"func1",
"(",
"a",
")",
":",
"NEW_LINE",
"INDENT",
"assert",
"isinstance",
"(",
"a",
",",
"int",
")",
"NEW_LINE",
"a",
"+=",
"1",
"NEW_LINE",
"return",
"a",
"NEW_LINE",
"DEDENT",
],
)
)
TESTS.append(
(
(
"""#comment blabla
''' lll coo
# kkk ''' """
),
[],
)
)
TESTS.append(
(
(
"""#comment blabla
a = 10 """
),
["a", "=", "10", "NEW_LINE"],
)
)
TESTS.append(
(
(
"""'''comment
blabla'''
a = 10 """
),
["a", "=", "10", "NEW_LINE"],
)
)
TESTS.append(
(
(
"""'''comment
blabla'''
a = ('fff',
'fff') """
),
["a", "=", "(", "' fff '", ",", "' fff '", ")", "NEW_LINE"],
)
)
TESTS.append(
(
(
"""
a = '''fff
fff''' """
),
["a", "=", "''' fff STRNEWLINE fff '''", "NEW_LINE"],
)
)
TESTS.append(
(
(
"""
a = \"\"\"
'fff'
\"\"\"
"""
),
["a", "=", '""" STRNEWLINE \' fff \' STRNEWLINE """', "NEW_LINE"],
)
)
TESTS.append(
(
(
"""with open('ff.txt', 'r') as f:
x = f.read()
line = x.readline()"""
),
[
"with",
"open",
"(",
"' ff . txt '",
",",
"' r '",
")",
"as",
"f",
":",
"NEW_LINE",
"INDENT",
"x",
"=",
"f",
".",
"read",
"(",
")",
"NEW_LINE",
"DEDENT",
"line",
"=",
"x",
".",
"readline",
"(",
")",
"NEW_LINE",
],
)
)
TESTS.append(
(
(r'''WELCOME_MSG = "Hello you!\n what's up?"'''),
["WELCOME_MSG", "=", '" Hello ▁ you ! \\n ▁ what \' s ▁ up ? "', "NEW_LINE"],
)
)
TESTS.append(
(
(
r"""'''this is a
docstring on 2 lines '''"""
),
[],
)
)
TESTS.append(
(
r"""tab = ['a',
'b',
'c']""",
["tab", "=", "[", "' a '", ",", "' b '", ",", "' c '", "]", "NEW_LINE"],
)
)
TESTS.append(
(
r"""import xxx
a='Hello \n word'""",
["import", "xxx", "NEW_LINE", "a", "=", "' Hello ▁ \\n ▁ word '", "NEW_LINE"],
)
)
TESTS.append(
(
r"""def gen(num: int) -> tp.Iterable[int]:
for k in range(3): # commented
out = \
yield k""",
[
"def",
"gen",
"(",
"num",
":",
"int",
")",
"->",
"tp",
".",
"Iterable",
"[",
"int",
"]",
":",
"NEW_LINE",
"INDENT",
"for",
"k",
"in",
"range",
"(",
"3",
")",
":",
"NEW_LINE",
"INDENT",
"out",
"=",
"yield",
"k",
"NEW_LINE",
"DEDENT",
"DEDENT",
],
)
)
TESTS.append(
(
'''def myfunc():
"""my doc with comment""" # my comment
return 1
''',
[
"def",
"myfunc",
"(",
")",
":",
"NEW_LINE",
"INDENT",
"return",
"1",
"NEW_LINE",
"DEDENT",
],
)
)
# TESTS.append(
# ('''bin_path = path.with_suffix("")
# out = f'{param_type_filled.replace("&", "")}'
# ''',
# ['bin_path', '=', 'path', '.', 'with_suffix', '(', '" "', ')', 'NEW_LINE', 'out', '=',
# 'f\' { param _ type _ filled . replace ( " & " , ▁ " " ) } \'', 'NEW_LINE'])
# )
TESTS2 = []
TESTS2.append(
r"""''' module with one class and one function
'''
import torch
from ..src.nnnn import jjj
import .knpon.module
class myclass:
# comment blala
# comment blabl2
def geometric_suite():
i = 0
j = 1
for i in range(2):
# this function will print "Hello Word\nI am boby ! what's up ?"
i += 1
j += 3
l = module.function()
print("Hello Word\nI am boby !")
return i, j"""
)
TESTS3 = []
TESTS3.append(
(
(
"""'''comment
blabla'''
a = ('fff',
'fff') """
),
[
"''' comment STRNEWLINE blabla '''",
"NEW_LINE",
"a",
"=",
"(",
"' fff '",
",",
"' fff '",
")",
"NEW_LINE",
],
)
)
TESTS3.append(
(
(
"""'''comment
blabla'''
a = 10 """
),
["''' comment STRNEWLINE blabla '''", "NEW_LINE", "a", "=", "10", "NEW_LINE"],
)
)
TESTS3.append(
(
(
"""
a = '''fff
fff''' """
),
["a", "=", "''' fff STRNEWLINE fff '''", "NEW_LINE"],
)
)
TESTS3.append(
(
(
"""#comment blabla
# --- ** *
a = 10 """
),
["# comment ▁ blabla ENDCOM", "a", "=", "10", "NEW_LINE"],
)
)
TESTS3.append(
(
("""a = 10 #comment blabla"""),
["a", "=", "10", "# comment ▁ blabla ENDCOM", "NEW_LINE"],
)
)
TESTS3.append(
(
(
"""def my_func():
''' ********'''
return 0"""
),
[
"def",
"my_func",
"(",
")",
":",
"NEW_LINE",
"INDENT",
"return",
"0",
"NEW_LINE",
"DEDENT",
],
)
)
TESTS_DONT_PROCESS_STRINGS = [
(
r"""import xxx
# this is a comment
a='Hello \nworld'
""",
[
"import",
"xxx",
"NEW_LINE",
"# this is a comment ENDCOM",
"a",
"=",
"'Hello \\nworld'",
"NEW_LINE",
],
),
(
(
"""from src.tokenize import _tok
def func1(a):
a+=1
return a"""
),
[
"from",
"src",
".",
"tokenize",
"import",
"_tok",
"NEW_LINE",
"def",
"func1",
"(",
"a",
")",
":",
"NEW_LINE",
"INDENT",
"a",
"+=",
"1",
"NEW_LINE",
"return",
"a",
"NEW_LINE",
"DEDENT",
],
),
(
r"""import xxx
a='''
Hello
world'''""",
["import", "xxx", "NEW_LINE", "a", "=", "'''\\nHello\\nworld'''", "NEW_LINE"],
),
(
(
"""from src.tokenize import _tok
def func1(a):
a+=1
return a"""
),
[
"from",
"src",
".",
"tokenize",
"import",
"_tok",
"NEW_LINE",
"def",
"func1",
"(",
"a",
")",
":",
"NEW_LINE",
"INDENT",
"a",
"+=",
"1",
"NEW_LINE",
"return",
"a",
"NEW_LINE",
"DEDENT",
],
),
]
TESTS_SPECIAL_STRINGS = [
"""m = re.match ( r'(?:py.*-)?([\d\.]+)(?:-(\w+))?' , vers )
""",
"""print ( f"{epoch} : {score}" )
""",
]
TESTS_IMPORTS = [
(
"""import numpy as np
from math import sqrt
print(sqrt(2))
""",
[
"import",
"numpy",
"as",
"np",
"NEW_LINE",
"from",
"math",
"import",
"sqrt",
"NEW_LINE",
"print",
"(",
"sqrt",
"(",
"2",
")",
")",
"NEW_LINE",
],
)
]
TEST_EXTRACT_FUNCTIONS = [
(
(
"""from src.tokenize import _tok
def func1(a):
return a
class Foo():
def bar(self):
return 1
""",
(
["def func1 ( a ) : NEW_LINE INDENT return a NEW_LINE DEDENT"],
["def bar ( self ) : NEW_LINE INDENT return 1 NEW_LINE DEDENT"],
),
)
)
]
def assert_tokens_equal(
actual: tp.List[str], expected: tp.List[str], code: tp.Optional[str] = None
) -> None:
if actual == expected:
return
line_diff = [
j for j, (line, line_) in enumerate(zip(actual, expected)) if line != line_
]
line_num = line_diff[-1] if len(line_diff) > 0 else -1
strings = [
f"Difference at {line_num}\nExpected:\n==========\n{expected}\nbut found:\n==========\n{actual}"
]
if code is not None:
strings.append(f"# # for input # #\n{code!r}")
strings.append(f"# # which prints as follows # #\n{code}")
raise Exception("\n\n".join(strings))
@with_both_processors
@pytest.mark.parametrize("code,expected", TESTS)
def test_python_tokenizer(
code: str, expected: tp.List[str], processor: LangProcessor
) -> None:
if isinstance(processor, PythonProcessor) and "my doc with comment" in code:
pytest.skip("TODO")
y_ = processor.tokenize_code(code)
assert_tokens_equal(y_, expected, code)
@with_both_processors
@pytest.mark.parametrize("code,expected", TESTS_IMPORTS)
def test_imports(code: str, expected: tp.List[str], processor: LangProcessor) -> None:
y_ = processor.tokenize_code(code)
assert_tokens_equal(y_, expected, code)
@with_both_processors
@pytest.mark.parametrize("code,expected", TESTS3)
def test_python_tokenizer_with_coms(
code: str, expected: tp.List[str], processor: LangProcessor
) -> None:
y_ = processor.tokenize_code(code, keep_comments=True)
assert_tokens_equal(y_, expected, code)
@with_both_processors
@pytest.mark.parametrize("code,expected", TESTS_DONT_PROCESS_STRINGS)
def test_python_dont_process_strings(
processor: LangProcessor, code: str, expected: tp.List[str]
) -> None:
y_ = processor.tokenize_code(code, keep_comments=True, process_strings=False)
assert_tokens_equal(y_, expected, code)
@with_both_processors
@pytest.mark.parametrize("code", [x[0] for x in TESTS] + TESTS2)
def test_python_detokenizer(code: str, processor: LangProcessor) -> None:
if isinstance(processor, PythonProcessor) and "my doc with comment" in code:
pytest.skip("TODO")
tokens = processor.tokenize_code(code)
x_ = processor.detokenize_code(tokens)
tokens_ = processor.tokenize_code(x_)
print("# Rebuilding #\n", x_, "\n# from #\n", code)
assert_tokens_equal(tokens_, tokens, code)
@with_both_processors
def test_detokenizer_output(processor: LangProcessor) -> None:
for i, x in enumerate(TESTS_SPECIAL_STRINGS):
tokens = processor.tokenize_code(x)
x_ = processor.detokenize_code(tokens)
d = difflib.Differ()
if x != x_:
diff = d.compare(x.split("\n"), x_.split("\n"))
for line in diff:
print(line)
raise Exception(
f"Differences between\n========== Original:\n{x}\n========== \n and actual Detokenized:\n{x_}"
)
@with_both_processors
def test_extract_functions(processor: LangProcessor) -> None:
for input_file, expected_funcs in TEST_EXTRACT_FUNCTIONS:
actual_funcs_sa, actual_funcs_cl = processor.extract_functions(
processor.tokenize_code(input_file)
)
print(actual_funcs_sa, actual_funcs_cl)
expected_sa, expected_cl = expected_funcs
compare_funcs(actual_funcs_sa, expected_sa, normalization=lambda x: x.strip())
compare_funcs(actual_funcs_cl, expected_cl, normalization=lambda x: x.strip())
def test_extract_functions_without_tok() -> None:
processor = PythonTreeSitterProcessor()
for input_file, expected_funcs in TEST_EXTRACT_FUNCTIONS:
actual_funcs_sa, actual_funcs_cl = processor.extract_functions(
input_file, tokenized=False
)
print(actual_funcs_sa, actual_funcs_cl)
expected_sa, expected_cl = expected_funcs
compare_funcs(
[" ".join(processor.tokenize_code(x)) for x in actual_funcs_sa], expected_sa
)
compare_funcs(
[" ".join(processor.tokenize_code(x)) for x in actual_funcs_cl], expected_cl
)
AST_ERRORS = [
"""test_out_path = Path(__file__).parent.joinpath(
"test_output_should_not_be_written_go.out"
)
if test_out_path.exists():
os.remove(test_out_path)
""",
'''class Test:
"""docstring""" # comment
def __init__(self):
pass
''',
'''
class Error(RuntimeError):
"""doc"""
def _check_command(command: str):
pass
''',
"""
bin_path = path.with_suffix("")
out = f'{param_type_filled.replace("&", "")}'
""",
]
@with_both_processors
@pytest.mark.parametrize("code", AST_ERRORS)
def test_ast_errors(processor: LangProcessor, code: str) -> None:
if isinstance(processor, PythonProcessor):
pytest.skip("Standard Python tokenizer does not work for now")
ast.parse(code)
tokens = processor.tokenize_code(code)
string = processor.detokenize_code(tokens)
try:
ast.parse(string)
except SyntaxError:
print("\n########\n".join([code, " ".join(tokens), string]))
raise AssertionError("Cannot parse output")
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/test_tokenize_python.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from codegen_sources.preprocessing.lang_processors import GoProcessor
from codegen_sources.preprocessing.tests.tokenization.tokenization_tests_utils import (
compare_funcs,
detokenize_invertible,
detokenize_non_invertible,
tokenize_twice,
tokenizer_test,
)
processor = GoProcessor()
TESTS = []
TESTS.append(
(
r"""
func main() {
fmt.Println("hello world")
}""",
[
"func",
"main",
"(",
")",
"{",
"fmt",
".",
"Println",
"(",
'" hello ▁ world "',
")",
"NEW_LINE",
"}",
],
)
)
TESTS.append(
(
r"""
/* This is a multiline comment
// ------- ******* ------- */
func main() {
fmt.Println("hello world")
}""",
[
"func",
"main",
"(",
")",
"{",
"fmt",
".",
"Println",
"(",
'" hello ▁ world "',
")",
"NEW_LINE",
"}",
],
)
)
TESTS.append(
(r"""var b, c int = 1, 2""", ["var", "b", ",", "c", "int", "=", "1", ",", "2"],)
)
# test reference, pointer
TESTS.append(
(
r"""var a = 10
*a_ptr := &a""",
["var", "a", "=", "10", "NEW_LINE", "*", "a_ptr", ":=", "&", "a"],
)
)
TESTS.append((r"""s := "Hi I am\nMarie" """, ["s", ":=", '" Hi ▁ I ▁ am \\n Marie "'],))
TESTS_KEEP_COMMENTS = [
(
r"""
// This is a comment
// ----------*****
func main() {
fmt.Println("hello world")
}""",
[
"// ▁ This ▁ is ▁ a ▁ comment ENDCOM",
"func",
"main",
"(",
")",
"{",
"fmt",
".",
"Println",
"(",
'" hello ▁ world "',
")",
"NEW_LINE",
"}",
],
),
]
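# Note on the expected tokens above (inferred from the test data, not a spec): spaces
# inside string literals and kept comments are rendered as "▁", the escaped newline in
# string literals stays as "\\n", statement-ending newlines become the NEW_LINE token,
# and retained line comments end with ENDCOM.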
TEST_FUNC_EXTRACTION = [
(
r"""
package Main
func Get_sum(num1 int, num2 int) int {
var num3 = num1 + num2
return num3
}
""",
[
[
"func Get_sum ( num1 int , num2 int ) int { var num3 = num1 + num2 NEW_LINE return num3 NEW_LINE }"
],
[],
],
),
(
r"""
package doubleLinkedList
type List struct {
head, tail *Node
}
type Node struct {
value string
next, prev *Node
}
func (l *List) First() *Node {
return l.head
}
func (n *Node) Next() *Node {
return n.next
}
func (n *Node) Prev() *Node {
return n.prev
}
func (l *List) Push(val string) *List {
n := &Node{value: val}
if l.head == nil { //first node
l.head = n
} else {
l.tail.next = n
n.prev = l.tail
}
l.tail = n
return l
}
func (l *List) Pop() string {
if l.tail == nil {
return ""
}
value := l.tail.value
l.tail = l.tail.prev
if l.tail == nil {
l.head = nil
}
return value
}
func (l *List) Find(val string) *Node {
for n := l.First(); n != nil; n = n.Next() {
if n.value == val {
return n
}
}
return nil
}
func (l *List) Erase(val string) bool {
node := l.Find(val)
if node != nil {
node.prev.next = node.next
node.next.prev = node.prev
return true
}
return false
}
""",
[
[
"func ( l * List ) First ( ) * Node { return l . head NEW_LINE }",
"func ( n * Node ) Next ( ) * Node { return n . next NEW_LINE }",
"func ( n * Node ) Prev ( ) * Node { return n . prev NEW_LINE }",
"func ( l * List ) Push ( val string ) * List { n := & Node { value : val } NEW_LINE if l . head == nil { l . head = n NEW_LINE } else { l . tail . next = n NEW_LINE n . prev = l . tail NEW_LINE } NEW_LINE l . tail = n NEW_LINE return l NEW_LINE }",
'func ( l * List ) Pop ( ) string { if l . tail == nil { return " " NEW_LINE } NEW_LINE value := l . tail . value NEW_LINE l . tail = l . tail . prev NEW_LINE if l . tail == nil { l . head = nil NEW_LINE } NEW_LINE return value NEW_LINE }',
"func ( l * List ) Find ( val string ) * Node { for n := l . First ( ) ; n != nil ; n = n . Next ( ) { if n . value == val { return n NEW_LINE } NEW_LINE } NEW_LINE return nil NEW_LINE }",
"func ( l * List ) Erase ( val string ) bool { node := l . Find ( val ) NEW_LINE if node != nil { node . prev . next = node . next NEW_LINE node . next . prev = node . prev NEW_LINE return true NEW_LINE } NEW_LINE return false NEW_LINE }",
],
[],
],
),
]
def test_go_tokenizer_discarding_comments():
tokenizer_test(TESTS, processor, keep_comments=False)
def test_go_tokenizer_keep_comments():
tokenizer_test(TESTS_KEEP_COMMENTS, processor, keep_comments=True)
def test_extract_go_functions():
for input_file, expected_funcs in TEST_FUNC_EXTRACTION:
actual_funcs_sa, actual_funcs_cl = processor.extract_functions(
processor.tokenize_code(input_file)
)
print(actual_funcs_sa, actual_funcs_cl)
expected_sa, expected_cl = expected_funcs
compare_funcs(actual_funcs_sa, expected_sa)
compare_funcs(actual_funcs_cl, expected_cl)
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/test_tokenize_go.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from codegen_sources.preprocessing.lang_processors import RustProcessor
from codegen_sources.preprocessing.tests.tokenization.tokenization_tests_utils import (
compare_funcs,
detokenize_invertible,
detokenize_non_invertible,
tokenize_twice,
tokenizer_test,
)
processor = RustProcessor()
TESTS = []
TESTS.append(
(
r"""
// This is a comment
// ------- ******* -------
fn main() {
println!("Hello World!");
}""",
[
"fn",
"main",
"(",
")",
"{",
"println",
"!",
"(",
'" Hello ▁ World ! "',
")",
";",
"}",
],
)
)
TESTS.append(
(
r"""
/* This is a multiline comment
// ------- ******* ------- */
fn main() {
println!("Hello World!");
}""",
[
"fn",
"main",
"(",
")",
"{",
"println",
"!",
"(",
'" Hello ▁ World ! "',
")",
";",
"}",
],
)
)
TESTS.append(
(
r"""let closure_annotated = |i: i32| -> i32 { i + 1 };""",
[
"let",
"closure_annotated",
"=",
"|",
"i",
":",
"i32",
"|",
"->",
"i32",
"{",
"i",
"+",
"1",
"}",
";",
],
)
)
# test reference, pointer
TESTS.append(
(
r"""let a: i32 = 10;
let a_ptr: *const i32 = &a;
""",
[
"let",
"a",
":",
"i32",
"=",
"10",
";",
"let",
"a_ptr",
":",
"*",
"const",
"i32",
"=",
"&",
"a",
";",
],
)
)
TESTS.append(
(
"""use std::collections::HashMap;
let mut book_reviews = HashMap::new();
book_reviews.insert(
"b1".to_string(),
"My favorite book.".to_string(),
);""",
[
"use",
"std",
"::",
"collections",
"::",
"HashMap",
";",
"let",
"mut",
"book_reviews",
"=",
"HashMap",
"::",
"new",
"(",
")",
";",
"book_reviews",
".",
"insert",
"(",
'" b1"',
".",
"to_string",
"(",
")",
",",
'" My ▁ favorite ▁ book . "',
".",
"to_string",
"(",
")",
",",
")",
";",
],
)
)
TESTS.append(
(
r"""let s = "Hi I am\nMarie";""",
["let", "s", "=", '" Hi ▁ I ▁ am \\n Marie "', ";"],
)
)
TESTS_PRINTING = [
(
r"""println!("{}", var1 + var2);""",
["println", "!", "(", '" { } "', ",", "var1", "+", "var2", ")", ";"],
)
]
TESTS_KEEP_COMMENTS = [
(
r"""
// This is a comment
// ----------*****
fn main() {
println!("Hello World!");
}""",
[
"// ▁ This ▁ is ▁ a ▁ comment ENDCOM",
"fn",
"main",
"(",
")",
"{",
"println",
"!",
"(",
'" Hello ▁ World ! "',
")",
";",
"}",
],
),
(
r"""
/* This is a
multiline comment */
/*----------------this is the docstring */
/* ----*----*-*---- ====== *** */
fn main() {
println!("Hello World!");
}""",
[
"/* ▁ This ▁ is ▁ a STRNEWLINE multiline ▁ comment ▁ */",
"/* - - - - - this ▁ is ▁ the ▁ docstring ▁ */",
"fn",
"main",
"(",
")",
"{",
"println",
"!",
"(",
'" Hello ▁ World ! "',
")",
";",
"}",
],
),
]
TESTS_CHARS = [
(
r"""
let a_char = 'a' ;
""",
["let", "a_char", "=", "' a '", ";"],
)
]
TESTS_STRINGS = [
(
r"""
let s = "Hello !" ;""",
["let", "s", "=", f'" Hello ▁ ! "', ";"],
),
]
TESTS_MULTILINE_STRINGS = [
(
r"""
let s =
"First line
Second line \
End second line";
""",
[
"let",
"s",
"=",
'" First ▁ line STRNEWLINE Second ▁ line ▁ \\ STRNEWLINE End ▁ second ▁ line "',
";",
],
)
]
TESTS_DETOKENIZE_MULTILINE_STRINGS = [
(
r"""
let s =
"First line
Second line \
End second line";
""",
r"""let s = "First line
Second line \
End second line" ;
""",
)
]
DETOKENIZE_TESTS = []
DETOKENIZE_TESTS.append(
(
r"""
// This is a comment
fn main() {
println!("Hello World!");
}
""",
r"""fn main ( ) {
println ! ( "Hello World!" ) ;
}
""",
)
)
DETOKENIZE_TESTS.append(
(
r"""let a : i32 = 10;
let a_ptr : *const i32 = &a;
""",
r"""let a : i32 = 10 ;
let a_ptr : * const i32 = & a ;
""",
)
)
def test_rust_tokenizer_discarding_comments():
tokenizer_test(TESTS, processor, keep_comments=False)
def test_print_tokenization():
tokenizer_test(TESTS_PRINTING, processor, keep_comments=False)
def test_rust_tokenizer_keep_comments():
tokenizer_test(TESTS_KEEP_COMMENTS, processor, keep_comments=True)
def test_rust_chars():
tokenizer_test(TESTS_CHARS, processor, keep_comments=False)
def test_rust_strings():
tokenizer_test(
TESTS_STRINGS + TESTS_MULTILINE_STRINGS, processor, keep_comments=False
)
def test_rust_detokenize():
detokenize_non_invertible(DETOKENIZE_TESTS, processor)
def test_detokenize_rust_chars():
detokenize_invertible(TESTS_CHARS, processor)
def test_detokenize_string():
detokenize_invertible(TESTS_STRINGS, processor)
def test_detokenize_multiline_string():
detokenize_non_invertible(TESTS_DETOKENIZE_MULTILINE_STRINGS, processor)
def test_tokenize_twice_equal_tokenize_remove_comments():
tokenize_twice(TESTS + TESTS_STRINGS + TESTS_CHARS, processor)
def test_tokenize_twice_equal_tokenize_keep_comments():
tokenize_twice(
TESTS + TESTS_STRINGS + TESTS_CHARS + TESTS_KEEP_COMMENTS,
processor,
keep_comments=True,
)
TEST_FUNC_EXTRACTION = [
(
"""struct Point {
x: f64,
y: f64,
}
// Implementation block, all `Point` methods go in here
impl Point {
// This is a static method
// Static methods don't need to be called by an instance
// These methods are generally used as constructors
fn origin() -> Point {
Point { x: 0.0, y: 0.0 }
}
// Another static method, taking two arguments:
fn new(x: f64, y: f64) -> Point {
Point { x: x, y: y }
}
}
fn hello() {
println!("Hello World!");
}""",
[
["""fn hello ( ) { println ! ( " Hello ▁ World ! " ) ; }"""],
[
"fn origin ( ) -> Point { Point { x : 0.0 , y : 0.0 } }",
"fn new ( x : f64 , y : f64 ) -> Point { Point { x : x , y : y } }",
],
],
),
(
"""trait Quack {
fn quack(&self);
}
struct Duck ();
impl Quack for Duck {
fn quack(&self) {
println!("quack!");
}
}
struct RandomBird {
is_a_parrot: bool
}
impl Quack for RandomBird {
fn quack(&self) {
if ! self.is_a_parrot {
println!("quack!");
} else {
println!("squawk!");
}
}
}
let duck1 = Duck();
let duck2 = RandomBird{is_a_parrot: false};
let parrot = RandomBird{is_a_parrot: true};
let ducks: Vec<&Quack> = vec![&duck1,&duck2,&parrot];
for d in &ducks {
d.quack();
}""",
[
[],
[
'fn quack ( & self ) { println ! ( " quack ! " ) ; }',
'fn quack ( & self ) { if ! self . is_a_parrot { println ! ( " quack ! " ) ; } else { println ! ( " squawk ! " ) ; } }',
],
],
),
]
def test_extract_rust_functions():
for input_file, expected_funcs in TEST_FUNC_EXTRACTION:
actual_funcs_sa, actual_funcs_cl = processor.extract_functions(
processor.tokenize_code(input_file)
)
print(actual_funcs_sa, actual_funcs_cl)
expected_sa, expected_cl = expected_funcs
compare_funcs(actual_funcs_sa, expected_sa)
compare_funcs(actual_funcs_cl, expected_cl)
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/test_tokenize_rust.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
from codegen_sources.preprocessing.lang_processors.java_processor import JavaProcessor
from pathlib import Path
from codegen_sources.preprocessing.lang_processors.tokenization_utils import (
process_string,
tokenize_string,
detokenize_string,
)
processor = JavaProcessor(root_folder=Path(__file__).parents[4].joinpath("tree-sitter"))
TESTS = []
TESTS.append(
(
"lalala! this: is a string lala?",
[
"lalala",
"!",
"▁",
"this",
":",
"▁",
"is",
"▁",
"a",
"▁",
"string",
"▁",
"lala",
"?",
],
)
)
TESTS.append(("isn't it nice?", ["isn", "'", "t", "▁", "it", "▁", "nice", "?"]))
def test_string_tokenization():
for i, (x, y) in enumerate(TESTS):
y_ = tokenize_string(x)
if y_ != y:
line_diff = [
j for j, (line, line_) in enumerate(zip(y, y_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{y}\nbut found:\n==========\n{y_}"
)
def test_string_tokenization_invertible():
for i, (x, y) in enumerate(TESTS):
y_ = tokenize_string(x)
x_ = detokenize_string(y_)
if x_ != x:
line_diff = [
j for j, (line, line_) in enumerate(zip(x, x_)) if line != line_
]
line_diff = line_diff[-1] if len(line_diff) > 0 else -1
raise Exception(
f"Difference at {line_diff}\nExpected:\n==========\n{x}\nbut found:\n==========\n{x_}"
)
|
CodeGen-main
|
codegen_sources/preprocessing/tests/tokenization/test_tokenize_strings.py
|
CodeGen-main
|
codegen_sources/preprocessing/tests/obfuscation/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import difflib
import typing as tp
def diff_tester(
expected: tp.Union[str, tp.Iterable[tp.Any]],
res: tp.Union[str, tp.Iterable[tp.Any]],
split: str = "\n",
normalization: tp.Optional[tp.Callable] = str,
) -> None:
d = difflib.Differ()
if expected != res:
print("Expected:")
print(expected)
print("#" * 50)
print("Got:")
print(res)
print("#" * 50)
if isinstance(expected, str):
expected_split = expected.split(split)
else:
expected_split = expected # type: ignore
if isinstance(res, str):
res_split = res.split(split)
else:
res_split = res # type: ignore
if normalization is not None:
expected_split = [normalization(x) for x in expected_split]
res_split = [normalization(x) for x in res_split]
diff = d.compare(expected_split, res_split)
for line in diff:
print(line)
assert split.join(expected_split) == split.join(
res_split
), f"EXPECTED: \n{split.join(expected_split)}\n\nGOT: \n{split.join(res_split)}\n"
|
CodeGen-main
|
codegen_sources/preprocessing/tests/obfuscation/utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from codegen_sources.preprocessing.lang_processors.java_processor import JavaProcessor
from codegen_sources.preprocessing.tests.obfuscation.utils import diff_tester
processor = JavaProcessor(root_folder=Path(__file__).parents[4].joinpath("tree-sitter"))
def test_obfuscation_var_definition():
java_code = """public class Factorial{
public static Long factorial(Long n){
Long res = 1L;
for ( int i = 1; i <= n; ++i) res *= (i + 1);
return res;
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
public static Long FUNC_0 ( Long VAR_0 ) {
Long VAR_1 = 1L ;
for ( int VAR_2 = 1 ; VAR_2 <= VAR_0 ; ++ VAR_2 ) VAR_1 *= ( VAR_2 + 1 ) ;
return VAR_1 ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Factorial | FUNC_0 factorial | VAR_0 n | VAR_1 res | VAR_2 i",
dico,
split=" | ",
)
def test_obfuscation_recursive_method():
java_code = """public class Factorial{
public Long factorial(Long n){
if (n == 1L) return 1L;
return n * factorial(n-1);
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
public Long FUNC_0 ( Long VAR_0 ) {
if ( VAR_0 == 1L ) return 1L ;
return VAR_0 * FUNC_0 ( VAR_0 - 1 ) ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester("CLASS_0 Factorial | FUNC_0 factorial | VAR_0 n", dico, split=" | ")
def test_obfuscation_identical_names():
java_code = """
public class HelloWorld{
public static void main(String []args){
Factorial factorial = new Factorial();
System.out.println(factorial.factorial(3L));
}
}
class Factorial{
public Long factorial(Long n){
if (n == 1L) return 1L;
return n * factorial(n-1);
}
}"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
public static void main ( String [ ] VAR_0 ) {
CLASS_1 VAR_1 = new CLASS_1 ( ) ;
System . out . println ( VAR_1 . FUNC_0 ( 3L ) ) ;
}
}
class CLASS_1 {
public Long FUNC_0 ( Long VAR_2 ) {
if ( VAR_2 == 1L ) return 1L ;
return VAR_2 * FUNC_0 ( VAR_2 - 1 ) ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 HelloWorld | CLASS_1 Factorial | FUNC_0 factorial | VAR_0 args | VAR_1 factorial | VAR_2 n",
dico,
split=" | ",
)
def test_methods_overloading():
java_code = """public class Factorial{
public static Long factorial(Long n, bool verbose){
Long res = 1L;
for ( int i = 1; i <= n; ++i) res *= (i + 1);
if (verbose) System.out.println(res);
return res;
}
public static Long factorial(Long n){
return factorial(n, 0);
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
public static Long FUNC_0 ( Long VAR_0 , bool VAR_2 ) {
Long VAR_3 = 1L ;
for ( int VAR_4 = 1 ; VAR_4 <= VAR_0 ; ++ VAR_4 ) VAR_3 *= ( VAR_4 + 1 ) ;
if ( VAR_2 ) System . out . println ( VAR_3 ) ;
return VAR_3 ;
}
public static Long FUNC_0 ( Long VAR_1 ) {
return FUNC_0 ( VAR_1 , 0 ) ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Factorial | FUNC_0 factorial | VAR_0 n | VAR_1 n | VAR_2 verbose | VAR_3 res | VAR_4 i",
dico,
split=" | ",
)
def test_class_constructor_and_attributes():
java_code = """public class Factorial{
private Long n;
public Factorial(Long number){
this.n = number;
}
public Long compute(){
Long res = 1L;
for ( int i = 1; i <= this.n; ++i) res *= i;
return res;
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
private Long VAR_0 ;
public CLASS_0 ( Long VAR_1 ) {
this . VAR_0 = VAR_1 ;
}
public Long FUNC_0 ( ) {
Long VAR_2 = 1L ;
for ( int VAR_3 = 1 ; VAR_3 <= this . VAR_0 ; ++ VAR_3 ) VAR_2 *= VAR_3 ;
return VAR_2 ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Factorial | FUNC_0 compute | VAR_0 n | VAR_1 number | VAR_2 res | VAR_3 i",
dico,
split=" | ",
)
def test_class_constructor_and_attributes_without_this():
java_code = """public class Factorial{
private Long n;
public Factorial(Long number){
this.n = number;
}
public Long compute(){
Long res = 1L;
for ( int i = 1; i <= n; ++i) res *= i;
return res;
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
private Long VAR_0 ;
public CLASS_0 ( Long VAR_1 ) {
this . VAR_0 = VAR_1 ;
}
public Long FUNC_0 ( ) {
Long VAR_2 = 1L ;
for ( int VAR_3 = 1 ; VAR_3 <= VAR_0 ; ++ VAR_3 ) VAR_2 *= VAR_3 ;
return VAR_2 ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Factorial | FUNC_0 compute | VAR_0 n | VAR_1 number | VAR_2 res | VAR_3 i",
dico,
split=" | ",
)
def test_multiple_definitions():
java_code = """public class Operations{
public int PlusMinus(int a, int b){
int sum = a + b, dif = a - b;
return dif > 0 ? dif : sum;
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
public int FUNC_0 ( int VAR_0 , int VAR_1 ) {
int VAR_2 = VAR_0 + VAR_1 , VAR_3 = VAR_0 - VAR_1 ;
return VAR_3 > 0 ? VAR_3 : VAR_2 ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Operations | FUNC_0 PlusMinus | VAR_0 a | VAR_1 b | VAR_2 sum | VAR_3 dif",
dico,
split=" | ",
)
def test_handling_scopes():
java_code = """public class Operations{
public int sum(int n){
int res = 0 ;
for ( int i = 0 ; i < n ; ++ i ) res += i ;
for ( int i = 0 ; i < n ; ++ i ) res -= i ;
return res;
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
public int FUNC_0 ( int VAR_0 ) {
int VAR_1 = 0 ;
for ( int VAR_2 = 0 ; VAR_2 < VAR_0 ; ++ VAR_2 ) VAR_1 += VAR_2 ;
for ( int VAR_3 = 0 ; VAR_3 < VAR_0 ; ++ VAR_3 ) VAR_1 -= VAR_3 ;
return VAR_1 ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Operations | FUNC_0 sum | VAR_0 n | VAR_1 res | VAR_2 i | VAR_3 i",
dico,
split=" | ",
)
def test_constants():
java_code = """
public class Operations{
public static final Long LIMIT = 1000L;
public int sum(int n){
int res = 0 ;
for ( int i = 0 ; i < n ; ++ i ) res += i ;
return res < LIMIT ? res : LIMIT ;
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
public static final Long VAR_0 = 1000L ;
public int FUNC_0 ( int VAR_1 ) {
int VAR_2 = 0 ;
for ( int VAR_3 = 0 ; VAR_3 < VAR_1 ; ++ VAR_3 ) VAR_2 += VAR_3 ;
return VAR_2 < VAR_0 ? VAR_2 : VAR_0 ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Operations | FUNC_0 sum | VAR_0 LIMIT | VAR_1 n | VAR_2 res | VAR_3 i",
dico,
split=" | ",
)
def test_standard_function():
java_code = """
public class Operations{
public int maximum(int a, int b){
return Math.max(a, b) ;
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public class CLASS_0 {
public int FUNC_0 ( int VAR_0 , int VAR_1 ) {
return Math . max ( VAR_0 , VAR_1 ) ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Operations | FUNC_0 maximum | VAR_0 a | VAR_1 b", dico, split=" | "
)
def test_imports():
java_code = """
import java.io.*;
import java.util.*;
class ArrayListExample {
public static void main(String[] args)
{
int n = 5;
ArrayList<Integer> arrli = new ArrayList<Integer>(n);
for (int i = 1; i <= n; i++) arrli.add(i);
System.out.println(arrli);
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """import java . io . * ;
import java . util . * ;
class CLASS_0 {
public static void main ( String [ ] VAR_0 )
{
int VAR_1 = 5 ;
ArrayList < Integer > VAR_2 = new ArrayList < Integer > ( VAR_1 ) ;
for ( int VAR_3 = 1 ; VAR_3 <= VAR_1 ; VAR_3 ++ ) VAR_2 . add ( VAR_3 ) ;
System . out . println ( VAR_2 ) ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 ArrayListExample | VAR_0 args | VAR_1 n | VAR_2 arrli | VAR_3 i",
dico,
split=" | ",
)
def test_inheritance_with_this():
# not working perfectly at the moment. get_speed() returns the wrong variable if we remove the "this."
java_code = """
class Bicycle
{
public int gear;
public int speed;
public Bicycle(int gear, int speed)
{
this.gear = gear;
this.speed = speed;
}
}
class MountainBike extends Bicycle
{
public int seatHeight;
public MountainBike(int gear, int speed, int startHeight)
{
super(gear, speed);
seatHeight = startHeight;
}
public int get_speed()
{
return this.speed;
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """class CLASS_0
{
public int VAR_0 ;
public int VAR_3 ;
public CLASS_0 ( int VAR_1 , int VAR_4 )
{
this . VAR_0 = VAR_1 ;
this . VAR_3 = VAR_4 ;
}
}
class CLASS_1 extends CLASS_0
{
public int VAR_6 ;
public CLASS_1 ( int VAR_2 , int VAR_5 , int VAR_7 )
{
super ( VAR_2 , VAR_5 ) ;
VAR_6 = VAR_7 ;
}
public int FUNC_0 ( )
{
return this . VAR_3 ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Bicycle | CLASS_1 MountainBike | FUNC_0 get_speed | VAR_0 gear | VAR_1 gear | VAR_2 gear | VAR_3 speed | VAR_4 speed | VAR_5 speed | VAR_6 seatHeight | VAR_7 startHeight",
dico,
split=" | ",
)
def test_inheritance_inverted():
java_code = """
class MountainBike extends Bicycle
{
public int seatHeight;
public MountainBike(int gear, int speed, int startHeight)
{
super(gear, speed);
seatHeight = startHeight;
}
public int get_speed()
{
return this.speed;
}
}
class Bicycle
{
public int gear;
public int speed;
public Bicycle(int gear, int speed)
{
this.gear = gear;
this.speed = speed;
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """
class CLASS_0 extends CLASS_1
{
public int VAR_0 ;
public CLASS_0 ( int VAR_1 , int VAR_4 , int VAR_7 )
{
super ( VAR_1 , VAR_4 ) ;
VAR_0 = VAR_7 ;
}
public int FUNC_0 ( )
{
return this . VAR_5 ;
}
}
class CLASS_1
{
public int VAR_2 ;
public int VAR_5 ;
public CLASS_1 ( int VAR_3 , int VAR_6 )
{
this . VAR_2 = VAR_3 ;
this . VAR_5 = VAR_6 ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 MountainBike | CLASS_1 Bicycle | FUNC_0 get_speed | VAR_0 seatHeight | VAR_1 gear | VAR_2 gear | VAR_3 gear | VAR_4 speed | VAR_5 speed | VAR_6 speed | VAR_7 startHeight",
dico,
split=" | ",
)
def test_interfaces():
java_code = """
public interface LinkFilter {
public boolean accept ( String url ) ;
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """public interface CLASS_0 {
public boolean FUNC_0 ( String VAR_0 ) ;
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester("CLASS_0 LinkFilter | FUNC_0 accept | VAR_0 url", dico, split=" | ")
def test_enums():
java_code = """
enum Color
{
RED, GREEN, BLUE;
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """
enum CLASS_0
{
VAR_0 , VAR_1 , VAR_2 ;
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Color | VAR_0 RED | VAR_1 GREEN | VAR_2 BLUE", dico, split=" | "
)
def test_inherited_methods():
java_code = """
class Vehicle {
public void honk() {
System.out.println("Tuut, tuut!");
}
}
class Car extends Vehicle {
String y = "sub";
}
public class Test {
public static void main(String[] args) {
Car myCar = new Car();
myCar.honk(); // how is this obfuscated?
}
}
"""
res, dico = processor.obfuscate_code(java_code)
expected = """
class CLASS_0 {
public void FUNC_0 ( ) {
System . out . println ( "Tuut, tuut!" ) ;
}
}
class CLASS_1 extends CLASS_0 {
String VAR_0 = "sub" ;
}
public class CLASS_2 {
public static void main ( String [ ] VAR_1 ) {
CLASS_1 VAR_2 = new CLASS_1 ( ) ;
VAR_2 . FUNC_0 ( ) ;
}
}
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Vehicle | CLASS_1 Car | CLASS_2 Test | FUNC_0 honk | VAR_0 y | VAR_1 args | VAR_2 myCar",
dico,
split=" | ",
)
# TODO: defines
|
CodeGen-main
|
codegen_sources/preprocessing/tests/obfuscation/test_java_obfuscation.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
import codegen_sources.preprocessing.lang_processors as lp
from codegen_sources.preprocessing.obfuscation import utils_deobfuscation
from codegen_sources.preprocessing.tests.obfuscation.utils import diff_tester
processors = (lp.PythonProcessor(), lp.PythonTreeSitterProcessor())
with_both_processors = pytest.mark.parametrize("processor", processors)
# # # # # Type obfuscation
def test_type_obfuscation() -> None:
processor = processors[1]
input_program = """from pathlib import Path
import typing as tp
global_var: tp.List[str] = []
class Something:
'''accentué'''
class_var: int = 12
def __init__(self, something: tp.Union[str,
Path]) -> None:
self.uninitialized_var: int
self.var: dict = {}
self.var[0] = 12
async def func(self, input_: str = None) -> tp.List[str]:
self.uninitialized_var = 2
self.func(None)
return ["aaa"]
@classmethod
def myself(cls, other) -> "Something":
return self
def fail(cls, other: tp.Optional[str] = None, stuff: str| None = None):
return self
"""
res, dico = processor.obfuscate_types(input_program)
expected = """from pathlib import Path
global_var: VAR_0 = []
class Something:
'''accentué'''
class_var: VAR_1 = 12
def __init__(self, something: VAR_2) -> None:
self.uninitialized_var: VAR_3
self.var: VAR_4 = {}
self.var[0] = 12
async def func(self, input_: VAR_5 = None) -> VAR_6:
self.uninitialized_var = 2
self.func(None)
return ["aaa"]
@classmethod
def myself(cls, other) -> VAR_7:
return self
def fail(cls, other: VAR_8 = None, stuff: VAR_9 = None):
return self
"""
diff_tester(expected.strip(), res.strip())
expected_types = [
"List [ str ]",
"int",
"Union [ Path , str ]",
"int",
"Dict [ str , Any ]",
"Optional [ str ]",
"List [ str ]",
"Something",
"Optional [ str ]",
"Optional [ str ]",
]
expected_dict = " | ".join(f"VAR_{k} {x}" for k, x in enumerate(expected_types))
diff_tester(
expected_dict, dico, split=" | ",
)
as_dict = utils_deobfuscation.read_dict(expected_dict)
assert as_dict["VAR_2"] == "Union [ Path , str ]"
# # # # # Name obfuscation
@with_both_processors
def test_obfuscation_var_definition(processor: lp.LangProcessor) -> None:
input_program = """import os
class Factorial:
def factorial(self, n, path):
res, res2, res3 = 1, 1, 1
for i in range(n):
res *= (i + 1)
with open(os.path.join(path, 'res'), 'w') as f:
f.write(str(res))
return res
"""
res, dico = processor.obfuscate_code(input_program)
expected = """
import os
class CLASS_0():
def FUNC_0(VAR_0, VAR_1, VAR_2):
(VAR_3, VAR_4, VAR_5) = (1, 1, 1)
for VAR_6 in range(VAR_1):
VAR_3 *= (VAR_6 + 1)
with open(os.path.join(VAR_2, 'res'), 'w') as VAR_7:
VAR_7.write(str(VAR_3))
return VAR_3
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Factorial | FUNC_0 factorial | VAR_0 self | VAR_1 n | VAR_2 path | VAR_3 res | VAR_4 res2 | VAR_5 res3 | VAR_6 i | VAR_7 f",
dico,
split=" | ",
)
@with_both_processors
def test_obfuscation_recursive_method(processor: lp.LangProcessor) -> None:
input_program = """class Factorial:
def factorial(self, n):
if n == 1:
return 1
return n * self.factorial(n-1)
"""
res, dico = processor.obfuscate_code(input_program)
expected = """class CLASS_0():
def FUNC_0(VAR_0, VAR_1):
if (VAR_1 == 1):
return 1
return (VAR_1 * VAR_0.FUNC_0((VAR_1 - 1)))
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Factorial | FUNC_0 factorial | VAR_0 self | VAR_1 n", dico, split=" | "
)
@with_both_processors
def test_obfuscation_class_attributes(processor: lp.LangProcessor) -> None:
input_program = """class Factorial:
def __init__(self, number):
self.n = number
def factorial(self):
if self.n == 1:
return 1
return self.n * self.factorial(self.n-1)
"""
res, dico = processor.obfuscate_code(input_program)
expected = """class CLASS_0():
def __init__(VAR_0, VAR_1):
VAR_0.VAR_2 = VAR_1
def FUNC_0(VAR_3):
if (VAR_3.VAR_2 == 1):
return 1
return (VAR_3.VAR_2 * VAR_3.FUNC_0((VAR_3.VAR_2 - 1)))
"""
diff_tester(expected.strip(), res.strip())
diff_tester(
"CLASS_0 Factorial | FUNC_0 factorial | VAR_0 self | VAR_1 number | VAR_2 n | VAR_3 self",
dico,
split=" | ",
)
@with_both_processors
def test_obfuscation_imported_var(processor: lp.LangProcessor) -> None:
input_program = """from something import stuff
def factorial(n):
if n == 1:
return stuff
return n * factorial(n-1)
"""
res, dico = processor.obfuscate_code(input_program)
expected = """from something import stuff
def FUNC_0(VAR_0):
if (VAR_0 == 1):
return stuff
return (VAR_0 * FUNC_0((VAR_0 - 1)))
"""
diff_tester(expected.strip(), res.strip())
diff_tester("FUNC_0 factorial | VAR_0 n", dico, split=" | ")
@with_both_processors
def test_function_scope(processor: lp.LangProcessor) -> None:
input_program = """
def factorial(n):
if n == 1:
return n
return n * factorial(n-1)
def sum(n):
if n == 1:
return n
return n + sum(n-1)
"""
res, dico = processor.obfuscate_code(input_program)
expected = """
def FUNC_0(VAR_0):
if (VAR_0 == 1):
return VAR_0
return (VAR_0 * FUNC_0((VAR_0 - 1)))
def FUNC_1(VAR_1):
if (VAR_1 == 1):
return VAR_1
return (VAR_1 + FUNC_1((VAR_1 - 1)))
"""
diff_tester(expected.strip(), res.strip())
diff_tester("FUNC_0 factorial | FUNC_1 sum | VAR_0 n | VAR_1 n", dico, split=" | ")
|
CodeGen-main
|
codegen_sources/preprocessing/tests/obfuscation/test_python_obfuscator.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
from logging import getLogger
from pathlib import Path
import submitit
import typing as tp
from codegen_sources.preprocessing.dataset_modes.dataset_mode import DatasetMode
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import REPLACE_DICT
from codegen_sources.preprocessing import bpe_modes
from codegen_sources.preprocessing.timeout import timeout
from codegen_sources.preprocessing.utils import get_subset_file, is_valid_file
MONOLINGUAL_SUFFIXES = ["monolingual"]
logger = getLogger()
class MonolingualMode(DatasetMode):
"""
Callable where we track the repos processed so that we can checkpoint with submitit
"""
def __init__(
self,
folder,
languages,
bpe,
processed_lines: tp.Optional[tp.Set] = None,
nb_train_split: int = 8,
keep_comments: bool = False,
repo_split: bool = True,
):
super().__init__(
suffixes=MONOLINGUAL_SUFFIXES,
folder=folder,
languages=languages,
bpe=bpe,
parallel_dataset=False,
processed_lines=processed_lines,
nb_train_split=nb_train_split,
keep_comments=keep_comments,
repo_split=repo_split,
)
# broken as non-callable
# def checkpoint(
# self, input_path: str, process_strings: bool
# ) -> submitit.helpers.DelayedSubmission:
# return submitit.helpers.DelayedSubmission(
# self.__class__(
# self.folder, self.languages, self.bpe, self.processed_lines,
# ),
# input_path,
# process_strings,
# )
def extract_data_for_line(
self,
line_id: str,
json_line: dict,
process_strings: bool,
lang_processor: LangProcessor,
):
default_return = line_id, None, None
if "content" not in json_line:
return default_return
content = json_line["content"]
for k, v in REPLACE_DICT.items():
content = content.replace(k, v)
tokenize = lang_processor.tokenize_code
try:
return (
line_id,
json_line["repo_name"],
{
"monolingual": [
" ".join(
tokenize(
content,
process_strings=process_strings,
keep_comments=self.keep_comments,
)
)
]
},
)
except KeyboardInterrupt:
raise
except Exception as e:
sys.stderr.write(f"Error tokenizing content {e}")
return default_return
def _learn_bpe(
self, ncodes: int, executor: tp.Optional["ExecutorLike"] = None
) -> None:
# get data to training data for bpe
assert (
len(self.suffixes) == 1
), "too many suffixes for dataset, cannot compute BPE safely."
all_shufs = [
self.folder.joinpath(f"{lang}.all.{self.suffixes[0]}.tok.shuf")
for lang in self.languages
]
if any(not shuf.is_file() for shuf in all_shufs):
self.regroup_all_tok()
self.shuffle_all_tok()
assert all(shuf.is_file() for shuf in all_shufs)
data_train_bpe = get_subset_file(
file_paths=all_shufs,
subset_size_gb=50,
output_path=self.folder.joinpath(
f"{'-'.join(self.languages)}.{self.suffixes[0]}.tok.shuf.{50}gb"
),
)
# train bpe codes
assert isinstance(self.bpe, bpe_modes.FastBPEMode)
logger.info(f"training bpe on {data_train_bpe}...")
if executor is None:
executor = submitit.LocalExecutor(self.folder.joinpath("log"))
job = executor.submit(self.bpe.learn_bpe_file, data_train_bpe, ncodes)
job.result()
assert is_valid_file(
self.bpe.codes
), f"Invalid filepath {self.bpe.codes} for {self.bpe}"
logger.info(f"Successfully learnt bpe. Bpe codes stored in {self.bpe.codes}.")
def _get_vocab(self, executor: tp.Optional["ExecutorLike"] = None) -> None:
# get data to learn vocab
assert isinstance(self.bpe, bpe_modes.FastBPEMode)
data_get_vocab_list = [
self.folder.joinpath(f"{lang}.train.{self.suffixes[0]}.0.bpe")
for lang in self.languages
]
data_get_vocab = get_subset_file(
data_get_vocab_list,
20,
output_path=self.folder.joinpath(
f"{'-'.join(self.languages)}.train.{self.suffixes[0]}.0.20BG.bpe"
),
)
assert Path(
data_get_vocab
).is_file(), f"cannot get vocab, {data_get_vocab} doesnt not exist."
# get vocab
logger.info(f"Getting vocab from {data_get_vocab} ...")
if executor is None:
executor = submitit.LocalExecutor(folder=self.folder.joinpath("log"))
job = executor.submit(self.bpe.get_vocab_file, data_get_vocab)
job.result()
assert self.bpe.vocab_path is not None and self.bpe.vocab_path.is_file()
logger.info(f"Successfully got vocab. Vocab stored in {self.bpe.vocab_path}.")
|
CodeGen-main
|
codegen_sources/preprocessing/dataset_modes/monolingual_mode.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
from logging import getLogger
from pathlib import Path
import submitit
import typing as tp
from codegen_sources.preprocessing.dataset_modes.dataset_mode import DatasetMode
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.preprocessing import bpe_modes
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import REPLACE_DICT
from codegen_sources.preprocessing.timeout import timeout
from codegen_sources.preprocessing.utils import get_subset_file, is_valid_file
# standalone functions ("sa") and class functions ("cl")
MONOLINGUAL_FUNC_SUFFIXES = ["sa", "cl"]
logger = getLogger()
# TODO: add an option to start from the tokenized version; it is not necessary to retokenize the whole thing. Add a "select starting point" function.
class MonolingualFunctionsMode(DatasetMode):
"""
Callable where we track the repos processed so that we can checkpoint with submitit
"""
def __init__(
self,
folder,
languages,
bpe,
processed_lines: tp.Optional[tp.Set] = None,
nb_train_split: int = 8,
keep_comments: bool = False,
repo_split: bool = True,
):
super().__init__(
suffixes=MONOLINGUAL_FUNC_SUFFIXES,
folder=folder,
languages=languages,
bpe=bpe,
parallel_dataset=False,
processed_lines=processed_lines,
nb_train_split=nb_train_split,
keep_comments=keep_comments,
repo_split=repo_split,
)
# TODO not callable so buggy
# def checkpoint(
# self, input_path: str, process_strings: bool
# ) -> submitit.helpers.DelayedSubmission:
# return submitit.helpers.DelayedSubmission(
# self.__class__(
# self.folder, self.languages, self.bpe, self.processed_lines,
# ),
# input_path,
# process_strings,
# )
def extract_data_for_line(
self,
line_id: str,
json_line: dict,
process_strings: bool,
lang_processor: LangProcessor,
):
default_return = line_id, None, None
if "content" not in json_line:
return default_return
content = json_line["content"]
for k, v in REPLACE_DICT.items():
content = content.replace(k, v)
try:
tokenized_file = " ".join(
lang_processor.tokenize_code(
content,
process_strings=process_strings,
keep_comments=self.keep_comments,
)
)
f_standalone, f_class = lang_processor.extract_functions(tokenized_file)
except KeyboardInterrupt:
raise
except Exception as e:
sys.stderr.write(f"error {e} tokenizing and extracting functions\n")
return default_return
return (
line_id,
json_line["repo_name"],
{"sa": f_standalone, "cl": f_class},
)
def _learn_bpe(
self, ncodes: int, executor: tp.Optional["ExecutorLike"] = None
) -> None:
# get data to training data for bpe
all_shufs = [
self.folder.joinpath(f"{lang}.all.{suffix}.tok.shuf")
for lang in self.languages
for suffix in self.suffixes
]
if any(not shuf.is_file() for shuf in all_shufs):
self.regroup_all_tok()
self.shuffle_all_tok()
assert all(shuf.is_file() for shuf in all_shufs)
data_train_bpe = get_subset_file(
file_paths=all_shufs,
subset_size_gb=50,
output_path=self.folder.joinpath(
f"{'-'.join(self.languages)}.{'-'.join(self.suffixes)}.tok.shuf.{50}gb"
),
)
# train bpe codes
assert isinstance(self.bpe, bpe_modes.FastBPEMode)
logger.info(f"training bpe on {data_train_bpe}...")
if executor is None:
executor = submitit.LocalExecutor(folder=self.folder.joinpath("log"))
job = executor.submit(self.bpe.learn_bpe_file, data_train_bpe, ncodes)
job.result()
assert is_valid_file(self.bpe.codes)
logger.info(f"Successfully learnt bpe. Bpe codes stored in {self.bpe.codes}.")
def _get_vocab(self, executor: tp.Optional["ExecutorLike"] = None) -> None:
# get data to learn vocab
assert isinstance(self.bpe, bpe_modes.FastBPEMode)
data_get_vocab = [
self.folder.joinpath(f"{lang}.train.{suffix}.0.bpe")
for lang in self.languages
for suffix in self.suffixes
]
consolidated_data_get_vocab = get_subset_file(
data_get_vocab,
20,
output_path=self.folder.joinpath(
f"{'-'.join(self.languages)}.train.{'-'.join(self.suffixes)}.0.20BG.bpe"
),
)
assert Path(
consolidated_data_get_vocab
).is_file(), (
f"cannot get vocab, {consolidated_data_get_vocab} doesnt not exist."
)
# get vocab
logger.info(f"Getting vocab from {consolidated_data_get_vocab} ...")
if executor is None:
executor = submitit.LocalExecutor(folder=self.folder.joinpath("log"))
job = executor.submit(self.bpe.get_vocab_file, consolidated_data_get_vocab)
job.result()
assert self.bpe.vocab_path is not None and self.bpe.vocab_path.is_file()
logger.info(f"Successfully get vocab. Vocab stored in {self.bpe.vocab_path}.")
|
CodeGen-main
|
codegen_sources/preprocessing/dataset_modes/monolingual_functions_mode.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import typing as tp
from logging import getLogger
from codegen_sources.IR_tools.utils_ir import code_to_ir, ir_had_errors
from codegen_sources.preprocessing.dataset_modes.dataset_mode import (
DATASET_SPLITS,
DatasetMode,
)
from codegen_sources.preprocessing.lang_processors import LangProcessor, IRProcessor
from codegen_sources.preprocessing.utils import (
check_same_number_of_lines,
create_symlink,
get_all_pairs,
is_valid_file,
)
IR_SUFFIXES = ["sa", "ir_sa"]
logger = getLogger()
class IRFullFilesMode(DatasetMode):
"""
Callable where we track the repos processed so that we can checkpoint with submitit
""" # TODO currently not callable nor checkpointable
def __init__(
self,
folder,
languages,
bpe,
processed_lines: tp.Optional[tp.Set] = None,
nb_train_split: int = 8,
keep_comments: bool = False,
repo_split: bool = True,
):
super().__init__(
# suffixes/parallel_dataset are required by DatasetMode; mirrors IRFunctionsMode
suffixes=IR_SUFFIXES,
folder=folder,
languages=languages,
bpe=bpe,
parallel_dataset=True,
processed_lines=processed_lines,
nb_train_split=nb_train_split,
keep_comments=keep_comments,
repo_split=repo_split,
)
self.id_is_line = False
# TODO reactivate when callable
# def checkpoint(
# self, input_path: str, process_strings: bool
# ) -> submitit.helpers.DelayedSubmission:
# return submitit.helpers.DelayedSubmission(
# self.__class__(
# self.folder, self.languages, self.bpe, self.processed_lines,
# ),
# input_path,
# process_strings,
# )
def extract_data_for_line(
self,
line_id: str,
json_line: dict,
process_strings: bool,
lang_processor: LangProcessor,
):
ir_processor = IRProcessor()
default_return = line_id, None, None
if "content" not in json_line:
return default_return
content = json_line["content"]
try:
tokenized_file = " ".join(
lang_processor.tokenize_code(
content,
process_strings=process_strings,
keep_comments=self.keep_comments,
)
)
irs = [
code_to_ir(
content,
lang_processor.language,
func_level=False,
clean_dir=True,
verbose=False,
)
]
ir_errors = [
len(ir) == 0 or (len(ir) > 0 and ir_had_errors(ir[0])) for ir in irs
]
logger.info(f"error rate: {sum(ir_errors) / len(ir_errors):.3%}")
if any(ir_errors):
return default_return
irs = [
" ".join(ir_processor.tokenize_code(ir[0]))
for ir, err in zip(irs, ir_errors)
if not err
]
except KeyboardInterrupt:
raise
except Exception as e:
sys.stderr.write(f"error {e} tokenizing and extracting functions\n")
return default_return
return (
line_id,
json_line["repo_name"],
{"sa": [tokenized_file], "ir_sa": irs},
)
def check_files_and_symlink_for_XLM(self):
logger.info("")
logger.info("")
logger.info("========== Check and Create symlinks ===========")
# check that all files exist and are not empty
for lang in self.languages:
for suffix in self.suffixes:
for split in DATASET_SPLITS:
if split == "train":
for i in range(self.nb_train_split):
f = self.folder.joinpath(
f"{lang}.{split}.{suffix}.{i}{self.bpe.ext}.pth"
)
if not is_valid_file(f):
logger.warning(f"doest not exist {f}")
else:
f = self.folder.joinpath(
f"{lang}.{split}.{suffix}{self.bpe.ext}.pth"
)
if not is_valid_file(f):
logger.warning(f"doest not exist {f}")
logger.info("create symlinks for XLM ...")
XLM_folder = self.folder.joinpath("XLM-syml")
XLM_folder.mkdir(exist_ok=True)
for lang in self.languages:
for split in DATASET_SPLITS:
if self.parallel_dataset:
for suffix1, suffix2 in get_all_pairs(self.suffixes):
name_suff1, name_suff2 = [
suffix if "ir_" in suffix else f"{lang}_{suffix}"
for suffix in [suffix1, suffix2]
]
if name_suff1 > name_suff2:
name_suff1, name_suff2 = name_suff2, name_suff1
suffix1, suffix2 = suffix2, suffix1
for suffix, name_suff in [
(suffix1, name_suff1),
(suffix2, name_suff2),
]:
if split == "train":
for i in range(self.nb_train_split):
# when parallel dataset, check files have same number of lines
if suffix == suffix1:
check_same_number_of_lines(
self.folder.joinpath(
f"{lang}.{split}.{suffix1}.{i}{self.bpe.ext}"
),
self.folder.joinpath(
f"{lang}.{split}.{suffix2}.{i}{self.bpe.ext}"
),
)
create_symlink(
self.folder.joinpath(
f"{lang}.{split}.{suffix}.{i}{self.bpe.ext}.pth"
),
XLM_folder.joinpath(
f"{split}.{name_suff1}-{name_suff2}.{name_suff}.{i}.pth"
),
)
else:
if suffix == suffix1:
check_same_number_of_lines(
self.folder.joinpath(
f"{lang}.{split}.{suffix1}{self.bpe.ext}"
),
self.folder.joinpath(
f"{lang}.{split}.{suffix2}{self.bpe.ext}"
),
)
create_symlink(
self.folder.joinpath(
f"{lang}.{split}.{suffix}{self.bpe.ext}.pth"
),
XLM_folder.joinpath(
f"{split}.{name_suff1}-{name_suff2}.{name_suff}.pth"
),
)
logger.info("Check and symlink done.")
|
CodeGen-main
|
codegen_sources/preprocessing/dataset_modes/ir_full_files_mode.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from itertools import chain
from logging import getLogger
import submitit
import typing as tp
from codegen_sources.preprocessing.bpe_modes.bpe_mode import TMP_EXT
from codegen_sources.preprocessing.dataset_modes.dataset_mode import (
DATASET_SPLITS,
DatasetMode,
)
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import REPLACE_DICT
from codegen_sources.preprocessing.timeout import timeout
OUTLIER_INDICES_THRESHOLDS = {"VAR_": 200, "FUNC_": 200, "CLASS_": 100}
OBFUSCATION_SUFFIXES = ["obfuscated", "dictionary"]
logger = getLogger()
class ObfuscationMode(DatasetMode):
"""
Callable where we track the repos processed so that we can checkpoint with submitit
"""
def __init__(
self,
folder,
languages,
bpe,
processed_lines: tp.Optional[tp.Set] = None,
nb_train_split: int = 8,
keep_comments: bool = False,
repo_split: bool = True,
):
super().__init__(
suffixes=OBFUSCATION_SUFFIXES,
folder=folder,
languages=languages,
bpe=bpe,
parallel_dataset=True,
processed_lines=processed_lines,
nb_train_split=nb_train_split,
keep_comments=keep_comments,
repo_split=repo_split,
)
# broken as non-callable
# def checkpoint(
# self, input_path: str, process_strings: bool
# ) -> submitit.helpers.DelayedSubmission:
# return submitit.helpers.DelayedSubmission(
# self.__class__(
# self.folder, self.languages, self.bpe, self.processed_lines,
# ),
# input_path,
# process_strings,
# )
def extract_data_for_line(
self,
line_id: str,
json_line: dict,
process_strings: bool,
lang_processor: LangProcessor,
):
default_return = line_id, None, None
if "content" not in json_line:
return default_return
content = json_line["content"]
for k, v in REPLACE_DICT.items():
content = content.replace(k, v)
try:
obfuscated, dico = lang_processor.obfuscate_code(content)
tokenized_obfuscated_file = " ".join(
lang_processor.tokenize_code(
obfuscated,
process_strings=process_strings,
keep_comments=self.keep_comments,
)
)
except NotImplementedError:
logger.error(
f"Obfuscate method is not implemented for {lang_processor.__class__.__name__}"
)
raise
except KeyboardInterrupt:
raise
except Exception as e:
logger.warning(f"Error obfuscating content {e} \n")
return default_return
return (
line_id,
json_line["repo_name"],
{"obfuscated": [tokenized_obfuscated_file], "dictionary": [dico]},
)
def post_tok_filter(self, tokenized_data):
assert all(s in tokenized_data for s in self.suffixes)
assert len(tokenized_data["dictionary"]) == 1
assert isinstance(tokenized_data["dictionary"][0], str)
for var_prefix, var_number in OUTLIER_INDICES_THRESHOLDS.items():
if f"{var_prefix}{var_number}" in tokenized_data["dictionary"][0]:
return True
return False
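# Filtering sketch (illustrative): OUTLIER_INDICES_THRESHOLDS caps the number of
# obfuscation tokens per file. Since indices start at 0, a dictionary containing
# "VAR_200" means at least 201 distinct variables were renamed, so the sample is
# dropped (post_tok_filter returns True); likewise for "FUNC_200" and "CLASS_100".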
def _learn_bpe(
self, ncodes: int, executor: tp.Optional["ExecutorLike"] = None
) -> None:
raise Exception(
"BPE codes should not be learnt from obfuscated data. Learn them on monolingual data."
"Please provide bpe codes or learn them."
"To do so, please run pipepline with monolingual mode until BPE learning."
)
def apply_bpe(
self,
executor: tp.Optional["ExecutorLike"] = None,
local_parallelism: tp.Optional[int] = None,
) -> None:
"""
Override of the base method: in obfuscation mode, the obfuscation special tokens must be restored after BPE is applied.
"""
if executor is None:
executor = submitit.LocalExecutor(folder=self.folder.joinpath("log"))
# apply BPE with tmp suffix
_bpe_ext = self.bpe.ext
self.bpe.ext += TMP_EXT
super().apply_bpe(executor)
self.bpe.ext = _bpe_ext
# restore BPE on obfuscation special tokens
jobs = []
to_restore = list(
chain(
*[
self.folder.glob(f"{lang}.{split}.*{self.bpe.ext}{TMP_EXT}")
for split in DATASET_SPLITS
for lang in self.languages
]
)
)
for f in to_restore:
job = executor.submit(
self.bpe.repair_bpe_for_obfuscation_file, f, f.with_suffix("")
)
jobs.append(job)
for job in jobs:
job.result()
for f in to_restore:
assert f.with_suffix("").is_file()
f.unlink()
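# File-flow sketch (assuming TMP_EXT is ".tmp" and a fastBPE ".bpe" extension):
# python.train.obfuscated.0.bpe.tmp is produced by the parent apply_bpe call, then
# repair_bpe_for_obfuscation_file rewrites it to python.train.obfuscated.0.bpe with
# the VAR_/FUNC_/CLASS_ tokens restored, and the temporary file is removed.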
def _get_vocab(self, executor: tp.Optional["ExecutorLike"] = None) -> None:
raise Exception(
"Vocab should not be learnt from obfuscated data. Learn it on monolingual data."
"Please provide vocab or learn them."
"To do so, please run pipepline with monolingual mode until get_vocab."
)
|
CodeGen-main
|
codegen_sources/preprocessing/dataset_modes/obfuscation_mode.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import typing as tp
from logging import getLogger
from codegen_sources.IR_tools.utils_ir import code_to_ir, ir_had_errors
from codegen_sources.preprocessing.dataset_modes.dataset_mode import (
DATASET_SPLITS,
DatasetMode,
)
from codegen_sources.preprocessing.lang_processors import LangProcessor, IRProcessor
from codegen_sources.preprocessing.utils import (
check_same_number_of_lines,
create_symlink,
get_all_pairs,
is_valid_file,
)
IR_SUFFIXES = ["sa", "ir_sa"]
logger = getLogger()
class IRFunctionsMode(DatasetMode):
"""
Callable where we track the repos processed so that we can checkpoint with submitit
"""
def __init__(
self,
folder,
languages,
bpe,
processed_lines: tp.Optional[tp.Set] = None,
nb_train_split: int = 8,
keep_comments: bool = False,
repo_split: bool = True,
):
super().__init__(
suffixes=IR_SUFFIXES,
folder=folder,
languages=languages,
bpe=bpe,
parallel_dataset=True,
processed_lines=processed_lines,
nb_train_split=nb_train_split,
keep_comments=keep_comments,
repo_split=repo_split,
)
self.id_is_line = False
# TODO update to make it work (currently buggy because non-callable)
# def checkpoint(
# self, input_path: str, process_strings: bool
# ) -> submitit.helpers.DelayedSubmission:
# return submitit.helpers.DelayedSubmission(
# self.__class__(
# self.folder, self.languages, self.bpe, self.processed_lines,
# ),
# input_path,
# process_strings,
# )
def extract_data_for_line(
self,
line_id: str,
json_line: dict,
process_strings: bool,
lang_processor: LangProcessor,
):
ir_processor = IRProcessor()
default_return = line_id, None, None
if "content" not in json_line:
return default_return
content = json_line["content"]
try:
tokenized_file = " ".join(
lang_processor.tokenize_code(
content,
process_strings=process_strings,
keep_comments=self.keep_comments,
)
)
f_standalone, f_class = lang_processor.extract_functions(tokenized_file)
funcs = f_standalone + f_class
if len(f_standalone) == 0:
return default_return
irs = [
code_to_ir(
lang_processor.detokenize_code(x),
lang_processor.language,
func_level=True,
)
for x in funcs
]
ir_errors = [len(ir) > 0 and ir_had_errors(ir[0]) for ir in irs]
logger.info(f"error rate: {sum(ir_errors) / len(ir_errors):.3%}")
f_standalone = [f for f, err in zip(funcs, ir_errors) if not err]
irs = [
" ".join(ir_processor.tokenize_code(ir[0]))
for ir, err in zip(irs, ir_errors)
if not err
]
assert len(f_standalone) == len(irs), (len(f_standalone), len(irs))
if len(f_standalone) == 0:
return default_return
except KeyboardInterrupt:
raise
except Exception as e:
sys.stderr.write(f"error {e} tokenizing and extracting functions\n")
return default_return
return (
line_id,
json_line["repo_name"],
{"sa": f_standalone, "ir_sa": irs},
)
# def _learn_bpe(self, ncodes: int, executor: Executor = None):
# # get data to training data for bpe
# all_shufs = [
# self.folder.joinpath(f"{lang}.all.{suf}.tok.shuf")
# for lang in self.languages
# for suf in self.suffixes
# ]
# if any(not shuf.is_file() for shuf in all_shufs):
# self.regroup_all_tok()
# self.shuffle_all_tok()
# assert all(shuf.is_file() for shuf in all_shufs)
# data_train_bpe = get_subset_file(
# file_paths=all_shufs,
# subset_size_gb=50,
# output_path=self.folder.joinpath(
# f"{'-'.join(self.languages)}.{'-'.join(self.suffixes)}.tok.shuf.{50}gb"
# ),
# )
#
# # train bpe codes
# logger.info(f"training bpe on {data_train_bpe}...")
# if executor is None:
# executor = LocalExecutor(self.folder.joinpath("log"))
# job = executor.submit(self.bpe.learn_bpe_file, data_train_bpe, ncodes)
# job.result()
# assert is_valid_file(self.bpe.codes)
# logger.info(f"Successfully learnt bpe. Bpe codes stored in {self.bpe.codes}.")
#
# def _get_vocab(self, executor: Executor = None):
# # get data to learn vocab
# data_get_vocab = [
# self.folder.joinpath(f"{lang}.train.{suf}.0.bpe")
# for lang in self.languages
# for suf in self.suffixes
# ]
# data_get_vocab = get_subset_file(
# data_get_vocab,
# 20,
# output_path=self.folder.joinpath(
# f"{'-'.join(self.languages)}.train.{'-'.join(self.suffixes)}.0.20BG.bpe"
# ),
# )
# assert Path(
# data_get_vocab
# ).is_file(), f"cannot get vocab, {data_get_vocab} doesnt not exist."
#
# # get vocab
# logger.info(f"Getting vocab from {data_get_vocab} ...")
# if executor is None:
# executor = LocalExecutor(folder=self.folder.joinpath("log"))
# job = executor.submit(self.bpe.get_vocab_file, data_get_vocab)
# job.result()
# assert self.bpe.vocab_path.is_file()
# logger.info(f"Successfully get vocab. Vocab stored in {self.bpe.vocab_path}.")
def check_files_and_symlink_for_XLM(self):
logger.info("")
logger.info("")
logger.info("========== Check and Create symlinks ===========")
# check that all files exist and are not empty
for lang in self.languages:
for suffix in self.suffixes:
for split in DATASET_SPLITS:
if split == "train":
for i in range(self.nb_train_split):
f = self.folder.joinpath(
f"{lang}.{split}.{suffix}.{i}{self.bpe.ext}.pth"
)
if not is_valid_file(f):
logger.warning(f"doest not exist {f}")
else:
f = self.folder.joinpath(
f"{lang}.{split}.{suffix}{self.bpe.ext}.pth"
)
if not is_valid_file(f):
logger.warning(f"doest not exist {f}")
logger.info("create symlinks for XLM ...")
XLM_folder = self.folder.joinpath("XLM-syml")
XLM_folder.mkdir(exist_ok=True)
for lang in self.languages:
for split in DATASET_SPLITS:
if self.parallel_dataset:
for suffix1, suffix2 in get_all_pairs(self.suffixes):
name_suff1, name_suff2 = [
suffix if "ir_" in suffix else f"{lang}_{suffix}"
for suffix in [suffix1, suffix2]
]
if name_suff1 > name_suff2:
name_suff1, name_suff2 = name_suff2, name_suff1
suffix1, suffix2 = suffix2, suffix1
for suffix, name_suff in [
(suffix1, name_suff1),
(suffix2, name_suff2),
]:
if split == "train":
for i in range(self.nb_train_split):
# when parallel dataset, check files have same number of lines
if suffix == suffix1:
check_same_number_of_lines(
self.folder.joinpath(
f"{lang}.{split}.{suffix1}.{i}{self.bpe.ext}"
),
self.folder.joinpath(
f"{lang}.{split}.{suffix2}.{i}{self.bpe.ext}"
),
)
create_symlink(
self.folder.joinpath(
f"{lang}.{split}.{suffix}.{i}{self.bpe.ext}.pth"
),
XLM_folder.joinpath(
f"{split}.{name_suff1}-{name_suff2}.{name_suff}.{i}.pth"
),
)
else:
if suffix == suffix1:
check_same_number_of_lines(
self.folder.joinpath(
f"{lang}.{split}.{suffix1}{self.bpe.ext}"
),
self.folder.joinpath(
f"{lang}.{split}.{suffix2}{self.bpe.ext}"
),
)
create_symlink(
self.folder.joinpath(
f"{lang}.{split}.{suffix}{self.bpe.ext}.pth"
),
XLM_folder.joinpath(
f"{split}.{name_suff1}-{name_suff2}.{name_suff}.pth"
),
)
logger.info("Check and symlink done.")
|
CodeGen-main
|
codegen_sources/preprocessing/dataset_modes/ir_functions_mode.py
|
# dataset modes must be registered here for automatic inclusion
from .dataset_mode import DatasetMode
from .monolingual_functions_mode import MonolingualFunctionsMode
from .ir_functions_mode import IRFunctionsMode
from .ir_full_files_mode import IRFullFilesMode
from .monolingual_mode import MonolingualMode
from .obfuscation_mode import ObfuscationMode
from .obfuscation_functions_mode import ObfuscationFunctionsMode
|
CodeGen-main
|
codegen_sources/preprocessing/dataset_modes/__init__.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import concurrent
import sys
import json
import time
from typing import Optional
import zlib
import fileinput
import subprocess
import contextlib
from pathlib import Path
from hashlib import sha256
from logging import getLogger
from itertools import chain, repeat
from concurrent.futures.process import ProcessPoolExecutor
from multiprocessing import Pool, cpu_count
import pebble # type: ignore
import submitit
from pebble import ProcessExpired # type: ignore
import typing as tp
from codegen_sources.preprocessing import timeout
from codegen_sources.preprocessing.lang_processors import (
LangProcessor,
PythonTreeSitterProcessor,
)
from codegen_sources.preprocessing import bpe_modes
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import SEPARATOR
from codegen_sources.preprocessing.utils import (
binarize_for_XLM_file,
check_same_number_of_lines,
create_symlink,
get_all_pairs,
is_valid_file,
shuf_file,
shuf_parallel_files,
)
PathLike = tp.Union[Path, str]
TIMEOUT = "timeout"
logger = getLogger(__name__)
DATASET_SPLITS = ["train", "valid", "test"]
lang_processors: tp.Dict[str, LangProcessor] = {}
@contextlib.contextmanager
def open_file_dict(
filepaths: tp.Mapping[str, str], mode: str = "w"
) -> tp.Iterator[tp.Dict[str, tp.TextIO]]:
"""Context for opening a dict of filepaths and safely close them at the end"""
with contextlib.ExitStack() as stack:
handles = {
x: stack.enter_context(
Path(fp).open(mode, encoding="utf-8", errors="ignore")
)
for x, fp in filepaths.items()
}
yield handles # type: ignore
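# Illustrative usage of open_file_dict (not from the original file); the paths
# below are hypothetical:
#
#     with open_file_dict({"sa": "/tmp/java.train.sa.tok", "cl": "/tmp/java.train.cl.tok"}) as handles:
#         handles["sa"].write("repo/name | tokenized code\n")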
@contextlib.contextmanager
def batch_if_available(executor: "ExecutorLike") -> tp.Iterator[None]:
"""Only submitit executors have a batch context, so we need different
    cases for other executors (e.g. concurrent.futures).
    The batch context in submitit allows using slurm job arrays, which is
    better for cluster health.
"""
if hasattr(executor, "batch"):
with executor.batch(): # type: ignore
yield
else:
yield
def extract_language_name(path: Path) -> str:
return path.name.split(".")[0]
class DatasetMode:
modes: tp.Dict[str, tp.Type["DatasetMode"]] = {}
@classmethod
def __init_subclass__(cls) -> None:
"""auto-register modes for use in preprocessing/preprocess.py, as long
as they are imported in dataset_modes/__init__.py
"""
super().__init_subclass__()
parts = cls.__name__.split("Mode")
if len(parts) != 2 or parts[1]:
raise RuntimeError(
"dataset mode class names should be that format: "
f"YourNameMode (got: {cls.__name__})"
)
snake_name = (
"".join(["_" + c.lower() if c.isupper() else c for c in parts[0]])
.lstrip("_")
.replace("i_r_", "ir_")
)
cls.modes[snake_name] = cls
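        # e.g. "MonolingualFunctionsMode" -> "monolingual_functions",
        #      "IRFullFilesMode" -> "ir_full_files"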
def __init__(
self,
suffixes: tp.List[str],
folder: str,
languages: tp.List[str],
bpe: bpe_modes.BPEMode,
parallel_dataset: bool,
processed_lines: tp.Optional[tp.Set[str]] = None,
suffixes_for_postprocessing: tp.Tuple[str, ...] = (),
nb_train_split: int = 8,
keep_comments: bool = False,
repo_split: bool = True,
):
self.suffixes = suffixes
self.suffixes_for_postprocessing = suffixes_for_postprocessing
if processed_lines is None:
self.processed_lines = set()
else:
self.processed_lines = processed_lines
self.parallel_dataset = parallel_dataset
self.keep_comments = keep_comments
self.folder = Path(folder)
self.languages = languages
self.languages.sort()
self.initialize_processor()
self.bpe = bpe
self.nb_train_split = nb_train_split
self.repo_split = repo_split
def initialize_processor(self) -> None:
global lang_processors
lang_processors = {
lang: LangProcessor.processors[lang]()
if lang != "python"
else PythonTreeSitterProcessor()
for lang in self.languages
}
# BEWARE: probably does not work since self is not callable
# def checkpoint(
# self, input_path: str, process_strings: bool
# ) -> submitit.helpers.DelayedSubmission:
# return submitit.helpers.DelayedSubmission(
# self.__class__(
# self.suffixes,
# self.folder,
# self.languages,
# self.bpe,
# self.parallel_dataset,
# self.processed_lines,
# self.suffixes_for_postprocessing,
# ),
# input_path,
# process_strings,
# )
def extract_data_and_tokenize(
self,
executor: "OptExecutor" = None,
local_parallelism: tp.Optional[int] = None,
tokenize_line_timeout: int = 240,
) -> None:
"""
        Takes the root folder of the dataset, containing json files, as input.
        For each json file in it, extract the data, tokenize it, and save the result in a dedicated .tok file.
"""
logger.info("")
logger.info("")
logger.info("========== Extract and Tokenize ===========")
if local_parallelism is not None:
logger.info(f"Using {local_parallelism} processors.")
executor = ProcessPoolExecutor(max_workers=local_parallelism)
assert executor is not None
jobs: tp.List["JobLike"] = []
assert any(
len(list(self.folder.glob(f"{lang}.*.json.gz"))) > 0
for lang in self.languages
), f"there is no json in {str(self.folder)}"
json_files = [
(json_file, language)
for language in self.languages
for json_file in self.folder.glob(f"{language}.*.json.gz")
if extract_language_name(json_file) == language
and not all(
[
is_valid_file(Path(name))
for name in self.get_tok_files_for_json(json_file).values()
]
)
]
file_langs = [f[1] for f in json_files]
files = [f[0] for f in json_files]
logger.info(
f"{' '.join(self.languages)}: tokenizing {len(json_files)} json files ...: {json_files}"
)
if len(json_files) > 0:
if isinstance(executor, submitit.Executor):
jobs += executor.map_array(
self.extract_from_json_and_tokenize,
files,
file_langs,
repeat(self.bpe.process_strings),
repeat(local_parallelism),
repeat(tokenize_line_timeout),
)
else:
for f, flang in zip(files, file_langs):
jobs.append(
executor.submit(
self.extract_from_json_and_tokenize,
f,
flang,
self.bpe.process_strings,
local_parallelism,
tokenize_line_timeout,
)
)
else:
return logger.info("Data extraction and tokenization already done.")
for job in jobs:
job.result()
def extract_from_json_and_tokenize(
self,
input_path: str,
lang: str,
process_strings: bool,
local_parallelism: tp.Optional[int] = None,
tokenize_line_timeout=240,
):
"""
Takes one json file as input. For each document, it extracts data and tokenizes it.
        The results are written into a .tok file.
"""
logger.info(f"Extracting data from {input_path}")
# {suffix: open(output)}
tok_filepaths = self.get_tok_files_for_json(input_path)
lines = []
hook = fileinput.hook_compressed
pre_filtered = 0
with fileinput.input(str(input_path), openhook=hook) as fi:
for i, line in enumerate(fi):
try:
parsed_json = json.loads(line)
if self.pre_tok_filter(parsed_json):
pre_filtered += 1
continue
lines.append(
(f"{input_path}:{i}", parsed_json, lang, process_strings,)
)
except KeyboardInterrupt as e:
raise e
                except Exception:
                    # skip json lines that fail to parse
                    pass
logger.info(
f"Pre-filtered {pre_filtered} json lines among {pre_filtered + len(lines)} ({pre_filtered / (pre_filtered + len(lines)):.2%})"
)
number_errors = 0
number_timeouts = 0
multilines_code = 0
number_lines = len(lines)
logger.info(f"Number of lines to process: {number_lines}")
filtered_examples = 0
try:
start = time.time()
if local_parallelism:
assert cpu_count() > (
local_parallelism - 1
), "Number of processors must be greater than number of max workers in ProcessPoolExecutor"
# Leave one processor free for other tasks.
parallelism_ = local_parallelism
else:
parallelism_ = cpu_count()
with pebble.ProcessPool(
max_workers=parallelism_, initializer=self.initialize_processor,
) as executor:
future = executor.map(
self.checkpoint_line, lines, timeout=tokenize_line_timeout
)
results_for_line = future.result()
with open_file_dict(
tok_filepaths, mode="a" if self.processed_lines else "w"
) as tok_files:
while True:
try:
line_id = "None"
line_id, repo, tokenized_data = next(results_for_line)
except StopIteration:
break
except concurrent.futures.TimeoutError as error:
logger.info(
f"function took longer than {tokenize_line_timeout} seconds"
)
number_timeouts += 1
continue
except ProcessExpired as error:
number_errors += 1
logger.info(
f"Line tokenization error: {line_id}: %s. Exit code: %d"
% (error, error.exitcode)
)
continue
except KeyboardInterrupt:
raise
except Exception as error:
logger.info(
f"Line tokenization error: {line_id}: function raised %s"
% error
)
number_errors += 1
if hasattr(error, "traceback"):
logger.info(
error.traceback # type: ignore
                                )  # Python's traceback of remote process
                                self.processed_lines.add(line_id)
raise error
continue
# returning None means there was an issue
if tokenized_data == TIMEOUT:
number_timeouts += 1
continue
if (
tokenized_data is None
or all(v is None for v in tokenized_data.values())
or len(tokenized_data) == 0
or repo is None
):
logger.info(
f"Line tokenization error: {line_id}: The output was None for line {line_id}"
)
number_errors += 1
continue
if self.parallel_dataset:
if any(v is None for v in tokenized_data.values()):
logger.info(
f"Line tokenization error: {line_id}: Parallel dataset with values to None: {tokenized_data}"
)
number_errors += 1
continue
expected_length = len(next(iter(tokenized_data.values())))
if not all(
expected_length == len(v)
for v in tokenized_data.values()
):
logger.info(
f"Line tokenization error: {line_id}: Non-matching tokenized data size: {tokenized_data}"
)
number_errors += 1
continue
if self.post_tok_filter(tokenized_data):
filtered_examples += 1
continue
for suffix, tok_codes in tokenized_data.items():
if tok_codes is None:
logger.info(
f"Line tokenization error: {line_id}: Tokenized data is None for line {line_id}"
)
assert not self.parallel_dataset
number_errors += 1
continue
for tok_code in tok_codes:
if not len(tok_code.splitlines()) <= 1:
print(f"MULTILINE code:\n{tok_code}")
print(tok_code.splitlines())
print("#" * 50)
multilines_code += 1
try:
tok_files[suffix].write(repo + SEPARATOR + tok_code)
tok_files[suffix].write("\n")
except KeyboardInterrupt:
raise
except Exception:
sys.stderr.write(
f"Exception writing data: {tok_code}\n"
)
number_errors += 1
continue
for suffix, _ in tokenized_data.items():
tok_files[suffix].flush()
end = time.time()
logger.info(f"Time elapsed: {round((end - start),2)}")
# if number_errors > 0:
logger.warning(
f"Tokenization of {input_path}:"
f"{number_errors} errors out of {number_lines} lines"
f"({number_errors / number_lines:.2%})"
)
# if number_timeouts > 0:
logger.warning(
f"Tokenization of {input_path}:"
f"{number_timeouts} timeouts out of {number_lines} lines"
f"({number_timeouts / number_lines:.2%})"
)
# if filtered_examples > 0:
logger.warning(
f"Tokenization of {input_path}:"
f"{filtered_examples} filtered examples in {number_lines} lines"
f"({filtered_examples / number_lines:.2%})"
)
# if multilines_code > 0:
logger.warning(
f"Tokenization of {input_path}:"
f"{multilines_code} multiline codes {number_lines} lines"
f"({multilines_code / number_lines:.2%})"
)
except TimeoutError:
# The tokenization process is sometimes killed and it makes the multiprocessing hang forever
logger.warning("Program closed automatically after one hour")
finally:
future.cancel()
def checkpoint_line(
self, line: tp.Tuple[str, tp.Dict[str, str], str, bool]
) -> tp.Tuple[str, tp.Optional[str], tp.Optional[str]]:
line_id, json_line, lang, process_strings = line
default_return = line_id, None, None
if line_id in self.processed_lines:
# this was checkpointed, skip it
return default_return
global lang_processors
try:
return self.extract_data_for_line(
line_id, json_line, process_strings, lang_processors[lang]
)
except timeout.TimeoutError:
logger.info("Timeout error extracting data")
return line_id, None, TIMEOUT
def get_tok_files_for_json(self, json_path):
return {
suffix: str(json_path).replace(".json.gz", f".{suffix}.tok")
for suffix in self.suffixes
}
def extract_data_for_line(
self,
line_id: str,
json_line: dict,
process_strings: bool,
lang_processor: LangProcessor,
):
"""
        Designed to be called by the extract_from_file method.
        It should return the repo name and lists of
        source and target code (if the dataset is parallel).
"""
raise NotImplementedError(
"The abstract method extract_data_for_line should be overridden"
)
def pre_tok_filter(self, parsed_json: tp.Dict[str, tp.Any]) -> bool:
"""Lines to filter from json before doing any preprocessing"""
required_fields = ["content", "repo_name"]
if not all(field in parsed_json for field in required_fields):
return True
else:
return False
def post_tok_filter(self, tokenized_data: tp.Dict[str, tp.List[str]]) -> bool:
return False
def regroup_all_tok(self) -> None:
"""
Regroup all .tok into a single file.
This regrouping is a concatenation of the .tok files.
Therefore order is preserved and works for parallel datasets as well.
"""
files_to_group_template = "%s.[0-9]*.%s.tok"
all_files_template = "%s.all.%s.tok"
self.regroup_files(all_files_template, files_to_group_template)
def regroup_bpe(self) -> None:
"""
        Regroup all the BPE files into a single file.
        This makes it possible to train on a single GPU.
"""
files_to_group_template = "%s.train.%s.[0-9]*.bpe"
all_files_template = "%s.train.%s.bpe"
self.regroup_files(all_files_template, files_to_group_template)
def regroup_files(
self, all_files_template: str, files_to_group_template: str
) -> None:
for lang in self.languages:
for suffix in self.suffixes:
files_to_group = files_to_group_template % (lang, suffix)
all_files_name = all_files_template % (lang, suffix)
all_tok_path = self.folder.joinpath(all_files_name)
if is_valid_file(all_tok_path):
continue
if len(list(self.folder.glob(files_to_group))) == 0:
continue
command = f"cd {self.folder}; cat {files_to_group} > {all_tok_path}"
proc = subprocess.run(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
executable="/bin/bash",
)
logger.info(
f"all files {lang}.*[0-9].{suffix}.tok regrouped in {all_tok_path} ."
)
# TODO check number of lines
assert proc.returncode == 0, proc.stderr
assert is_valid_file(all_tok_path), all_tok_path
def shuffle_all_tok(self) -> None:
"""
        Shuffle the all.tok files. If the dataset is parallel, shuffle them in parallel.
"""
for lang in self.languages:
filenames = [f"{lang}.all.{suf}.tok" for suf in self.suffixes]
# check inputs
assert all([is_valid_file(self.folder.joinpath(p)) for p in filenames]), (
"files not found: "
+ ",".join(
[p for p in filenames if not is_valid_file(self.folder.joinpath(p))]
)
)
            # check outputs don't exist yet
if all(
[is_valid_file(self.folder.joinpath(f"{p}.shuf")) for p in filenames]
):
logger.info(f"shuffle already done for {lang}")
continue
# shuffle
if not self.parallel_dataset:
logger.info(
f"shuffling {len(filenames)} files individualy: {', '.join(filenames)}"
)
for fname in filenames:
shuf_file(self.folder.joinpath(fname))
else:
logger.info(
f"shuffling {len(filenames)} files parallely: {', '.join(filenames)}"
)
shuf_parallel_files(
[self.folder.joinpath(fname) for fname in filenames]
)
def split_train_test_valid(
self, percent_test: int = 1, percent_valid: int = 1, dedupe: bool = True
):
"""
        Take the tokenized data that has been regrouped into .tok files,
        and split it into training, test and validation sets.
        Do it in parallel for parallel datasets.
"""
for lang in self.languages:
if dedupe is False:
suffix_to_dedup = []
logger.info(
f"{lang}: No deduplication will be run. Dedup is set to False."
)
elif self.parallel_dataset:
suffix_to_dedup = [self.suffixes[0]]
logger.info(
f"{lang}: Deduplication on '{self.suffixes[0]}' and propagated on other suffixes."
)
else:
suffix_to_dedup = self.suffixes
logger.info(f"{lang}: Deduplication on {self.suffixes}.")
# start with obfuscated to dedupe based on the content of the file
seen_contents: tp.Set[str] = set()
ids_to_remove: tp.Set[int] = set()
for suffix in self.suffixes:
if not self.parallel_dataset:
seen_contents = set()
ids_to_remove = set()
all_tok_path = self.folder.joinpath(f"{lang}.all.{suffix}.tok.shuf")
assert is_valid_file(all_tok_path)
output_paths = {
split: self.folder.joinpath(f"{lang}.{split}.tok")
for split in ([f"valid.{suffix}"] if percent_valid > 0 else [])
+ ([f"test.{suffix}"] if percent_test > 0 else [])
+ (
[f"train.{suffix}.{n}" for n in range(self.nb_train_split)]
if percent_test + percent_valid < 100
else []
)
}
if all([is_valid_file(path) for path in output_paths.values()]):
logger.info(f"shuffle already done for {lang} and suffix {suffix}")
continue
output_nlines = {k: 0 for k in output_paths.keys()}
with open_file_dict(output_paths, mode="w") as outputs:
with open(
all_tok_path, "r", encoding="utf-8", errors="ignore"
) as all_splits_file:
# Deduplication
for line_id, line in enumerate(all_splits_file):
if line.startswith("CodeNet_"):
line = line.replace("CodeNet_", "CodeNet/", 1)
if "|" not in line:
logger.warning(
f"Missing ID at line {line_id}. Skipping line: {line}"
)
continue
repo, content = line.split("|", 1)
if not self.repo_split:
repo = f"{line_id}/{line_id}"
if "/" not in repo:
logger.warning(f"Incorrect repo ID at line {line_id}")
continue
if suffix in suffix_to_dedup:
content_hash = sha256(
content.encode("utf-8")
).hexdigest()
if content_hash in seen_contents:
ids_to_remove.add(line_id)
continue
seen_contents.add(content_hash)
elif line_id in ids_to_remove:
# line for reference suffix is a duplicate. Dedupe
continue
# select the repo name without the username of the repo creator
assert (
"/" in repo
), f"Repository {repo} should contain a / character"
username, repo = repo.split("/", 1)
if username == "CodeNet":
repo = repo.split("_")[0]
hash_repo = zlib.adler32(repo.encode("utf-8")) % 100
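                            # deterministic repo-level split: with e.g. percent_test=1 and percent_valid=1,
                            # hash 0 -> test, hash 1 -> valid, hashes 2-99 -> train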
output_split = (
"test"
if (hash_repo < percent_test)
else (
"train"
if hash_repo >= (percent_test + percent_valid)
else "valid"
)
)
output_split += f".{suffix}"
if output_split.startswith("train"):
output_split += f".{line_id % self.nb_train_split}"
outputs[output_split].write(content)
output_nlines[output_split] += 1
logger.info(
f"{lang}: Duplicated lines for {suffix}: {len(ids_to_remove)} / {line_id + 1}"
)
for k, v in output_nlines.items():
logger.info(f"{lang}: {k} -> {v} lines")
def get_train_test_valid_splits(
self, percent_test: int = 1, percent_valid: int = 1, dedupe: bool = True
) -> None:
"""
        Take all tokenized files and regroup them into train/test/validation sets.
"""
logger.info("")
logger.info("")
logger.info("========== Deduplicate and Split ===========")
# regroup all tokenized files
self.regroup_all_tok()
# shuffle
self.shuffle_all_tok()
# split into a train, test and valid sets
self.split_train_test_valid(
percent_test=percent_test, percent_valid=percent_valid, dedupe=dedupe
)
logger.info(
"Sucessfully regroup, deduplicate and split tokenized data into a train/valid/test sets."
)
def learn_bpe(self, ncodes: int, executor: tp.Optional[submitit.Executor] = None):
logger.info("")
logger.info("")
logger.info("========== Learn BPE ===========")
if not isinstance(self.bpe, bpe_modes.FastBPEMode):
logger.info(
f"No need to train bpe codes for {self.bpe.__class__.__name__}."
)
return
elif is_valid_file(self.bpe.codes):
logger.info(
f"No need to train bpe codes, already trained. Codes: {self.bpe.codes}"
)
return
self.bpe.codes = self.folder.joinpath(
f"{'-'.join(self.languages)}.{'-'.join(str(s) for s in self.suffixes)}.codes"
)
if is_valid_file(self.bpe.codes):
logger.info(
f"BPE codes already trained for this dataset. Codes: {self.bpe.codes}"
)
return
self._learn_bpe(ncodes, executor)
def _learn_bpe(
self, ncodes: int, executor: tp.Optional[submitit.Executor] = None
) -> None:
raise NotImplementedError("Learn bpe method need to be implemented.")
def apply_bpe(
self,
executor: "OptExecutor" = None,
local_parallelism: tp.Optional[int] = None,
) -> None:
logger.info("")
logger.info("")
logger.info("========== Apply BPE ===========")
if executor is None:
if local_parallelism is None:
executor = submitit.LocalExecutor(folder=self.folder.joinpath("log"))
else:
executor = ProcessPoolExecutor(max_workers=local_parallelism)
assert executor is not None
jobs = []
with batch_if_available(executor):
for f in chain(
*[
self.folder.glob(f"{lang}.{split}.*.*tok")
for split in DATASET_SPLITS
for lang in self.languages
]
):
if not is_valid_file(f):
logger.warning(
f"{f} is not a valid file, cannot to apply BPE on it."
)
elif not is_valid_file(f.with_suffix(self.bpe.ext)):
logger.info(f"Applying BPE on {f} ...")
job = executor.submit(
self.bpe.apply_bpe_file, f, f.with_suffix(self.bpe.ext)
)
jobs.append(job)
for job in jobs:
job.result()
logger.info("BPE done.")
# logger.info("Regrouping BPE")
# self.regroup_bpe()
def get_vocab(self, executor: "OptExecutor" = None):
logger.info("")
logger.info("")
logger.info("========== Get VOCAB ===========")
if is_valid_file(self.bpe.vocab_path):
logger.info(
f"No need to get vocab, already exists. Vocab: {self.bpe.vocab_path}"
)
return
self.bpe.vocab_path = self.folder.joinpath(
f"{'-'.join(self.languages)}.{'-'.join(str(s) for s in self.suffixes)}.vocab"
)
if is_valid_file(self.bpe.vocab_path):
logger.info(
f"BPE vocab already trained for this dataset. Vocab: {self.bpe.vocab_path}"
)
return
self._get_vocab(executor)
def _get_vocab(self, executor: "OptExecutor" = None):
raise NotImplementedError("Get vocab method needs to be implemented.")
def binarize(
self,
executor: "OptExecutor" = None,
local_parallelism: tp.Optional[int] = None,
) -> None:
logger.info("")
logger.info("")
logger.info("========== Binarize ===========")
if local_parallelism is not None:
executor = ProcessPoolExecutor(max_workers=local_parallelism)
assert executor is not None
jobs = []
with batch_if_available(executor):
for f in chain(
*[
self.folder.glob(f"{lang}.{split}.*{self.bpe.ext}")
for split in DATASET_SPLITS
for lang in self.languages
]
):
if not is_valid_file(f.with_suffix(f.suffix + ".pth")):
logger.info(f"binarizing {f} ...")
jobs.append(
executor.submit(binarize_for_XLM_file, f, self.bpe.vocab_path)
)
for job in jobs:
job.result()
logger.info("Binarize done.")
def check_files_and_symlink_for_XLM(self) -> None:
logger.info("")
logger.info("")
logger.info("========== Check and Create symlinks ===========")
# check that all files exist and are not empty
for lang in self.languages:
for suffix in self.suffixes:
for split in DATASET_SPLITS:
if split == "train":
for i in range(self.nb_train_split):
f = self.folder.joinpath(
f"{lang}.{split}.{suffix}.{i}{self.bpe.ext}.pth"
)
if not is_valid_file(f):
logger.warning(f"doest not exist {f}")
else:
f = self.folder.joinpath(
f"{lang}.{split}.{suffix}{self.bpe.ext}.pth"
)
if not is_valid_file(f):
logger.warning(f"doest not exist {f}")
logger.info("create symlinks for XLM ...")
XLM_folder = self.folder.joinpath("XLM-syml")
XLM_folder.mkdir(exist_ok=True)
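        # Illustrative naming convention (hypothetical suffixes): for a parallel "java"
        # dataset with suffixes ("cl", "sa") and a ".bpe" extension, train chunk 0 yields
        # XLM-syml/train.java_cl-java_sa.java_sa.0.pth -> ../java.train.sa.0.bpe.pth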
for lang in self.languages:
for split in DATASET_SPLITS:
if self.parallel_dataset:
for suffix1, suffix2 in get_all_pairs(self.suffixes):
suffix1, suffix2 = sorted([suffix1, suffix2])
for suffix in [suffix1, suffix2]:
if split == "train":
for i in range(self.nb_train_split):
# when parallel dataset, check files have same number of lines
if suffix == suffix1:
check_same_number_of_lines(
self.folder.joinpath(
f"{lang}.{split}.{suffix1}.{i}{self.bpe.ext}"
),
self.folder.joinpath(
f"{lang}.{split}.{suffix2}.{i}{self.bpe.ext}"
),
)
create_symlink(
f"../{lang}.{split}.{suffix}.{i}{self.bpe.ext}.pth",
XLM_folder.joinpath(
f"{split}.{lang}_{suffix1}-{lang}_{suffix2}.{lang}_{suffix}.{i}.pth"
),
)
else:
if suffix == suffix1:
check_same_number_of_lines(
self.folder.joinpath(
f"{lang}.{split}.{suffix1}{self.bpe.ext}"
),
self.folder.joinpath(
f"{lang}.{split}.{suffix2}{self.bpe.ext}"
),
)
create_symlink(
f"../{lang}.{split}.{suffix}{self.bpe.ext}.pth",
XLM_folder.joinpath(
f"{split}.{lang}_{suffix1}-{lang}_{suffix2}.{lang}_{suffix}.pth"
),
)
else:
for suffix in self.suffixes:
if split == "train":
for i in range(self.nb_train_split):
create_symlink(
f"../{lang}.{split}.{suffix}.{i}{self.bpe.ext}.pth",
XLM_folder.joinpath(
f"{split}.{lang}_{suffix}.{i}.pth"
),
)
if len(self.suffixes) == 1:
create_symlink(
f"../{lang}.{split}.{suffix}.{i}{self.bpe.ext}.pth",
XLM_folder.joinpath(f"{split}.{lang}.{i}.pth"),
)
else:
create_symlink(
f"../{lang}.{split}.{suffix}{self.bpe.ext}.pth",
XLM_folder.joinpath(f"{split}.{lang}_{suffix}.pth"),
)
if len(self.suffixes) == 1:
create_symlink(
f"../{lang}.{split}.{suffix}{self.bpe.ext}.pth",
XLM_folder.joinpath(f"{split}.{lang}.pth"),
)
logger.info("Check and symlink done.")
|
CodeGen-main
|
codegen_sources/preprocessing/dataset_modes/dataset_mode.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .. import dataset_modes
def test_modes_dict() -> None:
must_be_avail = {
"obfuscation",
"monolingual",
"monolingual_functions",
"obfuscation_functions",
"ir_functions",
"ir_full_files",
}
avail = dataset_modes.DatasetMode.modes
remain = must_be_avail - set(avail.keys())
assert not remain
|
CodeGen-main
|
codegen_sources/preprocessing/dataset_modes/test_dataset_modes.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from itertools import chain
from logging import getLogger
import submitit
import typing as tp
from codegen_sources.preprocessing.bpe_modes.bpe_mode import TMP_EXT
from codegen_sources.preprocessing.dataset_modes.dataset_mode import (
DATASET_SPLITS,
DatasetMode,
)
from codegen_sources.preprocessing.lang_processors import LangProcessor
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import (
REPLACE_DICT,
cleanup_obfuscated_function,
)
from codegen_sources.preprocessing.timeout import timeout
OUTLIER_INDICES_THRESHOLDS = {"VAR_": 200, "FUNC_": 200, "CLASS_": 100}
FUNC_OBFUSCATION_SUFFIXES = ["obfuscated_func", "dictionary_func"]
logger = getLogger()
class ObfuscationFunctionsMode(DatasetMode):
"""
Callable where we track the repos processed so that we can checkpoint with submitit
"""
def __init__(
self,
folder,
languages,
bpe,
processed_lines: tp.Optional[tp.Set] = None,
nb_train_split: int = 8,
keep_comments: bool = False,
repo_split: bool = True,
):
super().__init__(
suffixes=FUNC_OBFUSCATION_SUFFIXES,
folder=folder,
languages=languages,
bpe=bpe,
parallel_dataset=True,
processed_lines=processed_lines,
nb_train_split=nb_train_split,
keep_comments=keep_comments,
repo_split=repo_split,
)
# broken since not checkpointable
# def checkpoint(
# self, input_path: str, process_strings: bool
# ) -> submitit.helpers.DelayedSubmission:
# return submitit.helpers.DelayedSubmission(
# self.__class__(
# self.folder, self.languages, self.bpe, self.processed_lines,
# ),
# input_path,
# process_strings,
# )
def extract_data_for_line(
self,
line_id: str,
json_line: dict,
process_strings: bool,
lang_processor: LangProcessor,
):
default_return = line_id, None, None
if "content" not in json_line:
return default_return
content = json_line["content"]
for k, v in REPLACE_DICT.items():
content = content.replace(k, v)
try:
obfuscated, dico = lang_processor.obfuscate_code(content)
tokenized_obfuscated_file = " ".join(
lang_processor.tokenize_code(
obfuscated,
process_strings=process_strings,
keep_comments=self.keep_comments,
)
)
except NotImplementedError:
logger.error(
f"Obfuscate method is not implemented for {lang_processor.__class__.__name__}"
)
raise
except KeyboardInterrupt:
raise
except Exception as e:
logger.warning(f"Error obfuscating content {e} \n")
return default_return
obfuscated_functions = []
func_dicos = []
try:
f_standalone, f_class = lang_processor.extract_functions(
tokenized_obfuscated_file
)
functions = f_standalone + f_class
for func in functions:
func, func_dico = cleanup_obfuscated_function(func, dico)
obfuscated_functions.append(func)
func_dicos.append(func_dico)
assert len(obfuscated_functions) == len(func_dicos)
except KeyboardInterrupt:
raise
except Exception as e:
logger.warning(f"error {e} extracting functions\n")
return default_return
return (
line_id,
json_line["repo_name"],
{"obfuscated_func": obfuscated_functions, "dictionary_func": func_dicos},
)
def post_tok_filter(self, tokenized_data):
assert all(s in tokenized_data for s in self.suffixes)
assert len(tokenized_data["dictionary_func"]) == len(
tokenized_data["obfuscated_func"]
)
for var_prefix, var_number in OUTLIER_INDICES_THRESHOLDS.items():
for dico in tokenized_data["dictionary_func"]:
if f"{var_prefix}{var_number}" in dico:
return True
return False
def _learn_bpe(
self, ncodes: int, executor: tp.Optional["ExecutorLike"] = None
) -> None:
raise Exception(
"BPE codes should not be learnt from obfuscated data. Learn them on monolingual data."
"Please provide bpe codes or learn them."
"To do so, please run pipepline with monolingual mode until BPE learning."
)
def apply_bpe(
self,
executor: tp.Optional["ExecutorLike"] = None,
local_parallelism: tp.Optional[int] = None,
) -> None:
"""
        Override the parent method: in obfuscation mode we need to restore the BPE on the obfuscation special tokens.
"""
logger.info("")
logger.info("")
logger.info("========== Apply BPE ===========")
if executor is None:
executor = submitit.LocalExecutor(folder=self.folder.joinpath("log"))
# apply BPE with tmp suffix
_bpe_ext = self.bpe.ext
self.bpe.ext += TMP_EXT
super().apply_bpe(executor)
self.bpe.ext = _bpe_ext
# restore BPE on obfuscation special tokens
jobs = []
to_restore = list(
chain(
*[
self.folder.glob(f"{lang}.{split}.*{self.bpe.ext}{TMP_EXT}")
for split in DATASET_SPLITS
for lang in self.languages
]
)
)
for f in to_restore:
job = executor.submit(
self.bpe.repair_bpe_for_obfuscation_file, f, f.with_suffix("")
)
jobs.append(job)
for job in jobs:
job.result()
for f in to_restore:
assert f.with_suffix("").is_file()
f.unlink()
def _get_vocab(self, executor: tp.Optional["ExecutorLike"] = None) -> None:
raise Exception(
"Vocab should not be learnt from obfuscated data. Learn it on monolingual data."
"Please provide vocab or learn them."
"To do so, please run pipepline with monolingual mode until get_vocab."
)
|
CodeGen-main
|
codegen_sources/preprocessing/dataset_modes/obfuscation_functions_mode.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import javalang
from codegen_sources.preprocessing.obfuscation.obfuscated_names_generator import (
ObfuscatedNamesGenerator,
ObfuscatedNameType,
)
from javalang.tokenizer import Identifier, Position
EXCLUDED_TOKENS = {"main"}
def obfuscate(java_program):
tokens = list(javalang.tokenizer.tokenize(java_program))
declarations, declarations_per_vartype, calls_to_replace = get_variable_usages(
java_program
)
names_generator = ObfuscatedNamesGenerator()
# Finding the right tokens for declarations first
for token_name, dec_list in declarations.items():
for dec_info in dec_list:
dec_position = dec_info["position"]
            # TODO could make it O(log(n)) with binary search to find the first token
for i, tok in enumerate([t for t in tokens if t.position >= dec_position]):
if tok.value == token_name:
tok.value = names_generator.get_new_name(
token_name, dec_info["var_type"]
)
# TODO: check type for variable definitions?
dec_info["new_name"] = tok.value
break
calls_to_replace_index = 0
for current_tok_index, tok in enumerate(tokens):
if calls_to_replace_index < len(calls_to_replace):
# handle special calls or references to replace
current_call_to_replace = calls_to_replace[calls_to_replace_index]
assert current_call_to_replace["var_type"] in ObfuscatedNameType
relevant_declarations = declarations_per_vartype[
current_call_to_replace["var_type"]
][current_call_to_replace["name"]]
assert (
len(relevant_declarations) > 0
), "No relevant declarations in special token to replace. It should have been filtered out"
if tok.position >= current_call_to_replace["position"]:
calls_to_replace_index += 1
for advanced_tok_index in range(current_tok_index, len(tokens)):
if (
tokens[advanced_tok_index].value
== current_call_to_replace["name"]
):
if (
current_call_to_replace["var_type"]
== ObfuscatedNameType.FUNCTION
):
if current_call_to_replace["qualifier"] is None:
# if there is no qualifier, the method is called directly
is_replace_candidate = (
advanced_tok_index == 0
or tokens[advanced_tok_index - 1].value != "."
)
else:
# if there is a qualifier, the qualifier should be before the function call
qualifier_split = current_call_to_replace[
"qualifier"
].split(".")
is_replace_candidate = advanced_tok_index > 2 * len(
qualifier_split
)
for i, qual in enumerate(qualifier_split[::-1]):
is_replace_candidate = (
is_replace_candidate
and tokens[
advanced_tok_index - (2 * i + 1)
].value
== "."
and tokens[
advanced_tok_index - (2 * i + 2)
].value
== qual
)
if is_replace_candidate:
tokens[
advanced_tok_index
].value = relevant_declarations[0]["new_name"]
# handle other tokens using the declarations
if isinstance(tok, Identifier) and tok.value in declarations:
token_declarations = declarations[tok.value]
tok_position = tok.position
previous_declarations = [
dec
for dec in token_declarations
if dec["position"] < tok_position and "new_name" in dec
]
if (
current_tok_index >= 2
and tokens[current_tok_index - 1].value == "."
and tokens[current_tok_index - 2].value == "this"
):
previous_declarations = [
dec for dec in previous_declarations if dec["is_field"]
]
if len(previous_declarations) == 0:
                # fields can be declared later in the file or in an inherited class
previous_declarations = [
dec for dec in token_declarations if dec["is_field"]
]
relevant_declaration = None
if len(previous_declarations) == 0:
class_declarations = declarations_per_vartype[ObfuscatedNameType.CLASS][
tok.value
]
if len(class_declarations) > 0:
relevant_declaration = class_declarations[0]
else:
func_declarations = declarations_per_vartype[
ObfuscatedNameType.FUNCTION
][tok.value]
if len(func_declarations) > 0:
relevant_declaration = func_declarations[0]
else:
relevant_declaration = previous_declarations[-1]
if relevant_declaration is not None:
tok.value = relevant_declaration["new_name"]
res_lines = [[]]
prev_line = 0
for tok in tokens:
if tok.position.line > prev_line:
res_lines.append([])
prev_line = tok.position.line
res_lines[-1].append(tok.value)
return (
"\n".join([" ".join(line) for line in res_lines]),
names_generator.get_dictionary(),
)
def is_position_greater(position1, position2):
return position1.line > position2.line or (
position1.line == position2.line and position1.position > position2.position
)
def is_position_equal(position1, position2):
return position1.line == position2.line and position1.position == position2.position
def is_position_greater_or_equal(position1, position2):
return is_position_greater(position1, position2) or is_position_equal(
position1, position2
)
def get_variable_usages(java_program):
declarations = {}
calls_to_replace = []
ast = javalang.parse.parse(java_program)
previous_position = Position(0, 0)
for path, node in ast:
# Declarations
if (
isinstance(node, javalang.tree.ClassDeclaration)
or isinstance(node, javalang.tree.InterfaceDeclaration)
or isinstance(node, javalang.tree.EnumDeclaration)
):
declarations, previous_position = add_declaration_node(
node.name,
node.position,
ObfuscatedNameType.CLASS,
declarations,
previous_position,
)
if isinstance(node, javalang.tree.MethodDeclaration):
declarations, previous_position = add_declaration_node(
node.name,
node.position,
ObfuscatedNameType.FUNCTION,
declarations,
previous_position,
)
if (
isinstance(node, javalang.tree.LocalVariableDeclaration)
or isinstance(node, javalang.tree.VariableDeclaration)
or isinstance(node, javalang.tree.FieldDeclaration)
):
for name in [d.name for d in node.declarators]:
declarations, previous_position = add_declaration_node(
name,
node.position,
ObfuscatedNameType.VARIABLE,
declarations,
previous_position,
decl_type=node.type.name,
is_field=isinstance(node, javalang.tree.FieldDeclaration),
)
if isinstance(node, javalang.tree.FormalParameter) or isinstance(
node, javalang.tree.EnumConstantDeclaration
):
declarations, previous_position = add_declaration_node(
node.name,
node.position,
ObfuscatedNameType.VARIABLE,
declarations,
previous_position,
)
if isinstance(node, javalang.tree.MethodInvocation):
calls_to_replace, previous_position = add_node_to_replace(
node.member,
node.position,
ObfuscatedNameType.FUNCTION,
calls_to_replace,
previous_position,
qualifier=node.qualifier,
)
if isinstance(node.position, Position):
previous_position = node.position
for i in range(len(calls_to_replace) - 1):
assert calls_to_replace[i]["position"] <= calls_to_replace[i + 1]["position"]
declarations_per_vartype = {}
for vartype in ObfuscatedNameType:
declarations_per_vartype[vartype] = {
k: [dec for dec in v if dec["var_type"] == vartype]
for k, v in declarations.items()
}
calls_to_replace = [
call
for call in calls_to_replace
if len(declarations_per_vartype[call["var_type"]].get(call["name"], [])) > 0
]
return declarations, declarations_per_vartype, calls_to_replace
def add_declaration_node(
name,
position,
var_type,
declarations,
previous_position,
decl_type=None,
is_field=False,
):
if position is None:
new_positions = Position(previous_position.line, previous_position.column + 1)
position = previous_position
else:
new_positions = position
if name in EXCLUDED_TOKENS:
return declarations, position
declarations[name] = declarations.get(name, []) + [
{
"position": new_positions,
"var_type": var_type,
"decl_type": decl_type,
"is_field": is_field,
}
]
return declarations, position
def add_node_to_replace(
name, position, var_type, to_replace, previous_position, qualifier=None
):
if position is None:
new_positions = Position(previous_position.line, previous_position.column + 1)
position = previous_position
else:
new_positions = position
if name in EXCLUDED_TOKENS:
return to_replace, position
to_replace.append(
{
"name": name,
"position": new_positions,
"var_type": var_type,
"qualifier": qualifier,
}
)
return to_replace, position
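# Minimal usage sketch (not part of the original module): run this file directly to
# obfuscate a small, made-up Java snippet with the obfuscate() helper defined above.
if __name__ == "__main__":
    example_program = """
public class Counter {
    private int total;
    public void add(int value) {
        total = total + value;
    }
}
"""
    obfuscated_code, name_dictionary = obfuscate(example_program)
    # Expected mapping (roughly): Counter -> CLASS_0, total -> VAR_0, add -> FUNC_0, value -> VAR_1
    print(obfuscated_code)
    print(name_dictionary)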
|
CodeGen-main
|
codegen_sources/preprocessing/obfuscation/javalang_obfuscator.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import typing as tp
from enum import Enum
class ObfuscatedNameType(Enum):
VARIABLE = "VAR"
FUNCTION = "FUNC"
CLASS = "CLASS"
class ObfuscatedNamesGenerator:
def __init__(self, same_name_overloaded_func=True) -> None:
self.same_name_overloaded_func = same_name_overloaded_func
self.obfuscation_dict: tp.Dict[ObfuscatedNameType, tp.Dict[str, str]] = {}
for var_type in ObfuscatedNameType:
self.obfuscation_dict[var_type] = {}
self.funcnames_mapping: tp.Dict[str, str] = {}
self.attributes_mappings: tp.Dict[str, str] = {}
def get_new_name(self, varname, var_type, isAttribute=False):
var_index = len(self.obfuscation_dict[var_type])
if (
var_type is ObfuscatedNameType.FUNCTION
and self.function_is_obfuscated(varname)
and self.same_name_overloaded_func
):
return self.funcnames_mapping[varname]
if isAttribute and varname in self.attributes_mappings:
return self.attributes_mappings[varname]
obfuscated_name = f"{var_type.value}_{var_index}"
self.obfuscation_dict[var_type][obfuscated_name] = varname
if var_type is ObfuscatedNameType.FUNCTION and self.same_name_overloaded_func:
self.funcnames_mapping[varname] = obfuscated_name
if isAttribute:
self.attributes_mappings[varname] = obfuscated_name
return obfuscated_name
def get_dictionary(self):
return {k: v for d in self.obfuscation_dict.values() for k, v in d.items()}
def function_is_obfuscated(self, varname):
return varname in self.funcnames_mapping
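# Minimal usage sketch (not part of the original module): names are numbered per
# type in the order they are first seen, and overloaded functions reuse one name.
if __name__ == "__main__":
    generator = ObfuscatedNamesGenerator()
    print(generator.get_new_name("total", ObfuscatedNameType.VARIABLE))  # VAR_0
    print(generator.get_new_name("count", ObfuscatedNameType.VARIABLE))  # VAR_1
    print(generator.get_new_name("compute", ObfuscatedNameType.FUNCTION))  # FUNC_0
    print(generator.get_new_name("compute", ObfuscatedNameType.FUNCTION))  # FUNC_0 again
    print(generator.get_dictionary())  # {'VAR_0': 'total', 'VAR_1': 'count', 'FUNC_0': 'compute'}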
|
CodeGen-main
|
codegen_sources/preprocessing/obfuscation/obfuscated_names_generator.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import typing as tp
import re
SEPARATOR = " | "
OBFUSCATED_PREFIXES = ["VAR_", "FUNC_", "CLASS_"]
REPLACE_DICT = {
protected_name: protected_name.lower() for protected_name in OBFUSCATED_PREFIXES
}
def cleanup_obfuscated_function(func: str, dico: str) -> tp.Tuple[str, str]:
rename_dict = build_rename_dict(func)
previous_dict = read_dict(dico)
assert set(rename_dict.keys()).issubset(
set(previous_dict.keys())
), "invalid keys in rename dict"
new_func = " ".join([rename_tok(tok, rename_dict) for tok in func.split()])
func_dico = {
new_token: previous_dict[prev_token]
for prev_token, new_token in rename_dict.items()
}
return new_func, dico_to_string(func_dico)
def rename_tok(token: str, rename_dict: tp.Dict[str, str]) -> str:
for prefix in OBFUSCATED_PREFIXES:
# Replacing tokens with larger numbers first to avoid replacing parts of a token
        for match in sorted(re.findall(rf"{prefix}\d+", token), reverse=True):
assert match in rename_dict, f"{match} was not in rename dictionary"
            token = re.sub(rf"{match}(?!\d)", rename_dict[match], token)
return token
def read_dict(dico: str, separator: str = SEPARATOR) -> tp.Dict[str, str]:
return dict(entry.strip().split(maxsplit=1) for entry in dico.split(separator))
def build_rename_dict(func: str) -> tp.Dict[str, str]:
tokens = func.split()
rename_dict = {}
for prefix in OBFUSCATED_PREFIXES:
prefix_count = 0
for token in tokens:
            for match in re.findall(rf"{prefix}\d+", token):
if match not in rename_dict:
                    rename_dict[match] = f"{prefix}{prefix_count}"
prefix_count += 1
return rename_dict
def replace_function_name(f: str, fname: str) -> str:
return " ".join(["FUNC_0" if tok == fname else tok for tok in f.split(" ")])
def dico_to_string(dico: tp.Dict[str, str], separator: str = SEPARATOR) -> str:
return separator.join(f"{k} {dico[k]}" for k in sorted(dico))
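# Minimal usage sketch (not part of the original module): renumber the obfuscated
# tokens of a single extracted function and keep only the dictionary entries it
# actually uses. The example tokens and dictionary below are made up.
if __name__ == "__main__":
    example_func = "def FUNC_3 ( VAR_7 ) : return VAR_7 + VAR_7"
    example_dico = "FUNC_3 my_function | VAR_2 unused | VAR_7 x"
    cleaned_func, cleaned_dico = cleanup_obfuscated_function(example_func, example_dico)
    print(cleaned_func)  # def FUNC_0 ( VAR_0 ) : return VAR_0 + VAR_0
    print(cleaned_dico)  # FUNC_0 my_function | VAR_0 x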
|
CodeGen-main
|
codegen_sources/preprocessing/obfuscation/utils_deobfuscation.py
|
"""
This file is adapted from https://github.com/Cobertos/bobskater
Obfuscate a python file so it still works
Has issues:
Doesn't support any sort of annotations (skips them, should be okay for now?)
Hacky patch of comprehensions (see iter_fields_patch below: it reverses the field order so the for clause is visited before the expression it feeds)
Comprehensions do not push a stack and basically use Python 2 behavior where their identifiers leak
Eval, strings, and other oddities are not considered for identifiers. Attributes are not considered either
"""
import keyword
import sys
import ast
import string
import unicodedata
import itertools
import logging
from collections import defaultdict
import astunparse
from codegen_sources.preprocessing.obfuscation.bobskater_frameUtils import (
Frame,
FrameEntry,
getIdsFromNode,
setIdsOnNode,
)
from codegen_sources.preprocessing.obfuscation.obfuscated_names_generator import (
ObfuscatedNamesGenerator,
ObfuscatedNameType,
)
class Struct:
"""
    Provides object-style attribute access to a dict
"""
def __init__(self, inputDict) -> None:
self.__dict__.update(inputDict)
def iter_fields_patch(node):
"""
    Patch of ast.py's iter_fields so that ast.ListComp, ast.SetComp, etc. are iterated in reverse, so that
    the for clause comes before the expression evaluated by the for clause (we might be able to take
    this out now that there's the 2-stage approach, but that is unconfirmed)
"""
it = (
node._fields
if not isinstance(
node, (ast.ListComp, ast.SetComp, ast.GeneratorExp, ast.DictComp)
)
else reversed(node._fields)
)
for field in it:
try:
yield field, getattr(node, field)
except AttributeError:
pass
ast.iter_fields = iter_fields_patch
def validIdentifierIterator(version=2):
"""
    Yield valid identifier strings, built from the valid identifier start characters
    and the "tail" characters allowed after the first one (Python 2 rules by default)
"""
# Determine characters we can use
if version == 2:
validIDStart = string.ascii_letters + "_"
validID = string.ascii_letters + "_" + string.digits
else:
# Version 3
# Get all unicode categories
unicode_category = defaultdict(str)
for c in map(
chr, range(min(sys.maxunicode + 1, 20000))
): # sys.maxunicode is SLOW
unicode_category[unicodedata.category(c)] += c
        # id_start = Lu, Ll, Lt, Lm, Lo, Nl, the underscore, and characters with the Other_ID_Start property
validIDStart = (
unicode_category["Lu"]
+ unicode_category["Ll"]
+ unicode_category["Lt"]
+ unicode_category["Lm"]
+ unicode_category["Lo"]
+ unicode_category["Nl"]
+ "_"
)
        # id_continue = id_start, plus Mn, Mc, Nd, Pc and others with the Other_ID_Continue property
validID = (
validIDStart
+ unicode_category["Mn"]
+ unicode_category["Mc"]
+ unicode_category["Nd"]
+ unicode_category["Pc"]
)
# Yield the strings, starting with 1 character strings
for c in validIDStart:
if c in keyword.kwlist:
continue # Skip keywords
yield c
# Yield 2+ character strings
tailLength = 1
while True:
for c in validIDStart:
for c2 in itertools.combinations_with_replacement(validID, tailLength):
c2 = "".join(c2)
if c + c2 in keyword.kwlist:
continue # Skip keywords
yield c + c2
tailLength += 1
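# Illustrative note (not from the original file): with the default version=2 the
# generator yields "a", "b", ..., "z", "A", ..., "Z", "_", then two-character names
# such as "aa", skipping Python keywords.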
class FrameTrackingNodeVisitor(ast.NodeVisitor):
"""
    A NodeVisitor that builds a graph of all relevant identifiers and their
    relevant scopes.
    Do not inherit from this but instead instantiate it. It cannot give an accurate
    picture for a given identifier if scope usages occur out of order between definition
    and usage
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._logger = logging.getLogger(self.__class__.__name__)
# The root frame tracking identifiers and the current frame
# that we're at in the ast walking process
self._rootFrame = Frame.getBuiltinFrame()
self._currentFrame = self._rootFrame
def _handleEnterNode(self, node):
"""
Takes a new node and appends, modifies, or pops the identifiers in that
node to the appropriate stack frame
"""
# Amazingly helpful and even necessary reference/reading to edit
# this section would be:
#
# http://greentreesnakes.readthedocs.io/en/latest/nodes.html#
#
# as it demonstrates how the ast changes through Python versions
# and which identifiers come up as node.Name and which come up as
# a raw string on a field (Ctrl+F raw)
# SCOPE DEFINING CASES (nodes that have an identifier that will make it add
# to the current scope)
# Handle global/nonlocal (Python3) statement
if isinstance(node, (ast.Global, ast.Nonlocal)):
# global_stmt ::= "global" identifier ("," identifier)*
for strId in node.names:
# Don't check for parent identifier existence as it might
# not be defined yet, e.g:
# def a():
# nonlocal b
# b=2
# b=1
# a()
# If the currentFrame contains this identifier (they assigned and then used the identifier)
# do what python does and warn but leave it as local
# This is Python 3.5 behavior so this might need to be changed if ever a problem
if self._currentFrame.getScopedEntry(strId) == self._currentFrame:
self._logger.warning(
"Global/nonlocal found when variable already in local scope"
)
elif isinstance(node, ast.Nonlocal):
# Simply insert a passthrough so all scoping checks hit the
# parent frame instead
self._logger.debug(
"[+Entry]: " + str(node.__class__.__name__) + ' "' + strId + '"'
)
# use ast.Load() so that it doesn't use this scope for this identifier
self._currentFrame.addEntry(
FrameEntry(id=strId, source=node, ctx=ast.Load())
)
else: # isinstance(node, ast.Global)
# The frame we need to point to is the root frame
self._currentFrame.addEntry(
FrameEntry(
id=strId,
source=node,
ctx=ast.Load(),
scope=self._rootFrame.children[0],
)
)
# TODO: Annotations (for everything x:)
# Handle Name (which handles anything that doesn't use raw strings)
elif isinstance(node, ast.Name):
if isinstance(node.ctx, ast.Load):
self._logger.debug(
"[+Entry]: " + str(node.__class__.__name__) + ' "' + node.id + '"'
)
self._currentFrame.addEntry(
FrameEntry(id=node.id, source=node, ctx=node.ctx)
)
# Store
# Or Param (Python 2 ast.arg Name node ctx, instead of raw string)
elif isinstance(node.ctx, (ast.Store, ast.Param)):
self._logger.debug(
"[+Entry]: " + str(node.__class__.__name__) + ' "' + node.id + '"'
)
self._currentFrame.addEntry(
FrameEntry(id=node.id, source=node, ctx=ast.Store())
)
# Delete
            # ignore these; Python will throw an error anyway and they don't modify scope
# For everything else that has an ID, rely on getIdsFromNode to get the
# names and handle normally
else:
ids = getIdsFromNode(node)
for strId in ids:
self._logger.debug(
"[+Entry]: " + str(node.__class__.__name__) + ' "' + strId + '"'
)
self._currentFrame.addEntry(FrameEntry(id=strId, source=node))
if Frame.nodeCreatesFrame(node):
frame = Frame(source=node)
self._currentFrame.addFrame(frame)
self._currentFrame = frame
self._logger.debug(
"[+Frame]: "
+ str(node.__class__.__name__)
+ ' "'
+ (node.name if hasattr(node, "name") else "")
+ '"'
)
def _handleLeaveNode(self, node):
"""
Takes a node we're leaving and, if necessary, performs cleanup
related to moving it off of the stack
"""
if Frame.nodeCreatesFrame(node):
self._logger.debug("[-Frame]")
self._currentFrame = self._currentFrame.parent
def generic_visit(self, node):
self._handleEnterNode(node)
super().generic_visit(node)
self._handleLeaveNode(node)
def getRootFrame(self):
return self._rootFrame
class ObfuscationTransformer(ast.NodeTransformer):
"""
    Transforms the AST to obfuscate the code: mangles identifiers and optionally removes docstrings.
NOTE: Comments won't be in the AST anyway, so no worries
"""
def __init__(
self,
rootFrame,
*args,
removeDocstrings=True,
obfuscateNames=True,
debug=False,
**kwargs,
):
self._logger = logging.getLogger(self.__class__.__name__)
self.debug = debug
self._rootFrame = rootFrame
self._nodeStack = []
self._debugMsg = None
self._opt = Struct(
{"removeDocstrings": removeDocstrings, "obfuscateNames": obfuscateNames}
)
self.names_generator = ObfuscatedNamesGenerator()
# TODO: Name should eventually be unique per scope, as we
# can better obfuscate by using the same names in scopes that
# don't touch each other
self._name = validIdentifierIterator()
super().__init__(*args, **kwargs)
def getMangledName(self, nodeStack, strId, node):
"""
        Determine whether a strId used somewhere should be mangled,
        returning the mangled name to use, or False if it should be left alone
"""
frameEntry = self._rootFrame.findEntryAtStack(nodeStack, strId)
if frameEntry is None:
return False
isBuiltin = frameEntry.parent == self._rootFrame
alreadyMangledName = frameEntry.value
if alreadyMangledName:
if isinstance(node, ast.Attribute):
try:
parent = node.value.id
if (
parent == "self"
or parent.startswith("VAR_")
or parent.startswith("CLASS_")
):
return alreadyMangledName
except AttributeError:
return False
else:
self._debugMsg = 'Already mangled; "' + alreadyMangledName + '"'
return alreadyMangledName # It has a mangled name, use it
if alreadyMangledName is False:
self._debugMsg = "Already mangled; Don't mangle"
return False # It was instructed to not mangle
if isBuiltin:
# Dont rename builtins
self._debugMsg = "Don't mangle; Builtin"
frameEntry.value = False
return False
if strId.startswith("__") and strId.endswith("__"):
# Generally functions that cannot be renamed such as __init__
frameEntry.value = False
return False
stackNode = (
frameEntry.parent.source
) # ClassDef, FunctionDef, etc that defined the stack
sourceNode = node # The node that the id came from
# if isinstance(stackNode, (ast.ClassDef,ast.Module)):
# #Anything in the class namespace (static variables, methods)
# #and anything in the module namespace (will probs be exported)
# #should not be mangled
# self._debugMsg = "Don't mangle; Class or Module namespace"
# frameEntry.value = False
# return False
if isinstance(sourceNode, ast.alias):
# An imported name, don't mangle those
self._debugMsg = "Don't mangle; import name"
frameEntry.value = False
return False
elif (
isinstance(sourceNode, ast.arg)
and hasattr(nodeStack[-1], "defaults")
and nodeStack[-1].defaults
):
self._debugMsg = "Don't mangle; kwargs"
# An argument node with keyword args, don't mangle the keyword args
# Slice the keyword arguments
            # TODO: I have no idea if this works on versions other than Python 3.5
argumentsNode = nodeStack[-1]
# Slice the number of default nodes from the end
kwStrs = list(
map(lambda n: n.arg, argumentsNode.args[-len(argumentsNode.defaults) :])
)
if self.debug:
self._logger.debug(
"kwarg debug %s %s %s", kwStrs, strId, strId in kwStrs
)
if strId in kwStrs:
frameEntry.value = False
return False
# Otherwise, return the name we're mangling to for this
# string ID and store it
# Make sure the mangleName isn't in the current scope already (as an unmangled name)
ids = frameEntry.parent.getAllIds()
if isinstance(sourceNode, ast.ClassDef):
mangledName = self.names_generator.get_new_name(
sourceNode.name, ObfuscatedNameType.CLASS
)
elif isinstance(sourceNode, ast.FunctionDef):
mangledName = self.names_generator.get_new_name(
sourceNode.name, ObfuscatedNameType.FUNCTION
)
elif isinstance(sourceNode, ast.arg):
mangledName = self.names_generator.get_new_name(
sourceNode.arg, ObfuscatedNameType.VARIABLE
)
elif isinstance(sourceNode, ast.Name):
mangledName = self.names_generator.get_new_name(
sourceNode.id, ObfuscatedNameType.VARIABLE
)
elif isinstance(sourceNode, ast.Attribute):
oldname = sourceNode.attr
try:
parent = sourceNode.value.id
if (
parent == "self"
or parent.startswith("VAR_")
or parent.startswith("CLASS_")
):
if self.names_generator.function_is_obfuscated(oldname):
                        # we consider that if it was already defined as a function, it is a function. Otherwise, it is a variable
mangledName = self.names_generator.get_new_name(
oldname, ObfuscatedNameType.FUNCTION
)
else:
mangledName = self.names_generator.get_new_name(
oldname, ObfuscatedNameType.VARIABLE, isAttribute=True
)
else:
# probably an import. Don't mangle
return False
except AttributeError:
return False
frameEntry.value = mangledName
return mangledName
def generic_visit(self, node):
# Remove docstrings
if (
self._opt.removeDocstrings
and isinstance(node, ast.Expr)
and isinstance(
self._nodeStack[-1], (ast.FunctionDef, ast.ClassDef, ast.Module)
)
and isinstance(node.value, ast.Str)
):
return ast.Pass()
# Mangle names
ids = getIdsFromNode(node)
if self._opt.obfuscateNames:
if self.debug:
oldIds = ids[:]
for idx, strId in enumerate(ids):
mangleTo = self.getMangledName(self._nodeStack, strId, node)
if not mangleTo:
continue
ids[idx] = mangleTo
setIdsOnNode(node, ids)
if ids and self.debug:
self._logger.debug(
node.__class__.__name__
+ ": "
+ (str(oldIds) if oldIds else None)
+ " => "
+ (str(ids) if ids else None)
+ " ["
+ str(self._debugMsg)
+ "]"
)
self._debugMsg = ""
# Go in to deeper nodes
self._nodeStack.append(node)
super().generic_visit(node)
self._nodeStack.pop()
return node
def obfuscateString(s, *args, **kwargs):
# Parse string for AST
sAst = ast.parse(s)
# Walk the AST once total to get all the scope information
ftnv = FrameTrackingNodeVisitor()
ftnv.visit(sAst)
logging.getLogger(__name__).debug(ftnv.getRootFrame())
# Walk the AST a second time to obfuscate identifiers with
# queriable scope info
transformer = ObfuscationTransformer(ftnv.getRootFrame(), *args, **kwargs)
sAst = transformer.visit(sAst)
# Unparse AST into source code
return astunparse.unparse(sAst), transformer.names_generator.get_dictionary()
def inverse_dico(d):
return {v: k for k, v in d.items()}
def merge_dico_in_first(d1, d2):
"""
    Merge dictionary d2 into d1.
"""
for k, v in d2.items():
if k in d1:
raise ValueError(f"Key {k} should not be in d1")
else:
d1[k] = v
return d1
def obfuscateFile(fp, *args, **kwargs):
f = open(fp, "r")
s = f.read()
f.close()
s = obfuscateString(s, *args, **kwargs)
f = open(fp, "w")
f.write(s)
f.close()
if __name__ == "__main__":
iterative_factorial = """import os
class Factorial:
def factorial(self, n, path):
res, res2, res3 = 1, 1, 1
for i in range(n):
res *= (i + 1)
with open(os.path.join(path, 'res'), 'w') as f:
f.write(str(res))
return res
"""
factorial = """class Factorial:
def factorial(self, n):
if n == 1:
return 1
return n * self.factorial(n-1)
"""
res = obfuscateString(
iterative_factorial, obfuscateNames=True, removeDocstrings=False
)
print(res)
|
CodeGen-main
|
codegen_sources/preprocessing/obfuscation/bobskater_obfuscator.py
|
# This file is adapted from https://github.com/Cobertos/bobskater
"""
Utility classes for tracking identifiers in Python scopes
"""
import ast
import builtins # Do not use __builtins__, it's different in different implementations (like IPython vs CPython)
# TODO: After coming back to this a second time, the names aren't really sticking,
# Consider the name changes
# Source ==> sourceAstNode
# Parent ==> parentFrame
# Children ==> childFrames
# ids ==> entries
class Frame:
"""
Keeps track of a stack frame and all the identifiers that exist in that frame
"""
__slots__ = ["source", "parent", "children", "ids"]
def __init__(self, source=None, parent=None, children=None, ids=None) -> None:
self.source = source
self.parent = parent
self.children = children or []
self.ids = ids or {}
def __str__(self):
return (
"\n"
+ str(self.source.__class__.__name__ + " " if self.source else "Frame ")
+ "{"
+ "\n ".join([s + ": " + str(i) for s, i in self.ids.items()])
+ "}"
+ ("\n=> v v v v" if len(self.children) else "")
+ (
"\n=> ".join(" && ".join([str(s) for s in self.children]).split("\n"))
if self.children
else ""
)
)
def __repr__(self):
return str(self)
def addFrame(self, frame):
"""Adds the given frame as a child of this frame"""
assert isinstance(frame, Frame)
assert frame != self
self.children.append(frame)
frame.parent = self
def addEntry(self, frameEntry):
"""Adds an entry for the given identifier to this frame"""
assert isinstance(frameEntry, FrameEntry)
if frameEntry.id in self.ids:
            # Only record the first instance as subsequent instances aren't _really_
# allowed to redefine the scope. A global statement after a local assign
# is ignored (Python 3.5). A local assign after a global ctx.Load is an error.
            # I'm not really sure about nonlocal but if it acts like global then we
# should be fine
return
self.ids[frameEntry.id] = frameEntry
frameEntry.parent = self
def getStack(self):
"""Returns a stack from the root frame to this current frame"""
frames = [self]
frame = frames[0].parent
while frame:
frames.insert(0, frame)
frame = frames[0].parent
return frames
def getScopedEntry(self, frameEntryId):
"""
Searches upward through parents looking for the first instance
of frameEntryId, as if it was doing a scoped search
"""
for frame in reversed(self.getStack()):
if frameEntryId in frame.ids:
entry = frame.ids[frameEntryId]
if isinstance(entry.source, (ast.Global)):
# Return the special scopeParent pointing to
# the root
return entry.scopeParent.ids[entry.id]
if isinstance(entry.ctx, ast.Store) and not isinstance(
entry.source, (ast.Global, ast.Nonlocal)
):
# Only ast.Store will actually define the scope for an ID
                    # and global and nonlocal nodes need to be passed through as well
return entry
# This happens if the identifier was not seen in the given scope stack.
# Most likely passing something erroneously in
        # logging.getLogger(self.__class__.__name__).error("Queried identifier \"" + frameEntryId + "\" has not been seen at the given scope stack")
return None
def findEntryAtStack(self, nodeStack, frameEntryId):
"""
Using the nodeStack, finds the frameEntry for frameEntryId
"""
        # Find the top frame mentioned in nodeStack, then traverse
# down to find the scoped entry
return self.getFrameStack(nodeStack)[-1].getScopedEntry(frameEntryId)
def getAllIds(self):
"""
Return all the IDs we can see from here in scoped order
TODO: Convert to iterator, not list generator
"""
ids = []
for frame in reversed(self.getStack()):
ids += frame.ids.keys()
return ids
def getFrameStack(self, nodeStack):
"""
Using this frame as a root frame, returns the list of descendant frames
        that parallel the nodeStack. If they do not match, it will most likely throw
a StopIteration error (TODO: Make it return None)
"""
# Find the frame in question by traversing through the frames
# using the stack frame creating nodes to compare to those
# previously bookmarked
# TODO: This could be better an iterator, not a list return
frameStack = [self]
for node in filter(Frame.nodeCreatesFrame, nodeStack):
frame = frameStack[-1]
frame = next(filter(lambda f: f.source == node, frame.children))
frameStack.append(frame)
return frameStack
@staticmethod
def nodeCreatesFrame(node):
"""Whether or not the given node should be creating a stack frame"""
# todo: Comprehensions need to push a frame too
return isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.Module))
@staticmethod
def getBuiltinFrame():
"""
Gets a frame with entries for all the builtin variables in Python
which is commonly the root frame for scope operations
"""
frame = Frame()
for b in dir(builtins) + ["__file__"]:
frame.addEntry(FrameEntry(b))
return frame
class FrameEntry:
"""
    Keeps track of data related to a scoped identifier that lives in a
    stack frame.
* Source is the node the identifier came from
* Parent is the parent Frame
* Ctx is ast.Load or ast.Store to catalog if it will push to the stack or not
* Value is the return from onEnterStackFrame that was stored for this scoped identifier
* Id is the identifier of this entry
* ScopeParent is the actual parent (like for a global)
"""
__slots__ = ["source", "parent", "ctx", "value", "id", "scopeParent"]
def __init__(
self, id, source=None, ctx=ast.Store(), scope=None, value=None
) -> None:
self.source = source
self.ctx = ctx
self.value = value
self.id = id
self.scopeParent = scope
self.parent = None # The actual frame parent
def __str__(self):
return (
str(self.source.__class__.__name__)
+ (("(" + str(self.ctx.__class__.__name__) + ")") if self.ctx else "")
+ (("=" + str(self.value)) if self.value else "")
)
def __repr__(self):
return str(self)
def getIdsFromNode(node):
"""
    Python ast does not make it easy to act simply on the identifiers of a node
    (you have to switch over node types and get specific attributes). To
    ease this pain we return a flat list of all the identifiers in a node
    and provide a companion setter, setIdsOnNode, that takes a similar list.
    TODO: Properties that are not defined (that are None) just come back as blanks;
    do we want this? Do we want to be able to set the names of ids that aren't
    a thing?
    TODO: If we need more granularity, we need to edit how this works (would need
    to return key'd objects)
"""
# Handle global/nonlocal (Python3) statement
if isinstance(node, (ast.Global, ast.Nonlocal)):
return node.names
# Handle import alias's
elif isinstance(node, ast.alias):
return [node.name if node.asname is None else node.asname]
# Except
elif isinstance(node, ast.ExceptHandler):
# Is raw only in Python 3, Name node in Python 2, None if not included
return [node.name] if hasattr(node, "name") and type(node.name) == str else []
# FunctionDef or ClassDef
elif isinstance(node, (ast.FunctionDef, ast.ClassDef)):
return [node.name]
# arguments
# Up to Python 3.3, ast.arguments has kwargs and args as a raw string and not
# as an ast.arg(which we handle in another case) so handle it
    elif isinstance(node, ast.arguments):
        ret = []
        if hasattr(node, "args") and type(node.args) == str:
            ret.append(node.args)
        if hasattr(node, "kwargs") and type(node.kwargs) == str:
            ret.append(node.kwargs)
        return ret
    # TODO: keyword (in Python <3.3)
# arg
elif isinstance(node, ast.arg):
return [node.arg] if type(node.arg) == str else []
# TODO: Annotations (for everything x:)
# Handle Name (which handles anything that doesn't use raw strings)
elif isinstance(node, ast.Name):
return [node.id]
elif isinstance(node, ast.Attribute):
return [node.attr]
return []
def setIdsOnNode(node, names):
"""
Tightly coupled to the implementation of getIdsFromNode. It must unpack
it the EXACT same way
"""
if not names:
return # Passed an empty array, don't do anything
if isinstance(node, (ast.Global, ast.Nonlocal)):
node.names = names
elif isinstance(node, (ast.alias)):
if node.asname is None:
node.name = names[0]
else:
node.asname = names[0]
elif isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.ExceptHandler)):
node.name = names[0]
elif isinstance(node, ast.arguments):
# pop in reverse order
if hasattr(node, "kwargs") and type(node.kwargs) == str:
node.kwargs = names.pop()
if hasattr(node, "args") and type(node.args) == str:
node.args = names.pop()
elif isinstance(node, ast.arg):
node.arg = names[0]
elif isinstance(node, ast.Name):
node.id = names[0]
elif isinstance(node, ast.Attribute):
node.attr = names[0]
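# Illustrative usage sketch (hypothetical demo, for demonstration only): how the
# Frame / FrameEntry machinery above is typically combined. In real use a
# frame-tracking AST visitor builds this tree; here a tiny one is built by hand.
def _demo_frame_utils():
    import ast as _ast

    root = Frame.getBuiltinFrame()  # builtins live in the root frame
    module_frame = Frame(source=_ast.Module())
    root.addFrame(module_frame)
    module_frame.addEntry(FrameEntry("my_var", source=_ast.Name(), ctx=_ast.Store()))
    # Scoped lookup walks from the current frame up towards the root
    assert module_frame.getScopedEntry("my_var").id == "my_var"
    assert root.getScopedEntry("print") is not None  # found among the builtins
    # getIdsFromNode / setIdsOnNode give flat access to a node's identifiers
    name_node = _ast.parse("x = 1").body[0].targets[0]  # the ast.Name for "x"
    assert getIdsFromNode(name_node) == ["x"]
    setIdsOnNode(name_node, ["renamed_x"])
    assert name_node.id == "renamed_x"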
|
CodeGen-main
|
codegen_sources/preprocessing/obfuscation/bobskater_frameUtils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import subprocess
import uuid
from pathlib import Path
from .tree_sitter_processor import (
TreeSitterLangProcessor,
NEWLINE_TOK,
TREE_SITTER_ROOT,
)
from .java_processor import JAVA_CHAR2TOKEN, JAVA_TOKEN2CHAR
from .tokenization_utils import ind_iter
import typing as tp
import tree_sitter as ts
from ...code_runners.code_runner import RUN_ROOT_DIR
IDENTIFIERS = {"identifier", "field_identifier"}
CPP_TOKEN2CHAR = JAVA_TOKEN2CHAR.copy()
CPP_CHAR2TOKEN = JAVA_CHAR2TOKEN.copy()
class CppProcessor(TreeSitterLangProcessor):
def __init__(self, root_folder: Path = TREE_SITTER_ROOT) -> None:
super().__init__(
ast_nodes_type_string=["comment", "string_literal", "char_literal"],
stokens_to_chars=CPP_TOKEN2CHAR,
chars_to_stokens=CPP_CHAR2TOKEN,
root_folder=root_folder,
)
def get_function_name(self, function: tp.Union[str, tp.List[str]]) -> str:
return self.get_first_token_before_first_parenthesis(function)
def extract_arguments(self, function: str) -> tp.Tuple[tp.List[str], tp.List[str]]:
return self.extract_arguments_using_parentheses(function)
def clean_hashtags_function(self, function):
function = re.sub('[#][ ][i][n][c][l][u][d][e][ ]["].*?["]', "", function)
function = re.sub("[#][ ][i][n][c][l][u][d][e][ ][<].*?[>]", "", function)
function = re.sub("[#][ ][i][f][n][d][e][f][ ][^ ]*", "", function)
function = re.sub("[#][ ][i][f][d][e][f][ ][^ ]*", "", function)
function = re.sub(
"[#][ ][d][e][f][i][n][e][ ][^ ]*[ ][(][ ].*?[ ][)][ ][(][ ].*[ ][)]",
"",
function,
)
function = re.sub(
"[#][ ][d][e][f][i][n][e][ ][^ ]*[ ][(][ ].*?[ ][)][ ][{][ ].*[ ][}]",
"",
function,
)
function = re.sub(
'[#][ ][d][e][f][i][n][e][ ][^ ]*[ ]([(][ ])?["].*?["]([ ][)])?',
"",
function,
)
function = re.sub(
r"[#][ ][d][e][f][i][n][e][ ][^ ]*[ ]([(][ ])?\d*\.?\d*([ ][+-/*][ ]?\d*\.?\d*)?([ ][)])?",
"",
function,
)
function = re.sub("[#][ ][d][e][f][i][n][e][ ][^ ]", "", function)
function = re.sub(
"[#][ ][i][f][ ][d][e][f][i][n][e][d][ ][(][ ].*?[ ][)]", "", function
)
function = re.sub("[#][ ][i][f][ ][^ ]*", "", function)
function = function.replace("# else", "")
function = function.replace("# endif", "")
function = function.strip()
return function
def _get_functions_from_ast(
self,
code: str,
node: ts.Node,
class_funcs: tp.List[str],
standalone_funcs: tp.List[str],
_in_class: bool = False,
) -> None:
if node.type == "function_definition":
function = code[node.start_byte : node.end_byte]
# Avoid incorrect functions
if (
not function.strip().startswith("class")
and "(" in function
and "{" in function
):
if (
not _in_class or "static" in function[0 : function.index("{")]
) and "::" not in function[0 : function.index("(")]:
standalone_funcs.append(function)
else:
class_funcs.append(function)
for child in node.children:
self._get_functions_from_ast(
code,
child,
class_funcs,
standalone_funcs,
node.type == "class_specifier" or _in_class,
)
def detokenize_code(self, code):
fix_func_defines_pattern = re.compile(r"#define (.*) \(")
detokenized = super().detokenize_code(code)
detokenized = fix_func_defines_pattern.sub(r"#define \1(", detokenized)
return detokenized
@staticmethod
def format(code: str) -> str:
output_dir = RUN_ROOT_DIR / "formatting" / "cpp_formatting"
output_dir.mkdir(exist_ok=True, parents=True)
filename = f"{uuid.uuid4()}.cpp"
filepath = output_dir / filename
        try:
            with open(filepath, "w") as f:
                f.write(code)
            cmd = f"clang-format {filepath}"
            proc = subprocess.run(
                cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True
            )
            if proc.returncode != 0:
                raise ValueError(
                    f"Failed to format code with error: {proc.stderr.decode()}\nThe code was:\n{code}\nFull command: {cmd}"
                )
        finally:
            filepath.unlink(missing_ok=True)
        return proc.stdout.decode()
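# Illustrative usage sketch (hypothetical demo, for demonstration only). It assumes the
# tree-sitter C++ grammar is available under TREE_SITTER_ROOT; the exact token stream
# depends on the grammar version, so the values below are indicative.
def _demo_cpp_processor():
    processor = CppProcessor()
    code = "int add(int a, int b) { return a + b; }"
    tokens = processor.tokenize_code(code)  # e.g. ['int', 'add', '(', 'int', 'a', ...]
    restored = processor.detokenize_code(tokens)  # re-indented C++ source
    assert "add" in restored
    assert processor.get_function_name(tokens) == "add"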
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/cpp_processor.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import typing as tp
import pytest
from . import python_tree_sitter_processor as ptsp
STAR_IMPORT = "from typing import *"
DICT_IMPORT = "from typing import Dict"
TWO_IMPORTS = "from typing import Dict, List"
IMPORTED = "import typing"
IMPORTED_AS = "import typing as x"
TWO_LINES = "from typing import Dict"
@pytest.mark.parametrize(
"imports",
[[STAR_IMPORT], [DICT_IMPORT], [STAR_IMPORT, DICT_IMPORT], [IMPORTED_AS, IMPORTED]],
)
def test_type_cleaner_imports(imports: tp.List[str]) -> None:
other_import = ["from blublu import Dict"]
function = ["def hello():", " print('Hello')"]
cleaner = ptsp.TypeCleaner()
lines = other_import + imports + other_import + function
output = cleaner.get_imports("\n".join(lines))
assert output == imports
@pytest.mark.parametrize(
"line", ["from __future__ import division, print_function, absolute_import",],
)
def test_type_cleaner_imports_none(line: str) -> None:
other_import = ["from blublu import Dict"]
function = ["def hello():", " print('Hello')"]
cleaner = ptsp.TypeCleaner()
lines = other_import + [line] + other_import + function
output = cleaner.get_imports("\n".join(lines))
assert not output
COMM = """Tuple[ # my comment
str]"""
@pytest.mark.parametrize(
"typestr,expected",
[
("Dict[Text, tp.List[Any]]", "Dict[str,List[Any]]"),
("x.Dict[x.Text, tp.List[Any]]", "Dict[str,List[Any]]"),
("x.Optional[x.TextIO]", "Optional[TextIO]"),
("str|int|List[float]", "Union[int,str,List[float]]"),
("str|List[float|None]", "Union[str,List[Optional[float]]]"),
("str|Union[int,float]", "Union[float,int,str]"),
("Union[int,str,None]", "Optional[Union[int,str]]"),
("str|Union[int,float]", "Union[float,int,str]"),
("typing.List[list]", "List[List[Any]]"),
("x.Union[list, list]", "List[Any]"),
("x.Union[list, tuple]", "Union[List[Any],Tuple[Any,...]]"),
("tuple", "Tuple[Any,...]"),
(COMM, "Tuple[str]"),
("x." + COMM, "Tuple[str]"),
("Set[tp.Type['MyCls']]", "Set[Type[MyCls]]"),
('"MyObj"', "MyObj"),
("x.Union[str, Union[Text, Path], Union[Path, str]]", "Union[Path,str]",),
],
)
def test_type_cleaner(typestr: str, expected: str) -> None:
cleaner = ptsp.TypeCleaner()
output = cleaner.clean(typestr)
assert output == expected
def test_extract_hints() -> None:
code = """def blublu(x: int, y=3, t, w: int = 4):
z: int = x + y
return z
def blublu2() -> int:
pass"""
proc = ptsp.PythonTreeSitterProcessor()
code2, hints = proc.extract_type_hints(code)
repl = {h.uid: h.to_string() for h in hints}
hint0 = hints[0].with_value("Whatever")
assert hint0.uid == hints[0].uid
assert hint0.value != hints[0].value
code3 = code2.format(**repl)
assert code3 == code
assert hints[5].name == "blublu.z"
assert hints[1].default == "3"
assert hints[0].default is None
def test_extract_hints_method() -> None:
code = """class Cls:
def __init__(self):
self.var: int
def stuff(self, x):
"""
proc = ptsp.PythonTreeSitterProcessor()
code2, hints = proc.extract_type_hints(code)
repl = {h.uid: h.to_string() for h in hints}
code3 = code2.format(**repl)
assert code3 == code
expected = ("Cls.__init__", "Cls.var", "Cls.stuff.x", "Cls.stuff")
assert tuple(h.name for h in hints) == expected
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/test_python_utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import typing as tp
from sacrebleu import tokenize_v14_international
# IMPORTED
class ind_iter:
def __init__(self, length: int) -> None:
self.i = 0
self.len = length
def next(self) -> None:
self.i += 1
if self.i > (self.len - 1):
raise StopIteration
def prev(self) -> None:
self.i -= 1
if self.i < 0:
raise StopIteration
# IMPORTED
def process_string(
tok: str,
char2tok: tp.Dict[str, str],
tok2char: tp.Dict[str, str],
is_comment: bool,
do_whole_processing: bool = True,
) -> str:
if not (do_whole_processing or is_comment):
return tok.replace("\n", "\\n").replace("\r", "")
if is_comment:
tok = re.sub(" +", " ", tok)
tok = re.sub(r"(.)\1\1\1\1+", r"\1\1\1\1\1", tok)
if len(re.sub(r"\W", "", tok)) < 2:
return ""
tok = replace_general_string_tok(tok)
tok = replace_tokens(tok, char2tok)
if tok.strip().startswith("STOKEN00"):
if " STRNEWLINE " in tok:
tok = tok.replace(" STRNEWLINE ", " ENDCOM", 1)
else:
tok += " ENDCOM"
if not do_whole_processing:
tok = replace_tokens(
tok, {f" {key} ": value for key, value in tok2char.items()}
)
tok = (
tok.replace(" ▁ ", " ")
.replace(" TABSYMBOL ", "\t")
.replace("\\r", "")
.replace(" STRNEWLINE ", "\\n")
)
return tok
tok = re.sub(" +", " ", tok)
tok = tokenize_v14_international(tok)
tok = re.sub(" +", " ", tok)
tok = tok.replace("\r", "")
for special_token, char in tok2char.items():
tok = tok.replace(special_token, char)
if tok[0].isalpha():
        # for special strings (e.g. L "s"), we should remove the space after the prefix L
tok = tok.replace(f"{tok[0]} ", tok[0])
return tok
def tokenize_string(s: str) -> tp.List[str]:
return process_string(
s, char2tok=dict(), tok2char=dict(), is_comment=False, do_whole_processing=True
).split(" ")
def detokenize_string(s: tp.Union[str, tp.List[str]]) -> str:
assert isinstance(s, (str, list))
if isinstance(s, list):
s = " ".join(s)
return s.replace(" ", "").replace("▁", " ")
# IMPORTED
def replace_tokens(tok: str, dictionary: tp.Dict[str, str]) -> str:
for char, special_token in dictionary.items():
tok = tok.replace(char, special_token)
return tok
# IMPORTED
def replace_general_string_tok(tok: str) -> str:
return (
tok.replace(" ", " ▁ ")
.replace("\n", " STRNEWLINE ")
.replace("\t", " TABSYMBOL ")
)
# IMPORTED
def indent_lines(lines: tp.List[str]) -> str:
prefix = ""
for i, line in enumerate(lines):
line = line.strip()
if re.match("CB_COLON|CB_COMA|CB_", line):
prefix = prefix[2:]
line = prefix + line
elif line.endswith("OB_"):
line = prefix + line
prefix += " "
else:
line = prefix + line
lines[i] = line
untok_s = "\n".join(lines)
return untok_s
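# Illustrative usage sketch (hypothetical demo, for demonstration only): whitespace is
# encoded with the placeholder tokens above and restored by detokenize_string.
def _demo_string_tokenization():
    tokens = tokenize_string("hello world")
    # e.g. ['hello', '▁', 'world'], where ▁ marks a space
    assert detokenize_string(tokens) == "hello world"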
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/tokenization_utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import logging
import itertools
import typing as tp
from pathlib import Path
import tree_sitter as ts
from .lang_processor import LangProcessor
from .lang_processor import NEWLINE_TOK as NEWLINE_TOK
from .tokenization_utils import indent_lines, process_string, replace_tokens
TREE_SITTER_ROOT: Path = Path(__file__).resolve().parents[3] / "tree-sitter"
logger = logging.getLogger(__name__)
COMMENT_TYPES = {"comment", "line_comment", "block_comment", "docstring"}
class TreeSitterLangProcessor(LangProcessor):
def __init__(
self,
ast_nodes_type_string: tp.List[str],
stokens_to_chars: tp.Dict[str, str],
chars_to_stokens: tp.Dict[str, str],
root_folder: Path, # not default to make sure children implement it
function_declr: tp.Optional[str] = None,
new_line_sensitive: bool = False,
) -> None:
self.new_line_sensitive = new_line_sensitive
self.ast_nodes_type_string = ast_nodes_type_string
self.stokens_to_chars = stokens_to_chars
self.chars_to_stokens = chars_to_stokens
self.root_folder = Path(root_folder)
assert self.root_folder.is_dir(), f"{self.root_folder} is not a directory."
self._parser: tp.Optional[ts.Parser] = None
self.parser # initialize it
self.function_declr = function_declr
@property
def parser(self) -> ts.Parser:
if self._parser is not None:
return self._parser
lib_path = self.root_folder / f"{self.language}.so"
repo_path = self.root_folder / f"tree-sitter-{self.language}"
if not lib_path.exists():
logger.warning("Building %s parser into %s", self.language, lib_path)
assert repo_path.is_dir(), repo_path
ts.Language.build_library(
# Store the library in the `build` directory
str(lib_path),
# Include one or more languages
[str(repo_path)],
)
language = ts.Language(lib_path, self.language)
self._parser = ts.Parser()
self._parser.set_language(language)
return self._parser
def __getstate__(self) -> tp.Dict[str, tp.Any]:
attributes = dict(self.__dict__)
key = "_parser"
if key not in attributes:
raise RuntimeError(f"key {key} should be in the attributes")
attributes[key] = None # TreeSitter is not picklable
return attributes
def tokenize_code(
self, code: str, keep_comments: bool = False, process_strings: bool = True
) -> tp.List[str]:
tokenized_code = []
tokens, token_types = self.get_tokens_and_types(code)
skip_next_new_line = False
for token, token_type in zip(tokens, token_types):
if skip_next_new_line and token == NEWLINE_TOK:
continue
if token_type in COMMENT_TYPES and not keep_comments:
token = "" # Ignored later on
else:
# comments at the end of docstring still require skipping
skip_next_new_line = False
if token_type in self.ast_nodes_type_string:
token = process_string(
token,
self.chars_to_stokens,
self.stokens_to_chars,
token_type in COMMENT_TYPES,
process_strings,
)
if len(token) > 0:
if token_type not in self.ast_nodes_type_string:
token = token.replace("\n", NEWLINE_TOK)
token = token.replace(NEWLINE_TOK * 2, NEWLINE_TOK)
tokenized_code.append(token)
elif token_type == "docstring":
skip_next_new_line = True # make sure we remove extraline in python
tokenized_code2 = []
for tok1, tok2 in itertools.zip_longest(tokenized_code, tokenized_code[1:]):
tokenized_code2.append(tok1)
if (tok1, tok2) == ("INDENT", "DEDENT"):
tokenized_code2.extend(["pass", NEWLINE_TOK])
return tokenized_code2
def get_tokens_and_types(self, code: str) -> tp.Tuple[tp.List[str], tp.List[str]]:
code = code.replace("\r", "")
bcode = bytes(code, "utf8")
tree = self.get_ast(bcode)
tokens: tp.List[str] = []
tokens_type: tp.List[str] = []
self.dfs(bcode, tree.root_node, tokens, tokens_type)
return tokens, tokens_type
def get_ast(self, code: tp.Union[str, bytes]) -> ts.Tree:
assert isinstance(code, (str, bytes))
if isinstance(code, str):
code = bytes(code, "utf8")
tree = self.parser.parse(code)
return tree
def dfs(
self,
code: bytes,
node: ts.Node,
tokens: tp.List[str],
tokens_type: tp.List[str],
) -> None:
if len(node.children) == 0 or node.type in self.ast_nodes_type_string:
bsnippet = code[node.start_byte : node.end_byte].strip(b" ")
snippet = bsnippet.decode("utf8")
if len(snippet) > 0:
tokens.append(snippet)
tokens_type.append(node.type)
return
for child in node.children:
self.dfs(code, child, tokens, tokens_type)
def detokenize_code(self, code: tp.Union[str, tp.List[str]]) -> str:
# TODO make this cleaner with tree sitter AST ?
assert isinstance(code, (list, str))
if isinstance(code, list):
code = " ".join(code)
code = code.replace("ENDCOM", "\n")
code = code.replace(NEWLINE_TOK, "\n")
replaced_tokens = []
# call parser of the tokenizer to find comments and string and
# detokenize them correctly
try:
tokens, token_types = self.get_tokens_and_types(code)
for token, token_type in zip(tokens, token_types):
if token_type in self.ast_nodes_type_string:
token_ = token.replace("STRNEWLINE", "\n").replace(
"TABSYMBOL", "\t"
)
token_ = (
replace_tokens(token_, self.chars_to_stokens)
.replace(" ", "")
.replace("▁", " ")
)
if token_type in COMMENT_TYPES:
token_ += "\n"
replaced_tokens.append(token_)
else:
replaced_tokens.append(token)
if not self.new_line_sensitive and token in {";", "{", "}"}:
replaced_tokens.append("\n")
except KeyboardInterrupt as e:
raise e
except Exception: # pylint: disable=broad-except
pass
code = " ".join(replaced_tokens)
code = code.replace("\n", NEWLINE_TOK)
code = code.replace('} "', 'CB_ "')
code = code.replace('" {', '" OB_')
code = code.replace("} ;", "CB_COLON")
code = code.replace("} ,", "CB_COMA")
code = code.replace("}", "CB_")
code = code.replace("{", "OB_")
code = replace_tokens(code, self.stokens_to_chars)
lines = re.split(NEWLINE_TOK, code)
untok_s = indent_lines(lines)
untok_s = (
untok_s.replace("CB_COLON", "};")
.replace("CB_COMA", "},")
.replace("CB_", "}")
.replace("OB_", "{")
)
untok_s = untok_s.replace("> > >", ">>>").replace("<< <", "<<<")
untok_s = untok_s.replace("> >", ">>").replace("< <", "<<")
return untok_s
def _get_functions_from_ast(
self,
code: str,
node: ts.Node,
class_funcs: tp.List[str],
standalone_funcs: tp.List[str],
_in_class: bool = False,
) -> None:
raise NotImplementedError(
f"Implement _get_functions_from_ast() in {self.__class__.__name__} to extract functions"
)
def extract_functions(
self, code: tp.Union[str, tp.List[str]], tokenized: bool = True,
) -> tp.Tuple[tp.List[str], tp.List[str]]:
"""
        Extract standalone and class functions from the code.
        tokenized: whether the input code is tokenized or not
        Returns (standalone_functions, class_functions)
"""
if isinstance(code, list):
code = " ".join(code)
if tokenized:
code = self.detokenize_code(code)
ast = self.get_ast(code)
class_funcs: tp.List[str] = []
standalone_funcs: tp.List[str] = []
self._get_functions_from_ast(code, ast.root_node, class_funcs, standalone_funcs)
if tokenized:
class_funcs = [" ".join(self.tokenize_code(f)) for f in class_funcs]
standalone_funcs = [
" ".join(self.tokenize_code(f)) for f in standalone_funcs
]
return standalone_funcs, class_funcs
@staticmethod
def extract_arguments_using_parentheses(
function_str: str,
) -> tp.Tuple[tp.List[str], tp.List[str]]:
function = function_str.split(" ")
types = []
names = []
par = 0
arguments = []
function = function[function.index("(") :]
for tok in function:
if tok == "(":
par += 1
elif tok == ")":
par -= 1
arguments.append(tok)
if par == 0:
break
arguments_str = " ".join(arguments[1:-1])
if arguments_str == "":
return ["None"], ["None"]
arguments = arguments_str.split(",")
for arg in arguments:
bracks = re.findall(r"\[ \]", arg)
bracks_str = " ".join(bracks)
arg = arg.replace(bracks_str, "")
arg = arg.strip()
arg = re.sub(" +", " ", arg)
t = " ".join(arg.split(" ")[:-1] + [bracks_str])
n = arg.split(" ")[-1]
types.append(t)
names.append(n)
return types, names
@staticmethod
def get_first_token_before_first_parenthesis(
code: tp.Union[str, tp.List[str]]
) -> str:
assert isinstance(
code, (str, list)
), f"function is not the right type, should be str or list : {code}"
if isinstance(code, str):
code = code.split()
return code[code.index("(") - 1]
def traverse_tree(tree: ts.Tree, final: tp.Sequence[str] = ()) -> tp.Iterator[ts.Node]:
"""Traverses a tree-sitter tree, yielding final nodes
Parameters
----------
final: sequence of str
consider these types as final even if it has children
Yields
------
Node
a final node (either with no children, or in the "final" list)
"""
final = list(final)
cursor = tree.walk()
reached_root = False
while not reached_root:
yield cursor.node
if cursor.node.type not in final and cursor.goto_first_child():
continue
if cursor.goto_next_sibling():
continue
retracing = True
while retracing:
if not cursor.goto_parent():
retracing = False
reached_root = True
if cursor.goto_next_sibling():
retracing = False
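# Illustrative usage sketch (hypothetical demo, for demonstration only). The import is
# done lazily because cpp_processor itself imports this module; it also assumes the
# tree-sitter C++ grammar has been built under TREE_SITTER_ROOT.
def _demo_traverse_tree():
    from .cpp_processor import CppProcessor

    processor = CppProcessor()
    tree = processor.get_ast("int main() { return 0; }")
    node_types = [node.type for node in traverse_tree(tree)]
    assert "return" in node_types  # the keyword leaf is reached during the pre-order walk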
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/tree_sitter_processor.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from .java_processor import JAVA_CHAR2TOKEN, JAVA_TOKEN2CHAR
from .tree_sitter_processor import TreeSitterLangProcessor, TREE_SITTER_ROOT
JS_TOKEN2CHAR = JAVA_TOKEN2CHAR.copy()
JS_CHAR2TOKEN = JAVA_CHAR2TOKEN.copy()
class JavascriptProcessor(TreeSitterLangProcessor):
def __init__(self, root_folder: Path = TREE_SITTER_ROOT) -> None:
super().__init__(
ast_nodes_type_string=["comment", "string"],
stokens_to_chars=JS_TOKEN2CHAR,
chars_to_stokens=JS_CHAR2TOKEN,
root_folder=root_folder,
)
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/javascript_processor.py
|