python_code
stringlengths 0
4.04M
| repo_name
stringlengths 8
58
| file_path
stringlengths 5
147
|
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
Clockwork-main
|
planner/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
import sys
from common.data_types import UnixtimeAssignments
__ALL__ = ["PlanWriter"]
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger: logging.Logger = logging.getLogger(__name__)
class PlanWriter:
"""
This class contains methods that writes the Clockwork plans to a production datastore
"""
def __init__(self) -> None:
pass
async def overwrite_plan(self, plan: UnixtimeAssignments) -> None:
logger.debug(f'Final Plan: {plan}')
|
Clockwork-main
|
planner/plan_writer.py
|
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import asyncio
from abc import ABC
from typing import FrozenSet
import logging
import sys
from algorithm.right_based import (
algorithm as rb_algo,
metadata as rb_meta,
)
from common.data_types import TaskInstance, UnixtimeAssignments
from common.time_interval import Minutes
from common.timestamp import Timestamp
__ALL__ = ["SchedulingAlgorithm", "DummyTestPlan", "NullAlgorithm"]
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger: logging.Logger = logging.getLogger(__name__)
class SchedulingAlgorithm(ABC):
"""
Your __init__ function must have a signature matching
def __init__(self, *args: List[str], **kwargs: Dict[str, str]) -> None
for example
def __init__(self, namespace: str) -> None:
...
"""
async def run(self, tasks: FrozenSet[TaskInstance]) -> UnixtimeAssignments:
"""
If a specific subclass of SchedulingAlgorithm does requires arguments in
addition to the list of tasks, they should be passed in through __init__
run() should return a Dict of size `len(tasks)` which maps each task
instance to a valid unixtime. The unixtime outputted here will be
used as the dispatch time in the Dataswarm Scheduler service.
"""
raise NotImplementedError()
class RightBased(SchedulingAlgorithm):
async def run(self, tasks: FrozenSet[TaskInstance]) -> UnixtimeAssignments:
granularity = Minutes(1)
(
spark_metadata,
presto_metadata,
spark_max_size,
presto_max_size,
) = await asyncio.gather(
rb_meta.get_spark_metadata(tasks),
rb_meta.get_presto_metadata(tasks),
rb_meta.get_max_spark_resources(),
rb_meta.get_max_presto_resources(),
)
logger.debug(f'Presto Metadata Size {len(presto_metadata)}')
presto_plan = rb_algo.schedule_tasks(
presto_metadata, granularity=granularity, max_size=presto_max_size
)
logger.debug(f'Presto Plan Size {len(presto_plan)}')
logger.debug(f'Spark Metadata Size {len(spark_metadata)}')
spark_plan = rb_algo.schedule_tasks(
spark_metadata, granularity=granularity, max_size=spark_max_size
)
logger.debug(f'Spark Metadata Size {len(spark_plan)}')
plan = {}
for task_instance in tasks:
unique_task = task_instance.unique_task
if unique_task in spark_plan:
plan[task_instance] = task_instance.period_id.midnight + spark_plan[unique_task]
elif unique_task in presto_plan:
plan[task_instance] = task_instance.period_id.midnight + presto_plan[unique_task]
return plan
class NullAlgorithm(SchedulingAlgorithm):
async def run(self, tasks: FrozenSet[TaskInstance]) -> UnixtimeAssignments:
return {}
class ReturnZero(SchedulingAlgorithm):
"""
Schedules every task to run on January 1st 1970 (effectively 0 delay)
"""
async def run(self, tasks: FrozenSet[TaskInstance]) -> UnixtimeAssignments:
return {task: Timestamp(0) for task in tasks}
|
Clockwork-main
|
algorithm/algorithm.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from abc import ABC
from typing import FrozenSet
from common.data_types import TaskInstance
from common.timestamp import Timestamp
class TaskFetcher(ABC):
"""
Your __init__ function must have a signature matching
def __init__(self, *args: List[str], **kwargs: Dict[str, str]) -> None
for example
def __init__(self, namespace: str) -> None:
...
"""
async def fetch(self) -> FrozenSet[TaskInstance]:
"""
This method should be a self-contained callable taking no arguments.
If a specific subclass TaskFetcher does require arguments,
they should be passed in through __init__
fetch() should return all matching and currently-active task instances.
It is ultimately up to the individual fetcher what that means. One
simple example might be to find the next-in-line to be scheduled task
instances matching some filters
"""
raise NotImplementedError()
class HardCodedTaskFetcher(TaskFetcher):
async def fetch(self) -> FrozenSet[TaskInstance]:
return frozenset([
TaskInstance('task1', Timestamp(0)),
TaskInstance('task2', Timestamp(0)),
TaskInstance('task3', Timestamp(0)),
TaskInstance('task4', Timestamp(10)),
TaskInstance('task5', Timestamp(10)),
TaskInstance('task6', Timestamp(10)),
])
|
Clockwork-main
|
algorithm/task_fetchers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
Clockwork-main
|
algorithm/__init__.py
|
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Dict
import logging
import sys
from common.data_types import UniqueTask
from common.skyline_math import SkylineTracker
from common.time_interval import TimeInterval
from algorithm.right_based.metadata import RightBasedMetadata
__all__ = ["schedule_tasks"]
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger: logging.Logger = logging.getLogger(__name__)
def schedule_tasks(
metadata: Dict[UniqueTask, RightBasedMetadata],
granularity: TimeInterval,
max_size: float,
) -> Dict[UniqueTask, TimeInterval]:
global_skyline: SkylineTracker = SkylineTracker(
granularity=granularity, max_size=max_size
)
task_metadata_tuples = sorted(metadata.items(), key=lambda x: x[1], reverse=True)
assignments = {}
for i, (task, meta) in enumerate(task_metadata_tuples):
if i % 1000 == 0:
logger.debug(f"Scheduled {i}/{len(task_metadata_tuples)}, {len(assignments)} accepted")
start_time = meta.max_start_time
while start_time >= meta.min_start_time:
if global_skyline.can_add(start_time, meta.skyline):
global_skyline.add_job(start_time, meta.skyline)
assignments[task] = start_time
break
elif can_decrement(start_time, granularity):
start_time -= granularity
else:
break
return assignments
def can_decrement(time: TimeInterval, interval: TimeInterval) -> bool:
try:
_ = time - interval
return True
except Exception:
return False
|
Clockwork-main
|
algorithm/right_based/algorithm.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Dict, FrozenSet, List, Tuple
from common.data_types import TaskInstance, UniqueTask
from common.skyline_math import SkylineBlock
from common.time_interval import Seconds
@dataclass
class RightBasedMetadata:
min_start_time: Seconds
max_start_time: Seconds
skyline: List[SkylineBlock]
order_tuple: Tuple[Seconds, Seconds] = field(init=False)
def __post_init__(self) -> None:
self.order_tuple = (self.min_start_time, self.max_start_time)
assert self.max_start_time >= self.min_start_time
def __lt__(self, other: RightBasedMetadata) -> bool:
return self.order_tuple < other.order_tuple
def __lte__(self, other: RightBasedMetadata) -> bool:
return self.order_tuple <= other.order_tuple
def __gt__(self, other: RightBasedMetadata) -> bool:
return self.order_tuple > other.order_tuple
def __gte__(self, other: RightBasedMetadata) -> bool:
return self.order_tuple >= other.order_tuple
async def get_max_spark_resources() -> float:
return 3
async def get_max_presto_resources() -> float:
return 3
async def get_presto_metadata(
tasks: FrozenSet[TaskInstance],
) -> Dict[UniqueTask, RightBasedMetadata]:
return {
UniqueTask('task1', Seconds(0)): RightBasedMetadata(
min_start_time=Seconds(10),
max_start_time=Seconds(20),
skyline=[SkylineBlock(Seconds(1), 1.0)],
),
UniqueTask('task5', Seconds(10)):RightBasedMetadata(
min_start_time=Seconds(15),
max_start_time=Seconds(35),
skyline=[SkylineBlock(Seconds(5), 1.0)],
) ,
UniqueTask('task6', Seconds(10)): RightBasedMetadata(
min_start_time=Seconds(50),
max_start_time=Seconds(60),
skyline=[SkylineBlock(Seconds(5), 1.0)],
),
}
async def get_spark_metadata(
tasks: FrozenSet[TaskInstance],
) -> Dict[UniqueTask, RightBasedMetadata]:
return {
UniqueTask('task2', Seconds(0)): RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(100),
skyline=[SkylineBlock(Seconds(1), 1.0), SkylineBlock(Seconds(2), 2.0)],
),
UniqueTask('task3', Seconds(0)): RightBasedMetadata(
min_start_time=Seconds(100),
max_start_time=Seconds(100),
skyline=[SkylineBlock(Seconds(5), 2.0)],
),
UniqueTask('task4', Seconds(10)): RightBasedMetadata(
min_start_time=Seconds(19),
max_start_time=Seconds(59),
skyline=[SkylineBlock(Seconds(4), 4.0)],
),
}
|
Clockwork-main
|
algorithm/right_based/metadata.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
Clockwork-main
|
algorithm/right_based/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
Clockwork-main
|
algorithm/tests/__init__.py
|
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import unittest
from algorithm.right_based.algorithm import schedule_tasks
from algorithm.right_based.metadata import RightBasedMetadata
from common.data_types import UniqueTask
from common.skyline_math import SkylineBlock
from common.time_interval import Seconds
class TestParallelGraph(unittest.TestCase):
# Two jobs set to run at the same time where only one can run at a time
def test_two_nodes_same_start_time(self) -> None:
skyline = [SkylineBlock(Seconds(1), 1)]
task_a = UniqueTask("task_a", Seconds(0))
task_b = UniqueTask("task_b", Seconds(0))
pool = {
task_a: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(1),
skyline=skyline,
),
task_b: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(1),
skyline=skyline,
),
}
res = schedule_tasks(pool, granularity=Seconds(1), max_size=1)
self.assertIn(
res,
[
{
task_a: Seconds(0),
task_b: Seconds(1),
},
{
task_a: Seconds(1),
task_b: Seconds(0),
},
],
)
class TestTandemGraph(unittest.TestCase):
# Test a linked list of nodes
def test_two_tasks_back_to_back(self) -> None:
skyline = [SkylineBlock(Seconds(1), 1)]
first = UniqueTask("first", Seconds(0))
second = UniqueTask("second", Seconds(0))
pool = {
first: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(0),
skyline=skyline
),
second: RightBasedMetadata(
min_start_time=Seconds(1),
max_start_time=Seconds(1),
skyline=skyline
),
}
res = schedule_tasks(pool, granularity=Seconds(1), max_size=2)
self.assertEquals(
res,
{
first: Seconds(0),
second: Seconds(1)
},
)
class TestStaggardSkylines(unittest.TestCase):
# Check that two skyline blocks can be stacked on top of each other
def test_overlapping_skylines(self) -> None:
skyline = [
SkylineBlock(Seconds(1), 1),
SkylineBlock(Seconds(1), 2),
]
reflected_sykline = [
SkylineBlock(Seconds(1), 2),
SkylineBlock(Seconds(1), 1),
]
task_a = UniqueTask("first", Seconds(0))
task_b = UniqueTask("second", Seconds(0))
pool = {
task_a: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(0),
skyline=skyline
),
task_b: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(0),
skyline=reflected_sykline
)
}
res = schedule_tasks(pool, granularity=Seconds(1), max_size=3)
self.assertIn(
res,
[
{
task_a: Seconds(0),
task_b: Seconds(0),
},
]
)
# these two skylines cannot be partially stacked
def test_back_to_back(self) -> None:
skyline = [
SkylineBlock(Seconds(1), 1),
SkylineBlock(Seconds(1), 2),
]
task_a = UniqueTask("first", Seconds(0))
task_b = UniqueTask("second", Seconds(0))
pool = {
task_a: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(1),
skyline=skyline
),
task_b: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(1),
skyline=skyline
)
}
res = schedule_tasks(pool, granularity=Seconds(1), max_size=3)
self.assertIn(
res,
[
{
task_a: Seconds(0),
task_b: Seconds(1),
},
{
task_a: Seconds(1),
task_b: Seconds(0),
},
]
)
class Infeasible(unittest.TestCase):
def test_infeasible_simple(self) -> None:
skyline = [SkylineBlock(Seconds(1), 1)]
task_a = UniqueTask("task_a", Seconds(0))
task_b = UniqueTask("task_b", Seconds(0))
pool = {
task_a: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(0),
skyline=skyline,
),
task_b: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(0),
skyline=skyline,
),
}
res = schedule_tasks(pool, granularity=Seconds(1), max_size=1)
self.assertIn(
res,
[
{
task_a: Seconds(0),
},
{
task_b: Seconds(0),
},
],
)
def test_infesible_complex(self) -> None:
skyline = [SkylineBlock(Seconds(1), 1)]
start_at_9a = UniqueTask("start_at_9a", Seconds(0))
start_at_9b = UniqueTask("start_at_9b", Seconds(0))
start_whenever = UniqueTask("start_whenever", Seconds(0))
start_between_8_and_10 = UniqueTask("start_between_8_and_10", Seconds(0))
pool = {
start_at_9a: RightBasedMetadata(
min_start_time=Seconds(9),
max_start_time=Seconds(9),
skyline=skyline,
),
start_at_9b: RightBasedMetadata(
min_start_time=Seconds(9),
max_start_time=Seconds(9),
skyline=skyline,
),
start_whenever: RightBasedMetadata(
min_start_time=Seconds(0),
max_start_time=Seconds(10),
skyline=skyline,
),
start_between_8_and_10: RightBasedMetadata(
min_start_time=Seconds(8),
max_start_time=Seconds(10),
skyline=skyline,
),
}
res = schedule_tasks(pool, granularity=Seconds(1), max_size=1)
self.assertIn(
res,
[
{
start_between_8_and_10: Seconds(10),
start_at_9a: Seconds(9),
start_whenever: Seconds(8),
},
{
start_between_8_and_10: Seconds(10),
start_at_9b: Seconds(9),
start_whenever: Seconds(8),
},
],
)
|
Clockwork-main
|
algorithm/tests/test_right_based.py
|
#!/usr/bin/self.env python3
# pyre-strict
# pyre-ignore-all-errors[29]
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from enum import Enum
from datetime import timedelta
class TimeInterval(object):
def __init__(self, value: int, unit: TimeUnit) -> None:
self._time = timedelta(seconds=0)
self._set_time(int(value), unit)
@property
def seconds(self) -> int:
return self.get_time_scalar(TimeUnit.SECONDS)
@property
def minutes(self) -> int:
return self.get_time_scalar(TimeUnit.MINUTES)
@property
def hours(self) -> int:
return self.get_time_scalar(TimeUnit.HOURS)
@property
def days(self) -> int:
return self.get_time_scalar(TimeUnit.DAYS)
def get_time_scalar(self, unit: TimeUnit) -> int:
return int(self._time.total_seconds()) // self._seconds_to_multiple(unit)
def rescale(self, output_unit: TimeUnit) -> TimeInterval:
return TimeInterval(self.get_time_scalar(output_unit), output_unit)
def __repr__(self) -> str:
return str(self._time)
def __eq__(self, other: TimeInterval) -> bool:
return other._time == self._time
def __lt__(self, other: TimeInterval) -> bool:
return self._time < other._time
def __le__(self, other: TimeInterval) -> bool:
return self._time <= other._time
def __gt__(self, other: TimeInterval) -> bool:
return self._time > other._time
def __add__(self, other: TimeInterval) -> TimeInterval:
total_time_delta = self._time + other._time
total_seconds = int(total_time_delta.total_seconds())
return TimeInterval(total_seconds, TimeUnit.SECONDS)
def __sub__(self, other: TimeInterval) -> TimeInterval:
if other._time > self._time:
raise InvalidTimeAmount(
"{} - {} results in negative time. Use the timedelta class.",
self, other,
)
total_time_delta = self._time - other._time
total_seconds = int(total_time_delta.total_seconds())
return TimeInterval(total_seconds, TimeUnit.SECONDS)
def __hash__(self) -> int:
return self.seconds
def _seconds_to_multiple(self, unit: TimeUnit) -> int:
if unit == TimeUnit.SECONDS:
return 1
if unit == TimeUnit.MINUTES:
return 60
if unit == TimeUnit.HOURS:
return 3600
elif unit == TimeUnit.DAYS:
return 3600 * 24
else:
raise NotImplementedError('Programmer failed to add a enum option.')
def _set_time(self, value: int, unit: TimeUnit) -> None:
if value < 0:
raise InvalidTimeAmount(
"Invalid input " + str(value) + " time is negative."
)
if unit == TimeUnit.SECONDS:
self._time = timedelta(seconds=value)
elif unit == TimeUnit.MINUTES:
self._time = timedelta(minutes=value)
elif unit == TimeUnit.HOURS:
self._time = timedelta(hours=value)
elif unit == TimeUnit.DAYS:
self._time = timedelta(days=value)
else:
raise NotImplementedError('Programmer failed to add a enum option.')
class Seconds(TimeInterval):
def __init__(self, seconds: int) -> None:
super().__init__(seconds, TimeUnit.SECONDS)
class Minutes(TimeInterval):
def __init__(self, minutes: int) -> None:
super().__init__(minutes, TimeUnit.MINUTES)
class Hours(TimeInterval):
def __init__(self, hours: int) -> None:
super().__init__(hours, TimeUnit.HOURS)
class Days(TimeInterval):
def __init__(self, days: int) -> None:
super().__init__(days, TimeUnit.DAYS)
class TimeUnit(Enum):
SECONDS = 's'
MINUTES = 'm'
HOURS = 'hr'
DAYS = 'd'
class InvalidTimeAmount(Exception):
pass
|
Clockwork-main
|
common/time_interval.py
|
#!/usr/bin/self.env python3
# pyre-strict
# pyre-ignore-all-errors[29]
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from enum import Enum
from typing import Union
class Memory(object):
def __init__(self, value: Union[float, int], unit: MemoryUnit) -> None:
self._set_bytes(value, unit)
@property
def B(self) -> int:
return self.get_memory_scalar(MemoryUnit.B)
@property
def KB(self) -> int:
return self.get_memory_scalar(MemoryUnit.KB)
@property
def MB(self) -> int:
return self.get_memory_scalar(MemoryUnit.MB)
@property
def GB(self) -> int:
return self.get_memory_scalar(MemoryUnit.GB)
@property
def TB(self) -> int:
return self.get_memory_scalar(MemoryUnit.TB)
def get_memory_scalar(self, unit: MemoryUnit) -> int:
return self._bytes // self._base_unit_multiple(unit)
def rescale(self, output_unit: MemoryUnit) -> Memory:
return Memory(self.get_memory_scalar(output_unit), output_unit)
def get_mem_str(self, unit: MemoryUnit) -> str:
return str(self.get_memory_scalar(unit)) + unit.value
def __repr__(self) -> str:
if self.B >= 1024 ** 4:
return self.get_mem_str(unit=MemoryUnit.GB)
elif self.B >= 1024 ** 3:
return self.get_mem_str(unit=MemoryUnit.MB)
elif self.B >= 1024 ** 2:
return self.get_mem_str(unit=MemoryUnit.KB)
return self.get_mem_str(unit=MemoryUnit.B)
def __bool__(self) -> bool:
return self > B(0)
def __eq__(self, other: Memory) -> bool:
return self.B == other.B
def __lt__(self, other: Memory) -> bool:
return self.B < other.B
def __le__(self, other: Memory) -> bool:
return self.B <= other.B
def __gt__(self, other: Memory) -> bool:
return self.B > other.B
def __ge__(self, other: Memory) -> bool:
return self.B >= other.B
def __add__(self, other: Memory) -> Memory:
return Memory(other.B + self.B, MemoryUnit.B)
def __mul__(self, other: Union[float, int]) -> Memory:
return Memory(other * self.B, MemoryUnit.B)
def __rmul__(self, other: Union[float, int]) -> Memory:
return self * other
def __sub__(self, other: Memory) -> Memory:
if other.B > self.B:
raise InvalidMemoryAmount(f"{self} - {other} results in negative memory.")
return Memory(self.B - other.B, MemoryUnit.B)
def _set_bytes(self, value: Union[float, int], input_unit: MemoryUnit) -> None:
if value < 0:
raise InvalidMemoryAmount("Memory must always be positive.")
elif value in {float("inf"), float("NaN"), float("-inf")}:
raise InvalidMemoryAmount(f"Memory must be real: {value}")
self._bytes = int(value * self._base_unit_multiple(input_unit))
def _base_unit_multiple(self, unit: MemoryUnit) -> int:
if unit == MemoryUnit.B:
return 1
elif unit == MemoryUnit.KB:
return 1024
elif unit == MemoryUnit.MB:
return 1024 ** 2
elif unit == MemoryUnit.GB:
return 1024 ** 3
elif unit == MemoryUnit.TB:
return 1024 ** 4
else:
raise NotImplementedError("Programmer failed to add a enum option.")
class B(Memory):
def __init__(self, B: Union[float, int]) -> None:
super().__init__(B, MemoryUnit.B)
class KB(Memory):
def __init__(self, KB: Union[float, int]) -> None:
super().__init__(KB, MemoryUnit.KB)
class MB(Memory):
def __init__(self, MB: Union[float, int]) -> None:
super().__init__(MB, MemoryUnit.MB)
class GB(Memory):
def __init__(self, GB: Union[float, int]) -> None:
super().__init__(GB, MemoryUnit.GB)
class TB(Memory):
def __init__(self, TB: Union[float, int]) -> None:
super().__init__(TB, MemoryUnit.TB)
class TB(Memory):
def __init__(self, TB: Union[float, int]) -> None:
super().__init__(TB, MemoryUnit.TB)
def str_to_memory(mem_repr: str) -> Memory:
"""
Input Examples: "100mb", "22gb", "8tb", "0", "8.5 TB"
"""
mem_repr = mem_repr.upper().strip()
if mem_repr == "0":
return Memory(0, unit=MemoryUnit.B)
units = [
MemoryUnit.TB,
MemoryUnit.GB,
MemoryUnit.MB,
MemoryUnit.KB,
MemoryUnit.B, # Bytes must go last, b/c every other unit ends in B
]
for unit in units:
if mem_repr.endswith(unit.value):
try:
digit = mem_repr[: -len(unit.value)].strip()
value = float(digit)
except ValueError:
raise InvalidMemoryInput(f"Memory value is not a number: {digit}")
return Memory(value, unit)
raise InvalidMemoryInput(f"Invalid memory units. Input: {mem_repr}")
class MemoryUnit(Enum):
B = "B"
KB = "KB"
MB = "MB"
GB = "GB"
TB = "TB"
class InvalidMemoryAmount(ValueError):
pass
class InvalidMemoryInput(ValueError):
pass
|
Clockwork-main
|
common/memory.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import operator
from dataclasses import dataclass
from typing import Callable, Dict, Generator, List
from common.time_interval import Seconds, TimeInterval
@dataclass
class SkylineBlock:
duration: TimeInterval
size: float
def __post_init__(self):
assert self.size >= 0, "Skyline size must be positive"
class SkylineBoundsExceeded(Exception):
pass
class SkylineTracker:
"""
This abstraction represents a global skyline of jobs that have
been scheduled to run at particular points in time.
This abstraction implicitly expects non-negative values for all
SkylineBlocks (i.e. the allowable range of a skyline is bounded
by zero_baseline and max_size). Any operation that exceeds these
bounds will raise an SkylineBoundsExceeded error
"""
def __init__(self, granularity: TimeInterval, max_size: float) -> None:
self._granularity = granularity
self._max_size = max_size
self.time_series: Dict[TimeInterval, float] = {}
@property
def granularity(self) -> TimeInterval:
return self._granularity
def can_add(self, start_time: TimeInterval, blocks: List[SkylineBlock]) -> bool:
try:
self._get_updated_time_series(start_time, blocks, operator.add)
return True
except Exception:
return False
def can_remove(self, start_time: TimeInterval, blocks: List[SkylineBlock]) -> bool:
try:
self._get_updated_time_series(start_time, blocks, operator.sub)
return True
except Exception:
return False
def add_job(self, start_time: TimeInterval, blocks: List[SkylineBlock]) -> None:
self.time_series = self._get_updated_time_series(
start_time, blocks, operator.add
)
def remove_job(self, start_time: TimeInterval, blocks: List[SkylineBlock]) -> None:
self.time_series = self._get_updated_time_series(
start_time, blocks, operator.sub
)
def _get_updated_time_series(
self,
start_time: TimeInterval,
blocks: List[SkylineBlock],
op: Callable[[float, float], float],
) -> Dict[TimeInterval, float]:
return self._merge_time_series(
self.time_series, self._make_time_series(start_time, blocks), op
)
def _merge_time_series(
self,
series_a: Dict[TimeInterval, float],
series_b: Dict[TimeInterval, float],
op: Callable[[float, float], float],
) -> Dict[TimeInterval, float]:
all_times = set(series_a.keys()).union(series_b)
new_time_series = {}
for time in all_times:
new_size = op(series_a.get(time, 0), series_b.get(time, 0))
new_time_series[time] = new_size
if not (0 <= new_size <= self._max_size):
raise SkylineBoundsExceeded()
return new_time_series
def _make_time_series(
self, start_time: TimeInterval, blocks: List[SkylineBlock]
) -> Dict[TimeInterval, float]:
time = start_time
block_to_time_bins = []
for block in blocks:
start = self._bin(time)
inclusive_end = self._bin(time + block.duration - Seconds(1))
bins = list(
self._time_range(start, inclusive_end, self.granularity, inclusive=True)
)
block_to_time_bins.append((block, bins))
time += block.duration
time_series = {}
for block, bins in block_to_time_bins:
for t_bin in bins:
time_series[t_bin] = max(time_series.get(t_bin, 0), block.size)
return time_series
def _bin(self, time: TimeInterval) -> TimeInterval:
return Seconds(
(time.seconds // self._granularity.seconds) * self._granularity.seconds
)
def _time_range(
self,
start: TimeInterval,
end: TimeInterval,
step: TimeInterval,
inclusive: bool = False,
) -> Generator[TimeInterval, None, None]:
time = start
while time < end or (inclusive and time == end):
yield time
time += step
|
Clockwork-main
|
common/skyline_math.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
Clockwork-main
|
common/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, FrozenSet
from common.timestamp import Timestamp
from common.time_interval import Days, Seconds
__ALL__ = ["TaskInstance", "UnixtimeAssignments"]
@dataclass
class TaskInstance:
task_id: str
period_id: Timestamp
def __hash__(self) -> int:
# Note, the existence of __hash__ implies that TaskInstances
# should always be immutable
return hash((self.task_id, self.period_id.unixtime))
def __eq__(self, other: TaskInstance) -> bool:
return (self.task_id == other.task_id) and (self.period_id == other.period_id)
@property
def unique_task(self) -> UniqueTask:
return UniqueTask(
self.task_id,
Seconds((self.period_id - self.period_id.midnight).seconds),
)
@dataclass
class UniqueTask:
task_id: str
offset: Seconds
def __post_init__(self) -> None:
if self.offset >= Days(1):
raise ValueError(
f'A unique task offset: {self.offset.seconds} greater than a day makes no sense.'
)
def __hash__(self) -> int:
# Note, the existence of __hash__ implies that UniqueTask
# should always be immutable
return hash((self.task_id, self.offset.seconds))
UnixtimeAssignments = Dict[TaskInstance, Timestamp]
UnscheduledTasks = FrozenSet[TaskInstance]
|
Clockwork-main
|
common/data_types.py
|
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import datetime
import time
from typing import Any, overload
import sys
from common.time_interval import Days, Seconds, TimeInterval
__ALL__ = [
"Timestamp",
"Now",
"TwentyFourHoursFromNow",
"MidnightToday",
"TimeZero",
]
# This is only used for Printing the str representation of a timestamp
class Timestamp(object):
def __init__(self, unixtime: int) -> None:
if unixtime < 0:
raise InvalidTime(f"Unixtime cannot be negative: {unixtime}")
elif unixtime > 32503680000:
raise InvalidTime(f"Unixtime greater year 3000: {unixtime}")
self._time = int(unixtime)
@property
def midnight(self) -> int:
"""
For this example code we will assume midnight is 0
"""
return Timestamp(0)
@property
def unixtime(self) -> int:
return self._time
@property
def ds(self) -> str:
return self._localized_time().strftime("%Y-%m-%d")
def _to_str(self) -> str:
if self.unixtime < 100_000: # useful for testing
return f"Timestamp({self.unixtime})"
return self._localized_time().strftime("%Y-%m-%d %H:%M:%S %Z%z")
def __repr__(self) -> str:
return self._to_str()
def __hash__(self) -> int:
return hash(self.unixtime)
def __eq__(self, other: Timestamp) -> bool:
return self.unixtime == other.unixtime
def __lt__(self, other: Timestamp) -> bool:
return self.unixtime < other.unixtime
def __le__(self, other: Timestamp) -> bool:
return self.unixtime <= other.unixtime
def __gt__(self, other: Timestamp) -> bool:
return self.unixtime > other.unixtime
def __ge__(self, other: Timestamp) -> bool:
return self.unixtime >= other.unixtime
def __add__(self, other: TimeInterval) -> Timestamp:
return Timestamp(self.unixtime + other.seconds)
def __radd__(self, other: TimeInterval) -> Timestamp:
return Timestamp(self.unixtime + other.seconds)
@overload
def __sub__(self, other: TimeInterval) -> Timestamp:
pass
@overload
def __sub__(self, other: Timestamp) -> TimeInterval:
pass
def __sub__(self, other): # pyre-fixme
if isinstance(other, Timestamp):
return Seconds(self.unixtime - other.unixtime)
else:
return Timestamp(self.unixtime - other.seconds)
def __hash__(self) -> int:
return self.unixtime
class TimeZero(Timestamp):
"""
Jan 1st 1970 Midnight UTC
"""
def __init__(self) -> None:
super().__init__(0)
class InvalidTime(Exception):
pass
|
Clockwork-main
|
common/timestamp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import setup, find_packages, Extension
from setuptools import Extension, find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libbase",
sources=[
"fairseq/clib/libbase/balanced_assignment.cpp",
],
)
]
)
# extensions.extend(
# [
# cpp_extension.CppExtension(
# "fairseq.libnat",
# sources=[
# "fairseq/clib/libnat/edit_dist.cpp",
# ],
# )
# ]
# )
if "CUDA_HOME" in os.environ:
extensions.extend(
[
# cpp_extension.CppExtension(
# "fairseq.libnat_cuda",
# sources=[
# "fairseq/clib/libnat_cuda/edit_dist.cu",
# "fairseq/clib/libnat_cuda/binding.cpp",
# ],
# ),
cpp_extension.CppExtension(
"fairseq.ngram_repeat_block_cuda",
sources=[
"fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
"fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
],
),
]
)
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
"cffi",
"cython",
'dataclasses; python_version<"3.7"',
"hydra-core>=1.0.7,<1.1",
"omegaconf<2.1",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"regex",
"sacrebleu>=1.4.12",
# "torch",
"tqdm",
"bitarray",
# "torchaudio>=0.8.0",
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
if __name__ == "__main__":
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples) + get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
os.unlink(fairseq_examples)
|
bart_ls-main
|
fairseq-py/setup.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main()
|
bart_ls-main
|
fairseq-py/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import functools
import importlib
dependencies = [
"dataclasses",
"hydra",
"numpy",
"omegaconf",
"regex",
"requests",
"torch",
]
# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
try:
importlib.import_module(dep)
except ImportError:
# Hack: the hydra package is provided under the "hydra-core" name in
# pypi. We don't want the user mistakenly calling `pip install hydra`
# since that will install an unrelated package.
if dep == "hydra":
dep = "hydra-core"
missing_deps.append(dep)
if len(missing_deps) > 0:
raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))
# only do fairseq imports after checking for dependencies
from fairseq.hub_utils import ( # noqa; noqa
BPEHubInterface as bpe,
TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY # noqa
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast # noqa
except ImportError:
try:
import cython # noqa
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), "setup.py"),
["build_ext", "--inplace"],
)
except ImportError:
print(
"Unable to build Cython components. Please make sure Cython is "
"installed if the torch.hub model you are loading depends on it."
)
# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
|
bart_ls-main
|
fairseq-py/hubconf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
"""
python fb_sweep/sweep_model_denoising_easy_mask.py -p md_joint_g512 \
-d /fsx/xwhan/data/pretrain_corpus/long \
-g 8 -n 8 -t -1 --partition a100 --checkpoints-dir /checkpoints/xwhan/model_denoising --resume-failed --no-wandb
"""
def get_grid(args):
grid = []
total_num_udpates = 100000
warmup_updates = 500
num_data_loaders = 4
arch = "loco_variant_large"
task = "model_based_denoising"
criterion = "model_based_denoising"
adam_eps = 1e-06
weight_decay = 0.01
lr = 3e-5
update_freq = 1
grid += [
hyperparam(
"--restore-file",
"/data/home/xwhan/fairseq-py/checkpoints/local_large_v0/model.pt",
# "/checkpoints/xwhan/model_denoising/md_fixf_512.loco_large.ms8192.ts8192.mt1024.uf2.mu100000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz1.adam.beta9999.eps1e-06.clip0.1.s42.lr3e-05.warm500.memfp16.sample0.2.noise0.1.ngpu64/checkpoint_last.pt"
# "/data/home/xwhan/fairseq-py/checkpoints/local_large_v0/model.pt",
# "/data/home/xwhan/fairseq-py/checkpoints/loco_scratch_roberta/model.pt",
# "/checkpoints/xwhan/model_denoising/model_denoisng_joint.loco_large.ms8192.ts8192.mt1024.uf1.mu100000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.beta9999.eps1e-06.clip0.1.s42.lr3e-05.warm500.memfp16.sample0.2.noise0.1.ngpu32/checkpoint_last.pt"
# "/data/home/xwhan/fairseq-py/checkpoints/local_large_v0/model.pt",
),
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--task", task),
hyperparam("--criterion", criterion),
hyperparam("--use-xformers"),
hyperparam("--attention-name", "bs_local"),
hyperparam("--xformer-config", '{"block_size": 1024, "max_seq_len": 8192}'),
hyperparam("--generator-xformer-config", '{"block_size": 512, "max_seq_len": 8192}'),
hyperparam("--train-subset", "train" if not args.local else "valid"),
hyperparam("--train-generator"),
hyperparam("--easy-span-ops", ['sample'], save_dir_key=lambda val: f"sm_{val}"),
]
grid += [
hyperparam("--max-source-positions", 8192, save_dir_key=lambda val: f"ms{val}"),
hyperparam("--tokens-per-sample", 8192, save_dir_key=lambda val: f"ts{val}"),
hyperparam("--max-target-positions", 1024, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam(
"--max-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"
),
hyperparam("--required-batch-size-multiple", 1),
hyperparam(
"--sample-break-mode", ["complete"], save_dir_key=lambda val: f"brk_{val}"
),
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", weight_decay, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--batch-size", 4, save_dir_key=lambda val: f"bsz{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta9999"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--checkpoint-activations"),
]
# lr scheduler
grid += [
hyperparam("--seed", 42, save_dir_key=lambda val: f"s{val}"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", lr, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "memfp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
# validation and checkpoint settings
grid += [
hyperparam("--no-epoch-checkpoints"),
hyperparam("--reset-meters"),
hyperparam("--reset-optimizer"),
hyperparam("--reset-dataloader"),
]
grid += [
hyperparam("--share-all-embeddings"),
hyperparam("--layernorm-embedding"),
hyperparam("--share-decoder-input-output-embed"),
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
hyperparam("--combine-val"),
hyperparam("--save-interval-updates", 10000),
]
grid += [
hyperparam("--sample-ratio", 0.2, save_dir_key=lambda val: f"sample{val}"),
hyperparam("--noise-density", 0.1, save_dir_key=lambda val: f"noise{val}"),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_model_denoising_easy_mask.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
"""
Usage:
./fb_sweep/sweep_lm_wikitext103_transformer_xl.py \
-d ~myleott/data/data-bin/wikitext-103/ \
-p wt103.transformer_xl \
-t 1 -g 4 \
--snapshot-code --snapshot-recurse-dirs fairseq,fairseq_cli,examples/truncated_bptt \
--constraint volta32gb --partition dev
"""
import sweep
from sweep import hyperparam
def get_grid(args):
target_batch_size = 60
max_batch_size_on_v100 = 15
num_gpus = args.num_gpus * args.num_nodes
batch_size_per_gpu = min(max_batch_size_on_v100, target_batch_size // num_gpus)
update_freq = target_batch_size // (batch_size_per_gpu * num_gpus)
assert target_batch_size == update_freq * batch_size_per_gpu * num_gpus
return [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--max-update", 200000),
hyperparam("--user-dir", "examples/truncated_bptt"),
hyperparam("--task", "truncated_bptt_lm"),
hyperparam("--tokens-per-sample", 150),
hyperparam("--arch", "transformer_xl", save_dir_key=lambda val: val),
hyperparam("--n-layer", 16),
hyperparam("--d-model", 410),
hyperparam("--n-head", 10),
hyperparam("--d-head", 41),
hyperparam("--d-inner", 2100),
hyperparam("--dropout", 0.1),
hyperparam("--dropatt", 0.0),
hyperparam("--mem-len", 150),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--clip-norm", 0.25, save_dir_key=lambda val: f"cl{val}"),
hyperparam("--lr-scheduler", "cosine", save_dir_key=lambda val: val),
hyperparam("--warmup-updates", 0),
hyperparam("--lr", 0.00025, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--batch-size", batch_size_per_gpu),
hyperparam("--update-freq", update_freq),
hyperparam("--seed", [2], save_dir_key=lambda val: f"s{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 25 if not args.local else 1),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_lm_wikitext103_transformer_xl.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
"""
Usage:
./fb_sweep/apdative_span_sweep.py\
-d ~daju/data/enwik8/eos-data-bin/ \
-p enwiki8.adaptivespan \
-t 1 -g 4 \
--snapshot-code --snapshot-recurse-dirs fairseq,fairseq_cli,examples/truncated_bptt \
--constraint volta32gb --partition dev
"""
import sweep
from sweep import hyperparam
def get_grid(args):
target_batch_size = 64
# max_batch_size_on_v100 = 16
num_gpus = args.num_gpus * args.num_nodes
batch_size_per_gpu = target_batch_size // num_gpus
update_freq = target_batch_size // (batch_size_per_gpu * num_gpus)
assert target_batch_size == update_freq * batch_size_per_gpu * num_gpus
return [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--fp16-no-flatten-grads"),
hyperparam("--max-update", 600000),
hyperparam("--user-dir", "examples/adaptive_span"),
hyperparam("--task", "truncated_bptt_lm"),
hyperparam("--tokens-per-sample", 512),
hyperparam("--arch", "adaptive_span", save_dir_key=lambda val: val),
hyperparam("--n-layer", 12),
hyperparam("--d-model", 512),
hyperparam("--n-head", 8),
hyperparam("--d-inner", 2048),
hyperparam("--dropout", 0.3),
hyperparam("--attn-span", 8192),
hyperparam(
"--optimizer", "adagrad_with_grad_clip", save_dir_key=lambda val: val
),
hyperparam("--adagrad-clip", 0.03, save_dir_key=lambda val: f"ag_cl{val}"),
hyperparam("--validate-interval-updates", 1000),
hyperparam("--save-interval-updates", 1000),
hyperparam("--lr-scheduler", "fixed", save_dir_key=lambda val: val),
hyperparam("--warmup-updates", [32000], save_dir_key=lambda val: f"wu{val}",),
hyperparam("--batch-size-valid", batch_size_per_gpu * 2),
hyperparam("--lr", [0.07], save_dir_key=lambda val: f"lr{val}"),
hyperparam("--criterion", "adaptive_span_loss"),
hyperparam("--batch-size", batch_size_per_gpu),
hyperparam("--update-freq", update_freq),
hyperparam("--seed", [2], save_dir_key=lambda val: f"s{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 25 if not args.local else 1),
hyperparam(
"--aux-loss-scaler", [0.0000005], save_dir_key=lambda val: f"loss{val}",
),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_adaptive_span.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
def get_grid(args):
grid = []
total_num_udpates = 20000
warmup_updates = 500
num_data_loaders = 4
arch = "bart_large"
task = "translation"
criterion = "label_smoothed_cross_entropy"
adam_eps = 1e-08
weight_decay = 0.01
update_freq = 4 if args.num_nodes == 1 else 1
grid += [
hyperparam(
"--restore-file",
"/private/home/namangoyal/src/fairseq_denoising_codepush/fairseq-py/bart.large/model.pt",
)
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--task", task),
hyperparam("--criterion", criterion),
hyperparam("--source-lang", "source"),
hyperparam("--target-lang", "target"),
hyperparam("--truncate-source"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
]
grid += [
hyperparam("--max-tokens", 2048, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam(
"--max-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"
),
hyperparam("--required-batch-size-multiple", 1),
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", weight_decay, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.999)", save_dir_key=lambda val: "beta9999"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
]
# lr scheduler
grid += [
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", 3e-05, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
# validation and checkpoint settings
grid += [
# hyperparam("--no-save"),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--reset-meters"),
hyperparam("--reset-optimizer"),
]
grid += [
hyperparam("--share-all-embeddings"),
hyperparam("--layernorm-embedding"),
hyperparam("--share-decoder-input-output-embed"),
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_bart_cnn.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
def get_grid(args):
grid = []
total_num_udpates = 50000
max_epoch = 50
warmup_updates = 10
num_data_loaders = 4
arch = "bart_large"
task = "summarization"
criterion = "label_smoothed_cross_entropy"
adam_eps = 1e-08
weight_decay = 0.01
grid += [
hyperparam(
"--restore-file",
"/data/home/xwhan/fairseq-py/checkpoints/bart.large.cnn.extended4K-1K/model.pt",
)
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--task", task),
hyperparam("--criterion", criterion),
hyperparam("--max-source-positions", 4096),
hyperparam("--max-target-positions", 1024),
hyperparam("--source-lang", "source"),
hyperparam("--target-lang", "target"),
hyperparam("--truncate-source"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
# hyperparam("--truncate-target"),
]
grid += [
hyperparam("--batch-size", 1, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--batch-size-valid", 1),
hyperparam("--update-freq", [1,2], save_dir_key=lambda val: f"uf{val}"),
hyperparam(
"--max-epoch", max_epoch, save_dir_key=lambda val: f"me{val}"
),
hyperparam("--required-batch-size-multiple", 1),
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", weight_decay, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--seed", 42, save_dir_key=lambda val: f"s{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.999)", save_dir_key=lambda val: "beta9999"),
hyperparam("--total-num-update", total_num_udpates),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
]
# lr scheduler
grid += [
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", [3e-5, 5e-6], save_dir_key=lambda val: f"lr{val}"),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
# validation and checkpoint settings
grid += [
# hyperparam("--no-save"),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--reset-meters"),
hyperparam("--reset-optimizer"),
hyperparam("--reset-dataloader"),
hyperparam("--validate-interval-updates", 20),
hyperparam("--best-checkpoint-metric", "rougel", save_dir_key=lambda val: f"cmetric{val}")
]
grid += [
hyperparam("--share-all-embeddings"),
hyperparam("--layernorm-embedding"),
hyperparam("--share-decoder-input-output-embed"),
hyperparam("--find-unused-parameters")
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--maximize-best-checkpoint-metric"),
hyperparam("--log-format", "json"),
hyperparam("--keep-last-epochs", 1),
hyperparam("--log-interval", 10),
hyperparam("--eval-rouge"),
hyperparam("--eval-rouge-args", '{"beam": 4, "max_len_b": 300, "lenpen": 2.0, "no_repeat_ngram_size": 3}')
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_ami_icsi.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
def get_grid(args):
max_update = 300000
eff_bsz = 512
max_sentences = 2
update_freq = (
1
if args.local or "test" in args.prefix
else int((eff_bsz * 8) / (max_sentences * args.num_nodes * args.num_gpus))
)
save_interval = 2000
warmup_updates = 3000
peak_lr = 1.5e-04
return [
hyperparam(
"--train-subset",
"train13" if args.local or "test" in args.prefix else "train",
),
hyperparam(
"--valid-subset",
"valid"
if args.local or "test" in args.prefix
else "valid,valid1,valid2,valid3,valid4",
),
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "me_fp16"),
hyperparam("--num-workers", 2),
hyperparam("--model-parallel-size", min(8, args.num_gpus)),
hyperparam("--criterion", "vocab_parallel_cross_entropy"),
hyperparam("--save-interval-updates", save_interval),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--task", "language_modeling"),
hyperparam("--sample-break-mode", "none", save_dir_key=lambda val: f"bm_{val}"),
hyperparam("--tokens-per-sample", 1024, save_dir_key=lambda val: f"tps{val}"),
hyperparam(
"--arch", "transformer_lm_megatron_big", save_dir_key=lambda val: val
),
hyperparam(
"--share-decoder-input-output-embed", save_dir_key=lambda val: "share"
),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "b2_0.98"),
hyperparam("--adam-eps", 1e-8, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"cl{val}"),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--lr", peak_lr, save_dir_key=lambda val: f"lr{val}"),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"wu{val}"
),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--batch-size", max_sentences, save_dir_key=lambda val: f"ms{val}"),
hyperparam("--required-batch-size-multiple", 1),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--max-update", max_update, save_dir_key=lambda val: f"mu{val}"),
hyperparam("--bucket-cap-mb", "200"),
hyperparam("--seed", 1, save_dir_key=lambda val: f"s{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
hyperparam("--fast-stat-sync"),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_megatron_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
from sweep_wmt_en2de_transformer_big_common import get_common_grid
COMMON_GRID = get_common_grid()
def get_grid(args):
return COMMON_GRID + [
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--distributed-wrapper", "DDP", save_dir_key=lambda val: f"{val}"),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_wmt_en2de_transformer_big_ddp.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
def get_grid(args):
max_update = 100000
return [
hyperparam("--train-subset", "train" if not args.local else "valid"),
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
# hyperparam('--memory-efficient-fp16', save_dir_key=lambda val: 'me_fp16'),
hyperparam("--num-workers", 2),
hyperparam("--save-interval-updates", 10000),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--task", "language_modeling"),
hyperparam("--sample-break-mode", "none", save_dir_key=lambda val: f"bm_{val}"),
hyperparam("--tokens-per-sample", 1024, save_dir_key=lambda val: f"tps{val}"),
# hyperparam('--arch', 'transformer_lm_gpt', save_dir_key=lambda val: val),
hyperparam("--arch", "transformer_lm_gpt2_small", save_dir_key=lambda val: val),
# hyperparam('--arch', 'transformer_lm_gpt2_medium', save_dir_key=lambda val: val),
# hyperparam('--arch', 'transformer_lm_gpt2_big', save_dir_key=lambda val: val),
hyperparam(
"--share-decoder-input-output-embed", save_dir_key=lambda val: "share"
),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "b2_0.98"),
hyperparam("--adam-eps", 1e-8, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"cl{val}"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", 50e-4, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", max_update),
hyperparam("--warmup-updates", 10000, save_dir_key=lambda val: f"wu{val}"),
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--batch-size", 2, save_dir_key=lambda val: f"ms{val}"),
hyperparam("--required-batch-size-multiple", 1),
hyperparam("--update-freq", 1, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--max-update", max_update, save_dir_key=lambda val: f"mu{val}"),
hyperparam("--seed", 1, save_dir_key=lambda val: f"s{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 25),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_gpt2_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from agg_results import make_sweep_table, find_common_prefix
class TestAggResults(unittest.TestCase):
def test_make_sweep_table(self):
test_data_glob = "fb_sweep/mock_results/*/*.log"
df = make_sweep_table(test_data_glob, log_pattern="valid", interactive=True)
# these next two checks break if without removing common prefix from keys
assert "valid_ppl" not in df.columns
assert "ppl" in df.columns
train_inner_df = make_sweep_table(
test_data_glob, log_pattern="train_inner", interactive=True
)
assert train_inner_df.ppl.notnull().all()
train_df = make_sweep_table(
test_data_glob, log_pattern="train", interactive=True
)
assert train_df.ppl.notnull().all()
assert train_df.shape[1] == train_inner_df.shape[1] == 15
def test_find_common_prefix(self):
assert find_common_prefix([]) == ""
assert find_common_prefix(["train_wall"]) == ""
assert find_common_prefix(["train_wall", "train_ppl"]) == "train_"
assert find_common_prefix(["train_wall", "train_ppl", "wall"]) == ""
assert (
find_common_prefix(["train_wall", "train_ppl", "train_train_wall"])
== "train_"
)
|
bart_ls-main
|
fairseq-py/fb_sweep/test_agg_results.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
def get_grid(args):
return [
hyperparam("--train-subset", "train" if not args.local else "valid"),
hyperparam("--fp16"),
# hyperparam('--memory-efficient-fp16', save_dir_key=lambda val: 'me_fp16'),
hyperparam("--num-workers", 2),
hyperparam("--log-interval", 1),
hyperparam("--optimizer", "adam"),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--no-save"),
hyperparam("--task", "dummy_lm", save_dir_key=lambda val: val),
hyperparam("--tokens-per-sample", 512),
hyperparam("--max-sentences", 2),
# hyperparam('--arch', 'transformer_lm_gpt', save_dir_key=lambda val: val),
hyperparam("--arch", "transformer_lm_gpt2_tiny"),
hyperparam("--log-format", "json"),
hyperparam("--max-update", 10),
hyperparam("--lr", 3e-4),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_dummy_lm.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sweep
from sweep import hyperparam
PREDIFINED_GRID_FUNCTION = {}
def register_grid(name):
def register_grid_func(fn):
if name not in PREDIFINED_GRID_FUNCTION:
PREDIFINED_GRID_FUNCTION[name] = fn
return fn
return register_grid_func
def get_predefined_grid(name):
if name not in PREDIFINED_GRID_FUNCTION:
return []
else:
return PREDIFINED_GRID_FUNCTION[name]()
def add_extra_options_func(parser):
parser.add_argument("--max-update", help="max update", default=40000)
parser.add_argument(
"--finetune-from-model",
help="finetune from a pretrained model",
type=str,
default=None,
)
parser.add_argument(
"--max-tokens", help="max tokens per batch", type=int, default=3584
)
parser.add_argument("--arch", default="transformer")
parser.add_argument("--task", default="translation")
# equivalent to training on 16x GPUs
parser.add_argument("--update-freq", default=4)
# use double the default learning rate, since we're using --update-freq=16
# per token learning should be approximately constant;
# ideally momentent and 2nd momentent of adam should be adjusted accordingly but less important
parser.add_argument("--lr", default=10e-4)
parser.add_argument(
"--ddp-backend",
default=None,
)
parser.add_argument(
"--source-lang", help="source language for translation", type=str
)
parser.add_argument(
"--target-lang", help="target language for translation", type=str
)
@register_grid("transformer_16_16")
def get_transformer_16_16_grid():
return [
hyperparam("--arch", "transformer", save_dir_key=lambda val: val),
hyperparam(
"--share-all-embeddings",
True,
binary_flag=True,
save_dir_key=lambda val: "shem",
),
hyperparam("--encoder-layers", 16, save_dir_key=lambda val: f"ELS{val}"),
hyperparam("--decoder-layers", 16, save_dir_key=lambda val: f"DLS{val}"),
# this is a multiplier of embed dim
hyperparam(
"--encoder-ffn-embed-dim",
4 * 1024,
save_dir_key=lambda val: f"encffnx{val}",
),
hyperparam(
"--decoder-ffn-embed-dim",
4 * 1024,
save_dir_key=lambda val: f"decffnx{val}",
),
hyperparam("--encoder-embed-dim", 1024, save_dir_key=lambda val: f"E{val}"),
hyperparam("--decoder-embed-dim", 1024),
hyperparam("--encoder-attention-heads", 16, save_dir_key=lambda val: f"H{val}"),
hyperparam("--decoder-attention-heads", 16),
hyperparam(
"--encoder-normalize-before",
True,
binary_flag=True,
save_dir_key=lambda _: "NBF",
),
hyperparam("--decoder-normalize-before", True, binary_flag=True),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"ATTDRP{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"RELDRP{val}"),
]
@register_grid("mbart_large")
def get_transformer_mbart_large_grid():
return [
hyperparam("--arch", "mbart_large", save_dir_key=lambda val: val),
hyperparam("--lang-tok-style", "mbart"),
hyperparam(
"--layernorm-embedding", binary_flag=True, save_dir_key=lambda val: "lnemb"
),
hyperparam("--encoder-learned-pos"),
hyperparam("--decoder-learned-pos"),
hyperparam("--encoder-normalize-before"),
hyperparam("--decoder-normalize-before"),
hyperparam("--share-all-embeddings"),
hyperparam("--share-decoder-input-output-embed"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"ATTDRP{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"RELDRP{val}"),
]
@register_grid("transformer_12_12")
def get_transformer_12_12_grid():
return [
hyperparam("--arch", "transformer", save_dir_key=lambda val: val),
hyperparam(
"--share-all-embeddings",
True,
binary_flag=True,
save_dir_key=lambda val: "shem",
),
hyperparam("--encoder-layers", 12, save_dir_key=lambda val: f"ELS{val}"),
hyperparam("--decoder-layers", 12, save_dir_key=lambda val: f"DLS{val}"),
# this is a multiplier of embed dim
hyperparam(
"--encoder-ffn-embed-dim",
4 * 1024,
save_dir_key=lambda val: f"encffnx{val}",
),
hyperparam(
"--decoder-ffn-embed-dim",
4 * 1024,
save_dir_key=lambda val: f"decffnx{val}",
),
hyperparam("--encoder-embed-dim", 1024, save_dir_key=lambda val: f"E{val}"),
hyperparam("--decoder-embed-dim", 1024),
hyperparam("--encoder-attention-heads", 16, save_dir_key=lambda val: f"H{val}"),
hyperparam("--decoder-attention-heads", 16),
hyperparam(
"--encoder-normalize-before",
True,
binary_flag=True,
save_dir_key=lambda _: "NBF",
),
hyperparam("--decoder-normalize-before", True, binary_flag=True),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"ATTDRP{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"RELDRP{val}"),
]
def get_grid(args):
max_update = args.max_update
task = args.task
grids = [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--max-update", max_update),
hyperparam(
"--source-lang", args.source_lang, save_dir_key=lambda val: f"@{val}"
),
hyperparam(
"--target-lang", args.target_lang, save_dir_key=lambda val: f"@{val}"
),
hyperparam(
"--update-freq", args.update_freq, save_dir_key=lambda val: f"up{val}"
),
hyperparam("--task", task),
hyperparam(
"--share-all-embeddings",
[True],
binary_flag=True,
save_dir_key=lambda val: "shareemb",
),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam(
"--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta0.9,0.98"
),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--warmup-init-lr", 1e-7, save_dir_key=lambda val: f"initlr{val}"),
hyperparam("--warmup-updates", 4000, save_dir_key=lambda val: f"warmup{val}"),
hyperparam("--lr", args.lr, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", 1e-9),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--dropout", 0.3, save_dir_key=lambda val: f"drop{val}"),
hyperparam("--weight-decay", 0.0, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--criterion", "label_smoothed_cross_entropy"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--seed", [2], save_dir_key=lambda val: f"seed{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 100 if not args.local else 10),
]
if args.ddp_backend:
grids.append(
hyperparam(
"--ddp-backend", args.ddp_backend, save_dir_key=lambda val: f"{val}"
)
)
if args.max_tokens:
grids.append(
hyperparam(
"--max-tokens", args.max_tokens, save_dir_key=lambda val: f"maxtok{val}"
),
)
if args.finetune_from_model:
grids.append(hyperparam("--finetune-from-model", args.finetune_from_model))
arch_grid = get_predefined_grid(args.arch)
arch_grid = (
arch_grid
if arch_grid
else [
hyperparam("--arch", args.arch, save_dir_key=lambda val: val),
]
)
grids += arch_grid
return grids
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(
get_grid, postprocess_hyperparams, add_extra_options_func=add_extra_options_func
)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_translation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from itertools import takewhile
from glob import glob
import json
from pathlib import Path
import re
from typing import List, Dict
import os
try:
import pandas as pd
from fire import Fire
except ImportError:
raise ImportError(
"results aggregation has extra dependencies. Run `pip install pandas fire tabulate`."
)
def find_common_prefix(parsed_keys: List[str]) -> str:
# Finding common prefix using itertools.takewhile
if len(parsed_keys) <= 1:
return ""
return "".join(
c[0] for c in takewhile(lambda x: all(x[0] == y for y in x), zip(*parsed_keys))
)
def remove_prefix(text: str, prefix: str):
if text.startswith(prefix):
return text[len(prefix):]
return text
def remove_common_prefix_from_keys(entry: Dict[str, str]):
common_prefix = find_common_prefix(entry.keys())
if not common_prefix:
return entry
else:
return {remove_prefix(k, common_prefix): v for k, v in entry.items()}
def find_last_matching_line(reversed_lns: List[str], pattern: str) -> Dict[str, str]:
""" Find a line with train loss in it and try to read it to json."""
matched_line = None
for l in reversed_lns:
if pattern in l and "epoch" in l:
matched_line = l
break
if matched_line is None:
raise ValueError(f"none of lines had the substring {pattern}")
if "{" in matched_line: # log_format == 'json':
strang = matched_line.split("|")[-1]
record = json.loads(strang)
elif pattern == "train_inner":
strang = matched_line.split("|")[-1]
record = parse_train_inner_record(strang)
else:
record = parse_pipe_separated_record(matched_line)
epoch = record.pop("epoch", None)
sanitized_record = remove_common_prefix_from_keys(record)
sanitized_record["epoch"] = epoch
return sanitized_record
def parse_pipe_separated_record(rec2):
pipe_sep = [
x.strip()
for x in rec2.split("INFO")[-1].split("|")
if x.strip().count(" ") == 1
]
return dict([entry.split(" ") for entry in pipe_sep])
def parse_train_inner_record(record):
kv_pairs: str = re.compile(r"(\/ \d+\s)(.+)").search(record).groups()[-1]
return dict([entry.strip().split("=") for entry in kv_pairs.split(",")])
def find_all_matching_lines(lns, pattern="train_inner"):
"""Read train_inner logs (each step)"""
records = []
for l in lns:
if pattern not in l:
continue
strang = l.split("|")[-1]
record = json.loads(strang)
records.append(record)
return pd.DataFrame(records).pipe(tryfloat)
def tryfloat(x):
if isinstance(x, pd.Series):
try:
return x.astype(float)
except Exception:
return x
elif isinstance(x, pd.DataFrame):
return x.apply(tryfloat)
else:
try:
return float(x)
except TypeError:
return x
def reverse_readline(filename, buf_size=8192):
"""A generator that returns the lines of a file in reverse order"""
with open(filename) as fh:
segment = None
offset = 0
fh.seek(0, os.SEEK_END)
file_size = remaining_size = fh.tell()
while remaining_size > 0:
offset = min(file_size, offset + buf_size)
fh.seek(file_size - offset)
buffer = fh.read(min(remaining_size, buf_size))
remaining_size -= buf_size
lines = buffer.split("\n")
# The first line of the buffer is probably not a complete line so
# we'll save it and append it to the last line of the next buffer
# we read
if segment is not None:
# If the previous chunk starts right from the beginning of line
# do not concat the segment to the last line of new chunk.
# Instead, yield the segment first
if buffer[-1] != "\n":
lines[-1] += segment
else:
yield segment
segment = lines[0]
for index in range(len(lines) - 1, 0, -1):
if lines[index]:
yield lines[index]
# Don't yield None if the file was empty
if segment is not None:
yield segment
def make_sweep_table(
pattern,
log_pattern="train_inner",
csv_path=None,
keep_cols=None,
sort_col=None,
interactive=False,
):
"""
For each file matching pattern, extract the last json line matching log_pattern, tabulate.
Args:
pattern: (str) files to consider, e.g.
/checkpoint/sshleifer/2020-11-23/*/train.log* (should be quoted on command line)
log_pattern: (str): usually train, train_inner or valid
csv_path: (str) where to save if suffix is .md will save markdown, otherwise csv.
keep_cols: (list) (comma separated from CL) column names to show
sort_col: (str) column to sort resulting table by
interactive: (bool) just return the DataFrame
Usage:
./fb_sweep/agg_results.py "/checkpoint/sshleifer/2020-12-1*/big_run*/train.log" \
--log-pattern valid \
--csv_path big_run_sweep_results \
--keep_cols train_ppl,train_wps,train_loss \
--sort_col train_wps
"""
records = []
matches = list(glob(pattern, recursive=True))
if not matches:
raise FileNotFoundError(f"found no files matching {pattern}")
for f in matches:
lns = reverse_readline(f)
try:
record = find_last_matching_line(lns, pattern=log_pattern)
except ValueError as e:
print(f"failed to parse {f} with {str(e)}")
continue
record["parent_path"] = Path(f).parent.name
record["fname"] = Path(f).name
if f.startswith("multirun/"): # produced by hydra
_, date, t, *__ = f.split("/")
record["date"] = f"{date}-{t}"
records.append(record)
if len(records) == 0:
raise ValueError(
f"None of the {len(matches)} log files are ready to be parsed."
)
df = pd.DataFrame(records)
# Use the more informative path column. For sweep output this is parent path.
# For manual log files it's usually fname
path_col = (
"parent_path"
if (df["parent_path"].nunique() > df["fname"].nunique())
else "fname"
)
df = df.set_index(path_col).pipe(tryfloat).round(2).sort_index()
df = df.rename(columns=lambda x: x.replace(".", "_"))
if keep_cols is not None:
df = df[list(keep_cols)]
if sort_col is not None:
df = df.sort_values(sort_col)
if interactive:
return df
if csv_path is not None:
if csv_path.endswith("md"):
df.to_markdown(Path(csv_path).open("w"))
else:
df.to_csv(csv_path)
print(df.to_markdown(tablefmt="grid"))
if __name__ == "__main__":
# Usage: see docstring of make_sweep_table
Fire(make_sweep_table)
|
bart_ls-main
|
fairseq-py/fb_sweep/agg_results.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
MODEL = {
"levenshtein_transformer": "lev_base",
"levenshtein_transformer_wmt_en_de": "lev_base",
"levenshtein_transformer_big": "lev_big",
"levenshtein_transformer_wmt_en_de_big": "lev_big",
"nonautoregressive_transformer": "nat",
"nacrf_transformer": "nat_crf",
"iterative_nonautoregressive_transformer": "inat",
"cmlm_transformer": "cmlm",
"insertion_transformer": "ins",
}
def get_at_grid(args):
"""
Auto-regressive Transformer
"""
return [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
# hyperparam('--ddp-backend', 'no_c10d', save_dir_key=lambda val: 'no_c10d'),
hyperparam("--max-update", 300000),
# equivalent to training on 16x GPUs
# hyperparam('--update-freq', 16, save_dir_key=lambda val: f'updatefreq{val}'),
hyperparam("--arch", ["transformer_small"], save_dir_key=lambda val: val),
hyperparam(
"--share-all-embeddings",
[True],
binary_flag=True,
save_dir_key=lambda val: "shareemb",
),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam(
"--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta0.9,0.98"
),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--warmup-init-lr", 1e-7, save_dir_key=lambda val: f"initlr{val}"),
hyperparam("--warmup-updates", 4000, save_dir_key=lambda val: f"warmup{val}"),
# use double the default learning rate, since we're using --update-freq=16
hyperparam("--lr", 0.0005, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", 1e-9),
hyperparam("--clip-norm", 25, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"drop{val}"),
hyperparam("--weight-decay", 0.0001, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--criterion", "label_smoothed_cross_entropy"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--max-tokens", 4096, save_dir_key=lambda val: f"maxtok{val}"),
hyperparam("--seed", [2], save_dir_key=lambda val: f"seed{val}"),
hyperparam("--keep-last-epochs", 15),
hyperparam("--keep-interval-updates", 5),
hyperparam("--log-format", "simple"),
hyperparam("--log-interval", 100),
]
def get_grid_levenshtein(args):
return [
# task, model, criterion
hyperparam("--task", "translation_lev"),
hyperparam(
"--arch",
"levenshtein_transformer_wmt_en_de",
save_dir_key=lambda val: MODEL[val],
),
# hyperparam('--arch', [
# 'levenshtein_transformer_wmt_en_de_big',
# 'levenshtein_transformer_wmt_en_de'
# ],
# save_dir_key=lambda val: MODEL[val]),
hyperparam("--criterion", "label_smoothed_dual_imitation"),
hyperparam("--noise", "random_delete"),
# task specific
hyperparam("--fixed-validation-seed", 7),
hyperparam("--append-bos", binary_flag=True),
# model
hyperparam("--encoder-learned-pos", binary_flag=True),
hyperparam(
"--decoder-learned-pos",
binary_flag=True,
save_dir_key=lambda val: f"lp" if val else f"sp",
),
hyperparam("--share-all-embeddings", binary_flag=True),
hyperparam(
"--apply-bert-init",
binary_flag=True,
save_dir_key=lambda val: f"bert" if val else f"",
),
hyperparam("--early-exit", "(6,6,6)", save_dir_key=lambda val: f"ext-{val}"),
# general
hyperparam("--activation-fn", "gelu", save_dir_key=lambda val: f"act-{val}"),
# hyperparam('--max-tokens', 8192, save_dir_key=lambda val: f'b{val}'),
hyperparam("--max-tokens", 4096, save_dir_key=lambda val: f"b8192"),
hyperparam("--update-freq", 2),
hyperparam("--fp16", binary_flag=True),
hyperparam("--optimizer", "adam"),
hyperparam("--lr", 0.0005, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", "1e-09"),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--max-update", 400000),
hyperparam("--warmup-updates", 10000),
hyperparam("--keep-last-epochs", 15),
hyperparam("--keep-interval-updates", 5),
hyperparam("--warmup-init-lr", "1e-07"),
hyperparam("--adam-betas", "(0.9, 0.999)"),
hyperparam("--dropout", 0.3),
hyperparam("--label-smoothing", 0.1),
hyperparam("--weight-decay", 0.01),
hyperparam("--save-interval-updates", 10000),
hyperparam("--log-format", "simple"),
hyperparam("--log-interval", 5),
hyperparam("--seed", 2),
# hyperparam('--seed', [1, 11], save_dir_key=lambda val: f'prefix{val % 10}'),
# hyperparam('--seed', [3, 5, 7, 13, 15, 17], save_dir_key=lambda val: f'prefix{val % 10}'),
# hyperparam('--seed', 5, save_dir_key=lambda val: f'fuse-0.{val}'),
]
def get_grid_progressive(args):
return [
# task, model, criterion
hyperparam("--task", "translation_lev"),
hyperparam("--arch", "progressive_transformer"),
hyperparam("--criterion", "label_smoothed_dual_imitation"),
hyperparam("--noise", "full_mask"),
# task specific
hyperparam("--fixed-validation-seed", 7),
hyperparam("--append-bos", binary_flag=True),
# model
hyperparam("--encoder-learned-pos", binary_flag=True),
hyperparam(
"--decoder-learned-pos",
binary_flag=True,
save_dir_key=lambda val: f"lp" if val else f"sp",
),
hyperparam("--share-all-embeddings", binary_flag=True),
hyperparam(
"--apply-bert-init",
binary_flag=True,
save_dir_key=lambda val: f"bert" if val else f"",
),
# model specific
hyperparam("--passing-unk", binary_flag=True, save_dir_key=lambda val: f"pu"),
hyperparam("--pred-length-offset", binary_flag=True),
# hyperparam('--sg-length-pred', binary_flag=True, save_dir_key=lambda val: f'sg' if val else f''),
hyperparam("--output-checker", binary_flag=True),
hyperparam("--pred-length-format", "mean"),
hyperparam("--length-loss-factor", 0.1, save_dir_key=lambda val: f"lf{val}"),
hyperparam("--fixed-depth", 9, save_dir_key=lambda val: f"d-{val}"),
# general
hyperparam("--activation-fn", "gelu", save_dir_key=lambda val: f"act-{val}"),
hyperparam("--max-tokens", 5192, save_dir_key=lambda val: f"bz{val}"),
hyperparam("--fp16", binary_flag=True),
hyperparam("--optimizer", "adam"),
hyperparam("--lr", 0.0005, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", "1e-09"),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--max-update", 400000),
hyperparam("--warmup-updates", 10000),
hyperparam("--keep-last-epochs", 15),
hyperparam("--keep-interval-updates", 5),
hyperparam("--warmup-init-lr", "1e-07"),
hyperparam("--adam-betas", "(0.9, 0.999)"),
hyperparam("--dropout", 0.3),
hyperparam("--label-smoothing", 0.1),
hyperparam("--weight-decay", 0.01),
hyperparam("--save-interval-updates", 10000),
hyperparam("--log-format", "simple"),
hyperparam("--log-interval", 5),
]
def get_grid_nat(args):
return [
# task, model, criterion
hyperparam("--task", "translation_lev"),
hyperparam(
"--arch",
"nonautoregressive_transformer",
save_dir_key=lambda val: MODEL[val],
),
hyperparam("--criterion", "label_smoothed_dual_imitation"),
# task specific
hyperparam("--fixed-validation-seed", 7),
hyperparam("--append-bos", binary_flag=True),
hyperparam("--noise", "full_mask"),
# model
hyperparam("--encoder-learned-pos", binary_flag=True),
hyperparam(
"--decoder-learned-pos",
binary_flag=True,
save_dir_key=lambda val: f"lp" if val else f"sp",
),
hyperparam("--share-all-embeddings", binary_flag=True),
hyperparam(
"--apply-bert-init",
binary_flag=True,
save_dir_key=lambda val: f"bert" if val else f"",
),
# length prediction
hyperparam("--pred-length-offset", binary_flag=True),
# hyperparam('--sg-length-pred', binary_flag=True, save_dir_key=lambda val: f'sg' if val else f''),
hyperparam("--length-loss-factor", 0.1, save_dir_key=lambda val: f"lf{val}"),
hyperparam(
"--src-embedding-copy", binary_flag=True, save_dir_key=lambda val: "cp"
),
# n-gram loss
# hyperparam('--ngram-predictor',
# 4,
# save_dir_key=lambda val: f'{val}-gram'),
# general
hyperparam("--activation-fn", "gelu", save_dir_key=lambda val: f"act-{val}"),
hyperparam("--max-tokens", 4096, save_dir_key=lambda val: f"b{val}"),
hyperparam("--fp16", binary_flag=True),
hyperparam("--optimizer", "adam"),
hyperparam("--lr", 0.0005, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", "1e-09"),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--max-update", 400000),
hyperparam("--warmup-updates", 10000),
hyperparam("--keep-last-epochs", 15),
hyperparam("--keep-interval-updates", 5),
hyperparam("--warmup-init-lr", "1e-07"),
hyperparam("--adam-betas", "(0.9, 0.999)"),
hyperparam("--dropout", 0.3),
hyperparam("--label-smoothing", 0.1),
hyperparam("--weight-decay", 0.01),
hyperparam("--save-interval-updates", 10000),
hyperparam("--log-format", "simple"),
hyperparam("--log-interval", 5),
# hyperparam('--seed', [1, 2, 3, 4, 5, 6, 7], save_dir_key=lambda val: f'rb-{val}'),
]
def get_grid_nacrf(args):
return [
# task, model, criterion
hyperparam("--task", "translation_lev"),
hyperparam("--arch", "nacrf_transformer", save_dir_key=lambda val: MODEL[val]),
hyperparam("--criterion", "nat_loss"),
# task specific
hyperparam("--fixed-validation-seed", 7),
hyperparam("--noise", "full_mask"),
# model
hyperparam("--encoder-learned-pos", binary_flag=True),
hyperparam(
"--decoder-learned-pos",
binary_flag=True,
save_dir_key=lambda val: f"lp" if val else f"sp",
),
hyperparam("--share-all-embeddings", binary_flag=True),
hyperparam(
"--apply-bert-init",
binary_flag=True,
save_dir_key=lambda val: f"bert" if val else f"",
),
# length prediction
hyperparam("--pred-length-offset", binary_flag=True),
# hyperparam('--sg-length-pred', binary_flag=True, save_dir_key=lambda val: f'sg' if val else f''),
hyperparam("--length-loss-factor", 0.1, save_dir_key=lambda val: f"lf{val}"),
# general
hyperparam("--activation-fn", "gelu", save_dir_key=lambda val: f"act-{val}"),
hyperparam("--max-tokens", 8192, save_dir_key=lambda val: f"b{val}"),
hyperparam("--fp16", binary_flag=True),
hyperparam("--optimizer", "adam"),
hyperparam("--lr", 0.0005, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", "1e-09"),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--max-update", 400000),
hyperparam("--warmup-updates", 10000),
hyperparam("--keep-last-epochs", 15),
hyperparam("--keep-interval-updates", 5),
hyperparam("--warmup-init-lr", "1e-07"),
hyperparam("--adam-betas", "(0.9, 0.999)"),
hyperparam("--dropout", 0.3),
hyperparam("--label-smoothing", 0.1),
hyperparam("--weight-decay", 0.01),
hyperparam("--save-interval-updates", 10000),
hyperparam("--log-format", "simple"),
hyperparam("--log-interval", 5),
]
def get_grid_inat(args):
return [
# task, model, criterion
hyperparam("--task", "translation_lev"),
hyperparam(
"--arch",
"iterative_nonautoregressive_transformer",
save_dir_key=lambda val: MODEL[val],
),
hyperparam("--criterion", "label_smoothed_dual_imitation"),
# task specific
hyperparam("--fixed-validation-seed", 7),
hyperparam("--append-bos", binary_flag=True),
hyperparam("--noise", "full_mask"),
# model
hyperparam("--encoder-learned-pos", True, binary_flag=True),
hyperparam(
"--decoder-learned-pos",
True,
binary_flag=True,
save_dir_key=lambda val: f"lp" if val else f"sp",
),
hyperparam("--share-all-embeddings", binary_flag=True),
hyperparam(
"--apply-bert-init",
binary_flag=True,
save_dir_key=lambda val: f"bert" if val else f"",
),
# iterative refinement settings
hyperparam("--train-step", 3, save_dir_key=lambda val: f"iter{val}"),
hyperparam("--dae-ratio", 0.5, save_dir_key=lambda val: f"dae{val}"),
hyperparam(
"--stochastic-approx", True, binary_flag=True, save_dir_key=lambda val: "sa"
),
# length prediction
hyperparam("--pred-length-offset", binary_flag=True),
# hyperparam('--sg-length-pred', binary_flag=True, save_dir_key=lambda val: f'sg' if val else f''),
hyperparam("--length-loss-factor", 0.1, save_dir_key=lambda val: f"lf{val}"),
# hyperparam('--src-embedding-copy', [True, False],
# binary_flag=True,
# save_dir_key=lambda val: 'copy'),
# n-gram loss
# hyperparam('--ngram-predictor',
# 4,
# save_dir_key=lambda val: f'{val}-gram'),
# general
hyperparam("--activation-fn", "gelu", save_dir_key=lambda val: f"{val}"),
hyperparam("--max-tokens", 2048, save_dir_key=lambda val: f"b{val}"),
hyperparam("--update-freq", 2, save_dir_key=lambda val: f"u{val}"),
hyperparam("--fp16", binary_flag=True),
hyperparam("--optimizer", "adam"),
hyperparam("--lr", 0.0005, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", "1e-09"),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--max-update", 400000),
hyperparam("--warmup-updates", 10000),
hyperparam("--keep-last-epochs", 5),
hyperparam("--keep-interval-updates", 5),
hyperparam("--warmup-init-lr", "1e-07"),
hyperparam("--adam-betas", "(0.9, 0.999)"),
hyperparam("--dropout", 0.3),
hyperparam("--label-smoothing", 0.1),
hyperparam("--weight-decay", 0.01),
hyperparam("--save-interval-updates", 10000),
hyperparam("--log-format", "simple"),
hyperparam("--log-interval", 5),
# hyperparam('--seed', [1, 2, 3, 4, 5, 6, 7], save_dir_key=lambda val: f'rb-{val}'),
]
def get_grid_cmlm(args):
return [
# task, model, criterion
hyperparam("--task", "translation_lev"),
hyperparam("--arch", "cmlm_transformer", save_dir_key=lambda val: MODEL[val]),
hyperparam("--criterion", "label_smoothed_dual_imitation"),
# task specific
hyperparam("--fixed-validation-seed", 7),
hyperparam("--append-bos", binary_flag=True),
hyperparam("--noise", "random_mask"),
# model
hyperparam("--encoder-learned-pos", True, binary_flag=True),
hyperparam(
"--decoder-learned-pos",
True,
binary_flag=True,
save_dir_key=lambda val: f"lp" if val else f"sp",
),
hyperparam("--share-all-embeddings", binary_flag=True),
hyperparam(
"--apply-bert-init",
binary_flag=True,
save_dir_key=lambda val: f"bert" if val else f"",
),
# length prediction
hyperparam("--pred-length-offset", binary_flag=True),
# hyperparam('--sg-length-pred', binary_flag=True, save_dir_key=lambda val: f'sg' if val else f''),
hyperparam("--length-loss-factor", 0.1, save_dir_key=lambda val: f"lf{val}"),
# general
hyperparam("--activation-fn", "gelu", save_dir_key=lambda val: f"{val}"),
hyperparam("--max-tokens", 4096, save_dir_key=lambda val: f"b{val}"),
hyperparam("--update-freq", 2, save_dir_key=lambda val: f"u{val}"),
hyperparam("--fp16", binary_flag=True),
hyperparam("--optimizer", "adam"),
hyperparam("--lr", 0.0005, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", "1e-09"),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--max-update", 400000),
hyperparam("--warmup-updates", 10000),
hyperparam("--keep-last-epochs", 5),
hyperparam("--keep-interval-updates", 5),
hyperparam("--warmup-init-lr", "1e-07"),
hyperparam("--adam-betas", "(0.9, 0.999)"),
hyperparam("--dropout", 0.3),
hyperparam("--label-smoothing", 0.1),
hyperparam("--weight-decay", 0.01),
hyperparam("--save-interval-updates", 10000),
hyperparam("--log-format", "simple"),
hyperparam("--log-interval", 5),
]
def get_grid_insertion(args):
return [
# task, model, criterion
hyperparam("--task", "translation_lev"),
hyperparam(
"--arch", "insertion_transformer", save_dir_key=lambda val: MODEL[val]
),
hyperparam("--criterion", "label_smoothed_dual_imitation"),
hyperparam("--noise", "random_delete"),
# task specific
hyperparam("--fixed-validation-seed", 7),
hyperparam("--append-bos", binary_flag=True),
# model
hyperparam("--encoder-learned-pos", binary_flag=True),
hyperparam(
"--decoder-learned-pos",
binary_flag=True,
save_dir_key=lambda val: f"lp" if val else f"sp",
),
hyperparam("--share-all-embeddings", binary_flag=True),
hyperparam(
"--apply-bert-init",
binary_flag=True,
save_dir_key=lambda val: f"bert" if val else f"",
),
hyperparam(
"--label-tau",
1,
save_dir_key=lambda val: f"tau{val}" if val < 1000 else f"uniform",
),
# general
hyperparam("--activation-fn", "gelu", save_dir_key=lambda val: f"act-{val}"),
# hyperparam('--max-tokens', 6144, save_dir_key=lambda val: f'bz{val}'),
hyperparam("--max-tokens", 8192, save_dir_key=lambda val: f"b{val}"),
hyperparam("--fp16", binary_flag=True),
hyperparam("--optimizer", "adam"),
hyperparam("--lr", 0.0005, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", "1e-09"),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--max-update", 400000),
hyperparam("--warmup-updates", 10000),
hyperparam("--warmup-init-lr", "1e-07"),
hyperparam("--adam-betas", "(0.9, 0.999)"),
hyperparam("--dropout", 0.3),
hyperparam("--label-smoothing", 0.1),
hyperparam("--weight-decay", 0.01),
hyperparam("--save-interval-updates", 10000),
hyperparam("--keep-last-epochs", 15),
hyperparam("--keep-interval-updates", 5),
hyperparam("--log-format", "simple"),
hyperparam("--log-interval", 5),
# hyperparam('--seed', [1, 2, 3, 4, 5, 6, 7], save_dir_key=lambda val: f'rb-{val}'),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
# if config['--seq-beam'].current_value <= 8:
# config['--max-tokens'].current_value = 400
# else:
# config['--max-tokens'].current_value = 300
# decoder_embed_dim = config['--decoder_embed_dim'].current_value
#
# config['--decoder-ffn-embed-dim'] = 4 * decoder_embed_dim
# config['--decoder-attention-heads'] = decoder_embed_dim // 16
# dataset, name = sweep_datasets(config['--seed'].current_value)
# args.data = dataset
# args.prefix = name
# args.seed = 1
if __name__ == "__main__":
sweep.main(get_grid_nacrf, postprocess_hyperparams)
# sweep.main(get_grid_levenshtein_pp, postprocess_hyperparams)
# sweep.main(get_at_grid, postprocess_hyperparams)
# sweep.main(get_grid_inat, postprocess_hyperparams)
# sweep.main(get_grid_nat, postprocess_hyperparams)
# sweep.main(get_grid_levenshtein, postprocess_hyperparams)
# sweep.main(get_grid_progressive, postprocess_hyperparams)
# sweep.main(get_grid_cmlm, postprocess_hyperparams)
# sweep.main(get_grid_insertion, postprocess_hyperparams)
# sweep.main(get_grid_reinforce, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_nonautoregressive_translation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
def get_grid(args):
grid = []
total_num_udpates = 100000
warmup_updates = 500
num_data_loaders = 4
arch = "bart_large"
task = "denoising"
criterion = "cross_entropy"
adam_eps = 1e-06
weight_decay = 0.01
update_freq = 1
grid += [
hyperparam(
"--restore-file",
"/data/home/xwhan/fairseq-py/checkpoints/bart.large/model.pt",
)
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--task", task),
hyperparam("--criterion", criterion),
]
grid += [
hyperparam("--max-tokens", 2048, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam(
"--max-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"
),
hyperparam("--required-batch-size-multiple", 1),
hyperparam(
"--sample-break-mode", ["complete"], save_dir_key=lambda val: f"brk_{val}"
),
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", weight_decay, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta9999"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
]
# lr scheduler
grid += [
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", 1e-05, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
# validation and checkpoint settings
grid += [
# hyperparam("--no-save"),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--reset-meters"),
hyperparam("--reset-optimizer"),
]
grid += [
hyperparam("--share-all-embeddings"),
hyperparam("--layernorm-embedding"),
hyperparam("--share-decoder-input-output-embed"),
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
]
grid += [
hyperparam("--poisson-lambda", 3.5, save_dir_key=lambda val: f"poi_lam{val}"),
hyperparam("--mask", 0.3, save_dir_key=lambda val: f"mask{val}"),
hyperparam(
"--mask-length", "span-poisson", save_dir_key=lambda val: f"mask_len{val}"
),
hyperparam("--replace-length", 1, save_dir_key=lambda val: f"rpl_len{val}"),
hyperparam("--rotate", 0, save_dir_key=lambda val: f"rotate{val}"),
hyperparam("--mask-random", 0.1, save_dir_key=lambda val: f"mask_rand{val}"),
hyperparam("--insert", 0, save_dir_key=lambda val: f"ins{val}"),
hyperparam(
"--permute-sentences", 1.0, save_dir_key=lambda val: f"perm_sen{val}"
),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_bart_large.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
from sweep_wmt_en2de_transformer_big_common import get_common_grid
COMMON_GRID = get_common_grid()
def get_grid(args):
return COMMON_GRID + [
hyperparam(
"--distributed-wrapper", "SlowMo", save_dir_key=lambda val: f"{val}"
),
hyperparam(
"--slowmo-momentum", 0.5, save_dir_key=lambda val: f"slowmo_mom{val}"
),
hyperparam(
"--slowmo-algorithm", "LocalSGD", save_dir_key=lambda val: "localsgd"
),
hyperparam(
"--localsgd-frequency", 3, save_dir_key=lambda val: f"localsgd_freq{val}"
),
hyperparam("--nprocs-per-node", 8),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_wmt_en2de_transformer_big_localsgd.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sweep
from sweep import hyperparam
PREDIFINED_GRID_FUNCTION = {}
def register_grid(name):
def register_grid_func(fn):
if name not in PREDIFINED_GRID_FUNCTION:
PREDIFINED_GRID_FUNCTION[name] = fn
return fn
return register_grid_func
def get_predefined_grid(name):
if name not in PREDIFINED_GRID_FUNCTION:
return []
else:
return PREDIFINED_GRID_FUNCTION[name]()
def add_extra_options_func(parser):
parser.add_argument("--max-update", help="max update", default=40000)
parser.add_argument(
"--finetune-from-model",
help="finetune from a pretrained model",
type=str,
default=None,
)
parser.add_argument(
"--lang-dict",
help="a file containing a list of languages to support",
type=str,
default=None,
)
parser.add_argument(
"--max-tokens", help="max tokens per batch", type=int, default=None
)
parser.add_argument("--arch", default="transformer")
parser.add_argument("--task", default="translation_multi_simple_epoch")
parser.add_argument(
"--langs",
default=None,
type=str,
help="a list of languages comma sperated languages which can appear in lang-pairs; "
"note that the ordering determines language token IDs",
)
parser.add_argument(
"--lang-pairs", help="lang pairs for multilingual training", type=str
)
parser.add_argument(
"--sampling-method", help="sampling method", default="temperature"
)
parser.add_argument(
"--sampling-temperature", help="sampling temperature", default=5
)
parser.add_argument(
"--encoder-langtok", help="add src language token to encoder", default="src"
)
parser.add_argument("--decoder-langtok", default=True, action="store_true")
parser.add_argument("--virtual-epoch-size", default=None)
parser.add_argument("--virtual-data-size", default=None)
# equivalent to training on 16x GPUs
parser.add_argument("--update-freq", default=16)
# use double the default learning rate, since we're using --update-freq=16
# per token learning should be approximately constant;
# ideally momentent and 2nd momentent of adam should be adjusted accordingly but less important
parser.add_argument("--lr", default=10e-4)
parser.add_argument("--dropout", default=0.1)
parser.add_argument(
"--ddp-backend",
default=None,
)
parser.add_argument(
"--enable-reservsed-directions-shared-datasets",
default=False,
action="store_true",
)
parser.add_argument("--save-interval-updates", default=None)
@register_grid("transformer_24_24")
def get_transformer_24_24_grid():
return [
hyperparam("--arch", "transformer", save_dir_key=lambda val: val),
hyperparam(
"--share-all-embeddings",
True,
binary_flag=True,
save_dir_key=lambda val: "shem",
),
hyperparam("--encoder-layers", 24, save_dir_key=lambda val: f"ELS{val}"),
hyperparam("--decoder-layers", 24, save_dir_key=lambda val: f"DLS{val}"),
# this is a multiplier of embed dim
hyperparam(
"--encoder-ffn-embed-dim",
8 * 1024,
save_dir_key=lambda val: f"encffnx{val}",
),
hyperparam(
"--decoder-ffn-embed-dim",
8 * 1024,
save_dir_key=lambda val: f"decffnx{val}",
),
hyperparam("--encoder-embed-dim", 1024, save_dir_key=lambda val: f"E{val}"),
hyperparam("--decoder-embed-dim", 1024),
hyperparam("--encoder-attention-heads", 16, save_dir_key=lambda val: f"H{val}"),
hyperparam("--decoder-attention-heads", 16),
hyperparam(
"--encoder-normalize-before",
True,
binary_flag=True,
save_dir_key=lambda _: "NBF",
),
hyperparam("--decoder-normalize-before", True, binary_flag=True),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"ATTDRP{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"RELDRP{val}"),
hyperparam("--memory-efficient-fp16", True, binary_flag=True),
hyperparam("--encoder-layerdrop", 0.05),
hyperparam("--decoder-layerdrop", 0.05),
]
@register_grid("transformer_16_16")
def get_transformer_16_16_grid():
return [
hyperparam("--arch", "transformer", save_dir_key=lambda val: val),
hyperparam(
"--share-all-embeddings",
True,
binary_flag=True,
save_dir_key=lambda val: "shem",
),
hyperparam("--encoder-layers", 16, save_dir_key=lambda val: f"ELS{val}"),
hyperparam("--decoder-layers", 16, save_dir_key=lambda val: f"DLS{val}"),
# this is a multiplier of embed dim
hyperparam(
"--encoder-ffn-embed-dim",
4 * 1024,
save_dir_key=lambda val: f"encffnx{val}",
),
hyperparam(
"--decoder-ffn-embed-dim",
4 * 1024,
save_dir_key=lambda val: f"decffnx{val}",
),
hyperparam("--encoder-embed-dim", 1024, save_dir_key=lambda val: f"E{val}"),
hyperparam("--decoder-embed-dim", 1024),
hyperparam("--encoder-attention-heads", 16, save_dir_key=lambda val: f"H{val}"),
hyperparam("--decoder-attention-heads", 16),
hyperparam(
"--encoder-normalize-before",
True,
binary_flag=True,
save_dir_key=lambda _: "NBF",
),
hyperparam("--decoder-normalize-before", True, binary_flag=True),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"ATTDRP{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"RELDRP{val}"),
]
@register_grid("mbart_large")
def get_transformer_mbart_large_grid():
return [
hyperparam("--arch", "mbart_large", save_dir_key=lambda val: val),
hyperparam("--lang-tok-style", "mbart"),
hyperparam(
"--layernorm-embedding", binary_flag=True, save_dir_key=lambda val: "lnemb"
),
hyperparam("--encoder-learned-pos"),
hyperparam("--decoder-learned-pos"),
hyperparam("--encoder-normalize-before"),
hyperparam("--decoder-normalize-before"),
hyperparam("--share-all-embeddings"),
hyperparam("--share-decoder-input-output-embed"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"ATTDRP{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"RELDRP{val}"),
hyperparam("--warmup-updates", 2000, save_dir_key=lambda val: f"warmup{val}"),
]
@register_grid("transformer_12_12")
def get_transformer_12_12_grid():
return [
hyperparam("--arch", "transformer", save_dir_key=lambda val: val),
hyperparam(
"--share-all-embeddings",
True,
binary_flag=True,
save_dir_key=lambda val: "shem",
),
hyperparam("--encoder-layers", 12, save_dir_key=lambda val: f"ELS{val}"),
hyperparam("--decoder-layers", 12, save_dir_key=lambda val: f"DLS{val}"),
# this is a multiplier of embed dim
hyperparam(
"--encoder-ffn-embed-dim",
4 * 1024,
save_dir_key=lambda val: f"encffnx{val}",
),
hyperparam(
"--decoder-ffn-embed-dim",
4 * 1024,
save_dir_key=lambda val: f"decffnx{val}",
),
hyperparam("--encoder-embed-dim", 1024, save_dir_key=lambda val: f"E{val}"),
hyperparam("--decoder-embed-dim", 1024),
hyperparam("--encoder-attention-heads", 16, save_dir_key=lambda val: f"H{val}"),
hyperparam("--decoder-attention-heads", 16),
hyperparam(
"--encoder-normalize-before",
True,
binary_flag=True,
save_dir_key=lambda _: "NBF",
),
hyperparam("--decoder-normalize-before", True, binary_flag=True),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"ATTDRP{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"RELDRP{val}"),
]
def get_grid(args):
max_update = args.max_update
task = args.task
sampling_method = args.sampling_method
sampling_temperature = args.sampling_temperature
encoder_langtok = args.encoder_langtok
grids = [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--max-update", max_update),
hyperparam("--update-freq", args.update_freq),
hyperparam("--task", task),
hyperparam("--lang-pairs", args.lang_pairs),
hyperparam(
"--encoder-langtok", encoder_langtok, save_dir_key=lambda val: f"ent{val}"
),
hyperparam(
"--sampling-method", sampling_method, save_dir_key=lambda val: f"SPL_{val}"
),
hyperparam(
"--sampling-temperature",
sampling_temperature,
save_dir_key=lambda val: f"tmp{val}",
),
hyperparam(
"--share-all-embeddings",
[True],
binary_flag=True,
save_dir_key=lambda val: "shareemb",
),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-eps", 1e-06),
hyperparam(
"--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta0.9,0.98"
),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--warmup-init-lr", 1e-7, save_dir_key=lambda val: f"initlr{val}"),
hyperparam("--warmup-updates", 4000, save_dir_key=lambda val: f"warmup{val}"),
hyperparam("--lr", args.lr, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", 1e-9),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--dropout", args.dropout, save_dir_key=lambda val: f"drop{val}"),
hyperparam("--weight-decay", 0.0, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--criterion", "label_smoothed_cross_entropy"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--max-tokens", 3584, save_dir_key=lambda val: f"maxtok{val}"),
hyperparam("--seed", [2], save_dir_key=lambda val: f"seed{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 100 if not args.local else 10),
]
if args.ddp_backend:
grids.append(
hyperparam(
"--ddp-backend", args.ddp_backend, save_dir_key=lambda val: f"{val}"
)
)
if args.decoder_langtok:
grids.append(
hyperparam(
"--decoder-langtok",
[True],
binary_flag=True,
save_dir_key=lambda val: "det",
)
)
if args.virtual_data_size:
grids.append(hyperparam("--virtual-data-size", args.virtual_data_size))
if args.virtual_epoch_size:
grids.append(hyperparam("--virtual-epoch-size", args.virtual_epoch_size))
if args.lang_dict:
grids.append(hyperparam("--lang-dict", args.lang_dict))
if args.langs:
grids.append(hyperparam("--langs", args.langs))
if args.max_tokens:
grids.append(hyperparam("--max-tokens", args.max_tokens))
if args.finetune_from_model:
grids.append(hyperparam("--finetune-from-model", args.finetune_from_model))
if args.enable_reservsed_directions_shared_datasets:
grids.append(
hyperparam(
"--enable-reservsed-directions-shared-datasets",
[True],
binary_flag=True,
)
)
if args.save_interval_updates:
grids.append(
hyperparam("--save-interval-updates", args.save_interval_updates),
)
arch_grid = get_predefined_grid(args.arch)
arch_grid = (
arch_grid
if arch_grid
else [
hyperparam("--arch", args.arch, save_dir_key=lambda val: val),
]
)
grids += arch_grid
return grids
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(
get_grid, postprocess_hyperparams, add_extra_options_func=add_extra_options_func
)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_translation_multi_simple_epoch.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
# These hyperparameters are tuned for RoBERTa-large
tasks = {
"MNLI": {
"data": "/private/home/myleott/data/data-bin/MNLI-bin",
"--num-classes": 3,
"--lr": 1e-5,
"--batch-size": 32,
"--max-update": 123873,
},
"QNLI": {
"data": "/private/home/myleott/data/data-bin/QNLI-bin",
"--num-classes": 2,
"--lr": 1e-5,
"--batch-size": 32,
"--max-update": 33112,
},
"QQP": {
"data": "/private/home/myleott/data/data-bin/QQP-bin",
"--num-classes": 2,
"--lr": 1e-5,
"--batch-size": 32,
"--max-update": 113272,
},
"RTE": {
"data": "/private/home/myleott/data/data-bin/RTE-bin",
"--num-classes": 2,
"--lr": 2e-5,
"--batch-size": 16,
"--max-update": 2036,
},
"SST-2": {
"data": "/private/home/myleott/data/data-bin/SST-2-bin",
"--num-classes": 2,
"--lr": 1e-5,
"--batch-size": 32,
"--max-update": 20935,
},
"MRPC": {
"data": "/private/home/myleott/data/data-bin/MRPC-bin",
"--num-classes": 2,
"--lr": 1e-5,
"--batch-size": 16,
"--max-update": 2296,
},
"CoLA": {
"data": "/private/home/myleott/data/data-bin/CoLA-bin",
"--num-classes": 2,
"--lr": 1e-5,
"--batch-size": 16,
"--max-update": 5336,
},
"STS-B": {
"data": "/private/home/myleott/data/data-bin/STS-B-bin",
"--num-classes": 1,
"--lr": 2e-5,
"--batch-size": 16,
"--max-update": 3598,
"--regression-target": True,
"--best-checkpoint-metric": "loss",
"--maximize-best-checkpoint-metric": False,
},
}
# convert a dataset path to the name of the dataset
def get_save_dir_key(data_dir):
for task_name, task_config in tasks.items():
if task_config["data"] == data_dir:
return task_name
raise Exception
def get_grid(args):
model_size = "large"
return [
hyperparam("--train-subset", "train" if not args.local else "valid"),
hyperparam(
"data",
list(tasks.keys()),
positional_arg=True,
save_dir_key=lambda val: get_save_dir_key(val),
),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--no-last-checkpoints"),
hyperparam("--no-save-optimizer-state"),
hyperparam("--save-interval-updates", 1000),
hyperparam("--reset-optimizer"),
hyperparam("--reset-dataloader"),
hyperparam("--reset-meters"),
hyperparam("--best-checkpoint-metric", "accuracy"),
hyperparam("--maximize-best-checkpoint-metric", [True], binary_flag=True),
hyperparam(
"--restore-file",
"/private/home/myleott/roberta." + model_size + "/model.pt",
),
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--ddp-backend", "no_c10d"),
hyperparam("--num-workers", 1 if not args.local else 0),
hyperparam(
"--task", "sentence_prediction", save_dir_key=lambda val: "sentpred"
),
hyperparam("--init-token", 0, save_dir_key=lambda val: f"bos{val}"),
hyperparam("--separator-token", 2, save_dir_key=lambda val: f"sep{val}"),
hyperparam("--max-positions", 512),
hyperparam("--regression-target", [False], binary_flag=True),
hyperparam("--arch", "roberta_" + model_size, save_dir_key=lambda val: val),
hyperparam("--bpe", "gpt2"),
hyperparam("--criterion", "sentence_prediction"),
hyperparam("--num-classes", [None]),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "b2_0.98"),
hyperparam("--adam-eps", 1e-6, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", [None], save_dir_key=lambda val: f"lr{val}"),
hyperparam("--warmup-updates", [None], save_dir_key=lambda val: f"wu{val}"),
hyperparam("--total-num-update", [None]),
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--batch-size", [None], save_dir_key=lambda val: f"ms{val}"),
hyperparam("--required-batch-size-multiple", 1),
hyperparam("--update-freq", 1, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--max-update", [None], save_dir_key=lambda val: f"mu{val}"),
hyperparam("--seed", [1], save_dir_key=lambda val: f"s{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 25),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
# apply task-specific overrides
t = config["data"].current_value # current task name
for k, v in tasks[t].items():
assert k in config
config[k].current_value = v
# configure number of updates (warmup and total)
config["--warmup-updates"].current_value = int(
0.06 * config["--max-update"].current_value
)
config["--total-num-update"].current_value = config["--max-update"].current_value
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_roberta_large_glue.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
def get_grid(args):
max_update = 500000
return [
hyperparam("--train-subset", "train" if not args.local else "valid"),
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--fast-stat-sync", save_dir_key=lambda _: "faststatsync"),
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "me_fp16"),
hyperparam("--num-workers", 2),
hyperparam("--task", "masked_lm"),
hyperparam("--criterion", "masked_lm"),
hyperparam("--arch", "roberta_large", save_dir_key=lambda val: val),
hyperparam(
"--sample-break-mode", "complete", save_dir_key=lambda val: "cmpltdoc"
),
hyperparam("--tokens-per-sample", 512, save_dir_key=lambda val: f"tps{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "b2_0.98"),
hyperparam("--adam-eps", 1e-6, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"cl{val}"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", 6e-4, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--warmup-updates", 24000, save_dir_key=lambda val: f"wu{val}"),
hyperparam("--total-num-update", max_update),
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--batch-size", 32, save_dir_key=lambda val: f"ms{val}"),
hyperparam("--update-freq", 1, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--max-update", max_update, save_dir_key=lambda val: f"mu{val}"),
hyperparam("--seed", 1, save_dir_key=lambda val: f"s{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 25),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_roberta_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
def get_grid(args):
target_batch_size = 128
max_tokens = 2048
tokens_per_sample = 512
batch_size_per_gpu = max_tokens // tokens_per_sample
num_gpus = args.num_gpus * args.num_nodes
update_freq = target_batch_size // batch_size_per_gpu // num_gpus
return [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--max-update", 50000),
hyperparam("--task", "language_modeling"),
#hyperparam("--arch", "hf_gpt2", save_dir_key=lambda val: val),
hyperparam("--arch", "transformer_lm_gpt", save_dir_key=lambda val: val),
hyperparam("--share-decoder-input-output-embed", save_dir_key=lambda val: "shareemb"),
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"drop{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam(
"--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta0.9,0.98"
),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--lr", 5e-4, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--warmup-updates", 4000, save_dir_key=lambda val: f"warmup{val}"),
hyperparam("--warmup-init-lr", 1e-7, save_dir_key=lambda val: f"initlr{val}"),
hyperparam(
"--tokens-per-sample", tokens_per_sample, save_dir_key=lambda val: f"sampletok{val}"
),
hyperparam(
"--sample-break-mode", "none", save_dir_key=lambda val: f"break{val}"
),
hyperparam("--max-tokens", max_tokens, save_dir_key=lambda val: f"maxtok{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"updatefreq{val}"),
hyperparam("--seed", [2], save_dir_key=lambda val: f"seed{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 25),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_lm_wikitext103_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
def set_data_based_on_shortname(args):
def set_data(fmt, num_shards):
if num_shards == 0:
args.data = fmt.format(0)
else:
args.data = ":".join([fmt.format(i) for i in range(num_shards)])
# mmap datasets
if args.data == "CC-NEWS-en.v7.1":
set_data("/private/home/myleott/data/data-bin/CC-NEWS-en.v7.1/shard{}", 10)
elif args.data == "fb_posts":
set_data("/data/tmp/fb_posts.en.2018-2019.bpe.mmap-bin/shard{}", 100)
elif args.data == "fb_posts_gfs":
set_data(
"/mnt/vol/gfsai-flash2-east/ai-group/users/myleott/fb_posts/fb_posts.en.2018-2019.bpe.mmap-bin/shard{}",
100,
)
elif args.data == "bookwiki_aml-mmap-bin":
set_data("/data/tmp/bookwiki_aml-mmap-bin/shard{}", 5)
elif args.data == "bookwiki_aml_CC-NEWS-en.v7.1":
set_data("/data/tmp/bookwiki_aml_CC-NEWS-en.v7.1/shard{}", 5)
# old datasets
elif args.data == "CC-NEWS-en.v6":
set_data("/private/home/myleott/data/data-bin/CC-NEWS-en.v6", 0)
elif args.data == "CC-NEWS-en.v9":
set_data(
"/private/home/namangoyal/fairseq-py/data-bin/CC-NEWS-en.v9/shard{}", 100
)
elif args.data == "bookwiki":
set_data("/private/home/myleott/data/data-bin/bookwiki.10shards/shard{}", 10)
elif args.data == "bookwiki_full":
set_data("/private/home/myleott/data/data-bin/bookwiki-bin", 0)
elif args.data == "fb_posts_old":
set_data("/data/tmp/mono.english.public.2018-2019.shard{}.sents.bpe-bin", 100)
elif args.data == "fb_posts_gfs":
set_data(
"/mnt/vol/gfsai-flash2-east/ai-group/users/myleott/fb_posts/en/mono.english.public.2018-2019.shard{}.sents.bpe-bin",
100,
)
elif args.data == "wmt19_en_news_docs":
set_data(
"/private/home/myleott/data/data-bin/wmt19_en_news_docs/wmt19_en_news_docs.bpe.shard{}",
100,
)
else:
set_data(args.data, 0)
return args
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_lm_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
from sweep_wmt_en2de_transformer_big_common import get_common_grid
COMMON_GRID = get_common_grid()
def get_grid(args):
return COMMON_GRID + [
hyperparam(
"--distributed-wrapper", "SlowMo", save_dir_key=lambda val: f"slowmo"
),
hyperparam(
"--slowmo-momentum", 0.5, save_dir_key=lambda val: f"slowmo_mom{val}"
),
hyperparam("--slowmo-algorithm", "SGP", save_dir_key=lambda val: "sgp"),
hyperparam("--nprocs-per-node", 8),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_wmt_en2de_transformer_big_sgp.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from sweep import hyperparam
def get_common_grid():
return [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--ddp-backend", "c10d", save_dir_key=lambda val: "no_c10d"),
hyperparam("--max-epoch", 70),
hyperparam("--arch", "transformer_wmt_en_de_big", save_dir_key=lambda val: val),
hyperparam(
"--share-all-embeddings",
[True],
binary_flag=True,
save_dir_key=lambda val: "shareemb",
),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam(
"--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta0.9,0.98"
),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--warmup-init-lr", 1e-7, save_dir_key=lambda val: f"initlr{val}"),
hyperparam("--warmup-updates", 4000, save_dir_key=lambda val: f"warmup{val}"),
hyperparam("--lr", 0.001, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", 1e-9),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--dropout", 0.3, save_dir_key=lambda val: f"drop{val}"),
hyperparam("--weight-decay", 0.0, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--criterion", "label_smoothed_cross_entropy"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--max-tokens", 3584, save_dir_key=lambda val: f"maxtok{val}"),
hyperparam("--seed", [2], save_dir_key=lambda val: f"seed{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 25),
hyperparam("--save-interval", 1),
]
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_wmt_en2de_transformer_big_common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
def get_grid(args):
"""
Replicates the `16-bit+cumul+2x lr` results from Table 1 of
"Scaling Neural Machine Translation" (https://arxiv.org/abs/1806.00187)
"""
return [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--ddp-backend", "fully_sharded", save_dir_key=lambda val: val),
# hyperparam("--cpu-offload"),
# hyperparam("--no-reshard-after-forward"),
hyperparam("--max-epoch", 70),
# equivalent to training on 16x GPUs
hyperparam(
"--update-freq",
16 if not args.local else 1,
save_dir_key=lambda val: f"updatefreq{val}",
),
hyperparam("--arch", "transformer_wmt_en_de_big", save_dir_key=lambda val: val),
hyperparam(
"--share-all-embeddings",
[True],
binary_flag=True,
save_dir_key=lambda val: "shareemb",
),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam(
"--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta0.9,0.98"
),
hyperparam("--lr-scheduler", "inverse_sqrt"),
hyperparam("--warmup-init-lr", 1e-7, save_dir_key=lambda val: f"initlr{val}"),
hyperparam("--warmup-updates", 4000, save_dir_key=lambda val: f"warmup{val}"),
# use double the default learning rate, since we're using --update-freq=16
hyperparam("--lr", 10e-4, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--stop-min-lr", 1e-9),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--dropout", 0.3, save_dir_key=lambda val: f"drop{val}"),
hyperparam("--weight-decay", 0.0, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--criterion", "label_smoothed_cross_entropy"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--max-tokens", 3584, save_dir_key=lambda val: f"maxtok{val}"),
hyperparam("--seed", [2], save_dir_key=lambda val: f"seed{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 100 if not args.local else 10),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_wmt_en2de_transformer_big.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
"""
Usage:
./fb_sweep/sweep_lm_enwik8_transformer_xl.py \
-d ~daju/data/enwik8/eos-data-bin/ \
-p enwiki8.transformer_xl \
-t 1 -g 4 \
--snapshot-code --snapshot-recurse-dirs fairseq,fairseq_cli,examples/truncated_bptt \
--constraint volta32gb
"""
import sweep
from sweep import hyperparam
def get_grid(args):
target_batch_size = 60
max_batch_size_on_v100 = 15
num_gpus = args.num_gpus * args.num_nodes
batch_size_per_gpu = min(max_batch_size_on_v100, target_batch_size // num_gpus)
update_freq = target_batch_size // (batch_size_per_gpu * num_gpus)
assert target_batch_size == update_freq * batch_size_per_gpu * num_gpus
return [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--max-update", 400000),
hyperparam("--user-dir", "examples/truncated_bptt"),
hyperparam("--task", "truncated_bptt_lm"),
hyperparam("--tokens-per-sample", 512),
hyperparam("--arch", "transformer_xl", save_dir_key=lambda val: val),
hyperparam("--n-layer", 12),
hyperparam("--d-model", 512),
hyperparam("--n-head", 8),
hyperparam("--d-head", 64),
hyperparam("--d-inner", 2048),
hyperparam("--dropout", 0.1),
hyperparam("--dropatt", 0.0),
hyperparam("--mem-len", 512),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--clip-norm", 0.25, save_dir_key=lambda val: f"cl{val}"),
hyperparam("--lr-scheduler", "cosine", save_dir_key=lambda val: val),
hyperparam("--warmup-updates", 0),
hyperparam("--lr", 0.00025, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--batch-size", batch_size_per_gpu),
hyperparam("--update-freq", update_freq),
hyperparam("--seed", [2], save_dir_key=lambda val: f"s{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 25 if not args.local else 1),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_lm_enwik8_transformer_xl.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
def get_grid(args):
config = "8k" # 2k
if config == "8k":
max_update = 100000
save_interval = 5000
valid_interval = 5000
update_freq = 1
lr = 5.2e-4
warmup = 5000
else:
max_update = 100000
save_interval = 5000
valid_interval = 5000
update_freq = 4
lr = 5e-4
warmup = 5000
seeds = [0]
grid = [
# hyperparam('--train-subset', 'train' if not args.local else 'test'),
hyperparam("--train-subset", "valid"),
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("--num-workers", 4),
hyperparam("--task", "multilingual_masked_lm"),
hyperparam("--criterion", "masked_lm"),
hyperparam("--arch", "roberta_large", save_dir_key=lambda val: val),
hyperparam("--sample-break-mode", "complete", save_dir_key=lambda val: "cmplt"),
hyperparam("--tokens-per-sample", 512, save_dir_key=lambda val: f"tps{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "b2_0.98"),
hyperparam("--adam-eps", 1e-6, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 1.0, save_dir_key=lambda val: f"cl{val}"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", lr, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--warmup-updates", warmup, save_dir_key=lambda val: f"wu{val}"),
hyperparam("--total-num-update", max_update),
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
# hyperparam('--max-tokens', 3200, save_dir_key=lambda val: f'mt{val}'),
hyperparam("--batch-size", 12, save_dir_key=lambda val: f"ms{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--max-update", max_update, save_dir_key=lambda val: f"mu{val}"),
hyperparam(
"--multilang-sampling-alpha", 0.7, save_dir_key=lambda val: f"s{val}"
),
]
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 100),
]
# random seed
grid += [
hyperparam("--seed", seeds, save_dir_key=lambda val: f"seed{val}"),
]
grid += [
hyperparam("--validate-interval", valid_interval),
]
grid += [
hyperparam("--save-interval-updates", save_interval),
hyperparam("--no-epoch-checkpoints"),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_multilingual_masked_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
CC25 = sorted(
[
"en_XX",
"ar_AR",
"de_DE",
"es_XX",
"fr_XX",
"hi_IN",
"it_IT",
"ja_XX",
"ko_KR",
"nl_XX",
"ru_RU",
"zh_CN",
"tr_TR",
"vi_VN",
"ro_RO",
"my_MM",
"ne_NP",
"si_LK",
"cs_CZ",
"lt_LT",
"kk_KZ",
"gu_IN",
"fi_FI",
"et_EE",
"lv_LV",
]
)
CC50 = CC25 + sorted(
[
"af_ZA",
"az_AZ",
"bn_IN",
"fa_IR",
"he_IL",
"hr_HR",
"id_ID",
"ka_GE",
"km_KH",
"mk_MK",
"ml_IN",
"mn_MN",
"mr_IN",
"pl_PL",
"ps_AF",
"pt_XX",
"sv_SE",
"sw_KE",
"ta_IN",
"te_IN",
"th_TH",
"tl_XX",
"uk_UA",
"ur_PK",
"xh_ZA",
]
)
def get_grid(args):
grid = []
total_num_udpates = 500000
warmup_updates = 10000
num_data_loaders = 4
arch = "mbart_large"
break_mode = "complete_doc"
# Denoising params
poisson_lambda = [3.5]
mask_p = [0.3]
mask_length = ["span-poisson"]
replace_length = [1]
rotate = [0]
mask_random = [0.1]
insert = [0]
sentence_permute = [1.0]
max_tokens = 1024 # 2048
max_sentences = 32
max_source_positions = None
max_target_positions = None
save_interval = 5000
adam_eps = 1e-6
peak_lr = 3e-4
update_freq = 9
seeds = [2]
valid_subsets = "valid"
fp16 = True
task = "multilingual_denoising"
criterion = "cross_entropy"
lr_scheduler = "poly"
weight_decay = 0.01
continued_pretraining = True
if continued_pretraining:
restore_file = "/private/home/namangoyal/src/fairseq_megatron_codepush/fairseq-py/mbart.cc25/model.pt"
grid += [hyperparam("--restore-file", restore_file)]
grid += [
hyperparam("--reset-lr-scheduler"),
hyperparam("--reset-meters"),
hyperparam("--reset-optimizer"),
hyperparam("--reset-dataloader"),
]
grid + [
"--no-whole-word-mask-langs",
",".join(["ja_XX", "km_KH", "th_TH", "zh_CN", "zh_TW"]),
]
if args.local:
grid += [hyperparam("--train-subset", "valid")]
grid += [hyperparam("--add-lang-token", save_dir_key=lambda x: "lgtkn")]
grid += [hyperparam("--langs", ",".join(CC50), save_dir_key=lambda x: "cc50")]
# data settings
grid += [
hyperparam("--dataset-impl", "mmap"),
]
grid += [
hyperparam("--bpe", "sentencepiece", save_dir_key=lambda x: "spm"),
hyperparam(
"--sentencepiece-model",
"/private/home/namangoyal/src/fairseq_megatron_codepush/fairseq-py/mbart.cc25/sentence.bpe.model",
),
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--criterion", criterion),
]
grid += [
hyperparam(
"--multilang-sampling-alpha", 0.7, save_dir_key=lambda val: f"alp{val}"
),
]
# Default is complete_doc
if break_mode == "complete":
grid += [
hyperparam(
"--sample-break-mode", break_mode, save_dir_key=lambda val: f"bm{val}"
),
]
# batch size
grid += [
hyperparam("--tokens-per-sample", 512, save_dir_key=lambda val: f"tps{val}"),
hyperparam("--max-tokens", max_tokens, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam(
"--max-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"
),
]
if max_sentences is not None:
grid += [
hyperparam(
"--batch-size", max_sentences, save_dir_key=lambda val: f"ms{val}"
),
]
if max_source_positions is not None:
grid += [
hyperparam(
"--max-source-positions",
max_source_positions,
save_dir_key=lambda val: f"msp{val}",
),
]
if max_target_positions is not None:
grid += [
hyperparam(
"--max-target-positions",
max_target_positions,
save_dir_key=lambda val: f"mtp{val}",
),
]
grid += [
hyperparam("--encoder-normalize-before", save_dir_key=lambda val: "enb"),
hyperparam("--decoder-normalize-before", save_dir_key=lambda val: "dnb"),
]
# task settings
grid += [
hyperparam("--task", task),
hyperparam("--required-batch-size-multiple", 8),
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", weight_decay, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
# hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta998"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
]
# lr scheduler
grid += [
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", peak_lr, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"wrm{val}"
),
]
if fp16:
grid += [
hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
hyperparam("--valid-subset", valid_subsets),
]
grid += [
hyperparam("--save-interval-updates", save_interval),
hyperparam("--no-epoch-checkpoints"),
]
grid += [
hyperparam(
"--poisson-lambda", poisson_lambda, save_dir_key=lambda val: f"lam{val}"
),
hyperparam("--mask", mask_p, save_dir_key=lambda val: f"mask{val}"),
hyperparam(
"--mask-length", mask_length, save_dir_key=lambda val: f"msklen{val}"
),
hyperparam(
"--replace-length", replace_length, save_dir_key=lambda val: f"rpllen{val}"
),
hyperparam("--rotate", rotate, save_dir_key=lambda val: f"rot{val}"),
hyperparam(
"--mask-random", mask_random, save_dir_key=lambda val: f"mskrnd{val}"
),
hyperparam("--insert", insert, save_dir_key=lambda val: f"ins{val}"),
hyperparam(
"--permute-sentences",
sentence_permute,
save_dir_key=lambda val: f"prmsen{val}",
),
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1000),
]
# random seed
grid += [
hyperparam("--seed", seeds, save_dir_key=lambda val: f"seed{val}"),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep_mbart.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
"""
python fb_sweep/long_pretrain/sweep_t5_baseline.py -p t5_all_corpus \
-g 8 -n 16 -t 1 --partition a100 --checkpoints-dir /fsx/xwhan/checkpoints/long_denoising --resume-failed \
--baseline-model /data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool.t5.span3/model.pt --snapshot-code --local
python fb_sweep/long_pretrain/sweep_t5_baseline.py -p from_scratch \
-g 8 -n 1 -t 1 --partition a100 --checkpoints-dir /checkpoints/xwhan/long_denoising --resume-failed --snapshot-code --local
"""
# every thing combines
prefix = '/fsx/xwhan/data/pretrain_corpus/pretrain_regimes/c4_books_stories_bookwiki_realnews_dialogue_10shards'
SHARDS = [
f'{prefix}/shard0/',
f'{prefix}/shard1/',
f'{prefix}/shard2/',
f'{prefix}/shard3/',
f'{prefix}/shard4/',
f'{prefix}/shard5/',
f'{prefix}/shard6/',
f'{prefix}/shard7/',
f'{prefix}/shard8/',#
# Created on Fri Oct 28 2022
#
# Copyright (c) 2022 Your Company
#
f'{prefix}/shard9/',
]
def get_grid(args):
grid = []
total_num_udpates = 500000
warmup_updates = 500
num_data_loaders = 4
arch = "bart_large"
task = "long_denoising"
criterion = "cross_entropy"
adam_eps = 1e-06
weight_decay = 0.01
lr = 1e-4
sequence_len = 8192 * 2
bsz = 8192 * 2 // sequence_len
update_freq = 1
# large-size experiments
bsz = bsz * 4
grid += [
hyperparam(
"--custom-dict",
f'/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool.t5.span3/dict.txt'
)
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--fast-stat-sync", save_dir_key=lambda _: "faststatsync"),
hyperparam("--task", task),
hyperparam("--required-seq-len-multiple", 1024),
hyperparam("--criterion", criterion),
hyperparam("--pooling-layers", 4, save_dir_key=lambda val: f"pool{val}"),
hyperparam("--use-xformers"),
hyperparam("--attention-name", "block_noglobal", save_dir_key=lambda val: val),
hyperparam("--train-subset", "train" if not args.local else "valid"),
]
# task settings
grid += [
hyperparam("--truncate-target"),
]
grid += [
hyperparam("--max-source-positions", sequence_len, save_dir_key=lambda val: f"ms{val}"),
hyperparam("--tokens-per-sample", sequence_len),
hyperparam("--max-target-positions", 1024, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam(
"--max-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"
),
hyperparam(
"--sample-break-mode", ["complete"], save_dir_key=lambda val: f"brk_{val}"
),
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", weight_decay, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--batch-size", bsz, save_dir_key=lambda val: f"bsz{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--checkpoint-activations"),
]
# lr scheduler
grid += [
hyperparam("--seed", 42, save_dir_key=lambda val: f"s{val}"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", lr, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "memfp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
noisy_density = 1024 / sequence_len
grid += [
hyperparam("--noise-density", noisy_density, save_dir_key=lambda val: f"noise{val}"),
hyperparam("--dynamic-span-len", save_dir_key=lambda _: "dynaspan"),
]
# validation and checkpoint settings
grid += [
hyperparam("--no-epoch-checkpoints"),
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
hyperparam("--combine-val"),
hyperparam("--save-interval-updates", 20000),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
args.data = ':'.join(SHARDS)
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/long_pretrain/sweep_t5_baseline.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
"""
python fb_sweep/long_pretrain/sweep_model_denoising.py -p md_vanilla_c4_r6 \
-g 8 -n 16 -t 1 --partition a100 --checkpoints-dir /data/home/xwhan/checkpoints/model_denoising --resume-failed \
--baseline-model /data/home/xwhan/fairseq-py/checkpoints/md.base.16k.pool4.span3.r6/model.pt --snapshot-code \
--local
"""
# shards on /fsx
# prefix = '/data/home/xwhan/data/pretrain_regimes/assembled_c4'
prefix = '/fsx/xwhan/data/pretrain_corpus/pretrain_regimes/c4_10shards'
SHARDS = [
f'{prefix}/shard0/',
f'{prefix}/shard1/',
f'{prefix}/shard2/',
f'{prefix}/shard3/',
f'{prefix}/shard4/',
f'{prefix}/shard5/',
f'{prefix}/shard6/',
f'{prefix}/shard7/',
f'{prefix}/shard8/',
f'{prefix}/shard9/',
]
def get_grid(args):
grid = []
total_num_udpates = 500000
warmup_updates = 500
num_data_loaders = 2
arch = "loco_base"
task = "model_based_denoising"
criterion = "model_based_denoising"
adam_eps = 1e-06
weight_decay = 0.01
lr = 1e-4
sequence_len = 8192 * 2
bsz = 8192 * 2 // sequence_len
update_freq = 2
grid += [
hyperparam(
"--custom-dict",
# f'/data/home/xwhan/fairseq-py/checkpoints/base.md.8k.pool4.span5/dict.txt'
'/data/home/xwhan/fairseq-py/checkpoints/md.base.16k.pool4.span3.r6/dict.txt'
)
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--task", task),
hyperparam("--fast-stat-sync", save_dir_key=lambda _: "faststatsync"),
hyperparam("--criterion", criterion),
hyperparam("--required-seq-len-multiple", 1024),
hyperparam("--use-xformers"),
hyperparam("--attention-name", "block_noglobal", save_dir_key=lambda val: val),
hyperparam("--pooling-layers", 4, save_dir_key=lambda val: f"pool{val}"),
# hyperparam("--train-subset", "train" if not args.local else "valid"),
hyperparam("--train-generator"),
hyperparam("--generator-layers", 6),
]
grid += [
hyperparam("--max-source-positions", sequence_len, save_dir_key=lambda val: f"ms{val}"),
hyperparam("--tokens-per-sample", sequence_len),
hyperparam("--max-target-positions", 1024, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam(
"--max-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"
),
hyperparam("--required-batch-size-multiple", 1),
hyperparam(
"--sample-break-mode", ["complete"], save_dir_key=lambda val: f"brk_{val}"
),
]
# regularization
grid += [
hyperparam("--dropout", 0.0, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.0, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", weight_decay, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--batch-size", bsz, save_dir_key=lambda val: f"bsz{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta9999"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--mlm-loss-weight", 1, save_dir_key=lambda val: f"mlm{val}"),
# hyperparam("--checkpoint-activations"),
]
# lr scheduler
grid += [
hyperparam("--seed", 42, save_dir_key=lambda val: f"s{val}"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", lr, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "memfp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
# validation and checkpoint settings
grid += [
hyperparam("--no-epoch-checkpoints"),
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1 if args.local else 20),
hyperparam("--combine-val"),
hyperparam("--save-interval-updates", 20000),
]
grid += [
hyperparam("--sample-ratio", 0.2, save_dir_key=lambda val: f"sample{val}"),
hyperparam("--noise-density", 0.0625, save_dir_key=lambda val: f"noise{val}"),
hyperparam("--dynamic-span-len", save_dir_key=lambda _: "dynaspan"),
# hyperparam("--mean-noise-span-length", 5, save_dir_key=lambda val: f"spanlen{val}"),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
args.data = ':'.join(SHARDS)
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/long_pretrain/sweep_model_denoising.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
"""
python fb_sweep/long_pretrain/sweep_pegasus.py -p pegasus_vanilla_c4 \
-g 8 -n 8 -t -1 --partition a100 --checkpoints-dir /fsx/xwhan/checkpoints/pegasus --resume-failed \
--baseline-model /data/home/xwhan/fairseq-py/checkpoints/bart.base.block16k.pool/model.pt \
--local
"""
# # shards on /data
# prefix = '/data/home/xwhan/data'
prefix = '/fsx/xwhan/data/pretrain_corpus/pretrain_regimes/c4_10shards'
SHARDS = [
f'{prefix}/shard0/',
f'{prefix}/shard1/',
f'{prefix}/shard2/',
f'{prefix}/shard3/',
f'{prefix}/shard4/',
f'{prefix}/shard5/',
f'{prefix}/shard6/',
f'{prefix}/shard7/',
f'{prefix}/shard8/',
f'{prefix}/shard9/',
]
def get_grid(args):
grid = []
total_num_udpates = 500000
warmup_updates = 500
num_data_loaders = 8
arch = "bart_base"
task = "pegasus"
criterion = "cross_entropy"
adam_eps = 1e-06
weight_decay = 0.01
lr = 1e-4
sequence_len = 8192 * 2
bsz = 8192 * 2 // sequence_len
update_freq = 2
# grid += [
# hyperparam(
# "--restore-file",
# "/data/home/xwhan/fairseq-py/checkpoints/bart_block8k_pool/model.pt",
# # "/data/home/xwhan/fairseq-py/checkpoints/bart.large.block8k/model.pt",
# # "/checkpoints/xwhan/pegasus/pegasus.bart_large.mt1024.msp8192.mtp1024.uf8.mu100000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz1.adam.beta9999.eps1e-06.clip0.1.s42.lr3e-05.warm500.memfp16.ngpu32/checkpoint_best.pt"
# ),
# ]
grid += [
hyperparam(
"--custom-dict",
f'/data/home/xwhan/fairseq-py/checkpoints/bart.base.block16k.pool/dict.txt'
)
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--fast-stat-sync", save_dir_key=lambda _: "faststatsync"),
hyperparam("--task", task),
hyperparam("--required-seq-len-multiple", 1024),
hyperparam("--criterion", criterion),
hyperparam("--pooling-layers", 4, save_dir_key=lambda val: f"pool{val}"),
hyperparam("--use-xformers"),
hyperparam("--attention-name", "block_noglobal", save_dir_key=lambda val: val),
# hyperparam("--xformer-config", '{"window_size": 512}'),
# hyperparam("--attention-name", "bs_local", save_dir_key=lambda val: val),
# hyperparam("--xformer-config", '{"block_size": 1024, "max_seq_len": 8192}'),
hyperparam("--train-subset", "train" if not args.local else "valid"),
]
# task settings
grid += [
hyperparam("--max-target-len", 1024, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--truncate-target"),
]
grid += [
hyperparam("--max-source-positions", sequence_len, save_dir_key=lambda val: f"msp{val}"),
hyperparam("--tokens-per-sample", sequence_len, save_dir_key=lambda val: f"tps{val}"),
hyperparam("--max-target-positions", 1024, save_dir_key=lambda val: f"mtp{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam(
"--max-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"
),
hyperparam(
"--sample-break-mode", ["complete"], save_dir_key=lambda val: f"brk_{val}"
),
hyperparam("--mask-ratio", 0.0625, save_dir_key=lambda val: f"ms{val}")
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", weight_decay, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--batch-size", bsz, save_dir_key=lambda val: f"bsz{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "beta9999"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--fp16-scale-tolerance", 0.1, save_dir_key=lambda val: f"fst{val}")
]
# lr scheduler
grid += [
hyperparam("--seed", 42, save_dir_key=lambda val: f"s{val}"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", lr, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "memfp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
# validation and checkpoint settings
grid += [
hyperparam("--no-epoch-checkpoints"),
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
hyperparam("--combine-val"),
hyperparam("--save-interval-updates", 10000),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
args.data = ':'.join(SHARDS)
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/long_pretrain/sweep_pegasus.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
import sweep
from sweep import hyperparam
def get_grid(args):
model_size = 'large'
fp16 = True # some models will run into issues with fp16
max_update = 64000
num_gpu = 64
max_positions = 4096
window_size_ablation = False
# These parameterers can be fixed
tokens_per_batch = 4096 * 64 * 2
tokens_per_gpu = 4096 * 2 if model_size == 'large' and (not window_size_ablation) else 4096 * 4
tokens_per_gpu = (tokens_per_gpu // 2) if not fp16 else tokens_per_gpu
update_freq = tokens_per_batch // (tokens_per_gpu * num_gpu)
warm_up_steps = 500
# warm_up_steps = int(0.06 * max_update)
return [
hyperparam("--train-subset", "train" if not args.local else "valid"),
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "me_fp16"),
hyperparam("--fast-stat-sync", save_dir_key=lambda _: "faststatsync"),
hyperparam("--num-workers", 2),
hyperparam("--task", "masked_lm"),
hyperparam("--criterion", "masked_lm"),
hyperparam("--max-positions", max_positions),
hyperparam(
"--sample-break-mode", "complete_doc", save_dir_key=lambda val: f"brk_{val}"
),
hyperparam("--tokens-per-sample", max_positions, save_dir_key=lambda val: f"tps{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.98)", save_dir_key=lambda val: "b2_0.98"),
hyperparam("--adam-eps", 1e-6, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.0, save_dir_key=lambda val: f"cl{val}"),
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", 3e-5, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--warmup-updates", warm_up_steps, save_dir_key=lambda val: f"wu{val}"), # use more updates for performer, 500 for other models
hyperparam("--total-num-update", max_update),
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
hyperparam("--batch-size", tokens_per_gpu // max_positions, save_dir_key=lambda val: f"ms{val}"),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--max-update", max_update, save_dir_key=lambda val: f"mu{val}"),
hyperparam("--seed", 42, save_dir_key=lambda val: f"s{val}"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 100),
hyperparam("--combine-val"),
hyperparam("--keep-last-epochs", 1),
hyperparam("--save-interval-updates", 16000), # increased as it will save on epoch end
hyperparam("--ddp-backend", "no_c10d"),
hyperparam("--arch", f"roberta_{model_size}", save_dir_key=lambda val: val),
hyperparam("--use-xformers"),
hyperparam("--attention-name", "block"),
hyperparam("--xformer-config", '{"window_size": 512}'),
hyperparam("--restore-file", "/data/home/xwhan/fairseq-py/checkpoints/roberta.large.block-512/model.pt")
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/long_pretrain/sweep_from_roberta.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models.bart import BARTModel
from fairseq.tasks.denoising import DenoisingTask
from fairseq.tasks.pegasus import PegasusTask
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
import os
import torch
hub = BARTModel.from_pretrained('/data/home/xwhan/fairseq-py/checkpoints/bart.base', checkpoint_file='model.pt')
task = hub.task
bart = hub.model
model_args = hub.cfg.model
model_args.max_source_positions = 1024 * 16
model_args.max_target_positions = 1024
model_args.alibi = False
model_args.pooling_layers = 4
checkpoint_path = "/data/home/xwhan/fairseq-py/checkpoints/bart.base"
dictionary = DenoisingTask.load_dictionary(os.path.join(checkpoint_path, 'dict.txt'))
state = torch.load(os.path.join(checkpoint_path, 'model.pt'), map_location=torch.device('cpu'))
task = PegasusTask(model_args, dictionary)
long_cfg = convert_namespace_to_omegaconf(model_args)
long_model = task.build_model(long_cfg.model)
##### encoder staff #####
long_model.encoder.embed_tokens.load_state_dict(bart.encoder.embed_tokens.state_dict())
long_model.encoder.layernorm_embedding.load_state_dict(bart.encoder.layernorm_embedding.state_dict())
# 2. attention layers
long_model.encoder.layers.load_state_dict(bart.encoder.layers.state_dict(), strict=False)
# 3. embed_positions, longer
if not model_args.alibi:
pos_limit, _ = bart.encoder.embed_positions.weight.shape
new_pos_limit, embed_dim = long_model.encoder.embed_positions.weight.shape
new_pos_embed = bart.encoder.embed_positions.weight.new_empty(new_pos_limit, embed_dim)
step = pos_limit - 2
for start in range(2, new_pos_limit, step):
new_pos_embed[start:start+step] = bart.encoder.embed_positions.weight[2:]
long_model.encoder.embed_positions.weight.data = new_pos_embed
##### decoder staff #####
long_model.decoder.load_state_dict(bart.decoder.state_dict())
save_path = '/data/home/xwhan/fairseq-py/checkpoints/bart.base.block16k.pool'
print(len(dictionary))
dictionary.save(os.path.join(save_path, 'dict.txt'))
state['args'] = model_args
state['model'] = long_model.state_dict()
if 'criterion' in state:
del state['criterion']
state['extra_state'] = {"epoch": 0}
state['last_optimizer_state'] = None
torch.save(state, os.path.join(save_path, 'model.pt'))
|
bart_ls-main
|
fairseq-py/fb_sweep/long_pretrain/initialize_models_pegasus.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import hashlib
import itertools
import os
import random
import shlex
import shutil
import subprocess
import textwrap
from collections import OrderedDict
from glob import iglob
BASH_IF_CLAUSE = """
if [ "$SLURM_ARRAY_TASK_ID" = "{index}" ]; then
{srun_cmd}
fi
"""
def main(get_grid, postprocess_hyperparams, args):
def dry_run(msg):
if args.dry_run:
print(f"| dry-run: {msg}")
return args.dry_run
if args.local:
args.num_nodes = 1
# compute all possible hyperparameter configurations
grid = get_grid(args)
grid_product = list(itertools.product(*[hp.values for hp in grid]))
# randomly shuffle configurations
random.seed(args.seed)
random.shuffle(grid_product)
launch_train(args, grid, grid_product, dry_run, postprocess_hyperparams)
def copy_all_python_files(
source, snapshot_main_dir, code_snapshot_hash, recurse_dirs="fairseq"
):
"""
Copies following files from source to destination:
a) all *.py files at direct source location.
b) all fairseq/*.py recursively (default); recurse through comma-separated recurse_dirs
"""
def all_pys(recurse_dirs):
yield from iglob(os.path.join(source, "*.py"))
for d in recurse_dirs.split(","):
yield from iglob(os.path.join(source, d, "**/*.py"), recursive=True)
yield from iglob(os.path.join(source, d, "**/*.so"), recursive=True)
yield from iglob(os.path.join(source, d, "**/*.yaml"), recursive=True)
os.makedirs(snapshot_main_dir, exist_ok=True)
destination = os.path.join(snapshot_main_dir, code_snapshot_hash)
assert not os.path.exists(destination), "Code snapshot: {0} alredy exists".format(
code_snapshot_hash
)
os.makedirs(destination)
for filepath in all_pys(recurse_dirs):
directory, filename = os.path.split(filepath)
if directory:
os.makedirs(os.path.join(destination, directory), exist_ok=True)
shutil.copy2(
os.path.join(source, filepath), os.path.join(destination, filepath)
)
return destination
def run_setup(args, config, dry_run):
# compute save_dir
save_dir_key = ".".join(
filter(
lambda save_dir_key: save_dir_key is not None,
[hp.get_save_dir_key() for hp in config.values()],
)
)
save_dir_key = save_dir_key.replace(",", "_")
num_total_gpus = args.num_nodes * args.num_gpus
if args.use_jobarray:
save_dir = os.path.join(
args.checkpoints_dir, args.jobarray_name, f"{args.prefix}.{save_dir_key}.ngpu{num_total_gpus}"
)
else:
save_dir = os.path.join(
args.checkpoints_dir, f"{args.prefix}.{save_dir_key}.ngpu{num_total_gpus}"
)
# create save directory if it doesn't exist
if not os.path.exists(save_dir):
if not dry_run(f"create directory: {save_dir}"):
os.makedirs(save_dir)
# copy baseline model
checkpoint_last = os.path.join(save_dir, "checkpoint_last.pt")
if (
args.baseline_model
and not os.path.exists(checkpoint_last)
and not dry_run(f"initialize with baseline model: {args.baseline_model}")
):
if not os.path.exists(args.baseline_model):
raise FileNotFoundError(
f"Cannot find baseline model: {args.baseline_model}"
)
shutil.copyfile(args.baseline_model, checkpoint_last)
# create slurm log dir for job arrays
if args.use_jobarray:
slurm_dir = os.path.join(args.checkpoints_dir, args.jobarray_name, "slurm_logs")
if not os.path.exists(slurm_dir):
if not dry_run(f"create directory: {slurm_dir}"):
os.makedirs(slurm_dir)
return save_dir_key, save_dir, slurm_dir
else:
return save_dir_key, save_dir
def is_job_valid(args, save_dir, dry_run):
# check for whether the run failed
if has_finished(save_dir):
if args.resume_finished:
dry_run(f"restart previously finished run: {save_dir}")
else:
print(f"skip finished run (override with --resume-finished): {save_dir}")
return False
elif has_failed(save_dir):
if args.resume_failed:
dry_run(f"resume failed run: {save_dir}")
else:
print(f"skip failed run (override with --resume-failed): {save_dir}")
return False
elif has_started(save_dir):
print(f"skip in progress run: {save_dir}")
return False
return True
def set_env(args, env, dry_run):
if "OMP_NUM_THREADS" not in env:
env["OMP_NUM_THREADS"] = "2"
if args.local:
if not dry_run("start training locally"):
if "CUDA_VISIBLE_DEVICES" not in env:
env["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, range(args.num_gpus)))
env["NCCL_DEBUG"] = "INFO"
else:
if args.num_nodes > 1:
env["NCCL_SOCKET_IFNAME"] = "^docker0,lo"
env["NCCL_DEBUG"] = "INFO"
env["FI_PROVIDER"] = "efa"
# env["FI_OFI_RXR_RX_COPY_UNEXP"] = '1'
# # env["FI_EFA_MR_CACHE_ENABLE"] = '1'
# env["FI_OFI_RXR_RX_COPY_OOO"] = '1'
# # env["FI_OFI_RXR_INLINE_MR_ENABLE"] = '1'
# env["NCCL_TREE_THRESHOLD"] = '0'
# env["NCCL_SOCKET_IFNAME"] = "ens5"
# env["NCCL_NET_SHARED_BUFFERS"] = "0"
def gen_train_command(args, env, config, destination, save_dir, save_dir_key):
# generate train command
train_cmd = [args.python, os.path.join(destination, args.script)]
train_cmd.extend(["--distributed-world-size", str(args.num_nodes * args.num_gpus)])
if args.num_nodes > 1:
train_cmd.extend(
[
"--distributed-port",
str(get_random_port()),
]
)
if args.data is not None:
train_cmd.extend([args.data])
train_cmd.extend(["--save-dir", save_dir])
if not args.no_tensorboard:
_dir = args.tensorboard_logdir
if _dir is None:
# _dir = os.path.join(
# "/fsx/", # For AWS cluster "/checkpoint" -> "/checkpoints"
# env["USER"],
# "checkpoints",
# "tensorboard_logs",
# str(datetime.date.today()),
# )
_dir = os.path.join(args.checkpoints_dir, 'tb')
tensorboard_logdir = os.path.join(
_dir, f"{args.prefix}.{save_dir_key}.ngpu{str(args.num_nodes * args.num_gpus)}"
)
train_cmd.extend(["--tensorboard-logdir", tensorboard_logdir])
if not args.no_wandb:
if "WANDB_API_KEY" in env and "WANDB_BASE_URL" in env:
if "--wandb-project" not in config:
project = str(datetime.date.today())
train_cmd.extend(["--wandb-project", project])
if "WANDB_RUN_GROUP" not in env:
env["WANDB_RUN_GROUP"] = args.prefix
if "WANDB_RUN_ID" not in env:
env["WANDB_RUN_ID"] = (
hashlib.md5(os.path.basename(save_dir).encode('utf-8')).hexdigest()
)
for hp in config.values():
train_cmd.extend(map(str, hp.get_cli_args()))
return train_cmd
def gen_post_commands(args, save_dir):
post_cmds = []
if args.post_steps:
for post_step in args.post_steps:
if os.path.isfile(post_step):
from pathlib import Path
post_cmd = Path(post_step).read_text()
else:
post_cmd = post_step
post_cmd = post_cmd.strip().format(
job_dir=save_dir, data_dir=args.data
) # assume to provide job_dir
post_cmds.append(post_cmd)
return post_cmds
def gen_srun_command_and_str(args, env, save_dir_key, train_log, train_stderr, train_cmd, post_cmds):
base_srun_cmd = [
"srun",
"--job-name",
f"{args.prefix}.{save_dir_key}",
"--output",
train_log,
"--error",
train_stderr,
"--open-mode",
"append",
"--unbuffered",
]
if args.salloc:
excluded_hosts = os.environ.get("EXCLUDED_HOSTS", None)
included_hosts = os.environ.get("INCLUDED_HOSTS", None)
base_srun_cmd += [
"--nodes",
str(args.num_nodes),
"--ntasks",
str(args.num_nodes),
]
base_srun_cmd += ["-x", excluded_hosts] if excluded_hosts is not None else []
base_srun_cmd += ["-w", included_hosts] if included_hosts is not None else []
srun_cmd = base_srun_cmd + train_cmd
srun_cmd_str = " ".join(map(shlex.quote, srun_cmd))
for post_cmd in post_cmds:
post_cmd_str = " ".join(map(shlex.quote, base_srun_cmd)) + f" {post_cmd}"
srun_cmd_str = f"({srun_cmd_str} && {post_cmd_str})" if len(srun_cmd_str) > 0 else post_cmd_str
return srun_cmd, srun_cmd_str
def gen_sbatch_command_and_str(args, job_name, train_log, train_stderr, destination, srun_cmd_str, array_length=None):
excluded_hosts = os.environ.get("EXCLUDED_HOSTS", None)
included_hosts = os.environ.get("INCLUDED_HOSTS", None)
sbatch_cmd = [
"sbatch",
"--job-name",
job_name,
"--gpus",
str(args.num_gpus * args.num_nodes),
"--nodes",
str(args.num_nodes),
"--ntasks-per-node",
"1",
"--cpus-per-task",
str(int(8 * args.num_gpus)),
"--output",
train_log,
"--error",
train_stderr,
"--open-mode",
"append",
# '--no-requeue',
"--signal",
"B:USR1@180",
]
if array_length is not None:
sbatch_cmd += ["--array", f"0-{array_length-1}"]
if args.constraint:
sbatch_cmd += ["--constraint", args.constraint]
if args.partition:
sbatch_cmd += ["--partition", args.partition]
if args.reservation:
sbatch_cmd += ["--reservation", args.reservation]
if args.exclusive:
sbatch_cmd += ["--exclusive"]
if args.comment:
comment = args.comment
if args.snapshot_code:
comment += ", Code Location: {0}".format(destination)
sbatch_cmd += ["--comment", comment]
elif args.snapshot_code:
sbatch_cmd += ["--comment", "Code Location: {0}".format(destination)]
if args.dep is not None:
sbatch_cmd.extend(["-d", str(args.dep)])
if args.time is not None:
sbatch_cmd.extend(["--time", args.time])
# # remove mem requirements for AWS
# if args.mem is not None:
# sbatch_cmd += ["--mem", args.mem]
# else:
# sbatch_cmd += ["--mem-per-cpu", "7G"]
sbatch_cmd += ["-x", excluded_hosts] if excluded_hosts is not None else []
sbatch_cmd += ["-w", included_hosts] if included_hosts is not None else []
wrapped_cmd = requeue_support() + "\n" + srun_cmd_str
if array_length is None:
wrapped_cmd = wrapped_cmd + " \n wait $! \n sleep 610 & \n wait $!"
sbatch_cmd += ["--wrap", wrapped_cmd]
sbatch_cmd_str = " ".join(map(shlex.quote, sbatch_cmd))
return sbatch_cmd, sbatch_cmd_str
def local_run(args, env, train_cmd, post_cmds, dry_run):
assert args.num_nodes == 1, "distributed training cannot be combined with --local"
if not dry_run("start training locally"):
if "CUDA_VISIBLE_DEVICES" not in env:
env["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, range(args.num_gpus)))
env["NCCL_DEBUG"] = "INFO"
train_proc = subprocess.Popen(train_cmd, env=env)
train_proc.wait()
for post_cmd in post_cmds:
post_cmd_proc = subprocess.Popen(post_cmd, shell=True, env=env)
post_cmd_proc.wait()
def run_batch(env, sbatch_cmd_str, sbatch_cmd):
print(f"running command: {sbatch_cmd_str}\n")
with subprocess.Popen(sbatch_cmd, stdout=subprocess.PIPE, env=env) as train_proc:
stdout = train_proc.stdout.read().decode("utf-8")
try:
job_id = int(stdout.rstrip().split()[-1])
print(f"Launched job {job_id}")
except IndexError:
job_id = None
return job_id, stdout
def write_git_commit(args, train_log):
with open(train_log, "a") as train_log_h:
# log most recent git commit
git_commit = subprocess.check_output("git log | head -n 1", shell=True, encoding="utf-8")
print(git_commit.rstrip(), file=train_log_h)
if args.baseline_model:
print(f"baseline model: {args.baseline_model}", file=train_log_h)
def dry_run_batch(env, train_log, train_stderr, sbatch_cmd_str, sbatch_cmd, dry_run):
dry_run("start remote training")
dry_run(f"- log stdout to: {train_log}")
dry_run(f"- log stderr to: {train_stderr}")
dry_run(f"- run command: {sbatch_cmd_str}")
sbatch_cmd += ["--test-only"]
with subprocess.Popen(sbatch_cmd, stdout=subprocess.PIPE, env=env) as train_proc:
stdout = train_proc.stdout.read().decode("utf-8")
print(stdout)
def launch_train(args, grid, grid_product, dry_run, postprocess_hyperparams):
destination = ""
if args.snapshot_code:
# Currently hash is just the current time in ISO format.
# Remove colons since they cannot be escaped in POSIX PATH env vars.
code_snapshot_hash = datetime.datetime.now().isoformat().replace(":", "_")
destination = copy_all_python_files(
".",
os.path.join(args.snapshot_root, "slurm_snapshot_code"),
code_snapshot_hash,
args.snapshot_recurse_dirs,
)
os.environ["PYTHONPATH"] = destination + ":" + os.environ.get("PYTHONPATH", "")
# set environment
env = os.environ.copy()
set_env(args, env, dry_run)
# start training
srun_cmd_str_list = []
train_log_list = []
for i, hp_values in enumerate(grid_product):
if i == args.num_trials:
break
config = OrderedDict()
for hp, value in zip(grid, hp_values):
config[hp.name] = hp
config[hp.name].current_value = value
# postprocess hyperparams
postprocess_hyperparams(args, config)
if args.use_jobarray:
save_dir_key, save_dir, slurm_dir = run_setup(args, config, dry_run)
else:
save_dir_key, save_dir = run_setup(args, config, dry_run)
# check if job failed, exists, finished
if not is_job_valid(args, save_dir, dry_run):
continue
# generate train command
train_cmd = gen_train_command(args, env, config, destination, save_dir, save_dir_key)
# post cmds
post_cmds = gen_post_commands(args, save_dir)
train_log = os.path.join(save_dir, "train.log")
train_stderr = os.path.join(save_dir, "train.stderr.%j") # %j = slurm job id
srun_cmd, srun_cmd_str = gen_srun_command_and_str(
args, env, save_dir_key, train_log, train_stderr, train_cmd, post_cmds
)
# launch each job individually
if not args.use_jobarray:
job_id = None
if args.dry_run:
train_cmd_str = " ".join(train_cmd)
dry_run(f"train command: {train_cmd_str}")
for post_cmd in post_cmds:
dry_run(f"post steps command: {post_cmd}")
if args.local:
local_run(args, env, train_cmd, post_cmds, dry_run)
else:
srun_cmd_str = srun_cmd_str + " &"
# build command
if not args.salloc:
job_name = f"{args.prefix}.{save_dir_key}"
sbatch_cmd, sbatch_cmd_str = gen_sbatch_command_and_str(
args, job_name, train_log, train_stderr, destination, srun_cmd_str
)
else:
sbatch_cmd = srun_cmd
sbatch_cmd_str = srun_cmd_str
if args.dry_run:
dry_run_batch(env, train_log, train_stderr, sbatch_cmd_str, sbatch_cmd, dry_run)
else:
write_git_commit(args, train_log)
with open(train_log, "a") as train_log_h:
job_id, stdout = run_batch(env, sbatch_cmd_str, sbatch_cmd)
print(stdout, file=train_log_h)
if job_id is not None:
print("Launched {}".format(job_id))
if args.sequential and not args.local and job_id is not None:
args.dep = job_id
else:
train_log_list.append(train_log)
srun_cmd_str_list.append(srun_cmd_str)
if not args.dry_run:
write_git_commit(args, train_log)
# aggregate cmds and launch single job array
if args.use_jobarray:
aggregate_cmd = ""
for i, srun_cmd_str in enumerate(srun_cmd_str_list):
aggregate_cmd = aggregate_cmd + BASH_IF_CLAUSE.format(index=i, srun_cmd=srun_cmd_str)
job_name = args.jobarray_name
slurm_stdout_log = os.path.join(slurm_dir, "slrm_stdout.%j")
slurm_stderr_log = os.path.join(slurm_dir, "slrm_stderr.%j")
array_length = len(srun_cmd_str_list)
sbatch_cmd, sbatch_cmd_str = gen_sbatch_command_and_str(
args, job_name, slurm_stdout_log, slurm_stderr_log, destination, aggregate_cmd, array_length=array_length
)
if args.dry_run:
dry_run_batch(env, slurm_stdout_log, slurm_stderr_log, sbatch_cmd_str, sbatch_cmd, dry_run)
else:
job_id, stdout = run_batch(env, sbatch_cmd_str, sbatch_cmd)
for train_log in train_log_list:
with open(train_log, "a") as train_log_h:
print(stdout, file=train_log_h)
def has_finished(save_dir):
train_log = os.path.join(save_dir, "train.log")
if not os.path.exists(train_log):
return False
with open(train_log, "r") as h:
lines = h.readlines()
if len(lines) == 0:
return False
if "done training" in lines[-1]:
return True
return False
def has_failed(save_dir):
if not os.path.exists(save_dir):
return False
# find max job id
job_ids = []
for fn in os.listdir(save_dir):
if fn.startswith("train.stderr."):
job_ids.append(int(fn.split(".")[-1]))
if len(job_ids) == 0:
return False
max_job_id = max(job_ids)
def _has_failed(stderr_fn):
with open(stderr_fn, "r") as h:
for line in h:
if len(line.strip()) > 0:
# assume that any output in stderr indicates an error
return True
return False
return _has_failed(os.path.join(save_dir, f"train.stderr.{max_job_id}"))
def has_started(save_dir):
train_log = os.path.join(save_dir, "train.log")
if not os.path.exists(train_log):
return False
return True
def get_random_port():
old_state = random.getstate()
random.seed()
port = random.randint(10000, 20000)
random.setstate(old_state)
return port
def requeue_support():
return textwrap.dedent(
"""
trap_handler () {
echo "Caught signal: " $1
# SIGTERM must be bypassed
if [ "$1" = "TERM" ]; then
echo "bypass sigterm"
else
# Submit a new job to the queue
echo "Requeuing " $SLURM_JOB_ID
scontrol requeue $SLURM_JOB_ID
fi
}
# Install signal handler
trap 'trap_handler USR1' USR1
trap 'trap_handler TERM' TERM
"""
)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep/slurm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import os
import socket
from typing import List, Optional
def csv_str_list(x):
return [y.strip() for y in x.split(",")]
def get_args(add_extra_options_func=None, input_args: Optional[List[str]] = None):
"""
input_args (List[str]): strings to parse, defaults to sys.argv
"""
parser = argparse.ArgumentParser("Script for launching hyperparameter sweeps ")
parser.add_argument("--grid", help="grid function we used", default=None)
parser.add_argument("--pair", help="language direction", default=None)
parser.add_argument("-d", "--data", help="path to data directory")
parser.add_argument(
"-p",
"--prefix",
required=True,
help="save checkpoints and logs in <checkpoints-dir>/<prefix>.<save_dir_key>",
)
parser.add_argument(
"-t",
"--num-trials",
required=True,
type=int,
help="number of random hyperparam configurations to try (-1 for grid search)",
)
parser.add_argument(
"-g", "--num-gpus", type=int, required=True, help="number of GPUs per node"
)
parser.add_argument(
"-n",
"--num-nodes",
type=int,
default=1,
help="number of nodes for distributed training",
)
parser.add_argument("--seed", type=int, default=1234)
parser.add_argument(
"--baseline-model", help="path to baseline model from which to resume training"
)
parser.add_argument(
"--force-checkpoints-dir", help="force using a given checkpoint dir"
)
parser.add_argument(
"--resume-failed",
action="store_true",
help="resume any runs that failed (assumes --num-trials and --seed are the same)",
)
parser.add_argument(
"--resume-finished",
action="store_true",
help="force any runs that finished to begin again (uncommon)",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="output only a list of actions to perform without performing them",
)
parser.add_argument("--local", action="store_true", help="run job locally")
parser.add_argument("--debug", action="store_true", help="debug")
parser.add_argument("--script", default="train.py", help="script to launch")
parser.add_argument(
"--python", default="python", help="path to nonstandard python binary"
)
try:
import torch_xla # noqa
tpu = True
except ImportError:
tpu = False
hostname = socket.gethostname()
if "fair" in hostname:
default_backend = "slurm"
parser.add_argument(
"--checkpoints-dir",
default=os.path.join(
"/checkpoint", os.environ["USER"], str(datetime.date.today())
),
help="save checkpoints and logs in <checkpoints-dir>/<prefix>.<save_dir_key>",
)
elif tpu:
default_backend = "tpu"
parser.add_argument(
"--checkpoints-dir",
default=os.path.join(
"/mnt/fairseq_data",
os.environ["USER"],
"checkpoints",
str(datetime.date.today()),
),
help="save checkpoints and logs in <checkpoints-dir>/<prefix>.<save_dir_key>",
)
else:
# default_backend = "fblearner"
# parser.add_argument(
# "--checkpoints-dir",
# default=os.path.join(
# "/mnt/vol/gfsai-east/ai-group/users",
# os.environ["USER"],
# "checkpoints",
# str(datetime.date.today()),
# ),
# help="save checkpoints and logs in <checkpoints-dir>/<prefix>.<save_dir_key>",
# )
# for AWS cluster
default_backend = "slurm"
parser.add_argument(
"--checkpoints-dir",
default=os.path.join(
"/checkpoints", os.environ["USER"], str(datetime.date.today())
),
help="save checkpoints and logs in <checkpoints-dir>/<prefix>.<save_dir_key>",
)
parser.add_argument(
"--log-main-dir",
default=None,
help="dir to store log in addition to stdout. If this "
"is not set, it will be set to args.checkpoints_dir",
)
parser.add_argument(
"--backend",
choices=["fblearner", "chronos", "slurm", "tpu"],
default=default_backend,
)
# FBLearner params
parser.add_argument("--entitlement", help="entitlement to use", default="gpu_fair")
parser.add_argument(
"--run-as-secure-group", help="secure group to use", default="oncall_fairseq"
)
parser.add_argument(
"--capabilities",
help="capabilities: e.g. GPU_V100_HOST,GPU_V100_32G_HOST",
type=csv_str_list,
default=None,
)
# for manifold in fblearner
parser.add_argument(
"--manifold-max-parallel",
default=8,
type=int,
help="set ManifoldPathHandler max_parallel download number",
)
parser.add_argument(
"--manifold-timeout-sec",
default=1800,
type=int,
help="set ManifoldPathHandler timeout seconds",
)
parser.add_argument(
"--manifold-has-user-data",
default=None,
type=lambda x: x.lower() not in ("no", "false", "f", "n", "0")
if x is not None
else None,
help="set ManifoldPathHandler has_user_data option",
)
parser.add_argument(
"--manifold-num-retries",
default=15,
type=int,
help="set ManifoldPathHandler num_retries option",
)
parser.add_argument(
"--manifold-ttl",
default=None,
type=int,
help="A manifold resource's time-to-live, applied to all manifold written resources. By default, there is no TTL.",
)
# for manifold in fblearner
# Chronos params
parser.add_argument("--hostgroup", help="hostgroup to use")
parser.add_argument("--host-filter", help="host filter")
parser.add_argument("--fbpkg", help="use the given fbpkg")
parser.add_argument("--build-only", action="store_true")
# Slurm params
parser.add_argument(
"--salloc", action="store_true", help="run agaist current allocation"
)
parser.add_argument("--partition", help="partition to run on", default="learnfair")
parser.add_argument("--reservation", help="reservation to run on")
parser.add_argument(
"--exclusive", action="store_true", help="if set, get exclusive host"
)
parser.add_argument(
"--dep",
metavar="JOBID",
type=int,
help="add JOBID as a dependency (i.e., wait for it to finish)",
)
parser.add_argument(
"--sequential", action="store_true", help="schedule jobs to run sequentially"
)
parser.add_argument(
"--time", default="4320", help="expected job duration in minutes"
)
parser.add_argument("--mem", "--mem", help="memory to request")
parser.add_argument(
"--constraint",
metavar="CONSTRAINT",
help='gpu constraint, if any. e.g. "volta"',
)
parser.add_argument("--comment", help="comment string")
parser.add_argument(
"--snapshot-code",
action="store_true",
default=False,
help="Flag for creating a snapshot of training code while creating slurm job,"
' path is "./slurm_snapshot_code/<TIME_ISO_FORMAT/>:", '
"can find time from comment of slurm job.",
)
parser.add_argument(
"--snapshot-root",
type=str,
default=".",
help="root path for saving the snapshot code.",
)
parser.add_argument(
"--snapshot-recurse-dirs",
default="fairseq,fairseq_cli",
help="comma-separated directories from where to recursively copy *.py, *.so and *.yaml files",
)
parser.add_argument(
"--tensorboard-logdir",
help="save tensorboard logs in <tensorboard-logdir>/<prefix>.<save_dir_key>",
)
parser.add_argument(
"--no-tensorboard", action="store_true", help="disable tensorboard logging"
)
parser.add_argument(
"--no-wandb", action="store_true", help="disable WandB logging"
)
parser.add_argument(
"--post-steps",
nargs="+",
help="additional steps to execute after the primary job is complete. "
"this can be a file with the steps, or a string. some placeholders such as "
"{job_dir} will be replaced",
)
parser.add_argument('--use-jobarray', action='store_true', help="Submit sweep as job-array")
parser.add_argument('--jobarray-name', type=str, default=None, help="Folder name for job-array. Defaults to <jobarray_timestamp>")
# GCP params
parser.add_argument("--tpu", help="tpu to use")
if add_extra_options_func is not None:
add_extra_options_func(parser)
args = parser.parse_args(input_args)
if args.use_jobarray:
if args.jobarray_name is None:
ja_hash = datetime.datetime.now().isoformat().replace(':', '_')
args.jobarray_name = f'jobarray_{ja_hash}'
assert not args.local, 'Job array should not be local'
assert not args.sequential, 'Cannot have both sequential and jobarray'
return args
class hyperparam(object):
"""Base class for defining hyperparameters."""
def __init__(
self,
name,
values=None,
binary_flag=False,
save_dir_key=None,
positional_arg=False,
):
"""
Arguments:
- name : the name of the hyperparameter (e.g., `--dropout`)
- values : the set of values to sweep over (e.g., `[0.0, 0.1, 0.2]`)
- binary_flag : whether the hyperparameter uses a boolean flag (e.g., `--no-save`)
- save_dir_key : function that takes the hyperparameter value and returns the "key"
to be appended to the output directory name
- positional_arg : whether the hyperparameter is a positional argument
"""
self.name = name
if values is None: # syntactic sugar for binary flags
self.values = [True]
self.binary_flag = True
else:
self.values = values if isinstance(values, list) else [values]
self.binary_flag = binary_flag
self.save_dir_key = save_dir_key
self.positional_arg = positional_arg
self.current_value = None
if positional_arg and name.startswith("-"):
raise ValueError(
f"positional arguments must not start with a dash ({name})"
)
if len(self.values) > 1 and self.save_dir_key is None:
raise ValueError(
f"{name} has more than one value but is missing a save_dir_key!"
)
def get_cli_args(self):
if self.binary_flag:
return [self.name] if self.current_value else []
elif self.positional_arg:
return [self.current_value]
else:
return [self.name, self.current_value]
def get_save_dir_key(self):
if self.save_dir_key is None:
return None
if self.binary_flag:
return self.save_dir_key(1) if self.current_value else None
return self.save_dir_key(self.current_value)
def main(
get_grid,
postprocess_hyperparams,
add_extra_options_func=None,
scheduler_args: Optional[List[str]] = None,
):
args = get_args(add_extra_options_func, scheduler_args)
if args.backend == "fblearner":
from .fblearner import main as backend_main
elif args.backend == "chronos":
from .chronos import main as backend_main
elif args.backend == "slurm":
from .slurm import main as backend_main
elif args.backend == "tpu":
from .tpu import main as backend_main
get_grid = get_grid[args.grid] if args.grid is not None else get_grid
backend_main(get_grid, postprocess_hyperparams, args)
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import os
import random
import shlex
import subprocess
from collections import OrderedDict
def main(get_grid, postprocess_hyperparams, args):
assert args.local or args.tpu is not None, "--tpu is required for TPU jobs"
# compute all possible hyperparameter configurations
grid = get_grid(args)
grid_product = list(itertools.product(*[hp.values for hp in grid]))
# randomly shuffle configurations
random.seed(args.seed)
random.shuffle(grid_product)
for i, hp_values in enumerate(grid_product):
config = OrderedDict()
for hp, value in zip(grid, hp_values):
config[hp.name] = hp
config[hp.name].current_value = value
# postprocess hyperparams
postprocess_hyperparams(args, config)
# launch training
launch_train(args, config)
if i == args.num_trials - 1:
break
def launch_train(args, config):
def dry_run(msg):
if args.dry_run:
print(f"| dry-run: {msg}")
return args.dry_run
# compute save_dir
save_dir_key = ".".join(
filter(
lambda save_dir_key: save_dir_key is not None,
[hp.get_save_dir_key() for hp in config.values()],
)
)
save_dir_key = save_dir_key.replace(",", "_")
num_total_gpus = args.num_nodes * args.num_gpus
if args.force_checkpoints_dir:
raise NotImplementedError
save_dir = os.path.join(
args.checkpoints_dir,
f"{args.prefix}.{save_dir_key}.ntpu{num_total_gpus}",
)
# create save directory if it doesn't exist
if not os.path.exists(save_dir):
if not dry_run(f"create directory: {save_dir}"):
os.makedirs(save_dir)
# os.chmod(save_dir, 0o777)
# if has_started(save_dir) and not args.resume_checkpoints_dir:
# print(f'skip in progress run: {save_dir}')
# return
# generate train command
cmd_args = [
"python",
"/mnt/fairseq_data/fairseq-py/train.py",
"--distributed-world-size",
str(args.num_nodes * args.num_gpus),
"--tpu",
]
if not args.local:
cmd_args = [
"python",
"-m",
"torch_xla.distributed.xla_dist",
"--tpu",
args.tpu,
"--conda-env",
"torch-xla-nightly",
"--",
] + cmd_args
if args.data:
cmd_args += [args.data]
cmd_args += ["--save-dir", save_dir]
for hp in config.values():
if hp.name == "--fp16":
hp.name = "--bf16"
cmd_args.extend(map(str, hp.get_cli_args()))
cmd_args_str = " ".join(map(shlex.quote, cmd_args))
if args.dry_run:
dry_run(f"train command: {cmd_args_str}")
# initialize train log
train_log = os.path.join(save_dir, "train.log")
if not dry_run(f"create train.log at: {train_log}"):
with open(train_log, "a") as train_log_h:
train_log_h.write("")
os.chmod(train_log, 0o777)
if args.dry_run:
print("| dry-run: start training")
print(f"| dry-run: - run command: {cmd_args_str}")
else:
subprocess.Popen(cmd_args).wait()
return train_log
def has_started(save_dir):
train_log = os.path.join(save_dir, "train.log")
if not os.path.exists(train_log):
return False
return True
def get_random_port():
old_state = random.getstate()
random.seed()
port = random.randint(10000, 20000)
random.setstate(old_state)
return port
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep/tpu.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import os
import random
import shlex
import shutil
import subprocess
import tempfile
from collections import OrderedDict
from fairseq.file_io import PathManager
def main(get_grid, postprocess_hyperparams, args):
if args.manifold_has_user_data is None:
raise ValueError(
"fblearner backend requires --manifold-has-user-data be specified explicitly"
)
try:
from iopath.fb.manifold import ManifoldPathHandler
PathManager.register_handler(
ManifoldPathHandler(
max_parallel=16,
timeout_sec=1800,
has_user_data=args.manifold_has_user_data,
)
)
except KeyError:
print("| ManifoldPathHandler already registered.")
# compute all possible hyperparameter configurations
grid = get_grid(args)
grid_product = list(itertools.product(*[hp.values for hp in grid]))
# randomly shuffle configurations
random.seed(args.seed)
random.shuffle(grid_product)
sweep_config = {}
save_dirs = []
for i, hp_values in enumerate(grid_product):
config = OrderedDict()
for hp, value in zip(grid, hp_values):
config[hp.name] = hp
config[hp.name].current_value = value
# postprocess hyperparams
postprocess_hyperparams(args, config)
# setup training
x = setup_train(args, config)
if x is not None:
cmd_args = x["cmd_args"]
cmd_args.extend(
[
"--manifold-max-parallel",
str(args.manifold_max_parallel),
"--manifold-timeout-sec",
str(args.manifold_timeout_sec),
"--manifold-has-user-data",
str(args.manifold_has_user_data),
"--manifold-num-retries",
str(args.manifold_num_retries),
]
)
if args.manifold_ttl is not None:
cmd_args.extend(
[
"--manifold-ttl",
str(args.manifold_ttl),
]
)
if args.tensorboard_logdir:
cmd_args.extend(
[
"--tensorboard-logdir",
args.tensorboard_logdir,
"--tensorboard-manifold",
]
)
sweep_config[x["train_log_path"]] = cmd_args
save_dirs.append(x["save_dir"])
if i == args.num_trials - 1:
break
if len(save_dirs) == 0:
return
with tempfile.NamedTemporaryFile("w") as h:
config = {
"cmd_args": [],
"num_nodes": args.num_nodes,
"num_gpus_per_node": args.num_gpus,
"fp16": any(
"--fp16" in cmd_args or "--memory-efficient-fp16" in cmd_args
for cmd_args in sweep_config.values()
),
"sweep_config": sweep_config,
"gang_affinity": False,
"capabilities": getattr(args, "capabilities", None),
"memory": int(args.mem) if args.mem is not None else None,
}
h.write(json.dumps(config))
h.flush()
# build flow command
prefix = args.prefix.rstrip("_")
num_total_gpus = args.num_nodes * args.num_gpus
flow_cmd = [
"/usr/local/bin/flow-cli",
"canary",
#'--py-version', '>=3',
"--mode",
"opt",
"--entitlement",
str(args.entitlement),
"--run-as-secure-group",
args.run_as_secure_group,
"--parameters-file",
str(h.name),
"--name",
f"{prefix}.ngpu{num_total_gpus}",
"fairseq.train.train_workflow"
# TODO put stuff in --notes, e.g., repro command
]
cmd = " ".join(map(shlex.quote, flow_cmd))
if args.dry_run:
print("| dry-run: start remote training")
print(f"| dry-run: - run command: {cmd}")
else:
subprocess.Popen(
flow_cmd,
cwd=os.path.join(
"/data/users",
os.environ["USER"],
"fbsource/fbcode",
),
).wait()
def setup_train(args, config):
def dry_run(msg):
if args.dry_run:
print(f"| dry-run: {msg}")
return args.dry_run
# compute save_dir
save_dir_key = ".".join(
filter(
lambda save_dir_key: save_dir_key is not None,
[hp.get_save_dir_key() for hp in config.values()],
)
)
save_dir_key = save_dir_key.replace(",", "_")
num_total_gpus = args.num_nodes * args.num_gpus
subdir_name = f"{args.prefix}.{save_dir_key}.ngpu{num_total_gpus}"
save_dir = os.path.join(args.checkpoints_dir, subdir_name)
log_main_dir = (
args.log_main_dir if args.log_main_dir is not None else args.checkpoints_dir
)
log_dir = os.path.join(log_main_dir, subdir_name)
# create save directory if it doesn't exist
if not os.path.exists(save_dir):
if not dry_run(f"create directory: {save_dir}"):
PathManager.mkdirs(save_dir)
PathManager.chmod(save_dir, 0o777)
# copy baseline model
checkpoint_last = os.path.join(save_dir, "checkpoint_last.pt")
if (
args.baseline_model
and not os.path.exists(checkpoint_last)
and not dry_run(f"initialize with baseline model: {args.baseline_model}")
):
if not os.path.exists(args.baseline_model):
raise FileNotFoundError(
f"Cannot find baseline model: {args.baseline_model}"
)
shutil.copyfile(args.baseline_model, checkpoint_last)
# TODO make this work
# check for whether the run failed
# if has_finished(save_dir):
# if args.resume_finished:
# dry_run(f'restart previously finished run: {save_dir}')
# else:
# print(f'skip finished run (override with --resume-finished): {save_dir}')
# return
# elif has_failed(save_dir):
# if args.resume_failed:
# dry_run(f'resume failed run: {save_dir}')
# else:
# print(f'skip failed run (override with --resume-failed): {save_dir}')
# return
# elif has_started(save_dir):
if has_started(log_dir) and not (args.resume_finished or args.resume_failed):
# if not resume previous runs explicitly, take it as started
print(f"skip in progress run: {log_dir}")
return
# generate train command
cmd_args = [args.data, "--save-dir", save_dir, "--log-dir", log_dir]
for hp in config.values():
cmd_args.extend(map(str, hp.get_cli_args()))
if args.dry_run:
cmd_args_str = " ".join(cmd_args)
dry_run(f"train command: train.par {cmd_args_str}")
# initialize train log
train_log = os.path.join(log_dir, "train.log")
if not dry_run(f"create train.log at: {train_log}"):
PathManager.mkdirs(log_dir)
PathManager.chmod(log_dir, 0o777)
with PathManager.open(train_log, "a") as train_log_h:
train_log_h.write("")
PathManager.chmod(train_log, 0o777)
return {
"cmd_args": cmd_args,
"save_dir": save_dir,
"save_dir_key": save_dir_key,
"train_log_path": train_log,
}
# def has_finished(save_dir):
# train_log = os.path.join(save_dir, 'train.log')
# if not os.path.exists(train_log):
# return False
# with open(train_log, 'r') as h:
# lines = h.readlines()
# if len(lines) == 0:
# return False
# if 'done training' in lines[-1]:
# return True
# return False
#
#
# def has_failed(save_dir):
# if not os.path.exists(save_dir):
# return False
#
# # find max job id
# job_ids = []
# for fn in os.listdir(save_dir):
# if fn.startswith('train.stderr.'):
# job_ids.append(int(fn.split('.')[-1]))
# if len(job_ids) == 0:
# return False
# max_job_id = max(job_ids)
#
# def _has_failed(stderr_fn):
# with open(stderr_fn, 'r') as h:
# for line in h:
# if len(line.strip()) > 0:
# # assume that any output in stderr indicates an error
# return True
# return False
#
# return _has_failed(os.path.join(save_dir, f'train.stderr.{max_job_id}'))
def has_started(log_dir):
train_log = os.path.join(log_dir, "train.log")
if not os.path.exists(train_log):
return False
return True
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep/fblearner.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import os
import random
import shlex
import subprocess
import sys
import tempfile
import time
from collections import OrderedDict
import libfb.py.fbpkg as fbpkg
def main(get_grid, postprocess_hyperparams, args):
assert args.hostgroup is not None, "--hostgroup is required for Chronos jobs"
# compute all possible hyperparameter configurations
grid = get_grid(args)
grid_product = list(itertools.product(*[hp.values for hp in grid]))
# randomly shuffle configurations
random.seed(args.seed)
random.shuffle(grid_product)
# build train fbpkg
if not args.local and args.fbpkg is not None:
train_fbpkg = args.fbpkg
else:
# build train.par
if args.debug:
mode = "dbg"
elif args.local:
mode = "dev-nosan"
else:
mode = "opt"
buck_cmd = [
"/usr/local/bin/buck",
"build",
"@mode/" + mode,
"deeplearning/projects/fairseq-py:fb_train",
]
buck_cmd_str = " ".join(map(shlex.quote, buck_cmd))
if args.dry_run:
print(f"| dry-run: {buck_cmd_str}")
else:
subprocess.Popen(
buck_cmd,
cwd=os.path.join(
"/data/users",
os.environ["USER"],
"fbsource/fbcode",
),
).wait()
if args.dry_run:
print(f"| dry_run: build fbpkg")
elif args.local:
train_fbpkg = None
else:
train_fbpkg = fbpkg.build_version(
"fairseq",
build_config=fbpkg.BuildConfig(
paths=[
os.path.join(
"/data/users",
os.environ["USER"],
"fbsource/fbcode",
"buck-out/gen/deeplearning/projects/fairseq-py/fb_train.par",
)
],
),
ephemeral=True,
expire="2w",
)[0].identifier
if args.build_only:
sys.exit(0)
if args.dry_run:
train_fbpkg = "fb_train.par"
for i, hp_values in enumerate(grid_product):
config = OrderedDict()
for hp, value in zip(grid, hp_values):
config[hp.name] = hp
config[hp.name].current_value = value
# postprocess hyperparams
postprocess_hyperparams(args, config)
# launch training
launch_train(args, config, train_fbpkg)
if i == args.num_trials - 1:
break
def launch_train(args, config, train_fbpkg):
def dry_run(msg):
if args.dry_run:
print(f"| dry-run: {msg}")
return args.dry_run
# compute save_dir
save_dir_key = ".".join(
filter(
lambda save_dir_key: save_dir_key is not None,
[hp.get_save_dir_key() for hp in config.values()],
)
)
save_dir_key = save_dir_key.replace(",", "_")
num_total_gpus = args.num_nodes * args.num_gpus
x = int(time.time())
if not args.force_checkpoints_dir:
save_dir = os.path.join(
args.checkpoints_dir,
f"{args.prefix}.{save_dir_key}.ngpu{num_total_gpus}.{x}",
)
else:
save_dir = args.force_checkpoints_dir
# create save directory if it doesn't exist
if not os.path.exists(save_dir):
if not dry_run(f"create directory: {save_dir}"):
os.makedirs(save_dir)
os.chmod(save_dir, 0o777)
# if has_started(save_dir) and not args.resume_checkpoints_dir:
# print(f'skip in progress run: {save_dir}')
# return
# generate train command
cmd_args = []
if args.data:
cmd_args += [args.data]
cmd_args += ["--save-dir", save_dir]
for hp in config.values():
cmd_args.extend(map(str, hp.get_cli_args()))
cmd_args_str = " ".join(map(shlex.quote, cmd_args))
if args.dry_run:
dry_run(f"train command: fb_train.par {cmd_args_str}")
# initialize train log
train_log = os.path.join(save_dir, "train.log")
if not dry_run(f"create train.log at: {train_log}"):
with open(train_log, "a") as train_log_h:
train_log_h.write("")
os.chmod(train_log, 0o777)
# write script
script = get_script(
port=get_random_port(),
world_size=(args.num_nodes * args.num_gpus),
train_fbpkg=train_fbpkg,
cmd_args_str=cmd_args_str,
stdout=train_log,
stderr_prefix=os.path.join(save_dir, "train.stderr"),
baseline_model_src=args.baseline_model,
baseline_model_dst=os.path.join(save_dir, "checkpoint_last.pt"),
)
with tempfile.NamedTemporaryFile("w") as h:
if not dry_run(f"write script to: {h.name}\n\n{script}"):
h.write(script)
h.flush()
# crun
crun_cmd = [
"/usr/local/chronos/scripts/crun",
"--print-url",
"--mailwhen",
"onFailure",
"--hostgroup",
str(args.hostgroup),
"--gang-size",
str(args.num_nodes),
"-G",
str(args.num_gpus),
"-C",
str(10 * args.num_gpus),
"-M",
("-1" if args.num_gpus == 8 else str(29 * args.num_gpus)),
#'--host-filter', 'gpu_model=="Tesla V100-SXM2-16GB"',
h.name,
]
crun_cmd_str = " ".join(map(shlex.quote, crun_cmd))
env = os.environ.copy()
if args.local:
assert (
args.num_nodes == 1
), "distributed training cannot be combined with --local"
if not dry_run("start training locally"):
if "CUDA_VISIBLE_DEVICES" not in env:
env["CUDA_VISIBLE_DEVICES"] = ",".join(
map(str, range(args.num_gpus))
)
with tempfile.TemporaryDirectory() as tmpdir:
os.chmod(tmpdir, 0o777)
subprocess.Popen(
[
os.path.join(
"/data/users",
os.environ["USER"],
"fbsource/fbcode",
"buck-out/gen/deeplearning/projects/fairseq-py/fb_train.par",
)
]
+ cmd_args,
env=env,
cwd=tmpdir,
).wait()
else:
if args.dry_run:
print("| dry-run: start remote training")
print(f"| dry-run: - run command: {crun_cmd_str}")
else:
subprocess.Popen(crun_cmd).wait()
return train_log
def get_script(
port,
world_size,
train_fbpkg,
cmd_args_str,
stdout,
stderr_prefix,
baseline_model_src,
baseline_model_dst,
):
if baseline_model_src is not None:
link_baseline = f"""
if [ ! -e {baseline_model_dst} ]; then
cp {baseline_model_src} {baseline_model_dst}.tmp
mv {baseline_model_dst}.tmp {baseline_model_dst}
fi
"""
wait_baseline = f"""
while [ ! -e {baseline_model_dst} ]; do
sleep 5
done
"""
else:
link_baseline = ":"
wait_baseline = ":"
node_size = world_size if world_size < 8 else 8
if world_size > 1:
distributed = """\
--distributed-init-method zeus://$CHRONOS_JOB_ID \
--distributed-world-size $WORLD_SIZE \
--distributed-rank $RANK
"""
else:
distributed = ""
save_dir = os.path.dirname(baseline_model_dst)
return f"""#!/bin/bash
/usr/local/bin/fbpkg fetch {train_fbpkg}
#if [ $(nvidia-smi | grep "No running processes found" | wc -l) != "1" ]; then
# echo "Error: there are other running GPU processes"
# exit 1
#fi
export MASTER_ADDR=$(/usr/local/chronos/scripts/clist -F name,hostname -n | grep $(echo $CHRONOS_JOB_NAME | sed "s/_GANG_MEMBER$//") | cut -d' ' -f 3).facebook.com
export MASTER_PORT={port}
export WORLD_SIZE={world_size}
export RANK=$(({node_size}*CHRONOS_GANG_MEMBER_ID))
echo MASTER_ADDR: $MASTER_ADDR
echo MASTER_PORT: $MASTER_PORT
echo WORLD_SIZE: $WORLD_SIZE
echo RANK: $RANK
export NCCL_DEBUG=INFO
export NCCL_MIN_NRINGS=8
export NCCL_NSOCKS_PERTHREAD=1
export NCCL_SOCKET_NTHREADS=4
export NCCL_BUFFSIZE=16777216
# disable trees
export NCCL_TREE_THRESHOLD=0
## disable libgpumon
#export CUDA_INJECTION64_PATH=none
nvidia-smi
export
ulimit -a
ifconfig
ping6 -c 5 $MASTER_ADDR
if [ $RANK -eq 0 ]; then
{link_baseline}
else
{wait_baseline}
fi
mkdir -p {save_dir}
chmod 777 {save_dir}
LD_LIBRARY_PATH=/mnt/vol/gfsai-flash3-east/ai-group/users/myleott/nccl_2.4.8-1:$LD_LIBRARY_PATH ./fb_train.par {cmd_args_str} {distributed}
"""
def has_started(save_dir):
train_log = os.path.join(save_dir, "train.log")
if not os.path.exists(train_log):
return False
return True
def get_random_port():
old_state = random.getstate()
random.seed()
port = random.randint(10000, 20000)
random.setstate(old_state)
return port
|
bart_ls-main
|
fairseq-py/fb_sweep/sweep/chronos.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
"""
python fb_sweep/long_finetune/sweep_summ.py -p ss_large_all \
-d /fsx/xwhan/data/summscreen/fd-bin \
-g 8 -n 1 -t -1 --partition a100 --checkpoints-dir /fsx/xwhan/checkpoints/summscreen --resume-failed --snapshot-code \
--baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --no-wandb --local
python fb_sweep/long_finetune/sweep_block_baseline.py -p pubmed_nopre_baseline \
-d /fsx/xwhan/data/pubmed-dataset-bin \
-g 8 -n 2 -t -1 --partition a100 --checkpoints-dir /fsx/xwhan/checkpoints/pubmed --resume-failed --snapshot-code \
--baseline-model /data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool/model.pt --no-wandb --local
python fb_sweep/long_finetune/sweep_summ.py -p arxiv_large_all \
-d /fsx/xwhan/data/arxiv-dataset-bin \
-g 8 -n 2 -t -1 --partition a100 --checkpoints-dir /fsx/xwhan/checkpoints/arxiv --resume-failed --snapshot-code \
--baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --no-wandb
python fb_sweep/long_finetune/sweep_block_baseline.py -p gov_nopre_baseline \
-d /fsx/xwhan/data//gov_report-bin \
-g 8 -n 2 -t -1 --partition a100 --checkpoints-dir /fsx/xwhan/checkpoints/gov --resume-failed --snapshot-code \
--baseline-model /data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool/model.pt --no-wandb --no-wandb --local
python fb_sweep/long_finetune/sweep_block_baseline.py -p tvm_nopre_baseline \
-d /fsx/xwhan/data/summscreen/tv-bin \
-g 8 -n 1 -t -1 --partition lowpri --checkpoints-dir /fsx/xwhan/checkpoints/tvm --resume-failed --snapshot-code \
--baseline-model /data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool/model.pt --no-wandb --time 600
python fb_sweep/long_finetune/sweep_summ.py -p bsum_large_all \
-d /fsx/xwhan/data/booksum-chapters-bin \
-g 1 -n 1 -t -1 --partition a100 --checkpoints-dir /fsx/xwhan/checkpoints/bsum --resume-failed --snapshot-code \
--baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --no-wandb --local
"""
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
def get_grid(args):
grid = []
total_num_udpates = 10000
warmup_updates = 200
num_data_loaders = 2
# arch = "bart_base"
arch = 'bart_large'
task = "summarization"
criterion = "label_smoothed_cross_entropy"
lrs = [3e-4, 1e-4]
source, target = 'src', 'tgt'
bsz = 4 if 'base' in arch else 2
update_freq = 2 if 'base' in arch else 4
dropout = 0
if 'base' in arch:
bsz = 4
update_freq = 2
grid += [
hyperparam("--checkpoint-activations"),
]
else:
bsz, update_freq = 2, 4
grid += [
hyperparam("--checkpoint-activations"),
]
if 'arxiv' in args.data:
max_epoch = 7 # arxiv
generate_args = '{"beam": 4, "max_len_b": 300, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'
elif 'tv' in args.data:
total_num_udpates = 15000
max_epoch = 60
source, target = 'source', 'target'
generate_args = '{"beam": 4, "max_len_b": 700, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'
bsz, update_freq = 4, 2
elif 'summscreen' in args.data:
max_epoch = 130
dropout = [0, 0.2]
generate_args = '{"beam": 4, "max_len_b": 300, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'
elif 'gov_report' in args.data:
max_epoch = 40
generate_args = '{"beam": 4, "max_len_b": 450, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 60}'
elif 'booksum' in args.data:
max_epoch = 50
generate_args = '{"beam": 4, "max_len_b": 450, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'
elif 'pubmed' in args.data:
max_epoch = 12 #TODO needs to be increased for better performance from 10
generate_args = '{"beam": 4, "max_len_b": 400, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'
else:
assert False, "Max epoch not set for this dataset"
adam_eps = 1e-08
max_source_positions = 1024*16
# model to use
grid += [
# hyperparam(
# "--restore-file",
# f"{pretrain_path}seq2seq_100k.pt",
# # "/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k/model.pt"
# ),
hyperparam(
"--custom-dict",
# f'/data/home/xwhan/fairseq-py/checkpoints/bart.base.block8k.pool.t5/dict.txt' # t5 pretrain
# "/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool.t5.span3/dict.txt",
# "/checkpoints/xwhan/model_denoising/md_joint.loco_base.faststatsync.block_sw.pool4.ms8192.mt1024.uf4.mu100000.brk_complete_doc.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz1.adam.beta9999.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.sample0.2.noise0.1.spanlen5.ngpu64/dict.txt"
# f'{pretrain_path}/dict.txt'
"/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool/dict.txt"
)
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
# hyperparam("--train-subset", "train" if not args.local else "valid"),
hyperparam("--task", task),
hyperparam("--required-seq-len-multiple", 1024),
hyperparam("--criterion", criterion),
hyperparam("--max-epoch", max_epoch, save_dir_key=lambda val: f"mep{val}"),
hyperparam("--max-source-positions", max_source_positions, save_dir_key=lambda val: f"sl{val}"),
hyperparam("--max-target-positions", 1024),
hyperparam("--source-lang", source),
hyperparam("--target-lang", target),
hyperparam("--truncate-source"),
hyperparam("--truncate-target"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--pooling-layers", 4, save_dir_key=lambda val: f"pool{val}"),
hyperparam("--use-xformers"),
hyperparam("--attention-name", ['block_noglobal'], save_dir_key=lambda val: val),
# hyperparam("--xformer-config", '{"num_global_tokens": 64}')
]
grid += [
hyperparam("--batch-size", bsz, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--batch-size-valid", 2 if 'gov' in args.data else 4),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--required-batch-size-multiple", 1),
]
# regularization
grid += [
hyperparam("--dropout", dropout, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--seed", [3, 42], save_dir_key=lambda val: f"s{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.999)", save_dir_key=lambda val: "beta9999"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
# hyperparam("--fp16-scale-tolerance", 0.25),
#
]
# lr schedule4
grid += [
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", lrs, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "memfp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
valid_interval_updates = 500 if ('arxiv' in args.data or 'pubmed' in args.data) else 200
# validation and checkpoint settings
grid += [
hyperparam("--validate-interval", int(max_epoch // 5)),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--validate-interval-updates", 10 if args.local else valid_interval_updates),
hyperparam("--best-checkpoint-metric", "rouge_avg", save_dir_key=lambda val: f"cmetric{val}")
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--maximize-best-checkpoint-metric"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
hyperparam("--eval-rouge"),
hyperparam("--eval-rouge-args", generate_args),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/long_finetune/sweep_block_baseline.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
python fb_sweep/long_finetune/sweep_summ.py -p ss_large_all \
-d /fsx/xwhan/data/summscreen/fd-bin \
-g 8 -n 1 -t -1 --partition a100 --checkpoints-dir /fsx/xwhan/checkpoints/summscreen --resume-failed --snapshot-code \
--baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --no-wandb --local
"""
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
def get_grid(args):
grid = []
total_num_udpates = 10000
warmup_updates = 200
num_data_loaders = 4
# arch = "bart_base"
arch = 'bart_large'
task = "summarization"
criterion = "label_smoothed_cross_entropy"
lrs = [3e-4, 1e-4]
source, target = 'src', 'tgt'
dropout = 0
if 'base' in arch:
bsz = 4
update_freq = 2
grid += [
hyperparam("--checkpoint-activations"),
]
else:
bsz, update_freq = 2, 4
grid += [
hyperparam("--checkpoint-activations"),
]
if 'arxiv' in args.data:
lrs = [3e-4, 1e-4, 4e-4]
max_epoch = 8 # arxiv
generate_args = '{"beam": 4, "max_len_b": 300, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'
elif 'tv' in args.data:
total_num_udpates = 15000
dropout = [0]
max_epoch = 60
source, target = 'source', 'target'
generate_args = '{"beam": 4, "max_len_b": 700, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'
bsz, update_freq = 2, 2
elif 'summscreen' in args.data:
lrs = [5e-5, 3e-5]
warmup_updates = [200, 500, 1000]
max_epoch = 130
dropout = [0, 0.2]
generate_args = '{"beam": 4, "max_len_b": 300, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'
elif 'gov_report' in args.data:
lrs = [3e-4, 5e-5, 4e-4]
total_num_udpates = 15000
max_epoch = 70
generate_args = '{"beam": 4, "max_len_b": 450, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 60}'
elif 'booksum' in args.data:
max_epoch = 60
update_freq = 4
generate_args = '{"beam": 4, "max_len_b": 450, "lenpen": 4.0, "no_repeat_ngram_size": 3, "min_len": 20}'
elif 'pubmed' in args.data:
lrs = [1e-4]
max_epoch = 12 # TODO needs to be increased for better performance from 10
generate_args = '{"beam": 4, "max_len_b": 400, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'
else:
assert False, "Max epoch not set for this dataset"
adam_eps = 1e-08
max_source_positions = 1024*16
# model to use
grid += [
# hyperparam(
# "--restore-file",
# f"{pretrain_path}seq2seq_100k.pt",
# # "/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k/model.pt"
# ),
hyperparam(
"--custom-dict",
# f'/data/home/xwhan/fairseq-py/checkpoints/bart.base.block8k.pool.t5/dict.txt' # t5 pretrain
"/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool.t5.span3/dict.txt",
# "/checkpoints/xwhan/model_denoising/md_joint.loco_base.faststatsync.block_sw.pool4.ms8192.mt1024.uf4.mu100000.brk_complete_doc.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz1.adam.beta9999.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.sample0.2.noise0.1.spanlen5.ngpu64/dict.txt"
# f'{pretrain_path}/dict.txt'
# "/data/home/xwhan/fairseq-py/checkpoints/bart.base.block16k.pool/dict.txt"
)
]
# grid += [
# hyperparam("--ddp-backend", "no_c10d"),
# ]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
# hyperparam("--train-subset", "train" if not args.local else "valid"),
hyperparam("--task", task),
hyperparam("--required-seq-len-multiple", 1024),
hyperparam("--criterion", criterion),
hyperparam("--max-epoch", max_epoch, save_dir_key=lambda val: f"mep{val}"),
hyperparam("--max-source-positions", max_source_positions, save_dir_key=lambda val: f"sl{val}"),
hyperparam("--max-target-positions", 1024),
hyperparam("--source-lang", source),
hyperparam("--target-lang", target),
hyperparam("--truncate-source"),
hyperparam("--truncate-target"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--pooling-layers", 4, save_dir_key=lambda val: f"pool{val}"),
hyperparam("--use-xformers"),
hyperparam("--attention-name", ['block_noglobal'], save_dir_key=lambda val: val),
]
grid += [
hyperparam("--batch-size", bsz, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--batch-size-valid", 2 if 'gov' in args.data else 4),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--required-batch-size-multiple", 1),
]
# regularization
grid += [
hyperparam("--dropout", dropout, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", [0.1, 0], save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--seed", [3, 42], save_dir_key=lambda val: f"s{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.999)", save_dir_key=lambda val: "beta9999"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", [0.1, 0], save_dir_key=lambda val: f"clip{val}"),
]
# lr schedule4
grid += [
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", lrs, save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "memfp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
valid_interval_updates = 500 if ('arxiv' in args.data or 'pubmed' in args.data or 'govreport' in args.data) else 200
# validation and checkpoint settings
grid += [
hyperparam("--validate-interval", int(max_epoch // 5)),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--validate-interval-updates", 10 if args.local else valid_interval_updates),
hyperparam("--best-checkpoint-metric", "rouge_avg", save_dir_key=lambda val: f"cmetric{val}")
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--maximize-best-checkpoint-metric"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
hyperparam("--eval-rouge"),
hyperparam("--eval-rouge-args", generate_args),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/long_finetune/sweep_summ.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
"""
python fb_sweep/long_finetune/sweep_qmsum.py -p qmsum_best_r3f \
-d /fsx/xwhan/data/QMSum/data/raw-bin \
-g 8 -n 1 -t -1 --partition hipri --checkpoints-dir /checkpoints/xwhan/qmsum --resume-failed --snapshot-code \
--baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --local --time 1440
### test model denoising
python fb_sweep/long_finetune/sweep_qmsum.py -p qmsum_md \
-d /fsx/xwhan/data/QMSum/data/raw-bin \
-g 8 -n 1 -t -1 --partition lowpri --checkpoints-dir /checkpoints/xwhan/qmsum --resume-failed --snapshot-code \
--baseline-model /data/home/xwhan/checkpoints/model_denoising/md_assembled_c4.loco_base.faststatsync.block_noglobal.pool4.ms16384.mt1024.uf2.mu100000.brk_complete.dr0.0.atdr0.0.actdr0.0.wd0.01.bsz1.adam.beta9999.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.sample0.2.noise0.0625.dynaspan.ngpu128/model_100k.pt --time 1440
"""
def get_grid(args):
grid = []
# total_num_udpates = 5000
total_num_udpates = 8000 # larger for r3f
warmup_updates = [100, 200]
num_data_loaders = 4
arch = "bart_large"
task = "summarization"
criterion = "label_smoothed_cross_entropy"
adam_eps = 1e-08
max_source_positions = 1024*16
max_epoch = 150
# which model to use
grid += [
# hyperparam("--train-subset", "train" if not args.local else "valid"),
# # "/checkpoints/xwhan/model_denoising/md_joint_g512.loco_large.ms8192.ts8192.mt1024.uf2.mu100000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.beta9999.eps1e-06.clip0.1.s42.lr3e-05.warm500.memfp16.sample0.2.noise0.1.ngpu32/seq2seq_100k.pt",
# ),
# hyperparam(
# "--custom-dict",
# "/checkpoints/xwhan/model_denoising/md_joint_pool.loco_large.pool4.ms8192.ts8192.mt1024.uf1.mu100000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.beta9999.eps1e-06.clip0.1.s42.lr3e-05.warm500.memfp16.sample0.2.noise0.1.ngpu64/dict.txt"
# # f'/checkpoints/xwhan/model_denoising/md_joint_g512.loco_large.ms8192.ts8192.mt1024.uf2.mu100000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.beta9999.eps1e-06.clip0.1.s42.lr3e-05.warm500.memfp16.sample0.2.noise0.1.ngpu32/dict.txt'
# )
hyperparam(
"--custom-dict",
# f'/data/home/xwhan/fairseq-py/checkpoints/bart.base.block16k/dict.txt',
# '/data/home/xwhan/fairseq-py/checkpoints/md.base.16k.pool4.span3.a6/dict.txt'
'/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool.t5.span3/dict.txt'
)
]
if 'base' in arch:
bsz = 4
update_freq = 2
grid += [
hyperparam("--checkpoint-activations"),
]
else:
bsz, update_freq = 4, 1
grid += [
hyperparam("--checkpoint-activations"),
]
# better finetuning
criterion = "label_smoothed_cross_entropy_r3f"
bsz = bsz//2
update_freq = update_freq*2
grid += [
hyperparam("--noise-type", ["uniform"], save_dir_key=lambda val: f"noise{val}"),
hyperparam("--r3f-lambda", [0.01], save_dir_key=lambda val: f"r3f{val}"),
hyperparam("--user-dir", "examples/rxf/rxf_src"),
hyperparam("--ddp-backend", "no_c10d"),
hyperparam("--reset-optimizer"),
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--required-seq-len-multiple", 1024),
hyperparam("--task", task),
hyperparam("--criterion", criterion),
hyperparam("--max-epoch", max_epoch, save_dir_key=lambda val: f"mep{val}"),
hyperparam("--max-source-positions", max_source_positions, save_dir_key=lambda val: f"sl{val}"),
hyperparam("--max-target-positions", 1024),
hyperparam("--source-lang", "source"),
hyperparam("--target-lang", "target"),
hyperparam("--truncate-source"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--query-based"),
hyperparam("--max-query-positions", 45, save_dir_key=lambda val: f"mq{val}"),
hyperparam("--pad-query", 0, save_dir_key=lambda val: f"pad_q{val}"),
hyperparam("--input-pattern", ['mixed'], save_dir_key=lambda val: f"{val}"),
hyperparam("--use-xformers"),
hyperparam("--pooling-layers",4, save_dir_key=lambda val: f"pool{val}"),
hyperparam("--attention-name", ['block_noglobal'], save_dir_key=lambda val: val),
]
grid += [
hyperparam("--batch-size", bsz, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--batch-size-valid", 4),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--required-batch-size-multiple", 1),
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--seed", [3, 5], save_dir_key=lambda val: f"s{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.999)", save_dir_key=lambda val: "beta9999"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
]
# lr scheduler
grid += [
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", [5e-5, 1e-4], save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "fp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
# validation and checkpoint settings
grid += [
hyperparam("--validate-interval", int(max_epoch // 5)),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--validate-interval-updates", 200 if not args.local else 10),
hyperparam("--best-checkpoint-metric", "rouge_avg", save_dir_key=lambda val: f"cmetric{val}")
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--maximize-best-checkpoint-metric"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
hyperparam("--eval-rouge"),
hyperparam("--eval-rouge-args", '{"beam": 4, "max_len_b": 256, "lenpen": 2.0, "no_repeat_ngram_size": 3, "min_len": 20}'),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/long_finetune/sweep_qmsum.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
"""
### r3f and ft on the scrolls
python fb_sweep/long_finetune/sweep_nli.py -p contract_r3f \
-d /fsx/xwhan/data/scrolls/contract_nli/bin \
-g 8 -n 1 -t -1 --partition lowpri --checkpoints-dir /checkpoints/xwhan/contract --resume-failed --snapshot-code --baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --time 1440 --local
"""
def get_grid(args):
grid = []
total_num_udpates = 8000
warmup_updates = 200
# warmup_updates = 800
num_data_loaders = 8
arch = "bart_large"
# arch = "bart_prelayernorm"
task = "qa"
criterion = "label_smoothed_cross_entropy"
adam_eps = 1e-08
max_source_positions = 1024*16
update_freq = 1
max_q_pos = 35
generate_args = '{"beam": 4, "max_len_b": 6, "lenpen": 3.0, "no_repeat_ngram_size": 3}'
max_epochs = 60
bsz = 4
# better finetuning
criterion = "label_smoothed_cross_entropy_r3f"
# bsz = bsz//2
# update_freq = update_freq*2
grid += [
hyperparam("--noise-type", ["uniform"], save_dir_key=lambda val: f"noise{val}"),
hyperparam("--r3f-lambda", [0.01], save_dir_key=lambda val: f"r3f{val}"),
hyperparam("--user-dir", "examples/rxf/rxf_src"),
hyperparam("--ddp-backend", "no_c10d"),
hyperparam("--reset-optimizer"),
]
# which model to use
grid += [
hyperparam(
"--custom-dict",
'/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool.t5.span3/dict.txt'
)
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--task", task),
hyperparam("--required-seq-len-multiple", 1024),
hyperparam("--criterion", criterion),
hyperparam("--max-epoch", max_epochs, save_dir_key=lambda val: f"mep{val}"),
hyperparam("--max-source-positions", max_source_positions, save_dir_key=lambda val: f"sl{val}"),
hyperparam("--max-target-positions", 1024),
hyperparam("--source-lang", "source"),
hyperparam("--target-lang", "target"),
hyperparam("--truncate-source"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--max-query-positions", max_q_pos), # narrativeqa 35
hyperparam("--pad-query", 0, save_dir_key=lambda val: f"pad_q{val}"),
hyperparam("--input-pattern", ['mixed'], save_dir_key=lambda val: f"ip{val}"),
hyperparam("--use-xformers"),
hyperparam("--pooling-layers", 4, save_dir_key=lambda val: f"pool{val}"),
hyperparam("--attention-name", ['block_noglobal'], save_dir_key=lambda val: val),
# hyperparam("--xformer-config", '{"num_global_tokens": 64}')
]
grid += [
hyperparam("--batch-size", bsz, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--batch-size-valid", 4),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--required-batch-size-multiple", 1),
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--seed", [3, 42], save_dir_key=lambda val: f"s{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.999)"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--checkpoint-activations"),
]
# lr scheduler
grid += [
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", [5e-5, 3e-5], save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "memfp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
metric = 'em'
# validation and checkpoint settings
grid += [
hyperparam("--validate-interval", int(max_epochs // 5)),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--validate-interval-updates", 200 if not args.local else 10),
hyperparam("--best-checkpoint-metric", metric, save_dir_key=lambda val: f"cmetric{val}")
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--maximize-best-checkpoint-metric"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
hyperparam("--eval-f1"),
hyperparam("--generate-args", generate_args),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/long_finetune/sweep_nli.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
from fb_sweep import sweep
from fb_sweep.sweep import hyperparam
"""
python fb_sweep/long_finetune/sweep_qa.py -p narrative_best \
-d /fsx/xwhan/data/narrativeqa-bin \
-g 8 -n 1 -t -1 --partition lowpri --checkpoints-dir /checkpoints/xwhan/narrativeqa --resume-failed --snapshot-code --baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --time 1440
python fb_sweep/long_finetune/sweep_qa.py -p qasper_best \
-d /fsx/xwhan/data/qasper-bin \
-g 8 -n 1 -t -1 --partition lowpri --checkpoints-dir /checkpoints/xwhan/qasper --resume-failed --snapshot-code --baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --time 1440
python fb_sweep/long_finetune/sweep_qa.py -p qasper_block \
-d /fsx/xwhan/data/qasper-bin \
-g 8 -n 1 -t -1 --partition hipri --checkpoints-dir /checkpoints/xwhan/qasper --resume-failed --snapshot-code --baseline-model /data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k/model.pt --local
python fb_sweep/long_finetune/sweep_qa.py -p narrative_block \
-d /fsx/xwhan/data/narrativeqa-bin \
-g 8 -n 1 -t -1 --partition hipri --checkpoints-dir /checkpoints/xwhan/narrativeqa --resume-failed --snapshot-code --baseline-model /data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k/model.pt --time 1440
python fb_sweep/long_finetune/sweep_qa.py -p quality_best -d /fsx/xwhan/data/quality/bin -g 8 -n 1 -t -1 --partition lowpri --checkpoints-dir /checkpoints/xwhan/quality --resume-failed --snapshot-code --baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --time 1440
python fb_sweep/long_finetune/sweep_qa.py -p quality_block -d /fsx/xwhan/data/quality/bin -g 8 -n 1 -t -1 --partition hipri --checkpoints-dir /checkpoints/xwhan/quality --resume-failed --snapshot-code --baseline-model /data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k/model.pt --time 1440 --local
### test the model-denoising variant
python fb_sweep/long_finetune/sweep_qa.py -p qasper_md \
-d /fsx/xwhan/data/qasper-bin \
-g 8 -n 1 -t -1 --partition hipri --checkpoints-dir /checkpoints/xwhan/qasper --resume-failed --snapshot-code --baseline-model /data/home/xwhan/checkpoints/model_denoising/md_assembled_c4.loco_base.faststatsync.block_noglobal.pool4.ms16384.mt1024.uf2.mu100000.brk_complete.dr0.0.atdr0.0.actdr0.0.wd0.01.bsz1.adam.beta9999.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.sample0.2.noise0.0625.dynaspan.ngpu128/model_100k.pt --local
### r3f and ft on the scrolls
python fb_sweep/long_finetune/sweep_qa.py -p qasper_best_r3f \
-d /fsx/xwhan/data/scrolls/qasper/bin \
-g 8 -n 1 -t -1 --partition hipri --checkpoints-dir /checkpoints/xwhan/qasper --resume-failed --snapshot-code --baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --local --time 1440
python fb_sweep/long_finetune/sweep_qa.py -p quality_best_no_r3f -d /fsx/xwhan/data/scrolls/quality/bin -g 8 -n 1 -t -1 --partition lowpri --checkpoints-dir /checkpoints/xwhan/quality --resume-failed --snapshot-code --baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --time 1440
python fb_sweep/long_finetune/sweep_qa.py -p narrative_no_r3f \
-d /fsx/xwhan/data/scrolls/narrative_qa/bin \
-g 8 -n 1 -t -1 --partition lowpri --checkpoints-dir /checkpoints/xwhan/narrativeqa --resume-failed --snapshot-code --baseline-model /fsx/xwhan/checkpoints/long_denoising/t5_all_corpus.bart_large.faststatsync.pool4.block_noglobal.ms16384.mt1024.uf1.mu500000.brk_complete.dr0.1.atdr0.1.actdr0.0.wd0.01.bsz4.adam.eps1e-06.clip0.1.s42.lr0.0001.warm500.memfp16.noise0.0625.dynaspan.ngpu128/model_100k.pt --time 1440 --local
"""
def get_grid(args):
grid = []
total_num_udpates = 8000
warmup_updates = 200
# warmup_updates = 800
num_data_loaders = 8
arch = "bart_large"
# arch = "bart_prelayernorm"
task = "qa"
criterion = "label_smoothed_cross_entropy"
adam_eps = 1e-08
max_source_positions = 1024*16
# bs_xformer_config = '{"block_size": 1024, "max_seq_len": 16384, "global_blocks": 2}'
update_freq = 1
if 'narrative' in args.data:
max_q_pos = 32
generate_args = '{"beam": 4, "max_len_b": 20, "lenpen": 3.0, "no_repeat_ngram_size": 3}'
max_epochs = 8
update_freq = 4
elif 'qasper' in args.data:
max_q_pos = 32
generate_args = '{"beam": 4, "max_len_b": 80, "lenpen": 3.0, "no_repeat_ngram_size": 3}' # 100 0.013 left; 80 0.026 left; 50 0.08 left
max_epochs = 120
elif 'quality' in args.data:
max_q_pos = 200
generate_args = '{"beam": 4, "max_len_b": 50, "lenpen": 3.0, "no_repeat_ngram_size": 3}'
max_epochs = 60
bsz = 2
# better finetuning
# criterion = "label_smoothed_cross_entropy_r3f"
# bsz = bsz//2
# update_freq = update_freq*2
# grid += [
# hyperparam("--noise-type", ["uniform"], save_dir_key=lambda val: f"noise{val}"),
# hyperparam("--r3f-lambda", [0.01], save_dir_key=lambda val: f"r3f{val}"),
# hyperparam("--user-dir", "examples/rxf/rxf_src"),
# hyperparam("--ddp-backend", "no_c10d"),
# hyperparam("--reset-optimizer"),
# ]
# which model to use
grid += [
hyperparam(
"--custom-dict",
# "/data/home/xwhan/fairseq-py/checkpoints/bart.base.block16k.pool/dict.txt",
# "/data/home/xwhan/fairseq-py/checkpoints/bart.base.block8k.pool.t5/dict.txt",
'/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k.pool.t5.span3/dict.txt'
# "/data/home/xwhan/fairseq-py/checkpoints/bart.large.block16k/dict.txt"
# '/data/home/xwhan/fairseq-py/checkpoints/md.base.16k.pool4.span3.a6/dict.txt'
)
]
# model settings
grid += [
hyperparam("--arch", arch, save_dir_key=lambda val: val),
hyperparam("--task", task),
hyperparam("--required-seq-len-multiple", 1024),
hyperparam("--criterion", criterion),
hyperparam("--max-epoch", max_epochs, save_dir_key=lambda val: f"mep{val}"),
hyperparam("--max-source-positions", max_source_positions, save_dir_key=lambda val: f"sl{val}"),
hyperparam("--max-target-positions", 1024),
hyperparam("--source-lang", "source"),
hyperparam("--target-lang", "target"),
hyperparam("--truncate-source"),
hyperparam("--label-smoothing", 0.1, save_dir_key=lambda val: f"ls{val}"),
hyperparam("--max-query-positions", max_q_pos), # narrativeqa 35
hyperparam("--pad-query", 0, save_dir_key=lambda val: f"pad_q{val}"),
hyperparam("--input-pattern", ['mixed'], save_dir_key=lambda val: f"ip{val}"),
hyperparam("--use-xformers"),
hyperparam("--pooling-layers", 4, save_dir_key=lambda val: f"pool{val}"),
hyperparam("--attention-name", ['block_noglobal'], save_dir_key=lambda val: val),
# hyperparam("--xformer-config", '{"num_global_tokens": 64}')
]
grid += [
hyperparam("--batch-size", bsz, save_dir_key=lambda val: f"mt{val}"),
hyperparam("--batch-size-valid", 4),
hyperparam("--update-freq", update_freq, save_dir_key=lambda val: f"uf{val}"),
hyperparam("--required-batch-size-multiple", 1),
]
# regularization
grid += [
hyperparam("--dropout", 0.1, save_dir_key=lambda val: f"dr{val}"),
hyperparam("--attention-dropout", 0.1, save_dir_key=lambda val: f"atdr{val}"),
hyperparam("--relu-dropout", 0.0, save_dir_key=lambda val: f"actdr{val}"),
hyperparam("--weight-decay", 0.01, save_dir_key=lambda val: f"wd{val}"),
]
# optimization settings
grid += [
hyperparam("--seed", [3, 42], save_dir_key=lambda val: f"s{val}"),
hyperparam("--optimizer", "adam", save_dir_key=lambda val: val),
hyperparam("--adam-betas", "(0.9, 0.999)"),
hyperparam("--adam-eps", adam_eps, save_dir_key=lambda val: f"eps{val}"),
hyperparam("--clip-norm", 0.1, save_dir_key=lambda val: f"clip{val}"),
hyperparam("--checkpoint-activations"),
]
# lr scheduler
grid += [
hyperparam("--lr-scheduler", "polynomial_decay"),
hyperparam("--lr", [5e-5, 3e-5], save_dir_key=lambda val: f"lr{val}"),
hyperparam("--total-num-update", total_num_udpates, save_dir_key=lambda val: f"mu{val}"),
hyperparam(
"--warmup-updates", warmup_updates, save_dir_key=lambda val: f"warm{val}"
),
]
grid += [
hyperparam("--memory-efficient-fp16", save_dir_key=lambda val: "memfp16"),
]
# data loading settings
grid += [
hyperparam("--num-workers", num_data_loaders),
]
metric = 'em' if 'quality' in args.data else 'f1'
# validation and checkpoint settings
grid += [
hyperparam("--validate-interval", int(max_epochs // 5)),
hyperparam("--no-epoch-checkpoints"),
hyperparam("--validate-interval-updates", 200 if not args.local else 10),
hyperparam("--best-checkpoint-metric", metric, save_dir_key=lambda val: f"cmetric{val}")
]
# logging settings
grid += [
hyperparam("--skip-invalid-size-inputs-valid-test"),
hyperparam("--maximize-best-checkpoint-metric"),
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 10),
hyperparam("--eval-f1"),
hyperparam("--generate-args", generate_args),
]
if args.local:
grid += [
hyperparam("--log-format", "json"),
hyperparam("--log-interval", 1),
]
return grid
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
|
bart_ls-main
|
fairseq-py/fb_sweep/long_finetune/sweep_qa.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import string
import tempfile
import unittest
import torch
from fairseq import tokenizer
from fairseq.data import Dictionary
class TestDictionary(unittest.TestCase):
def test_finalize(self):
txt = [
"A B C D",
"B C D",
"C D",
"D",
]
ref_ids1 = list(
map(
torch.IntTensor,
[
[4, 5, 6, 7, 2],
[5, 6, 7, 2],
[6, 7, 2],
[7, 2],
],
)
)
ref_ids2 = list(
map(
torch.IntTensor,
[
[7, 6, 5, 4, 2],
[6, 5, 4, 2],
[5, 4, 2],
[4, 2],
],
)
)
# build dictionary
d = Dictionary()
for line in txt:
d.encode_line(line, add_if_not_exist=True)
def get_ids(dictionary):
ids = []
for line in txt:
ids.append(dictionary.encode_line(line, add_if_not_exist=False))
return ids
def assertMatch(ids, ref_ids):
for toks, ref_toks in zip(ids, ref_ids):
self.assertEqual(toks.size(), ref_toks.size())
self.assertEqual(0, (toks != ref_toks).sum().item())
ids = get_ids(d)
assertMatch(ids, ref_ids1)
# check finalized dictionary
d.finalize()
finalized_ids = get_ids(d)
assertMatch(finalized_ids, ref_ids2)
# write to disk and reload
with tempfile.NamedTemporaryFile(mode="w") as tmp_dict:
d.save(tmp_dict.name)
d = Dictionary.load(tmp_dict.name)
reload_ids = get_ids(d)
assertMatch(reload_ids, ref_ids2)
assertMatch(finalized_ids, reload_ids)
def test_overwrite(self):
# for example, Camembert overwrites <unk>, <s> and </s>
dict_file = io.StringIO(
"<unk> 999 #fairseq:overwrite\n"
"<s> 999 #fairseq:overwrite\n"
"</s> 999 #fairseq:overwrite\n"
", 999\n"
"▁de 999\n"
)
d = Dictionary()
d.add_from_file(dict_file)
self.assertEqual(d.index("<pad>"), 1)
self.assertEqual(d.index("foo"), 3)
self.assertEqual(d.index("<unk>"), 4)
self.assertEqual(d.index("<s>"), 5)
self.assertEqual(d.index("</s>"), 6)
self.assertEqual(d.index(","), 7)
self.assertEqual(d.index("▁de"), 8)
def test_no_overwrite(self):
# for example, Camembert overwrites <unk>, <s> and </s>
dict_file = io.StringIO(
"<unk> 999\n" "<s> 999\n" "</s> 999\n" ", 999\n" "▁de 999\n"
)
d = Dictionary()
with self.assertRaisesRegex(RuntimeError, "Duplicate"):
d.add_from_file(dict_file)
def test_space(self):
# for example, character models treat space as a symbol
dict_file = io.StringIO(" 999\n" "a 999\n" "b 999\n")
d = Dictionary()
d.add_from_file(dict_file)
self.assertEqual(d.index(" "), 4)
self.assertEqual(d.index("a"), 5)
self.assertEqual(d.index("b"), 6)
def test_add_file_to_dict(self):
counts = {}
num_lines = 100
per_line = 10
with tempfile.TemporaryDirectory("test_sampling") as data_dir:
filename = os.path.join(data_dir, "dummy.txt")
with open(filename, "w", encoding="utf-8") as data:
for c in string.ascii_letters:
line = f"{c} " * per_line
for _ in range(num_lines):
data.write(f"{line}\n")
counts[c] = per_line * num_lines
per_line += 5
dict = Dictionary()
Dictionary.add_file_to_dictionary(
filename, dict, tokenizer.tokenize_line, 10
)
dict.finalize(threshold=0, nwords=-1, padding_factor=8)
for c in string.ascii_letters:
count = dict.get_count(dict.index(c))
self.assertEqual(
counts[c], count, f"{c} count is {count} but should be {counts[c]}"
)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_dictionary.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import unittest
from typing import Any, Dict, Sequence
import fairseq
import fairseq.options
import fairseq.tasks
import torch
from tests.utils import dummy_dictionary
VOCAB_SIZE = 100
@fairseq.tasks.register_task("fake_task")
class FakeTask(fairseq.tasks.LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = dummy_dictionary(VOCAB_SIZE - 4)
assert len(self.dictionary) == VOCAB_SIZE
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
@functools.lru_cache()
def get_toy_model(
device: str,
architecture: str = "roberta_enc_dec",
**extra_args: Any,
):
assert device in ("gpu", "cpu")
kwargs = {
"arch": architecture,
# Use characteristics dimensions
"encoder_layers": 3,
"encoder_embed_dim": 12,
"encoder_ffn_embed_dim": 14,
"encoder_attention_heads": 4,
"decoder_layers": 3,
"decoder_embed_dim": 12,
"decoder_ffn_embed_dim": 14,
"decoder_attention_heads": 4,
# Disable dropout so we have comparable tests.
"dropout": 0,
"attention_dropout": 0,
"activation_dropout": 0,
"encoder_layerdrop": 0,
# required args
"tokens_per_sample": 256,
"data": "/tmp/test_roberta",
}
kwargs.update(extra_args)
fake_task = FakeTask(kwargs)
args = fairseq.options.get_args(
task="online_backtranslation",
mono_langs="en,ro",
valid_lang_pairs="en-ro",
**kwargs,
)
torch.manual_seed(0)
model = fake_task.build_model(args)
if device == "gpu":
model.cuda()
return fake_task, model
def mk_sample(
lang: str, device: str, tok: Sequence[int] = None, batch_size: int = 2
) -> Dict[str, Any]:
assert device in ("gpu", "cpu")
if not tok:
if lang == "en":
tok = [10, 11, 12, 13, 14, 15, 2]
else:
tok = [20, 21, 22, 23, 24, 25, 26, 27, 2]
batch = torch.stack([torch.tensor(tok, dtype=torch.long)] * batch_size)
if device == "gpu":
batch = batch.cuda()
sample = {
"net_input": {
"src_tokens": batch,
"prev_output_tokens": batch,
"src_lengths": torch.tensor(
[len(tok)] * batch_size, dtype=torch.long, device=batch.device
),
},
"target": batch[:, 1:],
}
return sample
def cpu_gpu(fn):
def helper(self):
fn(self, "cpu")
if torch.cuda.is_available():
fn(self, "gpu")
return helper
def architectures(fn):
def helper(self):
for arch in ["roberta_enc_dec", "transformer"]:
fn(self, arch)
return helper
class RobertaTest(unittest.TestCase):
def assertTensorEqual(self, t1, t2, delta: float = 1e-6):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
if delta == 0.0:
self.assertEqual(t1.ne(t2).long().sum(), 0)
else:
self.assertEqual(((t2 - t1).abs() > delta).long().sum(), 0)
def assertSharing(self, model, link_groups: Sequence[Sequence[str]]):
ids = {}
for group in link_groups:
group_ids = {name: id(params(model, name)) for name in group}
shared_id = group_ids[group[0]]
self.assertEqual(group_ids, {name: shared_id for name in group})
self.assertNotIn(shared_id, ids)
ids[shared_id] = group
def test_roberta_shared_params(self):
_, roberta = get_toy_model("cpu", architecture="roberta")
self.assertSharing(
roberta,
[
[
"encoder.sentence_encoder.embed_tokens.weight",
"encoder.lm_head.weight",
]
],
)
_, roberta = get_toy_model(
"cpu", architecture="roberta", untie_weights_roberta=True
)
self.assertSharing(
roberta,
[
["encoder.sentence_encoder.embed_tokens.weight"],
["encoder.lm_head.weight"],
],
)
def test_roberta_enc_dec_shared_params(self):
# 3 distinct embeddings
_, enc_dec = get_toy_model("cpu", architecture="roberta_enc_dec")
self.assertSharing(
enc_dec,
[
["encoder.embed_tokens.weight"],
["decoder.embed_tokens.weight"],
["decoder.output_projection.weight"],
],
)
# 2 distinct embeddings, one for encoder, one for decoder
_, enc_dec = get_toy_model(
"cpu", architecture="roberta_enc_dec", share_decoder_input_output_embed=True
)
self.assertSharing(
enc_dec,
[
["encoder.embed_tokens.weight"],
[
"decoder.embed_tokens.weight",
"decoder.output_projection.weight",
],
],
)
# shared embeddings
_, enc_dec = get_toy_model(
"cpu", architecture="roberta_enc_dec", share_all_embeddings=True
)
self.assertSharing(
enc_dec,
[
[
"encoder.embed_tokens.weight",
"decoder.embed_tokens.weight",
"decoder.output_projection.weight",
]
],
)
def test_roberta_max_positions_is_correctly_set(self):
device = "cpu"
task, model = get_toy_model(device)
max_pos = model.max_decoder_positions()
self.assertEqual(max_pos, 256)
self.assertEqual(max_pos, model.decoder.max_positions())
self.assertEqual(max_pos, model.encoder.max_positions())
self.assertEqual(max_pos, model.encoder.embed_positions.max_positions)
sentence = [31 for _ in range(max_pos)]
sample = mk_sample("en", device, sentence, batch_size=1)
self.assertEqual(list(sample["net_input"]["src_lengths"]), [max_pos])
self.assertEqual(len(sample["net_input"]["src_tokens"][0]), max_pos)
x, _ = model.forward(**sample["net_input"])
self.assertEqual(x.shape, (1, max_pos, VOCAB_SIZE))
@cpu_gpu
def test_roberta_forward_backward(self, device: str):
_, model = get_toy_model(device)
sample = mk_sample("en", device)
en_tokens = sample["net_input"]["src_tokens"]
(bs, l) = en_tokens.shape
# Forward
logits, _ = model(**sample["net_input"])
self.assertEqual(logits.shape, (bs, l, VOCAB_SIZE))
# Backward
loss = logits.sum()
loss.backward()
@cpu_gpu
def test_roberta_forward_backward_bs1(self, device: str):
_, model = get_toy_model(device)
sample = mk_sample("en", device, batch_size=1)
o, _ = model.forward(**sample["net_input"])
loss = o.sum()
sample2 = mk_sample("ro", device, batch_size=1)
o, _ = model.forward(**sample2["net_input"])
loss += o.sum()
loss.backward()
@cpu_gpu
def test_roberta_batching(self, device: str):
"""
Checks that the batch of size 2 give twice the same results than the batch of size 1.
"""
_, model = get_toy_model(device)
sample = mk_sample("en", device, batch_size=1)
slen = sample["net_input"]["src_lengths"][0]
sample2 = mk_sample("en", device, batch_size=2)
with torch.no_grad():
z = model.encoder.forward(
sample["net_input"]["src_tokens"], sample["net_input"]["src_lengths"]
)
z = z["encoder_out"][-1]
logits, _ = model.forward(**sample["net_input"])
z2 = model.encoder.forward(
sample2["net_input"]["src_tokens"], sample["net_input"]["src_lengths"]
)
z2 = z2["encoder_out"][-1]
logits2, _ = model.forward(**sample2["net_input"])
self.assertEqual(z.shape, (slen, 1, 12))
self.assertEqual(z2.shape, (slen, 2, 12))
self.assertTensorEqual(logits2[0], logits2[1])
self.assertTensorEqual(logits[0], logits2[0])
@cpu_gpu
def test_roberta_incremental_decoder(self, device: str):
"""
Checks that incremental decoding yields the same result than non incremental one.
"""
task, model = get_toy_model(device)
en_sample = mk_sample("en", device)
en_tokens = en_sample["net_input"]["src_tokens"]
ro_sample = mk_sample("ro", device)
ro_tokens = ro_sample["net_input"]["src_tokens"]
en_enc = model.encoder.forward(
en_tokens, src_lengths=en_sample["net_input"]["src_lengths"]
)
(bs, tgt_len) = ro_tokens.shape
# Decode without incremental state
ro_dec, _ = model.decoder.forward(ro_tokens, encoder_out=en_enc)
self.assertEqual(ro_dec.shape, (bs, tgt_len, VOCAB_SIZE))
self.assertTensorEqual(ro_dec[0], ro_dec[1])
# Decode with incremental state
inc_state = {}
ro_dec_inc = []
for l in range(tgt_len):
ro, _ = model.decoder.forward(
ro_tokens[:, : l + 1], encoder_out=en_enc, incremental_state=inc_state
)
self.assertEqual(ro.shape, (bs, 1, VOCAB_SIZE))
ro_dec_inc.append(ro)
for l in range(tgt_len):
# Intra-batch
self.assertTensorEqual(ro_dec_inc[l][0], ro_dec_inc[l][1])
# Incremental vs non-incremental
self.assertTensorEqual(ro_dec_inc[l][:, 0], ro_dec[:, l])
def params(model, name):
if "." not in name:
return getattr(model, name)
prefix, name = name.split(".", 1)
return params(getattr(model, prefix), name)
|
bart_ls-main
|
fairseq-py/tests/test_roberta.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq import utils
class TestUtils(unittest.TestCase):
def test_convert_padding_direction(self):
pad = 1
left_pad = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[1, 7, 8, 9, 10],
[1, 1, 1, 11, 12],
]
)
right_pad = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[7, 8, 9, 10, 1],
[11, 12, 1, 1, 1],
]
)
self.assertAlmostEqual(
right_pad,
utils.convert_padding_direction(
left_pad,
pad,
left_to_right=True,
),
)
self.assertAlmostEqual(
left_pad,
utils.convert_padding_direction(
right_pad,
pad,
right_to_left=True,
),
)
def test_make_positions(self):
pad = 1
left_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[1, 9, 9, 9, 9],
[1, 1, 1, 9, 9],
]
)
left_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[1, 2, 3, 4, 5],
[1, 1, 1, 2, 3],
]
)
right_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 1],
[9, 9, 1, 1, 1],
]
)
right_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[2, 3, 4, 5, 1],
[2, 3, 1, 1, 1],
]
)
self.assertAlmostEqual(
left_pad_output,
utils.make_positions(left_pad_input, pad),
)
self.assertAlmostEqual(
right_pad_output,
utils.make_positions(right_pad_input, pad),
)
def test_clip_grad_norm_(self):
params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
grad_norm = utils.clip_grad_norm_(params, 1.0)
self.assertTrue(torch.is_tensor(grad_norm))
self.assertEqual(grad_norm, 0.0)
params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)]
for p in params:
p.grad = torch.full((5,), fill_value=2.0)
grad_norm = utils.clip_grad_norm_(params, 1.0)
exp_grad_norm = torch.full((15,), fill_value=2.0).norm()
self.assertTrue(torch.is_tensor(grad_norm))
self.assertEqual(grad_norm, exp_grad_norm)
grad_norm = utils.clip_grad_norm_(params, 1.0)
self.assertAlmostEqual(grad_norm, torch.tensor(1.0))
def test_resolve_max_positions_with_tuple(self):
resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000)
self.assertEqual(resolved, (2000, 100, 2000))
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from copy import deepcopy
from dataclasses import dataclass
from typing import Optional
import torch
from fairseq.models.ema import EMA
class DummyModule(torch.nn.Module):
def __init__(self) -> None:
"""LightningModule for testing purposes
Args:
epoch_min_loss_override (int, optional): Pass in an epoch that will be set to the minimum
validation loss for testing purposes (zero based). If None this is ignored. Defaults to None.
"""
super().__init__()
self.layer = torch.nn.Linear(in_features=32, out_features=2)
self.another_layer = torch.nn.Linear(in_features=2, out_features=2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.layer(x)
return self.another_layer(x)
@dataclass
class EMAConfig(object):
ema_decay: float = 0.99
ema_start_update: int = 0
ema_fp32: bool = False
ema_seed_model: Optional[str] = None
class TestEMAGPU(unittest.TestCase):
def assertTorchAllClose(self, x, y, atol=1e-8, rtol=1e-5, msg=None):
diff = x.float() - y.float()
diff_norm = torch.norm(diff)
other_norm = torch.norm(y.float())
if msg is None:
msg = "|input - other| > {} + {} * |other|".format(
atol, rtol
)
self.assertLessEqual(
diff_norm,
atol + rtol * other_norm,
msg=msg,
)
def test_ema(self):
model = DummyModule()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
state = deepcopy(model.state_dict())
config = EMAConfig()
ema = EMA(model, config)
# set decay
ema._set_decay(config.ema_decay)
self.assertEqual(ema.get_decay(), config.ema_decay)
# get model
self.assertEqual(ema.get_model(), ema.model)
# Since fp32 params is not used, it should be of size 0
self.assertEqual(len(ema.fp32_params), 0)
# EMA step
x = torch.randn(32)
y = model(x)
loss = y.sum()
loss.backward()
optimizer.step()
ema.step(model)
ema_state_dict = ema.get_model().state_dict()
for key, param in model.state_dict().items():
prev_param = state[key]
ema_param = ema_state_dict[key]
if "version" in key:
# Do not decay a model.version pytorch param
continue
self.assertTorchAllClose(
ema_param,
config.ema_decay * prev_param + (1 - config.ema_decay) * param,
)
# Since fp32 params is not used, it should be of size 0
self.assertEqual(len(ema.fp32_params), 0)
# Load EMA into model
model2 = DummyModule()
ema.reverse(model2)
for key, param in model2.state_dict().items():
ema_param = ema_state_dict[key]
self.assertTrue(
torch.allclose(ema_param, param)
)
def test_ema_fp32(self):
model = DummyModule().half()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
state = deepcopy(model.state_dict())
config = EMAConfig(ema_fp32=True)
ema = EMA(model, config)
x = torch.randn(32)
y = model(x.half())
loss = y.sum()
loss.backward()
optimizer.step()
ema.step(model)
for key, param in model.state_dict().items():
prev_param = state[key]
ema_param = ema.get_model().state_dict()[key]
if "version" in key:
# Do not decay a model.version pytorch param
continue
self.assertIn(key, ema.fp32_params)
# EMA update is done in fp32, and hence the EMA param must be
# closer to the EMA update done in fp32 than in fp16.
self.assertLessEqual(
torch.norm(
ema_param.float() -
(config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half().float()
),
torch.norm(
ema_param.float() -
(config.ema_decay * prev_param + (1 - config.ema_decay) * param).float()
),
)
self.assertTorchAllClose(
ema_param,
(config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half(),
)
def test_ema_fp16(self):
model = DummyModule().half()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
state = deepcopy(model.state_dict())
config = EMAConfig(ema_fp32=False)
ema = EMA(model, config)
# Since fp32 params is not used, it should be of size 0
self.assertEqual(len(ema.fp32_params), 0)
x = torch.randn(32)
y = model(x.half())
loss = y.sum()
loss.backward()
optimizer.step()
ema.step(model)
for key, param in model.state_dict().items():
prev_param = state[key]
ema_param = ema.get_model().state_dict()[key]
if "version" in key:
# Do not decay a model.version pytorch param
continue
# EMA update is done in fp16, and hence the EMA param must be
# closer to the EMA update done in fp16 than in fp32.
self.assertLessEqual(
torch.norm(
ema_param.float() -
(config.ema_decay * prev_param + (1 - config.ema_decay) * param).float()
),
torch.norm(
ema_param.float() -
(config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half().float()
),
)
self.assertTorchAllClose(
ema_param,
config.ema_decay * prev_param + (1 - config.ema_decay) * param,
)
# Since fp32 params is not used, it should be of size 0
self.assertEqual(len(ema.fp32_params), 0)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_ema.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from fairseq.data import iterators
class TestIterators(unittest.TestCase):
def test_counting_iterator_index(self, ref=None, itr=None):
# Test the indexing functionality of CountingIterator
if ref is None:
assert itr is None
ref = list(range(10))
itr = iterators.CountingIterator(ref)
else:
assert len(ref) == 10
assert itr is not None
self.assertTrue(itr.has_next())
self.assertEqual(itr.n, 0)
self.assertEqual(next(itr), ref[0])
self.assertEqual(itr.n, 1)
self.assertEqual(next(itr), ref[1])
self.assertEqual(itr.n, 2)
itr.skip(3)
self.assertEqual(itr.n, 5)
self.assertEqual(next(itr), ref[5])
itr.skip(2)
self.assertEqual(itr.n, 8)
self.assertEqual(list(itr), [ref[8], ref[9]])
self.assertFalse(itr.has_next())
def test_counting_iterator_length_mismatch(self):
ref = list(range(10))
# When the underlying iterable is longer than the CountingIterator,
# the remaining items in the iterable should be ignored
itr = iterators.CountingIterator(ref, total=8)
self.assertEqual(list(itr), ref[:8])
# When the underlying iterable is shorter than the CountingIterator,
# raise an IndexError when the underlying iterable is exhausted
itr = iterators.CountingIterator(ref, total=12)
self.assertRaises(IndexError, list, itr)
def test_counting_iterator_take(self):
# Test the "take" method of CountingIterator
ref = list(range(10))
itr = iterators.CountingIterator(ref)
itr.take(5)
self.assertEqual(len(itr), len(list(iter(itr))))
self.assertEqual(len(itr), 5)
itr = iterators.CountingIterator(ref)
itr.take(5)
self.assertEqual(next(itr), ref[0])
self.assertEqual(next(itr), ref[1])
itr.skip(2)
self.assertEqual(next(itr), ref[4])
self.assertFalse(itr.has_next())
def test_grouped_iterator(self):
# test correctness
x = list(range(10))
itr = iterators.GroupedIterator(x, 1)
self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])
itr = iterators.GroupedIterator(x, 4)
self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]])
itr = iterators.GroupedIterator(x, 5)
self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
# test the GroupIterator also works correctly as a CountingIterator
x = list(range(30))
ref = list(iterators.GroupedIterator(x, 3))
itr = iterators.GroupedIterator(x, 3)
self.test_counting_iterator_index(ref, itr)
def test_sharded_iterator(self):
# test correctness
x = list(range(10))
itr = iterators.ShardedIterator(x, num_shards=1, shard_id=0)
self.assertEqual(list(itr), x)
itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0)
self.assertEqual(list(itr), [0, 2, 4, 6, 8])
itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1)
self.assertEqual(list(itr), [1, 3, 5, 7, 9])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
self.assertEqual(list(itr), [0, 3, 6, 9])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1)
self.assertEqual(list(itr), [1, 4, 7, None])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2)
self.assertEqual(list(itr), [2, 5, 8, None])
# test CountingIterator functionality
x = list(range(30))
ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0))
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
self.test_counting_iterator_index(ref, itr)
def test_counting_iterator_buffered_iterator_take(self):
ref = list(range(10))
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(len(itr), len(list(iter(itr))))
self.assertEqual(len(itr), 5)
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(len(buffered_itr), 5)
self.assertEqual(len(list(iter(buffered_itr))), 5)
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(next(itr), ref[0])
self.assertEqual(next(itr), ref[1])
itr.skip(2)
self.assertEqual(next(itr), ref[4])
self.assertFalse(itr.has_next())
self.assertRaises(StopIteration, next, buffered_itr)
ref = list(range(4, 10))
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr, start=4)
itr.take(5)
self.assertEqual(len(itr), 5)
self.assertEqual(len(buffered_itr), 1)
self.assertEqual(next(itr), ref[0])
self.assertFalse(itr.has_next())
self.assertRaises(StopIteration, next, buffered_itr)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_iterators.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Dict, List
import tests.utils as test_utils
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
TransformEosDataset,
data_utils,
noising,
)
class TestDataNoising(unittest.TestCase):
def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with continuation markers as suffixes to denote
non-end of word tokens. This is the standard BPE format used in
fairseq's preprocessing.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he@@")
vocab.add_symbol("llo")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("y@@")
vocab.add_symbol("ou")
vocab.add_symbol("n@@")
vocab.add_symbol("ew")
vocab.add_symbol("or@@")
vocab.add_symbol("k")
src_tokens = [
["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"],
["how", "are", "y@@", "ou"],
]
x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_bpe_end_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with end-of-word markers as suffixes to denote
tokens at the end of a word. This is an alternative to fairseq's
standard preprocessing framework and is not generally supported
within fairseq.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he")
vocab.add_symbol("llo_EOW")
vocab.add_symbol("how_EOW")
vocab.add_symbol("are_EOW")
vocab.add_symbol("y")
vocab.add_symbol("ou_EOW")
vocab.add_symbol("n")
vocab.add_symbol("ew_EOW")
vocab.add_symbol("or")
vocab.add_symbol("k_EOW")
src_tokens = [
["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"],
["how_EOW", "are_EOW", "y", "ou_EOW"],
]
x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_word_vocab(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: word vocab
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("hello")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("you")
vocab.add_symbol("new")
vocab.add_symbol("york")
src_tokens = [
["hello", "new", "york", "you"],
["how", "are", "you", "new", "york"],
]
x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _convert_src_tokens_to_tensor(
self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool
):
src_len = [len(x) for x in src_tokens]
# If we have to append EOS, we include EOS in counting src length
if append_eos:
src_len = [length + 1 for length in src_len]
x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad())
for i in range(len(src_tokens)):
for j in range(len(src_tokens[i])):
x[i][j] = vocab.index(src_tokens[i][j])
if append_eos:
x[i][j + 1] = vocab.eos()
x = x.transpose(1, 0)
return x, torch.LongTensor(src_len)
def assert_eos_at_end(self, x, x_len, eos):
"""Asserts last token of every sentence in x is EOS """
for i in range(len(x_len)):
self.assertEqual(
x[x_len[i] - 1][i],
eos,
(
"Expected eos (token id {eos}) at the end of sentence {i} "
"but got {other} instead"
).format(i=i, eos=eos, other=x[i][-1]),
)
def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
# Expect only the first word (2 bpe tokens) of the first example
# was dropped out
self.assertEqual(x_len[0] - 2, l_noised[0])
for i in range(l_noised[0]):
self.assertEqual(x_noised[i][0], x[i + 2][0])
def test_word_dropout_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk):
# Expect only the first word (2 bpe tokens) of the first example
# was blanked out
self.assertEqual(x_len[0], l_noised[0])
for i in range(l_noised[0]):
if i < 2:
self.assertEqual(x_noised[i][0], unk)
else:
self.assertEqual(x_noised[i][0], x[i][0])
def test_word_blank_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def generate_unchanged_shuffle_map(self, length):
return {i: i for i in range(length)}
def assert_word_shuffle_matches_expected(
self,
x,
x_len,
max_shuffle_distance: int,
vocab: Dictionary,
expected_shufle_maps: List[Dict[int, int]],
expect_eos_at_end: bool,
bpe_end_marker=None,
):
"""
This verifies that with a given x, x_len, max_shuffle_distance, and
vocab, we get the expected shuffle result.
Args:
x: Tensor of shape (T x B) = (sequence_length, batch_size)
x_len: Tensor of length B = batch_size
max_shuffle_distance: arg to pass to noising
expected_shuffle_maps: List[mapping] where mapping is a
Dict[old_index, new_index], mapping x's elements from their
old positions in x to their new positions in x.
expect_eos_at_end: if True, check the output to make sure there is
an EOS at the end.
bpe_end_marker: str denoting the BPE end token. If this is not None, we
set the BPE cont token to None in the noising classes.
"""
bpe_cont_marker = None
if bpe_end_marker is None:
bpe_cont_marker = "@@"
with data_utils.numpy_seed(1234):
word_shuffle = noising.WordShuffle(
vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker
)
x_noised, l_noised = word_shuffle.noising(
x, x_len, max_shuffle_distance=max_shuffle_distance
)
# For every example, we have a different expected shuffle map. We check
# that each example is shuffled as expected according to each
# corresponding shuffle map.
for i in range(len(expected_shufle_maps)):
shuffle_map = expected_shufle_maps[i]
for k, v in shuffle_map.items():
self.assertEqual(x[k][i], x_noised[v][i])
# Shuffling should not affect the length of each example
for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised):
self.assertEqual(pre_shuffle_length, post_shuffle_length)
if expect_eos_at_end:
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_shuffle_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=True,
)
def test_word_shuffle_with_eos_nonbpe(self):
"""The purpose of this is to test shuffling logic with word vocabs"""
vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
{0: 0, 1: 1, 2: 3, 3: 2},
{0: 0, 1: 2, 2: 1, 3: 3, 4: 4},
],
expect_eos_at_end=True,
)
def test_word_shuffle_without_eos(self):
"""Same result as word shuffle with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
)
def test_word_shuffle_without_eos_with_bpe_end_marker(self):
"""Same result as word shuffle without eos except using BPE end token"""
vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
def assert_no_eos_at_end(self, x, x_len, eos):
"""Asserts that the last token of each sentence in x is not EOS """
for i in range(len(x_len)):
self.assertNotEqual(
x[x_len[i] - 1][i],
eos,
"Expected no eos (token id {eos}) at the end of sentence {i}.".format(
eos=eos, i=i
),
)
def test_word_dropout_without_eos(self):
"""Same result as word dropout with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_blank_without_eos(self):
"""Same result as word blank with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def _get_noising_dataset_batch(
self,
src_tokens_no_pad,
src_dict,
append_eos_to_tgt=False,
):
"""
Constructs a NoisingDataset and the corresponding
``LanguagePairDataset(NoisingDataset(src), src)``. If
*append_eos_to_tgt* is True, wrap the source dataset in
:class:`TransformEosDataset` to append EOS to the clean source when
using it as the target.
"""
src_dataset = test_utils.TestDataset(data=src_tokens_no_pad)
noising_dataset = noising.NoisingDataset(
src_dataset=src_dataset,
src_dict=src_dict,
seed=1234,
max_word_shuffle_distance=3,
word_dropout_prob=0.2,
word_blanking_prob=0.2,
noising_class=noising.UnsupervisedMTNoising,
)
tgt = src_dataset
language_pair_dataset = LanguagePairDataset(
src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict
)
language_pair_dataset = TransformEosDataset(
language_pair_dataset,
src_dict.eos(),
append_eos_to_tgt=append_eos_to_tgt,
)
dataloader = torch.utils.data.DataLoader(
dataset=language_pair_dataset,
batch_size=2,
collate_fn=language_pair_dataset.collater,
)
denoising_batch_result = next(iter(dataloader))
return denoising_batch_result
def test_noising_dataset_with_eos(self):
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=True
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_noising_dataset_without_eos(self):
"""
Similar to test noising dataset with eos except that we have to set
*append_eos_to_tgt* to ``True``.
"""
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=False
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad,
src_dict=src_dict,
append_eos_to_tgt=True,
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_noising.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.lstm import LSTMModel
from fairseq.tasks.fairseq_task import LegacyFairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.src_dict = self.dictionary
self.tgt_dict = self.dictionary
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
for id, _ in enumerate(range(vocab_size)):
dummy_dict.add_symbol("{}".format(id), 1000)
return dummy_dict
def get_dummy_task_and_parser():
"""
to build a fariseq model, we need some dummy parse and task. This function
is used to create dummy task and parser to faciliate model/criterion test
Note: we use FbSpeechRecognitionTask as the dummy task. You may want
to use other task by providing another function
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
class TestJitLSTMModel(unittest.TestCase):
def _test_save_and_load(self, scripted_module):
with tempfile.NamedTemporaryFile() as f:
scripted_module.save(f.name)
torch.jit.load(f.name)
def assertTensorEqual(self, t1, t2):
t1 = t1[~torch.isnan(t1)] # can cause size mismatch errors if there are NaNs
t2 = t2[~torch.isnan(t2)]
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
def test_jit_and_export_lstm(self):
task, parser = get_dummy_task_and_parser()
LSTMModel.add_args(parser)
args = parser.parse_args([])
args.criterion = ""
model = LSTMModel.build_model(args, task)
scripted_model = torch.jit.script(model)
self._test_save_and_load(scripted_model)
def test_assert_jit_vs_nonjit_(self):
task, parser = get_dummy_task_and_parser()
LSTMModel.add_args(parser)
args = parser.parse_args([])
args.criterion = ""
model = LSTMModel.build_model(args, task)
model.eval()
scripted_model = torch.jit.script(model)
scripted_model.eval()
idx = len(task.source_dictionary)
iter = 100
# Inject random input and check output
seq_len_tensor = torch.randint(1, 10, (iter,))
num_samples_tensor = torch.randint(1, 10, (iter,))
for i in range(iter):
seq_len = seq_len_tensor[i]
num_samples = num_samples_tensor[i]
src_token = (torch.randint(0, idx, (num_samples, seq_len)),)
src_lengths = torch.randint(1, seq_len + 1, (num_samples,))
src_lengths, _ = torch.sort(src_lengths, descending=True)
# Force the first sample to have seq_len
src_lengths[0] = seq_len
prev_output_token = (torch.randint(0, idx, (num_samples, 1)),)
result = model(src_token[0], src_lengths, prev_output_token[0], None)
scripted_result = scripted_model(
src_token[0], src_lengths, prev_output_token[0], None
)
self.assertTensorEqual(result[0], scripted_result[0])
self.assertTensorEqual(result[1], scripted_result[1])
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_lstm_jitable.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import unittest
import torch
from torch.cuda.amp import autocast, GradScaler
from fairseq.optim import build_optimizer
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestGradientScalingAMP(unittest.TestCase):
def setUp(self):
self.x = torch.tensor([2.0]).cuda().half()
weight = 3.0
bias = 5.0
self.error = 1.0
self.target = torch.tensor([self.x * weight + bias + self.error]).cuda()
self.loss_fn = torch.nn.L1Loss()
self.model = torch.nn.Linear(1, 1)
self.model.weight.data = torch.tensor([[weight]])
self.model.bias.data = torch.tensor([bias])
self.model.cuda()
self.params = list(self.model.parameters())
self.namespace_dls = argparse.Namespace(
optimizer="adam",
lr=[0.1],
adam_betas="(0.9, 0.999)",
adam_eps=1e-8,
weight_decay=0.0,
threshold_loss_scale=1,
min_loss_scale=1e-4,
)
self.scaler = GradScaler(
init_scale=1,
growth_interval=1,
)
def run_iter(self, model, params, optimizer):
optimizer.zero_grad()
with autocast():
y = model(self.x)
loss = self.loss_fn(y, self.target)
self.scaler.scale(loss).backward()
self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16))
self.scaler.unscale_(optimizer)
grad_norm = optimizer.clip_grad_norm(0)
self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)
self.scaler.step(optimizer)
self.scaler.update()
self.assertEqual(
model.weight,
torch.tensor(
[[3.1]], device="cuda:0", requires_grad=True
),
)
self.assertEqual(
model.bias,
torch.tensor(
[5.1], device="cuda:0", requires_grad=True
),
)
self.assertEqual(self.scaler.get_scale(), 2.0)
def test_automatic_mixed_precision(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = build_optimizer(self.namespace_dls, params)
self.run_iter(model, params, optimizer)
|
bart_ls-main
|
fairseq-py/tests/test_amp_optimizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
class TestSparseMultiheadAttention(unittest.TestCase):
def test_sparse_multihead_attention(self):
attn_weights = torch.randn(1, 8, 8)
bidirectional_sparse_mask = torch.tensor(
[
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
]
)
bidirectional_attention = SparseMultiheadAttention(
16, 1, stride=4, expressivity=1, is_bidirectional=True
)
bidirectional_attention_sparse_mask = (
bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8)
)
torch.all(
torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask)
)
sparse_mask = torch.tensor(
[
[
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[
0,
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[
0,
0,
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[
0,
0,
0,
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), float("-inf")],
[
float("-inf"),
float("-inf"),
float("-inf"),
0,
0,
0,
float("-inf"),
float("-inf"),
],
[
float("-inf"),
float("-inf"),
float("-inf"),
0,
0,
0,
0,
float("-inf"),
],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
]
)
attention = SparseMultiheadAttention(
16, 1, stride=4, expressivity=1, is_bidirectional=False
)
attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8)
torch.all(torch.eq(attention_sparse_mask, sparse_mask))
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_sparse_multihead_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import uuid
from fairseq import metrics
class TestMetrics(unittest.TestCase):
def test_nesting(self):
with metrics.aggregate() as a:
metrics.log_scalar("loss", 1)
with metrics.aggregate() as b:
metrics.log_scalar("loss", 2)
self.assertEqual(a.get_smoothed_values()["loss"], 1.5)
self.assertEqual(b.get_smoothed_values()["loss"], 2)
def test_new_root(self):
with metrics.aggregate() as a:
metrics.log_scalar("loss", 1)
with metrics.aggregate(new_root=True) as b:
metrics.log_scalar("loss", 2)
self.assertEqual(a.get_smoothed_values()["loss"], 1)
self.assertEqual(b.get_smoothed_values()["loss"], 2)
def test_nested_new_root(self):
with metrics.aggregate() as layer1:
metrics.log_scalar("loss", 1)
with metrics.aggregate(new_root=True) as layer2:
metrics.log_scalar("loss", 2)
with metrics.aggregate() as layer3:
metrics.log_scalar("loss", 3)
with metrics.aggregate(new_root=True) as layer4:
metrics.log_scalar("loss", 4)
metrics.log_scalar("loss", 1.5)
self.assertEqual(layer4.get_smoothed_values()["loss"], 4)
self.assertEqual(layer3.get_smoothed_values()["loss"], 3)
self.assertEqual(layer2.get_smoothed_values()["loss"], 2.5)
self.assertEqual(layer1.get_smoothed_values()["loss"], 1.25)
def test_named(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar("loss", 1)
metrics.log_scalar("loss", 3)
with metrics.aggregate(name):
metrics.log_scalar("loss", 2)
self.assertEqual(metrics.get_smoothed_values(name)["loss"], 1.5)
def test_nested_duplicate_names(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar("loss", 1)
with metrics.aggregate() as other:
with metrics.aggregate(name):
metrics.log_scalar("loss", 2)
metrics.log_scalar("loss", 6)
self.assertEqual(metrics.get_smoothed_values(name)["loss"], 3)
self.assertEqual(other.get_smoothed_values()["loss"], 2)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_metrics.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import unittest
from io import StringIO
from unittest.mock import MagicMock, patch
import torch
from fairseq import checkpoint_utils, data
from omegaconf import OmegaConf
def mock_trainer(epoch, num_updates, iterations_in_epoch):
trainer = MagicMock()
trainer.load_checkpoint.return_value = {
"train_iterator": {
"epoch": epoch,
"iterations_in_epoch": iterations_in_epoch,
"shuffle": False,
},
}
trainer.get_num_updates.return_value = num_updates
return trainer
def mock_dict():
d = MagicMock()
d.pad.return_value = 1
d.eos.return_value = 2
d.unk.return_value = 3
return d
def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1)
tokens_ds = data.TokenBlockDataset(
tokens,
sizes=[tokens.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
dataset = data.LanguagePairDataset(
tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False
)
epoch_itr = data.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=[[i] for i in range(epoch_size)],
)
return trainer, epoch_itr
def get_mock_cfg(finetune_from_model):
cfg_mock = OmegaConf.create(
{
"checkpoint": {
"save_dir": None,
"optimizer_overrides": "{}",
"reset_dataloader": False,
"reset_meters": False,
"reset_optimizer": False,
"reset_lr_scheduler": False,
"finetune_from_model": finetune_from_model,
"model_parallel_size": 1,
"restore_file": "checkpoint_last.pt",
},
"common": {
"model_parallel_size": 1,
},
}
)
return cfg_mock
class TestLoadCheckpoint(unittest.TestCase):
def setUp(self):
self.cfg_mock = get_mock_cfg(None)
self.patches = {
"os.makedirs": MagicMock(),
"os.path.join": MagicMock(),
"os.path.isfile": MagicMock(return_value=True),
"os.path.isabs": MagicMock(return_value=False),
"fairseq.file_io.PathManager.exists": MagicMock(return_value=False),
}
self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
[p.start() for p in self.applied_patches]
logging.disable(logging.CRITICAL)
def tearDown(self):
patch.stopall()
logging.disable(logging.NOTSET)
def test_load_partial_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
_, epoch_itr = checkpoint_utils.load_checkpoint(
self.cfg_mock.checkpoint, trainer
)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 50)
self.assertEqual(epoch_itr.iterations_in_epoch, 51)
for _ in range(150 - 52):
next(itr)
self.assertEqual(epoch_itr.iterations_in_epoch, 149)
self.assertTrue(itr.has_next())
next(itr)
self.assertFalse(itr.has_next())
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertTrue(itr.has_next())
self.assertEqual(epoch_itr.epoch, 3)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
def test_load_full_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
_, epoch_itr = checkpoint_utils.load_checkpoint(
self.cfg_mock.checkpoint, trainer
)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 3)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)
def test_load_no_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
self.patches["os.path.isfile"].return_value = False
_, epoch_itr = checkpoint_utils.load_checkpoint(
self.cfg_mock.checkpoint, trainer
)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 1)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)
def test_finetune_from_model_args_conflict(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
for arg in [
"reset_optimizer",
"reset_lr_scheduler",
"reset_meters",
"reset_dataloader",
]:
with self.subTest(arg=arg):
cfg_mock = get_mock_cfg("/temp/checkpoint_pretrained.pt")
cfg_mock["checkpoint"][arg] = True
with self.assertRaises(Exception) as context:
_, _ = checkpoint_utils.load_checkpoint(
cfg_mock.checkpoint, trainer
)
self.assertTrue(
"--finetune-from-model can not be set together with either --reset-optimizer"
" or reset_lr_scheduler or reset_meters or reset_dataloader"
in str(context.exception)
)
def test_finetune_from_model(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
from_model_path = "/temp/checkpoint_pretrained.pt"
def mock_finetune_exist(path):
if path == from_model_path:
return True
else:
return False
self.patches[
"fairseq.file_io.PathManager.exists"
].side_effect = mock_finetune_exist
cfg_mock = get_mock_cfg(from_model_path)
cfg_mock.checkpoint.restore_file = "checkpoint_last.pt"
_, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer)
(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
) = trainer.load_checkpoint.call_args[0]
reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"]
self.assertTrue(reset_optimizer)
self.assertTrue(reset_lr_scheduler)
self.assertTrue(reset_meters)
def test_finetune_from_model_resume(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
from_model_path = "/temp/checkpoint_pretrained.pt"
# launch second time
# both restore_file=checkpoint_last.pt and finetune_from_model are set
def mock_finetune_exist(path):
if path == from_model_path or path.endsWith("checkpoint_last.pt"):
return True
else:
return False
self.patches[
"fairseq.file_io.PathManager.exists"
].side_effect = mock_finetune_exist
cfg_mock = get_mock_cfg(from_model_path)
cfg_mock.checkpoint.restore_file = "checkpoint_last.pt"
_, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer)
(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
) = trainer.load_checkpoint.call_args[0]
reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"]
self.assertFalse(reset_optimizer)
self.assertFalse(reset_lr_scheduler)
self.assertFalse(reset_meters)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
try:
import boto3
from fairseq.fb_pathhandlers import S3PathHandler
except ImportError:
boto3 = None
S3PathHandler = None
# Hack to make the test cases ordered.
# https://stackoverflow.com/questions/4005695/changing-order-of-unit-tests-in-python
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: y > x
@unittest.skipIf(not boto3, "Requires boto3 install")
class TestsS3(unittest.TestCase):
s3_auth = False
skip_s3_auth_required_tests_message = (
"Provide an s3 project and bucket you are"
+ "authorised against, then set the s3_auth flag to True"
)
#############################################
# Shared
#############################################
@classmethod
def setUpClass(cls):
# NOTE: user can change these locations.
cls.s3_bucket = "fairusersglobal"
cls.s3_rel_path = os.path.expandvars("users/$USER/private/home/$USER/.fairseq/test_s3_pathhandler")
cls.s3_full_path = "s3://" + cls.s3_bucket + "/" + cls.s3_rel_path
cls.s3_pathhandler = S3PathHandler()
@classmethod
def tearDownClass(cls, _s3_auth=s3_auth):
if not _s3_auth:
return
# Recursive deletion is not implemented,
# so let's delete each file and directory.
# Delete all files
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir1", "f1_write_string"]))
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir1", "f2_write_bytes"]))
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir2", "f1_write_string_from_local"]))
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir2", "f2_write_bytes_from_local"]))
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir2", "f3_write_string_from_local"]))
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir2", "f4_write_bytes_from_local"]))
# Delete all directories.
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, "dir3", "dir4/"]))
for i in (1, 2, 3):
cls.s3_pathhandler._rm("/".join([cls.s3_full_path, f"dir{i}/"]))
assert cls.s3_pathhandler._ls(cls.s3_full_path) == []
#############################################
# Up here, test class attributes,
# and helpers that don't require S3 access.
#############################################
def test_00_supported_prefixes(self):
supported_prefixes = self.s3_pathhandler._get_supported_prefixes()
self.assertEqual(supported_prefixes, ["s3://"])
# # Require S3 Authentication ====>
#############################################
# Organization of s3 setup
# dir1/
# f1 <- small (via open)
# f2 <- large checkpoint file (via open)
# dir2/
# f3 <- small (via copy(), from dir1)
# f4 <- large checkpoint file (via copy_from_local)
# dir3/
# dir4/
#############################################
#############################################
# auth
# Just check that client loads properly
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_01_add_client_to_handler(self):
self.s3_pathhandler._get_client(
"/".join([self.s3_full_path, "path", "file.txt"])
)
# self.assertTrue(isinstance(self.s3_pathhandler.client, botocore.client.S3)) # TODO
# TODO: make sure that the error message displays properly if authentication is messed up.
#############################################
# mkdirs
# Set up the dirs
# (in BASE)
# +dir1
# +dir2
# +dir3
# +dir4
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_02_mkdirs_must_end_with_slash(self):
with self.assertRaises(AssertionError):
self.s3_pathhandler._mkdirs("/".join([self.s3_full_path, "fail"]))
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_03_mkdirs(self):
# dir{1,2,3} in BASE
for i in (1, 2, 3):
self.s3_pathhandler._mkdirs("/".join([self.s3_full_path, f"dir{i}/"]))
# Make a nested directory in dir3
self.s3_pathhandler._mkdirs("/".join([self.s3_full_path, "dir3/dir4/"]))
#############################################
# open (w/wb)
# +f1
# +f2
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_04_open_write_mode(self):
with self.s3_pathhandler._open("/".join([self.s3_full_path, "dir1", "f1_write_string"]), 'w') as f:
f.write("This is a test of writing a string.")
with self.s3_pathhandler._open("/".join([self.s3_full_path, "dir1", "f2_write_bytes"]), 'wb') as f:
f.write(b"This is a test of writing bytes.")
#############################################
# open (r/rb)
# read f1
# read f2
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_05_open_read_mode(self):
with self.s3_pathhandler._open("/".join([self.s3_full_path, "dir1", "f1_write_string"]), 'r') as f:
self.assertEqual(
f.read(),
"This is a test of writing a string."
)
with self.s3_pathhandler._open("/".join([self.s3_full_path, "dir1", "f2_write_bytes"]), 'rb') as f:
self.assertEqual(
f.read(),
b"This is a test of writing bytes."
)
#############################################
# isdir / isfile / exists
# test dir{1,2,3,4}
# test f{1,2}
# test nonexistants
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_06_exists(self):
# Path does not exist (if file)
self.assertFalse(self.s3_pathhandler._exists("/".join([self.s3_full_path, "dir1", "FAIL"])))
# Path does not exist (if dir)
self.assertFalse(self.s3_pathhandler._exists("/".join([self.s3_full_path, "FAIL/"])))
# Path exists (is file)
self.assertTrue(self.s3_pathhandler._exists("/".join([self.s3_full_path, "dir1", "f1_write_string"])))
# Path exists (is dir)
self.assertTrue(self.s3_pathhandler._exists("/".join([self.s3_full_path, "dir1/"])))
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_07_isdir(self):
# Path does not exist (if file)
self.assertFalse(self.s3_pathhandler._isdir("/".join([self.s3_full_path, "dir1", "FAIL"])))
# Path does not exist (if dir)
self.assertFalse(self.s3_pathhandler._isdir("/".join([self.s3_full_path, "FAIL/"])))
# Path exists (is file)
self.assertFalse(self.s3_pathhandler._isdir("/".join([self.s3_full_path, "dir1", "f1_write_string"])))
# Path exists (is dir)
self.assertTrue(self.s3_pathhandler._isdir("/".join([self.s3_full_path, "dir1/"])))
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_08_isfile(self):
# Path does not exist (if file)
self.assertFalse(self.s3_pathhandler._isfile("/".join([self.s3_full_path, "dir1", "FAIL"])))
# Path does not exist (if dir)
self.assertFalse(self.s3_pathhandler._isfile("/".join([self.s3_full_path, "FAIL/"])))
# Path exists (is file)
self.assertTrue(self.s3_pathhandler._isfile("/".join([self.s3_full_path, "dir1", "f1_write_string"])))
# Path exists (is dir)
self.assertFalse(self.s3_pathhandler._isfile("/".join([self.s3_full_path, "dir1/"])))
#############################################
# copy
# copy f1 -> f3
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_09_copy(self):
self.assertTrue(
self.s3_pathhandler._copy(
"/".join([self.s3_full_path, "dir1", "f1_write_string"]),
"/".join([self.s3_full_path, "dir2", "f3_write_string"])
)
)
#############################################
# ls
# ls dir{1,2,3,4}
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_10_ls(self):
# Path does not exist (if file)
self.assertEqual([], self.s3_pathhandler._ls("/".join([self.s3_full_path, "dir1", "FAIL"])))
# Path does not exist (if dir)
self.assertEqual([], self.s3_pathhandler._ls("/".join([self.s3_full_path, "FAIL/"])))
# Path exists (is file)
self.assertEqual(
["/".join([self.s3_rel_path, "dir1", "f1_write_string"])],
self.s3_pathhandler._ls("/".join([self.s3_full_path, "dir1", "f1_write_string"]))
)
# Path exists (is dir)
self.assertEqual(
{
"/".join([self.s3_rel_path, "dir1/"]), # TODO: should the trailing slash be
"/".join([self.s3_rel_path, "dir1", "f1_write_string"]),
"/".join([self.s3_rel_path, "dir1", "f2_write_bytes"])
},
set(self.s3_pathhandler._ls("/".join([self.s3_full_path, "dir1/"])))
)
#############################################
# rm
# rm f3
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_11_rm(self):
path = "/".join([self.s3_full_path, "dir2", "f3_write_string"])
self.assertTrue(self.s3_pathhandler._exists(path))
self.assertTrue(self.s3_pathhandler._isfile(path))
self.assertFalse(self.s3_pathhandler._isdir(path))
self.s3_pathhandler._rm(path)
self.assertFalse(self.s3_pathhandler._exists(path))
self.assertFalse(self.s3_pathhandler._isfile(path))
self.assertFalse(self.s3_pathhandler._isdir(path))
#############################################
# get_local_path
# Retrieve f{1,2}
# Check file contents.
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_12_get_local_path(self):
s3_path_f1 = "/".join([self.s3_full_path, "dir1", "f1_write_string"])
s3_path_f2 = "/".join([self.s3_full_path, "dir1", "f2_write_bytes"])
local_path_f1 = self.s3_pathhandler._get_local_path(s3_path_f1)
local_path_f2 = self.s3_pathhandler._get_local_path(s3_path_f2)
with open(local_path_f1, 'r') as f:
self.assertEqual(
f.read(),
"This is a test of writing a string."
)
with open(local_path_f2, 'rb') as f:
self.assertEqual(
f.read(),
b"This is a test of writing bytes."
)
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_13_get_local_path_idempotent(self):
"""
Call _get_local_path multiple times.
Check that we keep returning the same cached copy instead of redownloading.
"""
s3_path_f1 = "/".join([self.s3_full_path, "dir1", "f1_write_string"])
REPEATS = 3
local_paths = [self.s3_pathhandler._get_local_path(s3_path_f1) for _ in range(REPEATS)]
for local_path in local_paths[1:]:
self.assertEqual(local_path, local_paths[0])
with open(local_paths[0], 'r') as f:
self.assertEqual(
f.read(),
"This is a test of writing a string."
)
# TODO: make sure it fails if asked for a directory
# TODO: make sure that the returned path is appropriately placed.
##############################################
# copy_from_local
# Upload local copies of f1, f2 -> f3, f4.
# Check contents via open(), and via another get_local_path
##############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_14_copy_from_local(self):
s3_src_path_f1 = "/".join([self.s3_full_path, "dir1", "f1_write_string"])
s3_src_path_f2 = "/".join([self.s3_full_path, "dir1", "f2_write_bytes"])
local_path_f1 = self.s3_pathhandler._get_local_path(s3_src_path_f1)
local_path_f2 = self.s3_pathhandler._get_local_path(s3_src_path_f2)
s3_dst_path_f1 = "/".join([self.s3_full_path, "dir2", "f1_write_string_from_local"])
s3_dst_path_f2 = "/".join([self.s3_full_path, "dir2", "f2_write_bytes_from_local"])
self.assertTrue(self.s3_pathhandler._copy_from_local(local_path_f1, s3_dst_path_f1))
self.assertTrue(self.s3_pathhandler._copy_from_local(local_path_f2, s3_dst_path_f2))
#############################################
# symlink
# should fail
#############################################
@unittest.skipIf(not s3_auth, skip_s3_auth_required_tests_message)
def test_15_symlink(self):
s3_src_path_f1 = "/".join([self.s3_full_path, "dir1", "f1_write_string"])
s3_dst_path_f1 = "/".join([self.s3_full_path, "dir2", "f1_write_string_symlink"])
with self.assertRaises(NotImplementedError):
self.s3_pathhandler._symlink(s3_src_path_f1, s3_dst_path_f1)
|
bart_ls-main
|
fairseq-py/tests/fb_test_pathhandlers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import tests.utils as test_utils
import torch
from fairseq.sequence_scorer import SequenceScorer
class TestSequenceScorer(unittest.TestCase):
def test_sequence_scorer(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
eos = d.eos()
w1 = 4
w2 = 5
# construct dataloader
data = [
{
"source": torch.LongTensor([w1, w2, eos]),
"target": torch.LongTensor([w1, w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, eos]),
},
]
data_itr = test_utils.dummy_dataloader(data)
# specify expected output probabilities
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.6, 0.4], # sentence 1
[0.0, unk, 0.4, 0.6], # sentence 2
[0.0, unk, 0.7, 0.3], # sentence 3
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.2, 0.7], # sentence 1
[0.0, unk, 0.8, 0.2], # sentence 2
[0.7, unk, 0.1, 0.2], # sentence 3
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
[0.10, unk, 0.50, 0.4], # sentence 1
[0.15, unk, 0.15, 0.7], # sentence 2
[0.00, unk, 0.00, 0.0], # sentence 3
]
),
# step 3:
torch.FloatTensor(
[
# eos w1 w2
[0.9, unk, 0.05, 0.05], # sentence 1
[0.0, unk, 0.00, 0.0], # sentence 2
[0.0, unk, 0.00, 0.0], # sentence 3
]
),
]
expected_scores = [
[0.6, 0.7, 0.5, 0.9], # sentence 1
[0.6, 0.8, 0.15], # sentence 2
[0.3, 0.7], # sentence 3
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
scorer = SequenceScorer(task.target_dictionary)
for sample in data_itr:
hypos = task.inference_step(scorer, [model], sample)
for id, hypos_id in zip(sample["id"].tolist(), hypos):
self.assertHypoTokens(hypos_id[0], data[id]["target"])
self.assertHypoScore(hypos_id[0], expected_scores[id])
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_sequence_scorer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models.transformer import TransformerModel
from tests.test_sequence_generator import get_dummy_task_and_parser
class TestInferenceDropout(unittest.TestCase):
def setUp(self):
self.task, self.parser = get_dummy_task_and_parser()
TransformerModel.add_args(self.parser)
self.args = self.parser.parse_args([])
self.args.encoder_layers = 2
self.args.decoder_layers = 1
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_sets_inference_dropout_to_true(self):
self.args.retain_dropout = True
self.transformer_model = TransformerModel.build_model(self.args, self.task)
cfg = convert_namespace_to_omegaconf(self.args)
self.transformer_model.prepare_for_inference_(cfg)
assert self.transformer_model.encoder.dropout_module.apply_during_inference
assert self.transformer_model.decoder.dropout_module.apply_during_inference
for layer in self.transformer_model.encoder.layers:
assert layer.dropout_module.apply_during_inference
def test_inference_dropout_false_by_default(self):
self.transformer_model = TransformerModel.build_model(self.args, self.task)
cfg = convert_namespace_to_omegaconf(self.args)
self.transformer_model.prepare_for_inference_(cfg)
assert not self.transformer_model.encoder.dropout_module.apply_during_inference
assert not self.transformer_model.decoder.dropout_module.apply_during_inference
for layer in self.transformer_model.encoder.layers:
assert not layer.dropout_module.apply_during_inference
for layer in self.transformer_model.decoder.layers:
assert not layer.dropout_module.apply_during_inference
def test_applies_training_mode(self):
self.transformer_model = TransformerModel.build_model(self.args, self.task)
assert self.transformer_model.encoder.dropout_module.training
for layer in self.transformer_model.encoder.layers:
assert layer.dropout_module.training
self.transformer_model.eval()
assert not self.transformer_model.decoder.dropout_module.training
for layer in self.transformer_model.encoder.layers:
assert not layer.dropout_module.training
def test_retain_modules(self):
self.args.retain_dropout = True
self.args.retain_dropout_modules = [
"TransformerEncoder",
"TransformerEncoderLayer",
]
self.transformer_model = TransformerModel.build_model(self.args, self.task)
cfg = convert_namespace_to_omegaconf(self.args)
self.transformer_model.prepare_for_inference_(cfg)
assert self.transformer_model.encoder.dropout_module.apply_during_inference
assert not self.transformer_model.decoder.dropout_module.apply_during_inference
for layer in self.transformer_model.decoder.layers:
assert not layer.dropout_module.apply_during_inference
|
bart_ls-main
|
fairseq-py/tests/test_inference_dropout.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.modules.multihead_attention import MultiheadAttention
class TestMultiheadAttention(unittest.TestCase):
def test_append_prev_key_padding_mask(self):
bsz = 1
src_len = 4
cases = [
# no padding mask
(None, None, None),
# current padding mask only
(
torch.tensor([[1]]).bool(),
None,
torch.tensor([[0, 0, 0, 1]]).bool(),
),
# previous padding mask only
(
None,
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 0]]).bool(),
),
# both padding masks
(
torch.tensor([[1]]).bool(),
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 1]]).bool(),
),
# prev_key_padding_mask already full
(
torch.tensor([[0, 1, 0, 1]]).bool(),
None,
torch.tensor([[0, 1, 0, 1]]).bool(),
),
# key_padding_mask already full
(
None,
torch.tensor([[0, 1, 0, 1]]).bool(),
torch.tensor([[0, 1, 0, 1]]).bool(),
),
]
for c in cases:
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
c[0],
c[1],
batch_size=bsz,
src_len=src_len,
static_kv=False,
)
if key_padding_mask is not None:
self.assertTrue(
torch.all(torch.eq(key_padding_mask, c[2])),
f"Unexpected resultant key padding mask: {key_padding_mask}"
f" given current: {c[0]} and previous: {c[1]}",
)
self.assertEqual(key_padding_mask.size(0), bsz)
self.assertEqual(key_padding_mask.size(1), src_len)
else:
self.assertIsNone(c[2])
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_multihead_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
from fairseq.data.data_utils_fast import batch_by_size_fn
from fairseq.data.data_utils_fast import batch_by_size_vec
class TestBatchBySize(unittest.TestCase):
@classmethod
def batch_by_size_baseline(
cls,
indices,
num_tokens_vec,
max_tokens,
max_sentences,
bsz_mult,
):
"""Simple, reliable and slow implementation of batch by size """
batches = []
start = 0
while start < len(indices):
for end in range(start + 1, len(indices) + 1):
max_val = max(num_tokens_vec[pos] for pos in range(start, end))
sent_count = end - start
num_tokens = max_val * sent_count
overflow = num_tokens > max_tokens > 0 or sent_count > max_sentences > 0
terminate = overflow or end == len(indices)
if overflow:
sent_count -= 1
if terminate:
if sent_count > bsz_mult:
sent_count = sent_count - sent_count % bsz_mult
batches.append(indices[start : start + sent_count])
start = start + sent_count
break
return batches
@classmethod
def _get_error_message(
cls, max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results
):
return f"""Reference batch_by_size implementation should produce
same output as the baseline method.
Params:
max_sentences={max_sentences},
max_tokens={max_tokens},
bsz_mult={bsz_mult},
num_tokens_vec={num_tokens_vec},
expected_batches={validation},
returned_batches={results}"""
def _compare_results(
self,
indices_len,
batch_by_size_impl,
max_sentences,
max_tokens,
bsz_mult,
num_tokens_vec,
):
indices = np.array(list(range(indices_len)))
validation = self.batch_by_size_baseline(
indices,
num_tokens_vec,
max_tokens=max_tokens,
max_sentences=max_sentences,
bsz_mult=bsz_mult,
)
results = batch_by_size_impl(
indices,
num_tokens_vec,
max_tokens=max_tokens,
max_sentences=max_sentences,
bsz_mult=bsz_mult,
)
error_msg = self._get_error_message(
max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results
)
self.assertEqual(len(validation), len(results), error_msg)
for first, second in zip(validation, results):
self.assertTrue(np.array_equal(first, second), error_msg)
def _run_compare_with_baseline_sweep(self, batch_by_size_impl):
"""Compare reference batch_by_size implementation with batch_by_size_baseline
across a dense grid of hyperparam values"""
MAX_MAX_TOKENS = 10
NUM_TOKENS_VECS_COUNT = 5
for indices_len in [10, 11]: # try odd and even len of indices
for max_sentences in range(0, indices_len + 2):
for max_tokens in range(0, MAX_MAX_TOKENS):
for bsz_mult in range(1, max(MAX_MAX_TOKENS, indices_len) + 2):
for _ in range(NUM_TOKENS_VECS_COUNT):
num_tokens_vec = np.random.randint(
0, max_tokens + 1, size=indices_len
)
self._compare_results(
indices_len,
batch_by_size_impl,
max_sentences,
max_tokens,
bsz_mult,
num_tokens_vec,
)
class TestBatchBySizeVec(TestBatchBySize):
def test_compare_with_baseline(self):
self._run_compare_with_baseline_sweep(batch_by_size_vec)
class TestBatchBySizeFn(TestBatchBySize):
def test_compare_with_baseline(self):
def batch_by_size_fn_wrapper(
indices,
num_tokens_vec,
max_tokens,
max_sentences,
bsz_mult,
):
def num_tokens_fn(idx):
return num_tokens_vec[idx]
return batch_by_size_fn(
indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult
)
self._run_compare_with_baseline_sweep(batch_by_size_fn_wrapper)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import string
import typing as tp
import unittest
from collections import Counter
from tempfile import NamedTemporaryFile, TemporaryDirectory
from fairseq.data import Dictionary, indexed_dataset
from fairseq.data.huffman import (
HuffmanCodeBuilder,
HuffmanCoder,
HuffmanMMapIndexedDataset,
HuffmanMMapIndexedDatasetBuilder,
)
POPULATION = string.ascii_letters + string.digits
def make_sentence() -> tp.List[str]:
length = random.randint(10, 50)
return random.choices(
population=POPULATION, k=length, weights=range(1, len(POPULATION) + 1)
)
def make_data(length=1000) -> tp.List[tp.List[str]]:
return (
[make_sentence() for _ in range(0, length)]
# add all the symbols at least once
+ [list(string.ascii_letters), list(string.digits)]
)
def make_counts(data: tp.List[tp.List[str]]) -> Counter:
return Counter([symbol for sentence in data for symbol in sentence])
def make_code_builder(data: tp.List[tp.List[str]]) -> HuffmanCodeBuilder:
builder = HuffmanCodeBuilder()
for sentence in data:
builder.add_symbols(*sentence)
return builder
class TestCodeBuilder(unittest.TestCase):
def test_code_builder_can_count(self):
data = make_data()
counts = make_counts(data)
builder = make_code_builder(data)
self.assertEqual(builder.symbols, counts)
def test_code_builder_can_add(self):
data = make_data()
counts = make_counts(data)
builder = make_code_builder(data)
new_builder = builder + builder
self.assertEqual(new_builder.symbols, counts + counts)
def test_code_builder_can_io(self):
data = make_data()
builder = make_code_builder(data)
with NamedTemporaryFile() as tmp_fp:
builder.to_file(tmp_fp.name)
other_builder = HuffmanCodeBuilder.from_file(tmp_fp.name)
self.assertEqual(builder.symbols, other_builder.symbols)
class TestCoder(unittest.TestCase):
def test_coder_can_io(self):
data = make_data()
builder = make_code_builder(data)
coder = builder.build_code()
with NamedTemporaryFile() as tmp_fp:
coder.to_file(tmp_fp.name)
other_coder = HuffmanCoder.from_file(tmp_fp.name)
self.assertEqual(coder, other_coder)
def test_coder_can_encode_decode(self):
data = make_data()
builder = make_code_builder(data)
coder = builder.build_code()
encoded = [coder.encode(sentence) for sentence in data]
decoded = [[n.symbol for n in coder.decode(enc)] for enc in encoded]
self.assertEqual(decoded, data)
unseen_data = make_data()
unseen_encoded = [coder.encode(sentence) for sentence in unseen_data]
unseen_decoded = [
[n.symbol for n in coder.decode(enc)] for enc in unseen_encoded
]
self.assertEqual(unseen_decoded, unseen_data)
def build_dataset(prefix, data, coder):
with HuffmanMMapIndexedDatasetBuilder(prefix, coder) as builder:
for sentence in data:
builder.add_item(sentence)
def sizes(data):
return [len(sentence) for sentence in data]
class TestHuffmanDataset(unittest.TestCase):
def test_huffman_can_encode_decode(self):
data = make_data()
builder = make_code_builder(data)
coder = builder.build_code()
with TemporaryDirectory() as dirname:
prefix = os.path.join(dirname, "test1")
build_dataset(prefix, data, coder)
dataset = HuffmanMMapIndexedDataset(prefix)
self.assertEqual(len(dataset), len(data))
decoded = [list(dataset.get_symbols(i)) for i in range(0, len(dataset))]
self.assertEqual(decoded, data)
data_sizes = [i.item() for i in dataset.sizes]
self.assertEqual(data_sizes, sizes(data))
def test_huffman_compresses(self):
data = make_data()
builder = make_code_builder(data)
coder = builder.build_code()
with TemporaryDirectory() as dirname:
prefix = os.path.join(dirname, "huffman")
build_dataset(prefix, data, coder)
prefix_mmap = os.path.join(dirname, "mmap")
mmap_builder = indexed_dataset.make_builder(
indexed_dataset.data_file_path(prefix_mmap),
"mmap",
vocab_size=len(POPULATION),
)
dictionary = Dictionary()
for c in POPULATION:
dictionary.add_symbol(c)
dictionary.finalize()
for sentence in data:
mmap_builder.add_item(dictionary.encode_line(" ".join(sentence)))
mmap_builder.finalize(indexed_dataset.index_file_path(prefix_mmap))
huff_size = os.stat(indexed_dataset.data_file_path(prefix)).st_size
mmap_size = os.stat(indexed_dataset.data_file_path(prefix_mmap)).st_size
self.assertLess(huff_size, mmap_size)
def test_huffman_can_append(self):
data1 = make_data()
builder = make_code_builder(data1)
coder = builder.build_code()
with TemporaryDirectory() as dirname:
prefix1 = os.path.join(dirname, "test1")
build_dataset(prefix1, data1, coder)
data2 = make_data()
prefix2 = os.path.join(dirname, "test2")
build_dataset(prefix2, data2, coder)
prefix3 = os.path.join(dirname, "test3")
with HuffmanMMapIndexedDatasetBuilder(prefix3, coder) as builder:
builder.append(prefix1)
builder.append(prefix2)
dataset = HuffmanMMapIndexedDataset(prefix3)
self.assertEqual(len(dataset), len(data1) + len(data2))
decoded1 = [list(dataset.get_symbols(i)) for i in range(0, len(data1))]
self.assertEqual(decoded1, data1)
decoded2 = [
list(dataset.get_symbols(i)) for i in range(len(data1), len(dataset))
]
self.assertEqual(decoded2, data2)
data_sizes = [i.item() for i in dataset.sizes]
self.assertEqual(data_sizes[: len(data1)], sizes(data1))
self.assertEqual(data_sizes[len(data1) : len(dataset)], sizes(data2))
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_huffman.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import os
import tempfile
import unittest
from io import StringIO
from unittest.mock import patch
from fairseq import checkpoint_utils
from omegaconf import OmegaConf
from tests.utils import (
create_dummy_data,
preprocess_translation_data,
train_translation_model,
)
class TestCheckpointUtils(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@contextlib.contextmanager
def _train_transformer(self, seed, extra_args=None):
if extra_args is None:
extra_args = []
with tempfile.TemporaryDirectory(f"_train_transformer_seed{seed}") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"3",
"--decoder-layers",
"3",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--seed",
str(seed),
]
+ extra_args,
)
yield os.path.join(data_dir, "checkpoint_last.pt")
def test_load_model_ensemble_and_task(self):
# with contextlib.redirect_stdout(StringIO()):
with self._train_transformer(seed=123) as model1:
with self._train_transformer(seed=456) as model2:
ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
filenames=[model1, model2]
)
self.assertEqual(len(ensemble), 2)
# after Transformer has been migrated to Hydra, this will probably
# become cfg.common.seed
self.assertEqual(ensemble[0].args.seed, 123)
self.assertEqual(ensemble[1].args.seed, 456)
# the task from the first model should be returned
self.assertTrue("seed123" in task.cfg.data)
# last cfg is saved
self.assertEqual(cfg.common.seed, 456)
def test_prune_state_dict(self):
with contextlib.redirect_stdout(StringIO()):
extra_args = ["--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01"]
with self._train_transformer(seed=1, extra_args=extra_args) as model:
ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
filenames=[model],
arg_overrides={
"encoder_layers_to_keep": "0,2",
"decoder_layers_to_keep": "1",
},
)
self.assertEqual(len(ensemble), 1)
self.assertEqual(len(ensemble[0].encoder.layers), 2)
self.assertEqual(len(ensemble[0].decoder.layers), 1)
def test_torch_persistent_save_async(self):
state_dict = {}
filename = "async_checkpoint.pt"
with patch(f"{checkpoint_utils.__name__}.PathManager.opena") as mock_opena:
with patch(f"{checkpoint_utils.__name__}._torch_persistent_save") as mock_save:
checkpoint_utils.torch_persistent_save(
state_dict, filename, async_write=True
)
mock_opena.assert_called_with(filename, "wb")
mock_save.assert_called()
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_checkpoint_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest import mock
class TestIOPath(unittest.TestCase):
def test_no_iopath(self):
from .test_reproducibility import TestReproducibility
with mock.patch.dict("sys.modules", {"iopath": None}):
# reuse reproducibility tests, which are e2e tests that should cover
# most checkpoint related functionality
TestReproducibility._test_reproducibility(self, "test_reproducibility")
def test_no_supports_rename(self):
from .test_reproducibility import TestReproducibility
with mock.patch("fairseq.file_io.PathManager.supports_rename") as mock_fn:
mock_fn.return_value = False
TestReproducibility._test_reproducibility(self, "test_reproducibility")
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_iopath.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import unittest
import numpy as np
from fairseq.data import ListDataset, ResamplingDataset
class TestResamplingDataset(unittest.TestCase):
def setUp(self):
self.strings = ["ab", "c", "def", "ghij"]
self.weights = [4.0, 2.0, 7.0, 1.5]
self.size_ratio = 2
self.dataset = ListDataset(
self.strings, np.array([len(s) for s in self.strings])
)
def _test_common(self, resampling_dataset, iters):
assert len(self.dataset) == len(self.strings) == len(self.weights)
assert len(resampling_dataset) == self.size_ratio * len(self.strings)
results = {"ordered_by_size": True, "max_distribution_diff": 0.0}
totalfreqs = 0
freqs = collections.defaultdict(int)
for epoch_num in range(iters):
resampling_dataset.set_epoch(epoch_num)
indices = resampling_dataset.ordered_indices()
assert len(indices) == len(resampling_dataset)
prev_size = -1
for i in indices:
cur_size = resampling_dataset.size(i)
# Make sure indices map to same sequences within an epoch
assert resampling_dataset[i] == resampling_dataset[i]
# Make sure length of sequence is correct
assert cur_size == len(resampling_dataset[i])
freqs[resampling_dataset[i]] += 1
totalfreqs += 1
if prev_size > cur_size:
results["ordered_by_size"] = False
prev_size = cur_size
assert set(freqs.keys()) == set(self.strings)
for s, weight in zip(self.strings, self.weights):
freq = freqs[s] / totalfreqs
expected_freq = weight / sum(self.weights)
results["max_distribution_diff"] = max(
results["max_distribution_diff"], abs(expected_freq - freq)
)
return results
def test_resampling_dataset_batch_by_size_false(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=False,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = False, the batches should be returned in
# arbitrary order of size.
assert not results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
def test_resampling_dataset_batch_by_size_true(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=True,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = True, the batches should be returned in
# increasing order of size.
assert results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_resampling_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import tempfile
import unittest
from fairseq import options
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.data.data_utils import raise_if_valid_subsets_unintentionally_ignored
from .utils import create_dummy_data, preprocess_lm_data, train_language_model
def make_lm_config(
data_dir=None,
extra_flags=None,
task="language_modeling",
arch="transformer_lm_gpt2_tiny",
):
task_args = [task]
if data_dir is not None:
task_args += [data_dir]
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
*task_args,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
]
+ (extra_flags or []),
)
cfg = convert_namespace_to_omegaconf(train_args)
return cfg
def write_empty_file(path):
with open(path, "w"):
pass
assert os.path.exists(path)
class TestValidSubsetsErrors(unittest.TestCase):
"""Test various filesystem, clarg combinations and ensure that error raising happens as expected"""
def _test_case(self, paths, extra_flags):
with tempfile.TemporaryDirectory() as data_dir:
[
write_empty_file(os.path.join(data_dir, f"{p}.bin"))
for p in paths + ["train"]
]
cfg = make_lm_config(data_dir, extra_flags=extra_flags)
raise_if_valid_subsets_unintentionally_ignored(cfg)
def test_default_raises(self):
with self.assertRaises(ValueError):
self._test_case(["valid", "valid1"], [])
with self.assertRaises(ValueError):
self._test_case(
["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"]
)
def partially_specified_valid_subsets(self):
with self.assertRaises(ValueError):
self._test_case(
["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"]
)
# Fix with ignore unused
self._test_case(
["valid", "valid1", "valid2"],
["--valid-subset", "valid,valid1", "--ignore-unused-valid-subsets"],
)
def test_legal_configs(self):
self._test_case(["valid"], [])
self._test_case(["valid", "valid1"], ["--ignore-unused-valid-subsets"])
self._test_case(["valid", "valid1"], ["--combine-val"])
self._test_case(["valid", "valid1"], ["--valid-subset", "valid,valid1"])
self._test_case(["valid", "valid1"], ["--valid-subset", "valid1"])
self._test_case(
["valid", "valid1"], ["--combine-val", "--ignore-unused-valid-subsets"]
)
self._test_case(
["valid1"], ["--valid-subset", "valid1"]
) # valid.bin doesn't need to be ignored.
def test_disable_validation(self):
self._test_case([], ["--disable-validation"])
self._test_case(["valid", "valid1"], ["--disable-validation"])
def test_dummy_task(self):
cfg = make_lm_config(task="dummy_lm")
raise_if_valid_subsets_unintentionally_ignored(cfg)
def test_masked_dummy_task(self):
cfg = make_lm_config(task="dummy_masked_lm")
raise_if_valid_subsets_unintentionally_ignored(cfg)
class TestCombineValidSubsets(unittest.TestCase):
def _train(self, extra_flags):
with self.assertLogs() as logs:
with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir:
create_dummy_data(data_dir, num_examples=20)
preprocess_lm_data(data_dir)
shutil.copyfile(f"{data_dir}/valid.bin", f"{data_dir}/valid1.bin")
shutil.copyfile(f"{data_dir}/valid.idx", f"{data_dir}/valid1.idx")
train_language_model(
data_dir,
"transformer_lm",
["--max-update", "0", "--log-format", "json"] + extra_flags,
run_validation=False,
)
return [x.message for x in logs.records]
def test_combined(self):
flags = ["--combine-valid-subsets"]
logs = self._train(flags)
assert any(["valid1" in x for x in logs]) # loaded 100 examples from valid1
assert not any(["valid1_ppl" in x for x in logs]) # metrics are combined
def test_subsets(self):
flags = ["--valid-subset", "valid,valid1"]
logs = self._train(flags)
assert any(["valid_ppl" in x for x in logs]) # loaded 100 examples from valid1
assert any(["valid1_ppl" in x for x in logs]) # metrics are combined
|
bart_ls-main
|
fairseq-py/tests/test_valid_subset_checks.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bart_ls-main
|
fairseq-py/tests/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import tests.utils as test_utils
import torch
from fairseq.data import (
BacktranslationDataset,
LanguagePairDataset,
TransformEosDataset,
)
from fairseq.sequence_generator import SequenceGenerator
class TestBacktranslationDataset(unittest.TestCase):
def setUp(self):
(
self.tgt_dict,
self.w1,
self.w2,
self.src_tokens,
self.src_lengths,
self.model,
) = test_utils.sequence_generator_setup()
dummy_src_samples = self.src_tokens
self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples)
self.cuda = torch.cuda.is_available()
def _backtranslation_dataset_helper(
self,
remove_eos_from_input_src,
remove_eos_from_output_src,
):
tgt_dataset = LanguagePairDataset(
src=self.tgt_dataset,
src_sizes=self.tgt_dataset.sizes,
src_dict=self.tgt_dict,
tgt=None,
tgt_sizes=None,
tgt_dict=None,
)
generator = SequenceGenerator(
[self.model],
tgt_dict=self.tgt_dict,
max_len_a=0,
max_len_b=200,
beam_size=2,
unk_penalty=0,
)
backtranslation_dataset = BacktranslationDataset(
tgt_dataset=TransformEosDataset(
dataset=tgt_dataset,
eos=self.tgt_dict.eos(),
# remove eos from the input src
remove_eos_from_src=remove_eos_from_input_src,
),
src_dict=self.tgt_dict,
backtranslation_fn=(
lambda sample: generator.generate([self.model], sample)
),
output_collater=TransformEosDataset(
dataset=tgt_dataset,
eos=self.tgt_dict.eos(),
# if we remove eos from the input src, then we need to add it
# back to the output tgt
append_eos_to_tgt=remove_eos_from_input_src,
remove_eos_from_src=remove_eos_from_output_src,
).collater,
cuda=self.cuda,
)
dataloader = torch.utils.data.DataLoader(
backtranslation_dataset,
batch_size=2,
collate_fn=backtranslation_dataset.collater,
)
backtranslation_batch_result = next(iter(dataloader))
eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2
# Note that we sort by src_lengths and add left padding, so actually
# ids will look like: [1, 0]
expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]])
if remove_eos_from_output_src:
expected_src = expected_src[:, :-1]
expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
generated_src = backtranslation_batch_result["net_input"]["src_tokens"]
tgt_tokens = backtranslation_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_backtranslation_dataset_no_eos_in_output_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=False,
remove_eos_from_output_src=True,
)
def test_backtranslation_dataset_with_eos_in_output_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=False,
remove_eos_from_output_src=False,
)
def test_backtranslation_dataset_no_eos_in_input_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=True,
remove_eos_from_output_src=False,
)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_backtranslation_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import unittest
import tempfile
from io import StringIO
import numpy as np
from tests.utils import create_dummy_data, preprocess_lm_data, train_language_model
try:
from pyarrow import plasma
from fairseq.data.plasma_utils import PlasmaView, PlasmaStore
PYARROW_AVAILABLE = True
except ImportError:
PYARROW_AVAILABLE = False
dummy_path = "dummy"
@unittest.skipUnless(PYARROW_AVAILABLE, "")
class TestPlasmaView(unittest.TestCase):
def setUp(self) -> None:
self.tmp_file = tempfile.NamedTemporaryFile() # noqa: P201
self.path = self.tmp_file.name
self.server = PlasmaStore.start(path=self.path, nbytes=10000)
self.client = plasma.connect(self.path, num_retries=10)
def tearDown(self) -> None:
self.client.disconnect()
self.tmp_file.close()
self.server.kill()
def test_two_servers_do_not_share_object_id_space(self):
data_server_1 = np.array([0, 1])
data_server_2 = np.array([2, 3])
server_2_path = self.path
with tempfile.NamedTemporaryFile() as server_1_path:
server = PlasmaStore.start(path=server_1_path.name, nbytes=10000)
arr1 = PlasmaView(
data_server_1, dummy_path, 1, plasma_path=server_1_path.name
)
assert len(arr1.client.list()) == 1
assert (arr1.array == data_server_1).all()
arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=server_2_path)
assert (arr2.array == data_server_2).all()
assert (arr1.array == data_server_1).all()
server.kill()
def test_hash_collision(self):
data_server_1 = np.array([0, 1])
data_server_2 = np.array([2, 3])
arr1 = PlasmaView(data_server_1, dummy_path, 1, plasma_path=self.path)
assert len(arr1.client.list()) == 1
arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=self.path)
assert len(arr1.client.list()) == 1
assert len(arr2.client.list()) == 1
assert (arr2.array == data_server_1).all()
# New hash key based on tuples
arr3 = PlasmaView(
data_server_2, dummy_path, (1, 12312312312, None), plasma_path=self.path
)
assert (
len(arr2.client.list()) == 2
), "No new object was created by using a novel hash key"
assert (
arr3.object_id in arr2.client.list()
), "No new object was created by using a novel hash key"
assert (
arr3.object_id in arr3.client.list()
), "No new object was created by using a novel hash key"
del arr3, arr2, arr1
@staticmethod
def _assert_view_equal(pv1, pv2):
np.testing.assert_array_equal(pv1.array, pv2.array)
def test_putting_same_array_twice(self):
data = np.array([4, 4, 4])
arr1 = PlasmaView(data, dummy_path, 1, plasma_path=self.path)
assert len(self.client.list()) == 1
arr1b = PlasmaView(
data, dummy_path, 1, plasma_path=self.path
) # should not change contents of store
arr1c = PlasmaView(
None, dummy_path, 1, plasma_path=self.path
) # should not change contents of store
assert len(self.client.list()) == 1
self._assert_view_equal(arr1, arr1b)
self._assert_view_equal(arr1, arr1c)
PlasmaView(
data, dummy_path, 2, plasma_path=self.path
) # new object id, adds new entry
assert len(self.client.list()) == 2
new_client = plasma.connect(self.path)
assert len(new_client.list()) == 2 # new client can access same objects
assert isinstance(arr1.object_id, plasma.ObjectID)
del arr1b
del arr1c
def test_plasma_store_full_raises(self):
with tempfile.NamedTemporaryFile() as new_path:
server = PlasmaStore.start(path=new_path.name, nbytes=10000)
with self.assertRaises(plasma.PlasmaStoreFull):
# 2000 floats is more than 2000 bytes
PlasmaView(
np.random.rand(10000, 1), dummy_path, 1, plasma_path=new_path.name
)
server.kill()
def test_object_id_overflow(self):
PlasmaView.get_object_id("", 2 ** 21)
def test_training_lm_plasma(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"transformer_lm",
["--use-plasma-view", "--plasma-path", self.path],
run_validation=True,
)
|
bart_ls-main
|
fairseq-py/tests/test_plasma_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import os
import tempfile
import unittest
from io import StringIO
import torch
from tests.test_binaries import (
create_dummy_roberta_head_data,
train_masked_lm,
train_roberta_head,
)
from tests.utils import (
create_dummy_data,
generate_main,
preprocess_lm_data,
preprocess_translation_data,
train_translation_model,
)
class TestTranslation(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_fb_levenshtein_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_fb_levenshtein_transformer"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"fb_levenshtein_transformer",
[
"--apply-bert-init",
"--early-exit",
"6,6,6",
"--criterion",
"nat_loss",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/fb_test_binaries.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import json
import os
import random
import sys
import tempfile
import unittest
from io import StringIO
from typing import List, Dict
import torch
from fairseq import options
from fairseq_cli import eval_lm, train
from tests.utils import (
create_dummy_data,
generate_main,
preprocess_lm_data,
preprocess_summarization_data,
preprocess_translation_data,
create_laser_data_and_config_json,
train_translation_model,
train_language_model,
)
try:
import transformers # noqa
has_hf_transformers = True
except ImportError:
has_hf_transformers = False
class TestTranslation(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, "fconv_iwslt_de_en")
generate_main(data_dir)
def test_raw(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv_raw") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--dataset-impl", "raw"])
train_translation_model(
data_dir, "fconv_iwslt_de_en", ["--dataset-impl", "raw"]
)
generate_main(data_dir, ["--dataset-impl", "raw"])
def test_update_freq(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_update_freq") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir, "fconv_iwslt_de_en", ["--update-freq", "3"]
)
generate_main(data_dir)
def test_max_positions(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_max_positions") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
with self.assertRaises(Exception) as context:
train_translation_model(
data_dir,
"fconv_iwslt_de_en",
["--max-target-positions", "5"],
)
self.assertTrue(
"skip this example with --skip-invalid-size-inputs-valid-test"
in str(context.exception)
)
train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--max-target-positions",
"5",
"--skip-invalid-size-inputs-valid-test",
],
)
with self.assertRaises(Exception) as context:
generate_main(data_dir)
generate_main(data_dir, ["--skip-invalid-size-inputs-valid-test"])
def test_generation(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_sampling") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, "fconv_iwslt_de_en")
generate_main(
data_dir,
[
"--sampling",
"--temperature",
"2",
"--beam",
"2",
"--nbest",
"2",
],
)
generate_main(
data_dir,
[
"--sampling",
"--sampling-topk",
"3",
"--beam",
"2",
"--nbest",
"2",
],
)
generate_main(
data_dir,
[
"--sampling",
"--sampling-topp",
"0.2",
"--beam",
"2",
"--nbest",
"2",
],
)
generate_main(
data_dir,
[
"--diversity-rate",
"0.5",
"--beam",
"6",
],
)
with self.assertRaises(ValueError):
generate_main(
data_dir,
[
"--diverse-beam-groups",
"4",
"--match-source-len",
],
)
generate_main(data_dir, ["--prefix-size", "2"])
generate_main(data_dir, ["--retain-dropout"])
def test_eval_bleu(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_eval_bleu") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--eval-bleu",
"--eval-bleu-print-samples",
"--eval-bleu-remove-bpe",
"--eval-bleu-detok",
"space",
"--eval-bleu-args",
'{"beam": 4, "min_len": 10}',
],
)
def test_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lstm_wiseman_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-out-embed-dim",
"8",
],
)
generate_main(data_dir)
def test_lstm_bidirectional(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm_bidirectional") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lstm",
[
"--encoder-layers",
"2",
"--encoder-bidirectional",
"--encoder-hidden-size",
"16",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-out-embed-dim",
"8",
"--decoder-layers",
"2",
],
)
generate_main(data_dir)
def test_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
run_validation=True,
)
generate_main(data_dir)
def test_multilingual_transformer(self):
# test with all combinations of encoder/decoder lang tokens
encoder_langtok_flags = [
[],
["--encoder-langtok", "src"],
["--encoder-langtok", "tgt"],
]
decoder_langtok_flags = [[], ["--decoder-langtok"]]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(
f"test_multilingual_transformer_{i}_{j}"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
arch="multilingual_transformer",
task="multilingual_translation",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out,out-in"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"multilingual_translation",
"--lang-pairs",
"in-out,out-in",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
@unittest.skipIf(
sys.platform.lower() == "darwin", "skip latent depth test on MacOS"
)
def test_multilingual_translation_latent_depth(self):
# test with latent depth in encoder, decoder, or both
encoder_latent_layer = [[], ["--encoder-latent-layer"]]
decoder_latent_layer = [[], ["--decoder-latent-layer"]]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_latent_layer)):
for j in range(len(decoder_latent_layer)):
if i == 0 and j == 0:
continue
enc_ll_flag = encoder_latent_layer[i]
dec_ll_flag = decoder_latent_layer[j]
with tempfile.TemporaryDirectory(
f"test_multilingual_translation_latent_depth_{i}_{j}"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(
data_dir, extra_flags=["--joined-dictionary"]
)
train_translation_model(
data_dir,
arch="latent_multilingual_transformer",
task="multilingual_translation_latent_depth",
extra_flags=[
"--user-dir",
"examples/latent_depth/latent_depth_src",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--share-encoders",
"--share-decoders",
"--sparsity-weight",
"0.1",
]
+ enc_ll_flag
+ dec_ll_flag,
lang_flags=["--lang-pairs", "in-out,out-in"],
run_validation=True,
extra_valid_flags=[
"--user-dir",
"examples/latent_depth/latent_depth_src",
]
+ enc_ll_flag
+ dec_ll_flag,
)
generate_main(
data_dir,
extra_flags=[
"--user-dir",
"examples/latent_depth/latent_depth_src",
"--task",
"multilingual_translation_latent_depth",
"--lang-pairs",
"in-out,out-in",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ll_flag
+ dec_ll_flag,
)
def test_translation_multi_simple_epoch(self):
# test with all combinations of encoder/decoder lang tokens
encoder_langtok_flags = [
[],
["--encoder-langtok", "src"],
["--encoder-langtok", "tgt"],
]
decoder_langtok_flags = [[], ["--decoder-langtok"]]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(
f"test_translation_multi_simple_epoch_{i}_{j}"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(
data_dir, extra_flags=["--joined-dictionary"]
)
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
"--virtual-epoch-size",
"1000",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out,out-in"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out,out-in",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_translation_multi_simple_epoch_no_vepoch(self):
# test with all combinations of encoder/decoder lang tokens
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ["--encoder-langtok", "src"]
dec_ltok_flag = ["--decoder-langtok"]
with tempfile.TemporaryDirectory(
"test_translation_multi_simple_epoch_dict"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_translation_multi_simple_epoch_dicts(self):
# test with all combinations of encoder/decoder lang tokens
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ["--encoder-langtok", "src"]
dec_ltok_flag = ["--decoder-langtok"]
with tempfile.TemporaryDirectory(
"test_translation_multi_simple_epoch_dict"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
"--virtual-epoch-size",
"1000",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_translation_multi_simple_epoch_src_tgt_dict_spec(self):
# test the specification of explicit --src-dict and --tgt-dict
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ["--encoder-langtok", "src"]
dec_ltok_flag = ["--decoder-langtok"]
with tempfile.TemporaryDirectory(
"test_translation_multi_simple_epoch_dict"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--source-dict",
f"{data_dir}/dict.in.txt",
"--target-dict",
f"{data_dir}/dict.out.txt",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
"--virtual-epoch-size",
"1000",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_transformer_cross_self_attention(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_transformer_cross_self_attention"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--no-cross-attention",
"--cross-self-attention",
],
run_validation=True,
)
generate_main(data_dir, extra_flags=[])
def test_transformer_pointer_generator(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_transformer_pointer_generator"
) as data_dir:
create_dummy_data(data_dir)
preprocess_summarization_data(data_dir)
train_translation_model(
data_dir,
"transformer_pointer_generator",
extra_flags=[
"--user-dir",
"examples/pointer_generator/pointer_generator_src",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--alignment-layer",
"-1",
"--alignment-heads",
"1",
"--source-position-markers",
"0",
],
run_validation=True,
extra_valid_flags=[
"--user-dir",
"examples/pointer_generator/pointer_generator_src",
],
)
generate_main(
data_dir,
extra_flags=[
"--user-dir",
"examples/pointer_generator/pointer_generator_src",
],
)
def test_lightconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lightconv") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lightconv_iwslt_de_en",
[
"--encoder-conv-type",
"lightweight",
"--decoder-conv-type",
"lightweight",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
)
generate_main(data_dir)
def test_dynamicconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_dynamicconv") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lightconv_iwslt_de_en",
[
"--encoder-conv-type",
"dynamic",
"--decoder-conv-type",
"dynamic",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
)
generate_main(data_dir)
def test_cmlm_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_cmlm_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"cmlm_transformer",
[
"--apply-bert-init",
"--criterion",
"nat_loss",
"--noise",
"full_mask",
"--pred-length-offset",
"--length-loss-factor",
"0.1",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
def test_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_nonautoregressive_transformer"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"nonautoregressive_transformer",
[
"--apply-bert-init",
"--src-embedding-copy",
"--criterion",
"nat_loss",
"--noise",
"full_mask",
"--pred-length-offset",
"--length-loss-factor",
"0.1",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"0",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
# def test_nat_crf_transformer(self):
# with contextlib.redirect_stdout(StringIO()):
# with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir:
# create_dummy_data(data_dir)
# preprocess_translation_data(data_dir, ['--joined-dictionary'])
# train_translation_model(data_dir, 'nacrf_transformer', [
# '--apply-bert-init', '--criterion',
# 'nat_loss', '--noise', 'full_mask', '--pred-length-offset',
# '--length-loss-factor', '0.1',
# '--word-ins-loss-factor', '0.5',
# '--crf-lowrank-approx', '1',
# '--crf-beam-approx', '1'
# ], task='translation_lev')
# generate_main(data_dir, [
# '--task', 'translation_lev',
# '--iter-decode-max-iter', '0',
# '--iter-decode-eos-penalty', '0',
# '--print-step',
# ])
def test_iterative_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_iterative_nonautoregressive_transformer"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"iterative_nonautoregressive_transformer",
[
"--apply-bert-init",
"--src-embedding-copy",
"--criterion",
"nat_loss",
"--noise",
"full_mask",
"--stochastic-approx",
"--dae-ratio",
"0.5",
"--train-step",
"3",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
def test_insertion_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_insertion_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"insertion_transformer",
[
"--apply-bert-init",
"--criterion",
"nat_loss",
"--noise",
"random_mask",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
def test_mixture_of_experts(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_moe") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--task",
"translation_moe",
"--user-dir",
"examples/translation_moe/translation_moe_src",
"--method",
"hMoElp",
"--mean-pool-gating-network",
"--num-experts",
"3",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
)
generate_main(
data_dir,
[
"--task",
"translation_moe",
"--user-dir",
"examples/translation_moe/translation_moe_src",
"--method",
"hMoElp",
"--mean-pool-gating-network",
"--num-experts",
"3",
"--gen-expert",
"0",
],
)
def test_alignment(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_alignment") as data_dir:
create_dummy_data(data_dir, alignment=True)
preprocess_translation_data(data_dir, ["--align-suffix", "align"])
train_translation_model(
data_dir,
"transformer_align",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--load-alignments",
"--alignment-layer",
"1",
"--criterion",
"label_smoothed_cross_entropy_with_alignment",
],
run_validation=True,
)
generate_main(data_dir)
def test_laser_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_laser_lstm") as data_dir:
laser_config_file = create_laser_data_and_config_json(data_dir)
train_translation_model(
laser_config_file.name,
"laser_lstm",
[
"--user-dir",
"examples/laser/laser_src",
"--weighting-alpha",
"0.3",
"--encoder-bidirectional",
"--encoder-hidden-size",
"512",
"--encoder-layers",
"5",
"--decoder-layers",
"1",
"--encoder-embed-dim",
"320",
"--decoder-embed-dim",
"320",
"--decoder-lang-embed-dim",
"32",
"--save-dir",
data_dir,
"--disable-validation",
],
task="laser",
lang_flags=[],
)
def test_laser_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_laser_transformer") as data_dir:
laser_config_file = create_laser_data_and_config_json(data_dir)
train_translation_model(
laser_config_file.name,
"laser_transformer",
[
"--user-dir",
"examples/laser/laser_src",
"--weighting-alpha",
"0.3",
"--encoder-embed-dim",
"320",
"--decoder-embed-dim",
"320",
"--decoder-lang-embed-dim",
"32",
"--save-dir",
data_dir,
"--disable-validation",
],
task="laser",
lang_flags=[],
)
def test_alignment_full_context(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_alignment") as data_dir:
create_dummy_data(data_dir, alignment=True)
preprocess_translation_data(data_dir, ["--align-suffix", "align"])
train_translation_model(
data_dir,
"transformer_align",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--load-alignments",
"--alignment-layer",
"1",
"--criterion",
"label_smoothed_cross_entropy_with_alignment",
"--full-context-alignment",
],
run_validation=True,
)
generate_main(data_dir)
def test_transformer_layerdrop(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_layerdrop") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"3",
"--decoder-layers",
"3",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--encoder-layerdrop",
"0.01",
"--decoder-layerdrop",
"0.01",
],
)
generate_main(data_dir)
generate_main(
data_dir,
[
"--model-overrides",
"{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}",
],
)
class TestStories(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv_self_att_wp(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv_self_att_wp") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
config = [
"--encoder-layers",
"[(128, 3)] * 2",
"--decoder-layers",
"[(128, 3)] * 2",
"--decoder-attention",
"True",
"--encoder-attention",
"False",
"--gated-attention",
"True",
"--self-attention",
"True",
"--project-input",
"True",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-out-embed-dim",
"8",
"--multihead-self-attention-nheads",
"2",
]
train_translation_model(data_dir, "fconv_self_att_wp", config)
generate_main(data_dir)
# fusion model
os.rename(
os.path.join(data_dir, "checkpoint_last.pt"),
os.path.join(data_dir, "pretrained.pt"),
)
config.extend(
[
"--pretrained",
"True",
"--pretrained-checkpoint",
os.path.join(data_dir, "pretrained.pt"),
"--save-dir",
os.path.join(data_dir, "fusion_model"),
]
)
train_translation_model(data_dir, "fconv_self_att_wp", config)
class TestLanguageModeling(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"fconv_lm",
[
"--decoder-layers",
"[(850, 3)] * 2 + [(1024,4)]",
"--decoder-embed-dim",
"280",
"--optimizer",
"nag",
"--lr",
"0.1",
],
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_transformer_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"transformer_lm",
["--add-bos-token", '--nval', '1'],
run_validation=True,
)
eval_lm_main(data_dir)
eval_lm_main(data_dir, extra_flags=["--context-window", "25"])
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_transformer_lm_with_adaptive_softmax(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_transformer_lm_with_adaptive_softmax"
) as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"transformer_lm",
[
"--add-bos-token",
"--criterion",
"adaptive_loss",
"--adaptive-softmax-cutoff",
"5,10,15",
],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_lightconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lightconv_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"lightconv_lm",
["--add-bos-token"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_lstm_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"lstm_lm",
["--add-bos-token"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_lstm_lm_residuals(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm_lm_residuals") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"lstm_lm",
["--add-bos-token", "--residuals"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
@unittest.skipIf(not has_hf_transformers, "skip test if transformers is missing")
def test_transformer_xl_bptt_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_xl_bptt_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
task_flags = [
"--user-dir",
"examples/truncated_bptt",
"--task",
"truncated_bptt_lm",
"--batch-size",
"2",
"--tokens-per-sample",
"50",
]
train_language_model(
data_dir=data_dir,
arch="transformer_xl",
extra_flags=task_flags
+ [
"--n-layer",
"2",
],
task="truncated_bptt_lm",
run_validation=True,
extra_valid_flags=task_flags,
)
eval_lm_main(data_dir, extra_flags=task_flags)
# Train with activation offloading
train_language_model(
data_dir=data_dir,
arch="transformer_xl",
extra_flags=task_flags
+ [
"--n-layer",
"2",
"--offload-activations",
],
task="truncated_bptt_lm",
run_validation=True,
extra_valid_flags=task_flags,
)
class TestMaskedLanguageModel(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_legacy_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(data_dir, "masked_lm")
def test_roberta_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_masked_lm(
data_dir, "roberta_base", extra_flags=["--encoder-layers", "2"]
)
def test_roberta_sentence_prediction(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, "input0"))
preprocess_lm_data(os.path.join(data_dir, "label"))
train_roberta_head(data_dir, "roberta_base", num_classes=num_classes)
def test_roberta_regression_single(self):
num_classes = 1
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_roberta_regression_single"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"roberta_base",
num_classes=num_classes,
extra_flags=["--regression-target"],
)
def test_roberta_regression_multiple(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_roberta_regression_multiple"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"roberta_base",
num_classes=num_classes,
extra_flags=["--regression-target"],
)
def test_linformer_roberta_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_linformer_roberta_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_masked_lm(
data_dir,
"linformer_roberta_base",
extra_flags=[
"--user-dir",
"examples/linformer/linformer_src",
"--encoder-layers",
"2",
],
)
def test_linformer_roberta_sentence_prediction(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_linformer_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, "input0"))
preprocess_lm_data(os.path.join(data_dir, "label"))
train_roberta_head(
data_dir,
"linformer_roberta_base",
num_classes=num_classes,
extra_flags=["--user-dir", "examples/linformer/linformer_src"],
)
def test_linformer_roberta_regression_single(self):
num_classes = 1
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_linformer_roberta_regression_single"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"linformer_roberta_base",
num_classes=num_classes,
extra_flags=[
"--regression-target",
"--user-dir",
"examples/linformer/linformer_src",
],
)
def test_linformer_roberta_regression_multiple(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_linformer_roberta_regression_multiple"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"linformer_roberta_base",
num_classes=num_classes,
extra_flags=[
"--regression-target",
"--user-dir",
"examples/linformer/linformer_src",
],
)
def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(
data_dir,
arch="masked_lm",
extra_args=("--encoder-learned-pos",) if learned_pos_emb else (),
)
with tempfile.TemporaryDirectory(
"test_mlm_translation"
) as translation_dir:
create_dummy_data(translation_dir)
preprocess_translation_data(
translation_dir, extra_flags=["--joined-dictionary"]
)
# Train transformer with data_dir/checkpoint_last.pt
train_translation_model(
translation_dir,
arch="transformer_from_pretrained_xlm",
extra_flags=[
"--decoder-layers",
"1",
"--decoder-embed-dim",
"32",
"--decoder-attention-heads",
"1",
"--decoder-ffn-embed-dim",
"32",
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
"--pretrained-xlm-checkpoint",
"{}/checkpoint_last.pt".format(data_dir),
"--activation-fn",
"gelu",
"--max-source-positions",
"500",
"--max-target-positions",
"500",
]
+ (
["--encoder-learned-pos", "--decoder-learned-pos"]
if learned_pos_emb
else []
)
+ (["--init-encoder-only"] if encoder_only else []),
task="translation_from_pretrained_xlm",
)
def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(True, False)
def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(False, False)
def test_pretrained_masked_lm_for_translation_encoder_only(self):
self._test_pretrained_masked_lm_for_translation(True, True)
def test_r4f_roberta(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_r4f_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, "input0"))
preprocess_lm_data(os.path.join(data_dir, "label"))
train_roberta_head(
data_dir,
"roberta_base",
num_classes=num_classes,
extra_flags=[
"--user-dir",
"examples/rxf/rxf_src",
"--criterion",
"sentence_prediction_r3f",
"--spectral-norm-classification-head",
],
)
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
train_parser = options.get_training_parser()
# TODO: langs should be in and out right?
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"cross_lingual_lm",
data_dir,
"--arch",
arch,
# Optimizer args
"--optimizer",
"adam",
"--lr-scheduler",
"reduce_lr_on_plateau",
"--lr-shrink",
"0.5",
"--lr",
"0.0001",
"--stop-min-lr",
"1e-09",
# dropout, attention args
"--dropout",
"0.1",
"--attention-dropout",
"0.1",
# MLM args
"--criterion",
"legacy_masked_lm_loss",
"--masked-lm-only",
"--monolingual-langs",
"in,out",
"--num-segment",
"5",
# Transformer args: use a small transformer model for fast training
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
# Other training args
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--dataset-impl",
"raw",
"--num-workers",
"0",
]
+ list(extra_args),
)
train.main(train_args)
class TestOptimizers(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_optimizers(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_optimizers") as data_dir:
# Use just a bit of data and tiny model to keep this test runtime reasonable
create_dummy_data(data_dir, num_examples=10, maxlen=5)
preprocess_translation_data(data_dir)
optimizers = ["adafactor", "adam", "nag", "adagrad", "sgd", "adadelta"]
last_checkpoint = os.path.join(data_dir, "checkpoint_last.pt")
for optimizer in optimizers:
if os.path.exists(last_checkpoint):
os.remove(last_checkpoint)
train_translation_model(
data_dir,
"lstm",
[
"--required-batch-size-multiple",
"1",
"--encoder-layers",
"1",
"--encoder-hidden-size",
"32",
"--decoder-layers",
"1",
"--optimizer",
optimizer,
],
)
generate_main(data_dir)
def read_last_log_entry(
logs: List[logging.LogRecord], logger_name: str
) -> Dict[str, float]:
for x in reversed(logs):
if x.name == logger_name:
return json.loads(x.message)
raise ValueError(f"No entries from {logger_name} found in captured logs")
class TestActivationCheckpointing(unittest.TestCase):
base_flags = [
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--restore-file",
"x.pt",
"--log-format",
"json",
"--log-interval",
"1",
"--max-update",
"2",
]
def _train(self, data_dir, extra_flags):
with self.assertLogs() as logs:
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
self.base_flags + extra_flags,
run_validation=True,
extra_valid_flags=["--log-format", "json"],
)
return logs.records
def test_activation_offloading_does_not_change_metrics(self):
"""Neither ----checkpoint-activations nor --offload-activations should change loss"""
with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir:
with self.assertLogs():
create_dummy_data(data_dir, num_examples=20)
preprocess_translation_data(data_dir)
offload_logs = self._train(data_dir, ["--offload-activations"])
baseline_logs = self._train(data_dir, [])
assert len(baseline_logs) == len(offload_logs)
baseline_valid_stats = read_last_log_entry(baseline_logs, "valid")
offload_valid_stats = read_last_log_entry(offload_logs, "valid")
baseline_train_stats = read_last_log_entry(baseline_logs, "train")
offload_train_stats = read_last_log_entry(offload_logs, "train")
assert (
baseline_train_stats["train_loss"] == offload_train_stats["train_loss"]
)
assert (
baseline_valid_stats["valid_loss"] == offload_valid_stats["valid_loss"]
)
def test_activation_checkpointing_does_not_change_metrics(self):
"""--checkpoint-activations should not change loss"""
with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir:
with self.assertLogs():
create_dummy_data(data_dir, num_examples=20)
preprocess_translation_data(data_dir)
ckpt_logs = self._train(data_dir, ["--checkpoint-activations"])
baseline_logs = self._train(data_dir, [])
assert len(baseline_logs) == len(ckpt_logs)
baseline_train_stats = read_last_log_entry(baseline_logs, "train")
ckpt_train_stats = read_last_log_entry(ckpt_logs, "train")
assert baseline_train_stats["train_loss"] == ckpt_train_stats["train_loss"]
baseline_valid_stats = read_last_log_entry(baseline_logs, "valid")
ckpt_valid_stats = read_last_log_entry(ckpt_logs, "valid")
assert baseline_valid_stats["valid_loss"] == ckpt_valid_stats["valid_loss"]
def create_dummy_roberta_head_data(
data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False
):
input_dir = "input0"
def _create_dummy_data(filename):
random_data = torch.rand(num_examples * maxlen)
input_data = 97 + torch.floor(26 * random_data).int()
if regression:
output_data = torch.rand((num_examples, num_classes))
else:
output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int()
with open(os.path.join(data_dir, input_dir, filename + ".out"), "w") as f_in:
label_filename = filename + ".label" if regression else filename + ".out"
with open(os.path.join(data_dir, "label", label_filename), "w") as f_out:
offset = 0
for i in range(num_examples):
# write example input
ex_len = random.randint(1, maxlen)
ex_str = " ".join(map(chr, input_data[offset : offset + ex_len]))
print(ex_str, file=f_in)
# write example label
if regression:
class_str = " ".join(map(str, output_data[i].numpy()))
print(class_str, file=f_out)
else:
class_str = "class{}".format(output_data[i])
print(class_str, file=f_out)
offset += ex_len
os.mkdir(os.path.join(data_dir, input_dir))
os.mkdir(os.path.join(data_dir, "label"))
_create_dummy_data("train")
_create_dummy_data("valid")
_create_dummy_data("test")
def train_masked_lm(data_dir, arch, extra_flags=None):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"masked_lm",
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--criterion",
"masked_lm",
"--batch-size",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
train.main(train_args)
def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"sentence_prediction",
data_dir,
"--arch",
arch,
"--encoder-layers",
"2",
"--num-classes",
str(num_classes),
"--optimizer",
"adam",
"--lr",
"0.0001",
"--criterion",
"sentence_prediction",
"--max-tokens",
"500",
"--max-positions",
"500",
"--batch-size",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
train.main(train_args)
def eval_lm_main(data_dir, extra_flags=None):
eval_lm_parser = options.get_eval_lm_parser()
eval_lm_args = options.parse_args_and_arch(
eval_lm_parser,
[
data_dir,
"--path",
os.path.join(data_dir, "checkpoint_last.pt"),
"--no-progress-bar",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
eval_lm.main(eval_lm_args)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_binaries.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import unittest
import torch
from fairseq.token_generation_constraints import *
def tensorize(constraints: List[List[int]]) -> torch.Tensor:
return [torch.tensor(x) for x in constraints]
class TestHelperRoutines(unittest.TestCase):
def setUp(self):
self.examples = [
([[]], torch.tensor([[0]])),
([[], []], torch.tensor([[0], [0]])),
([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])),
(
[
[
torch.tensor([3, 1, 2]),
torch.tensor([3]),
torch.tensor([4, 5, 6, 7]),
],
[],
[torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
],
torch.tensor(
[
[3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0],
]
),
),
]
def test_packing(self):
"""Ensures the list of lists of tensors gets packed correctly."""
for batch_constraints, expected_tensor in self.examples:
packed = pack_constraints(batch_constraints)
assert torch.equal(packed, expected_tensor)
class TestUnorderedConstraintState(unittest.TestCase):
def setUp(self):
# Tuples of (contraint set, expected printed graph, token counts per node)
self.examples = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
"([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))",
{1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1},
),
([], "[None].False#0", {}),
(tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}),
(
tensorize([[100000, 1, 2, 3, 4, 5]]),
"([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))",
{100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
),
(
tensorize([[1, 2], [1, 2]]),
"([None].False#2 ([1].False#2 [2].True#2))",
{1: 2, 2: 2},
),
(
tensorize([[1, 2], [3, 4]]),
"([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))",
{1: 1, 2: 1, 3: 1, 4: 1},
),
]
self.sequences = [
(
self.examples[0][0],
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 94],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4, 999],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[4, 5, 6, 8],
{"bank": 2, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
# Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5]
# [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]],
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": True},
),
(
tensorize([[1], [2, 3]]),
# Should not be able to get credit for entering 1 a second time
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[4][0],
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
self.examples[4][0],
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
(
self.examples[5][0],
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
]
def test_graphs(self):
"""
Test whether unordered graph systems are created correctly.
"""
for example in self.examples:
constraints, expected, gold_counts = example
c = ConstraintNode.create(constraints)
assert (
ConstraintNode.print_graph(c) == expected
), f"got {ConstraintNode.print_graph(c)}, expected {expected}"
assert (
c.token_counts() == gold_counts
), f"{c} got {c.token_counts()} wanted {gold_counts}"
def test_next_tokens(self):
"""
Tests that the set of next tokens is correct.
"""
for example in self.examples:
constraints, expected, gold_counts = example
root = ConstraintNode.create(constraints)
root_tokens = set(root.children.keys())
for sequence in constraints:
state = UnorderedConstraintState(root)
for token in sequence:
all_tokens = root_tokens.union(state.node.children.keys())
assert (
all_tokens == state.next_tokens()
), f"ALL {all_tokens} NEXT {state.next_tokens()}"
state = state.advance(token)
def test_sequences(self):
for constraints, tokens, expected in self.sequences:
state = UnorderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
class TestOrderedConstraintState(unittest.TestCase):
def setUp(self):
self.sequences = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 94],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 3, 999, 1, 4],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 999, 999],
{"bank": 3, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 77, 1, 3, 1],
{"bank": 6, "num_completed": 2, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1], [2, 3]]),
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [3, 4]]),
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
]
def test_sequences(self):
for i, (constraints, tokens, expected) in enumerate(self.sequences):
state = OrderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_constraints.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import unittest
import tests.utils as test_utils
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
)
class TestLabelSmoothing(unittest.TestCase):
def setUp(self):
# build dictionary
self.d = test_utils.dummy_dictionary(3)
vocab = len(self.d)
self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens
self.assertEqual(self.d.pad(), 1)
self.assertEqual(self.d.eos(), 2)
self.assertEqual(self.d.unk(), 3)
pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841
# build dataset
self.data = [
# the first batch item has padding
{
"source": torch.LongTensor([w1, eos]),
"target": torch.LongTensor([w1, eos]),
},
{
"source": torch.LongTensor([w1, eos]),
"target": torch.LongTensor([w1, w1, eos]),
},
]
self.sample = next(test_utils.dummy_dataloader(self.data))
# build model
self.args = argparse.Namespace()
self.args.sentence_avg = False
self.args.report_accuracy = False
self.args.probs = (
torch.FloatTensor(
[
# pad eos unk w1 w2 w3
[0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
[0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
[0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
]
)
.unsqueeze(0)
.expand(2, 3, 7)
) # add batch dimension
self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
self.model = self.task.build_model(self.args)
def test_nll_loss(self):
self.args.label_smoothing = 0.1
nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
self.args, self.task
)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(
self.model, self.sample
)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
self.model, self.sample
)
self.assertLess(abs(nll_loss - nll_logging_output["loss"]), 1e-6)
self.assertLess(abs(nll_loss - smooth_logging_output["nll_loss"]), 1e-6)
def test_padding(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample)
def get_one_no_padding(idx):
# create a new sample with just a single batch item so that there's
# no padding
sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
args1 = copy.copy(self.args)
args1.probs = args1.probs[idx, :, :].unsqueeze(0)
model1 = self.task.build_model(args1)
loss1, _, _ = crit(model1, sample1)
return loss1
loss1 = get_one_no_padding(0)
loss2 = get_one_no_padding(1)
self.assertAlmostEqual(loss, loss1 + loss2)
def test_reduction(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample, reduce=True)
unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
self.assertAlmostEqual(loss, unreduced_loss.sum())
def test_zero_eps(self):
self.args.label_smoothing = 0.0
nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
self.args, self.task
)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(
self.model, self.sample
)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
self.model, self.sample
)
self.assertAlmostEqual(nll_loss, smooth_loss)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_label_smoothing.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import random
import sys
from io import StringIO
import torch
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.data import Dictionary
from fairseq.data.language_pair_dataset import collate
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.tasks import LegacyFairseqTask
from fairseq_cli import generate, interactive, preprocess, train, validate
import fairseq.distributed.utils as distributed_utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
def dummy_dictionary(vocab_size, prefix="token_"):
d = Dictionary()
for i in range(vocab_size):
token = prefix + str(i)
d.add_symbol(token)
d.finalize(padding_factor=1) # don't add extra padding symbols
return d
def dummy_dataloader(
samples, padding_idx=1, eos_idx=2, batch_size=None,
):
if batch_size is None:
batch_size = len(samples)
# add any missing data to samples
for i, sample in enumerate(samples):
if "id" not in sample:
sample["id"] = i
# create dataloader
dataset = TestDataset(samples)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)),
)
return iter(dataloader)
def sequence_generator_setup():
# construct dummy dictionary
d = dummy_dictionary(vocab_size=2)
eos = d.eos()
w1 = 4
w2 = 5
# construct source data
src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2 prefix
# sentence 1:
[1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0)
[0.0, unk, 0.9, 0.1], # w2: 0.1
# sentence 2:
[0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25)
[0.00, unk, 0.10, 0.9], # w2: 0.3
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2 prefix
# sentence 1:
[0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9
[
0.6,
unk,
0.2,
0.2,
], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6)
# sentence 2:
[
0.60,
unk,
0.4,
0.00,
], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6)
[0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9
]
),
# step 3:
torch.FloatTensor(
[
# eos w1 w2 prefix
# sentence 1:
[
1.0,
unk,
0.0,
0.0,
], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0)
[
1.0,
unk,
0.0,
0.0,
], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0)
# sentence 2:
[
0.1,
unk,
0.5,
0.4,
], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1)
[
1.0,
unk,
0.0,
0.0,
], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0)
]
),
]
task = TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
tgt_dict = task.target_dictionary
return tgt_dict, w1, w2, src_tokens, src_lengths, model
def create_dummy_data(data_dir, num_examples=100, maxlen=20, alignment=False):
def _create_dummy_data(filename):
data = torch.rand(num_examples * maxlen)
data = 97 + torch.floor(26 * data).int()
with open(os.path.join(data_dir, filename), "w") as h:
offset = 0
for _ in range(num_examples):
ex_len = random.randint(1, maxlen)
ex_str = " ".join(map(chr, data[offset : offset + ex_len]))
print(ex_str, file=h)
offset += ex_len
def _create_dummy_alignment_data(filename_src, filename_tgt, filename):
with open(os.path.join(data_dir, filename_src), "r") as src_f, open(
os.path.join(data_dir, filename_tgt), "r"
) as tgt_f, open(os.path.join(data_dir, filename), "w") as h:
for src, tgt in zip(src_f, tgt_f):
src_len = len(src.split())
tgt_len = len(tgt.split())
avg_len = (src_len + tgt_len) // 2
num_alignments = random.randint(avg_len // 2, 2 * avg_len)
src_indices = torch.floor(torch.rand(num_alignments) * src_len).int()
tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int()
ex_str = " ".join(
[
"{}-{}".format(src, tgt)
for src, tgt in zip(src_indices, tgt_indices)
]
)
print(ex_str, file=h)
_create_dummy_data("train.in")
_create_dummy_data("train.out")
_create_dummy_data("valid.in")
_create_dummy_data("valid.out")
_create_dummy_data("test.in")
_create_dummy_data("test.out")
if alignment:
_create_dummy_alignment_data("train.in", "train.out", "train.align")
_create_dummy_alignment_data("valid.in", "valid.out", "valid.align")
_create_dummy_alignment_data("test.in", "test.out", "test.align")
def preprocess_lm_data(data_dir):
preprocess_parser = options.get_preprocessing_parser()
preprocess_args = preprocess_parser.parse_args(
[
"--only-source",
"--trainpref",
os.path.join(data_dir, "train.out"),
"--validpref",
os.path.join(data_dir, "valid.out"),
"--testpref",
os.path.join(data_dir, "test.out"),
"--destdir",
data_dir,
]
)
preprocess.main(preprocess_args)
def preprocess_translation_data(data_dir, extra_flags=None):
preprocess_parser = options.get_preprocessing_parser()
preprocess_args = preprocess_parser.parse_args(
[
"--source-lang",
"in",
"--target-lang",
"out",
"--trainpref",
os.path.join(data_dir, "train"),
"--validpref",
os.path.join(data_dir, "valid"),
"--testpref",
os.path.join(data_dir, "test"),
"--thresholdtgt",
"0",
"--thresholdsrc",
"0",
"--destdir",
data_dir,
]
+ (extra_flags or []),
)
preprocess.main(preprocess_args)
def preprocess_summarization_data(data_dir, extra_flags=None):
preprocess_parser = options.get_preprocessing_parser()
preprocess_args = preprocess_parser.parse_args(
[
"--source-lang",
"in",
"--target-lang",
"out",
"--trainpref",
os.path.join(data_dir, "train"),
"--validpref",
os.path.join(data_dir, "valid"),
"--testpref",
os.path.join(data_dir, "test"),
"--thresholdtgt",
"0",
"--thresholdsrc",
"0",
"--joined-dictionary",
"--destdir",
data_dir,
]
+ (extra_flags or []),
)
preprocess.main(preprocess_args)
def create_laser_data_and_config_json(data_dir):
src_langs = ["de", "fr", "ru", "tr", "zh"]
tgt_langs = ["en", "es"]
config_json = {}
config_train_json = []
src_vocab = None
tgt_vocab = None
for src_lang in src_langs:
for tgt_lang in tgt_langs:
langpair_folder = f"{src_lang}-{tgt_lang}"
langpair_path = os.path.join(data_dir, langpair_folder)
os.mkdir(langpair_path)
create_dummy_data(langpair_path)
preprocess_translation_data(langpair_path, ["--dataset-impl", "cached"])
src_vocab = os.path.join(langpair_path, "dict.in.txt")
tgt_vocab = os.path.join(langpair_path, "dict.out.txt")
config_train_json.append(
{
"id": 0 if tgt_lang == "en" else 1,
"src": os.path.join(langpair_path, "train.in-out.in"),
"tgt": os.path.join(langpair_path, "train.in-out.out"),
}
)
config_json["src_vocab"] = src_vocab
config_json["tgt_vocab"] = tgt_vocab
config_json["train"] = config_train_json
with open(os.path.join(data_dir, "laserconfig.json"), "w") as config_file:
json.dump(config_json, config_file)
return config_file
def train_translation_model(
data_dir,
arch,
extra_flags=None,
task="translation",
run_validation=False,
lang_flags=None,
extra_valid_flags=None,
world_size=1,
):
if lang_flags is None:
lang_flags = [
"--source-lang",
"in",
"--target-lang",
"out",
]
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
task,
data_dir,
"--save-dir",
data_dir,
"--arch",
arch,
"--optimizer",
"nag",
"--lr",
"0.05",
"--max-tokens",
"500",
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
str(world_size),
"--num-workers",
"0",
]
+ lang_flags
+ (extra_flags or []),
)
cfg = convert_namespace_to_omegaconf(train_args)
distributed_utils.call_main(cfg, train.main)
if run_validation:
# test validation
validate_parser = options.get_validation_parser()
validate_args = options.parse_args_and_arch(
validate_parser,
[
"--task",
task,
data_dir,
"--path",
os.path.join(data_dir, "checkpoint_last.pt"),
"--valid-subset",
"valid",
"--max-tokens",
"500",
"--no-progress-bar",
"--num-workers",
"0",
]
+ lang_flags
+ (extra_valid_flags or []),
)
validate.main(validate_args)
def generate_main(data_dir, extra_flags=None, path=None):
if extra_flags is None:
extra_flags = [
"--print-alignment",
]
if path is None:
path = os.path.join(data_dir, "checkpoint_last.pt")
generate_parser = options.get_generation_parser()
generate_args = options.parse_args_and_arch(
generate_parser,
[
data_dir,
"--path",
path,
"--beam",
"3",
"--batch-size",
"64",
"--max-len-b",
"5",
"--gen-subset",
"valid",
"--no-progress-bar",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
# evaluate model in batch mode
generate.main(generate_args)
# evaluate model interactively
generate_args.buffer_size = 0
generate_args.input = "-"
generate_args.batch_size = None
orig_stdin = sys.stdin
sys.stdin = StringIO("h e l l o\n")
interactive.main(generate_args)
sys.stdin = orig_stdin
class TestDataset(torch.utils.data.Dataset):
def __init__(self, data):
super().__init__()
self.data = data
self.sizes = None
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class TestTranslationTask(LegacyFairseqTask):
def __init__(self, args, src_dict, tgt_dict, model):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.model = model
@classmethod
def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None):
return cls(args, src_dict, tgt_dict, model)
def build_model(self, args):
return TestModel.build_model(args, self)
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.tgt_dict
class TestModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
class TestEncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
return EncoderOut(
encoder_out=src_tokens,
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def reorder_encoder_out(self, encoder_out, new_order):
return EncoderOut(
encoder_out=encoder_out.encoder_out.index_select(0, new_order),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
class TestIncrementalDecoder(FairseqIncrementalDecoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
assert hasattr(args, "beam_probs") or hasattr(args, "probs")
args.max_decoder_positions = getattr(args, "max_decoder_positions", 100)
self.args = args
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bbsz = prev_output_tokens.size(0)
vocab = len(self.dictionary)
src_len = encoder_out.encoder_out.size(1)
tgt_len = prev_output_tokens.size(1)
# determine number of steps
if incremental_state is not None:
# cache step number
step = utils.get_incremental_state(self, incremental_state, "step")
if step is None:
step = 0
utils.set_incremental_state(self, incremental_state, "step", step + 1)
steps = [step]
else:
steps = list(range(tgt_len))
# define output in terms of raw probs
if hasattr(self.args, "probs"):
assert (
self.args.probs.dim() == 3
), "expected probs to have size bsz*steps*vocab"
probs = self.args.probs.index_select(1, torch.LongTensor(steps))
else:
probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_()
for i, step in enumerate(steps):
# args.beam_probs gives the probability for every vocab element,
# starting with eos, then unknown, and then the rest of the vocab
if step < len(self.args.beam_probs):
probs[:, i, self.dictionary.eos() :] = self.args.beam_probs[step]
else:
probs[:, i, self.dictionary.eos()] = 1.0
# random attention
attn = torch.rand(bbsz, tgt_len, src_len)
dev = prev_output_tokens.device
return probs.to(dev), {"attn": [attn.to(dev)]}
def get_normalized_probs(self, net_output, log_probs, _):
# the decoder returns probabilities directly
probs = net_output[0]
if log_probs:
return probs.log()
else:
return probs
def max_positions(self):
return self.args.max_decoder_positions
class TestReshapingEncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
b_sz, t_sz = src_tokens.shape
padding_needed = t_sz % 2
x = src_tokens
if padding_needed > 0:
padding_needed = 2 - padding_needed
x = F.pad(x, (0, padding_needed))
return EncoderOut(
encoder_out=x.view(b_sz, -1, 2),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def reorder_encoder_out(self, encoder_out, new_order):
return EncoderOut(
encoder_out=encoder_out.encoder_out.index_select(0, new_order),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
class TestReshapingModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestReshapingEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
class TestAdditionalInputEncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
assert "fancy_other_input" in kwargs
assert kwargs["fancy_other_input"] is not None
return EncoderOut(
encoder_out=src_tokens,
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def reorder_encoder_out(self, encoder_out, new_order):
return EncoderOut(
encoder_out=encoder_out.encoder_out.index_select(0, new_order),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
class TestAdditionalInputModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestAdditionalInputEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
def train_language_model(
data_dir,
arch,
extra_flags=None,
run_validation=False,
extra_valid_flags=None,
task="language_modeling",
world_size=1,
):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
task,
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
str(world_size),
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
cfg = convert_namespace_to_omegaconf(train_args)
distributed_utils.call_main(cfg, train.main)
if run_validation:
# test validation
validate_parser = options.get_validation_parser()
validate_args = options.parse_args_and_arch(
validate_parser,
[
"--task",
task,
data_dir,
"--path",
os.path.join(data_dir, "checkpoint_last.pt"),
"--valid-subset",
"valid",
"--max-tokens",
"500",
"--no-progress-bar",
"--num-workers",
"0",
]
+ (extra_valid_flags or []),
)
validate.main(validate_args)
|
bart_ls-main
|
fairseq-py/tests/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
from typing import Sequence
from fairseq.data import LanguagePairDataset, ListDataset, RoundRobinZipDatasets
from tests.test_train import mock_dict
def lang_pair_dataset(lengths: Sequence[int]) -> LanguagePairDataset:
tokens = [[i] * l for i, l in enumerate(lengths)]
return LanguagePairDataset(ListDataset(tokens), lengths, mock_dict())
def sample(id: int, length: int):
return {"id": id, "source": [id] * length, "target": None}
class TestDataset(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_round_robin_zip_datasets(self):
long_dataset = lang_pair_dataset([10, 9, 8, 11])
short_dataset = lang_pair_dataset([11, 9])
dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset})
# Dataset is now sorted by sentence length
dataset.ordered_indices()
assert dataset.longest_dataset is long_dataset
self.assertEqual(dict(dataset[0]), {"a": sample(2, 8), "b": sample(1, 9)})
# The item 2 of dataset 'a' is with item (2 % 2 = 0) of dataset 'b'
self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 9)})
def test_round_robin_zip_datasets_filtered(self):
long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12])
short_dataset = lang_pair_dataset([11, 20, 9, 1000])
dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset})
# Dataset is now sorted by sentence length
idx = dataset.ordered_indices()
idx, _ = dataset.filter_indices_by_size(idx, {"a": 19, "b": 900})
self.assertEqual(list(idx), [0, 1, 2, 3, 4])
self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)})
self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 20)})
self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(0, 11)})
def test_round_robin_zip_datasets_filtered_with_tuple(self):
long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12])
short_dataset = lang_pair_dataset([11, 20, 9, 1000])
dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset})
# Dataset is now sorted by sentence length
idx = dataset.ordered_indices()
idx, _ = dataset.filter_indices_by_size(idx, 19)
self.assertEqual(list(idx), [0, 1, 2, 3, 4])
self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)})
self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(2, 9)})
self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(2, 9)})
|
bart_ls-main
|
fairseq-py/tests/test_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import torch.nn as nn
from fairseq.modules import ConvTBC
class TestConvTBC(unittest.TestCase):
def test_convtbc(self):
# ksz, in_channels, out_channels
conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
# out_channels, in_channels, ksz
conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
conv_tbc.bias.data.copy_(conv1d.bias.data)
input_tbc = torch.randn(7, 2, 4, requires_grad=True)
input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
input1d.requires_grad = True
output_tbc = conv_tbc(input_tbc)
output1d = conv1d(input1d)
self.assertAlmostEqual(
output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data
)
grad_tbc = torch.randn(output_tbc.size())
grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
output_tbc.backward(grad_tbc)
output1d.backward(grad1d)
self.assertAlmostEqual(
conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data
)
self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
self.assertAlmostEqual(
input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data
)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_convtbc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from collections import OrderedDict
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.multi_corpus_dataset import MultiCorpusDataset
from tests.test_train import mock_dict
class TestMultiCorpusDataset(unittest.TestCase):
def setUp(self):
d = mock_dict()
tokens_1 = torch.LongTensor([i for i in range(1, 5000, 2)]).view(1, -1)
tokens_ds1 = TokenBlockDataset(
tokens_1,
sizes=[tokens_1.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_1 = LanguagePairDataset(
tokens_ds1, tokens_ds1.sizes, d, shuffle=False
)
tokens_2 = torch.LongTensor([i for i in range(0, 5000, 2)]).view(1, -1)
tokens_ds2 = TokenBlockDataset(
tokens_2,
sizes=[tokens_2.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_2 = LanguagePairDataset(
tokens_ds2, tokens_ds2.sizes, d, shuffle=False
)
def _test_sample_helper(
self,
distribution,
):
m = MultiCorpusDataset(
OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
distribution=distribution,
seed=0,
sort_indices=True,
)
m.set_epoch(1)
indices = m.ordered_indices()
count_sample_from_first_dataset = 0
items = set()
for i in indices:
item = m[i]["source"].item()
if item % 2 == 1:
count_sample_from_first_dataset += 1
items.add(item)
sample_from_first_ds_percentage = (
1.0 * count_sample_from_first_dataset / len(indices)
)
self.assertLess(
abs(sample_from_first_ds_percentage - distribution[0]),
0.01,
)
self.assertEqual(
len(items),
int(min(len(self.dataset_1), len(indices) * distribution[0])
+ min(len(self.dataset_1), len(indices) * distribution[1]))
)
print(distribution)
def test_multi_corpus_dataset(self):
for distribution in [[0.5, 0.5], [0.1, 0.9], [0.9, 0.1]]:
self._test_sample_helper(distribution=distribution)
|
bart_ls-main
|
fairseq-py/tests/test_multi_corpus_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import unittest
import torch
from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
from omegaconf import OmegaConf
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestGradientScaling(unittest.TestCase):
def setUp(self):
self.x = torch.tensor([2.0]).cuda().half()
weight = 3.0
bias = 5.0
self.error = 1.0
self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half()
self.loss_fn = torch.nn.L1Loss()
self.model = torch.nn.Linear(1, 1)
self.model.weight.data = torch.tensor([[weight]])
self.model.bias.data = torch.tensor([bias])
self.model.cuda().half()
self.params = list(self.model.parameters())
self.cfg_dls = OmegaConf.create(
{
"optimization": {
"lr": [0.1],
},
"optimizer": {
"_name": "adam",
"lr": [0.1],
"adam_betas": "(0.9, 0.999)",
"adam_eps": 1e-8,
"weight_decay": 0.0,
},
"common": {
"fp16_init_scale": 1,
"fp16_scale_window": 1,
"fp16_scale_tolerance": 1,
"threshold_loss_scale": 1,
"min_loss_scale": 1e-4,
"tpu": False,
},
}
)
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def run_iter(self, model, params, optimizer):
optimizer.zero_grad()
y = model(self.x)
loss = self.loss_fn(y, self.target)
optimizer.backward(loss)
self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16))
grad_norm = optimizer.clip_grad_norm(0)
self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)
optimizer.step()
self.assertEqual(
model.weight,
torch.tensor(
[[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True
),
)
self.assertEqual(
model.bias,
torch.tensor(
[5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True
),
)
self.assertEqual(optimizer.scaler.loss_scale, 2.0)
def test_mixed_precision(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params)
self.run_iter(model, params, optimizer)
self.assertTrue(
all(
torch.all(
fp32_params.eq(
torch.tensor(
[3.1000, 5.1000], device="cuda:0", requires_grad=True
)
)
)
for fp32_params in optimizer.fp32_params.values()
)
)
def test_memory_efficient(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params)
self.run_iter(model, params, optimizer)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_fp16_optimizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import unittest
import torch
from fairseq.optim.adam import FairseqAdam
from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer
from omegaconf import OmegaConf
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestMemoryEfficientFP16(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_load_state_dict(self):
# define simple FP16 model
model = torch.nn.Linear(5, 5).cuda().half()
params = list(model.parameters())
# initialize memory efficient FP16 optimizer
# with pseudo DictConfigs
optimizer = FairseqAdam(
cfg=OmegaConf.create(
vars(
argparse.Namespace(
adam_betas="(0.9, 0.999)",
adam_eps=1e-8,
weight_decay=0.0,
lr=[0.00001],
)
)
),
params=params,
)
me_optimizer = MemoryEfficientFP16Optimizer(
cfg=OmegaConf.create(
{
"common": vars(
argparse.Namespace(
fp16_init_scale=1,
fp16_scale_window=1,
fp16_scale_tolerance=1,
threshold_loss_scale=1,
min_loss_scale=1e-4,
)
)
}
),
params=params,
optimizer=optimizer,
)
# optimizer state is created in the first step
loss = model(torch.rand(5).cuda().half()).sum()
me_optimizer.backward(loss)
me_optimizer.step()
# reload state
state = me_optimizer.state_dict()
me_optimizer.load_state_dict(state)
for k, v in me_optimizer.optimizer.state.items():
self.assertTrue(k.dtype == torch.float16)
for v_i in v.values():
if torch.is_tensor(v_i):
self.assertTrue(v_i.dtype == torch.float32)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_memory_efficient_fp16.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import tests.utils as test_utils
import torch
from fairseq.data import TokenBlockDataset
class TestTokenBlockDataset(unittest.TestCase):
def _build_dataset(self, data, **kwargs):
sizes = [len(x) for x in data]
underlying_ds = test_utils.TestDataset(data)
return TokenBlockDataset(underlying_ds, sizes, **kwargs)
def test_eos_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [1])
self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
self.assertEqual(ds[2].tolist(), [1])
def test_block_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none")
self.assertEqual(ds[0].tolist(), [5, 4, 3])
self.assertEqual(ds[1].tolist(), [2, 1, 8])
self.assertEqual(ds[2].tolist(), [7, 6, 1])
self.assertEqual(ds[3].tolist(), [9, 1])
def test_complete_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(
data, block_size=6, pad=0, eos=1, break_mode="complete"
)
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])
data = [
torch.tensor([4, 3, 2, 1], dtype=torch.long),
torch.tensor([5, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([6, 1], dtype=torch.long),
]
ds = self._build_dataset(
data, block_size=3, pad=0, eos=1, break_mode="complete"
)
self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [5, 1, 1])
self.assertEqual(ds[2].tolist(), [6, 1])
def test_4billion_tokens(self):
"""Regression test for numpy type promotion issue https://github.com/numpy/numpy/issues/5745"""
data = [torch.tensor(list(range(10000)), dtype=torch.long)] * 430000
ds = self._build_dataset(
data, block_size=6, pad=0, eos=1, break_mode="complete"
)
ds[-1] # __getitem__ works
start, end = ds.slice_indices[-1]
assert end > 4294967295 # data must be sufficiently large to overflow uint32
assert not isinstance(
end + 1, float
) # this would also raise, since np.uint64(1) + 1 => 2.0
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_token_block_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import math
import numpy as np
import tests.utils as test_utils
import torch
from fairseq import search
from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerModel
from fairseq.sequence_generator import EnsembleModel, SequenceGenerator
from fairseq.ngram_repeat_block import NGramRepeatBlock
from fairseq.tasks.fairseq_task import LegacyFairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.src_dict = self.dictionary
self.tgt_dict = self.dictionary
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
for id, _ in enumerate(range(vocab_size)):
dummy_dict.add_symbol("{}".format(id), n=1000)
return dummy_dict
def get_dummy_task_and_parser():
"""
to build a fariseq model, we need some dummy parse and task. This function
is used to create dummy task and parser to faciliate model/criterion test
Note: we use FbSpeechRecognitionTask as the dummy task. You may want
to use other task by providing another function
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
class TestJitSequenceGeneratorBase(unittest.TestCase):
def setUp(self):
self.task, self.parser = get_dummy_task_and_parser()
eos = self.task.tgt_dict.eos()
src_tokens = torch.randint(3, 50, (2, 10)).long()
src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1)
src_lengths = torch.LongTensor([2, 10])
self.sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
}
TransformerModel.add_args(self.parser)
args = self.parser.parse_args([])
args.encoder_layers = 2
args.decoder_layers = 1
self.transformer_model = TransformerModel.build_model(args, self.task)
def assertOutputEqual(self, hypo, pos_probs):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores)
self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel())
def assertTensorSizeEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
def assertHypoEqual(self, h1, h2):
"Check two hypos are equal"
self.assertTensorEqual(h1["tokens"], h2["tokens"])
self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"])
self.assertLess(abs(h1["score"] - h2["score"]), 1e-6)
self.assertAlmostEqual(h1["attention"], h2["attention"])
def _test_save_and_load(self, scripted_module):
with tempfile.NamedTemporaryFile() as f:
scripted_module.save(f.name)
torch.jit.load(f.name)
JIT_MSG = "Targeting OSS scriptability for the 1.6 release"
@unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG)
class TestJitSequenceGenerator(TestJitSequenceGeneratorBase):
def test_export_transformer(self):
model = self.transformer_model
torch.jit.script(model)
def test_ensemble_sequence_generator(self):
model = self.transformer_model
generator = SequenceGenerator(
[model],
self.task.tgt_dict,
beam_size=2,
no_repeat_ngram_size=2,
max_len_b=10,
)
scripted_model = torch.jit.script(generator)
self._test_save_and_load(scripted_model)
def test_export_ensemble_model(self):
model = self.transformer_model
ensemble_models = EnsembleModel([model])
torch.jit.script(ensemble_models)
class TestExportSearch(unittest.TestCase):
def setUp(self):
task, _ = get_dummy_task_and_parser()
self.tgt_dict = task.tgt_dict
self.min_top1_prob = 0.4
def test_export_diverse_bs(self):
search_strategy = search.DiverseBeamSearch(
self.tgt_dict, num_groups=2, diversity_strength=0.0
)
torch.jit.script(search_strategy)
def test_export_sampling(self):
low_sampling_topp = self.min_top1_prob / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=low_sampling_topp
)
torch.jit.script(search_strategy)
def test_export_diverse_siblings_search(self):
search_strategy = search.DiverseSiblingsSearch(
self.tgt_dict, diversity_rate=0.5
)
torch.jit.script(search_strategy)
class TestSequenceGeneratorBase(unittest.TestCase):
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
class TestSequenceGenerator(TestSequenceGeneratorBase):
def setUp(self):
(
self.tgt_dict,
self.w1,
self.w2,
src_tokens,
src_lengths,
self.model,
) = test_utils.sequence_generator_setup()
self.sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
}
def test_with_normalization(self):
generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])
def test_without_normalization(self):
# Sentence 1: unchanged from the normalized case
# Sentence 2: beams swap order
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, normalize_scores=False
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)
def test_with_lenpen_favoring_short_hypos(self):
lenpen = 0.6
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
def test_with_lenpen_favoring_long_hypos(self):
lenpen = 5.0
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)
def test_maxlen(self):
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, max_len_b=2
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])
def test_encoder_with_different_output_len(self):
args = self.model.encoder.args
task = test_utils.TestTranslationTask.setup_task(
args, self.tgt_dict, self.tgt_dict
)
reshaping_model = test_utils.TestReshapingModel.build_model(args, task)
generator = SequenceGenerator(
[reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2
)
hypos = generator.forward(self.sample)
for sent in [0, 1]:
for beam in [0, 1]:
assert hypos[sent][beam]["attention"] is not None
def test_generation_with_additional_input(self):
args = self.model.encoder.args
task = test_utils.TestTranslationTask.setup_task(
args, self.tgt_dict, self.tgt_dict
)
add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task)
generator = SequenceGenerator([add_input_model], self.tgt_dict, beam_size=2)
sample = self.sample.copy()
sample["net_input"]["fancy_other_input"] = sample["net_input"]["src_tokens"]
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
@unittest.skipUnless(torch.cuda.is_available(), "")
class TestRepeatNgramBlocking(TestSequenceGeneratorBase):
@classmethod
def setUpClass(cls):
(
cls.tgt_dict,
cls.w1,
cls.w2,
src_tokens,
src_lengths,
cls.model,
) = test_utils.sequence_generator_setup()
return cls
def test_finds_repetitive_tokens(self):
bsz, vocab_size, beam_size, step = 2, 4, 1, 3
generated_tok = torch.tensor(
[[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda"
)
lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda")
desired_result = lprobs.new_tensor(
[[0.0, 0.0, -math.inf, 0.0], [0.0, 0.0, 0.0, -math.inf]]
)
cuda_ext_result, baseline_result = self._compare_cuda_ext_to_default_implem(
bsz, beam_size, generated_tok, lprobs, step, 2
)
self.assertTensorEqual(cuda_ext_result, desired_result)
self.assertTensorEqual(baseline_result, desired_result)
@unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG)
def test_jit_no_extension(self):
bsz, vocab_size, beam_size, step = 2, 4, 1, 3
generated_tok = torch.tensor(
[[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda"
)
lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda")
blocker = NGramRepeatBlock(2, use_extension=False)
base_result = blocker(generated_tok, lprobs.clone(), bsz, beam_size, step)
scripted_blocker = torch.jit.script(blocker)
jit_result = scripted_blocker(
generated_tok, lprobs.clone(), bsz, beam_size, step
)
self.assertTensorEqual(base_result, jit_result)
def test_ngram_blocking_same_as_default_implem(self):
"""Test that cuda extension returns same things as default impl in many settings."""
vocab_size = 4
step = 6
for _ in range(2):
block_param = np.random.choice([1, 2, 3, 4])
batch_size = np.random.randint(1, 8)
beam_size = np.random.choice([1, 2, 4, 8])
lprobs = torch.zeros((beam_size * batch_size, vocab_size), device="cuda")
generated_tok = torch.tensor(
np.random.randint(
0, vocab_size, size=(batch_size * beam_size, step + 1)
),
device="cuda",
dtype=torch.long,
)
self._compare_cuda_ext_to_default_implem(
batch_size,
beam_size,
generated_tok,
lprobs,
step,
block_param,
)
def _compare_cuda_ext_to_default_implem(
self, bsz, beam_size, generated_tok, lprobs, step, block_param
):
"""Assert that cuda extension and default implem return the same thing."""
blocker = NGramRepeatBlock(block_param)
assert blocker.use_extension, "Extension not compiled"
cuda_ext_result = blocker(
generated_tok,
lprobs.clone(),
bsz,
beam_size,
step,
)
blocker.use_extension = False
baseline_result = blocker(
generated_tok,
lprobs.clone(),
bsz,
beam_size,
step,
)
self.assertTensorEqual(cuda_ext_result, baseline_result)
blocker.use_extension = True
return cuda_ext_result, baseline_result
class TestDiverseBeamSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor(
[
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
]
)
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[0.0, unk, 0.6, 0.4],
[0.0, unk, 0.6, 0.4],
# sentence 2:
[0.25, unk, 0.35, 0.4],
[0.25, unk, 0.35, 0.4],
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
# sentence 2:
[0.9, unk, 0.1, 0.0],
[0.9, unk, 0.1, 0.0],
]
),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_diverse_beam_search(self):
search_strategy = search.DiverseBeamSearch(
self.tgt_dict, num_groups=2, diversity_strength=0.0
)
generator = SequenceGenerator(
[self.model],
self.tgt_dict,
beam_size=2,
search_strategy=search_strategy,
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])
class TestDiverseSiblingsSearch(TestDiverseBeamSearch):
def assertHypoScore(
self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0
):
pos_scores = torch.FloatTensor(pos_probs).log()
pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate)
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def test_diverse_beam_search(self):
search_strategy = search.DiverseSiblingsSearch(
self.tgt_dict, diversity_rate=0.5
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5)
class TestPrefixBeamSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
vocab_size = 10
d = test_utils.dummy_dictionary(vocab_size=vocab_size)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
self.beam_size = 3
# construct prefix data
self.tokens = torch.LongTensor(
[
[self.w1, self.w2, self.eos],
]
)
self.token_lengths = torch.LongTensor([2])
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# prefix step 0:
torch.FloatTensor(
[
# eos
[0.0, unk] + [1.0 / vocab_size] * vocab_size # beam 1
] * self.beam_size
),
] * vocab_size
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_prefix_beam_search(self):
search_strategy = search.BeamSearch(self.tgt_dict)
generator = SequenceGenerator(
[self.model],
self.tgt_dict,
beam_size=self.beam_size,
search_strategy=search_strategy,
)
sample = {
"net_input": {
"src_tokens": self.tokens,
"src_lengths": self.token_lengths,
}
}
# make sure test sample doesn't break any assertion
generator.forward(sample, prefix_tokens=self.tokens[:, :-1])
class TestTopPSamplingSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor(
[
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
]
)
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
# The minimal probability of top 2 tokens.
self.min_top2_prob = 0.75
# The minimal probability of the top 1 token.
self.min_top1_prob = 0.4
w1_prob = self.min_top1_prob
w2_prob = self.min_top2_prob - self.min_top1_prob
eos_prob = 1 - self.min_top2_prob
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
]
),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_topp_sampling_search_low_prob(self):
# Given a prob low enough to top-P sampling, we expect only the top
# 1 token to be sampled, which always results in the same output.
low_sampling_topp = self.min_top1_prob / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=low_sampling_topp
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1 = self.eos, self.w1
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])
def test_topp_sampling_search_high_prob(self):
# Given a prob high enough to top-P sampling, any of the top 2
# tokens could be sampled. This can cause different outputs.
high_sampling_topp = (self.min_top1_prob + self.min_top2_prob) / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=high_sampling_topp
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertTrue(
self.hypoTokens(hypos[0][0], [w1, w1, eos])
or self.hypoTokens(hypos[0][0], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0])
)
# sentence 1, beam 2
self.assertTrue(
self.hypoTokens(hypos[0][1], [w1, w1, eos])
or self.hypoTokens(hypos[0][1], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0])
)
# sentence 2, beam 1
self.assertTrue(
self.hypoTokens(hypos[1][0], [w1, w1, eos])
or self.hypoTokens(hypos[1][0], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0])
)
# sentence 2, beam 2
self.assertTrue(
self.hypoTokens(hypos[1][1], [w1, w1, eos])
or self.hypoTokens(hypos[1][1], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0])
)
def hypoTokens(self, hypo, tokens):
return self.tensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
if not self.almostEqual(hypo["positional_scores"], pos_scores):
return False
if pos_scores.numel() != hypo["tokens"].numel():
return False
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
return abs(score - hypo["score"]) < 1e-6
def almostEqual(self, t1, t2):
return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4
def tensorEqual(self, t1, t2):
return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_sequence_generator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import tempfile
import unittest
from typing import Optional
class TestFileChunker(unittest.TestCase):
_tmpdir: Optional[str] = None
_tmpfile: Optional[str] = None
_line_content = "Hello, World\n"
_num_bytes = None
_num_lines = 200
_num_splits = 20
@classmethod
def setUpClass(cls) -> None:
cls._num_bytes = len(cls._line_content.encode("utf-8"))
cls._tmpdir = tempfile.mkdtemp()
with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
cls._tmpfile = f.name
for _i in range(cls._num_lines):
f.write(cls._line_content)
f.flush()
@classmethod
def tearDownClass(cls) -> None:
# Cleanup temp working dir.
if cls._tmpdir is not None:
shutil.rmtree(cls._tmpdir) # type: ignore
def test_find_offsets(self):
from fairseq.file_chunker_utils import find_offsets
offsets = find_offsets(self._tmpfile, self._num_splits)
self.assertEqual(len(offsets), self._num_splits + 1)
(zero, *real_offsets, last) = offsets
self.assertEqual(zero, 0)
for i, o in enumerate(real_offsets):
self.assertEqual(
o,
self._num_bytes
+ ((i + 1) * self._num_bytes * self._num_lines / self._num_splits),
)
self.assertEqual(last, self._num_bytes * self._num_lines)
def test_readchunks(self):
from fairseq.file_chunker_utils import Chunker, find_offsets
offsets = find_offsets(self._tmpfile, self._num_splits)
for start, end in zip(offsets, offsets[1:]):
with Chunker(self._tmpfile, start, end) as lines:
all_lines = list(lines)
num_lines = self._num_lines / self._num_splits
self.assertAlmostEqual(
len(all_lines), num_lines, delta=1
) # because we split on the bites, we might end up with one more/less line in a chunk
self.assertListEqual(
all_lines, [self._line_content for _ in range(len(all_lines))]
)
|
bart_ls-main
|
fairseq-py/tests/test_file_chunker_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from argparse import ArgumentParser
from dataclasses import dataclass, field
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass
@dataclass
class A(FairseqDataclass):
data: str = field(default="test", metadata={"help": "the data input"})
num_layers: int = field(default=200, metadata={"help": "more layers is better?"})
@dataclass
class B(FairseqDataclass):
bar: A = field(default=A())
foo: int = field(default=0, metadata={"help": "not a bar"})
@dataclass
class D(FairseqDataclass):
arch: A = field(default=A())
foo: int = field(default=0, metadata={"help": "not a bar"})
@dataclass
class C(FairseqDataclass):
data: str = field(default="test", metadata={"help": "root level data input"})
encoder: D = field(default=D())
decoder: A = field(default=A())
lr: int = field(default=0, metadata={"help": "learning rate"})
class TestDataclassUtils(unittest.TestCase):
def test_argparse_convert_basic(self):
parser = ArgumentParser()
gen_parser_from_dataclass(parser, A(), True)
args = parser.parse_args(["--num-layers", '10', "the/data/path"])
self.assertEqual(args.num_layers, 10)
self.assertEqual(args.data, "the/data/path")
def test_argparse_recursive(self):
parser = ArgumentParser()
gen_parser_from_dataclass(parser, B(), True)
args = parser.parse_args(["--num-layers", "10", "--foo", "10", "the/data/path"])
self.assertEqual(args.num_layers, 10)
self.assertEqual(args.foo, 10)
self.assertEqual(args.data, "the/data/path")
def test_argparse_recursive_prefixing(self):
self.maxDiff = None
parser = ArgumentParser()
gen_parser_from_dataclass(parser, C(), True, "")
args = parser.parse_args(
[
"--encoder-arch-data",
"ENCODER_ARCH_DATA",
"--encoder-arch-num-layers",
"10",
"--encoder-foo",
"10",
"--decoder-data",
"DECODER_DATA",
"--decoder-num-layers",
"10",
"--lr",
"10",
"the/data/path",
]
)
self.assertEqual(args.encoder_arch_data, "ENCODER_ARCH_DATA")
self.assertEqual(args.encoder_arch_num_layers, 10)
self.assertEqual(args.encoder_foo, 10)
self.assertEqual(args.decoder_data, "DECODER_DATA")
self.assertEqual(args.decoder_num_layers, 10)
self.assertEqual(args.lr, 10)
self.assertEqual(args.data, "the/data/path")
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/test_dataclass_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
from tests.test_train import mock_dict
class TestConcatDataset(unittest.TestCase):
def setUp(self):
d = mock_dict()
tokens_1 = torch.LongTensor([1]).view(1, -1)
tokens_ds1 = TokenBlockDataset(
tokens_1,
sizes=[tokens_1.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_1 = LanguagePairDataset(
tokens_ds1, tokens_ds1.sizes, d, shuffle=False
)
tokens_2 = torch.LongTensor([2]).view(1, -1)
tokens_ds2 = TokenBlockDataset(
tokens_2,
sizes=[tokens_2.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_2 = LanguagePairDataset(
tokens_ds2, tokens_ds2.sizes, d, shuffle=False
)
def test_concat_dataset_basics(self):
d = ConcatDataset([self.dataset_1, self.dataset_2])
assert len(d) == 2
assert d[0]["source"][0] == 1
assert d[1]["source"][0] == 2
d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[1, 2])
assert len(d) == 3
assert d[0]["source"][0] == 1
assert d[1]["source"][0] == 2
assert d[2]["source"][0] == 2
d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[2, 1])
assert len(d) == 3
assert d[0]["source"][0] == 1
assert d[1]["source"][0] == 1
assert d[2]["source"][0] == 2
|
bart_ls-main
|
fairseq-py/tests/test_concat_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import unittest
from io import StringIO
import torch
from fairseq import fb_hub
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestTranslationHub(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@torch.no_grad()
def test_transformer_wmt14_en_fr(self):
with contextlib.redirect_stdout(StringIO()):
# Load an En-Fr Transformer model trained on WMT'14 data
en2fr = fb_hub.load(
"transformer.wmt14.en-fr", tokenizer="moses", bpe="subword_nmt"
)
en2fr.eval() # disable dropout
# Translate with beam search
fr = en2fr.translate("Hello world!", beam=5)
self.assertEqual(fr, "Bonjour à tous !")
# Manually tokenize
en_toks = en2fr.tokenize("Hello world!")
self.assertEqual(en_toks, "Hello world !")
# Manually apply BPE
en_bpe = en2fr.apply_bpe(en_toks)
self.assertEqual(en_bpe, "H@@ ello world !")
# Manually binarize
en_bin = en2fr.binarize(en_bpe)
self.assertEqual(en_bin.tolist(), [329, 14044, 682, 812, 2])
# Generate five translations with top-k sampling
fr_bin = en2fr.generate(en_bin, beam=5, sampling=True, sampling_topk=20)
self.assertEqual(len(fr_bin), 5)
# Convert one of the samples to a string and detokenize
fr_sample = fr_bin[0]["tokens"]
fr_bpe = en2fr.string(fr_sample)
fr_toks = en2fr.remove_bpe(fr_bpe)
fr = en2fr.detokenize(fr_toks)
self.assertEqual(fr, en2fr.decode(fr_sample))
# Batched translation
fr_batch = en2fr.translate(["Hello world", "The cat sat on the mat."])
self.assertEqual(
fr_batch, ["Bonjour à tous.", "Le chat était assis sur le tapis."]
)
@torch.no_grad()
def test_transformer_wmt19_en_de_single_model(self):
with contextlib.redirect_stdout(StringIO()):
# Load an En-De Transformer model trained on WMT'19 data
en2de = fb_hub.load(
"transformer.wmt19.en-de.single_model", tokenizer="moses", bpe="fastbpe"
)
en2de.eval() # disable dropout
# Access the underlying TransformerModel
self.assertTrue(isinstance(en2de.models[0], torch.nn.Module))
# Translate from En-De
de = en2de.translate(
"PyTorch Hub is a pre-trained model repository designed to facilitate research reproducibility."
)
self.assertEqual(
de,
"PyTorch Hub ist ein vorgefertigtes Modell-Repository, das die Reproduzierbarkeit der Forschung erleichtern soll.",
)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestLMHub(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@torch.no_grad()
def test_transformer_lm_wmt19_en(self):
with contextlib.redirect_stdout(StringIO()):
# Load an English LM trained on WMT'19 News Crawl data
en_lm = fb_hub.load("transformer_lm.wmt19.en")
en_lm.eval() # disable dropout
# Sample from the language model
en_lm.sample(
"Barack Obama", beam=1, sampling=True, sampling_topk=10, temperature=0.8
)
ppl = (
en_lm.score("Barack Obama is coming to Sydney and New Zealand")[
"positional_scores"
]
.mean()
.neg()
.exp()
)
self.assertAlmostEqual(ppl.item(), 4.2739, places=4)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestRobertaHub(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@torch.no_grad()
def test_roberta_base(self):
with contextlib.redirect_stdout(StringIO()):
# Load RoBERTa
roberta = fb_hub.load("roberta.base")
roberta.eval() # disable dropout
# Apply Byte-Pair Encoding (BPE) to input text
tokens = roberta.encode("Hello world!")
self.assertEqual(tokens.tolist(), [0, 31414, 232, 328, 2])
self.assertEqual(roberta.decode(tokens), "Hello world!")
# Extract the last layer's features
last_layer_features = roberta.extract_features(tokens)
self.assertEqual(last_layer_features.size(), torch.Size([1, 5, 768]))
# Extract all layer's features (layer 0 is the embedding layer)
all_layers = roberta.extract_features(tokens, return_all_hiddens=True)
self.assertEqual(len(all_layers), 13)
self.assertTrue(torch.all(all_layers[-1] == last_layer_features))
# Register a new (randomly initialized) classification head
roberta.register_classification_head("new_task", num_classes=3)
logprobs = roberta.predict("new_task", tokens) # noqa
# Test mask filling
res = roberta.fill_mask(
"The first Star wars movie came out in <mask>", topk=3
)
self.assertEqual(len(res), 3)
self.assertEqual(res[0][2], " 1977")
@torch.no_grad()
def test_roberta_large_mnli(self):
with contextlib.redirect_stdout(StringIO()):
# Download RoBERTa already finetuned for MNLI
roberta = fb_hub.load("roberta.large.mnli")
roberta.eval() # disable dropout for evaluation
# Encode a pair of sentences and make a prediction
tokens = roberta.encode(
"Roberta is a heavily optimized version of BERT.",
"Roberta is not very optimized.",
)
prediction = roberta.predict("mnli", tokens).argmax().item()
self.assertEqual(prediction, 0) # contradiction
# Encode another pair of sentences
tokens = roberta.encode(
"Roberta is a heavily optimized version of BERT.",
"Roberta is based on BERT.",
)
prediction = roberta.predict("mnli", tokens).argmax().item()
self.assertEqual(prediction, 2) # entailment
# Test batched prediction
from fairseq.data.data_utils import collate_tokens
batch_of_pairs = [
[
"Roberta is a heavily optimized version of BERT.",
"Roberta is not very optimized.",
],
[
"Roberta is a heavily optimized version of BERT.",
"Roberta is based on BERT.",
],
["potatoes are awesome.", "I like to run."],
["Mars is very far from earth.", "Mars is very close."],
]
batch = collate_tokens(
[roberta.encode(pair[0], pair[1]) for pair in batch_of_pairs], pad_idx=1
)
logprobs = roberta.predict("mnli", batch)
self.assertEqual(logprobs.argmax(dim=1).tolist(), [0, 2, 1, 0])
@torch.no_grad()
def test_roberta_large_wsc(self):
with contextlib.redirect_stdout(StringIO()):
roberta = fb_hub.load("roberta.large.wsc", user_dir="examples/roberta/wsc")
roberta.eval() # disable dropout
ans = roberta.disambiguate_pronoun(
"The _trophy_ would not fit in the brown suitcase because [it] was too big."
)
self.assertTrue(ans)
ans = roberta.disambiguate_pronoun(
"The trophy would not fit in the brown _suitcase_ because [it] was too big."
)
self.assertFalse(ans)
ans = roberta.disambiguate_pronoun(
"The city councilmen refused the demonstrators a permit because [they] feared violence."
)
self.assertEqual(ans, "The city councilmen")
ans = roberta.disambiguate_pronoun(
"The city councilmen refused the demonstrators a permit because [they] advocated violence."
)
self.assertEqual(ans, "demonstrators")
@torch.no_grad()
def test_camembert(self):
with contextlib.redirect_stdout(StringIO()):
camembert = fb_hub.load("camembert.v0")
camembert.eval() # disable dropout
# Filling masks
masked_line = "Le camembert est <mask> :)"
res = camembert.fill_mask(masked_line, topk=3)
self.assertEqual(len(res), 3)
self.assertEqual(res[0][2], " délicieux")
# Extract the last layer's features
line = "J'aime le camembert!"
tokens = camembert.encode(line)
last_layer_features = camembert.extract_features(tokens)
self.assertEqual(last_layer_features.size(), torch.Size([1, 10, 768]))
# Extract all layer's features (layer 0 is the embedding layer)
all_layers = camembert.extract_features(tokens, return_all_hiddens=True)
self.assertEqual(len(all_layers), 13)
self.assertTrue(torch.all(all_layers[-1] == last_layer_features))
@torch.no_grad()
def test_xlmr(self):
with contextlib.redirect_stdout(StringIO()):
xlmr = fb_hub.load("xlmr.large")
xlmr.eval() # disable dropout
# Test sentencepiece
en_tokens = xlmr.encode("Hello world!")
self.assertEqual(en_tokens.tolist(), [0, 35378, 8999, 38, 2])
xlmr.decode(en_tokens) # 'Hello world!'
zh_tokens = xlmr.encode("你好,世界")
self.assertEqual(zh_tokens.tolist(), [0, 6, 124084, 4, 3221, 2])
xlmr.decode(zh_tokens) # '你好,世界'
hi_tokens = xlmr.encode("नमस्ते दुनिया")
self.assertEqual(hi_tokens.tolist(), [0, 68700, 97883, 29405, 2])
xlmr.decode(hi_tokens) # 'नमस्ते दुनिया'
ar_tokens = xlmr.encode("مرحبا بالعالم")
self.assertEqual(ar_tokens.tolist(), [0, 665, 193478, 258, 1705, 77796, 2])
xlmr.decode(ar_tokens) # 'مرحبا بالعالم'
fr_tokens = xlmr.encode("Bonjour le monde")
self.assertEqual(fr_tokens.tolist(), [0, 84602, 95, 11146, 2])
xlmr.decode(fr_tokens) # 'Bonjour le monde'
# Extract the last layer's features
last_layer_features = xlmr.extract_features(zh_tokens)
self.assertEqual(last_layer_features.size(), torch.Size([1, 6, 1024]))
# Extract all layer's features (layer 0 is the embedding layer)
all_layers = xlmr.extract_features(zh_tokens, return_all_hiddens=True)
self.assertEqual(len(all_layers), 25)
self.assertTrue(torch.all(all_layers[-1] == last_layer_features))
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestBartHub(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@torch.no_grad()
def test_bart_base(self):
with contextlib.redirect_stdout(StringIO()):
# Load BART
bart = fb_hub.load("bart.base")
bart.eval() # disable dropout
# Test mask filling (beam = topk = 3)
res = bart.fill_mask(["The cat <mask> on the <mask>."], topk=3)[0]
self.assertEqual(len(res), 3)
self.assertEqual(res[0][0], "The cat was on the ground.")
self.assertEqual(res[1][0], "The cat was on the floor.")
self.assertEqual(res[2][0], "The cat was sitting on the couch")
# Test mask filling (beam = 10, topk = 3)
res = bart.fill_mask(["The cat <mask> on the <mask>."], topk=3, beam=10)[0]
self.assertEqual(len(res), 3)
self.assertEqual(res[0][0], "The cat was on the ground.")
self.assertEqual(res[1][0], "The cat was on the floor.")
self.assertEqual(res[2][0], "The cat sleeps on the couch.")
# Test mask filling (beam = 10, topk = 3, match_source_len = False)
res = bart.fill_mask(
["The cat <mask> on the <mask>."],
topk=3,
beam=10,
match_source_len=False,
)[0]
self.assertEqual(len(res), 3)
self.assertEqual(res[0][0], "The cat was on the ground.")
self.assertEqual(res[1][0], "The cat was asleep on the couch.")
self.assertEqual(res[2][0], "The cat was on the floor.")
# Test mask filling (beam = 10, topk = 3) and batch size > 1
res = bart.fill_mask(
["The cat <mask> on the <mask>.", "The dog <mask> on the <mask>."],
topk=3,
beam=10,
)
self.assertEqual(len(res), 2)
self.assertEqual(len(res[0]), 3)
self.assertEqual(res[0][0][0], "The cat was on the ground.")
self.assertEqual(res[0][1][0], "The cat was on the floor.")
self.assertEqual(res[0][2][0], "The cat sleeps on the couch.")
self.assertEqual(len(res[1]), 3)
self.assertEqual(res[1][0][0], "The dog was on the ground.")
self.assertEqual(res[1][1][0], "The dog lay on the ground.")
self.assertEqual(res[1][2][0], "The dog was asleep on the couch")
@torch.no_grad()
def test_bart_large(self):
with contextlib.redirect_stdout(StringIO()):
# Load BART
bart = fb_hub.load("bart.large")
bart.eval() # disable dropout
# Apply Byte-Pair Encoding (BPE) to input text
tokens = bart.encode("Hello world!")
self.assertEqual(tokens.tolist(), [0, 31414, 232, 328, 2])
self.assertEqual(bart.decode(tokens), "Hello world!")
# Extract the last layer's features
last_layer_features = bart.extract_features(tokens)
self.assertEqual(last_layer_features.size(), torch.Size([1, 5, 1024]))
# Extract all layer's features from decoder (layer 0 is the embedding layer)
all_layers = bart.extract_features(tokens, return_all_hiddens=True)
self.assertEqual(len(all_layers), 13)
self.assertTrue(torch.all(all_layers[-1] == last_layer_features))
# Register a new (randomly initialized) classification head
bart.register_classification_head("new_task", num_classes=3)
logprobs = bart.predict("new_task", tokens) # noqa
@torch.no_grad()
def test_bart_large_mnli(self):
with contextlib.redirect_stdout(StringIO()):
# Download BART already finetuned for MNLI
bart = fb_hub.load("bart.large.mnli")
bart.eval() # disable dropout for evaluation
# Encode a pair of sentences and make a prediction
tokens = bart.encode(
"BART is a seq2seq model.", "BART is not sequence to sequence."
)
prediction = bart.predict("mnli", tokens).argmax().item()
self.assertEqual(prediction, 0) # contradiction
# Encode another pair of sentences
tokens = bart.encode(
"BART is denoising autoencoder.", "BART is version of autoencoder."
)
prediction = bart.predict("mnli", tokens).argmax().item()
self.assertEqual(prediction, 2) # entailment
# Test batched prediction
from fairseq.data.data_utils import collate_tokens
batch_of_pairs = [
["BART is a seq2seq model.", "BART is not sequence to sequence."],
["BART is denoising autoencoder.", "BART is version of autoencoder."],
]
batch = collate_tokens(
[bart.encode(pair[0], pair[1]) for pair in batch_of_pairs], pad_idx=1
)
logprobs = bart.predict("mnli", batch)
self.assertEqual(logprobs.argmax(dim=1).tolist(), [0, 2])
@torch.no_grad()
def test_bart_large_cnn(self):
with contextlib.redirect_stdout(StringIO()):
# Download BART already finetuned for MNLI
bart = fb_hub.load("bart.large.cnn")
bart.eval() # disable dropout for evaluation
hypothesis = bart.sample(
[
"""This is the first time anyone has been \
recorded to run a full marathon of 42.195 kilometers \
(approximately 26 miles) under this pursued landmark time. \
It was not, however, an officially sanctioned world record, \
as it was not an "open race" of the IAAF. His time was \
1 hour 59 minutes 40.2 seconds. Kipchoge ran in Vienna, Austria. \
It was an event specifically designed to help Kipchoge \
break the two hour barrier. Kenyan runner Eliud Kipchoge \
has run a marathon in less than two hours."""
]
)
# Encode a pair of sentences and make a prediction
self.assertEqual(
hypothesis[0],
"""Eliud Kipchoge has run a marathon in less than two hours. \
Kenyan ran in Vienna, Austria. It was not an officially sanctioned world record.""",
)
def test_bart_cnn_output_order(self):
# BartModel.generate used to return outputs in the wrong order
bart = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
input_sentences = ["This sentence is the longest sentence that is used in this unit test.", "This is short.",
"This sentence is in the middle."]
inputs = [bart.encode(x) for x in input_sentences]
outputs = bart.generate(inputs, beam=1, max_length=10)
self.assertEqual(
bart.decode(outputs[0][0]["tokens"]),
"This sentence is the longest sentence that is used in this unit test.",
)
self.assertEqual(bart.decode(outputs[1][0]["tokens"]), "This is short.")
self.assertEqual(
bart.decode(outputs[2][0]["tokens"]), "This sentence is in the middle."
)
if __name__ == "__main__":
unittest.main()
|
bart_ls-main
|
fairseq-py/tests/fb_test_hub.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.