#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
import unittest
from typing import Callable
import numpy as np
from augly.tests import AudioAugConfig
from augly.utils import pathmgr, TEST_URI
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
import librosa # fmt: off
import soundfile as sf # fmt: off
def are_equal_audios(a: np.ndarray, b: np.ndarray) -> bool:
return a.size == b.size and np.allclose(a, b, atol=1e-4)
class BaseAudioUnitTest(unittest.TestCase):
ref_audio_dir = os.path.join(TEST_URI, "audio", "speech_commands_expected_output")
local_audio_paths = []
audios = []
sample_rates = []
metadata = {}
def test_import(self) -> None:
try:
from augly import audio as audaugs
except ImportError:
self.fail("audaugs failed to import")
self.assertTrue(dir(audaugs), "Audio directory does not exist")
@classmethod
def setUpClass(cls):
cls.maxDiff = None
config = AudioAugConfig()
for i in range(len(config.input_files)):
audio_path, audio_file = config.get_input_path(i)
local_audio_path = pathmgr.get_local_path(audio_path)
audio, sample_rate = librosa.load(local_audio_path, sr=None, mono=False)
cls.audios.append(audio)
cls.sample_rates.append(sample_rate)
cls.local_audio_paths.append(local_audio_path)
def evaluate_function(self, aug_function: Callable[..., np.ndarray], **kwargs):
folders = ["mono", "stereo"]
for i, local_audio_path in enumerate(self.local_audio_paths):
ref = self.get_ref_audio(aug_function.__name__, folders[i])
with tempfile.NamedTemporaryFile(suffix=".wav") as tmpfile:
np.random.seed(1)
aug_function(local_audio_path, output_path=tmpfile.name, **kwargs)
dst = librosa.load(tmpfile.name, sr=None, mono=False)[0]
self.assertTrue(
are_equal_audios(dst, ref), "Expected and outputted audio do not match"
)
def evaluate_class(self, transform_class: Callable[..., np.ndarray], fname: str):
metadata = []
audio, sample_rate = transform_class(
self.audios[0], self.sample_rates[0], metadata
)
self.assertEqual(
metadata,
self.metadata[fname],
"Expected and outputted metadata do not match",
)
if audio.ndim > 1:
audio = np.swapaxes(audio, 0, 1)
# We compare arrays that have both been loaded back from audio files, rather than
# comparing the returned array directly against the loaded reference, because the
# samples are modified slightly when written out via sf.write (see the standalone
# round-trip sketch after this file).
with tempfile.NamedTemporaryFile(suffix=".wav") as tmpfile:
sf.write(tmpfile.name, audio, sample_rate)
dst = librosa.load(tmpfile.name, sr=None, mono=False)[0]
ref = self.get_ref_audio(fname)
self.assertTrue(
are_equal_audios(dst, ref), "Expected and outputted audio do not match"
)
def get_ref_audio(self, fname: str, folder: str = "mono") -> np.ndarray:
local_ref_path = pathmgr.get_local_path(
os.path.join(self.ref_audio_dir, folder, f"test_{fname}.wav")
)
return librosa.load(local_ref_path, sr=None, mono=False)[0]
@classmethod
def tearDownClass(cls):
cls.local_audio_paths = []
cls.audios = []
cls.sample_rates = []
| AugLy-main | augly/tests/audio_tests/base_unit_test.py |
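A minimal, standalone sketch (not one of the AugLy test files above) of the write-and-reload comparison that evaluate_class relies on. It assumes soundfile's default 16-bit PCM subtype for WAV output, which is what quantizes the samples so that an exact comparison fails while a tolerant one passes:

```python
import tempfile

import librosa
import numpy as np
import soundfile as sf

# Half a second of synthetic float32 audio at 16 kHz, in the range librosa uses.
original = np.random.uniform(-1.0, 1.0, 8000).astype(np.float32)

with tempfile.NamedTemporaryFile(suffix=".wav") as tmpfile:
    # soundfile writes WAV as 16-bit PCM by default (assumed here), so the
    # samples are quantized on the way to disk.
    sf.write(tmpfile.name, original, 16000)
    round_tripped, _ = librosa.load(tmpfile.name, sr=None, mono=False)

print(np.array_equal(original, round_tripped))           # almost always False
print(np.allclose(original, round_tripped, atol=1e-4))   # True, within the tests' tolerance
```

This is why are_equal_audios compares with atol=1e-4 and why evaluate_class writes its result to a temporary WAV file and reloads it before comparing against the reference.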
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import random
import unittest
import numpy as np
from augly import audio as audaugs
from augly.tests.audio_tests.base_unit_test import BaseAudioUnitTest
from augly.utils import AUDIO_METADATA_PATH
class TransformsAudioUnitTest(BaseAudioUnitTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
with open(AUDIO_METADATA_PATH, "r") as f:
cls.metadata = json.load(f)
def test_AddBackgroundNoise(self):
random_generator = np.random.default_rng(1)
self.evaluate_class(
audaugs.AddBackgroundNoise(
background_audio=None, snr_level_db=10.0, seed=random_generator
),
fname="add_background_noise2",
)
def test_ApplyLambda(self):
self.evaluate_class(audaugs.ApplyLambda(), fname="apply_lambda")
def test_ChangeVolume(self):
self.evaluate_class(audaugs.ChangeVolume(volume_db=10.0), fname="change_volume")
def test_Clicks(self):
self.evaluate_class(audaugs.Clicks(seconds_between_clicks=0.5), fname="clicks")
def test_Clip(self):
self.evaluate_class(
audaugs.Clip(offset_factor=0.25, duration_factor=0.5), fname="clip"
)
def test_Compose(self):
self.evaluate_class(
audaugs.Compose(
[
audaugs.Clip(duration_factor=0.25),
audaugs.ChangeVolume(volume_db=10.0),
]
),
fname="compose",
)
def test_Compose2(self):
random_generator = np.random.default_rng(1)
self.evaluate_class(
audaugs.Compose(
[
audaugs.InsertInBackground(
offset_factor=0.2, background_audio=None, seed=random_generator
),
audaugs.Clip(offset_factor=0.5, duration_factor=0.25),
audaugs.Speed(factor=4.0),
]
),
fname="compose2",
)
def test_Harmonic(self):
self.evaluate_class(
audaugs.Harmonic(kernel_size=31, power=2.0, margin=1.0), fname="harmonic"
)
def test_HighPassFilter(self):
self.evaluate_class(
audaugs.HighPassFilter(cutoff_hz=3000), fname="high_pass_filter"
)
def test_InsertInBackground(self):
random_generator = np.random.default_rng(1)
self.evaluate_class(
audaugs.InsertInBackground(offset_factor=0.3, seed=random_generator),
fname="insert_in_background2",
)
def test_InvertChannels(self):
self.evaluate_class(audaugs.InvertChannels(), fname="invert_channels")
def test_Loop(self):
self.evaluate_class(audaugs.Loop(n=1), fname="loop")
def test_LowPassFilter(self):
self.evaluate_class(
audaugs.LowPassFilter(cutoff_hz=500), fname="low_pass_filter"
)
def test_Normalize(self):
self.evaluate_class(audaugs.Normalize(), fname="normalize")
def test_OneOf(self):
random.seed(1)
self.evaluate_class(
audaugs.OneOf(
[audaugs.PitchShift(n_steps=4), audaugs.TimeStretch(rate=1.5)]
),
fname="time_stretch",
)
def test_PeakingEqualizer(self):
self.evaluate_class(
audaugs.PeakingEqualizer(gain_db=-20.0), fname="peaking_equalizer"
)
def test_Percussive(self):
self.evaluate_class(
audaugs.Percussive(kernel_size=31, power=2.0, margin=1.0),
fname="percussive",
)
def test_PitchShift(self):
self.evaluate_class(audaugs.PitchShift(n_steps=4), fname="pitch_shift")
def test_Reverb(self):
self.evaluate_class(audaugs.Reverb(reverberance=100.0), fname="reverb")
def test_Speed(self):
self.evaluate_class(audaugs.Speed(factor=3.0), fname="speed")
def test_Tempo(self):
self.evaluate_class(audaugs.Tempo(factor=2.0), fname="tempo")
def test_TimeStretch(self):
self.evaluate_class(audaugs.TimeStretch(rate=1.5), fname="time_stretch")
def test_ToMono(self):
self.evaluate_class(audaugs.ToMono(), fname="to_mono")
def test_FFTConvolve(self):
np.random.seed(1)
self.evaluate_class(audaugs.FFTConvolve(normalize=True), fname="fft_convolve")
if __name__ == "__main__":
unittest.main()
| AugLy-main | augly/tests/audio_tests/transforms_unit_test.py |
| AugLy-main | augly/tests/video_tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from typing import List
from unittest.mock import MagicMock, patch
from augly.utils import Segment
from augly.utils.base_paths import ASSETS_BASE_DIR
from augly.video import helpers
from augly.video.augmenters import ffmpeg as af
class MetadataUnitTest(unittest.TestCase):
def assert_equal_segments(self, actual: Segment, expected: Segment):
fail_msg = f"actual={actual}, expected={expected}"
self.assertAlmostEqual(actual.start, expected.start, msg=fail_msg)
self.assertAlmostEqual(actual.end, expected.end, msg=fail_msg)
self.assertEqual(actual.src_id, expected.src_id, msg=fail_msg)
def assert_equal_segment_lists(
self, actual: List[Segment], expected: List[Segment]
):
self.assertEqual(
len(actual), len(expected), f"actuals={actual}, expected={expected}"
)
for act, exp in zip(actual, expected):
self.assert_equal_segments(act, exp)
def test_insert_in_background_transitions_start(self):
new_src_segments, new_dst_segments = helpers.compute_segments(
"insert_in_background",
src_duration=20.0,
dst_duration=26.0,
src_fps=25,
dst_fps=30,
metadata=None,
offset_factor=0.0,
background_video_duration=10.0,
source_percentage=None,
transition_before=False,
transition_after=True,
transition=af.TransitionConfig(
effect=af.TransitionEffect.CIRCLECLOSE, duration=4.0
),
)
self.assert_equal_segment_lists(
new_src_segments, [Segment(start=0.0, end=18.0)]
)
self.assert_equal_segment_lists(
new_dst_segments, [Segment(start=0.0, end=18.0)]
)
def test_insert_in_background_transitions_end(self):
new_src_segments, new_dst_segments = helpers.compute_segments(
"insert_in_background",
src_duration=20.0,
dst_duration=26.0,
src_fps=25,
dst_fps=30,
metadata=None,
offset_factor=1.0,
background_video_duration=10.0,
source_percentage=None,
transition_before=True,
transition_after=False,
transition=af.TransitionConfig(
effect=af.TransitionEffect.CIRCLECLOSE, duration=4.0
),
)
self.assert_equal_segment_lists(
new_src_segments, [Segment(start=2.0, end=20.0)]
)
self.assert_equal_segment_lists(
new_dst_segments, [Segment(start=8.0, end=26.0)]
)
def test_insert_in_background_transitions(self):
new_src_segments, new_dst_segments = helpers.compute_segments(
"insert_in_background",
src_duration=20.0,
dst_duration=22.0,
src_fps=25,
dst_fps=30,
metadata=None,
offset_factor=0.5,
background_video_duration=10.0,
source_percentage=None,
transition_before=True,
transition_after=True,
transition=af.TransitionConfig(
effect=af.TransitionEffect.CIRCLECLOSE, duration=4.0
),
)
self.assert_equal_segment_lists(
new_src_segments, [Segment(start=2.0, end=18.0)]
)
self.assert_equal_segment_lists(
new_dst_segments, [Segment(start=3.0, end=19.0)]
)
def test_insert_in_background(self):
new_src_segments, new_dst_segments = helpers.compute_segments(
"insert_in_background",
src_duration=20.0,
dst_duration=30.0,
src_fps=25,
dst_fps=30,
metadata=None,
offset_factor=0.5,
background_video_duration=10.0,
source_percentage=None,
transition_before=True,
transition_after=True,
transition=None,
)
self.assert_equal_segment_lists(
new_src_segments, [Segment(start=0.0, end=20.0)]
)
self.assert_equal_segment_lists(
new_dst_segments, [Segment(start=5.0, end=25.0)]
)
def test_compute_insert_in_background_multiple_segments(self):
src_1, dst_1 = helpers.compute_segments(
name="insert_in_background_multiple",
src_duration=20.0,
dst_duration=40.0,
src_fps=25,
dst_fps=30,
metadata=None,
src_segment_starts=[2.0, 3.0],
src_segment_ends=[12.0, 13.0],
bkg_insertion_points=[5.0, 10.0],
src_ids=["0", "1"],
transition=None,
)
self.assert_equal_segment_lists(
src_1, [Segment(2.0, 12.0, "0"), Segment(3.0, 13.0, "1")]
)
self.assert_equal_segment_lists(
dst_1, [Segment(5.0, 15.0), Segment(20.0, 30.0)]
)
src_2, dst_2 = helpers.compute_segments(
name="insert_in_background_multiple",
src_duration=20.0,
dst_duration=50.0,
src_fps=25,
dst_fps=30,
metadata=None,
src_segment_starts=[2.0, 3.0, 7.0],
src_segment_ends=[12.0, 13.0, 20.0],
bkg_insertion_points=[5.0, 10.0, 20.0],
src_ids=["0", "1", "2"],
transition=af.TransitionConfig(
effect=af.TransitionEffect.CIRCLECLOSE, duration=2.0
),
)
self.assert_equal_segment_lists(
src_2,
[Segment(3.0, 11.0, "0"), Segment(4.0, 12.0, "1"), Segment(8.0, 19.0, "2")],
)
self.assert_equal_segment_lists(
dst_2, [Segment(4.0, 12.0), Segment(15.0, 23.0), Segment(31.0, 42.0)]
)
def test_time_decimate(self):
new_src_segments, new_dst_segments = helpers.compute_segments(
"time_decimate",
src_duration=20,
dst_duration=12,
src_fps=25.0,
dst_fps=30.0,
metadata=None,
start_offset_factor=0.1, # 2sec
on_factor=0.2, # 4sec
off_factor=0.5, # 2sec (relative to on_factor)
transition=None,
)
self.assert_equal_segment_lists(
new_src_segments,
[
Segment(start=2.0, end=6.0),
Segment(start=8.0, end=12.0),
Segment(start=14.0, end=18.0),
],
)
self.assert_equal_segment_lists(
new_dst_segments,
[
Segment(start=0, end=4.0),
Segment(start=4.0, end=8.0),
Segment(start=8.0, end=12.0),
],
)
def test_time_decimate_with_transition(self):
new_src_segments, new_dst_segments = helpers.compute_segments(
"time_decimate",
src_duration=20,
dst_duration=8,
src_fps=25.0,
dst_fps=30.0,
metadata=None,
start_offset_factor=0.1, # 2sec
on_factor=0.2, # 4sec
off_factor=0.5, # 2sec (relative to on_factor)
transition=af.TransitionConfig(
effect=af.TransitionEffect.CIRCLECLOSE, duration=2.0
),
)
self.assert_equal_segment_lists(
new_src_segments,
[
Segment(start=2.0, end=5.0),
Segment(start=9.0, end=11.0),
Segment(start=15.0, end=18.0),
],
)
self.assert_equal_segment_lists(
new_dst_segments,
[
Segment(start=0, end=3.0),
Segment(start=3.0, end=5.0),
Segment(start=5.0, end=8.0),
],
)
def test_change_video_speed(self):
md = {
"name": "overlay_text",
"src_duration": 58.591925,
"dst_duration": 58.591925,
"src_fps": 29.952932801822325,
"dst_fps": 29.95293280069612,
"src_width": 640,
"src_height": 352,
"dst_width": 640,
"dst_height": 352,
"src_segments": [
{"start": 0.0, "end": 14.492321571512164},
{"start": 23.479556369432764, "end": 37.97187794094493},
],
"dst_segments": [
{"start": 8.791720999383276, "end": 23.284042570895444},
{"start": 23.284042570895444, "end": 37.7763641424076},
],
"text_len": 12,
"text_change_nth": None,
"fonts": [
(
os.path.join(
ASSETS_BASE_DIR,
"similarity/media_assets/fonts/cac_champagne.ttf",
),
os.path.join(
ASSETS_BASE_DIR,
"similarity/media_assets/fonts/cac_champagne.pkl",
),
)
],
"fontscales": [0.2973747861954241, 0.5916911269561087],
"colors": [(238, 166, 244)],
"thickness": None,
"random_movement": False,
"topleft": [0.09272467655824976, 0.5098042791327592],
"bottomright": [0.8126272414865852, 0.7849615824118924],
"kwargs": {},
"intensity": 0.1980864483894119,
}
new_src_segments, new_dst_segments = helpers.compute_segments(
"change_video_speed",
src_duration=58.591925,
dst_duration=40.563641,
src_fps=29.9,
dst_fps=29.9,
metadata=[
md,
],
factor=1.44,
)
self.assert_equal_segment_lists(
new_src_segments,
[
Segment(start=0.0, end=14.492321571512164),
Segment(start=23.479556369432764, end=37.97187794094493),
],
)
self.assert_equal_segment_lists(
new_dst_segments,
[
Segment(start=6.105361805127275, end=16.169474007566283),
Segment(start=16.169474007566283, end=26.233586210005278),
],
)
@patch(
"augly.video.helpers.get_video_info",
side_effect=[
{"duration": "20.0"},
],
)
def test_concat_transition_video_in_the_middle(
self, mock_get_video_info: MagicMock
):
new_src_segments, new_dst_segments = helpers.compute_segments(
"concat",
src_duration=30.0,
dst_duration=52.0,
src_fps=29.97,
dst_fps=30.0,
metadata=None,
video_paths=["before", "main", "after"],
src_video_path_index=1,
transition=af.TransitionConfig(
effect=af.TransitionEffect.CIRCLECLOSE, duration=4.0
),
)
self.assert_equal_segment_lists(
new_src_segments, [Segment(start=2.0, end=28.0)]
)
assert mock_get_video_info.call_count == 1, mock_get_video_info.call_count
self.assert_equal_segment_lists(
new_dst_segments, [Segment(start=18.0, end=44.0)]
)
def test_concat_transition_main_video_first(self):
new_src_segments, new_dst_segments = helpers.compute_segments(
"concat",
src_duration=30.33,
dst_duration=64.8,
src_fps=29.97,
dst_fps=30.0,
metadata=None,
video_paths=["main", "after"],
src_video_path_index=0,
transition=af.TransitionConfig(
effect=af.TransitionEffect.CIRCLECLOSE, duration=4.0
),
)
self.assert_equal_segment_lists(
new_src_segments, [Segment(start=0.0, end=28.33)]
)
self.assert_equal_segment_lists(
new_dst_segments, [Segment(start=0.0, end=28.33)]
)
| AugLy-main | augly/tests/video_tests/metadata_unit_test.py |
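The expected numbers in the insert_in_background transition tests above are consistent with a simple overlap model: a transition of duration d overlaps d seconds of the two adjacent clips, so it removes d from the output duration and trims d / 2 from each transitioned edge of the matched source segment. The sketch below is one reading of those expectations, not AugLy's compute_segments implementation:

```python
from typing import Tuple


def expected_insert_segments(
    src_dur: float,
    bkg_dur: float,
    offset_factor: float,
    transition_dur: float,
    transition_before: bool,
    transition_after: bool,
) -> Tuple[Tuple[float, float], Tuple[float, float]]:
    """Hypothetical helper: derive the matched (src, dst) segment for
    insert_in_background under a crossfade-style overlap model."""
    bkg_before = bkg_dur * offset_factor  # background placed before the source clip
    # Each transition trims half its duration from the matched source segment edge.
    src_start = transition_dur / 2 if transition_before else 0.0
    src_end = src_dur - (transition_dur / 2 if transition_after else 0.0)
    # A leading transition also overlaps the source with the preceding background,
    # shifting where the source lands in the output.
    dst_offset = bkg_before - (transition_dur if transition_before else 0.0)
    return (src_start, src_end), (dst_offset + src_start, dst_offset + src_end)


# Reproduces the three transition cases above (src=20s, bkg=10s, d=4s):
print(expected_insert_segments(20.0, 10.0, 0.0, 4.0, False, True))  # ((0.0, 18.0), (0.0, 18.0))
print(expected_insert_segments(20.0, 10.0, 1.0, 4.0, True, False))  # ((2.0, 20.0), (8.0, 26.0))
print(expected_insert_segments(20.0, 10.0, 0.5, 4.0, True, True))   # ((2.0, 18.0), (3.0, 19.0))
```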
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import os
import tempfile
import unittest
from typing import Any, Callable, Dict, List, Optional, Tuple
from augly.tests import VideoAugConfig
from augly.utils import pathmgr, TEST_URI
def are_equal_videos(a_path: str, b_path: str) -> bool:
hasher = hashlib.md5()
with open(a_path, "rb") as afile:
buf = afile.read()
hasher.update(buf)
a_md5_hash = hasher.hexdigest()
hasher = hashlib.md5()
with open(b_path, "rb") as bfile:
buf = bfile.read()
hasher.update(buf)
b_md5_hash = hasher.hexdigest()
return a_md5_hash == b_md5_hash
class BaseVideoUnitTest(unittest.TestCase):
ref_vid_dir = os.path.join(TEST_URI, "video", "expected_output")
def check_equal_metadata(
self,
actual_meta: List[Dict[str, Any]],
expected_meta: List[Dict[str, Any]],
exclude_keys: Optional[List[str]],
):
if actual_meta == expected_meta:
return
for actual_dict, expected_dict in zip(actual_meta, expected_meta):
for (act_k, act_v), (exp_k, exp_v) in zip(
sorted(actual_dict.items(), key=lambda kv: kv[0]),
sorted(expected_dict.items(), key=lambda kv: kv[0]),
):
if exclude_keys is not None and act_k in exclude_keys:
continue
if act_k != exp_k:
self.assertEqual(
act_k,
exp_k,
f"Actual_dict={actual_dict}",
)
if act_v == exp_v:
continue
"""
Allow relative paths in expected metadata: just check that the end of the
actual path matches the expected path
"""
condition = (
lambda actual, expected: isinstance(actual, str)
and isinstance(expected, str)
and actual[-len(expected) :] == expected
)
if isinstance(act_v, list) and isinstance(exp_v, list):
for actual_path, expected_path in zip(act_v, exp_v):
self.assertTrue(
condition(actual_path, expected_path),
f"Error comparing list values: actual_path={actual_path}, expected_path={expected_path}, actual_metadata={actual_meta}.",
)
if not condition(actual_path, expected_path):
return False
else:
self.assertTrue(
condition(act_v, exp_v),
f"Error comparing values: actual={act_v}, expected={exp_v}, actual_metadata={actual_meta}.",
)
def test_import(self) -> None:
try:
from augly import video as vidaugs
except ImportError:
self.fail("vidaugs failed to import")
self.assertTrue(dir(vidaugs), "Video directory does not exist")
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.config, cls.local_vid_path, vid_file = cls.download_video(0)
def evaluate_function(
self,
aug_function: Callable[..., None],
ref_filename: Optional[str] = None,
**kwargs,
) -> None:
ref_filename = ref_filename or aug_function.__name__
ref_vid_path = self.get_ref_video(ref_filename)
if not kwargs.pop("diff_video_input", False):
kwargs["video_path"] = self.local_vid_path
with tempfile.NamedTemporaryFile(suffix=".mp4") as tmpfile:
aug_function(output_path=tmpfile.name, **kwargs)
self.assertTrue(
are_equal_videos(ref_vid_path, tmpfile.name),
"Expected and outputted videos do not match",
)
def evaluate_class(
self,
transform_class: Callable[..., None],
fname: str,
seed: Optional[int] = None,
metadata_exclude_keys: Optional[List[str]] = None,
**kwargs,
) -> None:
metadata = []
if not kwargs.pop("diff_video_input", False):
kwargs["video_path"] = self.local_vid_path
with tempfile.NamedTemporaryFile(suffix=".mp4") as tmpfile:
transform_class(
output_path=tmpfile.name, seed=seed, metadata=metadata, **kwargs
)
self.assertTrue(
os.path.exists(tmpfile.name), "Output video file does not exist"
)
self.check_equal_metadata(metadata, self.metadata[fname], metadata_exclude_keys)
def get_ref_video(self, fname: str) -> str:
ref_vid_name = f"test_{fname}.mp4"
return pathmgr.get_local_path(os.path.join(self.ref_vid_dir, ref_vid_name))
@staticmethod
def download_video(input_file_index: int) -> Tuple[VideoAugConfig, str, str]:
config = VideoAugConfig(input_file_index=input_file_index)
vid_path, vid_file = config.get_input_path()
local_vid_path = pathmgr.get_local_path(vid_path)
return config, local_vid_path, vid_file
| AugLy-main | augly/tests/video_tests/base_unit_test.py |
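are_equal_videos above reads each file fully into memory before hashing. A chunked variant (an alternative sketch, not the AugLy helper) applies the same byte-for-byte criterion while keeping memory flat for large files:

```python
import hashlib


def file_md5(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash a file in 1 MiB chunks so large videos are never fully in memory."""
    hasher = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest()


def are_equal_videos_chunked(a_path: str, b_path: str) -> bool:
    # Same criterion as are_equal_videos: exact byte equality of the two files.
    return file_md5(a_path) == file_md5(b_path)
```

Byte-level equality only holds when the encoder output is deterministic; note that evaluate_class in this file does not use it, checking instead that the output file exists and that the metadata matches.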
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import unittest
from augly import video as vidaugs
from augly.tests.base_configs import VideoAugConfig
from augly.tests.video_tests.base_unit_test import BaseVideoUnitTest
from augly.utils import VIDEO_METADATA_PATH
from augly.utils.ffmpeg import get_conditional_for_skipping_video_tests
class TransformsVideoUnitTest(BaseVideoUnitTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
with open(VIDEO_METADATA_PATH, "r") as f:
cls.metadata = json.load(f)
def test_BlendVideos(self):
overlay_path, _ = VideoAugConfig(input_file_index=1).get_input_path()
self.evaluate_class(
vidaugs.BlendVideos(overlay_path=overlay_path, opacity=0.5),
fname="blend_videos",
)
def test_MemeFormat(self):
self.evaluate_class(vidaugs.MemeFormat(), fname="meme_format")
def test_OverlayOntoScreenshot(self):
self.evaluate_class(
vidaugs.OverlayOntoScreenshot(),
fname="overlay_onto_screenshot",
metadata_exclude_keys=[
"dst_height",
"dst_width",
"intensity",
"template_filepath",
],
)
@unittest.skipUnless(*get_conditional_for_skipping_video_tests())
def test_PerspectiveTransformAndShake(self):
self.evaluate_class(
vidaugs.PerspectiveTransformAndShake(shake_radius=20, seed=10),
fname="perspective_transform_and_shake",
)
if __name__ == "__main__":
unittest.main()
| AugLy-main | augly/tests/video_tests/transforms/image_based_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import unittest
from augly import video as vidaugs
from augly.tests.base_configs import VideoAugConfig
from augly.tests.video_tests.base_unit_test import BaseVideoUnitTest
from augly.utils import VIDEO_METADATA_PATH
class TransformsVideoUnitTest(BaseVideoUnitTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
with open(VIDEO_METADATA_PATH, "r") as f:
cls.metadata = json.load(f)
def test_AddNoise(self):
self.evaluate_class(vidaugs.AddNoise(level=10), fname="add_noise")
def test_AudioSwap(self):
self.evaluate_class(
vidaugs.AudioSwap(audio_path=self.config.input_audio_file),
fname="audio_swap",
)
def test_Blur(self):
self.evaluate_class(vidaugs.Blur(sigma=3), fname="blur")
def test_Brightness(self):
self.evaluate_class(vidaugs.Brightness(), fname="brightness")
def test_ChangeAspectRatio(self):
self.evaluate_class(vidaugs.ChangeAspectRatio(), fname="change_aspect_ratio")
def test_ChangeVideoSpeed(self):
self.evaluate_class(
vidaugs.ChangeVideoSpeed(factor=2.0), fname="change_video_speed"
)
def test_ColorJitter(self):
self.evaluate_class(
vidaugs.ColorJitter(
brightness_factor=0.15, contrast_factor=1.3, saturation_factor=2.0
),
fname="color_jitter",
)
def test_Concat(self):
video_path, _ = VideoAugConfig(input_file_index=0).get_input_path()
second_path, _ = VideoAugConfig(input_file_index=1).get_input_path()
self.evaluate_class(
vidaugs.Concat(other_video_paths=[second_path]),
diff_video_input=True,
video_path=video_path,
fname="concat",
)
def test_Contrast(self):
self.evaluate_class(vidaugs.Contrast(level=1.3), fname="contrast")
def test_Crop(self):
self.evaluate_class(vidaugs.Crop(), fname="crop")
def test_EncodingQuality(self):
self.evaluate_class(
vidaugs.EncodingQuality(quality=37), fname="encoding_quality"
)
def test_FPS(self):
self.evaluate_class(vidaugs.FPS(), fname="fps")
def test_Grayscale(self):
self.evaluate_class(vidaugs.Grayscale(), fname="grayscale")
def test_HFlip(self):
self.evaluate_class(vidaugs.HFlip(), fname="hflip")
def test_HStack(self):
second_video_path, _ = VideoAugConfig(input_file_index=1).get_input_path()
self.evaluate_class(
vidaugs.HStack(second_video_path=second_video_path), fname="hstack"
)
def test_Loop(self):
self.evaluate_class(vidaugs.Loop(num_loops=1), fname="loop")
def test_Overlay(self):
overlay_path, _ = VideoAugConfig(input_file_index=1).get_input_path()
self.evaluate_class(vidaugs.Overlay(overlay_path=overlay_path), fname="overlay")
def test_Pad(self):
self.evaluate_class(vidaugs.Pad(), fname="pad")
def test_RandomAspectRatio(self):
self.evaluate_class(
vidaugs.RandomAspectRatio(), fname="RandomAspectRatio", seed=1
)
def test_RandomBlur(self):
self.evaluate_class(vidaugs.RandomBlur(), fname="RandomBlur", seed=1)
def test_RandomBrightness(self):
self.evaluate_class(
vidaugs.RandomBrightness(), fname="RandomBrightness", seed=1
)
def test_RandomContrast(self):
self.evaluate_class(vidaugs.RandomContrast(), fname="RandomContrast", seed=1)
def test_RandomEncodingQuality(self):
self.evaluate_class(
vidaugs.RandomEncodingQuality(), fname="RandomEncodingQuality", seed=1
)
def test_RandomFPS(self):
self.evaluate_class(vidaugs.RandomFPS(), fname="RandomFPS", seed=1)
def test_RandomNoise(self):
self.evaluate_class(vidaugs.RandomNoise(), fname="RandomNoise", seed=1)
def test_RandomRotation(self):
self.evaluate_class(vidaugs.RandomRotation(), fname="RandomRotation", seed=1)
def test_RandomVideoSpeed(self):
self.evaluate_class(
vidaugs.RandomVideoSpeed(), fname="RandomVideoSpeed", seed=1
)
def test_RemoveAudio(self):
self.evaluate_class(vidaugs.RemoveAudio(), fname="remove_audio")
def test_Resize(self):
self.evaluate_class(vidaugs.Resize(height=300, width=300), fname="resize")
def test_Rotate(self):
self.evaluate_class(vidaugs.Rotate(), fname="rotate")
def test_Scale(self):
self.evaluate_class(vidaugs.Scale(), fname="scale")
def test_TimeCrop(self):
self.evaluate_class(vidaugs.TimeCrop(duration_factor=0.5), fname="time_crop")
def test_TimeDecimate(self):
self.evaluate_class(vidaugs.TimeDecimate(), fname="time_decimate")
def test_Trim(self):
self.evaluate_class(vidaugs.Trim(end=5), fname="trim")
def test_VFlip(self):
self.evaluate_class(vidaugs.VFlip(), fname="vflip")
def test_VStack(self):
second_video_path, _ = VideoAugConfig(input_file_index=1).get_input_path()
self.evaluate_class(
vidaugs.VStack(second_video_path=second_video_path), fname="vstack"
)
if __name__ == "__main__":
unittest.main()
| AugLy-main | augly/tests/video_tests/transforms/ffmpeg_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import unittest
from augly import video as vidaugs
from augly.tests.video_tests.base_unit_test import BaseVideoUnitTest
from augly.utils import VIDEO_METADATA_PATH
class TransformsVideoUnitTest(BaseVideoUnitTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
with open(VIDEO_METADATA_PATH, "r") as f:
cls.metadata = json.load(f)
def test_OverlayDots(self):
self.evaluate_class(vidaugs.OverlayDots(), fname="overlay_dots", seed=1)
def test_OverlayShapes(self):
self.evaluate_class(vidaugs.OverlayShapes(), fname="overlay_shapes", seed=1)
def test_OverlayText(self):
self.evaluate_class(
vidaugs.OverlayText(
topleft=(0, 0),
bottomright=(0.5, 0.25),
),
fname="overlay_text",
seed=1,
)
if __name__ == "__main__":
unittest.main()
| AugLy-main | augly/tests/video_tests/transforms/cv2_test.py |
| AugLy-main | augly/tests/video_tests/transforms/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import random
import unittest
from augly import audio as audaugs, video as vidaugs
from augly.tests.base_configs import VideoAugConfig
from augly.tests.video_tests.base_unit_test import BaseVideoUnitTest
from augly.utils import VIDEO_METADATA_PATH
from augly.utils.ffmpeg import get_conditional_for_skipping_video_tests
class TransformsVideoUnitTest(BaseVideoUnitTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
with open(VIDEO_METADATA_PATH, "r") as f:
cls.metadata = json.load(f)
def test_ApplyLambda(self):
self.evaluate_class(vidaugs.ApplyLambda(), fname="apply_lambda")
@unittest.skipUnless(*get_conditional_for_skipping_video_tests())
def test_AugmentAudio(self):
self.evaluate_class(
vidaugs.AugmentAudio(
audio_aug_function=audaugs.PitchShift(),
),
fname="augment_audio",
)
def test_InsertInBackground(self):
self.evaluate_class(
vidaugs.InsertInBackground(offset_factor=0.25),
fname="insert_in_background",
metadata_exclude_keys=[
"dst_duration",
"dst_fps",
"intensity",
"local_path",
],
)
def test_InsertInBackgroundMultiple(self):
_, bkg_path, _ = self.download_video(1)
self.evaluate_class(
vidaugs.InsertInBackgroundMultiple(
additional_video_paths=[],
background_path=bkg_path,
src_ids=["0"],
seed=23,
),
fname="insert_in_background_multiple",
metadata_exclude_keys=[
"dst_duration",
"dst_fps",
"intensity",
"local_path",
"bkg_insertion_points",
"background_path",
"src_segment_starts",
"src_segment_ends",
],
)
def test_Compose(self):
random.seed(1)
self.evaluate_class(
vidaugs.Compose(
[
vidaugs.VFlip(),
vidaugs.Brightness(),
vidaugs.OneOf([vidaugs.Grayscale(), vidaugs.ApplyLambda()]),
]
),
fname="compose",
)
def test_OverlayEmoji(self):
self.evaluate_class(vidaugs.OverlayEmoji(), fname="overlay_emoji")
def test_OverlayOntoBackgroundVideo(self):
self.evaluate_class(
vidaugs.OverlayOntoBackgroundVideo(
background_path=VideoAugConfig(input_file_index=1).get_input_path()[0],
),
fname="overlay_onto_background_video",
)
def test_Pixelization(self):
self.evaluate_class(vidaugs.Pixelization(ratio=0.1), fname="pixelization")
def test_RandomEmojiOverlay(self):
random.seed(1)
self.evaluate_class(
vidaugs.RandomEmojiOverlay(),
fname="RandomEmojiOverlay",
metadata_exclude_keys=["emoji_path"],
)
def test_RandomPixelization(self):
random.seed(1)
self.evaluate_class(
vidaugs.RandomPixelization(max_ratio=0.2), fname="RandomPixelization"
)
def test_ReplaceWithBackground(self):
self.evaluate_class(
vidaugs.ReplaceWithBackground(
source_offset=0.1,
background_offset=0,
source_percentage=0.7,
),
fname="replace_with_background",
metadata_exclude_keys=["dst_duration", "dst_fps"],
)
def test_ReplaceWithColorFrames(self):
self.evaluate_class(
vidaugs.ReplaceWithColorFrames(), fname="replace_with_color_frames"
)
def test_Shift(self):
self.evaluate_class(vidaugs.Shift(x_factor=0.25, y_factor=0.25), fname="shift")
if __name__ == "__main__":
unittest.main()
| AugLy-main | augly/tests/video_tests/transforms/composite_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from augly.text.augmenters.utils import (
detokenize,
rejoin_words_and_whitespace,
split_words_on_whitespace,
tokenize,
)
class UtilsTest(unittest.TestCase):
def test_tokenize(self) -> None:
# Includes the apostrophe in "can't" but not in "thinkin'".
tokens1 = tokenize("Can't stop\nthinkin' about you")
self.assertEqual(tokens1, ["Can't", "stop", "thinkin", "'", "about", "you"])
# Differs in the missing newline.
self.assertEqual(detokenize(tokens1), "Can't stop thinkin' about you")
# Decimal place needs a leading zero or it isn't handled correctly.
tokens2 = tokenize("Write it: 0.004% not .004% #grammar")
self.assertEqual(
tokens2,
["Write", "it", ":", "0.004", "%", "not", ".", "004", "%", "#", "grammar"],
)
# Without a leading zero, the decimal point gets attached to 'not' as a sentence-ending period.
self.assertEqual(detokenize(tokens2), "Write it: 0.004% not. 004 %#grammar")
# Dollar signs, parentheses, time. The newline is discarded.
tokens3 = tokenize("Will work for $$ ($100) until 5:00")
self.assertEqual(
tokens3,
["Will", "work", "for", "$", "$", "(", "$", "100", ")", "until", "5:00"],
)
# Detokenized version has no space between the dollar signs and parenthesis.
self.assertEqual(detokenize(tokens3), "Will work for $$($100) until 5:00")
def test_split_words_on_whitespace(self) -> None:
# Preserves the whitespace from the original text.
words, whitespace = split_words_on_whitespace(
"Can't stop\nthinkin' about\tyou"
)
self.assertEqual(words, ["Can't", "stop", "thinkin'", "about", "you"])
self.assertEqual(whitespace, ["", " ", "\n", " ", "\t", ""])
self.assertEqual(
rejoin_words_and_whitespace(words, whitespace),
"Can't stop\nthinkin' about\tyou",
)
if __name__ == "__main__":
unittest.main()
| AugLy-main | augly/tests/text_tests/utils_test.py |
| AugLy-main | augly/tests/text_tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# @lint-ignore-every UTF8
import random
import unittest
from augly import text as txtaugs
from augly.utils import FUN_FONTS_GREEK_PATH
class FunctionalTextUnitTest(unittest.TestCase):
def test_import(self) -> None:
try:
from augly.text import functional
except ImportError:
self.fail("functional failed to import")
self.assertTrue(dir(functional))
def setUp(self):
random.seed(123)
self.texts = [
"The quick brown 'fox' couldn't jump over the green, grassy hill.",
]
self.priority_words = ["green", "grassy", "hill"]
self.fairness_texts = [
"The king and queen have a son named Raj and a daughter named Amanda.",
]
def test_apply_lambda(self) -> None:
augmented_apply_lambda = txtaugs.apply_lambda(self.texts)
self.assertTrue(augmented_apply_lambda[0] == self.texts[0])
def test_change_case(self) -> None:
augmented_words = txtaugs.change_case(self.texts[0], cadence=3.0, case="upper")
self.assertTrue(
augmented_words[0]
== "THE quick brown 'FOX' couldn't jump OVER the green, GRASSY hill.",
)
def test_contractions(self) -> None:
augmented_words = txtaugs.contractions(
"I would call him but I do not know where he has gone", aug_p=0.7
)
self.assertTrue(
augmented_words[0] == "I would call him but I don't know where he's gone"
)
def test_get_baseline(self) -> None:
augmented_baseline = txtaugs.get_baseline(self.texts)
self.assertTrue(
augmented_baseline[0]
== "The quick brown 'fox' couldn't jump over the green, grassy hill."
)
def test_insert_punctuation_chars(self) -> None:
augmented_every_char = txtaugs.insert_punctuation_chars(
self.texts, "all", 1.0, False
)
# Separator inserted between every character (including spaces/punctuation).
self.assertEqual(
augmented_every_char,
[
"T.h.e. .q.u.i.c.k. .b.r.o.w.n. .'.f.o.x.'. .c.o.u.l.d.n.'.t. "
".j.u.m.p. .o.v.e.r. .t.h.e. .g.r.e.e.n.,. .g.r.a.s.s.y. .h.i.l.l.."
],
)
augmented_per_word = txtaugs.insert_punctuation_chars(
self.texts, "word", 1.0, False
)
# Each word uses a different separator; no separators around whitespace.
self.assertEqual(
augmented_per_word,
[
"T;h;e q?u?i?c?k b-r-o-w-n ';f;o;x;' c?o?u?l?d?n?'?t j.u.m.p o-v-e-r "
"t...h...e g...r...e...e...n..., g:r:a:s:s:y h:i:l:l:."
],
)
augmented_wider_cadence = txtaugs.insert_punctuation_chars(
self.texts, "all", 2.7, False
)
# Separators are every 2-3 (avg. 2.7) characters.
self.assertEqual(
augmented_wider_cadence,
[
"The. qu.ick. b.row.n '.fo.x' .cou.ld.n't. ju.mp .ov.er .the. g.ree.n, "
".gr.ass.y h.ill.."
],
)
augmented_varying_char = txtaugs.insert_punctuation_chars(
self.texts, "all", 2.0, True
)
# Each separator is chosen independently.
self.assertEqual(
augmented_varying_char,
[
"Th?e ,qu,ic!k .br,ow.n :'f.ox!' ;co...ul.dn?'t' j.um...p :ov!er' "
"t-he, g're?en:, ;gr'as!sy, h-il;l."
],
)
def test_insert_whitespace_chars(self) -> None:
augmented_every_char = txtaugs.insert_whitespace_chars(
self.texts, "all", 1.0, False
)
# Separator inserted between every character (including spaces/punctuation).
self.assertEqual(
augmented_every_char,
[
"T h e q u i c k b r o w n ' f o x ' c o u l d n ' t "
"j u m p o v e r t h e g r e e n , g r a s s y h i l l ."
],
)
augmented_per_word = txtaugs.insert_whitespace_chars(
self.texts, "word", 1.0, False
)
# Each word uses a different separator; no separators around whitespace.
self.assertEqual(
augmented_per_word,
[
"T\nh\ne q u i c k b\rr\ro\rw\rn '\nf\no\nx\n' c o u l d n ' t "
"j u m p o\rv\re\rr t\x0bh\x0be g\x0br\x0be\x0be\x0bn\x0b, "
"g\nr\na\ns\ns\ny h\ni\nl\nl\n."
],
)
augmented_wider_cadence = txtaugs.insert_whitespace_chars(
self.texts, "all", 2.7, False
)
# Separators are every 2-3 (avg. 2.7) characters.
self.assertEqual(
augmented_wider_cadence,
[
"The qu ick b row n ' fo x' cou ld n't ju mp ov er the "
"g ree n, gr ass y h ill ."
],
)
augmented_varying_char = txtaugs.insert_whitespace_chars(
self.texts, "all", 2.0, True
)
# Each separator is chosen independently.
self.assertEqual(
augmented_varying_char,
[
"Th e \nqu\nic\tk br\now n \r'f ox\t' \nco\x0cul dn 't\x0b "
"j um\x0cp \rov\ter\x0c t\x0bhe\n g\x0bre\ten\r, "
"\rgr\x0bas\tsy\n h\x0bil\rl."
],
)
def test_insert_text(self) -> None:
# Single insertion in random location
insert_single_word = txtaugs.insert_text(self.texts, ["wolf", "sheep"], seed=42)
self.assertEqual(
insert_single_word,
["wolf The quick brown 'fox' couldn't jump over the green, grassy hill."],
)
# Three insertions in random locations
insert_multiple = txtaugs.insert_text(
self.texts, ["wolf", "sheep"], num_insertions=3
)
self.assertEqual(
insert_multiple,
[
"The quick brown wolf 'fox' couldn't jump wolf over the sheep green, grassy hill."
],
)
# Single insertion in prepend mode
prepend = txtaugs.insert_text(
self.texts, ["wolf", "sheep"], insertion_location="prepend"
)
self.assertEqual(
prepend,
["wolf The quick brown 'fox' couldn't jump over the green, grassy hill."],
)
append = txtaugs.insert_text(
self.texts, ["wolf", "sheep"], insertion_location="append"
)
# Single insertion in append mode
self.assertEqual(
append,
["The quick brown 'fox' couldn't jump over the green, grassy hill. wolf"],
)
def test_insert_zero_width_chars(self) -> None:
augmented_every_char = txtaugs.insert_zero_width_chars(
self.texts, "all", 1.0, False
)
# Separator inserted between every character (including spaces/punctuation).
# Renders as: "The quick brown 'fox' couldn't jump over the green, grassy hill."
self.assertEqual(
augmented_every_char,
[
"T\u200bh\u200be\u200b \u200bq\u200bu\u200bi\u200bc\u200bk\u200b "
"\u200bb\u200br\u200bo\u200bw\u200bn\u200b \u200b'\u200bf\u200bo"
"\u200bx\u200b'\u200b \u200bc\u200bo\u200bu\u200bl\u200bd\u200bn"
"\u200b'\u200bt\u200b \u200bj\u200bu\u200bm\u200bp\u200b \u200bo"
"\u200bv\u200be\u200br\u200b \u200bt\u200bh\u200be\u200b \u200bg"
"\u200br\u200be\u200be\u200bn\u200b,\u200b \u200bg\u200br\u200ba"
"\u200bs\u200bs\u200by\u200b \u200bh\u200bi\u200bl\u200bl\u200b."
],
)
augmented_per_word = txtaugs.insert_zero_width_chars(
self.texts, "word", 1.0, False
)
# Each word uses a different separator; no separators around whitespace.
# Renders as: "The quick brown 'fox' couldn't jump over the green, grassy hill."
self.assertEqual(
augmented_per_word,
[
"T\u2061h\u2061e q\u200cu\u200ci\u200cc\u200ck b\u2063r\u2063o"
"\u2063w\u2063n '\u2061f\u2061o\u2061x\u2061' c\u200co\u200cu"
"\u200cl\u200cd\u200cn\u200c'\u200ct j\u200bu\u200bm\u200bp o"
"\u2063v\u2063e\u2063r t\u2062h\u2062e g\u2062r\u2062e\u2062e"
"\u2062n\u2062, g\u200br\u200ba\u200bs\u200bs\u200by h\u200di"
"\u200dl\u200dl\u200d."
],
)
augmented_wider_cadence = txtaugs.insert_zero_width_chars(
self.texts, "all", 2.7, False
)
# Separators are every 2-3 (avg. 2.7) characters.
# Renders as: "The quick brown 'fox' couldn't jump over the green, grassy hill."
self.assertEqual(
augmented_wider_cadence,
[
"The\u200d qu\u200dick\u200d b\u200drow\u200dn '\u200dfo\u200dx"
"' \u200dcou\u200dld\u200dn't\u200d ju\u200dmp \u200dov\u200der"
" \u200dthe\u200d g\u200dree\u200dn, \u200dgr\u200dass\u200dy h"
"\u200dill\u200d."
],
)
augmented_varying_char = txtaugs.insert_zero_width_chars(
self.texts, "all", 2.0, True
)
# Each separator is chosen independently.
# Renders as: "The quick brown 'fox' couldn't jump over the green, grassy hill."
self.assertEqual(
augmented_varying_char,
[
"Th\u200de \u200dqu\u200cic\u200bk \u2060br\u200bow\u2061n \u200b"
"'f\u200dox\u2060' \u2064co\u200bul\u200cdn\u2063't\u200b j\u2064u"
"m\u2061p \u200dov\u2063er\u2063 t\u200dhe\u2063 g\u200cre\u2061e"
"n\u2061, \u2063gr\u200das\u2060sy\u2063 h\u2061il\u2062l."
],
)
def test_merge_words(self) -> None:
augmented_split_words = txtaugs.merge_words(self.texts, aug_word_p=0.3, n=1)
self.assertTrue(
augmented_split_words[0]
== "Thequick brown 'fox' couldn'tjump overthe green, grassy hill."
)
augmented_split_words_targetted = txtaugs.merge_words(
self.texts, aug_word_p=0.3, n=1, priority_words=self.priority_words
)
self.assertTrue(
augmented_split_words_targetted[0]
== "The quick brown 'fox' couldn'tjump over the green, grassyhill."
)
def test_replace_bidirectional(self) -> None:
augmented_bidirectional = txtaugs.replace_bidirectional(self.texts)
# Renders as: ".llih yssarg ,neerg eht revo pmuj t'ndluoc 'xof' nworb kciuq ehT"
self.assertEqual(
augmented_bidirectional,
[
"\u202e.llih yssarg ,neerg eht revo pmuj t'ndluoc 'xof' nworb kciuq ehT\u202c"
],
)
augmented_bidirectional_word = txtaugs.replace_bidirectional(
self.texts, granularity="word"
)
# Renders as: "ehT kciuq nworb 'xof' t'ndluoc pmuj revo eht ,neerg yssarg .llih"
self.assertEqual(
augmented_bidirectional_word,
[
"\u202d\u202eehT\u202c \u202ekciuq\u202c \u202enworb\u202c \u202e'"
"xof'\u202c \u202et'ndluoc\u202c \u202epmuj\u202c \u202erevo\u202c"
" \u202eeht\u202c \u202e,neerg\u202c \u202eyssarg\u202c \u202e.lli"
"h\u202c"
],
)
augmented_bidirectional_split = txtaugs.replace_bidirectional(
self.texts, granularity="word", split_word=True
)
# Renders as: "Teh qukci brnwo 'f'xo coult'nd jupm ovre teh gre,ne grayss hi.ll"
self.assertEqual(
augmented_bidirectional_split,
[
"\u202dT\u202eeh\u202c qu\u202ekci\u202c br\u202enwo\u202c 'f\u202e"
"'xo\u202c coul\u202et'nd\u202c ju\u202epm\u202c ov\u202ere\u202c"
" t\u202eeh\u202c gre\u202e,ne\u202c gra\u202eyss\u202c hi\u202e"
".ll\u202c"
],
)
def test_replace_fun_fonts(self) -> None:
augmented_fun_fonts_word = txtaugs.replace_fun_fonts(
self.texts, granularity="word", aug_p=0.3, vary_fonts=False, n=1
)
self.assertTrue(
augmented_fun_fonts_word[0]
== "The 𝙦𝙪𝙞𝙘𝙠 brown '𝙛𝙤𝙭' 𝙘𝙤𝙪𝙡𝙙𝙣'𝙩 jump over the green, 𝙜𝙧𝙖𝙨𝙨𝙮 hill."
)
augmented_fun_fonts_char = txtaugs.replace_fun_fonts(
self.texts, granularity="char", aug_p=0.3, vary_fonts=True, n=1
)
self.assertTrue(
augmented_fun_fonts_char[0]
== "T̷he̳ 𝒒uiᴄk 𝙗r𝓸wn 'fo̲x' coul͎dn't jump over t̶h̷e green, 𝑔ra͎ss̳𝒚 ʜill."
)
augmented_fun_fonts_all = txtaugs.replace_fun_fonts(
self.texts, granularity="all", aug_p=1.0, vary_fonts=False, n=1
)
self.assertTrue(
augmented_fun_fonts_all[0]
== "𝕋𝕙𝕖 𝕢𝕦𝕚𝕔𝕜 𝕓𝕣𝕠𝕨𝕟 '𝕗𝕠𝕩' 𝕔𝕠𝕦𝕝𝕕𝕟'𝕥 𝕛𝕦𝕞𝕡 𝕠𝕧𝕖𝕣 𝕥𝕙𝕖 𝕘𝕣𝕖𝕖𝕟, 𝕘𝕣𝕒𝕤𝕤𝕪 𝕙𝕚𝕝𝕝."
)
augmented_fun_fonts_word_targetted = txtaugs.replace_fun_fonts(
self.texts,
granularity="word",
aug_p=0.3,
vary_fonts=True,
n=1,
priority_words=self.priority_words,
)
self.assertTrue(
augmented_fun_fonts_word_targetted[0]
== "T͓̽h͓̽e͓̽ quick brown 'fox' couldn't jump over the 𝘨𝘳𝘦𝘦𝘯, g̳r̳a̳s̳s̳y̳ h̴i̴l̴l̴."
)
augmented_fun_fonts_greek = txtaugs.replace_fun_fonts(
[
"Η γρήγορη καφέ αλεπού δεν μπορούσε να πηδήξει πάνω από τον καταπράσινο λόφο."
],
granularity="word",
aug_p=0.3,
vary_fonts=True,
fonts_path=FUN_FONTS_GREEK_PATH,
n=1.0,
)
self.assertTrue(
augmented_fun_fonts_greek[0]
== "𝝜 γρήγορη καφέ αλεπού 𝛿𝜀𝜈 μπορούσε να πηδήξει πάνω από 𝞽𝞸𝞶 𝝹𝝰𝞃𝝰𝝿𝞀ά𝞂𝝸𝝼𝝾 λόφο."
)
def test_replace_similar_chars(self) -> None:
augmented_chars = txtaugs.replace_similar_chars(
self.texts[0], aug_word_p=0.3, aug_char_p=0.3, n=2
)
self.assertEqual(
augmented_chars,
[
"T/-/e quick brown 'fox' coul|)n't jump over the green, grassy hill.",
"T)-(e quick br0wn 'fox' couldn't jump over the green, g12assy hill.",
],
)
augmented_chars_targetted = txtaugs.replace_similar_chars(
self.texts[0],
aug_word_p=0.3,
aug_char_p=0.3,
n=2,
priority_words=self.priority_words,
)
self.assertEqual(
augmented_chars_targetted,
[
"The quic|{ brown 'fox' couldn't jump Dver the green, gr4ssy hill.",
"7he quick brown 'fox' couldn't jump over the green, gr4ssy hill.",
],
)
def test_replace_similar_unicode_chars(self) -> None:
augmented_unicode_chars = txtaugs.replace_similar_unicode_chars(
self.texts[0], aug_word_p=0.3, aug_char_p=0.3, n=2
)
self.assertEqual(
augmented_unicode_chars,
[
"TĦe ℚuick brown 'fox' coul₫n't jump over the green, grassy hill.",
"Ŧhe quick browŅ 'fox' couldn't jÙmp over the green, grassy hill.",
],
)
augmented_unicode_chars_targetted = txtaugs.replace_similar_unicode_chars(
self.texts[0],
aug_word_p=0.3,
aug_char_p=0.3,
n=2,
priority_words=self.priority_words,
)
self.assertEqual(
augmented_unicode_chars_targetted,
[
"⍡he quick brown 'fox' couldn't jump oveℛ the green, ġrassy hill.",
"The quick brown 'fox' couldn't jump over thė green, gℝassy hill.",
],
)
def test_replace_text(self) -> None:
texts = [
"The quick brown 'fox' couldn't jump over the green, grassy hill.",
"The quick brown",
"jump over the green",
]
replace_texts = {
"jump over the blue": "jump over the red",
"The quick brown": "The slow green",
"couldn't jump": "jumped",
}
replace_string = "The slow green"
augmented_text_from_list = txtaugs.replace_text(texts, replace_texts)
self.assertEqual(
augmented_text_from_list,
[texts[0], replace_texts[texts[1]], texts[2]],
)
augmented_text_from_string = txtaugs.replace_text(texts, replace_string)
self.assertEqual(
augmented_text_from_string,
[replace_string, replace_string, replace_string],
)
augmented_string_from_list = txtaugs.replace_text(texts[0], replace_texts)
self.assertTrue(augmented_string_from_list == texts[0])
augmented_string_from_list = txtaugs.replace_text(texts[1], replace_texts)
self.assertTrue(augmented_string_from_list == replace_texts[texts[1]])
augmented_string_from_string = txtaugs.replace_text(texts[2], replace_string)
self.assertTrue(augmented_string_from_string == replace_string)
def test_replace_upside_down(self) -> None:
augmented_upside_down_all = txtaugs.replace_upside_down(self.texts)
self.assertTrue(
augmented_upside_down_all[0]
== "˙llᴉɥ ʎssɐɹɓ 'uǝǝɹɓ ǝɥʇ ɹǝʌo dɯnɾ ʇ,uplnoɔ ,xoɟ, uʍoɹq ʞɔᴉnb ǝɥꞱ"
)
augmented_upside_down_words = txtaugs.replace_upside_down(
self.texts, granularity="word", aug_p=0.3
)
self.assertTrue(
augmented_upside_down_words[0]
== "ǝɥꞱ ʞɔᴉnb brown 'xoɟ' ʇ,uplnoɔ jump over the green, grassy hill."
)
augmented_upside_down_chars = txtaugs.replace_upside_down(
self.texts[0], granularity="char", aug_p=0.3, n=2
)
self.assertEqual(
augmented_upside_down_chars,
[
"Thǝ buiɔk qrown 'fox, couldn,t jump over ʇɥe green' gɹassʎ hill.",
"Ʇɥǝ qnᴉɔk qɹown 'fox' conldn,t jnɯp over the gɹeen, grassy ɥᴉll ˙",
],
)
def test_replace_words(self) -> None:
augmented_words = txtaugs.replace_words(self.texts, aug_word_p=0.3)
self.assertTrue(augmented_words[0] == self.texts[0])
augmented_words = txtaugs.replace_words(
self.texts,
mapping={"jump": "hop", "brown": "orange", "green": "blue", "the": "a"},
aug_word_p=1.0,
)
self.assertTrue(
augmented_words[0]
== "A quick orange 'fox' couldn't hop over a blue, grassy hill.",
)
augmented_words = txtaugs.replace_words(
self.texts,
mapping={"jump": "hop", "brown": "orange", "green": "blue", "the": "a"},
aug_word_p=1.0,
ignore_words=["green", "jump"],
)
self.assertTrue(
augmented_words[0]
== "A quick orange 'fox' couldn't jump over a green, grassy hill.",
)
def test_simulate_typos(self) -> None:
augmented_typos = txtaugs.simulate_typos(
self.texts[0], aug_word_p=0.3, aug_char_p=0.3, n=2, typo_type="misspelling"
)
self.assertEqual(
augmented_typos,
[
"Ther quick brown 'fox' couldn' t jump over the green, grassy hill.",
"Teh quick brown 'fox' couldn' t jump over tghe green, grassy hill.",
],
)
augmented_typos_targetted = txtaugs.simulate_typos(
self.texts[0],
aug_word_p=0.3,
n=2,
priority_words=self.priority_words,
typo_type="charmix",
)
self.assertEqual(
augmented_typos_targetted,
[
"The quick buown 'fox' couldn' t jump over he rgeen, rgassy lhill.",
"The quick brown 'fox' couldn' t nump o^er the gre$n, grasys ill.",
],
)
def test_split_words(self) -> None:
augmented_split_words = txtaugs.split_words(self.texts[0], aug_word_p=0.3, n=2)
self.assertEqual(
augmented_split_words,
[
"The qui ck brown 'fox' c ouldn't j ump over the green, grassy hi ll.",
"The qu ick bro wn 'fox' could n't jump over the gre en, grassy hill.",
],
)
augmented_split_words_targetted = txtaugs.split_words(
self.texts[0], aug_word_p=0.3, n=2, priority_words=self.priority_words
)
self.assertEqual(
augmented_split_words_targetted,
[
"The quick br own 'fox' couldn't jump over the g reen, gras sy h ill.",
"The quick brown 'fox' couldn't jump ov er the g reen, g rassy hi ll.",
],
)
def test_swap_gendered_words(self) -> None:
augmented_gender_swap_words = txtaugs.swap_gendered_words(
self.fairness_texts[0], aug_word_p=0.3
)
self.assertTrue(
augmented_gender_swap_words
== "The queen and king have a daughter named Raj and a son named Amanda.",
)
ignore_augmented_gender_swap_words = txtaugs.swap_gendered_words(
self.fairness_texts[0], aug_word_p=0.3, ignore_words=["son"]
)
self.assertTrue(
ignore_augmented_gender_swap_words
== "The queen and king have a son named Raj and a son named Amanda.",
)
if __name__ == "__main__":
unittest.main()
| AugLy-main | augly/tests/text_tests/functional_unit_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# @lint-ignore-every UTF8
import json
import os
import random
import unittest
from typing import Any, Dict, List
from augly import text as txtaugs
from augly.utils import TEXT_METADATA_PATH
def are_equal_metadata(
actual_meta: List[Dict[str, Any]], expected_meta: List[Dict[str, Any]]
) -> bool:
if actual_meta == expected_meta:
return True
for actual_dict, expected_dict in zip(actual_meta, expected_meta):
for (act_k, act_v), (exp_k, exp_v) in zip(
sorted(actual_dict.items(), key=lambda kv: kv[0]),
sorted(expected_dict.items(), key=lambda kv: kv[0]),
):
if act_k != exp_k:
return False
if act_v == exp_v:
continue
"""
Allow relative paths in expected metadata: just check that the end of the
actual path matches the expected path
"""
if not (
isinstance(act_v, str)
and isinstance(exp_v, str)
and os.path.normpath(act_v[-len(exp_v) :]).split(os.path.sep)
== os.path.normpath(exp_v).split(os.path.sep)
):
return False
return True
class TransformsTextUnitTest(unittest.TestCase):
def test_import(self) -> None:
try:
from augly.text import transforms
except ImportError:
self.fail("transforms failed to import")
self.assertTrue(dir(transforms))
def setUp(self):
self.metadata = []
self.maxDiff = None
random.seed(123)
@classmethod
def setUpClass(cls):
super().setUpClass()
with open(TEXT_METADATA_PATH, "r") as f:
cls.expected_metadata = json.load(f)
cls.texts = ["The quick brown 'fox' couldn't jump over the green, grassy hill."]
cls.priority_words = ["green", "grassy", "hill"]
cls.fairness_texts = [
"The king and queen have a son named Raj and a daughter named Amanda.",
]
def test_ApplyLambda(self) -> None:
augmented_apply_lambda = txtaugs.ApplyLambda()(
self.texts, metadata=self.metadata
)
self.assertTrue(augmented_apply_lambda[0] == self.texts[0])
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["apply_lambda"]),
)
def test_ChangeCase(self) -> None:
augmented_words = txtaugs.ChangeCase(
granularity="char", cadence=5.0, case="random"
)(self.texts, metadata=self.metadata)
self.assertTrue(
augmented_words[0]
== "The qUick brown 'fox' couldn't jump over the Green, graSsy hill."
)
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["change_case"])
)
def test_Contractions(self) -> None:
augmented_words = txtaugs.Contractions(aug_p=1.0)(
["I would call him but I do not know where he has gone"],
metadata=self.metadata,
)
self.assertTrue(
augmented_words[0] == "I'd call him but I don't know where he's gone"
)
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["contractions"])
)
def test_Compose(self) -> None:
random.seed(1)
augmented_compose = txtaugs.Compose(
[
txtaugs.OneOf([txtaugs.ReplaceSimilarChars(), txtaugs.SimulateTypos()]),
txtaugs.InsertPunctuationChars(),
txtaugs.ReplaceFunFonts(),
]
)(self.texts, metadata=self.metadata)
self.assertEqual(
augmented_compose,
[
"T... h... e...... u... q... i... c... k...... b... r... o... w... "
"n...... '... f... o... x... '...... c... o... u... d... n... '...... "
"t...... j... u... m... p...... o... v... e... f...... t... j... e......"
" g... r... e... e... n...,...... g... r... a... s... s... y...... h... "
"i...,... l...."
],
)
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["compose"]),
)
def test_GetBaseline(self) -> None:
augmented_baseline = txtaugs.GetBaseline()(self.texts, metadata=self.metadata)
self.assertTrue(
augmented_baseline[0]
== "The quick brown 'fox' couldn't jump over the green, grassy hill."
)
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["get_baseline"]),
)
def test_InsertPunctuationChars(self) -> None:
aug_punc_text = txtaugs.InsertPunctuationChars("all", 1.0, False)(
self.texts, metadata=self.metadata
)
# Separator inserted between every character (including spaces/punctuation).
self.assertEqual(
aug_punc_text,
[
"T?h?e? ?q?u?i?c?k? ?b?r?o?w?n? ?'?f?o?x?'? ?c?o?u?l?d?n?'?t? "
"?j?u?m?p? ?o?v?e?r? ?t?h?e? ?g?r?e?e?n?,? ?g?r?a?s?s?y? ?h?i?l?l?."
],
)
self.assertTrue(
are_equal_metadata(
self.metadata, self.expected_metadata["insert_punctuation_chars"]
),
)
def test_InsertText(self) -> None:
aug_inserted_text = txtaugs.InsertText(seed=42)(
self.texts, metadata=self.metadata, insert_text=["wolf", "sheep"]
)
self.assertEqual(
aug_inserted_text,
["wolf The quick brown 'fox' couldn't jump over the green, grassy hill."],
)
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["insert_text"]),
)
def test_InsertWhitespaceChars(self) -> None:
aug_whitespace_text = txtaugs.InsertWhitespaceChars("all", 1.0, False)(
self.texts, metadata=self.metadata
)
# Separator inserted between every character (including spaces/punctuation).
self.assertEqual(
aug_whitespace_text,
[
"T h e q u i c k b r o w n ' f o x ' c o u l d n ' t "
"j u m p o v e r t h e g r e e n , g r a s s y h i l l ."
],
)
self.assertTrue(
are_equal_metadata(
self.metadata, self.expected_metadata["insert_whitespace_chars"]
),
)
def test_InsertZeroWidthChars(self) -> None:
aug_unicode_text = txtaugs.InsertZeroWidthChars("all", 1.0, False)(
self.texts, metadata=self.metadata
)
# Separator inserted between every character (including spaces/punctuation).
# Renders as: "The quick brown 'fox' couldn't jump over the green, grassy hill."
self.assertEqual(
aug_unicode_text,
[
"T\u200ch\u200ce\u200c \u200cq\u200cu\u200ci\u200cc\u200ck\u200c "
"\u200cb\u200cr\u200co\u200cw\u200cn\u200c \u200c'\u200cf\u200co"
"\u200cx\u200c'\u200c \u200cc\u200co\u200cu\u200cl\u200cd\u200cn"
"\u200c'\u200ct\u200c \u200cj\u200cu\u200cm\u200cp\u200c \u200co"
"\u200cv\u200ce\u200cr\u200c \u200ct\u200ch\u200ce\u200c \u200cg"
"\u200cr\u200ce\u200ce\u200cn\u200c,\u200c \u200cg\u200cr\u200ca"
"\u200cs\u200cs\u200cy\u200c \u200ch\u200ci\u200cl\u200cl\u200c."
],
)
self.assertTrue(
are_equal_metadata(
self.metadata, self.expected_metadata["insert_zero_width_chars"]
),
)
def test_MergeWords(self) -> None:
aug_merge_words = txtaugs.MergeWords(aug_word_p=0.3)(
self.texts, metadata=self.metadata
)
self.assertTrue(
aug_merge_words[0]
== "The quickbrown 'fox' couldn'tjump overthe green, grassy hill."
)
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["merge_words"]),
)
def test_ReplaceBidirectional(self) -> None:
aug_bidirectional_text = txtaugs.ReplaceBidirectional()(
self.texts, metadata=self.metadata
)
# Renders as: ".llih yssarg ,neerg eht revo pmuj t'ndluoc 'xof' nworb kciuq ehT"
self.assertEqual(
aug_bidirectional_text,
[
"\u202e.llih yssarg ,neerg eht revo pmuj t'ndluoc 'xof' nworb "
"kciuq ehT\u202c"
],
)
self.assertTrue(
are_equal_metadata(
self.metadata, self.expected_metadata["replace_bidirectional"]
),
)
def test_ReplaceFunFonts(self) -> None:
aug_fun_fonts = txtaugs.ReplaceFunFonts(aug_p=0.8, vary_fonts=False, n=1)(
self.texts, metadata=self.metadata
)
self.assertTrue(
aug_fun_fonts[0]
== "𝑻𝒉𝒆 𝒒𝒖𝒊𝒄𝒌 𝒃𝒓𝒐𝒘𝒏 '𝒇𝒐𝒙' 𝒄𝒐𝒖𝒍𝒅𝒏'𝒕 𝒋𝒖𝒎𝒑 𝒐𝒗𝒆𝒓 𝒕𝒉𝒆 𝒈𝒓𝒆𝒆𝒏, 𝒈𝒓𝒂𝒔𝒔𝒚 𝒉𝒊𝒍𝒍."
)
self.assertTrue(
are_equal_metadata(
self.metadata, self.expected_metadata["replace_fun_fonts"]
),
)
def test_ReplaceSimilarChars(self) -> None:
aug_chars = txtaugs.ReplaceSimilarChars(aug_word_p=0.3, aug_char_p=0.3)(
self.texts, metadata=self.metadata
)
self.assertTrue(
aug_chars[0]
== "The quick brown 'fox' coul|)n't jump 0ver the green, grassy hill."
)
self.assertTrue(
are_equal_metadata(
self.metadata, self.expected_metadata["replace_similar_chars"]
),
)
def test_ReplaceSimilarUnicodeChars(self) -> None:
aug_unicode_chars = txtaugs.ReplaceSimilarUnicodeChars(
aug_word_p=0.3, aug_char_p=0.3
)(self.texts, metadata=self.metadata)
self.assertTrue(
aug_unicode_chars[0]
== "The ℚuick brown 'fox' coul₫n't jump ov६r the green, grassy hill."
)
self.assertTrue(
are_equal_metadata(
self.metadata, self.expected_metadata["replace_similar_unicode_chars"]
),
)
def test_ReplaceText(self) -> None:
texts = [
"The quick brown 'fox' couldn't jump over the green, grassy hill.",
"The quick brown",
"jump over the green",
]
replace_texts = {
"couldn't jump": "jumped",
"jump over the blue": "jump over the red",
"The quick brown": "The slow green",
}
aug_replaced_text = txtaugs.ReplaceText(replace_texts)(
texts=texts, metadata=self.metadata
)
self.assertEqual(
aug_replaced_text, [texts[0], replace_texts[texts[1]], texts[2]]
)
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["replace_text"]),
)
def test_ReplaceUpsideDown(self) -> None:
aug_upside_down_text = txtaugs.ReplaceUpsideDown()(
self.texts, metadata=self.metadata
)
self.assertTrue(
aug_upside_down_text[0]
== "˙llᴉɥ ʎssɐɹɓ 'uǝǝɹɓ ǝɥʇ ɹǝʌo dɯnɾ ʇ,uplnoɔ ,xoɟ, uʍoɹq ʞɔᴉnb ǝɥꞱ"
)
self.assertTrue(
are_equal_metadata(
self.metadata, self.expected_metadata["replace_upside_down"]
),
)
def test_ReplaceWords(self) -> None:
augmented_words = txtaugs.ReplaceWords()(self.texts, metadata=self.metadata)
self.assertTrue(augmented_words[0] == self.texts[0])
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["replace_words"]),
)
def test_SimulateTypos(self) -> None:
aug_typo_text = txtaugs.SimulateTypos(
aug_word_p=0.3, aug_char_p=0.3, typo_type="all"
)(self.texts, metadata=self.metadata)
self.assertTrue(
aug_typo_text[0]
== "Thw qu(ck brown 'fox' co)uldn' t jamp over the green, grassy hill.",
)
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["simulate_typos"]),
)
def test_SplitWords(self) -> None:
aug_split_words = txtaugs.SplitWords(aug_word_p=0.3)(
self.texts, metadata=self.metadata
)
self.assertTrue(
aug_split_words[0]
== "The quick b rown 'fox' couldn' t j ump over the green, gras sy hill."
)
self.assertTrue(
are_equal_metadata(self.metadata, self.expected_metadata["split_words"]),
)
def test_SwapGenderedWords(self) -> None:
augmented_words = txtaugs.SwapGenderedWords()(
self.fairness_texts, metadata=self.metadata
)
self.assertTrue(
augmented_words[0]
== "The queen and king have a daughter named Raj and a son named Amanda.",
)
self.assertTrue(
are_equal_metadata(
self.metadata, self.expected_metadata["swap_gendered_words"]
),
)
if __name__ == "__main__":
unittest.main()
|
AugLy-main
|
augly/tests/text_tests/transforms_unit_test.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from augly.utils.classes import Segment
def compute_time_crop_segments(
src_segment: Segment,
dst_segment: Segment,
speed_factor: float,
crop_start: float,
crop_end: float,
new_src_segments: List[Segment],
new_dst_segments: List[Segment],
) -> None:
"""
Calculates how the given matching pair src_segment & dst_segment change
given a temporal crop starting at crop_start & ending at crop_end. We can
use the same logic here for multiple transforms, by setting the crop_start
& crop_end depending on the transform kwargs.
    Doesn't return anything; instead, if the segment pair still matches in the
    dst video, the adjusted pair is appended to new_src_segments &
    new_dst_segments. If the passed-in segment pair is cropped out entirely by
    this temporal crop, nothing is appended to the lists, since the pair no
    longer exists in the dst video.
"""
# Crop segment is outside of the initial clip, so this matching segment
# pair no longer exists in the new video.
if crop_start >= dst_segment.end or crop_end <= dst_segment.start:
return
# new_start represents where the matching segment starts in the dst audio
# (if negative, then part of the matching segment is getting cut out, so
# we need to adjust both the src & dst starts).
new_start = dst_segment.start - crop_start
src_start, src_end, src_id = src_segment
if new_start < 0:
# We're cropping the beginning of the matching segment.
# Note: if the video was sped up before, we need to take this into account
# (the matching segment that is e.g. 10 seconds of dst audio might
# correspond to 5 seconds of src audio, if it was previously
# slowed down by 0.5x).
src_start = src_segment.start - new_start * speed_factor
new_start = 0
new_end = min(dst_segment.end - crop_start, crop_end - crop_start)
if crop_end < dst_segment.end:
# We're cropping the end of the matching segment.
# Note: if the video was sped up before, we need to take this into
# account (as above).
src_end = src_segment.end - (dst_segment.end - crop_end) * speed_factor
new_src_segments.append(Segment(src_start, src_end))
new_dst_segments.append(Segment(new_start, new_end))
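# Illustrative usage sketch (not part of the original module): the segment values
# below are made up. A src/dst pair covering seconds 2-8, cropped to [4, 10] at
# normal speed, should shrink to src (4.0, 8.0) mapped to dst (0.0, 4.0).
if __name__ == "__main__":
    new_src_segments: List[Segment] = []
    new_dst_segments: List[Segment] = []
    compute_time_crop_segments(
        Segment(2.0, 8.0),
        Segment(2.0, 8.0),
        speed_factor=1.0,
        crop_start=4.0,
        crop_end=10.0,
        new_src_segments=new_src_segments,
        new_dst_segments=new_dst_segments,
    )
    print(new_src_segments, new_dst_segments)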
|
AugLy-main
|
augly/utils/functions.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from augly.utils.base_paths import (
AUDIO_ASSETS_DIR,
EMOJI_DIR,
FONTS_DIR,
IMG_MASK_DIR,
SCREENSHOT_TEMPLATES_DIR,
TEXT_DIR,
)
# color constants
DEFAULT_COLOR = (0, 0, 0)
WHITE_RGB_COLOR = (255, 255, 255)
RED_RGB_COLOR = (255, 0, 0)
# audio & video rate constants
DEFAULT_FRAME_RATE = 10
DEFAULT_SAMPLE_RATE = 44100
# text constants
DEFAULT_TEXT_INDICES = [random.randrange(1, 1000) for _ in range(5)]
# `overlay_stripes` constants
SUPPORTED_LINE_TYPES = ["dotted", "dashed", "solid"]
# screenshot augmentation assets
BBOXES_PATH = os.path.join(SCREENSHOT_TEMPLATES_DIR, "bboxes.json")
# audio assets
SILENT_AUDIO_PATH = os.path.join(AUDIO_ASSETS_DIR, "silent.flac")
# image augmentation assets
IMG_MASK_PATH = os.path.join(IMG_MASK_DIR, "dfdc_mask.png")
"""
All emoji assets used in AugLy come from Twemoji: https://twemoji.twitter.com/
Copyright 2020 Twitter, Inc and other contributors.
Code licensed under the MIT License: http://opensource.org/licenses/MIT
Graphics licensed under CC-BY 4.0: https://creativecommons.org/licenses/by/4.0/
"""
SMILEY_EMOJI_DIR = os.path.join(EMOJI_DIR, "smileys")
EMOJI_PATH = os.path.join(SMILEY_EMOJI_DIR, "smiling_face_with_heart_eyes.png")
"""
All font assets used in AugLy come from Google Noto fonts: https://www.google.com/get/noto/
Noto is a trademark of Google Inc. Noto fonts are open source.
All Noto fonts are published under the SIL Open Font License, Version 1.1
"""
FONT_LIST_PATH = os.path.join(FONTS_DIR, "list")
FONT_PATH = os.path.join(FONTS_DIR, "NotoNaskhArabic-Regular.ttf")
MEME_DEFAULT_FONT = os.path.join(FONTS_DIR, "Raleway-ExtraBold.ttf")
# text augmentation assets
CONTRACTIONS_MAPPING = os.path.join(TEXT_DIR, "contractions.json")
FUN_FONTS_PATH = os.path.join(TEXT_DIR, "fun_fonts.json")
FUN_FONTS_GREEK_PATH = os.path.join(TEXT_DIR, "fun_fonts_greek.json")
UNICODE_MAPPING_PATH = os.path.join(TEXT_DIR, "letter_unicode_mapping.json")
MISSPELLING_DICTIONARY_PATH = os.path.join(TEXT_DIR, "misspelling.json")
"""
Text fairness augmentation assets: the feminine & masculine word lists were provided to
us by Adina Williams and are the same ones used in Dinan et al., 2020
(https://arxiv.org/pdf/2005.00614.pdf), which aggregated such word lists from
Zhao et al., 2018b, 2019 (https://aclanthology.org/D18-1521.pdf) and Hoyle et al., 2019
(https://aclanthology.org/P19-1167.pdf). We constructed the gendered words mapping file
from the feminine & masculine word lists for ease of use with the `swap_gendered_words`
augmentation to avoid having to re-compute the mapping
"""
GENDERED_WORDS_MAPPING = os.path.join(TEXT_DIR, "gendered_words_mapping.json")
|
AugLy-main
|
augly/utils/constants.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from iopath.common.file_io import HTTPURLHandler, PathManager
pathmgr = PathManager()
pathmgr.register_handler(HTTPURLHandler())
pathmgr.set_strict_kwargs_checking(False)
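# Illustrative usage sketch (not part of the original module): with the
# HTTPURLHandler registered above, `pathmgr` can fetch remote files to a local
# cache, while plain local paths pass through unchanged. The URL below is
# hypothetical.
#
#     local_path = pathmgr.get_local_path("https://example.com/assets/sample.wav")
#     with pathmgr.open(local_path, "rb") as f:
#         data = f.read()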
|
AugLy-main
|
augly/utils/io.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from distutils import spawn
from typing import Tuple
FFMPEG_PATH = os.environ.get("AUGLY_FFMPEG_PATH", None)
FFPROBE_PATH = os.environ.get("AUGLY_FFPROBE_PATH", None)
if FFMPEG_PATH is None:
FFMPEG_PATH = spawn.find_executable("ffmpeg")
if FFPROBE_PATH is None:
FFPROBE_PATH = spawn.find_executable("ffprobe")
ffmpeg_paths_error = (
"Please install 'ffmpeg' or set the {} & {} environment variables "
"before running AugLy Video"
)
assert FFMPEG_PATH is not None and FFPROBE_PATH is not None, ffmpeg_paths_error.format(
"AUGLY_FFMPEG_PATH", "AUGLY_FFPROBE_PATH"
)
def get_conditional_for_skipping_video_tests() -> Tuple[bool, str]:
return (True, "We currently do not need to skip any video tests")
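# Illustrative note (not part of the original module): to point AugLy at a custom
# ffmpeg/ffprobe build, set the environment variables read above before importing
# this module, e.g. (paths are hypothetical):
#
#     AUGLY_FFMPEG_PATH=/opt/ffmpeg/bin/ffmpeg \
#     AUGLY_FFPROBE_PATH=/opt/ffmpeg/bin/ffprobe \
#     python run_video_augmentations.py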
|
AugLy-main
|
augly/utils/ffmpeg.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.utils.asserts import (
is_audio_file,
is_image_file,
is_video_file,
validate_audio_path,
validate_image_path,
validate_output_path,
validate_path,
validate_rgb_color,
validate_video_path,
)
from augly.utils.base_paths import (
ASSETS_BASE_DIR,
AUDIO_ASSETS_DIR,
AUDIO_METADATA_PATH,
EMOJI_DIR,
FONTS_DIR,
IMAGE_METADATA_PATH,
IMG_MASK_DIR,
METADATA_BASE_PATH,
SCREENSHOT_TEMPLATES_DIR,
TEMPLATE_PATH,
TEST_URI,
TEXT_DIR,
TEXT_METADATA_PATH,
VIDEO_METADATA_PATH,
)
from augly.utils.classes import Segment
from augly.utils.constants import (
BBOXES_PATH,
CONTRACTIONS_MAPPING,
DEFAULT_COLOR,
DEFAULT_FRAME_RATE,
DEFAULT_SAMPLE_RATE,
DEFAULT_TEXT_INDICES,
EMOJI_PATH,
FONT_LIST_PATH,
FONT_PATH,
FUN_FONTS_GREEK_PATH,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
IMG_MASK_PATH,
MEME_DEFAULT_FONT,
MISSPELLING_DICTIONARY_PATH,
RED_RGB_COLOR,
SILENT_AUDIO_PATH,
SMILEY_EMOJI_DIR,
SUPPORTED_LINE_TYPES,
UNICODE_MAPPING_PATH,
WHITE_RGB_COLOR,
)
from augly.utils.functions import compute_time_crop_segments
from augly.utils.io import pathmgr
__all__ = [
"ASSETS_BASE_DIR",
"AUDIO_ASSETS_DIR",
"AUDIO_METADATA_PATH",
"EMOJI_DIR",
"FONTS_DIR",
"IMAGE_METADATA_PATH",
"IMG_MASK_DIR",
"METADATA_BASE_PATH",
"SCREENSHOT_TEMPLATES_DIR",
"TEXT_DIR",
"TEXT_METADATA_PATH",
"TEST_URI",
"VIDEO_METADATA_PATH",
"BBOXES_PATH",
"CONTRACTIONS_MAPPING",
"CATALOG_CATEGORIES",
"CATALOG_LANGUAGE_FAMILIES",
"CATALOG_STYLES",
"CATALOG_WEIGHTS",
"DEFAULT_COLOR",
"DEFAULT_FRAME_RATE",
"DEFAULT_SAMPLE_RATE",
"DEFAULT_TEXT_INDICES",
"EMOJI_PATH",
"FONT_LIST_PATH",
"FONT_PATH",
"FUN_FONTS_GREEK_PATH",
"FUN_FONTS_PATH",
"GENDERED_WORDS_MAPPING",
"IMG_MASK_PATH",
"MEME_DEFAULT_FONT",
"MISSPELLING_DICTIONARY_PATH",
"RED_RGB_COLOR",
"SILENT_AUDIO_PATH",
"SMILEY_EMOJI_DIR",
"SUPPORTED_LINE_TYPES",
"TEMPLATE_PATH",
"UNICODE_MAPPING_PATH",
"WHITE_RGB_COLOR",
"Segment",
"compute_time_crop_segments",
"is_audio_file",
"is_image_file",
"is_video_file",
"pathmgr",
"validate_audio_path",
"validate_image_path",
"validate_output_path",
"validate_path",
"validate_rgb_color",
"validate_video_path",
]
|
AugLy-main
|
augly/utils/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def install_libsndfile():
pass
|
AugLy-main
|
augly/utils/libsndfile.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
MODULE_BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# asset paths
ASSETS_BASE_DIR = os.path.join(MODULE_BASE_DIR, "assets")
AUDIO_ASSETS_DIR = os.path.join(ASSETS_BASE_DIR, "audio")
TEXT_DIR = os.path.join(ASSETS_BASE_DIR, "text")
EMOJI_DIR = os.path.join(ASSETS_BASE_DIR, "twemojis")
FONTS_DIR = os.path.join(ASSETS_BASE_DIR, "fonts")
IMG_MASK_DIR = os.path.join(ASSETS_BASE_DIR, "masks")
SCREENSHOT_TEMPLATES_DIR = os.path.join(ASSETS_BASE_DIR, "screenshot_templates")
TEMPLATE_PATH = os.path.join(SCREENSHOT_TEMPLATES_DIR, "web.png")
TEST_URI = os.path.join(MODULE_BASE_DIR, "tests", "assets")
# test paths
METADATA_BASE_PATH = os.path.join(TEST_URI, "expected_metadata")
METADATA_FILENAME = "expected_metadata.json"
AUDIO_METADATA_PATH = os.path.join(METADATA_BASE_PATH, "audio_tests", METADATA_FILENAME)
IMAGE_METADATA_PATH = os.path.join(METADATA_BASE_PATH, "image_tests", METADATA_FILENAME)
TEXT_METADATA_PATH = os.path.join(METADATA_BASE_PATH, "text_tests", METADATA_FILENAME)
VIDEO_METADATA_PATH = os.path.join(METADATA_BASE_PATH, "video_tests", METADATA_FILENAME)
|
AugLy-main
|
augly/utils/base_paths.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple, Optional
class Segment(NamedTuple):
start: float
end: float
src_id: Optional[str] = None
def delta(self, start_delta: float, end_delta: float):
new_start = self.start + start_delta
new_end = self.end + end_delta
if new_start > new_end:
raise ValueError(
f"Invalid segment created: expected {new_start} < {new_end}."
)
return Segment(new_start, new_end, self.src_id)
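# Illustrative usage sketch (not part of the original module): `delta` shifts both
# endpoints while preserving `src_id`, and rejects shifts that would invert the
# segment. The id below is made up.
if __name__ == "__main__":
    seg = Segment(1.0, 5.0, src_id="clip_a")
    print(seg.delta(0.5, -0.5))  # Segment(start=1.5, end=4.5, src_id='clip_a')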
|
AugLy-main
|
augly/utils/classes.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Tuple
import magic
from augly.utils.io import pathmgr
def is_content_type(filename: str, content_type: str) -> bool:
file_type = magic.from_file(filename, mime=True).lower()
return content_type in file_type
def is_audio_file(filename: str) -> bool:
return is_content_type(filename, "audio")
def is_image_file(filename: str) -> bool:
return is_content_type(filename, "image")
def is_video_file(filename: str) -> bool:
return is_content_type(filename, "video")
def validate_path(file_path: str) -> None:
correct_type = type(file_path) == str
path_exists = pathmgr.exists(file_path)
assert correct_type and path_exists, f"Path is invalid: {file_path}"
def validate_audio_path(audio_path: str) -> None:
validate_path(audio_path)
# since `librosa` can extract audio from audio and video
# paths, we check for both here
assert is_audio_file(audio_path) or is_video_file(
audio_path
), f"Audio path invalid: {audio_path}"
def validate_image_path(image_path: str) -> None:
validate_path(image_path)
assert is_image_file(image_path), f"Image path invalid: {image_path}"
def validate_video_path(video_path: str) -> None:
validate_path(video_path)
assert is_video_file(video_path), f"Video path invalid: {video_path}"
def validate_output_path(output_path: str) -> None:
correct_type = type(output_path) == str
dir_exists = pathmgr.exists(os.path.dirname(output_path))
assert correct_type and dir_exists, f"Output path invalid: {output_path}"
def validate_rgb_color(color: Tuple[int, int, int]) -> None:
correct_len = len(color) == 3
correct_values = all(0 <= c <= 255 for c in color)
assert correct_len and correct_values, "Invalid RGB color specified"
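# Illustrative usage sketch (not part of the original module): the validators
# above raise AssertionError on failure and return None otherwise. The file path
# below is hypothetical.
#
#     validate_rgb_color((255, 128, 0))        # passes silently
#     validate_image_path("/tmp/example.png")  # asserts the path exists & is an image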
|
AugLy-main
|
augly/utils/asserts.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from augly import utils
from augly.image import functional as F
from PIL import Image, ImageFilter
"""
Base Classes for Transforms
"""
class BaseTransform:
def __init__(self, p: float = 1.0):
"""
@param p: the probability of the transform being applied; default value is 1.0
"""
assert 0 <= p <= 1.0, "p must be a value in the range [0, 1]"
self.p = p
def __call__(
self,
image: Image.Image,
force: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
@param image: PIL Image to be augmented
@param force: if set to True, the transform will be applied. Otherwise,
application is determined by the probability set
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
assert isinstance(image, Image.Image), "Image passed in must be a PIL Image"
assert type(force) == bool, "Expected type bool for variable `force`"
if not force and random.random() > self.p:
return image
return self.apply_transform(image, metadata, bboxes, bbox_format)
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
This function is to be implemented in the child classes.
From this function, call the augmentation function with the
parameters specified
"""
raise NotImplementedError()
class BaseRandomRangeTransform(BaseTransform):
def __init__(self, min_val: float, max_val: float, p: float = 1.0):
"""
@param min_val: the lower value of the range
@param max_val: the upper value of the range
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.min_val = min_val
self.max_val = max_val
self.chosen_value = None
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
self.chosen_value = (
random.random() * (self.max_val - self.min_val)
) + self.min_val
return self.apply_random_transform(
image,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
def apply_random_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
This function is to be implemented in the child classes. It has
access to `self.chosen_value` which is the randomly chosen value
from the range specified to pass into the augmentation function
"""
raise NotImplementedError()
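# Illustrative sketch (not part of the original module): a minimal, hypothetical
# subclass of BaseRandomRangeTransform. It reads `self.chosen_value` (sampled
# uniformly from [min_val, max_val] by `apply_transform`) and delegates to the
# functional API, mirroring how the concrete transforms below are written.
class ExampleRandomBlur(BaseRandomRangeTransform):
    def apply_random_transform(
        self,
        image: Image.Image,
        metadata: Optional[List[Dict[str, Any]]] = None,
        bboxes: Optional[List[Tuple]] = None,
        bbox_format: Optional[str] = None,
    ) -> Image.Image:
        # The randomly chosen radius is passed straight through to F.blur
        return F.blur(
            image,
            radius=self.chosen_value,
            metadata=metadata,
            bboxes=bboxes,
            bbox_format=bbox_format,
        )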
"""
Non-Random Transforms
These classes below are essentially class-based versions of the augmentation
functions previously defined. These classes were developed such that they can
be used with Composition operators (such as `torchvision`'s) and to support
use cases where a specific transform with specific attributes needs to be
applied multiple times.
Example:
>>> image = Image.open("....")
>>> blur_tsfm = Blur(radius=5.0, p=0.5)
>>> blurred_image = blur_tsfm(image)
"""
class ApplyLambda(BaseTransform):
def __init__(
self,
aug_function: Callable[..., Image.Image] = lambda x: x,
p: float = 1.0,
**kwargs,
):
"""
@param aug_function: the augmentation function to be applied onto the image
(should expect a PIL image as input and return one)
@param p: the probability of the transform being applied; default value is 1.0
@param **kwargs: the input attributes to be passed into the augmentation
function to be applied
"""
super().__init__(p)
self.aug_function = aug_function
self.kwargs = kwargs
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Apply a user-defined lambda on an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.apply_lambda(
image,
aug_function=self.aug_function,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
**self.kwargs,
)
class ApplyPILFilter(BaseTransform):
def __init__(
self,
filter_type: Union[
Callable, ImageFilter.Filter
] = ImageFilter.EDGE_ENHANCE_MORE,
p: float = 1.0,
):
"""
@param filter_type: the PIL ImageFilter to apply to the image
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.filter_type = filter_type
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Applies a given PIL filter to the input image using `Image.filter()`
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.apply_pil_filter(
image,
filter_type=self.filter_type,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Blur(BaseTransform):
def __init__(self, radius: float = 2.0, p: float = 1.0):
"""
@param radius: the larger the radius, the blurrier the image
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.radius = radius
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Blurs the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.blur(
image,
radius=self.radius,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Brightness(BaseTransform):
def __init__(self, factor: float = 1.0, p: float = 1.0):
"""
@param factor: values less than 1.0 darken the image and values greater than
1.0 brighten the image. Setting factor to 1.0 will not alter the image's
brightness
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the brightness of the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.brightness(
image,
factor=self.factor,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class ChangeAspectRatio(BaseTransform):
def __init__(self, ratio: float = 1.0, p: float = 1.0):
"""
@param ratio: aspect ratio, i.e. width/height, of the new image
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.ratio = ratio
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the aspect ratio of the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.change_aspect_ratio(
image,
ratio=self.ratio,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class ClipImageSize(BaseTransform):
def __init__(
self,
min_resolution: Optional[int] = None,
max_resolution: Optional[int] = None,
p: float = 1.0,
):
"""
@param min_resolution: the minimum resolution, i.e. width * height, that the
augmented image should have; if the input image has a lower resolution than this,
the image will be scaled up as necessary
@param max_resolution: the maximum resolution, i.e. width * height, that the
augmented image should have; if the input image has a higher resolution than
this, the image will be scaled down as necessary
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.min_resolution = min_resolution
self.max_resolution = max_resolution
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Scales the image up or down if necessary to fit in the given min and max
resolution
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.clip_image_size(
image,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class ColorJitter(BaseTransform):
def __init__(
self,
brightness_factor: float = 1.0,
contrast_factor: float = 1.0,
saturation_factor: float = 1.0,
p: float = 1.0,
):
"""
@param brightness_factor: a brightness factor below 1.0 darkens the image,
a factor of 1.0 does not alter the image, and a factor greater than 1.0
brightens the image
@param contrast_factor: a contrast factor below 1.0 removes contrast, a factor
of 1.0 gives the original image, and a factor greater than 1.0 adds contrast
@param saturation_factor: a saturation factor of below 1.0 lowers the saturation,
a factor of 1.0 gives the original image, and a factor greater than 1.0 adds
saturation
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.brightness_factor = brightness_factor
self.contrast_factor = contrast_factor
self.saturation_factor = saturation_factor
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Color jitters the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.color_jitter(
image,
brightness_factor=self.brightness_factor,
contrast_factor=self.contrast_factor,
saturation_factor=self.saturation_factor,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Contrast(BaseTransform):
def __init__(self, factor: float = 1.0, p: float = 1.0):
"""
@param factor: zero gives a grayscale image, values below 1.0 decrease contrast,
a factor of 1.0 gives the original image, and a factor greater than 1.0
increases contrast
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the contrast of the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.contrast(
image,
factor=self.factor,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class ConvertColor(BaseTransform):
def __init__(
self,
mode: Optional[str] = None,
matrix: Union[
None,
Tuple[float, float, float, float],
Tuple[
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
],
] = None,
dither: Optional[int] = None,
palette: int = 0,
colors: int = 256,
p: float = 1.0,
):
"""
@param mode: defines the type and depth of a pixel in the image. If mode is
omitted, a mode is chosen so that all information in the image and the
palette can be represented without a palette. For list of available modes,
check: https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
@param matrix: an optional conversion matrix. If given, this should be 4- or
12-tuple containing floating point values.
@param dither: dithering method, used when converting from mode “RGB” to “P” or
from “RGB” or “L” to “1”. Available methods are NONE or FLOYDSTEINBERG (default)
@param palette: palette to use when converting from mode “RGB” to “P”. Available
palettes are WEB or ADAPTIVE
@param colors: number of colors to use for the ADAPTIVE palette. Defaults to 256
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.mode = mode
self.matrix = matrix
self.dither = dither
self.palette = palette
self.colors = colors
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Converts the image in terms of color modes
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.convert_color(
image,
mode=self.mode,
matrix=self.matrix,
dither=self.dither,
palette=self.palette,
colors=self.colors,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Crop(BaseTransform):
def __init__(
self,
x1: float = 0.25,
y1: float = 0.25,
x2: float = 0.75,
y2: float = 0.75,
p: float = 1.0,
):
"""
@param x1: position of the left edge of cropped image relative to the width
of the original image; must be a float value between 0 and 1
@param y1: position of the top edge of cropped image relative to the height
of the original image; must be a float value between 0 and 1
@param x2: position of the right edge of cropped image relative to the width
of the original image; must be a float value between 0 and 1
@param y2: position of the bottom edge of cropped image relative to the height
of the original image; must be a float value between 0 and 1
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.x1, self.y1 = x1, y1
self.x2, self.y2 = x2, y2
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Crops the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.crop(
image,
x1=self.x1,
y1=self.y1,
x2=self.x2,
y2=self.y2,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class EncodingQuality(BaseTransform):
def __init__(self, quality: int = 50, p: float = 1.0):
"""
@param quality: JPEG encoding quality. 0 is lowest quality, 100 is highest
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.quality = quality
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Changes the JPEG encoding quality level
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.encoding_quality(
image,
quality=self.quality,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Grayscale(BaseTransform):
def __init__(self, mode: str = "luminosity", p: float = 1.0):
"""
@param mode: the type of greyscale conversion to perform; two options are
supported ("luminosity" and "average")
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.mode = mode
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters an image to be grayscale
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.grayscale(
image,
mode=self.mode,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class HFlip(BaseTransform):
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Horizontally flips an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.hflip(
image,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class MaskedComposite(BaseTransform):
def __init__(
self, transform_function: BaseTransform, mask: Image.Image, p: float = 1.0
):
"""
@param mask: the path to an image or a variable of type PIL.Image.Image for
masking. This image can have mode “1”, “L”, or “RGBA”, and must have the
same size as the other two images. If the mask is not provided the function
returns the augmented image
@param transform_function: the augmentation function to be applied. If
transform_function is not provided, function returns the input image
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.mask = mask
self.transform_function = transform_function
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Applies given augmentation function to the masked area of the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.masked_composite(
image,
mask=self.mask,
transform_function=self.transform_function,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class MemeFormat(BaseTransform):
def __init__(
self,
text: str = "LOL",
font_file: str = utils.MEME_DEFAULT_FONT,
opacity: float = 1.0,
text_color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
caption_height: int = 250,
meme_bg_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR,
p: float = 1.0,
):
"""
@param text: the text to be overlaid/used in the meme. note: if using a very
long string, please add in newline characters such that the text remains
in a readable font size
@param font_file: iopath uri to the .ttf font file
@param opacity: the lower the opacity, the more transparent the text
@param text_color: color of the text in RGB values
@param caption_height: the height of the meme caption
@param meme_bg_color: background color of the meme caption in RGB values
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.text = text
self.text_color, self.opacity = text_color, opacity
self.caption_height, self.meme_bg_color = caption_height, meme_bg_color
self.font_file = font_file
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Creates a new image that looks like a meme, given text and an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.meme_format(
image,
text=self.text,
font_file=self.font_file,
opacity=self.opacity,
text_color=self.text_color,
caption_height=self.caption_height,
meme_bg_color=self.meme_bg_color,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Opacity(BaseTransform):
def __init__(self, level: float = 1.0, p: float = 1.0):
"""
@param level: the level the opacity should be set to, where 0 means completely
transparent and 1 means no transparency at all
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.level = level
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the opacity of an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.opacity(
image,
level=self.level,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class OverlayEmoji(BaseTransform):
def __init__(
self,
emoji_path: str = utils.EMOJI_PATH,
opacity: float = 1.0,
emoji_size: float = 0.15,
x_pos: float = 0.4,
y_pos: float = 0.8,
p: float = 1.0,
):
"""
@param emoji_path: iopath uri to the emoji image
@param opacity: the lower the opacity, the more transparent the overlaid emoji
@param emoji_size: size of the emoji is emoji_size * height of the original image
@param x_pos: position of emoji relative to the image width
@param y_pos: position of emoji relative to the image height
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.emoji_path = emoji_path
self.emoji_size, self.opacity = emoji_size, opacity
self.x_pos, self.y_pos = x_pos, y_pos
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlay an emoji onto the original image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.overlay_emoji(
image,
emoji_path=self.emoji_path,
opacity=self.opacity,
emoji_size=self.emoji_size,
x_pos=self.x_pos,
y_pos=self.y_pos,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class OverlayImage(BaseTransform):
def __init__(
self,
overlay: Union[str, Image.Image],
opacity: float = 1.0,
overlay_size: float = 1.0,
x_pos: float = 0.4,
y_pos: float = 0.4,
max_visible_opacity: float = 0.75,
p: float = 1.0,
):
"""
@param overlay: the path to an image or a variable of type PIL.Image.Image
that will be overlaid
@param opacity: the lower the opacity, the more transparent the overlaid image
@param overlay_size: size of the overlaid image is overlay_size * height
of the original image
@param x_pos: position of overlaid image relative to the image width
@param y_pos: position of overlaid image relative to the image height
@param max_visible_opacity: if bboxes are passed in, this param will be used as
the maximum opacity value through which the src image will still be
considered visible; see the function `overlay_image_bboxes_helper` in
`utils/bboxes.py` for more details about how this is used. If bboxes are not
passed in this is not used
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.overlay = overlay
self.overlay_size, self.opacity = overlay_size, opacity
self.x_pos, self.y_pos = x_pos, y_pos
self.max_visible_opacity = max_visible_opacity
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlays an image onto another image at position (width * x_pos, height * y_pos)
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.overlay_image(
image,
overlay=self.overlay,
opacity=self.opacity,
overlay_size=self.overlay_size,
x_pos=self.x_pos,
y_pos=self.y_pos,
max_visible_opacity=self.max_visible_opacity,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class OverlayOntoBackgroundImage(BaseTransform):
def __init__(
self,
background_image: Union[str, Image.Image],
opacity: float = 1.0,
overlay_size: float = 1.0,
x_pos: float = 0.4,
y_pos: float = 0.4,
scale_bg: bool = False,
p: float = 1.0,
):
"""
@param background_image: the path to an image or a variable of type
PIL.Image.Image onto which the source image will be overlaid
@param opacity: the lower the opacity, the more transparent the overlaid image
@param overlay_size: size of the overlaid image is overlay_size * height
of the background image
@param x_pos: position of overlaid image relative to the background image width
with respect to the x-axis
@param y_pos: position of overlaid image relative to the background image height
with respect to the y-axis
@param scale_bg: if True, the background image will be scaled up or down so that
overlay_size is respected; if False, the source image will be scaled instead
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.background_image = background_image
self.opacity = opacity
self.overlay_size = overlay_size
self.x_pos = x_pos
self.y_pos = y_pos
self.scale_bg = scale_bg
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlays the image onto a given background image at position
(width * x_pos, height * y_pos)
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.overlay_onto_background_image(
image,
background_image=self.background_image,
opacity=self.opacity,
overlay_size=self.overlay_size,
x_pos=self.x_pos,
y_pos=self.y_pos,
scale_bg=self.scale_bg,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class OverlayOntoScreenshot(BaseTransform):
def __init__(
self,
template_filepath: str = utils.TEMPLATE_PATH,
template_bboxes_filepath: str = utils.BBOXES_PATH,
max_image_size_pixels: Optional[int] = None,
crop_src_to_fit: bool = False,
resize_src_to_match_template: bool = True,
p: float = 1.0,
):
"""
@param template_filepath: iopath uri to the screenshot template
@param template_bboxes_filepath: iopath uri to the file containing the
bounding box for each template
@param max_image_size_pixels: if provided, the template image and/or src image
will be scaled down to avoid an output image with an area greater than this
size (in pixels)
@param crop_src_to_fit: if True, the src image will be cropped if necessary to
fit into the template image if the aspect ratios are different. If False, the
src image will instead be resized if needed
@param resize_src_to_match_template: if True, the src image will be resized if it
is too big or small in both dimensions to better match the template image. If
False, the template image will be resized to match the src image instead. It
can be useful to set this to True if the src image is very large so that the
augmented image isn't huge, but instead is the same size as the template image
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.template_filepath = template_filepath
self.template_bboxes_filepath = template_bboxes_filepath
self.max_image_size_pixels = max_image_size_pixels
self.crop_src_to_fit = crop_src_to_fit
self.resize_src_to_match_template = resize_src_to_match_template
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlay the image onto a screenshot template so it looks like it was
screenshotted on Instagram
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.overlay_onto_screenshot(
image,
template_filepath=self.template_filepath,
template_bboxes_filepath=self.template_bboxes_filepath,
max_image_size_pixels=self.max_image_size_pixels,
crop_src_to_fit=self.crop_src_to_fit,
resize_src_to_match_template=self.resize_src_to_match_template,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class OverlayStripes(BaseTransform):
def __init__(
self,
line_width: float = 0.5,
line_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR,
line_angle: float = 0,
line_density: float = 0.5,
line_type: Optional[str] = "solid",
line_opacity: float = 1.0,
p: float = 1.0,
):
"""
@param line_width: the width of individual stripes as a float value ranging
from 0 to 1. Defaults to 0.5
@param line_color: color of the overlaid lines in RGB values
@param line_angle: the angle of the stripes in degrees, ranging from
-360° to 360°. Defaults to 0° or horizontal stripes
@param line_density: controls the distance between stripes represented
as a float value ranging from 0 to 1, with 1 indicating more densely
spaced stripes. Defaults to 0.5
@param line_type: the type of stripes. Current options include: dotted,
dashed, and solid. Defaults to solid
@param line_opacity: the opacity of the stripes, ranging from 0 to 1 with
1 being opaque. Defaults to 1
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.line_width, self.line_angle = line_width, line_angle
self.line_color, self.line_opacity = line_color, line_opacity
self.line_density = line_density
self.line_type = line_type
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlay stripe pattern onto the image (by default, stripes are horizontal)
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.overlay_stripes(
image,
line_width=self.line_width,
line_color=self.line_color,
line_angle=self.line_angle,
line_density=self.line_density,
line_type=self.line_type,
line_opacity=self.line_opacity,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class OverlayText(BaseTransform):
def __init__(
self,
text: List[Union[int, List[int]]] = utils.DEFAULT_TEXT_INDICES,
font_file: str = utils.FONT_PATH,
font_size: float = 0.15,
opacity: float = 1.0,
color: Tuple[int, int, int] = utils.RED_RGB_COLOR,
x_pos: float = 0.0,
y_pos: float = 0.5,
p: float = 1.0,
):
"""
@param text: indices (into the font file) of the characters to be overlaid. Each line
of text is represented as a list of int indices; if a list of lists is
supplied, multiple lines of text will be overlaid
@param font_file: iopath uri to the .ttf font file
@param font_size: size of the overlaid characters, calculated as
font_size * min(height, width) of the original image
@param opacity: the lower the opacity, the more transparent the overlaid text
@param color: color of the overlaid text in RGB values
@param x_pos: position of the overlaid text relative to the image width
@param y_pos: position of the overlaid text relative to the image height
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.text, self.color = text, color
self.font_file = font_file
self.font_size, self.opacity = font_size, opacity
self.x_pos, self.y_pos = x_pos, y_pos
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlay text onto the image (by default, text is randomly overlaid)
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.overlay_text(
image,
text=self.text,
font_file=self.font_file,
font_size=self.font_size,
opacity=self.opacity,
color=self.color,
x_pos=self.x_pos,
y_pos=self.y_pos,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Pad(BaseTransform):
def __init__(
self,
w_factor: float = 0.25,
h_factor: float = 0.25,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
p: float = 1.0,
):
"""
@param w_factor: width * w_factor pixels are padded to both left and
right of the image
@param h_factor: height * h_factor pixels are padded to the top and
the bottom of the image
@param color: color of the padded border in RGB values
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.w_factor = w_factor
self.h_factor = h_factor
self.color = color
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Pads the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.pad(
image,
w_factor=self.w_factor,
h_factor=self.h_factor,
color=self.color,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class PadSquare(BaseTransform):
def __init__(
self, color: Tuple[int, int, int] = utils.DEFAULT_COLOR, p: float = 1.0
):
"""
@param color: color of the padded border in RGB values
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.color = color
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Pads the shorter edge of the image such that it is now square-shaped
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.pad_square(
image,
color=self.color,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class PerspectiveTransform(BaseTransform):
def __init__(
self,
sigma: float = 50.0,
dx: float = 0.0,
dy: float = 0.0,
seed: Optional[int] = 42,
p: float = 1.0,
):
"""
@param sigma: the standard deviation of the distribution of destination
coordinates. the larger the sigma value, the more intense the transform
@param seed: if provided, this will set the random seed to ensure
consistency between runs
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.sigma = sigma
self.dx, self.dy = dx, dy
self.seed = seed
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Apply a perspective transform to the image so it looks like it was taken
as a photo from another device (e.g. taking a picture from your phone of a
picture on a computer).
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.perspective_transform(
image,
sigma=self.sigma,
dx=self.dx,
dy=self.dy,
seed=self.seed,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Pixelization(BaseTransform):
def __init__(self, ratio: float = 1.0, p: float = 1.0):
"""
@param ratio: smaller values result in a more pixelated image, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.ratio = ratio
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Pixelizes an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.pixelization(
image,
ratio=self.ratio,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class RandomNoise(BaseTransform):
def __init__(
self, mean: float = 0.0, var: float = 0.01, seed: int = 42, p: float = 1.0
):
"""
@param mean: mean of the gaussian noise added
@param var: variance of the gaussian noise added
@param seed: if provided, this will set the random seed before generating noise
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.mean = mean
self.var = var
self.seed = seed
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Adds random noise to the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.random_noise(
image,
mean=self.mean,
var=self.var,
seed=self.seed,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Resize(BaseTransform):
def __init__(
self,
width: Optional[int] = None,
height: Optional[int] = None,
resample: Any = Image.BILINEAR,
p: float = 1.0,
):
"""
@param width: the desired width the image should be resized to have. If None,
the original image width will be used
@param height: the desired height the image should be resized to have. If None,
the original image height will be used
@param resample: A resampling filter. This can be one of PIL.Image.NEAREST,
PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC, or
PIL.Image.LANCZOS
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.width, self.height, self.resample = width, height, resample
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Resizes an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.resize(
image,
width=self.width,
height=self.height,
resample=self.resample,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Rotate(BaseTransform):
def __init__(self, degrees: float = 15.0, p: float = 1.0):
"""
@param degrees: the number of degrees by which the original image will be rotated
counter-clockwise
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.degrees = degrees
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Rotates the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.rotate(
image,
degrees=self.degrees,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Saturation(BaseTransform):
def __init__(self, factor: float = 1.0, p: float = 1.0):
"""
@param factor: a saturation factor below 1.0 lowers the saturation, a
factor of 1.0 gives the original image, and a factor greater than 1.0
adds saturation
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the saturation of an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.saturation(
image,
factor=self.factor,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Scale(BaseTransform):
def __init__(
self,
factor: float = 0.5,
interpolation: Optional[int] = None,
p: float = 1.0,
):
"""
@param factor: the ratio by which the image should be down-scaled
or upscaled
@param interpolation: interpolation method. This can be one of PIL.Image.NEAREST,
PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC or
PIL.Image.LANCZOS
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
self.interpolation = interpolation
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the resolution of an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.scale(
image,
factor=self.factor,
interpolation=self.interpolation,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Sharpen(BaseTransform):
def __init__(self, factor: float = 1.0, p: float = 1.0):
"""
@param factor: a factor of below 1.0 blurs the image, a factor of 1.0 gives
the original image, and a factor greater than 1.0 sharpens the image
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the sharpness of the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.sharpen(
image,
factor=self.factor,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class ShufflePixels(BaseTransform):
def __init__(self, factor: float = 1.0, seed: int = 10, p: float = 1.0):
"""
@param factor: a control parameter between 0.0 and 1.0. While a factor of
0.0 returns the original image, a factor of 1.0 performs full shuffling
@param seed: seed for numpy random generator to select random pixels
for shuffling
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
self.seed = seed
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Shuffles the pixels of an image with respect to the shuffling factor. The
factor denotes the percentage of pixels to be shuffled and randomly selected.
Note: The actual number of pixels will be less than the percentage given
due to the probability of pixels staying in place in the course of shuffling
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.shuffle_pixels(
image,
factor=self.factor,
seed=self.seed,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class Skew(BaseTransform):
def __init__(self, skew_factor: float = 0.5, axis: int = 0, p: float = 1.0):
"""
@param skew_factor: the level of skew to apply to the image; a larger absolute value will
result in a more intense skew. Recommended range is between [-2, 2]
@param axis: the axis along which the image will be skewed; can be set to 0 (x-axis)
or 1 (y-axis)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.skew_factor = skew_factor
self.axis = axis
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Skews an image with respect to its x or y-axis
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.skew(
image,
skew_factor=self.skew_factor,
axis=self.axis,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class VFlip(BaseTransform):
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Vertically flips an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.vflip(
image,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
"""
Random Transforms
These classes below are similar to the non-random transforms in the sense
where they can be used with the Compose operator, etc. However, instead of
specifying specific parameters for the augmentation, with these functions
you can specify a range (or a list) to randomly choose from instead.
Example:
>>> image = Image.open("....")
>>> blur_tsfm = RandomBlur(min_radius=2.0, max_radius=5.0, p=0.5)
>>> blurred_image = blur_tsfm(image)
"""
class RandomAspectRatio(BaseRandomRangeTransform):
def __init__(self, min_ratio: float = 0.5, max_ratio: float = 2.0, p: float = 1.0):
"""
@param min_ratio: the lower value on the range of aspect ratio values to choose
from, i.e. the width/height ratio
@param max_ratio: the upper value on the range of aspect ratio values to choose
from, i.e. the width/height ratio
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_ratio, max_ratio, p)
def apply_random_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Transform that randomly changes the aspect ratio of an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.change_aspect_ratio(
image,
ratio=self.chosen_value,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class RandomBlur(BaseRandomRangeTransform):
def __init__(
self, min_radius: float = 0.0, max_radius: float = 10.0, p: float = 1.0
):
"""
@param min_radius: the lower value on the range of blur values to choose
from. The larger the radius, the blurrier the image
@param max_radius: the upper value on the range of blur values to choose
from. The larger the radius, the blurrier the image
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_radius, max_radius, p)
def apply_random_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Transform that randomly blurs an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.blur(
image,
radius=self.chosen_value,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class RandomBrightness(BaseRandomRangeTransform):
def __init__(
self, min_factor: float = 0.0, max_factor: float = 2.0, p: float = 1.0
):
"""
@param min_factor: the lower value on the range of brightness values to choose
from. The lower the factor, the darker the image
@param max_factor: the upper value on the range of brightness values to choose
from. The higher the factor, the brighter the image
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_factor, max_factor, p)
def apply_random_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Transform that randomly changes the brightness of an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.brightness(
image,
factor=self.chosen_value,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class RandomEmojiOverlay(BaseTransform):
def __init__(
self,
emoji_directory: str = utils.SMILEY_EMOJI_DIR,
opacity: float = 1.0,
emoji_size: Union[float, Tuple[float, float]] = 0.15,
x_pos: Union[float, Tuple[float, float]] = 0.4,
y_pos: Union[float, Tuple[float, float]] = 0.8,
seed: Optional[int] = 42,
p: float = 1.0,
):
"""
@param emoji_directory: iopath directory uri containing the emoji images
@param opacity: the lower the opacity, the more transparent the overlaid emoji
@param emoji_size: size of the emoji is emoji_size * height of the original
image. If set to a tuple, a position will randomly be chosen from the range
provided
@param x_pos: position of emoji relative to the image width. If set to a tuple, a
position will randomly be chosen from the range provided
@param y_pos: position of emoji relative to the image height. If set to a tuple, a
position will randomly be chosen from the range provided
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.emoji_directory = emoji_directory
self.emoji_paths = utils.pathmgr.ls(emoji_directory)
self.opacity = opacity
self.emoji_size = emoji_size
self.x_pos = x_pos
self.y_pos = y_pos
self.seed = seed
def apply_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Transform that overlays a random emoji onto an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
assert isinstance(self.emoji_size, (float, int)) or (
isinstance(self.emoji_size, tuple)
and self.emoji_size[0] < self.emoji_size[1] # pyre-ignore
), "emoji_size must be a float or a tuple [low, high) to sample the value from"
assert isinstance(self.x_pos, (float, int)) or (
isinstance(self.x_pos, tuple)
and self.x_pos[0] < self.x_pos[1] # pyre-ignore
), "x_pos must be a float or a tuple [low, high) to sample the value from"
assert isinstance(self.y_pos, (float, int)) or (
isinstance(self.y_pos, tuple)
and self.y_pos[0] < self.y_pos[1] # pyre-ignore
), "y_pos must be a float or a tuple [low, high) to sample the value from"
if self.seed is not None:
random.seed(self.seed)
emoji_size = float(
random.uniform(self.emoji_size[0], self.emoji_size[1]) # pyre-ignore
if isinstance(self.emoji_size, tuple)
else self.emoji_size
)
x_pos = float(
random.uniform(self.x_pos[0], self.x_pos[1]) # pyre-ignore
if isinstance(self.x_pos, tuple)
else self.x_pos
)
y_pos = float(
random.uniform(self.y_pos[0], self.y_pos[1]) # pyre-ignore
if isinstance(self.y_pos, tuple)
else self.y_pos
)
emoji_path = random.choice(self.emoji_paths)
return F.overlay_emoji(
image,
emoji_path=os.path.join(self.emoji_directory, emoji_path),
opacity=self.opacity,
emoji_size=emoji_size,
x_pos=x_pos,
y_pos=y_pos,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
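# Illustrative usage (editor's sketch, not part of the original file): passing
# tuples samples the emoji size and position uniformly from [low, high), as
# described in the constructor docstring above.
#   tsfm = RandomEmojiOverlay(emoji_size=(0.1, 0.3), x_pos=(0.0, 0.8), p=0.5)
#   aug_image = tsfm(image)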
class RandomPixelization(BaseRandomRangeTransform):
def __init__(self, min_ratio: float = 0.1, max_ratio: float = 1.0, p: float = 1.0):
"""
@param min_ratio: the lower value on the range of pixelization ratio values to
choose from. Smaller values result in a more pixelated image, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param max_ratio: the upper value on the range of pixelization ratio values to
choose from. Smaller values result in a more pixelated image, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_ratio, max_ratio, p)
def apply_random_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Transform that randomly pixelizes an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.pixelization(
image,
ratio=self.chosen_value,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
class RandomRotation(BaseRandomRangeTransform):
def __init__(
self, min_degrees: float = 0.0, max_degrees: float = 180.0, p: float = 1.0
):
"""
@param min_degrees: the lower value on the range of degree values to choose from
@param max_degrees: the upper value on the range of degree values to choose from
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(min_degrees, max_degrees, p)
def apply_random_transform(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Transform that randomly rotates an image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values
are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
return F.rotate(
image,
degrees=self.chosen_value,
metadata=metadata,
bboxes=bboxes,
bbox_format=bbox_format,
)
|
AugLy-main
|
augly/image/transforms.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
"""
This file contains 'intensity' functions for each of our augmentations.
Intensity functions give a float representation of how intense a particular
transform called with particular parameters is.
Each intensity function expects as parameters the kwargs the corresponding
transform was called with, as well as the metadata dictionary computed by the
transform (e.g. metadata[-1] after passing a list 'metadata' into the transform).
All intensity functions are normalized to be in [0, 100]. This means we are
assuming a range of valid values for each param - e.g. for change_aspect_ratio
we assume we will never change the aspect ratio of the video by more than 10x,
meaning the range of valid values for `ratio` is [0.1, 10.0], which you can see
is assumed in the intensity function below.
"""
def apply_pil_filter_intensity(**kwargs) -> float:
return 100.0
def apply_lambda_intensity(aug_function: str, **kwargs) -> float:
intensity_func = globals().get(f"{aug_function}_intensity")
return intensity_func(**kwargs) if intensity_func else 100.0
def blur_intensity(radius: int, **kwargs) -> float:
assert (
isinstance(radius, (float, int)) and radius >= 0
), "radius must be a non-negative number"
max_radius = 100
return min((radius / max_radius) * 100.0, 100.0)
def brightness_intensity(factor: float, **kwargs) -> float:
return mult_factor_intensity_helper(factor)
def change_aspect_ratio_intensity(
ratio: float, metadata: Dict[str, Any], **kwargs
) -> float:
assert (
isinstance(ratio, (float, int)) and ratio > 0
), "ratio must be a positive number"
if ratio == metadata["src_width"] / metadata["src_height"]:
return 0.0
max_ratio = 10.0
ratio = ratio if ratio >= 1 else 1 / ratio
return min((ratio / max_ratio) * 100.0, 100.0)
def clip_image_size_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return resize_intensity_helper(metadata)
def color_jitter_intensity(
brightness_factor: float, contrast_factor: float, saturation_factor: float, **kwargs
) -> float:
assert (
isinstance(brightness_factor, (float, int)) and brightness_factor >= 0
), "brightness_factor must be a nonnegative number"
assert (
isinstance(contrast_factor, (float, int)) and contrast_factor >= 0
), "contrast_factor must be a nonnegative number"
assert (
isinstance(saturation_factor, (float, int)) and saturation_factor >= 0
), "saturation_factor must be a nonnegative number"
max_total_factor = 30
brightness_factor = normalize_mult_factor(brightness_factor)
contrast_factor = normalize_mult_factor(contrast_factor)
saturation_factor = normalize_mult_factor(saturation_factor)
total_factor = brightness_factor + contrast_factor + saturation_factor
return min((total_factor / max_total_factor) * 100.0, 100.0)
def contrast_intensity(factor: float, **kwargs) -> float:
return mult_factor_intensity_helper(factor)
def convert_color_intensity(**kwargs) -> float:
return 100.0
def crop_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return resize_intensity_helper(metadata)
def encoding_quality_intensity(quality: int, **kwargs) -> float:
assert (
isinstance(quality, int) and 0 <= quality <= 100
), "quality must be a number in [0, 100]"
return ((100 - quality) / 100) * 100.0
def grayscale_intensity(**kwargs) -> float:
return 100.0
def hflip_intensity(**kwargs) -> float:
return 100.0
def masked_composite_intensity(
mask: Optional[Union[str, Image.Image]], metadata: Dict[str, Any], **kwargs
) -> float:
if mask is None:
mask_intensity = 1.0
else:
mask = imutils.validate_and_load_image(mask)
mask_arr = np.array(mask)
# There can be 3 dimensions if the mask is RGBA format, in which case
# we only care about the last channel (alpha) to determine the mask
mask_values = mask_arr[:, :, -1] if mask_arr.ndim == 3 else mask_arr
mask_intensity = np.sum(mask_values > 0) / (
mask_values.shape[0] * mask_values.shape[1]
)
if metadata["transform_function"] is None:
aug_intensity = 0.0
else:
aug_intensity_func = globals().get(
f"{metadata['transform_function']}_intensity"
)
aug_intensity = (
aug_intensity_func(**kwargs) / 100.0
if aug_intensity_func is not None
else 1.0
)
return (aug_intensity * mask_intensity) * 100.0
def meme_format_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return resize_intensity_helper(metadata)
def opacity_intensity(level: float, **kwargs) -> float:
assert (
isinstance(level, (float, int)) and 0 <= level <= 1
), "level must be a number in [0, 1]"
return (1 - level) * 100.0
def overlay_emoji_intensity(emoji_size: float, opacity: float, **kwargs) -> float:
return overlay_media_intensity_helper(opacity, emoji_size)
def overlay_image_intensity(opacity: float, overlay_size: float, **kwargs) -> float:
return overlay_media_intensity_helper(opacity, overlay_size)
def overlay_onto_background_image_intensity(
opacity: float, overlay_size: float, **kwargs
) -> float:
return 100.0 - overlay_media_intensity_helper(opacity, overlay_size)
def overlay_onto_screenshot_intensity(
template_filepath: str,
template_bboxes_filepath: str,
metadata: Dict[str, Any],
**kwargs,
) -> float:
_, bbox = imutils.get_template_and_bbox(template_filepath, template_bboxes_filepath)
bbox_area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
dst_area = metadata["dst_width"] * metadata["dst_height"]
return min(((dst_area - bbox_area) / dst_area) * 100.0, 100.0)
def overlay_stripes_intensity(
line_width: float,
line_angle: float,
line_density: float,
line_type: str,
line_opacity: float,
metadata: Dict[str, Any],
**kwargs,
) -> float:
binary_mask = imutils.compute_stripe_mask(
src_w=metadata["src_width"],
src_h=metadata["src_height"],
line_width=line_width,
line_angle=line_angle,
line_density=line_density,
)
if line_type == "dotted":
# To create dotted effect, multiply mask by stripes in perpendicular direction
perpendicular_mask = imutils.compute_stripe_mask(
src_w=metadata["src_width"],
src_h=metadata["src_height"],
line_width=line_width,
line_angle=line_angle + 90,
line_density=line_density,
)
binary_mask *= perpendicular_mask
elif line_type == "dashed":
# To create dashed effect, multiply mask by stripes with a larger line
# width in perpendicular direction
perpendicular_mask = imutils.compute_stripe_mask(
src_w=metadata["src_width"],
src_h=metadata["src_height"],
line_width=0.7,
line_angle=line_angle + 90,
line_density=line_density,
)
binary_mask *= perpendicular_mask
perc_stripes = np.mean(binary_mask)
return overlay_media_intensity_helper(line_opacity, perc_stripes)
def overlay_text_intensity(opacity: float, font_size: float, **kwargs) -> float:
return overlay_media_intensity_helper(opacity, font_size)
def pad_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return resize_intensity_helper(metadata)
def pad_square_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return resize_intensity_helper(metadata)
def perspective_transform_intensity(sigma: float, **kwargs) -> float:
assert (
isinstance(sigma, (float, int)) and sigma >= 0
), "sigma must be a non-negative number"
max_sigma_val = 100
sigma_intensity = sigma / max_sigma_val
return min(sigma_intensity * 100.0, 100.0)
def pixelization_intensity(ratio: float, **kwargs) -> float:
assert (
isinstance(ratio, (float, int)) and ratio > 0
), "ratio must be a positive number"
return min((1 - ratio) * 100.0, 100.0)
def random_noise_intensity(mean: float, var: float, **kwargs) -> float:
assert isinstance(mean, (float, int)), "mean must be a number"
assert (
isinstance(var, (float, int)) and var >= 0
), "var must be a non-negative number"
max_mean_val = 100
max_var_val = 10
# Even if mean or var is 0, we want the intensity to be non-zero if the
# other one is non-zero, so we add a little jitter away from 0
mean_intensity = max(abs(mean / max_mean_val), 0.01)
var_intensity = max(var / max_var_val, 0.01)
return (mean_intensity * var_intensity) * 100.0
def resize_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return resize_intensity_helper(metadata)
def rotate_intensity(degrees: float, **kwargs) -> float:
assert isinstance(degrees, (float, int)), "degrees must be a number"
max_degrees_val = 180
degrees = abs(degrees) % 180
return (degrees / max_degrees_val) * 100.0
def saturation_intensity(factor: float, **kwargs) -> float:
return mult_factor_intensity_helper(factor)
def scale_intensity(factor: float, **kwargs) -> float:
assert (
isinstance(factor, (float, int)) and factor > 0
), "factor must be a positive number"
if factor == 1.0:
return 0.0
max_factor_val = 10.0
scale_factor = factor if factor > 1 else 1 / factor
return min((scale_factor / max_factor_val) * 100.0, 100.0)
def sharpen_intensity(factor: float, **kwargs) -> float:
return mult_factor_intensity_helper(factor)
def shuffle_pixels_intensity(factor: float, **kwargs) -> float:
return factor * 100.0
def skew_intensity(skew_factor: float, **kwargs) -> float:
max_skew_factor = 2.0
return min((abs(skew_factor) / max_skew_factor) * 100.0, 100.0)
def vflip_intensity(**kwargs) -> float:
return 100.0
def normalize_mult_factor(factor: float) -> float:
assert (
isinstance(factor, (float, int)) and factor >= 0
), "factor must be a non-negative number"
if factor == 1:
return 0.0
return factor if factor >= 1 else 1 / factor
def mult_factor_intensity_helper(factor: float) -> float:
factor = normalize_mult_factor(factor)
max_factor = 10
return min((factor / max_factor) * 100.0, 100.0)
def overlay_media_intensity_helper(
opacity: float, overlay_content_size: float
) -> float:
assert (
isinstance(opacity, (float, int)) and 0 <= opacity <= 1
), "opacity must be a number in [0, 1]"
assert (
isinstance(overlay_content_size, (float, int))
and 0 <= overlay_content_size <= 1
), "content size factor must be a number in [0, 1]"
return (opacity * (overlay_content_size**2)) * 100.0
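# Worked example (editor's note) for the helper above: opacity=0.8 and an overlay
# spanning half the image dimension (0.5) gives 0.8 * 0.5**2 * 100.0 = 20.0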
def resize_intensity_helper(metadata: Dict[str, Any]) -> float:
"""
Computes intensity of any transform that resizes the src image. For these
types of transforms the intensity is defined as the percentage of image
area that has been cut out (if cropped/resized to smaller) or added (if
padding/resized to bigger). When computing the percentage, the denominator
should be the larger of the src & dst areas so the resulting percentage
isn't greater than 100.
"""
src_area = metadata["src_width"] * metadata["src_height"]
dst_area = metadata["dst_width"] * metadata["dst_height"]
larger_area = max(src_area, dst_area)
return (abs(dst_area - src_area) / larger_area) * 100.0
|
AugLy-main
|
augly/image/intensity.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.image.composition import Compose, OneOf
from augly.image.functional import (
apply_lambda,
apply_pil_filter,
blur,
brightness,
change_aspect_ratio,
clip_image_size,
color_jitter,
contrast,
convert_color,
crop,
encoding_quality,
grayscale,
hflip,
masked_composite,
meme_format,
opacity,
overlay_emoji,
overlay_image,
overlay_onto_background_image,
overlay_onto_screenshot,
overlay_stripes,
overlay_text,
pad,
pad_square,
perspective_transform,
pixelization,
random_noise,
resize,
rotate,
saturation,
scale,
sharpen,
shuffle_pixels,
skew,
vflip,
)
from augly.image.helpers import aug_np_wrapper
from augly.image.intensity import (
apply_lambda_intensity,
apply_pil_filter_intensity,
blur_intensity,
brightness_intensity,
change_aspect_ratio_intensity,
clip_image_size_intensity,
color_jitter_intensity,
contrast_intensity,
convert_color_intensity,
crop_intensity,
encoding_quality_intensity,
grayscale_intensity,
hflip_intensity,
masked_composite_intensity,
meme_format_intensity,
opacity_intensity,
overlay_emoji_intensity,
overlay_image_intensity,
overlay_onto_background_image_intensity,
overlay_onto_screenshot_intensity,
overlay_stripes_intensity,
overlay_text_intensity,
pad_intensity,
pad_square_intensity,
perspective_transform_intensity,
pixelization_intensity,
random_noise_intensity,
resize_intensity,
rotate_intensity,
saturation_intensity,
scale_intensity,
sharpen_intensity,
shuffle_pixels_intensity,
skew_intensity,
vflip_intensity,
)
from augly.image.transforms import (
ApplyLambda,
ApplyPILFilter,
Blur,
Brightness,
ChangeAspectRatio,
ClipImageSize,
ColorJitter,
Contrast,
ConvertColor,
Crop,
EncodingQuality,
Grayscale,
HFlip,
MaskedComposite,
MemeFormat,
Opacity,
OverlayEmoji,
OverlayImage,
OverlayOntoBackgroundImage,
OverlayOntoScreenshot,
OverlayStripes,
OverlayText,
Pad,
PadSquare,
PerspectiveTransform,
Pixelization,
RandomAspectRatio,
RandomBlur,
RandomBrightness,
RandomEmojiOverlay,
RandomNoise,
RandomPixelization,
RandomRotation,
Resize,
Rotate,
Saturation,
Scale,
Sharpen,
ShufflePixels,
Skew,
VFlip,
)
__all__ = [
"ApplyLambda",
"ApplyPILFilter",
"Blur",
"Brightness",
"ChangeAspectRatio",
"ClipImageSize",
"ColorJitter",
"Compose",
"Contrast",
"ConvertColor",
"Crop",
"EncodingQuality",
"Grayscale",
"HFlip",
"MaskedComposite",
"MemeFormat",
"OneOf",
"Opacity",
"OverlayEmoji",
"OverlayImage",
"OverlayOntoBackgroundImage",
"OverlayOntoScreenshot",
"OverlayStripes",
"OverlayText",
"Pad",
"PadSquare",
"PerspectiveTransform",
"Pixelization",
"RandomAspectRatio",
"RandomBlur",
"RandomBrightness",
"RandomEmojiOverlay",
"RandomNoise",
"RandomPixelization",
"RandomRotation",
"Resize",
"Rotate",
"Saturation",
"Scale",
"Sharpen",
"ShufflePixels",
"Skew",
"VFlip",
"apply_lambda",
"apply_pil_filter",
"aug_np_wrapper",
"blur",
"brightness",
"change_aspect_ratio",
"clip_image_size",
"color_jitter",
"contrast",
"convert_color",
"crop",
"encoding_quality",
"grayscale",
"hflip",
"masked_composite",
"meme_format",
"opacity",
"overlay_emoji",
"overlay_image",
"overlay_onto_background_image",
"overlay_onto_screenshot",
"overlay_stripes",
"overlay_text",
"pad",
"pad_square",
"perspective_transform",
"pixelization",
"random_noise",
"resize",
"rotate",
"saturation",
"scale",
"sharpen",
"shuffle_pixels",
"skew",
"vflip",
"apply_lambda_intensity",
"apply_pil_filter_intensity",
"blur_intensity",
"brightness_intensity",
"change_aspect_ratio_intensity",
"clip_image_size_intensity",
"color_jitter_intensity",
"contrast_intensity",
"convert_color_intensity",
"crop_intensity",
"encoding_quality_intensity",
"grayscale_intensity",
"hflip_intensity",
"masked_composite_intensity",
"meme_format_intensity",
"opacity_intensity",
"overlay_emoji_intensity",
"overlay_image_intensity",
"overlay_onto_background_image_intensity",
"overlay_onto_screenshot_intensity",
"overlay_stripes_intensity",
"overlay_text_intensity",
"pad_intensity",
"pad_square_intensity",
"perspective_transform_intensity",
"pixelization_intensity",
"random_noise_intensity",
"resize_intensity",
"rotate_intensity",
"saturation_intensity",
"scale_intensity",
"sharpen_intensity",
"shuffle_pixels_intensity",
"skew_intensity",
"vflip_intensity",
]
|
AugLy-main
|
augly/image/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def apply_lambda(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
aug_function: Callable[..., Image.Image] = lambda x: x,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
**kwargs,
) -> Image.Image:
"""
Apply a user-defined lambda on an image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param aug_function: the augmentation function to be applied onto the image
(should expect a PIL image as input and return one)
@param **kwargs: the input attributes to be passed into the augmentation
function to be applied
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert callable(aug_function), (
repr(type(aug_function).__name__) + " object is not callable"
)
image = imutils.validate_and_load_image(image)
func_kwargs = deepcopy(locals())
if aug_function is not None:
try:
func_kwargs["aug_function"] = aug_function.__name__
except AttributeError:
func_kwargs["aug_function"] = type(aug_function).__name__
func_kwargs = imutils.get_func_kwargs(metadata, func_kwargs)
src_mode = image.mode
aug_image = aug_function(image, **kwargs)
imutils.get_metadata(
metadata=metadata,
function_name="apply_lambda",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def apply_pil_filter(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
filter_type: Union[Callable, ImageFilter.Filter] = ImageFilter.EDGE_ENHANCE_MORE,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Applies a given PIL filter to the input image using `Image.filter()`
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param filter_type: the PIL ImageFilter to apply to the image
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
func_kwargs = deepcopy(locals())
ftr = filter_type() if isinstance(filter_type, Callable) else filter_type
assert isinstance(
ftr, ImageFilter.Filter
), "Filter type must be a PIL.ImageFilter.Filter class"
func_kwargs = imutils.get_func_kwargs(
metadata, func_kwargs, filter_type=getattr(ftr, "name", filter_type)
)
src_mode = image.mode
aug_image = image.filter(ftr)
imutils.get_metadata(
metadata=metadata,
function_name="apply_pil_filter",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
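# Illustrative usage (editor's sketch, not part of the original file): the filter
# may be passed as a PIL ImageFilter class or instance, e.g.
#   aug_image = apply_pil_filter("input.png", filter_type=ImageFilter.SHARPEN)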
def blur(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
radius: float = 2.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Blurs the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param radius: the larger the radius, the blurrier the image
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert radius > 0, "Radius must be a positive number"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
aug_image = image.filter(ImageFilter.GaussianBlur(radius))
imutils.get_metadata(
metadata=metadata,
function_name="blur",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def brightness(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
factor: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Changes the brightness of the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param factor: values less than 1.0 darken the image and values greater than 1.0
brighten the image. Setting factor to 1.0 will not alter the image's brightness
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
aug_image = ImageEnhance.Brightness(image).enhance(factor)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
imutils.get_metadata(metadata=metadata, function_name="brightness", **func_kwargs)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def change_aspect_ratio(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
ratio: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Changes the aspect ratio of the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param ratio: aspect ratio, i.e. width/height, of the new image
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert ratio > 0, "Ratio must be a positive number"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
width, height = image.size
area = width * height
new_width = int(math.sqrt(ratio * area))
new_height = int(area / new_width)
aug_image = image.resize((new_width, new_height))
imutils.get_metadata(
metadata=metadata,
function_name="change_aspect_ratio",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
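# Worked sketch of the resize math above: the new size is chosen so the pixel area is
# roughly preserved while width/height is approximately `ratio`. For a 640x480 input
# (area 307200) and ratio=2.0:
#   new_width  = int(sqrt(2.0 * 307200)) = 783
#   new_height = int(307200 / 783)       = 392   # 783 / 392 is roughly 2.0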
def clip_image_size(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
min_resolution: Optional[int] = None,
max_resolution: Optional[int] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Scales the image up or down if necessary to fit in the given min and max resolution
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param min_resolution: the minimum resolution, i.e. width * height, that the
augmented image should have; if the input image has a lower resolution than this,
the image will be scaled up as necessary
@param max_resolution: the maximum resolution, i.e. width * height, that the
augmented image should have; if the input image has a higher resolution than
this, the image will be scaled down as necessary
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert min_resolution is None or (
isinstance(min_resolution, int) and min_resolution >= 0
), "min_resolution must be None or a nonnegative int"
assert max_resolution is None or (
isinstance(max_resolution, int) and max_resolution >= 0
), "max_resolution must be None or a nonnegative int"
assert not (
min_resolution is not None
and max_resolution is not None
and min_resolution > max_resolution
), "min_resolution cannot be greater than max_resolution"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
aug_image = image
if min_resolution is not None and image.width * image.height < min_resolution:
resize_factor = math.sqrt(min_resolution / (image.width * image.height))
aug_image = scale(aug_image, factor=resize_factor)
elif max_resolution is not None and image.width * image.height > max_resolution:
resize_factor = math.sqrt(max_resolution / (image.width * image.height))
aug_image = scale(aug_image, factor=resize_factor)
imutils.get_metadata(
metadata=metadata,
function_name="clip_image_size",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
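# Worked sketch of the scaling above: the resize factor is the square root of the
# target/current resolution ratio, applied uniformly to both dimensions. For a 100x100
# input (10000 px) with min_resolution=40000, the factor is sqrt(4.0) = 2.0, giving
# roughly a 200x200 output; max_resolution works the same way in the downscaling case.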
def color_jitter(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
brightness_factor: float = 1.0,
contrast_factor: float = 1.0,
saturation_factor: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Color jitters the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param brightness_factor: a brightness factor below 1.0 darkens the image, a factor
of 1.0 does not alter the image, and a factor greater than 1.0 brightens the image
@param contrast_factor: a contrast factor below 1.0 removes contrast, a factor of
1.0 gives the original image, and a factor greater than 1.0 adds contrast
@param saturation_factor: a saturation factor of below 1.0 lowers the saturation,
a factor of 1.0 gives the original image, and a factor greater than 1.0
adds saturation
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
aug_image = ImageEnhance.Brightness(image).enhance(brightness_factor)
aug_image = ImageEnhance.Contrast(aug_image).enhance(contrast_factor)
aug_image = ImageEnhance.Color(aug_image).enhance(saturation_factor)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
imutils.get_metadata(metadata=metadata, function_name="color_jitter", **func_kwargs)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def contrast(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
factor: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the contrast of the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param factor: zero gives a grayscale image, values below 1.0 decreases contrast,
a factor of 1.0 gives the original image, and a factor greater than 1.0
increases contrast
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
enhancer = ImageEnhance.Contrast(image)
aug_image = enhancer.enhance(factor)
imutils.get_metadata(
metadata=metadata,
function_name="contrast",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def convert_color(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
mode: Optional[str] = None,
matrix: Union[
None,
Tuple[float, float, float, float],
Tuple[
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
],
] = None,
dither: Optional[int] = None,
palette: int = 0,
colors: int = 256,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Converts the image in terms of color modes
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param mode: defines the type and depth of a pixel in the image. If mode is omitted,
a mode is chosen so that all information in the image and the palette can be
represented without a palette. For list of available modes, check:
https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
@param matrix: an optional conversion matrix. If given, this should be 4- or
12-tuple containing floating point values
@param dither: dithering method, used when converting from mode “RGB” to “P” or from
“RGB” or “L” to “1”. Available methods are NONE or FLOYDSTEINBERG (default).
@param palette: palette to use when converting from mode “RGB” to “P”. Available
palettes are WEB or ADAPTIVE
@param colors: number of colors to use for the ADAPTIVE palette. Defaults to 256.
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
# pyre-fixme[6]: Expected `Union[typing_extensions.Literal[0],
# typing_extensions.Literal[1]]` for 4th param but got `int`.
aug_image = image.convert(mode, matrix, dither, palette, colors)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
imutils.get_metadata(
metadata=metadata,
function_name="convert_color",
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path)
def crop(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
x1: float = 0.25,
y1: float = 0.25,
x2: float = 0.75,
y2: float = 0.75,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Crops the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param x1: position of the left edge of cropped image relative to the width of
the original image; must be a float value between 0 and 1
@param y1: position of the top edge of cropped image relative to the height of
the original image; must be a float value between 0 and 1
@param x2: position of the right edge of cropped image relative to the width of
the original image; must be a float value between 0 and 1
@param y2: position of the bottom edge of cropped image relative to the height of
the original image; must be a float value between 0 and 1
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert 0 <= x1 <= 1.0, "x1 must be a value in the range [0, 1]"
assert 0 <= y1 <= 1.0, "y1 must be a value in the range [0, 1]"
assert x1 < x2 <= 1.0, "x2 must be a value in the range [x1, 1]"
assert y1 < y2 <= 1.0, "y2 must be a value in the range [y1, 1]"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
width, height = image.size
left, right = int(width * x1), int(width * x2)
top, bottom = int(height * y1), int(height * y2)
aug_image = image.crop((left, top, right, bottom))
imutils.get_metadata(
metadata=metadata,
function_name="crop",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
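# Worked sketch of the crop box above: the fractional coordinates are scaled by the
# image size. For a 640x480 input with the defaults (0.25, 0.25, 0.75, 0.75), the box
# is (left, top, right, bottom) = (160, 120, 480, 360), i.e. the center 320x240 region.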
def encoding_quality(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
quality: int = 50,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Changes the JPEG encoding quality level
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param quality: JPEG encoding quality. 0 is lowest quality, 100 is highest
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert 0 <= quality <= 100, "'quality' must be a value in the range [0, 100]"
image = imutils.validate_and_load_image(image).convert("RGB")
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
buffer = io.BytesIO()
image.save(buffer, format="JPEG", quality=quality)
aug_image = Image.open(buffer)
imutils.get_metadata(
metadata=metadata,
function_name="encoding_quality",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
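# Minimal usage sketch for encoding_quality (file name is an assumption): the image is
# round-tripped through an in-memory JPEG buffer, so lower `quality` values introduce
# visible compression artifacts:
#
#   degraded = encoding_quality("input.png", quality=10)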
def grayscale(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
mode: str = "luminosity",
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Changes an image to be grayscale
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param mode: the type of grayscale conversion to perform; two options
are supported ("luminosity" and "average")
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert mode in [
"luminosity",
"average",
], "Greyscale mode not supported -- choose either 'luminosity' or 'average'"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
# If grayscale image is passed in, return it
if image.mode == "L":
aug_image = image
else:
if mode == "luminosity":
aug_image = image.convert(mode="L")
elif mode == "average":
np_image = np.asarray(image).astype(np.float32)
np_image = np.average(np_image, axis=2)
aug_image = Image.fromarray(np.uint8(np_image))
aug_image = aug_image.convert(mode="RGB")
imutils.get_metadata(
metadata=metadata,
function_name="grayscale",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def hflip(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Horizontally flips an image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
aug_image = image.transpose(Image.FLIP_LEFT_RIGHT)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
imutils.get_metadata(metadata=metadata, function_name="hflip", **func_kwargs)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def masked_composite(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
mask: Optional[Union[str, Image.Image]] = None,
transform_function: Optional[Callable] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Applies given augmentation function to the masked area of the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param mask: the path to an image or a variable of type PIL.Image.Image for
masking. This image can have mode “1”, “L”, or “RGBA”, and must have the
same size as the other two images. If the mask is not provided, the function
returns the augmented image
@param transform_function: the augmentation function to be applied. If
transform_function is not provided, the function returns the input image
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
func_kwargs = deepcopy(locals())
if transform_function is not None:
try:
func_kwargs["transform_function"] = transform_function.__name__
except AttributeError:
func_kwargs["transform_function"] = type(transform_function).__name__
func_kwargs = imutils.get_func_kwargs(metadata, func_kwargs)
src_mode = image.mode
if transform_function is None:
masked_image = imutils.ret_and_save_image(image, output_path)
else:
aug_image = transform_function(image)
if mask is None:
masked_image = imutils.ret_and_save_image(aug_image, output_path, src_mode)
else:
mask = imutils.validate_and_load_image(mask)
assert image.size == mask.size, "Mask size must be equal to image size"
masked_image = Image.composite(aug_image, image, mask)
imutils.get_metadata(
metadata=metadata,
function_name="masked_composite",
aug_image=masked_image,
**func_kwargs,
)
return imutils.ret_and_save_image(masked_image, output_path, src_mode)
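# Minimal usage sketch for masked_composite (file names are assumptions; the mask must
# be the same size as the image): the transform is applied to the whole image, then
# composited back onto the original wherever the mask is opaque.
#
#   from functools import partial
#   aug = masked_composite(
#       "input.jpg", mask="mask.png", transform_function=partial(grayscale, mode="average")
#   )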
def meme_format(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
text: str = "LOL",
font_file: str = utils.MEME_DEFAULT_FONT,
opacity: float = 1.0,
text_color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
caption_height: int = 250,
meme_bg_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Creates a new image that looks like a meme, given text and an image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param text: the text to be overlaid/used in the meme. note: if using a very
long string, please add in newline characters such that the text remains
in a readable font size.
@param font_file: iopath uri to a .ttf font file
@param opacity: the lower the opacity, the more transparent the text
@param text_color: color of the text in RGB values
@param caption_height: the height of the meme caption
@param meme_bg_color: background color of the meme caption in RGB values
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert isinstance(text, str), "Expected variable `text` to be a string"
assert 0.0 <= opacity <= 1.0, "Opacity must be a value in the range [0.0, 1.0]"
assert caption_height > 10, "Caption height must be greater than 10"
utils.validate_rgb_color(text_color)
utils.validate_rgb_color(meme_bg_color)
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
width, height = image.size
local_font_path = utils.pathmgr.get_local_path(font_file)
font_size = caption_height - 10
while True:
font = ImageFont.truetype(local_font_path, font_size)
text_width, text_height = font.getsize_multiline(text)
if text_width <= (width - 10) and text_height <= (caption_height - 10):
break
font_size -= 5
meme = Image.new("RGB", (width, height + caption_height), meme_bg_color)
meme.paste(image, (0, caption_height))
x_pos = round((width - text_width) / 2)
y_pos = round((caption_height - text_height) / 2)
draw = ImageDraw.Draw(meme)
draw.multiline_text(
(x_pos, y_pos),
text,
# pyre-fixme[6]: Expected `Optional[ImageFont._Font]` for 3rd param but got
# `FreeTypeFont`.
font=font,
fill=(text_color[0], text_color[1], text_color[2], round(opacity * 255)),
align="center",
)
imutils.get_metadata(
metadata=metadata,
function_name="meme_format",
aug_image=meme,
**func_kwargs,
)
return imutils.ret_and_save_image(meme, output_path, src_mode)
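# Minimal usage sketch for meme_format (file name is an assumption): the font size
# starts at caption_height - 10 and is reduced in steps of 5 until the rendered text
# fits inside both the image width and the caption strip.
#
#   meme = meme_format("input.jpg", text="TOP TEXT\nBOTTOM TEXT", caption_height=200)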
def opacity(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
level: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alter the opacity of an image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param level: the level the opacity should be set to, where 0 means
completely transparent and 1 means no transparency at all
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert 0 <= level <= 1, "level must be a value in the range [0, 1]"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
image = image.convert(mode="RGBA")
mask = image.convert("RGBA").getchannel("A")
mask = Image.fromarray((np.array(mask) * level).astype(np.uint8))
background = Image.new("RGBA", image.size, (255, 255, 255, 0))
aug_image = Image.composite(image, background, mask)
imutils.get_metadata(
metadata=metadata,
function_name="opacity",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def overlay_emoji(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
emoji_path: str = utils.EMOJI_PATH,
opacity: float = 1.0,
emoji_size: float = 0.15,
x_pos: float = 0.4,
y_pos: float = 0.8,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlay an emoji onto the original image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param emoji_path: iopath uri to the emoji image
@param opacity: the lower the opacity, the more transparent the overlaid emoji
@param emoji_size: size of the emoji is emoji_size * height of the original image
@param x_pos: position of emoji relative to the image width
@param y_pos: position of emoji relative to the image height
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
local_emoji_path = utils.pathmgr.get_local_path(emoji_path)
aug_image = overlay_image(
image,
overlay=local_emoji_path,
output_path=output_path,
opacity=opacity,
overlay_size=emoji_size,
x_pos=x_pos,
y_pos=y_pos,
)
imutils.get_metadata(
metadata=metadata,
function_name="overlay_emoji",
aug_image=aug_image,
**func_kwargs,
)
return aug_image
def overlay_image(
image: Union[str, Image.Image],
overlay: Union[str, Image.Image],
output_path: Optional[str] = None,
opacity: float = 1.0,
overlay_size: float = 1.0,
x_pos: float = 0.4,
y_pos: float = 0.4,
max_visible_opacity: float = 0.75,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlays an image onto another image at position (width * x_pos, height * y_pos)
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param overlay: the path to an image or a variable of type PIL.Image.Image
that will be overlaid
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param opacity: the lower the opacity, the more transparent the overlaid image
@param overlay_size: size of the overlaid image is overlay_size * height
of the original image
@param x_pos: position of overlaid image relative to the image width
@param y_pos: position of overlaid image relative to the image height
@param max_visible_opacity: if bboxes are passed in, this param will be used as the
maximum opacity value through which the src image will still be considered
visible; see the function `overlay_image_bboxes_helper` in `utils/bboxes.py` for
more details about how this is used. If bboxes are not passed in, this is not used
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert 0.0 <= opacity <= 1.0, "Opacity must be a value in the range [0, 1]"
assert 0.0 <= overlay_size <= 1.0, "overlay_size must be a value in the range [0, 1]"
assert 0.0 <= x_pos <= 1.0, "x_pos must be a value in the range [0, 1]"
assert 0.0 <= y_pos <= 1.0, "y_pos must be a value in the range [0, 1]"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
overlay = imutils.validate_and_load_image(overlay)
im_width, im_height = image.size
overlay_width, overlay_height = overlay.size
new_height = max(1, int(im_height * overlay_size))
new_width = int(overlay_width * new_height / overlay_height)
overlay = overlay.resize((new_width, new_height))
try:
mask = overlay.convert("RGBA").getchannel("A")
mask = Image.fromarray((np.array(mask) * opacity).astype(np.uint8))
except ValueError:
mask = Image.new(mode="L", size=overlay.size, color=int(opacity * 255))
x = int(im_width * x_pos)
y = int(im_height * y_pos)
aug_image = image.convert(mode="RGBA")
aug_image.paste(im=overlay, box=(x, y), mask=mask)
imutils.get_metadata(
metadata=metadata,
function_name="overlay_image",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
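# Worked sketch of the overlay sizing above: the overlay is scaled to
# overlay_size * background height while preserving its own aspect ratio. For an
# 800x600 background, a 400x200 overlay and overlay_size=0.25, new_height = 150 and
# new_width = int(400 * 150 / 200) = 300; the overlay is then pasted at
# (int(800 * x_pos), int(600 * y_pos)), with opacity applied through its alpha channel
# (or a constant mask if it has none).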
def overlay_onto_background_image(
image: Union[str, Image.Image],
background_image: Union[str, Image.Image],
output_path: Optional[str] = None,
opacity: float = 1.0,
overlay_size: float = 1.0,
x_pos: float = 0.4,
y_pos: float = 0.4,
scale_bg: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlays the image onto a given background image at position
(width * x_pos, height * y_pos)
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param background_image: the path to an image or a variable of type PIL.Image.Image
onto which the source image will be overlaid
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param opacity: the lower the opacity, the more transparent the overlaid image
@param overlay_size: size of the overlaid image is overlay_size * height
of the background image
@param x_pos: position of overlaid image relative to the background image width with
respect to the x-axis
@param y_pos: position of overlaid image relative to the background image height with
respect to the y-axis
@param scale_bg: if True, the background image will be scaled up or down so that
overlay_size is respected; if False, the source image will be scaled instead
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert 0.0 <= overlay_size <= 1.0, "overlay_size must be a value in the range [0, 1]"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
if scale_bg:
background_image = resize(
background_image,
width=math.floor(image.width / overlay_size),
height=math.floor(image.height / overlay_size),
)
aug_image = overlay_image(
background_image,
overlay=image,
output_path=output_path,
opacity=opacity,
overlay_size=overlay_size,
x_pos=x_pos,
y_pos=y_pos,
)
imutils.get_metadata(
metadata=metadata,
function_name="overlay_onto_background_image",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def overlay_onto_screenshot(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
template_filepath: str = utils.TEMPLATE_PATH,
template_bboxes_filepath: str = utils.BBOXES_PATH,
max_image_size_pixels: Optional[int] = None,
crop_src_to_fit: bool = False,
resize_src_to_match_template: bool = True,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlay the image onto a screenshot template so it looks like it was
screenshotted on Instagram
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param template_filepath: iopath uri to the screenshot template
@param template_bboxes_filepath: iopath uri to the file containing the
bounding box for each template
@param max_image_size_pixels: if provided, the template image and/or src image
will be scaled down to avoid an output image with an area greater than this
size (in pixels)
@param crop_src_to_fit: if True, the src image will be cropped if necessary to fit
into the template image if the aspect ratios are different. If False, the src
image will instead be resized if needed
@param resize_src_to_match_template: if True, the src image will be resized if it is
too big or small in both dimensions to better match the template image. If False,
the template image will be resized to match the src image instead. It can be
useful to set this to True if the src image is very large so that the augmented
image isn't huge, but instead is the same size as the template image
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
template, bbox = imutils.get_template_and_bbox(
template_filepath, template_bboxes_filepath
)
if resize_src_to_match_template:
bbox_w, bbox_h = bbox[2] - bbox[0], bbox[3] - bbox[1]
image = scale(image, factor=min(bbox_w / image.width, bbox_h / image.height))
else:
template, bbox = imutils.scale_template_image(
image.size[0],
image.size[1],
template,
bbox,
max_image_size_pixels,
crop_src_to_fit,
)
bbox_w, bbox_h = bbox[2] - bbox[0], bbox[3] - bbox[1]
cropped_src = imutils.resize_and_pad_to_given_size(
image, bbox_w, bbox_h, crop=crop_src_to_fit
)
template.paste(cropped_src, box=bbox)
imutils.get_metadata(
metadata=metadata,
function_name="overlay_onto_screenshot",
aug_image=template,
**func_kwargs,
)
return imutils.ret_and_save_image(template, output_path, src_mode)
def overlay_stripes(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
line_width: float = 0.5,
line_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR,
line_angle: float = 0,
line_density: float = 0.5,
line_type: Optional[str] = "solid",
line_opacity: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlay stripe pattern onto the image (by default, white horizontal
stripes are overlaid)
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param line_width: the width of individual stripes as a float value ranging
from 0 to 1. Defaults to 0.5
@param line_color: color of the overlaid stripes in RGB values
@param line_angle: the angle of the stripes in degrees, ranging from
-360° to 360°. Defaults to 0° or horizontal stripes
@param line_density: controls the distance between stripes represented as a
float value ranging from 0 to 1, with 1 indicating more densely spaced
stripes. Defaults to 0.5
@param line_type: the type of stripes. Current options include: dotted,
dashed, and solid. Defaults to solid
@param line_opacity: the opacity of the stripes, ranging from 0 to 1 with
1 being opaque. Defaults to 1.0
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert (
0.0 <= line_width <= 1.0
), "Line width must be a value in the range [0.0, 1.0]"
assert (
-360.0 <= line_angle <= 360.0
), "Line angle must be a degree in the range [360.0, 360.0]"
assert (
0.0 <= line_density <= 1.0
), "Line density must be a value in the range [0.0, 1.0]"
assert (
0.0 <= line_opacity <= 1.0
), "Line opacity must be a value in the range [0.0, 1.0]"
assert line_type in utils.SUPPORTED_LINE_TYPES, "Stripe type not supported"
utils.validate_rgb_color(line_color)
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
width, height = image.size
binary_mask = imutils.compute_stripe_mask(
src_w=width,
src_h=height,
line_width=line_width,
line_angle=line_angle,
line_density=line_density,
)
if line_type == "dotted":
# To create dotted effect, multiply mask by stripes in perpendicular direction
perpendicular_mask = imutils.compute_stripe_mask(
src_w=width,
src_h=height,
line_width=line_width,
line_angle=line_angle + 90,
line_density=line_density,
)
binary_mask *= perpendicular_mask
elif line_type == "dashed":
# To create dashed effect, multiply mask by stripes with a larger line
# width in perpendicular direction
perpendicular_mask = imutils.compute_stripe_mask(
src_w=width,
src_h=height,
line_width=0.7,
line_angle=line_angle + 90,
line_density=line_density,
)
binary_mask *= perpendicular_mask
mask = Image.fromarray(np.uint8(binary_mask * line_opacity * 255))
foreground = Image.new("RGB", image.size, line_color)
aug_image = image.copy() # to avoid modifying the input image
aug_image.paste(foreground, (0, 0), mask=mask)
imutils.get_metadata(
metadata=metadata,
function_name="overlay_stripes",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
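# Minimal usage sketch for overlay_stripes (file name is an assumption): the dotted and
# dashed effects are built by multiplying the stripe mask with a second mask rotated
# 90 degrees, as in the branches above.
#
#   striped = overlay_stripes(
#       "input.jpg", line_width=0.1, line_angle=45, line_type="dashed", line_opacity=0.5
#   )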
def overlay_text(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
text: List[Union[int, List[int]]] = utils.DEFAULT_TEXT_INDICES,
font_file: str = utils.FONT_PATH,
font_size: float = 0.15,
opacity: float = 1.0,
color: Tuple[int, int, int] = utils.RED_RGB_COLOR,
x_pos: float = 0.0,
y_pos: float = 0.5,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Overlay text onto the image (by default, text is randomly overlaid)
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param text: indices into the character list stored alongside `font_file` (in its
companion .pkl file) of the characters to be overlaid. Each line of text is
represented as a list of int indices; if a list of lists is supplied,
multiple lines of text will be overlaid
@param font_file: iopath uri to the .ttf font file
@param font_size: size of the overlaid characters, calculated as
font_size * min(height, width) of the original image
@param opacity: the lower the opacity, the more transparent the overlaid text
@param color: color of the overlaid text in RGB values
@param x_pos: position of the overlaid text relative to the image width
@param y_pos: position of the overlaid text relative to the image height
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert 0.0 <= opacity <= 1.0, "Opacity must be a value in the range [0.0, 1.0]"
assert 0.0 <= font_size <= 1.0, "Font size must be a value in the range [0.0, 1.0]"
assert 0.0 <= x_pos <= 1.0, "x_pos must be a value in the range [0.0, 1.0]"
assert 0.0 <= y_pos <= 1.0, "y_pos must be a value in the range [0.0, 1.0]"
utils.validate_rgb_color(color)
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
text_lists = text if all(isinstance(t, list) for t in text) else [text]
assert all(isinstance(t, list) for t in text_lists) and all(
all(isinstance(t, int) for t in text_l) # pyre-ignore text_l is a List[int]
for text_l in text_lists
), "Text must be a list of ints or a list of list of ints for multiple lines"
image = image.convert("RGBA")
width, height = image.size
local_font_path = utils.pathmgr.get_local_path(font_file)
font_size = int(min(width, height) * font_size)
font = ImageFont.truetype(local_font_path, font_size)
pkl_file = os.path.splitext(font_file)[0] + ".pkl"
local_pkl_path = utils.pathmgr.get_local_path(pkl_file)
with open(local_pkl_path, "rb") as f:
chars = pickle.load(f)
try:
text_strs = [
# pyre-fixme[16]: Item `int` of `Union[List[int], List[Union[List[int],
# int]], int]` has no attribute `__iter__`.
"".join([chr(chars[c % len(chars)]) for c in t])
for t in text_lists
]
except Exception:
raise IndexError("Invalid text indices specified")
draw = ImageDraw.Draw(image)
for i, text_str in enumerate(text_strs):
draw.text(
xy=(x_pos * width, y_pos * height + i * (font_size + 5)),
text=text_str,
fill=(color[0], color[1], color[2], round(opacity * 255)),
# pyre-fixme[6]: Expected `Optional[ImageFont._Font]` for 4th param but got
# `FreeTypeFont`.
font=font,
)
imutils.get_metadata(
metadata=metadata,
function_name="overlay_text",
aug_image=image,
**func_kwargs,
)
return imutils.ret_and_save_image(image, output_path, src_mode)
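# Minimal usage sketch for overlay_text (file name is an assumption): each int indexes,
# modulo its length, into the character list loaded from the .pkl file next to
# `font_file`, so the same indices render consistently for a given font.
#
#   noisy_text = overlay_text("input.jpg", text=[[1, 2, 3], [4, 5, 6]], font_size=0.1)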
def pad(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
w_factor: float = 0.25,
h_factor: float = 0.25,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Pads the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param w_factor: width * w_factor pixels are padded to both left and right
of the image
@param h_factor: height * h_factor pixels are padded to the top and the
bottom of the image
@param color: color of the padded border in RGB values
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert w_factor >= 0, "w_factor cannot be a negative number"
assert h_factor >= 0, "h_factor cannot be a negative number"
utils.validate_rgb_color(color)
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
width, height = image.size
left = right = int(w_factor * width)
top = bottom = int(h_factor * height)
aug_image = Image.new(
mode="RGB",
size=(width + left + right, height + top + bottom),
color=color,
)
aug_image.paste(image, box=(left, top))
imutils.get_metadata(
metadata=metadata,
function_name="pad",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def pad_square(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Pads the shorter edge of the image such that it is now square-shaped
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param color: color of the padded border in RGB values
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
utils.validate_rgb_color(color)
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
width, height = image.size
if width < height:
h_factor = 0
dw = height - width
w_factor = dw / (2 * width)
else:
w_factor = 0
dh = width - height
h_factor = dh / (2 * height)
aug_image = pad(image, output_path, w_factor, h_factor, color)
imutils.get_metadata(
metadata=metadata,
function_name="pad_square",
aug_image=aug_image,
**func_kwargs,
)
return aug_image
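# Worked sketch of the padding math above: the shorter edge is padded by half the size
# difference on each side. For a 640x480 input, dh = 160 and
# h_factor = 160 / (2 * 480) = 1/6, so pad() adds int(480 / 6) = 80 px to both the top
# and the bottom, producing a 640x640 output.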
def perspective_transform(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
sigma: float = 50.0,
dx: float = 0.0,
dy: float = 0.0,
seed: Optional[int] = 42,
crop_out_black_border: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Apply a perspective transform to the image so it looks like it was taken
as a photo from another device (e.g. taking a picture from your phone of a
picture on a computer).
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param sigma: the standard deviation of the distribution of destination
coordinates. the larger the sigma value, the more intense the transform
@param dx: change in x for the perspective transform; instead of providing
`sigma` you can provide a scalar value to be precise
@param dy: change in y for the perspective transform; instead of providing
`sigma` you can provide a scalar value to be precise
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param crop_out_black_border: if True, will crop out the black border resulting
from the perspective transform by cropping to the largest center rectangle
with no black
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert sigma >= 0, "Expected 'sigma' to be nonnegative"
assert isinstance(dx, (int, float)), "Expected 'dx' to be a number"
assert isinstance(dy, (int, float)), "Expected 'dy' to be a number"
assert seed is None or isinstance(
seed, int
), "Expected 'seed' to be an integer or set to None"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
rng = np.random.RandomState(seed) if seed is not None else np.random
width, height = image.size
src_coords = [(0, 0), (width, 0), (width, height), (0, height)]
dst_coords = [
(rng.normal(point[0], sigma) + dx, rng.normal(point[1], sigma) + dy)
for point in src_coords
]
perspective_transform_coeffs = imutils.compute_transform_coeffs(
src_coords, dst_coords
)
aug_image = image.transform(
(width, height), Image.PERSPECTIVE, perspective_transform_coeffs, Image.BICUBIC
)
if crop_out_black_border:
top_left, top_right, bottom_right, bottom_left = dst_coords
new_left = max(0, top_left[0], bottom_left[0])
new_right = min(width, top_right[0], bottom_right[0])
new_top = max(0, top_left[1], top_right[1])
new_bottom = min(height, bottom_left[1], bottom_right[1])
if new_left >= new_right or new_top >= new_bottom:
raise Exception(
"Cannot crop out black border of a perspective transform this intense"
)
aug_image = aug_image.crop((new_left, new_top, new_right, new_bottom))
imutils.get_metadata(
metadata=metadata,
function_name="perspective_transform",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
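# Minimal usage sketch for perspective_transform (file name is an assumption): each
# corner of the image is jittered by a normal draw with standard deviation `sigma` and
# shifted by (dx, dy); with sigma=0 every corner moves by the same (dx, dy), so the
# result is a plain translation.
#
#   warped = perspective_transform("input.jpg", sigma=30.0, crop_out_black_border=True)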
def pixelization(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
ratio: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Pixelizes an image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param ratio: smaller values result in a more pixelated image, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert ratio > 0, "Expected 'ratio' to be a positive number"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
width, height = image.size
aug_image = image.resize((int(width * ratio), int(height * ratio)))
aug_image = aug_image.resize((width, height))
imutils.get_metadata(
metadata=metadata,
function_name="pixelization",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def random_noise(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
mean: float = 0.0,
var: float = 0.01,
seed: int = 42,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Adds random noise to the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param mean: mean of the gaussian noise added
@param var: variance of the gaussian noise added
@param seed: if provided, this will set the random seed before generating noise
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert type(mean) in [float, int], "Mean must be an integer or a float"
assert type(var) in [float, int], "Variance must be an integer or a float"
assert type(seed) == int, "Seed must be an integer"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
if seed is not None:
np.random.seed(seed=seed)
np_image = np.asarray(image).astype(np.float32)
np_image = np_image / 255.0
if np_image.min() < 0:
low_clip = -1.0
else:
low_clip = 0.0
sigma = var**0.5
gauss = np.random.normal(mean, sigma, (np_image.shape))
noisy_image = np_image + gauss
noisy_image = np.clip(noisy_image, low_clip, 1.0)
noisy_image *= 255.0
aug_image = Image.fromarray(np.uint8(noisy_image))
imutils.get_metadata(
metadata=metadata,
function_name="random_noise",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
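# --- Editorial usage sketch (not part of the library source) ---
# A hedged illustration of the function above: with the same seed, a larger
# `var` simply produces stronger Gaussian noise on the same base image. The
# gray test image and parameter values are illustrative assumptions.
def _example_random_noise_usage():
    from PIL import Image

    img = Image.new("RGB", (64, 64), color=(128, 128, 128))
    light_noise = random_noise(img, var=0.001, seed=1)
    heavy_noise = random_noise(img, var=0.05, seed=1)
    return light_noise, heavy_noise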
def resize(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
width: Optional[int] = None,
height: Optional[int] = None,
resample: Any = Image.BILINEAR,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Resizes an image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param width: the desired width the image should be resized to have. If
None, the original image width will be used
@param height: the desired height the image should be resized to have. If
None, the original image height will be used
@param resample: A resampling filter. This can be one of PIL.Image.NEAREST,
PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC, or
PIL.Image.LANCZOS
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert width is None or type(width) == int, "Width must be an integer"
assert height is None or type(height) == int, "Height must be an integer"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
im_w, im_h = image.size
aug_image = image.resize((width or im_w, height or im_h), resample)
imutils.get_metadata(
metadata=metadata,
function_name="resize",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def rotate(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
degrees: float = 15.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Rotates the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param degrees: the amount of degrees that the original image will be rotated
counter clockwise
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert type(degrees) in [float, int], "Degrees must be an integer or a float"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
rotated_image = image.rotate(degrees, expand=True)
center_x, center_y = rotated_image.width / 2, rotated_image.height / 2
wr, hr = imutils.rotated_rect_with_max_area(image.width, image.height, degrees)
aug_image = rotated_image.crop(
(
int(center_x - wr / 2),
int(center_y - hr / 2),
int(center_x + wr / 2),
int(center_y + hr / 2),
)
)
imutils.get_metadata(
metadata=metadata,
function_name="rotate",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
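# --- Editorial usage sketch (not part of the library source) ---
# A hedged illustration of the crop-after-rotation behavior above: the output
# is the largest axis-aligned rectangle containing no black border, so for a
# 30-degree rotation it is smaller than the 128x128 input. The test image is
# an illustrative assumption.
def _example_rotate_usage():
    from PIL import Image

    img = Image.new("RGB", (128, 128), color=(0, 255, 0))
    metadata = []
    aug = rotate(img, degrees=30.0, metadata=metadata)
    assert aug.width < 128 and aug.height < 128
    return aug, metadata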
def saturation(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
factor: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the saturation of the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param factor: a saturation factor of below 1.0 lowers the saturation, a
factor of 1.0 gives the original image, and a factor greater than 1.0
adds saturation
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
aug_image = ImageEnhance.Color(image).enhance(factor)
imutils.get_metadata(
metadata=metadata,
function_name="saturation",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def scale(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
factor: float = 0.5,
interpolation: Optional[int] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Alters the resolution of an image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param factor: the ratio by which the image should be downscaled or upscaled
@param interpolation: interpolation method. This can be one of PIL.Image.NEAREST,
PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC or
PIL.Image.LANCZOS
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
assert factor > 0, "Expected 'factor' to be a positive number"
assert interpolation in [
Image.NEAREST,
Image.BOX,
Image.BILINEAR,
Image.HAMMING,
Image.BICUBIC,
Image.LANCZOS,
None,
], "Invalid interpolation specified"
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
if interpolation is None:
interpolation = Image.LANCZOS if factor < 1 else Image.BILINEAR
width, height = image.size
scaled_width = int(width * factor)
scaled_height = int(height * factor)
# pyre-fixme[6]: Expected `Union[typing_extensions.Literal[0],
# typing_extensions.Literal[1], typing_extensions.Literal[2],
# typing_extensions.Literal[3], typing_extensions.Literal[4],
# typing_extensions.Literal[5], None]` for 2nd param but got `int`.
aug_image = image.resize((scaled_width, scaled_height), resample=interpolation)
imutils.get_metadata(
metadata=metadata,
function_name="scale",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def sharpen(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
factor: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Changes the sharpness of the image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param factor: a factor of below 1.0 blurs the image, a factor of 1.0 gives
the original image, and a factor greater than 1.0 sharpens the image
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    aug_image = ImageEnhance.Sharpness(image).enhance(factor)
    imutils.get_metadata(
        metadata=metadata,
        function_name="sharpen",
        aug_image=aug_image,
        **func_kwargs,
    )
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def shuffle_pixels(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
factor: float = 1.0,
seed: int = 10,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
    Shuffles the pixels of an image with respect to the shuffling factor. The
    factor denotes the fraction of pixels to be randomly selected and shuffled.
    Note: the number of pixels that actually move may be lower than this
    fraction, since some selected pixels can end up back in their original
    positions during shuffling
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param factor: a control parameter between 0.0 and 1.0. While a factor of
0.0 returns the original image, a factor of 1.0 performs full shuffling
@param seed: seed for numpy random generator to select random pixels for shuffling
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
np.random.seed(seed)
image = imutils.validate_and_load_image(image)
assert 0.0 <= factor <= 1.0, "'factor' must be a value in range [0, 1]"
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
if factor == 0.0:
aug_image = image
else:
aug_image = np.asarray(image, dtype=int)
height, width = aug_image.shape[:2]
number_of_channels = aug_image.size // (height * width)
number_of_pixels = height * width
aug_image = np.reshape(aug_image, (number_of_pixels, number_of_channels))
mask = np.random.choice(
number_of_pixels, size=int(factor * number_of_pixels), replace=False
)
pixels_to_be_shuffled = aug_image[mask]
np.random.shuffle(pixels_to_be_shuffled)
aug_image[mask] = pixels_to_be_shuffled
aug_image = np.reshape(aug_image, (height, width, number_of_channels))
aug_image = np.squeeze(aug_image)
aug_image = Image.fromarray(aug_image.astype("uint8"))
imutils.get_metadata(
metadata=metadata,
function_name="shuffle_pixels",
aug_image=aug_image,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def skew(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
skew_factor: float = 0.5,
axis: int = 0,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Skews an image with respect to its x or y-axis
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
    @param skew_factor: the level of skew to apply to the image; a larger absolute
        value results in a more intense skew. Recommended range is [-2, 2]
@param axis: the axis along which the image will be skewed; can be set to 0 (x-axis)
or 1 (y-axis)
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
image = imutils.validate_and_load_image(image)
func_kwargs = imutils.get_func_kwargs(metadata, locals())
src_mode = image.mode
w, h = image.size
if axis == 0:
data = (1, skew_factor, -skew_factor * h / 2, 0, 1, 0)
elif axis == 1:
data = (1, 0, 0, skew_factor, 1, -skew_factor * w / 2)
else:
raise AssertionError(
f"Invalid 'axis' value: Got '{axis}', expected 0 for 'x-axis' or 1 for 'y-axis'"
)
aug_image = image.transform((w, h), Image.AFFINE, data, resample=Image.BILINEAR)
imutils.get_metadata(
metadata=metadata,
function_name="skew",
aug_image=aug_image,
bboxes_helper_func=spatial_bbox_helper,
aug_function=skew,
**func_kwargs,
)
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def vflip(
image: Union[str, Image.Image],
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Vertically flips an image
@param image: the path to an image or a variable of type PIL.Image.Image
to be augmented
@param output_path: the path in which the resulting image will be stored.
If None, the resulting PIL Image will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
@param bbox_format: signifies what bounding box format was used in `bboxes`. Must
specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
"pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: the augmented PIL Image
"""
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    aug_image = image.transpose(Image.FLIP_TOP_BOTTOM)
    imutils.get_metadata(
        metadata=metadata,
        function_name="vflip",
        aug_image=aug_image,
        **func_kwargs,
    )
return imutils.ret_and_save_image(aug_image, output_path, src_mode)
|
AugLy-main
|
augly/image/functional.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Any, Dict, List, Optional, Tuple
from augly.image.transforms import BaseTransform
from PIL import Image
"""
Composition Operators:
Compose: the Compose operator was added here such that users
do not have to import `torchvision` in order to compose multiple
augmentations together. These operators work identically and either
can be used.
OneOf: the OneOf operator takes as input a list of transforms and
may apply (with probability p) one of the transforms in the list.
If a transform is applied, it is selected using the specified
probabilities of the individual transforms.
Example:
>>> Compose([
>>> Blur(),
>>> ColorJitter(saturation_factor=1.5),
>>> OneOf([
>>> OverlayOntoScreenshot(),
>>> OverlayEmoji(),
>>> OverlayText(),
>>> ]),
>>> ])
"""
class BaseComposition:
def __init__(self, transforms: List[BaseTransform], p: float = 1.0):
"""
@param transforms: a list of transforms
@param p: the probability of the transform being applied; default value is 1.0
"""
for transform in transforms:
assert isinstance(
transform, (BaseTransform, BaseComposition)
), "Expected instances of type `BaseTransform` or `BaseComposition` for variable `transforms`" # noqa: B950
assert 0 <= p <= 1.0, "p must be a value in the range [0, 1]"
self.transforms = transforms
self.p = p
class Compose(BaseComposition):
def __call__(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Applies the list of transforms in order to the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
        @param bbox_format: signifies what bounding box format was used in `bboxes`.
            Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format
            values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
if random.random() > self.p:
return image
for transform in self.transforms:
image = transform(
image, metadata=metadata, bboxes=bboxes, bbox_format=bbox_format
)
return image
class OneOf(BaseComposition):
def __init__(self, transforms: List[BaseTransform], p: float = 1.0):
"""
@param transforms: a list of transforms to select from; one of which
will be chosen to be applied to the media
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(transforms, p)
transform_probs = [t.p for t in transforms]
probs_sum = sum(transform_probs)
self.transform_probs = [t / probs_sum for t in transform_probs]
def __call__(
self,
image: Image.Image,
metadata: Optional[List[Dict[str, Any]]] = None,
bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
) -> Image.Image:
"""
Applies one of the transforms to the image
@param image: PIL Image to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest width, height, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param bboxes: a list of bounding boxes can be passed in here if desired. If
provided, this list will be modified in place such that each bounding box is
transformed according to this function
        @param bbox_format: signifies what bounding box format was used in `bboxes`.
            Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format
            values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
@returns: Augmented PIL Image
"""
if random.random() > self.p:
return image
transform = random.choices(self.transforms, self.transform_probs)[0]
return transform(
image, force=True, metadata=metadata, bboxes=bboxes, bbox_format=bbox_format
)
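# --- Editorial usage sketch (not part of the library source) ---
# Assuming the transform classes referenced in the module docstring (Blur,
# ColorJitter, OverlayEmoji) are importable from `augly.image.transforms`, a
# composed pipeline can be applied to a PIL Image as follows; the image and
# parameter values are illustrative assumptions.
def _example_composition_usage():
    from augly.image.transforms import Blur, ColorJitter, OverlayEmoji
    from PIL import Image

    pipeline = Compose(
        [
            Blur(),
            ColorJitter(saturation_factor=1.5),
            OneOf([OverlayEmoji()], p=0.5),
        ]
    )
    image = Image.new("RGB", (128, 128), color=(255, 255, 255))
    metadata = []
    return pipeline(image, metadata=metadata)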
|
AugLy-main
|
augly/image/composition.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable
import numpy as np
from PIL import Image
def aug_np_wrapper(
image: np.ndarray, aug_function: Callable[..., None], **kwargs
) -> np.ndarray:
"""
This function is a wrapper on all image augmentation functions
such that a numpy array could be passed in as input instead of providing
the path to the image or a PIL Image
@param image: the numpy array representing the image to be augmented
@param aug_function: the augmentation function to be applied onto the image
@param **kwargs: the input attributes to be passed into the augmentation function
"""
pil_image = Image.fromarray(image)
aug_image = aug_function(pil_image, **kwargs)
return np.array(aug_image)
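# --- Editorial usage sketch (not part of the library source) ---
# A hedged illustration of the wrapper above, assuming `pixelization` is
# re-exported from `augly.image`: the augmentation runs directly on a numpy
# array and a numpy array is returned.
def _example_aug_np_wrapper_usage():
    from augly.image import pixelization

    np_image = np.full((64, 64, 3), 127, dtype=np.uint8)
    return aug_np_wrapper(np_image, pixelization, ratio=0.25)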
|
AugLy-main
|
augly/image/helpers.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
from augly.image import intensity as imintensity
from augly.image.utils import bboxes as imbboxes
from PIL import Image
def normalize_bbox(bbox: Tuple, bbox_format: str, src_w: int, src_h: int) -> Tuple:
if bbox_format == "pascal_voc_norm":
return bbox
elif bbox_format == "pascal_voc":
# (left, upper, right, lower) -> normalize
left, upper, right, lower = bbox
return (left / src_w, upper / src_h, right / src_w, lower / src_h)
elif bbox_format == "coco":
# (left, upper, w, h) -> add left & upper to w & h, normalize
left, upper, w, h = bbox
return (left / src_w, upper / src_h, (left + w) / src_w, (upper + h) / src_h)
else:
# (x_center_norm, y_center_norm, w_norm, h_norm) -> compute left & upper
x_center_norm, y_center_norm, w_norm, h_norm = bbox
left_norm = x_center_norm - w_norm / 2
upper_norm = y_center_norm - h_norm / 2
return (left_norm, upper_norm, left_norm + w_norm, upper_norm + h_norm)
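# --- Editorial worked example (not part of the library source) ---
# For a 100x200 image, the COCO box (left=10, upper=20, w=30, h=40) normalizes
# to the pascal_voc_norm coordinates (0.1, 0.1, 0.4, 0.3); the numbers are
# illustrative assumptions.
def _example_normalize_bbox():
    out = normalize_bbox((10, 20, 30, 40), "coco", src_w=100, src_h=200)
    expected = (0.1, 0.1, 0.4, 0.3)
    assert all(abs(a - b) < 1e-9 for a, b in zip(out, expected))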
def validate_and_normalize_bboxes(
bboxes: List[Tuple], bbox_format: str, src_w: int, src_h: int
) -> List[Tuple]:
norm_bboxes = []
for bbox in bboxes:
assert len(bbox) == 4 and all(
isinstance(x, (float, int)) for x in bbox
), f"Bounding boxes must be tuples of 4 floats; {bbox} is invalid"
norm_bboxes.append(normalize_bbox(bbox, bbox_format, src_w, src_h))
assert (
0 <= norm_bboxes[-1][0] <= norm_bboxes[-1][2] <= 1
and 0 <= norm_bboxes[-1][1] <= norm_bboxes[-1][3] <= 1
), f"Bounding box {bbox} is invalid or is not in {bbox_format} format"
return norm_bboxes
def convert_bboxes(
transformed_norm_bboxes: List[Optional[Tuple]],
bboxes: List[Tuple],
bbox_format: str,
aug_w: int,
aug_h: int,
) -> None:
if bbox_format == "pascal_voc_norm":
return
for i, bbox in enumerate(transformed_norm_bboxes):
if bbox is None:
continue
left_norm, upper_norm, right_norm, lower_norm = bbox
if bbox_format == "pascal_voc":
# denormalize -> (left, upper, right, lower)
bboxes[i] = (
left_norm * aug_w,
upper_norm * aug_h,
right_norm * aug_w,
lower_norm * aug_h,
)
elif bbox_format == "coco":
# denormalize, get w & h -> (left, upper, w, h)
left, upper = left_norm * aug_w, upper_norm * aug_h
right, lower = right_norm * aug_w, lower_norm * aug_h
bboxes[i] = (left, upper, right - left, lower - upper)
else:
# compute x & y center -> (x_center_norm, y_center_norm, w_norm, h_norm)
w_norm, h_norm = right_norm - left_norm, lower_norm - upper_norm
x_center_norm = left_norm + w_norm / 2
y_center_norm = upper_norm + h_norm / 2
bboxes[i] = (x_center_norm, y_center_norm, w_norm, h_norm)
def check_for_gone_bboxes(transformed_bboxes: List[Tuple]) -> List[Optional[Tuple]]:
"""
    When a bounding box is cropped out of the image or fully occluded by an
    overlay, we consider the bbox to no longer be visible/valid, so we return
    it as None
"""
checked_bboxes = []
for transformed_bbox in transformed_bboxes:
left_factor, upper_factor, right_factor, lower_factor = transformed_bbox
checked_bboxes.append(
None
if left_factor >= right_factor or upper_factor >= lower_factor
else transformed_bbox
)
return checked_bboxes
def transform_bboxes(
function_name: str,
image: Image.Image,
aug_image: Image.Image,
dst_bboxes: Optional[List[Tuple]] = None,
bbox_format: Optional[str] = None,
bboxes_helper_func: Optional[Callable] = None,
**kwargs,
) -> None:
if dst_bboxes is None:
return
assert bbox_format is not None and bbox_format in [
"pascal_voc",
"pascal_voc_norm",
"coco",
"yolo",
], "bbox_format must be specified if bboxes are passed in and must be a supported format"
src_w, src_h = image.size
aug_w, aug_h = aug_image.size
norm_bboxes = validate_and_normalize_bboxes(dst_bboxes, bbox_format, src_w, src_h)
if bboxes_helper_func is None:
bboxes_helper_func = getattr(
imbboxes, f"{function_name}_bboxes_helper", lambda bbox, **_: bbox
)
func_kwargs = deepcopy(kwargs)
func_kwargs.pop("src_bboxes", None)
transformed_norm_bboxes = [
bboxes_helper_func(bbox=bbox, src_w=src_w, src_h=src_h, **func_kwargs)
for bbox in norm_bboxes
]
transformed_norm_bboxes = check_for_gone_bboxes(transformed_norm_bboxes)
convert_bboxes(transformed_norm_bboxes, dst_bboxes, bbox_format, aug_w, aug_h)
def get_func_kwargs(
metadata: Optional[List[Dict[str, Any]]],
local_kwargs: Dict[str, Any],
**kwargs,
) -> Dict[str, Any]:
if metadata is None:
return {}
bboxes = local_kwargs.pop("bboxes", None)
bboxes = bboxes if len(metadata) == 0 else metadata[-1].get("dst_bboxes", None)
func_kwargs = deepcopy(local_kwargs)
func_kwargs.pop("metadata")
if bboxes is not None:
func_kwargs["src_bboxes"] = deepcopy(bboxes)
func_kwargs["dst_bboxes"] = deepcopy(bboxes)
func_kwargs.update(**deepcopy(kwargs))
return func_kwargs
def get_metadata(
metadata: Optional[List[Dict[str, Any]]],
function_name: str,
image: Optional[Image.Image] = None,
aug_image: Optional[Image.Image] = None,
bboxes: Optional[Tuple] = None,
bboxes_helper_func: Optional[Callable] = None,
**kwargs,
) -> None:
if metadata is None:
return
assert isinstance(
metadata, list
), "Expected `metadata` to be set to None or of type list"
assert (
image is not None
), "Expected `image` to be passed in if metadata was provided"
assert (
aug_image is not None
), "Expected `aug_image` to be passed in if metadata was provided"
transform_bboxes(
function_name=function_name,
image=image,
aug_image=aug_image,
bboxes_helper_func=bboxes_helper_func,
**kwargs,
)
    # JSON can't represent tuples, so they're serialized as lists, which are
    # equivalent for our purposes. To keep the metadata JSON-friendly, convert
    # any tuples to lists here.
kwargs_types_fixed = dict(
(k, list(v)) if isinstance(v, tuple) else (k, v) for k, v in kwargs.items()
)
if (
bboxes_helper_func is not None
and bboxes_helper_func.__name__ == "spatial_bbox_helper"
):
kwargs_types_fixed.pop("aug_function", None)
metadata.append(
{
"name": function_name,
"src_width": image.width,
"src_height": image.height,
"dst_width": aug_image.width,
"dst_height": aug_image.height,
**kwargs_types_fixed,
}
)
intensity_kwargs = {"metadata": metadata[-1], **kwargs}
metadata[-1]["intensity"] = getattr(
imintensity, f"{function_name}_intensity", lambda **_: 0.0
)(**intensity_kwargs)
|
AugLy-main
|
augly/image/utils/metadata.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.image.utils.metadata import get_func_kwargs, get_metadata
from augly.image.utils.utils import (
compute_stripe_mask,
compute_transform_coeffs,
get_template_and_bbox,
pad_with_black,
resize_and_pad_to_given_size,
ret_and_save_image,
rotated_rect_with_max_area,
scale_template_image,
square_center_crop,
validate_and_load_image,
)
__all__ = [
"get_func_kwargs",
"get_metadata",
"compute_stripe_mask",
"compute_transform_coeffs",
"get_template_and_bbox",
"pad_with_black",
"resize_and_pad_to_given_size",
"ret_and_save_image",
"rotated_rect_with_max_area",
"scale_template_image",
"square_center_crop",
"validate_and_load_image",
]
|
AugLy-main
|
augly/image/utils/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
JPEG_EXTENSIONS = [".jpg", ".JPG", ".jpeg", ".JPEG"]
def validate_and_load_image(image: Union[str, Image.Image]) -> Image.Image:
"""
If image is a str, loads the image as a PIL Image and returns it. Otherwise,
we assert that image is a PIL Image and then return it.
"""
if isinstance(image, str):
local_path = utils.pathmgr.get_local_path(image)
utils.validate_image_path(local_path)
return Image.open(local_path)
assert isinstance(
image, Image.Image
), "Expected type PIL.Image.Image for variable 'image'"
return image
def ret_and_save_image(
image: Image.Image, output_path: Optional[str], src_mode: Optional[str] = None
) -> Image.Image:
if src_mode is not None:
image = image.convert(src_mode)
if output_path is not None:
if (
any(output_path.endswith(extension) for extension in JPEG_EXTENSIONS)
or image.mode == "CMYK"
):
image = image.convert("RGB")
utils.validate_output_path(output_path)
image.save(output_path)
return image
@functools.lru_cache
def get_bboxes(template_bboxes_filepath: Optional[str] = None):
if template_bboxes_filepath is None:
template_bboxes_filepath = utils.BBOXES_PATH
local_bbox_path = utils.pathmgr.get_local_path(template_bboxes_filepath)
return json.load(open(local_bbox_path, "rb"))
def get_template_and_bbox(
template_filepath: str, template_bboxes_filepath: Optional[str]
) -> Tuple[Image.Image, Tuple[int, int, int, int]]:
local_template_path = utils.pathmgr.get_local_path(template_filepath)
template = Image.open(local_template_path)
bboxes = get_bboxes(template_bboxes_filepath)
template_key = os.path.basename(template_filepath)
bbox = bboxes[template_key]
return template, bbox
def rotated_rect_with_max_area(w: int, h: int, angle: float) -> Tuple[float, float]:
"""
Computes the width and height of the largest possible axis-aligned
rectangle (maximal area) within the rotated rectangle
source:
https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders # noqa: B950
"""
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
sin_a = abs(math.sin(math.radians(angle)))
cos_a = abs(math.cos(math.radians(angle)))
if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
x = 0.5 * side_short
wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
else:
cos_2a = cos_a * cos_a - sin_a * sin_a
wr = (w * cos_a - h * sin_a) / cos_2a
hr = (h * cos_a - w * sin_a) / cos_2a
return wr, hr
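# --- Editorial worked example (not part of the library source) ---
# Rotating a 100x100 square by 45 degrees leaves a largest axis-aligned inner
# rectangle of roughly 70.7 x 70.7 pixels (100 / sqrt(2) per side); the numbers
# are illustrative assumptions.
def _example_rotated_rect_with_max_area():
    wr, hr = rotated_rect_with_max_area(100, 100, 45.0)
    assert abs(wr - 100 / math.sqrt(2)) < 1e-6
    assert abs(hr - 100 / math.sqrt(2)) < 1e-6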
def pad_with_black(src: Image.Image, w: int, h: int) -> Image.Image:
"""
Returns the image src with the x dimension padded to width w if it was
smaller than w (and likewise for the y dimension with height h)
"""
curr_w, curr_h = src.size
dx = max(0, (w - curr_w) // 2)
dy = max(0, (h - curr_h) // 2)
padded = Image.new("RGB", (w, h))
padded.paste(src, (dx, dy, curr_w + dx, curr_h + dy))
return padded
def resize_and_pad_to_given_size(
src: Image.Image, w: int, h: int, crop: bool
) -> Image.Image:
"""
Returns the image src resized & padded with black if needed for the screenshot
transformation (i.e. if the spot for the image in the template is too small or
too big for the src image). If crop is True, will crop the src image if necessary
to fit into the template image; otherwise, will resize if necessary
"""
curr_w, curr_h = src.size
if crop:
dx = (curr_w - w) // 2
dy = (curr_h - h) // 2
src = src.crop((dx, dy, w + dx, h + dy))
curr_w, curr_h = src.size
elif curr_w > w or curr_h > h:
resize_factor = min(w / curr_w, h / curr_h)
new_w = int(curr_w * resize_factor)
new_h = int(curr_h * resize_factor)
src = src.resize((new_w, new_h), resample=Image.BILINEAR)
curr_w, curr_h = src.size
if curr_w < w or curr_h < h:
src = pad_with_black(src, w, h)
return src
def scale_template_image(
src_w: int,
src_h: int,
template_image: Image.Image,
bbox: Tuple[int, int, int, int],
max_image_size_pixels: Optional[int],
crop: bool,
) -> Tuple[Image.Image, Tuple[int, int, int, int]]:
"""
Return template_image, and bbox resized to fit the src image. Takes in the
width & height of the src image plus the bounding box where the src image
will be inserted into template_image. If the template bounding box is
bigger than src image in both dimensions, template_image is scaled down
such that the dimension that was closest to src_image matches, without
changing the aspect ratio (and bbox is scaled proportionally). Similarly if
src image is bigger than the bbox in both dimensions, template_image and
the bbox are scaled up.
"""
template_w, template_h = template_image.size
left, upper, right, lower = bbox
bbox_w, bbox_h = right - left, lower - upper
# Scale up/down template_image & bbox
if crop:
resize_factor = min(src_w / bbox_w, src_h / bbox_h)
else:
resize_factor = max(src_w / bbox_w, src_h / bbox_h)
# If a max image size is provided & the resized template image would be too large,
# resize the template image to the max image size.
if max_image_size_pixels is not None:
template_size = template_w * template_h
if template_size * resize_factor**2 > max_image_size_pixels:
resize_factor = math.sqrt(max_image_size_pixels / template_size)
template_w = int(template_w * resize_factor)
template_h = int(template_h * resize_factor)
bbox_w, bbox_h = int(bbox_w * resize_factor), int(bbox_h * resize_factor)
left, upper = int(left * resize_factor), int(upper * resize_factor)
right, lower = left + bbox_w, upper + bbox_h
bbox = (left, upper, right, lower)
template_image = template_image.resize(
(template_w, template_h), resample=Image.BILINEAR
)
return template_image, bbox
def square_center_crop(src: Image.Image) -> Image.Image:
"""Returns a square crop of the center of the image"""
w, h = src.size
smallest_edge = min(w, h)
dx = (w - smallest_edge) // 2
dy = (h - smallest_edge) // 2
return src.crop((dx, dy, dx + smallest_edge, dy + smallest_edge))
def compute_transform_coeffs(
src_coords: List[Tuple[int, int]], dst_coords: List[Tuple[float, float]]
) -> np.ndarray:
"""
Given the starting & desired corner coordinates, computes the
coefficients required by the perspective transform.
"""
matrix = []
for sc, dc in zip(src_coords, dst_coords):
matrix.append([dc[0], dc[1], 1, 0, 0, 0, -sc[0] * dc[0], -sc[0] * dc[1]])
matrix.append([0, 0, 0, dc[0], dc[1], 1, -sc[1] * dc[0], -sc[1] * dc[1]])
A = np.matrix(matrix, dtype=float)
B = np.array(src_coords).reshape(8)
res = np.dot(np.linalg.inv(A.T * A) * A.T, B)
return np.array(res).reshape(8)
def compute_stripe_mask(
src_w: int, src_h: int, line_width: float, line_angle: float, line_density: float
) -> np.ndarray:
"""
Given stripe parameters such as stripe width, angle, and density, returns
a binary mask of the same size as the source image indicating the location
of stripes. This implementation is inspired by
https://stackoverflow.com/questions/34043381/how-to-create-diagonal-stripe-patterns-and-checkerboard-patterns
"""
line_angle *= math.pi / 180
line_distance = (1 - line_density) * min(src_w, src_h)
y_period = math.cos(line_angle) / line_distance
x_period = math.sin(line_angle) / line_distance
y_coord_range = np.arange(0, src_h) - src_h / 2
x_coord_range = np.arange(0, src_w) - src_w / 2
x_grid_coords, y_grid_coords = np.meshgrid(x_coord_range, y_coord_range)
if abs(line_angle) == math.pi / 2 or abs(line_angle) == 3 * math.pi / 2:
# Compute mask for vertical stripes
softmax_mask = (np.cos(2 * math.pi * x_period * x_grid_coords) + 1) / 2
elif line_angle == 0 or abs(line_angle) == math.pi:
# Compute mask for horizontal stripes
softmax_mask = (np.cos(2 * math.pi * y_period * y_grid_coords) + 1) / 2
else:
# Compute mask for diagonal stripes
softmax_mask = (
np.cos(2 * math.pi * (x_period * x_grid_coords + y_period * y_grid_coords))
+ 1
) / 2
binary_mask = softmax_mask > (math.cos(math.pi * line_width) + 1) / 2
return binary_mask
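# --- Editorial worked example (not part of the library source) ---
# A hedged illustration of the stripe mask above: a 90-degree angle yields
# vertical stripes, and the boolean mask has the same (height, width) shape as
# the source image. The parameter values are illustrative assumptions.
def _example_compute_stripe_mask():
    mask = compute_stripe_mask(
        src_w=100, src_h=50, line_width=0.2, line_angle=90.0, line_density=0.5
    )
    assert mask.shape == (50, 100) and mask.dtype == bool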
|
AugLy-main
|
augly/image/utils/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
def crop_bboxes_helper(
bbox: Tuple, x1: float, y1: float, x2: float, y2: float, **kwargs
) -> Tuple:
"""
If part of the bbox was cropped out in the x-axis, the left/right side will now be
0/1 respectively; otherwise the fraction x1 is cut off from the left & x2 from the
right and we renormalize with the new width. Analogous for the y-axis
"""
left_factor, upper_factor, right_factor, lower_factor = bbox
new_w, new_h = x2 - x1, y2 - y1
return (
max(0, (left_factor - x1) / new_w),
max(0, (upper_factor - y1) / new_h),
min(1, 1 - (x2 - right_factor) / new_w),
min(1, 1 - (y2 - lower_factor) / new_h),
)
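# --- Editorial worked example (not part of the library source) ---
# Cropping the central half of the image (x1=y1=0.25, x2=y2=0.75) maps the
# normalized bbox (0.3, 0.3, 0.6, 0.6) to roughly (0.1, 0.1, 0.7, 0.7) in the
# cropped image's coordinates; the numbers are illustrative assumptions.
def _example_crop_bboxes_helper():
    out = crop_bboxes_helper((0.3, 0.3, 0.6, 0.6), x1=0.25, y1=0.25, x2=0.75, y2=0.75)
    expected = (0.1, 0.1, 0.7, 0.7)
    assert all(abs(a - b) < 1e-9 for a, b in zip(out, expected))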
def hflip_bboxes_helper(bbox: Tuple, **kwargs) -> Tuple:
"""
When the src image is horizontally flipped, the bounding box also gets horizontally
flipped
"""
left_factor, upper_factor, right_factor, lower_factor = bbox
return (1 - right_factor, upper_factor, 1 - left_factor, lower_factor)
def meme_format_bboxes_helper(
bbox: Tuple, src_w: int, src_h: int, caption_height: int, **kwargs
) -> Tuple:
"""
The src image is offset vertically by caption_height pixels, so we normalize that to
get the y offset, add that to the upper & lower coordinates, & renormalize with the
new height. The x dimension is unaffected
"""
left_f, upper_f, right_f, lower_f = bbox
y_off = caption_height / src_h
new_h = 1.0 + y_off
return left_f, (upper_f + y_off) / new_h, right_f, (lower_f + y_off) / new_h
def overlay_onto_background_image_bboxes_helper(
bbox: Tuple, overlay_size: float, x_pos: float, y_pos: float, **kwargs
) -> Tuple:
"""
The src image is overlaid on the dst image offset by (`x_pos`, `y_pos`) & with a
size of `overlay_size` (all relative to the dst image dimensions). So the bounding
box is also offset by (`x_pos`, `y_pos`) & scaled by `overlay_size`. It is also
possible that some of the src image will be cut off, so we take the max with 0/min
with 1 in order to crop the bbox if needed
"""
left_factor, upper_factor, right_factor, lower_factor = bbox
return (
max(0, left_factor * overlay_size + x_pos),
max(0, upper_factor * overlay_size + y_pos),
min(1, right_factor * overlay_size + x_pos),
min(1, lower_factor * overlay_size + y_pos),
)
def overlay_image_bboxes_helper(
bbox: Tuple,
opacity: float,
overlay_size: float,
x_pos: float,
y_pos: float,
max_visible_opacity: float,
**kwargs,
) -> Tuple:
"""
We made a few decisions for this augmentation about how bboxes are defined:
1. If `opacity` < `max_visible_opacity` (default 0.75, can be specified by the user),
the bbox stays the same because it is still considered "visible" behind the
overlaid image
2. If the entire bbox is covered by the overlaid image, the bbox is no longer valid
so we return it as (0, 0, 0, 0), which will be turned to None in `check_bboxes()`
3. If the entire bottom of the bbox is covered by the overlaid image
(i.e. `x_pos < left_factor` & `x_pos + overlay_size > right_factor` &
`y_pos + overlay_size > lower_factor`), we crop out the lower part of the bbox
that is covered. The analogue is true for the top/left/right being occluded
4. If just the middle of the bbox is covered or a rectangle is sliced out of the
bbox, we consider that the bbox is unchanged, even though part of it is occluded.
This isn't ideal but otherwise it's very complicated; we could split the
remaining area into smaller visible bboxes, but then we would have to return
multiple dst bboxes corresponding to one src bbox
"""
left_factor, upper_factor, right_factor, lower_factor = bbox
if opacity >= max_visible_opacity:
occluded_left = x_pos < left_factor
occluded_upper = y_pos < upper_factor
occluded_right = x_pos + overlay_size > right_factor
occluded_lower = y_pos + overlay_size > lower_factor
if occluded_left and occluded_right:
# If the bbox is completely covered, it's no longer valid so return zeros
if occluded_upper and occluded_lower:
return (0.0, 0.0, 0.0, 0.0)
if occluded_lower:
lower_factor = y_pos
elif occluded_upper:
upper_factor = y_pos + overlay_size
elif occluded_upper and occluded_lower:
if occluded_right:
right_factor = x_pos
elif occluded_left:
left_factor = x_pos + overlay_size
return left_factor, upper_factor, right_factor, lower_factor
def overlay_onto_screenshot_bboxes_helper(
bbox: Tuple,
src_w: int,
src_h: int,
template_filepath: str,
template_bboxes_filepath: str,
resize_src_to_match_template: bool,
max_image_size_pixels: int,
crop_src_to_fit: bool,
**kwargs,
) -> Tuple:
"""
We transform the bbox by applying all the same transformations as are applied in the
`overlay_onto_screenshot` function, each of which is mentioned below in comments
"""
left_f, upper_f, right_f, lower_f = bbox
template, tbbox = imutils.get_template_and_bbox(
template_filepath, template_bboxes_filepath
)
# Either src image or template image is scaled
if resize_src_to_match_template:
tbbox_w, tbbox_h = tbbox[2] - tbbox[0], tbbox[3] - tbbox[1]
src_scale_factor = min(tbbox_w / src_w, tbbox_h / src_h)
else:
template, tbbox = imutils.scale_template_image(
src_w,
src_h,
template,
tbbox,
max_image_size_pixels,
crop_src_to_fit,
)
tbbox_w, tbbox_h = tbbox[2] - tbbox[0], tbbox[3] - tbbox[1]
src_scale_factor = 1
template_w, template_h = template.size
x_off, y_off = tbbox[:2]
# Src image is scaled (if resize_src_to_match_template)
curr_w, curr_h = src_w * src_scale_factor, src_h * src_scale_factor
left, upper, right, lower = (
left_f * curr_w,
upper_f * curr_h,
right_f * curr_w,
lower_f * curr_h,
)
# Src image is cropped to (tbbox_w, tbbox_h)
if crop_src_to_fit:
dx, dy = (curr_w - tbbox_w) // 2, (curr_h - tbbox_h) // 2
x1, y1, x2, y2 = dx, dy, dx + tbbox_w, dy + tbbox_h
left_f, upper_f, right_f, lower_f = crop_bboxes_helper(
bbox, x1 / curr_w, y1 / curr_h, x2 / curr_w, y2 / curr_h
)
left, upper, right, lower = (
left_f * tbbox_w,
upper_f * tbbox_h,
right_f * tbbox_w,
lower_f * tbbox_h,
)
# Src image is resized to (tbbox_w, tbbox_h)
else:
resize_f = min(tbbox_w / curr_w, tbbox_h / curr_h)
left, upper, right, lower = (
left * resize_f,
upper * resize_f,
right * resize_f,
lower * resize_f,
)
curr_w, curr_h = curr_w * resize_f, curr_h * resize_f
# Padding with black
padding_x = max(0, (tbbox_w - curr_w) // 2)
padding_y = max(0, (tbbox_h - curr_h) // 2)
left, upper, right, lower = (
left + padding_x,
upper + padding_y,
right + padding_x,
lower + padding_y,
)
# Src image is overlaid onto template image
left, upper, right, lower = (
left + x_off,
upper + y_off,
right + x_off,
lower + y_off,
)
return left / template_w, upper / template_h, right / template_w, lower / template_h
def pad_bboxes_helper(bbox: Tuple, w_factor: float, h_factor: float, **kwargs) -> Tuple:
"""
The src image is padded horizontally with w_factor * src_w, so the bbox gets shifted
over by w_factor and then renormalized over the new width. Vertical padding is
analogous
"""
left_factor, upper_factor, right_factor, lower_factor = bbox
new_w = 1 + 2 * w_factor
new_h = 1 + 2 * h_factor
return (
(left_factor + w_factor) / new_w,
(upper_factor + h_factor) / new_h,
(right_factor + w_factor) / new_w,
(lower_factor + h_factor) / new_h,
)
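# --- Editorial worked example (not part of the library source) ---
# Padding an image by 25% of its size on every side maps the full-image bbox
# (0, 0, 1, 1) to (0.25/1.5, 0.25/1.5, 1.25/1.5, 1.25/1.5), i.e. roughly
# (0.167, 0.167, 0.833, 0.833); the numbers are illustrative assumptions.
def _example_pad_bboxes_helper():
    out = pad_bboxes_helper((0.0, 0.0, 1.0, 1.0), w_factor=0.25, h_factor=0.25)
    expected = (0.25 / 1.5, 0.25 / 1.5, 1.25 / 1.5, 1.25 / 1.5)
    assert all(abs(a - b) < 1e-9 for a, b in zip(out, expected))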
def pad_square_bboxes_helper(bbox: Tuple, src_w: int, src_h: int, **kwargs) -> Tuple:
"""
In pad_square, pad is called with w_factor & h_factor computed as follows, so we can
use the `pad_bboxes_helper` function to transform the bbox
"""
w_factor, h_factor = 0, 0
if src_w < src_h:
w_factor = (src_h - src_w) / (2 * src_w)
else:
h_factor = (src_w - src_h) / (2 * src_h)
return pad_bboxes_helper(bbox, w_factor=w_factor, h_factor=h_factor)
def perspective_transform_bboxes_helper(
bbox: Tuple,
src_w: int,
src_h: int,
sigma: float,
dx: float,
dy: float,
crop_out_black_border: bool,
seed: Optional[int],
**kwargs,
) -> Tuple:
"""
Computes the bbox that encloses the bbox in the perspective transformed image. Also
uses the `crop_bboxes_helper` function since the image is cropped if
`crop_out_black_border` is True.
"""
def transform(x: float, y: float, a: List[float]) -> Tuple:
"""
Transforms a point in the image given the perspective transform matrix; we will
use this to transform the bounding box corners. Based on PIL source code:
https://github.com/python-pillow/Pillow/blob/master/src/libImaging/Geometry.c#L399
"""
return (
(a[0] * x + a[1] * y + a[2]) / (a[6] * x + a[7] * y + a[8]),
(a[3] * x + a[4] * y + a[5]) / (a[6] * x + a[7] * y + a[8]),
)
def get_perspective_transform(
src_coords: List[Tuple[int, int]], dst_coords: List[Tuple[int, int]]
) -> List[float]:
"""
Computes the transformation matrix used for the perspective transform with
the given src & dst corner coordinates. Based on OpenCV source code:
https://github.com/opencv/opencv/blob/master/modules/imgproc/src/imgwarp.cpp#L3277-L3304
"""
a = np.zeros((8, 8), dtype=float)
dst_x, dst_y = zip(*dst_coords)
b = np.asarray(list(dst_x) + list(dst_y))
for i, (sc, dc) in enumerate(zip(src_coords, dst_coords)):
a[i][0] = a[i + 4][3] = sc[0]
a[i][1] = a[i + 4][4] = sc[1]
a[i][2] = a[i + 4][5] = 1
a[i][6] = -sc[0] * dc[0]
a[i][7] = -sc[1] * dc[0]
a[i + 4][6] = -sc[0] * dc[1]
a[i + 4][7] = -sc[1] * dc[1]
A = np.matrix(a, dtype=float)
B = np.array(b).reshape(8)
res = np.linalg.solve(A, B)
return np.array(res).reshape(8).tolist() + [1.0]
assert (
seed is not None
), "Cannot transform bbox for perspective_transform if seed is not provided"
rng = np.random.RandomState(seed)
src_coords = [(0, 0), (src_w, 0), (src_w, src_h), (0, src_h)]
dst_coords = [
(rng.normal(point[0], sigma) + dx, rng.normal(point[1], sigma) + dy)
for point in src_coords
]
perspective_transform_coeffs = get_perspective_transform(src_coords, dst_coords)
left_f, upper_f, right_f, lower_f = bbox
left, upper, right, lower = (
left_f * src_w,
upper_f * src_h,
right_f * src_w,
lower_f * src_h,
)
bbox_coords = [(left, upper), (right, upper), (right, lower), (left, lower)]
transformed_bbox_coords = [
transform(x + 0.5, y + 0.5, perspective_transform_coeffs)
for x, y in bbox_coords
]
transformed_xs, transformed_ys = zip(*transformed_bbox_coords)
transformed_bbox = (
max(0, min(transformed_xs) / src_w),
max(0, min(transformed_ys) / src_h),
min(1, max(transformed_xs) / src_w),
min(1, max(transformed_ys) / src_h),
)
    # The crop coordinates below are computed exactly as in `perspective_transform`
    # in `functional.py`
if crop_out_black_border:
top_left, top_right, bottom_right, bottom_left = dst_coords
new_left = max(0, top_left[0], bottom_left[0])
new_right = min(src_w, top_right[0], bottom_right[0])
new_top = max(0, top_left[1], top_right[1])
new_bottom = min(src_h, bottom_left[1], bottom_right[1])
transformed_bbox = crop_bboxes_helper(
transformed_bbox,
x1=new_left / src_w,
y1=new_top / src_h,
x2=new_right / src_w,
y2=new_bottom / src_h,
)
return transformed_bbox
def rotate_bboxes_helper(
bbox: Tuple, src_w: int, src_h: int, degrees: float, **kwargs
) -> Tuple:
"""
Computes the bbox that encloses the rotated bbox in the rotated image. This code was
informed by looking at the source code for PIL.Image.rotate
(https://pillow.readthedocs.io/en/stable/_modules/PIL/Image.html#Image.rotate).
Also uses the `crop_bboxes_helper` function since the image is cropped after being
rotated.
"""
left_f, upper_f, right_f, lower_f = bbox
left, upper, right, lower = (
left_f * src_w,
upper_f * src_h,
right_f * src_w,
lower_f * src_h,
)
# Top left, upper right, lower right, & lower left corner coefficients (in pixels)
bbox_corners = [(left, upper), (right, upper), (right, lower), (left, lower)]
def transform(x: int, y: int, matrix: List[float]) -> Tuple[float, float]:
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
def get_enclosing_bbox(
corners: List[Tuple[int, int]], rotation_matrix: List[float]
) -> Tuple[int, int, int, int]:
rotated_corners = [transform(x, y, rotation_matrix) for x, y in corners]
xs, ys = zip(*rotated_corners)
return (
math.floor(min(xs)),
math.floor(min(ys)),
math.ceil(max(xs)),
math.ceil(max(ys)),
)
# Get rotated bbox corner coefficients
rotation_center = (src_w // 2, src_h // 2)
angle_rad = -math.radians(degrees)
rotation_matrix = [
round(math.cos(angle_rad), 15),
round(math.sin(angle_rad), 15),
0.0,
round(math.sin(angle_rad), 15),
round(-math.cos(angle_rad), 15),
0.0,
]
rotation_matrix[2], rotation_matrix[5] = transform(
-rotation_center[0], -rotation_center[1], rotation_matrix
)
rotation_matrix[2] += rotation_center[0]
rotation_matrix[5] += rotation_center[1]
# Get rotated image dimensions
src_img_corners = [(0, 0), (src_w, 0), (src_w, src_h), (0, src_h)]
(
rotated_img_min_x,
rotated_img_min_y,
rotated_img_max_x,
rotated_img_max_y,
) = get_enclosing_bbox(src_img_corners, rotation_matrix)
rotated_img_w = rotated_img_max_x - rotated_img_min_x
rotated_img_h = rotated_img_max_y - rotated_img_min_y
# Get enclosing box corners around rotated bbox (on rotated image)
new_bbox_left, new_bbox_upper, new_bbox_right, new_bbox_lower = get_enclosing_bbox(
bbox_corners, rotation_matrix
)
bbox_enclosing_bbox = (
new_bbox_left / rotated_img_w,
new_bbox_upper / rotated_img_h,
new_bbox_right / rotated_img_w,
new_bbox_lower / rotated_img_h,
)
# Crop bbox as src image is cropped inside `rotate`
cropped_w, cropped_h = imutils.rotated_rect_with_max_area(src_w, src_h, degrees)
cropped_img_left, cropped_img_upper, cropped_img_right, cropped_img_lower = (
(rotated_img_w - cropped_w) // 2 + rotated_img_min_x,
(rotated_img_h - cropped_h) // 2 + rotated_img_min_y,
(rotated_img_w + cropped_w) // 2 + rotated_img_min_x,
(rotated_img_h + cropped_h) // 2 + rotated_img_min_y,
)
return crop_bboxes_helper(
bbox_enclosing_bbox,
x1=cropped_img_left / rotated_img_w,
y1=cropped_img_upper / rotated_img_h,
x2=cropped_img_right / rotated_img_w,
y2=cropped_img_lower / rotated_img_h,
)
def spatial_bbox_helper(
bbox: Tuple[float, float, float, float],
src_w: int,
src_h: int,
aug_function: Callable,
**kwargs,
) -> Tuple:
"""
Computes the bbox that encloses the transformed bbox in the image transformed by
`aug_function`. This helper can be used to compute the transformed bbox for any
augmentation which doesn't affect the color of the source image (e.g. any spatial
augmentation).
"""
dummy_image = Image.new("RGB", (src_w, src_h))
draw = ImageDraw.Draw(dummy_image)
draw.rectangle(
(bbox[0] * src_w, bbox[1] * src_h, bbox[2] * src_w, bbox[3] * src_h),
fill="white",
)
aug_image = aug_function(dummy_image, **kwargs)
aug_w, aug_h = aug_image.size
array_image = np.array(aug_image)
white_y, white_x, _ = np.where(array_image > 0)
min_x, max_x = np.min(white_x), np.max(white_x)
min_y, max_y = np.min(white_y), np.max(white_y)
return (min_x / aug_w, min_y / aug_h, max_x / aug_w, max_y / aug_h)
def vflip_bboxes_helper(bbox: Tuple, **kwargs) -> Tuple:
"""
Analogous to hflip, when the src image is vertically flipped, the bounding box also
gets vertically flipped
"""
left_factor, upper_factor, right_factor, lower_factor = bbox
return (left_factor, 1 - lower_factor, right_factor, 1 - upper_factor)
|
AugLy-main
|
augly/image/utils/bboxes.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.audio import functional as F
from augly.audio.utils import RNGSeed
"""
Base Classes for Transforms
"""
class BaseTransform:
def __init__(self, p: float = 1.0):
"""
@param p: the probability of the transform being applied; default value is 1.0
"""
assert 0 <= p <= 1.0, "p must be a value in the range [0, 1]"
self.p = p
def __call__(
self,
audio: np.ndarray,
sample_rate: int = utils.DEFAULT_SAMPLE_RATE,
metadata: Optional[List[Dict[str, Any]]] = None,
force: bool = False,
) -> Tuple[np.ndarray, int]:
"""
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
        @param force: if set to True, the transform will be applied. Otherwise,
            application is determined by the probability set
@returns: the augmented audio array and sample rate
"""
assert isinstance(audio, np.ndarray), "Audio passed in must be a np.ndarray"
assert type(force) == bool, "Expected type bool for variable `force`"
if not force and random.random() > self.p:
return audio, sample_rate
return self.apply_transform(audio, sample_rate, metadata)
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
This function is to be implemented in the child classes.
From this function, call the augmentation function with the
parameters specified
"""
raise NotImplementedError()
"""
Non-Random Transforms
These classes below are essentially class-based versions of the augmentation
functions previously defined. These classes were developed such that they can
be used with Composition operators (such as `torchvision`'s) and to support
use cases where a specific transform with specific attributes needs to be
applied multiple times.
Example:
>>> audio_array = np.array([...])
>>> pitch_shift_tsfm = PitchShift(n_steps=4.0, p=0.5)
>>> shifted_audio = pitch_shift_tsfm(audio_array, sample_rate)
"""
class AddBackgroundNoise(BaseTransform):
def __init__(
self,
background_audio: Optional[Union[str, np.ndarray]] = None,
snr_level_db: float = 10.0,
seed: Optional[RNGSeed] = None,
p: float = 1.0,
):
"""
@param background_audio: the path to the background audio or a variable of type
np.ndarray containing the background audio. If set to `None`, the background
audio will be white noise
@param snr_level_db: signal-to-noise ratio in dB
@param seed: a NumPy random generator (or seed) such that these results
remain reproducible
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.background_audio = background_audio
self.snr_level_db = snr_level_db
self.seed = seed
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Mixes in a background sound into the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.add_background_noise(
audio,
sample_rate,
self.background_audio,
self.snr_level_db,
self.seed,
metadata=metadata,
)
class ApplyLambda(BaseTransform):
def __init__(
self,
aug_function: Callable[..., Tuple[np.ndarray, int]] = lambda x, y: (x, y),
p: float = 1.0,
):
"""
@param aug_function: the augmentation function to be applied onto the audio
(should expect the audio np.ndarray & sample rate int as input, and return
the transformed audio & sample rate)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_function = aug_function
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Apply a user-defined lambda to the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.apply_lambda(audio, sample_rate, self.aug_function, metadata=metadata)
class ChangeVolume(BaseTransform):
def __init__(self, volume_db: float = 0.0, p: float = 1.0):
"""
@param volume_db: the decibel amount by which to either increase (positive
value) or decrease (negative value) the volume of the audio
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.volume_db = volume_db
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Changes the volume of the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.change_volume(audio, sample_rate, self.volume_db, metadata=metadata)
class Clicks(BaseTransform):
def __init__(self, seconds_between_clicks: float = 0.5, p: float = 1.0):
"""
@param seconds_between_clicks: the amount of time between each click that will
be added to the audio, in seconds
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.seconds_between_clicks = seconds_between_clicks
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adds clicks to the audio at a given regular interval
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.clicks(
audio, sample_rate, self.seconds_between_clicks, metadata=metadata
)
class Clip(BaseTransform):
def __init__(
self, offset_factor: float = 0.0, duration_factor: float = 1.0, p: float = 1.0
):
"""
@param offset_factor: start point of the crop relative to the audio duration
(this parameter is multiplied by the audio duration)
@param duration_factor: the length of the crop relative to the audio duration
(this parameter is multiplied by the audio duration)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.offset_factor = offset_factor
self.duration_factor = duration_factor
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Clips the audio using the specified offset and duration factors
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.clip(
audio,
sample_rate,
self.offset_factor,
self.duration_factor,
metadata=metadata,
)
class Harmonic(BaseTransform):
def __init__(
self,
kernel_size: int = 31,
power: float = 2.0,
margin: float = 1.0,
p: float = 1.0,
):
"""
@param kernel_size: kernel size for the median filters
@param power: exponent for the Wiener filter when constructing soft mask matrices
@param margin: margin size for the masks
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.kernel_size = kernel_size
self.power = power
self.margin = margin
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Extracts the harmonic part of the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.harmonic(
audio,
sample_rate,
self.kernel_size,
self.power,
self.margin,
metadata=metadata,
)
class HighPassFilter(BaseTransform):
def __init__(self, cutoff_hz: float = 3000.0, p: float = 1.0):
"""
@param cutoff_hz: frequency (in Hz) where signals with lower frequencies will
begin to be reduced by 6dB per octave (doubling in frequency) below this point
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.cutoff_hz = cutoff_hz
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Allows audio signals with a frequency higher than the given cutoff to pass
through and attenuates signals with frequencies lower than the cutoff frequency
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.high_pass_filter(audio, sample_rate, self.cutoff_hz, metadata=metadata)
class InsertInBackground(BaseTransform):
def __init__(
self,
offset_factor: float = 0.0,
background_audio: Optional[Union[str, np.ndarray]] = None,
seed: Optional[RNGSeed] = None,
p: float = 1.0,
):
"""
        @param offset_factor: insert point relative to the background duration
(this parameter is multiplied by the background duration)
@param background_audio: the path to the background audio or a variable of type
np.ndarray containing the background audio. If set to `None`, the background
audio will be white noise
@param seed: a NumPy random generator (or seed) such that these results
remain reproducible
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.offset_factor = offset_factor
self.background_audio = background_audio
self.seed = seed
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
        Inserts the audio into a background clip in a non-overlapping manner.
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.insert_in_background(
audio,
sample_rate,
self.offset_factor,
self.background_audio,
self.seed,
metadata=metadata,
)
class InvertChannels(BaseTransform):
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Inverts the channels of the audio.
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.invert_channels(audio, sample_rate, metadata=metadata)
class Loop(BaseTransform):
def __init__(self, n: int = 1, p: float = 1.0):
"""
@param n: the number of times the audio will be looped
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.n = n
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Loops the audio 'n' times
        @param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.loop(audio, sample_rate, self.n, metadata=metadata)
class LowPassFilter(BaseTransform):
def __init__(self, cutoff_hz: float = 500.0, p: float = 1.0):
"""
@param cutoff_hz: frequency (in Hz) where signals with higher frequencies will
begin to be reduced by 6dB per octave (doubling in frequency) above this point
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.cutoff_hz = cutoff_hz
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Allows audio signals with a frequency lower than the given cutoff to pass through
and attenuates signals with frequencies higher than the cutoff frequency
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.low_pass_filter(audio, sample_rate, self.cutoff_hz, metadata=metadata)
class Normalize(BaseTransform):
def __init__(
self,
norm: Optional[float] = np.inf,
axis: int = 0,
threshold: Optional[float] = None,
fill: Optional[bool] = None,
p: float = 1.0,
):
"""
@param norm: the type of norm to compute:
- np.inf: maximum absolute value
- -np.inf: minimum absolute value
- 0: number of non-zeros (the support)
- float: corresponding l_p norm
- None: no normalization is performed
@param axis: axis along which to compute the norm
@param threshold: if provided, only the columns (or rows) with norm of at
least `threshold` are normalized
@param fill: if None, then columns (or rows) with norm below `threshold` are
left as is. If False, then columns (rows) with norm below `threshold` are
set to 0. If True, then columns (rows) with norm below `threshold` are
filled uniformly such that the corresponding norm is 1
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.norm, self.axis = norm, axis
self.threshold, self.fill = threshold, fill
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Normalizes the audio array along the chosen axis (norm(audio, axis=axis) == 1)
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.normalize(
audio,
sample_rate,
self.norm,
self.axis,
self.threshold,
self.fill,
metadata=metadata,
)
class PeakingEqualizer(BaseTransform):
def __init__(
self,
center_hz: float = 500.0,
q: float = 1.0,
gain_db: float = -3.0,
p: float = 1.0,
):
"""
@param center_hz: point in the frequency spectrum at which EQ is applied
@param q: ratio of center frequency to bandwidth; bandwidth is inversely
proportional to Q, meaning that as you raise Q, you narrow the bandwidth
@param gain_db: amount of gain (boost) or reduction (cut) that is applied
at a given frequency. Beware of clipping when using positive gain
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.center_hz = center_hz
self.q = q
self.gain_db = gain_db
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Applies a two-pole peaking equalization filter. The signal-level at and around
`center_hz` can be increased or decreased, while all other frequencies are unchanged
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.peaking_equalizer(
audio, sample_rate, self.center_hz, self.q, self.gain_db, metadata=metadata
)
class Percussive(BaseTransform):
def __init__(
self,
kernel_size: int = 31,
power: float = 2.0,
margin: float = 1.0,
p: float = 1.0,
):
"""
@param kernel_size: kernel size for the median filters
@param power: exponent for the Wiener filter when constructing soft mask matrices
@param margin: margin size for the masks
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.kernel_size = kernel_size
self.power = power
self.margin = margin
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Extracts the percussive part of the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.percussive(
audio,
sample_rate,
self.kernel_size,
self.power,
self.margin,
metadata=metadata,
)
class PitchShift(BaseTransform):
def __init__(self, n_steps: float = 1.0, p: float = 1.0):
"""
@param n_steps: each step is equal to one semitone
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.n_steps = n_steps
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Shifts the pitch of the audio by `n_steps`
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.pitch_shift(audio, sample_rate, self.n_steps, metadata=metadata)
class Reverb(BaseTransform):
def __init__(
self,
reverberance: float = 50.0,
hf_damping: float = 50.0,
room_scale: float = 100.0,
stereo_depth: float = 100.0,
pre_delay: float = 0.0,
wet_gain: float = 0.0,
wet_only: bool = False,
p: float = 1.0,
):
"""
@param reverberance: (%) sets the length of the reverberation tail. This
determines how long the reverberation continues for after the original
sound being reverbed comes to an end, and so simulates the "liveliness"
of the room acoustics
@param hf_damping: (%) increasing the damping produces a more "muted" effect.
The reverberation does not build up as much, and the high frequencies decay
faster than the low frequencies
@param room_scale: (%) sets the size of the simulated room. A high value will
simulate the reverberation effect of a large room and a low value will
simulate the effect of a small room
@param stereo_depth: (%) sets the apparent "width" of the reverb effect for
stereo tracks only. Increasing this value applies more variation between
left and right channels, creating a more "spacious" effect. When set at
zero, the effect is applied independently to left and right channels
@param pre_delay: (ms) delays the onset of the reverberation for the set time
after the start of the original input. This also delays the onset of the
reverb tail
@param wet_gain: (db) applies volume adjustment to the reverberation ("wet")
component in the mix
@param wet_only: only the wet signal (added reverberation) will be in the
resulting output, and the original audio will be removed
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.reverberance = reverberance
self.hf_damping = hf_damping
self.room_scale = room_scale
self.stereo_depth = stereo_depth
self.pre_delay = pre_delay
self.wet_gain = wet_gain
self.wet_only = wet_only
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adds reverberation to the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.reverb(
audio,
sample_rate,
self.reverberance,
self.hf_damping,
self.room_scale,
self.stereo_depth,
self.pre_delay,
self.wet_gain,
self.wet_only,
metadata=metadata,
)
class Speed(BaseTransform):
def __init__(self, factor: float = 2.0, p: float = 1.0):
"""
        @param factor: the speed factor. If factor > 1 the audio will be sped up by
            that factor; if factor < 1 the audio will be slowed down by that factor
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Changes the speed of the audio, affecting pitch as well
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.speed(audio, sample_rate, self.factor, metadata=metadata)
class Tempo(BaseTransform):
def __init__(self, factor: float = 2.0, p: float = 1.0):
"""
        @param factor: the tempo factor. If factor > 1 the audio will be sped up by
            that factor; if factor < 1 the audio will be slowed down by that factor,
            without affecting the pitch
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adjusts the tempo of the audio by a given factor
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.tempo(audio, sample_rate, self.factor, metadata=metadata)
class TimeStretch(BaseTransform):
def __init__(self, rate: float = 1.5, p: float = 1.0):
"""
@param rate: the time stretch factor
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.rate = rate
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Time-stretches the audio by a fixed rate
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.time_stretch(audio, sample_rate, self.rate, metadata=metadata)
class ToMono(BaseTransform):
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Converts the audio from stereo to mono by averaging samples across channels
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.to_mono(audio, sample_rate, metadata=metadata)
class FFTConvolve(BaseTransform):
def __init__(
self,
normalize: bool = True,
impulse_audio: Optional[Union[str, np.ndarray]] = None,
seed: Optional[RNGSeed] = None,
p: float = 1.0,
):
"""
@param normalize: if True, normalize the output to the maximum amplitude
@param impulse_audio: the path to the audio or a variable of type np.ndarray that
will be used as the convolution filter
@param seed: the seed for the random number generator
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.impulse_audio = impulse_audio
self.seed = seed
self.normalize = normalize
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
        Applies a convolution to the audio using the given impulse filter, computed
        via FFT
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.fft_convolve(
audio,
sample_rate,
self.normalize,
self.impulse_audio,
self.seed,
metadata=metadata,
)
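# --- Illustrative sketch (not part of the original module) ---
# A hedged example of chaining the class-based transforms above. It assumes the
# `Compose` operator from augly.audio.composition accepts a list of transforms
# and is applied like a single transform; the sine-tone input is a synthetic
# placeholder rather than a real audio asset.
def _example_compose_transforms() -> None:
    from augly.audio.composition import Compose

    sample_rate = 16000
    audio = np.sin(2 * np.pi * 440.0 * np.arange(sample_rate) / sample_rate)
    transform = Compose(
        [
            AddBackgroundNoise(snr_level_db=5.0),
            PitchShift(n_steps=2.0, p=0.5),  # only applied half of the time
            ToMono(),
        ]
    )
    aug_audio, out_sample_rate = transform(audio, sample_rate)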
|
AugLy-main
|
augly/audio/transforms.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
"""
This file contains 'intensity' functions for each of our augmentations.
Intensity functions give a float representation of how intense a particular
transform called with particular parameters is.
Each intensity function expects as parameters the kwargs the corresponding
transform was called with, as well as the metadata dictionary computed by the
transform (e.g. metadata[-1] after passing a list 'metadata' into the transform).
All intensity functions are normalized to be in [0, 100]. This means we are
assuming a range of valid values for each param - e.g. for pitch_shift we
assume we will never pitch shift by more than seven octaves, meaning the range of
valid values for n_steps is [-84.0, 84.0], which you can see is assumed in the
intensity function below.
"""
def add_background_noise_intensity(snr_level_db: float = 10.0, **kwargs) -> float:
assert isinstance(snr_level_db, (float, int)), "snr_level_db must be a number"
max_snr_level_db_val = 110.0
return min(
((max_snr_level_db_val - snr_level_db) / max_snr_level_db_val) * 100.0, 100.0
)
def apply_lambda_intensity(
aug_function: Callable[..., Tuple[np.ndarray, int]],
**kwargs,
) -> float:
intensity_func = globals().get(f"{aug_function}_intensity")
return intensity_func(**kwargs) if intensity_func else 100.0
def change_volume_intensity(volume_db: float = 0.0, **kwargs) -> float:
    assert isinstance(volume_db, (float, int)), "volume_db must be a number"
max_volume_db_val = 110.0
return min((abs(volume_db) / max_volume_db_val) * 100.0, 100.0)
def clicks_intensity(
seconds_between_clicks: float = 0.5, snr_level_db: float = 1.0, **kwargs
) -> float:
assert (
isinstance(seconds_between_clicks, (float, int)) and seconds_between_clicks >= 0
), "seconds_between_clicks must be a nonnegative number"
assert isinstance(snr_level_db, (float, int)), "snr_level_db must be a number"
max_seconds_between_clicks_val = 60.0
max_snr_level_db_val = 110.0
seconds_between_clicks_intensity = (
max_seconds_between_clicks_val - seconds_between_clicks
) / max_seconds_between_clicks_val
snr_level_db_intensity = (
max_snr_level_db_val - snr_level_db
) / max_snr_level_db_val
return min(
(seconds_between_clicks_intensity * snr_level_db_intensity) * 100.0, 100.0
)
def clip_intensity(duration_factor: float = 1.0, **kwargs) -> float:
assert 0 < duration_factor <= 1, "duration_factor must be a number in (0, 1]"
max_duration_factor = 1.0
return min(
((max_duration_factor - duration_factor) / max_duration_factor) * 100.0, 100.0
)
def harmonic_intensity(**kwargs) -> float:
return 100.0
def high_pass_filter_intensity(cutoff_hz: float = 3000.0, **kwargs) -> float:
assert (
isinstance(cutoff_hz, (float, int)) and cutoff_hz >= 0
), "cutoff_hz must be a nonnegative number"
max_cutoff_hz_val = 20000.0
return min((cutoff_hz / max_cutoff_hz_val) * 100.0, 100.0)
def insert_in_background_intensity(metadata: Dict[str, Any], **kwargs) -> float:
bg_to_src_duration_ratio = (
metadata["dst_duration"] - metadata["src_duration"]
) / metadata["dst_duration"]
return bg_to_src_duration_ratio * 100.0
def invert_channels_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return 0.0 if metadata["src_num_channels"] == 1 else 100.0
def loop_intensity(n: int = 1, **kwargs) -> float:
assert isinstance(n, int) and n >= 0, "Expected 'n' to be a nonnegative integer"
max_num_loops = 100
return min((n / max_num_loops) * 100.0, 100.0)
def low_pass_filter_intensity(cutoff_hz: float = 500.0, **kwargs) -> float:
assert (
isinstance(cutoff_hz, (float, int)) and cutoff_hz >= 0
), "cutoff_hz must be a nonnegative number"
max_cutoff_hz_val = 20000.0
return min(((max_cutoff_hz_val - cutoff_hz) / max_cutoff_hz_val) * 100.0, 100.0)
def normalize_intensity(norm: Optional[float] = np.inf, **kwargs) -> float:
return 100.0 if norm else 0.0
def peaking_equalizer_intensity(q: float, gain_db: float, **kwargs) -> float:
assert isinstance(q, (int, float)) and q > 0, "Expected 'q' to be a positive number"
assert isinstance(gain_db, (int, float)), "Expected 'gain_db' to be a number"
max_q_val, max_gain_db_val = 46, 110.0
q_intensity = (max_q_val - q) / max_q_val
gain_db_intensity = abs(gain_db) / max_gain_db_val
return min((q_intensity * gain_db_intensity) * 100.0, 100.0)
def percussive_intensity(**kwargs) -> float:
return 100.0
def pitch_shift_intensity(n_steps: float = 2.0, **kwargs) -> float:
assert isinstance(n_steps, (float, int)), "n_steps must be a number"
max_nsteps_val = 84.0
return min((abs(n_steps) / max_nsteps_val) * 100.0, 100.0)
def reverb_intensity(
reverberance: float = 50.0,
wet_only: bool = False,
room_scale: float = 100.0,
**kwargs,
) -> float:
assert (
isinstance(reverberance, (float, int))
and 0 <= reverberance <= 100
and isinstance(room_scale, (float, int))
and 0 <= room_scale <= 100
), "reverberance & room_scale must be numbers in [0, 100]"
if wet_only:
return 100.0
max_reverberance_val = 100.0
max_room_scale_val = 100.0
return min(
(reverberance / max_reverberance_val)
* (room_scale / max_room_scale_val)
* 100.0,
100.0,
)
def speed_intensity(factor: float = 2.0, **kwargs) -> float:
assert (
isinstance(factor, (float, int)) and factor > 0
), "factor must be a positive number"
if factor == 1.0:
return 0.0
max_factor_val = 10.0
# We want the intensity of factor = 2 to be the same as the intensity of
# factor = 0.5, since they both change the speed by 2x.
# speed_change_factor represents how much the speed of the audio has changed,
# with a value in [1, inf).
speed_change_factor = factor if factor >= 1 else 1 / factor
return min((speed_change_factor / max_factor_val) * 100.0, 100.0)
def tempo_intensity(factor: float = 2.0, **kwargs) -> float:
assert (
isinstance(factor, (float, int)) and factor > 0
), "factor must be a positive number"
if factor == 1.0:
return 0.0
max_factor_val = 10.0
speed_change_factor = factor if factor >= 1 else 1 / factor
return min((speed_change_factor / max_factor_val) * 100.0, 100.0)
def time_stretch_intensity(rate: float = 1.5, **kwargs) -> float:
assert (
isinstance(rate, (float, int)) and rate > 0
), "factor must be a positive number"
if rate == 1.0:
return 0.0
max_rate_val = 10.0
speed_change_rate = rate if rate >= 1 else 1 / rate
return min((speed_change_rate / max_rate_val) * 100.0, 100.0)
def to_mono_intensity(metadata: Dict[str, Any], **kwargs) -> float:
return 0.0 if metadata["src_num_channels"] == 1 else 100.0
def fft_convolve_intensity(**kwargs) -> float:
# This is a full convolution, so return 100.0 here.
return 100.0
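# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how these intensity functions are read: each one maps the
# kwargs a transform was called with (plus, for some, its metadata) to a score in
# [0, 100]. The calls below only use functions defined above in this file.
def _example_intensities() -> None:
    # pitch_shift: |n_steps| / 84 * 100, so the direction of the shift is irrelevant
    assert pitch_shift_intensity(n_steps=12.0) == pitch_shift_intensity(n_steps=-12.0)
    # speed: a 2x speed-up and a 0.5x slow-down are considered equally intense
    assert speed_intensity(factor=2.0) == speed_intensity(factor=0.5)
    # to_mono: only meaningful when the source had more than one channel
    assert to_mono_intensity(metadata={"src_num_channels": 2}) == 100.0
    assert to_mono_intensity(metadata={"src_num_channels": 1}) == 0.0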
|
AugLy-main
|
augly/audio/intensity.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.audio.composition import Compose, OneOf
from augly.audio.functional import (
add_background_noise,
apply_lambda,
change_volume,
clicks,
clip,
fft_convolve,
harmonic,
high_pass_filter,
insert_in_background,
invert_channels,
loop,
low_pass_filter,
normalize,
peaking_equalizer,
percussive,
pitch_shift,
reverb,
speed,
tempo,
time_stretch,
to_mono,
)
from augly.audio.intensity import (
add_background_noise_intensity,
apply_lambda_intensity,
change_volume_intensity,
clicks_intensity,
clip_intensity,
fft_convolve_intensity,
harmonic_intensity,
high_pass_filter_intensity,
insert_in_background_intensity,
invert_channels_intensity,
loop_intensity,
low_pass_filter_intensity,
normalize_intensity,
peaking_equalizer_intensity,
percussive_intensity,
pitch_shift_intensity,
reverb_intensity,
speed_intensity,
tempo_intensity,
time_stretch_intensity,
to_mono_intensity,
)
from augly.audio.transforms import (
AddBackgroundNoise,
ApplyLambda,
ChangeVolume,
Clicks,
Clip,
FFTConvolve,
Harmonic,
HighPassFilter,
InsertInBackground,
InvertChannels,
Loop,
LowPassFilter,
Normalize,
PeakingEqualizer,
Percussive,
PitchShift,
Reverb,
Speed,
Tempo,
TimeStretch,
ToMono,
)
__all__ = [
"add_background_noise",
"apply_lambda",
"change_volume",
"clicks",
"clip",
"fft_convolve",
"harmonic",
"high_pass_filter",
"insert_in_background",
"invert_channels",
"loop",
"low_pass_filter",
"normalize",
"peaking_equalizer",
"percussive",
"pitch_shift",
"reverb",
"speed",
"tempo",
"time_stretch",
"to_mono",
"AddBackgroundNoise",
"ApplyLambda",
"ChangeVolume",
"Clicks",
"Clip",
"Compose",
"FFTConvolve",
"Harmonic",
"HighPassFilter",
"InsertInBackground",
"InvertChannels",
"Loop",
"LowPassFilter",
"Normalize",
"OneOf",
"PeakingEqualizer",
"Percussive",
"PitchShift",
"Reverb",
"Speed",
"Tempo",
"TimeStretch",
"ToMono",
"add_background_noise_intensity",
"apply_lambda_intensity",
"change_volume_intensity",
"clicks_intensity",
"clip_intensity",
"fft_convolve_intensity",
"harmonic_intensity",
"high_pass_filter_intensity",
"insert_in_background_intensity",
"invert_channels_intensity",
"loop_intensity",
"low_pass_filter_intensity",
"normalize_intensity",
"peaking_equalizer_intensity",
"percussive_intensity",
"pitch_shift_intensity",
"reverb_intensity",
"speed_intensity",
"tempo_intensity",
"time_stretch_intensity",
"to_mono_intensity",
]
|
AugLy-main
|
augly/audio/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
def add_background_noise(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
background_audio: Optional[Union[str, np.ndarray]] = None,
snr_level_db: float = 10.0,
seed: Optional[audutils.RNGSeed] = None,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Mixes in a background sound into the audio
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param background_audio: the path to the background audio or a variable of type
np.ndarray containing the background audio. If set to `None`, the background
audio will be white noise
@param snr_level_db: signal-to-noise ratio in dB
@param seed: a NumPy random generator (or seed) such that the results
remain reproducible
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(
snr_level_db, (int, float)
), "Expected 'snr_level_db' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs.pop("metadata")
func_kwargs.pop("seed")
random_generator = audutils.check_random_state(seed)
if background_audio is None:
background_audio = random_generator.standard_normal(audio.shape)
else:
background_audio, background_sr = audutils.validate_and_load_audio(
background_audio, sample_rate
)
if background_sr != sample_rate:
background_audio = resample(
torch.tensor(background_audio), background_sr, sample_rate
).numpy()
if metadata is not None:
func_kwargs["background_duration"] = background_audio.shape[-1] / sample_rate
audio_rms = np.sqrt(np.mean(np.square(audio), axis=-1))
bg_rms = np.sqrt(np.mean(np.square(background_audio), axis=-1))
desired_bg_rms = audio_rms / (10 ** (snr_level_db / 20))
if isinstance(bg_rms, np.number) and isinstance(desired_bg_rms, np.ndarray):
desired_bg_rms = desired_bg_rms.mean()
elif isinstance(bg_rms, np.ndarray) and isinstance(desired_bg_rms, np.number):
bg_rms = bg_rms.mean()
elif isinstance(bg_rms, np.ndarray) and isinstance(desired_bg_rms, np.ndarray):
bg_rms = bg_rms.reshape((bg_rms.shape[0], 1))
desired_bg_rms = desired_bg_rms.reshape((desired_bg_rms.shape[0], 1))
assert bg_rms.shape == desired_bg_rms.shape, (
"Handling stereo audio and stereo background audio with different "
"amounts of channels is currently unsupported"
)
background_audio *= desired_bg_rms / bg_rms
while background_audio.shape[-1] < audio.shape[-1]:
axis = 0 if background_audio.ndim == 1 else 1
background_audio = np.concatenate(
(background_audio, background_audio), axis=axis
)
background_audio = (
background_audio[: audio.shape[-1]]
if background_audio.ndim == 1
else background_audio[:, : audio.shape[-1]]
)
aug_audio = audio + background_audio
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="add_background_noise",
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
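# --- Illustrative sketch (not part of the original module) ---
# A hedged usage example for `add_background_noise`. With `background_audio=None`
# the background is white noise, scaled so the mix satisfies `snr_level_db`, i.e.
# the noise RMS becomes audio_rms / 10 ** (snr_level_db / 20). The sine-tone
# input is a synthetic placeholder rather than a real AugLy test asset.
def _example_add_background_noise() -> None:
    sr = DEFAULT_SAMPLE_RATE
    tone = np.sin(2 * np.pi * 440.0 * np.arange(sr) / sr).astype(np.float32)
    noisy, out_sr = add_background_noise(tone, sample_rate=sr, snr_level_db=20.0, seed=0)
    assert noisy.shape == tone.shape and out_sr == sr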
def apply_lambda(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
aug_function: Callable[..., Tuple[np.ndarray, int]] = lambda x, y: (x, y),
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> Tuple[np.ndarray, int]:
"""
Apply a user-defined lambda to the audio
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param aug_function: the augmentation function to be applied onto the audio (should
expect the audio np.ndarray & sample rate int as input, and return the
transformed audio & sample rate)
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@param **kwargs: the input attributes to be passed into `aug_function`
@returns: the augmented audio array and sample rate
"""
assert callable(aug_function), (
repr(type(aug_function).__name__) + " object is not callable"
)
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
aug_audio, out_sample_rate = aug_function(audio, sample_rate, **kwargs)
audutils.get_metadata(
metadata=metadata,
function_name="apply_lambda",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=out_sample_rate,
aug_function=aug_function.__name__,
output_path=output_path,
)
return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
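# --- Illustrative sketch (not part of the original module) ---
# A hedged example of `apply_lambda` with a user-defined augmentation. The helper
# below simply attenuates the signal; any callable of the form
# (audio, sample_rate, **kwargs) -> (audio, sample_rate) would work here.
def _example_apply_lambda() -> None:
    def _attenuate(
        audio: np.ndarray, sample_rate: int, gain: float = 0.5
    ) -> Tuple[np.ndarray, int]:
        return audio * gain, sample_rate

    audio = np.ones(DEFAULT_SAMPLE_RATE, dtype=np.float32)
    aug_audio, _ = apply_lambda(audio, aug_function=_attenuate, gain=0.25)
    assert np.allclose(aug_audio, 0.25)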
def change_volume(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
volume_db: float = 0.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Changes the volume of the audio
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param volume_db: the decibel amount by which to either increase
(positive value) or decrease (negative value) the volume of the audio
@param output_path: the path in which the resulting audio will be stored. If
None, the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(volume_db, (int, float)), "Expected 'volume_db' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
aug_audio = audio.reshape((num_channels, -1))
aug_audio, out_sample_rate = sox_effects.apply_effects_tensor(
torch.Tensor(aug_audio), sample_rate, [["vol", str(volume_db), "dB"]]
)
aug_audio = aug_audio.numpy()
if num_channels == 1:
aug_audio = aug_audio.reshape((aug_audio.shape[-1],))
audutils.get_metadata(
metadata=metadata,
function_name="change_volume",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
volume_db=volume_db,
output_path=output_path,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
def clicks(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
seconds_between_clicks: float = 0.5,
snr_level_db: float = 1.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adds clicks to the audio at a given regular interval
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param seconds_between_clicks: the amount of time between each click that
will be added to the audio, in seconds
@param snr_level_db: signal-to-noise ratio in dB
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(
seconds_between_clicks, (int, float)
), "Expected 'seconds_between_clicks' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
num_samples = audio.shape[-1]
seconds_in_audio = num_samples / sample_rate
times = np.arange(0, seconds_in_audio, seconds_between_clicks)
clicks_audio = librosa.clicks(times=times, sr=sample_rate)
aug_audio, out_sample_rate = add_background_noise(
audio,
sample_rate=sample_rate,
background_audio=clicks_audio,
snr_level_db=snr_level_db,
)
audutils.get_metadata(
metadata=metadata,
function_name="clicks",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=out_sample_rate,
seconds_between_clicks=seconds_between_clicks,
output_path=output_path,
clicks_duration=clicks_audio.shape[-1] / sample_rate,
)
return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
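# --- Illustrative sketch (not part of the original module) ---
# A hedged example of `clicks`: one click is generated every
# `seconds_between_clicks` seconds and mixed into the signal via
# `add_background_noise` at `snr_level_db`. The random input is a placeholder.
def _example_clicks() -> None:
    sr = DEFAULT_SAMPLE_RATE
    audio = np.random.randn(2 * sr).astype(np.float32)  # two seconds of noise
    clicked, out_sr = clicks(audio, sample_rate=sr, seconds_between_clicks=0.5)
    # two seconds with a click every 0.5s -> clicks at t = 0.0, 0.5, 1.0, 1.5
    assert clicked.shape[-1] == audio.shape[-1] and out_sr == sr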
def clip(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
offset_factor: float = 0.0,
duration_factor: float = 1.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Clips the audio using the specified offset and duration factors
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param offset_factor: start point of the crop relative to the audio duration
(this parameter is multiplied by the audio duration)
@param duration_factor: the length of the crop relative to the audio duration
(this parameter is multiplied by the audio duration)
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert (
0.0 <= (offset_factor + duration_factor) <= 1.0
), "Combination of offset and duration factors exceed audio length"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs.pop("metadata")
num_samples = audio.shape[-1]
start = int(offset_factor * num_samples)
end = int((offset_factor + duration_factor) * num_samples)
aug_audio = audio[..., start:end]
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="clip",
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
start_sample=start,
end_sample=end,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
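# --- Illustrative sketch (not part of the original module) ---
# A hedged example of `clip`: offset_factor and duration_factor are fractions of
# the input length, so clipping a 4-second signal with offset 0.25 and duration
# 0.5 keeps the samples between 1s and 3s.
def _example_clip() -> None:
    sr = DEFAULT_SAMPLE_RATE
    audio = np.random.randn(4 * sr).astype(np.float32)
    clipped, out_sr = clip(audio, sample_rate=sr, offset_factor=0.25, duration_factor=0.5)
    assert clipped.shape[-1] == 2 * sr and out_sr == sr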
def harmonic(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
kernel_size: int = 31,
power: float = 2.0,
margin: float = 1.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Extracts the harmonic part of the audio
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param kernel_size: kernel size for the median filters
@param power: exponent for the Wiener filter when constructing soft
mask matrices
@param margin: margin size for the masks
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(kernel_size, int), "Expected 'kernel_size' to be an int"
assert isinstance(power, (int, float)), "Expected 'power' to be a number"
assert isinstance(margin, (int, float)), "Expected 'margin' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs.pop("metadata")
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
if num_channels == 1:
aug_audio = librosa.effects.harmonic(
audio, kernel_size=kernel_size, power=power, margin=margin
)
else:
aug_audio = np.vstack(
[
librosa.effects.harmonic(
np.asfortranarray(audio[c]),
kernel_size=kernel_size,
power=power,
margin=margin,
)
for c in range(num_channels)
]
)
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="harmonic",
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
def high_pass_filter(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
cutoff_hz: float = 3000.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Allows audio signals with a frequency higher than the given cutoff to pass
through and attenuates signals with frequencies lower than the cutoff frequency
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param cutoff_hz: frequency (in Hz) where signals with lower frequencies will
begin to be reduced by 6dB per octave (doubling in frequency) below this point
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(cutoff_hz, (int, float)), "Expected 'cutoff_hz' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs.pop("metadata")
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
audio = audio.reshape((num_channels, -1))
aug_audio, out_sample_rate = sox_effects.apply_effects_tensor(
torch.Tensor(audio), sample_rate, [["highpass", str(cutoff_hz)]]
)
high_pass_array = aug_audio.numpy()
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="high_pass_filter",
dst_audio=high_pass_array,
dst_sample_rate=out_sample_rate,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(high_pass_array, output_path, out_sample_rate)
def insert_in_background(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
offset_factor: float = 0.0,
background_audio: Optional[Union[str, np.ndarray]] = None,
seed: Optional[audutils.RNGSeed] = None,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Inserts audio into a background clip in a non-overlapping manner.
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param offset_factor: insert point relative to the background duration
(this parameter is multiplied by the background duration)
@param background_audio: the path to the background audio or a variable of type
np.ndarray containing the background audio. If set to `None`, the background
audio will be white noise, with the same duration as the audio.
@param seed: a NumPy random generator (or seed) such that the results
remain reproducible
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert (
0.0 <= offset_factor <= 1.0
), "Expected 'offset_factor' to be a number in the range [0, 1]"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs.pop("metadata")
func_kwargs.pop("seed")
random_generator = audutils.check_random_state(seed)
if background_audio is None:
background_audio = random_generator.standard_normal(audio.shape)
else:
background_audio, _ = audutils.validate_and_load_audio(
background_audio, sample_rate
)
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
bg_num_channels = 1 if background_audio.ndim == 1 else background_audio.shape[0]
if bg_num_channels != num_channels:
background_audio, _background_sr = to_mono(background_audio)
if num_channels > 1:
background_audio = np.tile(background_audio, (num_channels, 1))
num_samples_bg = background_audio.shape[-1]
offset = int(offset_factor * num_samples_bg)
aug_audio = np.hstack(
[background_audio[..., :offset], audio, background_audio[..., offset:]]
)
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="insert_in_background",
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
background_duration=background_audio.shape[-1] / sample_rate,
offset=offset,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
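# --- Illustrative sketch (not part of the original module) ---
# A hedged example of `insert_in_background`: the foreground audio is spliced into
# the background at offset_factor * background_duration, so the output length is
# the sum of both inputs (no overlap). The arrays below are placeholders.
def _example_insert_in_background() -> None:
    sr = DEFAULT_SAMPLE_RATE
    foreground = np.ones(sr, dtype=np.float32)  # one second of foreground
    background = np.zeros(2 * sr, dtype=np.float32)  # two seconds of background
    out, out_sr = insert_in_background(
        foreground, sample_rate=sr, offset_factor=0.5, background_audio=background
    )
    assert out.shape[-1] == 3 * sr and out_sr == sr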
def invert_channels(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Inverts channels of the audio.
If the audio has only one channel, no change is applied.
    Otherwise, it inverts the order of the channels, e.g. for 4 channels,
it returns channels in order [3, 2, 1, 0].
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
aug_audio = audio
if audio.ndim > 1:
num_channels = audio.shape[0]
inverted_channels = list(range(num_channels))[::-1]
aug_audio = audio[inverted_channels, :]
audutils.get_metadata(
metadata=metadata,
function_name="invert_channels",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
output_path=output_path,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
def loop(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
n: int = 1,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Loops the audio 'n' times
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param n: the number of times the audio will be looped
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(n, int) and n >= 0, "Expected 'n' to be a nonnegative integer"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
aug_audio = audio
for _ in range(n):
aug_audio = np.append(aug_audio, audio, axis=(0 if audio.ndim == 1 else 1))
audutils.get_metadata(
metadata=metadata,
function_name="loop",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
output_path=output_path,
n=n,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
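# Illustrative usage of loop (a minimal sketch; with n=2 the original samples are
# appended twice, so the output contains three copies of the input):
# >>> audio = np.arange(4, dtype=float)
# >>> looped, sr = loop(audio, sample_rate=16000, n=2)
# >>> looped.shape[-1]
# 12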
def low_pass_filter(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
cutoff_hz: float = 500.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Allows audio signals with a frequency lower than the given cutoff to pass through
and attenuates signals with frequencies higher than the cutoff frequency
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param cutoff_hz: frequency (in Hz) where signals with higher frequencies will
begin to be reduced by 6dB per octave (doubling in frequency) above this point
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(cutoff_hz, (int, float)), "Expected 'cutoff_hz' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs.pop("metadata")
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
audio = audio.reshape((num_channels, -1))
aug_audio, out_sample_rate = sox_effects.apply_effects_tensor(
torch.Tensor(audio), sample_rate, [["lowpass", str(cutoff_hz)]]
)
low_pass_array = aug_audio.numpy()
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="low_pass_filter",
dst_audio=low_pass_array,
dst_sample_rate=out_sample_rate,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(low_pass_array, output_path, out_sample_rate)
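# Illustrative usage of low_pass_filter (a minimal sketch; cutoff_hz=300.0 is an
# arbitrary example value and the filtering is delegated to the sox "lowpass" effect):
# >>> sr = 16000
# >>> noisy = np.random.standard_normal(sr)
# >>> filtered, out_sr = low_pass_filter(noisy, sample_rate=sr, cutoff_hz=300.0)
# >>> out_sr == sr  # only high-frequency content is attenuated; the rate is unchanged
# True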
def normalize(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
norm: Optional[float] = np.inf,
axis: int = 0,
threshold: Optional[float] = None,
fill: Optional[bool] = None,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Normalizes the audio array along the chosen axis (norm(audio, axis=axis) == 1)
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param norm: the type of norm to compute:
- np.inf: maximum absolute value
- -np.inf: minimum absolute value
- 0: number of non-zeros (the support)
- float: corresponding l_p norm
- None: no normalization is performed
@param axis: axis along which to compute the norm
@param threshold: if provided, only the columns (or rows) with norm of at
least `threshold` are normalized
@param fill: if None, then columns (or rows) with norm below `threshold` are left
as is. If False, then columns (rows) with norm below `threshold` are set to 0.
If True, then columns (rows) with norm below `threshold` are filled uniformly
such that the corresponding norm is 1
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert (
isinstance(axis, int) and axis >= 0
), "Expected 'axis' to be a nonnegative number"
assert threshold is None or isinstance(
threshold, (int, float)
), "Expected 'threshold' to be a number or None"
assert fill is None or isinstance(
fill, bool
), "Expected 'threshold' to be a boolean or None"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs["norm"] = str(func_kwargs["norm"])
func_kwargs.pop("metadata")
aug_audio = librosa.util.normalize(
audio, norm=norm, axis=axis, threshold=threshold, fill=fill
)
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="normalize",
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
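# Illustrative usage of normalize (a minimal sketch; with the default norm=np.inf the
# output is rescaled so its maximum absolute value is 1):
# >>> quiet = 0.1 * np.sin(np.linspace(0, 2 * np.pi, 100))
# >>> normed, sr = normalize(quiet, sample_rate=16000)
# >>> round(float(np.abs(normed).max()), 6)
# 1.0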
def peaking_equalizer(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
center_hz: float = 500.0,
q: float = 1.0,
gain_db: float = -3.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Applies a two-pole peaking equalization filter. The signal-level at and around
`center_hz` can be increased or decreased, while all other frequencies are unchanged
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param center_hz: point in the frequency spectrum at which EQ is applied
@param q: ratio of center frequency to bandwidth; bandwidth is inversely
proportional to Q, meaning that as you raise Q, you narrow the bandwidth
@param gain_db: amount of gain (boost) or reduction (cut) that is applied at a
given frequency. Beware of clipping when using positive gain
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(center_hz, (int, float)), "Expected 'center_hz' to be a number"
assert isinstance(q, (int, float)) and q > 0, "Expected 'q' to be a positive number"
assert isinstance(gain_db, (int, float)), "Expected 'gain_db' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs.pop("metadata")
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
aug_audio = audio.reshape((num_channels, -1))
aug_audio, out_sample_rate = sox_effects.apply_effects_tensor(
torch.Tensor(aug_audio),
sample_rate,
[["equalizer", str(center_hz), f"{q}q", str(gain_db)]],
)
aug_audio = aug_audio.numpy()
if num_channels == 1:
aug_audio = aug_audio.reshape((aug_audio.shape[-1],))
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="peaking_equalizer",
dst_audio=aug_audio,
dst_sample_rate=out_sample_rate,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
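# Illustrative usage of peaking_equalizer (a minimal sketch; the values below are
# arbitrary examples applying a 6 dB cut in a narrow band around 1 kHz):
# >>> tone = np.sin(2 * np.pi * 1000 * np.arange(16000) / 16000)
# >>> eq_audio, sr = peaking_equalizer(tone, 16000, center_hz=1000.0, q=2.0, gain_db=-6.0)
# >>> eq_audio.ndim  # mono input is reshaped back to a 1-D array
# 1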
def percussive(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
kernel_size: int = 31,
power: float = 2.0,
margin: float = 1.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Extracts the percussive part of the audio
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param kernel_size: kernel size for the median filters
@param power: exponent for the Wiener filter when constructing soft mask matrices
@param margin: margin size for the masks
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(kernel_size, int), "Expected 'kernel_size' to be an int"
assert isinstance(power, (int, float)), "Expected 'power' to be a number"
assert isinstance(margin, (int, float)), "Expected 'margin' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs.pop("metadata")
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
if num_channels == 1:
aug_audio = librosa.effects.percussive(
audio, kernel_size=kernel_size, power=power, margin=margin
)
else:
aug_audio = np.vstack(
[
librosa.effects.percussive(
np.asfortranarray(audio[c]),
kernel_size=kernel_size,
power=power,
margin=margin,
)
for c in range(num_channels)
]
)
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="percussive",
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
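# Illustrative usage of percussive (a minimal sketch; the separation itself is
# delegated to librosa.effects.percussive, so this only shows the calling convention):
# >>> mixed = np.random.standard_normal(22050)
# >>> perc, sr = percussive(mixed, sample_rate=22050, margin=2.0)
# >>> sr
# 22050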
def pitch_shift(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
n_steps: float = 1.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Shifts the pitch of the audio by `n_steps`
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param n_steps: each step is equal to one semitone
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(n_steps, (int, float)), "Expected 'n_steps' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
if num_channels == 1:
aug_audio = librosa.effects.pitch_shift(audio, sr=sample_rate, n_steps=n_steps)
else:
aug_audio = np.vstack(
[
librosa.effects.pitch_shift(
np.asfortranarray(audio[c]), sr=sample_rate, n_steps=n_steps
)
for c in range(num_channels)
]
)
audutils.get_metadata(
metadata=metadata,
function_name="pitch_shift",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
output_path=output_path,
n_steps=n_steps,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
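# Illustrative usage of pitch_shift (a minimal sketch; n_steps=4.0 raises the pitch by
# four semitones while the duration and sample rate stay the same):
# >>> voice = np.random.standard_normal(22050)
# >>> shifted, sr = pitch_shift(voice, sample_rate=22050, n_steps=4.0)
# >>> shifted.shape[-1] == voice.shape[-1] and sr == 22050
# True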
def reverb(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
reverberance: float = 50.0,
hf_damping: float = 50.0,
room_scale: float = 100.0,
stereo_depth: float = 100.0,
pre_delay: float = 0.0,
wet_gain: float = 0.0,
wet_only: bool = False,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adds reverberation to the audio
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param reverberance: (%) sets the length of the reverberation tail. This determines
how long the reverberation continues for after the original sound being reverbed
comes to an end, and so simulates the "liveliness" of the room acoustics
@param hf_damping: (%) increasing the damping produces a more "muted" effect. The
reverberation does not build up as much, and the high frequencies decay faster
than the low frequencies
@param room_scale: (%) sets the size of the simulated room. A high value will
simulate the reverberation effect of a large room and a low value will simulate
the effect of a small room
@param stereo_depth: (%) sets the apparent "width" of the reverb effect for stereo
tracks only. Increasing this value applies more variation between left and right
channels, creating a more "spacious" effect. When set at zero, the effect is
applied independently to left and right channels
@param pre_delay: (ms) delays the onset of the reverberation for the set time after
the start of the original input. This also delays the onset of the reverb tail
@param wet_gain: (db) applies volume adjustment to the reverberation ("wet")
component in the mix
@param wet_only: only the wet signal (added reverberation) will be in the resulting
output, and the original audio will be removed
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert isinstance(
reverberance, (int, float)
), "Expected 'reverberance' to be a number"
assert isinstance(hf_damping, (int, float)), "Expected 'hf_damping' to be a number"
assert isinstance(room_scale, (int, float)), "Expected 'room_scale' to be a number"
assert isinstance(
stereo_depth, (int, float)
), "Expected 'stereo_depth' to be a number"
assert isinstance(pre_delay, (int, float)), "Expected 'pre_delay' to be a number"
assert isinstance(wet_gain, (int, float)), "Expected 'wet_gain' to be a number"
assert isinstance(wet_only, bool), "Expected 'wet_only' to be a number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
if metadata is not None:
func_kwargs = deepcopy(locals())
func_kwargs.pop("metadata")
aug_audio = audio.reshape((1, audio.shape[-1])) if audio.ndim == 1 else audio
effect = ["reverb"]
if wet_only:
effect.append("-w")
aug_audio, out_sample_rate = sox_effects.apply_effects_tensor(
torch.Tensor(aug_audio),
sample_rate,
[
effect
+ [
str(reverberance),
str(hf_damping),
str(room_scale),
str(stereo_depth),
str(pre_delay),
str(wet_gain),
]
],
)
aug_audio = aug_audio.numpy()
if audio.shape[0] == 1:
aug_audio = aug_audio.reshape((aug_audio.shape[-1],))
if metadata is not None:
audutils.get_metadata(
metadata=metadata,
function_name="reverb",
dst_audio=aug_audio,
dst_sample_rate=out_sample_rate,
# pyre-fixme[61]: `func_kwargs` may not be initialized here.
**func_kwargs,
)
return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
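# Illustrative usage of reverb (a minimal sketch; the parameter values are arbitrary
# examples and the effect itself is delegated to the sox "reverb" effect):
# >>> dry = np.random.standard_normal(16000)
# >>> wet, out_sr = reverb(dry, sample_rate=16000, reverberance=75.0, room_scale=50.0)
# >>> out_sr  # the sample rate is unchanged; only reverberation is added
# 16000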
def speed(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
factor: float = 2.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Changes the speed of the audio, affecting pitch as well
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
    @param factor: the speed factor. If factor > 1 the audio will be sped up by that
        factor; if factor < 1 the audio will be slowed down by that factor
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert (
isinstance(factor, (int, float)) and factor > 0
), "Expected 'factor' to be a positive number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
out_sample_rate = int(sample_rate * factor)
audutils.get_metadata(
metadata=metadata,
function_name="speed",
audio=audio,
sample_rate=sample_rate,
dst_audio=audio,
dst_sample_rate=out_sample_rate,
output_path=output_path,
factor=factor,
)
return audutils.ret_and_save_audio(audio, output_path, out_sample_rate)
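# Illustrative usage of speed (a minimal sketch; note that the samples are returned
# unchanged and the speed-up is expressed through the returned sample rate):
# >>> audio = np.random.standard_normal(16000)
# >>> same_samples, new_sr = speed(audio, sample_rate=16000, factor=2.0)
# >>> new_sr
# 32000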
def tempo(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
factor: float = 2.0,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adjusts the tempo of the audio by a given factor
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
    @param factor: the tempo factor. If factor > 1 the audio will be sped up by that
        factor; if factor < 1 the audio will be slowed down by that factor, without
        affecting the pitch
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert (
isinstance(factor, (int, float)) and factor > 0
), "Expected 'factor' to be a positive number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
aug_audio = audio.reshape((num_channels, -1))
aug_audio, out_sample_rate = sox_effects.apply_effects_tensor(
torch.Tensor(aug_audio), sample_rate, [["tempo", str(factor)]]
)
aug_audio = aug_audio.numpy()
if num_channels == 1:
aug_audio = aug_audio.reshape((aug_audio.shape[-1],))
audutils.get_metadata(
metadata=metadata,
function_name="tempo",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=out_sample_rate,
output_path=output_path,
factor=factor,
)
return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
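# Illustrative usage of tempo (a minimal sketch; factor=2.0 roughly halves the
# duration without changing the pitch or the sample rate):
# >>> audio = np.random.standard_normal(32000)
# >>> faster, sr = tempo(audio, sample_rate=16000, factor=2.0)
# >>> sr == 16000 and faster.shape[-1] < audio.shape[-1]
# True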
def time_stretch(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
rate: float = 1.5,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Time-stretches the audio by a fixed rate
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param rate: the time stretch factor. If rate > 1 the audio will be sped up by
that factor; if rate < 1 the audio will be slowed down by that factor
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
assert (
isinstance(rate, (int, float)) and rate > 0
), "Expected 'rate' to be a positive number"
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
if num_channels == 1:
aug_audio = librosa.effects.time_stretch(audio, rate=rate)
else:
aug_audio = np.vstack(
[
librosa.effects.time_stretch(np.asfortranarray(audio[c]), rate=rate)
for c in range(num_channels)
]
)
audutils.get_metadata(
metadata=metadata,
function_name="time_stretch",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
output_path=output_path,
rate=rate,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
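# Illustrative usage of time_stretch (a minimal sketch; rate=2.0 roughly halves the
# number of samples, with the stretch delegated to librosa.effects.time_stretch):
# >>> audio = np.random.standard_normal(32000)
# >>> stretched, sr = time_stretch(audio, sample_rate=16000, rate=2.0)
# >>> stretched.shape[-1] < audio.shape[-1]
# True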
def to_mono(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Converts the audio from stereo to mono by averaging samples across channels
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
aug_audio = librosa.core.to_mono(audio)
audutils.get_metadata(
metadata=metadata,
function_name="to_mono",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
output_path=output_path,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
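# Illustrative usage of to_mono (a minimal sketch; the two stereo channels below are
# averaged into a single channel):
# >>> stereo = np.stack([np.zeros(8), np.ones(8)])
# >>> mono, sr = to_mono(stereo, sample_rate=16000)
# >>> mono.ndim, float(mono[0])
# (1, 0.5)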
def fft_convolve(
audio: Union[str, np.ndarray],
sample_rate: int = DEFAULT_SAMPLE_RATE,
normalize: bool = True,
impulse_audio: Optional[Union[str, np.ndarray]] = None,
seed: Optional[audutils.RNGSeed] = None,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Applies a convolution operation to audio using an impulse response as the convolution filter
@param audio: the path to the audio or a variable of type np.ndarray that
will be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param normalize: if True, normalize the output to the maximum amplitude
@param impulse_audio: the path to the audio or a variable of type np.ndarray that
will be used as the convolution filter
@param seed: the seed for the random number generator
@param output_path: the path in which the resulting audio will be stored. If None,
the resulting np.ndarray will still be returned
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
num_channels = 1 if audio.ndim == 1 else audio.shape[0]
if impulse_audio is None:
random_generator = audutils.check_random_state(seed)
impulse_audio = random_generator.standard_normal(audio.shape)
else:
impulse_audio, impulse_sample_rate = audutils.validate_and_load_audio(
impulse_audio, sample_rate
)
if impulse_sample_rate != sample_rate:
impulse_audio = resample(
torch.tensor(impulse_audio), impulse_sample_rate, sample_rate
).numpy()
aug_audio = fftconvolve(torch.Tensor(audio), torch.Tensor(impulse_audio))
if normalize:
aug_audio = aug_audio / aug_audio.abs().max()
aug_audio = aug_audio.numpy()
if num_channels == 1:
aug_audio = aug_audio.reshape((aug_audio.shape[-1],))
audutils.get_metadata(
metadata=metadata,
function_name="fft_convolve",
audio=audio,
sample_rate=sample_rate,
dst_audio=aug_audio,
dst_sample_rate=sample_rate,
output_path=output_path,
)
return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
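# Illustrative usage of fft_convolve (a minimal sketch; the short impulse response is a
# made-up example, and the full convolution is returned, so the output has
# len(audio) + len(impulse_audio) - 1 samples):
# >>> audio = np.random.standard_normal(16000)
# >>> impulse = np.zeros(100); impulse[0] = 1.0
# >>> out, sr = fft_convolve(audio, sample_rate=16000, impulse_audio=impulse)
# >>> out.shape[-1] == audio.shape[-1] + impulse.shape[-1] - 1
# True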
|
AugLy-main
|
augly/audio/functional.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numbers
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly import utils
from augly.audio import intensity as audintensity
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
import librosa
import soundfile as sf # @manual=fbsource//third-party/pypi/soundfile:soundfile
import torchaudio
# Use Any because np.random.Generator is not a valid type for pyre
RNG = Any
RNGSeed = Union[int, RNG]
Segment = utils.Segment
def validate_and_load_audio(
audio: Union[str, np.ndarray], sample_rate: int = utils.DEFAULT_SAMPLE_RATE
) -> Tuple[np.ndarray, int]:
"""
If audio is a str, loads the audio as an np.ndarray and returns that & the
audio's sample rate (returned by librosa.load()). If audio is an np.ndarray,
just returns the passed in audio & sample_rate.
"""
if isinstance(audio, str):
local_path = utils.pathmgr.get_local_path(audio)
utils.validate_audio_path(local_path)
return librosa.load(local_path, sr=None, mono=False)
assert isinstance(
audio, np.ndarray
), "Expected type np.ndarray for variable 'audio'"
assert (
isinstance(sample_rate, int) and sample_rate > 0
), "Expected 'sample_rate' to be a positive integer"
return audio, sample_rate
def ret_and_save_audio(
audio: np.ndarray,
output_path: Optional[str],
sample_rate: int = utils.DEFAULT_SAMPLE_RATE,
) -> Tuple[np.ndarray, int]:
if output_path is not None:
utils.validate_output_path(output_path)
try:
# Note: librosa reads in audio data as (num_channels, num_samples),
# but soundfile expects it to be (num_samples, num_channels) when
# writing it out, so we have to swap axes here.
saved_audio = np.swapaxes(audio, 0, 1) if audio.ndim > 1 else audio
sf.write(output_path, saved_audio, sample_rate)
except TypeError:
saved_audio = audio if audio.ndim > 1 else audio.reshape(1, audio.shape[-1])
torchaudio.backend.sox_io_backend.save(
output_path, torch.Tensor(saved_audio), sample_rate, channels_first=True
)
return audio, sample_rate
def check_random_state(seed: Optional[RNGSeed]) -> RNG:
"""
Turn seed into a np.random.RandomState instance
@param seed: instance of RandomState:
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, (np.random.RandomState, np.random.Generator)):
return seed
raise ValueError(
f"{seed} cannot be used to seed a numpy.random.RandomState instance"
)
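# Illustrative usage of check_random_state (a minimal sketch; an int seed yields a
# reproducible RandomState, while an existing generator is returned unchanged):
# >>> rng = check_random_state(42)
# >>> rng is check_random_state(rng)
# True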
def get_metadata(
metadata: Optional[List[Dict[str, Any]]],
function_name: str,
audio: np.ndarray,
sample_rate: int,
dst_audio: np.ndarray,
dst_sample_rate: int,
**kwargs,
) -> None:
if metadata is None:
return
assert isinstance(
metadata, list
), "Expected 'metadata' to be set to None or of type list"
src_duration = audio.shape[-1] / sample_rate
dst_duration = dst_audio.shape[-1] / dst_sample_rate
src_segments, dst_segments = compute_segments(
function_name, src_duration, dst_duration, metadata, **kwargs
)
metadata.append(
{
"name": function_name,
"src_duration": src_duration,
"dst_duration": dst_duration,
"src_num_channels": 1 if audio.ndim == 1 else audio.shape[0],
"dst_num_channels": 1 if dst_audio.ndim == 1 else dst_audio.shape[0],
"src_sample_rate": sample_rate,
"dst_sample_rate": dst_sample_rate,
"src_segments": [src_segment._asdict() for src_segment in src_segments],
"dst_segments": [dst_segment._asdict() for dst_segment in dst_segments],
**kwargs,
}
)
intensity_kwargs = {"metadata": metadata[-1], **kwargs}
metadata[-1]["intensity"] = getattr(
audintensity, f"{function_name}_intensity", lambda **_: 0.0
)(**intensity_kwargs)
def compute_changed_segments(
name: str,
src_segments: List[Segment],
dst_segments: List[Segment],
src_duration: float,
dst_duration: float,
speed_factor: float,
**kwargs,
) -> Tuple[List[Segment], List[Segment]]:
"""
This function performs the logic of computing the new matching segments based
    on the old ones, for the set of transforms that temporally change the audio.
Returns the lists of new src segments & dst segments, respectively.
"""
new_src_segments, new_dst_segments = [], []
for src_segment, dst_segment in zip(src_segments, dst_segments):
if name == "insert_in_background":
offset = kwargs["offset_factor"] * kwargs["background_duration"]
# The matching segments are just offset in the dst audio by the amount
            # of background audio inserted before the src audio.
new_src_segments.append(src_segment)
new_dst_segments.append(dst_segment.delta(offset, offset))
elif name == "clip":
crop_start = kwargs["offset_factor"] * src_duration
crop_end = crop_start + kwargs["duration_factor"] * src_duration
utils.compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
crop_start,
crop_end,
new_src_segments,
new_dst_segments,
)
elif name == "fft_convolve":
new_src_segments.append(src_segment)
new_dst_segments.append(Segment(dst_segment.start, dst_duration))
elif name in [
"speed",
"tempo",
"time_stretch",
]:
# speed_factor > 1 if speedup, < 1 if slow down
speed_factor = src_duration / dst_duration
new_src_segments.append(src_segment)
new_dst_segments.append(
Segment(
dst_segment.start / speed_factor, dst_segment.end / speed_factor
)
)
return new_src_segments, new_dst_segments
def compute_segments(
name: str,
src_duration: float,
dst_duration: float,
metadata: List[Dict[str, Any]],
**kwargs,
) -> Tuple[List[Segment], List[Segment]]:
speed_factor = 1.0
if not metadata:
src_segments = [Segment(0.0, src_duration)]
dst_segments = [Segment(0.0, src_duration)]
else:
src_segments = [
Segment(segment_dict["start"], segment_dict["end"])
for segment_dict in metadata[-1]["src_segments"]
]
dst_segments = [
Segment(segment_dict["start"], segment_dict["end"])
for segment_dict in metadata[-1]["dst_segments"]
]
for meta in metadata:
if meta["name"] in ["speed", "tempo"]:
speed_factor *= meta["factor"]
if meta["name"] == "time_stretch":
speed_factor *= meta["rate"]
if name in [
"insert_in_background",
"clip",
"speed",
"tempo",
"time_stretch",
"fft_convolve",
]:
return compute_changed_segments(
name,
src_segments,
dst_segments,
src_duration,
dst_duration,
speed_factor,
**kwargs,
)
else:
return src_segments, dst_segments
|
AugLy-main
|
augly/audio/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.audio.transforms import BaseTransform
"""
Composition Operators:
Compose: identical to the Compose object provided by the torchvision
library, this class provides a similar experience for applying multiple
transformations onto audio
OneOf: the OneOf operator takes as input a list of transforms and
may apply (with probability p) one of the transforms in the list.
If a transform is applied, it is selected using the specified
probabilities of the individual transforms.
Example:
>>> Compose([
>>> Clip(duration_factor=0.5),
>>> VolumeChange(volume_db=10.0),
>>> OneOf([
>>> PitchShift(n_steps=4.0),
>>> TimeStretch(rate=1.5),
>>> ]),
>>> ])
"""
class BaseComposition:
def __init__(self, transforms: List[BaseTransform], p: float = 1.0):
"""
@param transforms: a list of transforms
@param p: the probability of the transform being applied; default value is 1.0
"""
for transform in transforms:
assert isinstance(
transform, (BaseTransform, BaseComposition)
), "Expected instances of type `BaseTransform` or `BaseComposition` for variable `transforms`" # noqa: B950
assert 0 <= p <= 1.0, "p must be a value in the range [0, 1]"
self.transforms = transforms
self.p = p
class Compose(BaseComposition):
def __call__(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Applies the list of transforms in order to the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
for transform in self.transforms:
audio, sample_rate = transform(audio, sample_rate, metadata)
return audio, sample_rate
class OneOf(BaseComposition):
def __init__(self, transforms: List[BaseTransform], p: float = 1.0):
"""
@param transforms: a list of transforms to select from; one of which will
be chosen to be applied to the audio
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(transforms, p)
transform_probs = [t.p for t in transforms]
probs_sum = sum(transform_probs)
self.transform_probs = [t / probs_sum for t in transform_probs]
def __call__(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Applies one of the transforms to the audio (with probability p)
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
if random.random() > self.p:
return audio, sample_rate
transform = random.choices(self.transforms, self.transform_probs)[0]
return transform(audio, sample_rate, metadata, force=True)
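# Illustrative usage of OneOf (a minimal sketch; PitchShift and TimeStretch are assumed
# to be importable from augly.audio.transforms, and the selection weights come from
# each transform's own `p` value):
# >>> from augly.audio.transforms import PitchShift, TimeStretch
# >>> one_of = OneOf([PitchShift(n_steps=2.0, p=0.75), TimeStretch(rate=1.25, p=0.25)])
# >>> aug_audio, sr = one_of(np.random.standard_normal(16000), 16000)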
|
AugLy-main
|
augly/audio/composition.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import random
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import functional as F
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
"""
Base Classes for Transforms
"""
class BaseTransform:
def __init__(self, p: float = 1.0):
"""
@param p: the probability of the transform being applied; default value is 1.0
"""
assert 0 <= p <= 1.0, "p must be a value in the range [0, 1]"
self.p = p
def __call__(
self,
texts: Union[str, List[str]],
force: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> Union[str, List[str]]:
"""
@param texts: a string or a list of text documents to be augmented
@param force: if set to True, the transform will be applied. Otherwise,
application is determined by the probability set
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
assert isinstance(
texts, (str, list)
), "Expected types List[str] or str for variable 'texts'"
assert isinstance(force, bool), "Expected type bool for variable 'force'"
if not force and random.random() > self.p:
return texts if isinstance(texts, list) else [texts]
return self.apply_transform(texts, metadata, **self.get_aug_kwargs(**kwargs))
def get_aug_kwargs(self, **kwargs) -> Dict[str, Any]:
"""
@param kwargs: any kwargs that were passed into __call__() intended to override
the instance variables set in __init__() when calling the augmentation
function in apply_transform()
@returns: the kwargs that should be passed into the augmentation function
apply_transform() -- this will be the instance variables set in __init__(),
potentially overridden by anything passed in as kwargs
"""
attrs = {
k: v
for k, v in inspect.getmembers(self)
if k not in {"apply_transform", "get_aug_kwargs", "p"}
and not k.startswith("__")
}
return {**attrs, **kwargs}
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
This function is to be implemented in the child classes. From this function, call
the augmentation function, passing in 'texts', 'metadata', & the given
'aug_kwargs'
"""
raise NotImplementedError()
"""
Non-Random Transforms
These classes below are essentially class-based versions of the augmentation
functions previously defined. These classes were developed such that they can
be used with Composition operators (such as `torchvision`'s) and to support
use cases where a specific transform with specific attributes needs to be
applied multiple times.
Example:
>>> texts = ["hello world", "bye planet"]
>>> tsfm = InsertPunctuationChars(granularity="all", p=0.5)
>>> aug_texts = tsfm(texts)
"""
class ApplyLambda(BaseTransform):
def __init__(
self,
aug_function: Callable[..., List[str]] = lambda x: x,
p: float = 1.0,
**kwargs,
):
"""
@param aug_function: the augmentation function to be applied onto the text
(should expect a list of text documents as input and return a list of
text documents)
@param p: the probability of the transform being applied; default value is 1.0
@param **kwargs: the input attributes to be passed into the augmentation
function to be applied
"""
super().__init__(p)
self.aug_function = aug_function
self.kwargs = kwargs
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Apply a user-defined lambda on a list of text documents
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
lambda_kwargs = aug_kwargs.pop("kwargs")
return F.apply_lambda(texts, metadata=metadata, **lambda_kwargs, **aug_kwargs)
class ChangeCase(BaseTransform):
def __init__(
self,
granularity: str = "word",
cadence: float = 1.0,
case: str = "random",
seed: Optional[int] = 10,
p: float = 1.0,
):
"""
@param granularity: 'all' (case of the entire text is changed), 'word' (case of
random words is changed), or 'char' (case of random chars is changed)
@param cadence: how frequent (i.e. between this many characters/words) to change
the case. Must be at least 1.0. Non-integer values are used as an 'average'
cadence. Not used for granularity 'all'
@param case: the case to change words to; valid values are 'lower', 'upper',
'title', or 'random' (in which case every word will be randomly changed to
one of the 3 cases)
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.granularity = granularity
self.cadence = cadence
self.case = case
self.seed = seed
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Changes the case (e.g. upper, lower, title) of random chars, words, or the entire
text
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.change_case(texts, metadata=metadata, **aug_kwargs)
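# Illustrative usage of ChangeCase (a minimal sketch; with granularity="all" and
# case="upper" the whole text is upper-cased by the underlying change_case function):
# >>> tsfm = ChangeCase(granularity="all", case="upper")
# >>> tsfm(["hello world"])  # -> e.g. ['HELLO WORLD']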
class Contractions(BaseTransform):
def __init__(
self,
aug_p: float = 0.3,
mapping: Optional[Union[str, Dict[str, Any]]] = CONTRACTIONS_MAPPING,
max_contraction_length: int = 2,
seed: Optional[int] = 10,
p: float = 1.0,
):
"""
@param aug_p: the probability that each pair (or longer string) of words will be
replaced with the corresponding contraction, if there is one in the mapping
@param mapping: either a dictionary representing the mapping or an iopath uri
where the mapping is stored
@param max_contraction_length: the words in each text will be checked for matches
in the mapping up to this length; i.e. if 'max_contraction_length' is 3 then
every substring of 2 *and* 3 words will be checked
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_p = aug_p
self.mapping = mapping
self.max_contraction_length = max_contraction_length
self.seed = seed
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Replaces pairs (or longer strings) of words with contractions given a mapping
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.contractions(texts, metadata=metadata, **aug_kwargs)
class GetBaseline(BaseTransform):
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Generates a baseline by tokenizing and detokenizing the text
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.get_baseline(texts, metadata=metadata, **aug_kwargs)
class InsertPunctuationChars(BaseTransform):
def __init__(
self,
granularity: str = "all",
cadence: float = 1.0,
vary_chars: bool = False,
p: float = 1.0,
):
"""
@param granularity: 'all' or 'word' -- if 'word', a new char is picked and
the cadence resets for each word in the text
@param cadence: how frequent (i.e. between this many characters) to insert
a punctuation character. Must be at least 1.0. Non-integer values are used
as an 'average' cadence
@param vary_chars: if true, picks a different punctuation char each time one is
used instead of just one per word/text
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.granularity = granularity
self.cadence = cadence
self.vary_chars = vary_chars
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Inserts punctuation characters in each input text
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.insert_punctuation_chars(texts, metadata=metadata, **aug_kwargs)
class InsertText(BaseTransform):
def __init__(
self,
num_insertions: int = 1,
insertion_location: str = "random",
seed: Optional[int] = 10,
p: float = 1.0,
):
"""
@param num_insertions: the number of times to sample from insert_text and insert
@param insertion_location: where to insert the insert_text in the input text;
valid values are "prepend", "append", or "random"
(inserts at a random index between words in the input text)
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.num_insertions = num_insertions
self.insertion_location = insertion_location
self.seed = seed
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Inserts some specified text into the input text a given number of times at a
given location
@param texts: a string or a list of text documents to be augmented
        @param insert_text: a list of text to sample from and insert into each text in
            texts
        @param metadata: if set to be a list, metadata about the function execution
            including its name, the source & dest length, etc. will be appended to
            the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.insert_text(texts, metadata=metadata, **aug_kwargs)
class InsertWhitespaceChars(BaseTransform):
def __init__(
self,
granularity: str = "all",
cadence: float = 1.0,
vary_chars: bool = False,
p: float = 1.0,
):
"""
@param granularity: 'all' or 'word' -- if 'word', a new char is picked and
the cadence resets for each word in the text
@param cadence: how frequent (i.e. between this many characters) to insert
a whitespace character. Must be at least 1.0. Non-integer values
are used as an 'average' cadence
@param vary_chars: if true, picks a different whitespace char each time
one is used instead of just one per word/text
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.granularity = granularity
self.cadence = cadence
self.vary_chars = vary_chars
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Inserts whitespace characters in each input text
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.insert_whitespace_chars(texts, metadata=metadata, **aug_kwargs)
class InsertZeroWidthChars(BaseTransform):
def __init__(
self,
granularity: str = "all",
cadence: float = 1.0,
vary_chars: bool = False,
p: float = 1.0,
):
"""
@param granularity: 'all' or 'word' -- if 'word', a new char is picked
and the cadence resets for each word in the text
@param cadence: how frequent (i.e. between this many characters) to insert
a zero-width character. Must be at least 1.0. Non-integer values are used
as an 'average' cadence
@param vary_chars: If true, picks a different zero-width char each time one is
used instead of just one per word/text
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.granularity = granularity
self.cadence = cadence
self.vary_chars = vary_chars
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Inserts zero-width characters in each input text
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.insert_zero_width_chars(texts, metadata=metadata, **aug_kwargs)
class MergeWords(BaseTransform):
def __init__(
self,
aug_word_p: float = 0.3,
min_char: int = 2,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
priority_words: Optional[List[str]] = None,
p: float = 1.0,
):
"""
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of characters in a word to be merged
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_word_p = aug_word_p
self.min_char = min_char
self.aug_word_min = aug_word_min
self.aug_word_max = aug_word_max
self.n = n
self.priority_words = priority_words
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Merges words in the text together
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.merge_words(texts, metadata=metadata, **aug_kwargs)
class ReplaceBidirectional(BaseTransform):
def __init__(
self,
granularity: str = "all",
split_word: bool = False,
p: float = 1.0,
):
"""
@param granularity: the level at which the font is applied; this must be
either 'word' or 'all'
@param split_word: if true and granularity is 'word', reverses only the
second half of each word
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.granularity = granularity
self.split_word = split_word
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Reverses each word (or part of the word) in each input text and uses
bidirectional marks to render the text in its original order. It reverses
each word separately which keeps the word order even when a line wraps
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.replace_bidirectional(texts, metadata=metadata, **aug_kwargs)
class ReplaceFunFonts(BaseTransform):
def __init__(
self,
aug_p: float = 0.3,
aug_min: int = 1,
aug_max: int = 10000,
granularity: str = "all",
vary_fonts: bool = False,
fonts_path: str = FUN_FONTS_PATH,
n: int = 1,
priority_words: Optional[List[str]] = None,
p: float = 1.0,
):
"""
@param aug_p: probability of words to be augmented
@param aug_min: minimum # of words to be augmented
@param aug_max: maximum # of words to be augmented
@param granularity: the level at which the font is applied; this
            must be either word, char, or all
@param vary_fonts: whether or not to switch font in each replacement
@param fonts_path: iopath uri where the fonts are stored
@param n: number of augmentations to be performed for each text
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_p = aug_p
self.aug_min = aug_min
self.aug_max = aug_max
self.granularity = granularity
self.vary_fonts = vary_fonts
self.fonts_path = fonts_path
self.n = n
self.priority_words = priority_words
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Replaces words or characters depending on the granularity with fun fonts applied
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.replace_fun_fonts(texts, metadata=metadata, **aug_kwargs)
class ReplaceSimilarChars(BaseTransform):
def __init__(
self,
aug_char_p: float = 0.3,
aug_word_p: float = 0.3,
min_char: int = 2,
aug_char_min: int = 1,
aug_char_max: int = 1000,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
mapping_path: Optional[str] = None,
priority_words: Optional[List[str]] = None,
p: float = 1.0,
):
"""
@param aug_char_p: probability of letters to be replaced in each word
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of letters in a word for a valid augmentation
@param aug_char_min: minimum # of letters to be replaced in each word
@param aug_char_max: maximum # of letters to be replaced in each word
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param mapping_path: iopath uri where the mapping is stored
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_char_p = aug_char_p
self.aug_word_p = aug_word_p
self.min_char = min_char
self.aug_char_min = aug_char_min
self.aug_char_max = aug_char_max
self.aug_word_min = aug_word_min
self.aug_word_max = aug_word_max
self.n = n
self.mapping_path = mapping_path
self.priority_words = priority_words
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Replaces letters in each text with similar characters
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.replace_similar_chars(texts, metadata=metadata, **aug_kwargs)
class ReplaceSimilarUnicodeChars(BaseTransform):
def __init__(
self,
aug_char_p: float = 0.3,
aug_word_p: float = 0.3,
min_char: int = 2,
aug_char_min: int = 1,
aug_char_max: int = 1000,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
mapping_path: str = UNICODE_MAPPING_PATH,
priority_words: Optional[List[str]] = None,
p: float = 1.0,
):
"""
@param aug_char_p: probability of letters to be replaced in each word
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of letters in a word for a valid augmentation
@param aug_char_min: minimum # of letters to be replaced in each word
@param aug_char_max: maximum # of letters to be replaced in each word
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param mapping_path: iopath uri where the mapping is stored
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_char_p = aug_char_p
self.aug_word_p = aug_word_p
self.min_char = min_char
self.aug_char_min = aug_char_min
self.aug_char_max = aug_char_max
self.aug_word_min = aug_word_min
self.aug_word_max = aug_word_max
self.n = n
self.mapping_path = mapping_path
self.priority_words = priority_words
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Replaces letters in each text with similar unicodes
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.replace_similar_unicode_chars(texts, metadata=metadata, **aug_kwargs)
class ReplaceText(BaseTransform):
def __init__(
self,
replace_text: Union[str, Dict[str, str]],
p: float = 1.0,
):
"""
@param replace_text: specifies the text to replace the input text with,
either as a string or a mapping from input text to new text
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.replace_text = replace_text
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Replaces the input text entirely with some specified text
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.replace_text(texts, metadata=metadata, **aug_kwargs)
class ReplaceUpsideDown(BaseTransform):
def __init__(
self,
aug_p: float = 0.3,
aug_min: int = 1,
aug_max: int = 1000,
granularity: str = "all",
n: int = 1,
p: float = 1.0,
):
"""
@param aug_p: probability of words to be augmented
@param aug_min: minimum # of words to be augmented
@param aug_max: maximum # of words to be augmented
@param granularity: the level at which the flipping is applied;
this must be either word, char, or all
@param n: number of augmentations to be performed for each text
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_p = aug_p
self.aug_min = aug_min
self.aug_max = aug_max
self.granularity = granularity
self.n = n
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Flips words in the text upside down depending on the granularity
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.replace_upside_down(texts, metadata=metadata, **aug_kwargs)
class ReplaceWords(BaseTransform):
def __init__(
self,
aug_word_p: float = 0.3,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
mapping: Optional[Union[str, Dict[str, Any]]] = None,
priority_words: Optional[List[str]] = None,
ignore_words: Optional[List[str]] = None,
p: float = 1.0,
):
"""
@param aug_word_p: probability of words to be augmented
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param mapping: either a dictionary representing the mapping or an iopath uri where
the mapping is stored
@param priority_words: list of target words that the augmenter should prioritize
to augment first
@param ignore_words: list of words that the augmenter should not augment
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_word_p = aug_word_p
self.aug_word_min = aug_word_min
self.aug_word_max = aug_word_max
self.n = n
self.mapping = mapping
self.priority_words = priority_words
self.ignore_words = ignore_words
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Replaces words in each text based on a given mapping
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.replace_words(texts, metadata=metadata, **aug_kwargs)
class SimulateTypos(BaseTransform):
def __init__(
self,
aug_char_p: float = 0.3,
aug_word_p: float = 0.3,
min_char: int = 2,
aug_char_min: int = 1,
aug_char_max: int = 1,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
typo_type: str = "all",
misspelling_dict_path: Optional[str] = MISSPELLING_DICTIONARY_PATH,
max_typo_length: int = 1,
priority_words: Optional[List[str]] = None,
p: float = 1.0,
):
"""
@param aug_char_p: probability of letters to be replaced in each word;
This is only applicable for keyboard distance and swapping
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of letters in a word for a valid augmentation;
This is only applicable for keyboard distance and swapping
@param aug_char_min: minimum # of letters to be replaced/swapped in each word;
This is only applicable for keyboard distance and swapping
@param aug_char_max: maximum # of letters to be replaced/swapped in each word;
This is only applicable for keyboard distance and swapping
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param typo_type: the type of typos to apply to the text; valid values are
"misspelling", "keyboard", "charmix", or "all"
@param misspelling_dict_path: iopath uri where the misspelling dictionary is
stored; must be specified if typo_type is "misspelling" or "all", but
otherwise can be None
@param max_typo_length: the words in the misspelling dictionary will be checked for
matches in the mapping up to this length; i.e. if 'max_typo_length' is 3 then
every substring of 2 *and* 3 words will be checked
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_char_p = aug_char_p
self.aug_word_p = aug_word_p
self.min_char = min_char
self.aug_char_min = aug_char_min
self.aug_char_max = aug_char_max
self.aug_word_min = aug_word_min
self.aug_word_max = aug_word_max
self.n = n
self.typo_type = typo_type
self.misspelling_dict_path = misspelling_dict_path
self.max_typo_length = max_typo_length
self.priority_words = priority_words
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Simulates typos in each text using misspellings, keyboard distance, and swapping.
You can specify a typo_type: charmix, which does a combination of character-level
modifications (delete, insert, substitute, & swap); keyboard, which swaps
characters with those close to each other on the QWERTY keyboard; misspelling,
which replaces words with misspellings defined in a dictionary file; or all,
which will apply a random combination of all 4
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.simulate_typos(texts, metadata=metadata, **aug_kwargs)
class SplitWords(BaseTransform):
def __init__(
self,
aug_word_p: float = 0.3,
min_char: int = 4,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
priority_words: Optional[List[str]] = None,
p: float = 1.0,
):
"""
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of characters in a word for a split
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_word_p = aug_word_p
self.min_char = min_char
self.aug_word_min = aug_word_min
self.aug_word_max = aug_word_max
self.n = n
self.priority_words = priority_words
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Splits words in the text into subwords
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.split_words(texts, metadata=metadata, **aug_kwargs)
class SwapGenderedWords(BaseTransform):
def __init__(
self,
aug_word_p: float = 0.3,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
mapping: Union[str, Dict[str, str]] = GENDERED_WORDS_MAPPING,
priority_words: Optional[List[str]] = None,
ignore_words: Optional[List[str]] = None,
p: float = 1.0,
):
"""
@param aug_word_p: probability of words to be augmented
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param mapping: a mapping of words from one gender to another; a mapping can be
supplied either directly as a dict or as a filepath to a json file containing
the dict
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param ignore_words: list of words that the augmenter should not augment
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_word_p = aug_word_p
self.aug_word_min = aug_word_min
self.aug_word_max = aug_word_max
self.n = n
self.mapping = mapping
self.priority_words = priority_words
self.ignore_words = ignore_words
def apply_transform(
self,
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
**aug_kwargs,
) -> Union[str, List[str]]:
"""
Replaces words in each text based on a provided `mapping`, which can either be a
dict already constructed mapping words from one gender to another or a file path
to a dict. Note: the logic in this augmentation was originally written by
Adina Williams and has been used in influential work, e.g.
https://arxiv.org/pdf/2005.00614.pdf
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@param aug_kwargs: kwargs to pass into the augmentation that will override values
set in __init__
@returns: the list of augmented text documents
"""
return F.swap_gendered_words(texts, metadata=metadata, **aug_kwargs)
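# Illustrative usage sketch of the transform classes above. Assumes the default
# AugLy text assets (e.g. the misspelling dictionary) resolve locally and that
# BaseTransform.__call__ accepts (texts, metadata=...) as used by Compose/OneOf.
if __name__ == "__main__":
    typos = SimulateTypos(aug_word_p=0.5, typo_type="keyboard")
    print(typos("The quick brown fox jumps over the lazy dog"))
    meta = []
    replace = ReplaceText(replace_text={"hello world": "goodbye world"})
    print(replace("hello world", metadata=meta))
    # each metadata entry records the function name, src/dst lengths, and intensity
    print(meta)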
|
AugLy-main
|
augly/text/transforms.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Union
def apply_lambda_intensity(aug_function: str, **kwargs) -> float:
intensity_func = globals().get(f"{aug_function}_intensity")
return intensity_func(**kwargs) if intensity_func else 100.0
def change_case_intensity(granularity: str, cadence: float, **kwargs) -> float:
return char_insertion_intensity_helper(granularity, cadence)
def contractions_intensity(aug_p: float, **kwargs) -> float:
return aug_p * 100.0
def get_baseline_intensity(**kwargs) -> float:
# get_baseline simply tokenizes and detokenizes text and at most adds extra spaces
return 0.0
def insert_punctuation_chars_intensity(
granularity: str, cadence: float, **kwargs
) -> float:
return char_insertion_intensity_helper(granularity, cadence)
def insert_text_intensity(num_insertions: int, **kwargs) -> float:
assert (
isinstance(num_insertions, int) and num_insertions >= 0
), "Expected 'num_insertions' to be a nonnegative integer"
max_insertions = 10
return min((num_insertions / max_insertions) * 100.0, 100.0)
def insert_whitespace_chars_intensity(
granularity: str, cadence: float, **kwargs
) -> float:
return char_insertion_intensity_helper(granularity, cadence)
def insert_zero_width_chars_intensity(
granularity: str, cadence: float, **kwargs
) -> float:
return char_insertion_intensity_helper(granularity, cadence)
def merge_words_intensity(aug_word_p: float, aug_word_max: int, **kwargs) -> float:
return replace_intensity_helper(aug_word_p, aug_word_max)
def replace_bidirectional_intensity(**kwargs) -> float:
return 100.0
def replace_fun_fonts_intensity(
aug_p: float, aug_max: int, granularity: str, **kwargs
) -> float:
return 100.0 if granularity == "all" else replace_intensity_helper(aug_p, aug_max)
def replace_similar_chars_intensity(
aug_char_p: float, aug_word_p: float, aug_char_max: int, aug_word_max: int, **kwargs
) -> float:
# we only care if aug_*_max is zero or not, so it's okay to multiply the values here
return replace_intensity_helper(
aug_word_p * aug_char_p, aug_word_max * aug_char_max
)
def replace_similar_unicode_chars_intensity(
aug_char_p: float, aug_word_p: float, aug_char_max: int, aug_word_max: int, **kwargs
) -> float:
# we only care if aug_*_max is zero or not, so it's okay to multiply the values here
return replace_intensity_helper(
aug_word_p * aug_char_p, aug_word_max * aug_char_max
)
def replace_text_intensity(
texts: Union[List[str], str], replace_text: Union[Dict[str, str], str], **kwargs
) -> float:
return (
100.0
if isinstance(replace_text, str) or any(t in texts for t in replace_text)
else 0.0
)
def replace_upside_down_intensity(
aug_p: float, aug_max: int, granularity: str, **kwargs
) -> float:
return 100.0 if granularity == "all" else replace_intensity_helper(aug_p, aug_max)
def replace_words_intensity(
aug_word_p: float,
aug_word_max: int,
mapping: Optional[Union[str, Dict[str, Any]]],
**kwargs,
) -> float:
return 0.0 if not mapping else replace_intensity_helper(aug_word_p, aug_word_max)
def simulate_typos_intensity(
aug_char_p: float, aug_word_p: float, aug_char_max: int, aug_word_max: int, **kwargs
) -> float:
# we only care if aug_*_max is zero or not, so it's okay to multiply the values here
return replace_intensity_helper(
aug_word_p * aug_char_p, aug_word_max * aug_char_max
)
def split_words_intensity(aug_word_p: float, aug_word_max: int, **kwargs) -> float:
return replace_intensity_helper(aug_word_p, aug_word_max)
def swap_gendered_words_intensity(
aug_word_p: float,
aug_word_max: int,
**kwargs,
) -> float:
return replace_intensity_helper(aug_word_p, aug_word_max)
def char_insertion_intensity_helper(granularity: str, cadence: float) -> float:
return 100.0 if granularity == "all" else (1 / cadence) * 100.0
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
return 0.0 if aug_max == 0 else aug_p * 100.0
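# Quick sanity-check sketch of the helpers above: intensities are reported on a
# 0-100 scale derived directly from the augmentation parameters.
if __name__ == "__main__":
    # granularity "all" is always maximal; otherwise intensity falls off with cadence
    print(char_insertion_intensity_helper("all", 2.0))   # 100.0
    print(char_insertion_intensity_helper("word", 2.0))  # 50.0
    # a zero aug_max means nothing can be augmented, so intensity is 0
    print(replace_intensity_helper(aug_p=0.3, aug_max=0))     # 0.0
    print(replace_intensity_helper(aug_p=0.3, aug_max=1000))  # 30.0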
|
AugLy-main
|
augly/text/intensity.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.text.composition import Compose, OneOf
from augly.text.functional import (
apply_lambda,
change_case,
contractions,
get_baseline,
insert_punctuation_chars,
insert_text,
insert_whitespace_chars,
insert_zero_width_chars,
merge_words,
replace_bidirectional,
replace_fun_fonts,
replace_similar_chars,
replace_similar_unicode_chars,
replace_text,
replace_upside_down,
replace_words,
simulate_typos,
split_words,
swap_gendered_words,
)
from augly.text.intensity import (
apply_lambda_intensity,
change_case_intensity,
contractions_intensity,
get_baseline_intensity,
insert_punctuation_chars_intensity,
insert_text_intensity,
insert_whitespace_chars_intensity,
insert_zero_width_chars_intensity,
merge_words_intensity,
replace_bidirectional_intensity,
replace_fun_fonts_intensity,
replace_similar_chars_intensity,
replace_similar_unicode_chars_intensity,
replace_text_intensity,
replace_upside_down_intensity,
replace_words_intensity,
simulate_typos_intensity,
split_words_intensity,
swap_gendered_words_intensity,
)
from augly.text.transforms import (
ApplyLambda,
ChangeCase,
Contractions,
GetBaseline,
InsertPunctuationChars,
InsertText,
InsertWhitespaceChars,
InsertZeroWidthChars,
MergeWords,
ReplaceBidirectional,
ReplaceFunFonts,
ReplaceSimilarChars,
ReplaceSimilarUnicodeChars,
ReplaceText,
ReplaceUpsideDown,
ReplaceWords,
SimulateTypos,
SplitWords,
SwapGenderedWords,
)
__all__ = [
"Compose",
"OneOf",
"ApplyLambda",
"ChangeCase",
"Contractions",
"GetBaseline",
"InsertPunctuationChars",
"InsertText",
"InsertWhitespaceChars",
"InsertZeroWidthChars",
"MergeWords",
"ReplaceBidirectional",
"ReplaceFunFonts",
"ReplaceSimilarChars",
"ReplaceSimilarUnicodeChars",
"ReplaceText",
"ReplaceUpsideDown",
"ReplaceWords",
"SimulateTypos",
"SplitWords",
"SwapGenderedWords",
"apply_lambda",
"change_case",
"contractions",
"get_baseline",
"insert_punctuation_chars",
"insert_text",
"insert_whitespace_chars",
"insert_zero_width_chars",
"merge_words",
"replace_bidirectional",
"replace_fun_fonts",
"replace_similar_chars",
"replace_similar_unicode_chars",
"replace_text",
"replace_upside_down",
"replace_words",
"simulate_typos",
"split_words",
"swap_gendered_words",
"apply_lambda_intensity",
"change_case_intensity",
"contractions_intensity",
"get_baseline_intensity",
"insert_punctuation_chars_intensity",
"insert_text_intensity",
"insert_whitespace_chars_intensity",
"insert_zero_width_chars_intensity",
"merge_words_intensity",
"replace_bidirectional_intensity",
"replace_fun_fonts_intensity",
"replace_similar_chars_intensity",
"replace_similar_unicode_chars_intensity",
"replace_text_intensity",
"replace_upside_down_intensity",
"replace_words_intensity",
"simulate_typos_intensity",
"split_words_intensity",
"swap_gendered_words_intensity",
]
|
AugLy-main
|
augly/text/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
def apply_lambda(
texts: Union[str, List[str]],
aug_function: Callable[..., List[str]] = lambda x: x,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> Union[str, List[str]]:
"""
Apply a user-defined lambda on a list of text documents
@param texts: a string or a list of text documents to be augmented
@param aug_function: the augmentation function to be applied onto the text
(should expect a list of text documents as input and return a list of
text documents)
@param **kwargs: the input attributes to be passed into the augmentation
function to be applied
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
assert callable(aug_function), (
repr(type(aug_function).__name__) + " object is not callable"
)
func_kwargs = deepcopy(locals())
if aug_function is not None:
try:
func_kwargs["aug_function"] = aug_function.__name__
except AttributeError:
func_kwargs["aug_function"] = type(aug_function).__name__
func_kwargs = txtutils.get_func_kwargs(metadata, func_kwargs)
aug_texts = aug_function(texts, **kwargs)
txtutils.get_metadata(
metadata=metadata,
function_name="apply_lambda",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def change_case(
texts: Union[str, List[str]],
granularity: str = "word",
cadence: float = 1.0,
case: str = "random",
seed: Optional[int] = 10,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Changes the case (e.g. upper, lower, title) of random chars, words, or the entire
text
@param texts: a string or a list of text documents to be augmented
@param granularity: 'all' (case of the entire text is changed), 'word' (case of
random words is changed), or 'char' (case of random chars is changed)
@param cadence: how frequent (i.e. between this many characters/words) to change the
case. Must be at least 1.0. Non-integer values are used as an 'average' cadence.
Not used for granularity 'all'
@param case: the case to change words to; valid values are 'lower', 'upper', 'title',
or 'random' (in which case the case will randomly be changed to one of the
previous three)
@param seed: if provided, this will set the random seed to ensure consistency between
runs
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
case_aug = a.CaseAugmenter(case, granularity, cadence, seed)
aug_texts = case_aug.augment(texts)
txtutils.get_metadata(
metadata=metadata,
function_name="change_case",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def contractions(
texts: Union[str, List[str]],
aug_p: float = 0.3,
mapping: Optional[Union[str, Dict[str, Any]]] = CONTRACTIONS_MAPPING,
max_contraction_length: int = 2,
seed: Optional[int] = 10,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Replaces pairs (or longer strings) of words with contractions given a mapping
@param texts: a string or a list of text documents to be augmented
@param aug_p: the probability that each pair (or longer string) of words will be
replaced with the corresponding contraction, if there is one in the mapping
@param mapping: either a dictionary representing the mapping or an iopath uri where
the mapping is stored
@param max_contraction_length: the words in each text will be checked for matches in
the mapping up to this length; i.e. if 'max_contraction_length' is 3 then every
substring of 2 *and* 3 words will be checked
@param seed: if provided, this will set the random seed to ensure consistency between
runs
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
assert 0 <= aug_p <= 1, "'aug_p' must be in the range [0, 1]"
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
contraction_aug = a.ContractionAugmenter(
aug_p, mapping, max_contraction_length, seed
)
aug_texts = contraction_aug.augment(texts)
txtutils.get_metadata(
metadata=metadata,
function_name="contractions",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def get_baseline(
texts: Union[str, List[str]],
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Generates a baseline by tokenizing and detokenizing the text
@param texts: a string or a list of text documents to be augmented
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
baseline_aug = a.BaselineAugmenter()
aug_texts = baseline_aug.augment(texts, 1)
txtutils.get_metadata(
metadata=metadata,
function_name="get_baseline",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def insert_punctuation_chars(
texts: Union[str, List[str]],
granularity: str = "all",
cadence: float = 1.0,
vary_chars: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Inserts punctuation characters in each input text
@param texts: a string or a list of text documents to be augmented
@param granularity: 'all' or 'word' -- if 'word', a new char is picked and
the cadence resets for each word in the text
@param cadence: how frequent (i.e. between this many characters) to insert a
punctuation character. Must be at least 1.0. Non-integer values are used
as an 'average' cadence
@param vary_chars: if true, picks a different punctuation char each time one
is used instead of just one per word/text
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented texts
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
punctuation_aug = a.InsertionAugmenter(
"punctuation", granularity, cadence, vary_chars
)
aug_texts = punctuation_aug.augment(texts)
txtutils.get_metadata(
metadata=metadata,
function_name="insert_punctuation_chars",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def insert_text(
texts: Union[str, List[str]],
insert_text: List[str],
num_insertions: int = 1,
insertion_location: str = "random",
seed: Optional[int] = 10,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Inserts some specified text into the input text a given number of times at a given
location
@param texts: a string or a list of text documents to be augmented
@param insert_text: a list of text to sample from and insert into each text in texts
@param num_insertions: the number of times to sample from insert_text and insert
@param insertion_location: where to insert the insert_text in the input text; valid
values are "prepend", "append", or "random" (inserts at a random index between
words in the input text)
@param seed: if provided, this will set the random seed to ensure consistency between
runs
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
insert_texts_aug = a.InsertTextAugmenter(num_insertions, insertion_location, seed)
aug_texts = insert_texts_aug.augment(texts, insert_text)
txtutils.get_metadata(
metadata=metadata,
function_name="insert_text",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def insert_whitespace_chars(
texts: Union[str, List[str]],
granularity: str = "all",
cadence: float = 1.0,
vary_chars: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Inserts whitespace characters in each input text
@param texts: a string or a list of text documents to be augmented
@param granularity: 'all' or 'word' -- if 'word', a new char is picked and
the cadence resets for each word in the text
@param cadence: how frequent (i.e. between this many characters) to insert a
whitespace character. Must be at least 1.0. Non-integer values are used
as an 'average' cadence
@param vary_chars: if true, picks a different whitespace char each time one
is used instead of just one per word/text
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented texts
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
whitespace_aug = a.InsertionAugmenter(
"whitespace", granularity, cadence, vary_chars
)
aug_texts = whitespace_aug.augment(texts)
txtutils.get_metadata(
metadata=metadata,
function_name="insert_whitespace_chars",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def insert_zero_width_chars(
texts: Union[str, List[str]],
granularity: str = "all",
cadence: float = 1.0,
vary_chars: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Inserts zero-width characters in each input text
@param texts: a string or a list of text documents to be augmented
@param granularity: 'all' or 'word' -- if 'word', a new char is picked and
the cadence resets for each word in the text
@param cadence: how frequent (i.e. between this many characters) to insert
a zero-width character. Must be at least 1.0. Non-integer values are
used as an 'average' cadence
@param vary_chars: if true, picks a different zero-width char each time one
is used instead of just one per word/text
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented texts
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
zero_width_aug = a.InsertionAugmenter(
"zero_width", granularity, cadence, vary_chars
)
aug_texts = zero_width_aug.augment(texts)
txtutils.get_metadata(
metadata=metadata,
function_name="insert_zero_width_chars",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def merge_words(
texts: Union[str, List[str]],
aug_word_p: float = 0.3,
min_char: int = 2,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
priority_words: Optional[List[str]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Merges words in the text together
@param texts: a string or a list of text documents to be augmented
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of characters in a word to be merged
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
merge_aug = a.WordsAugmenter(
"delete", min_char, aug_word_min, aug_word_max, aug_word_p, priority_words
)
aug_texts = merge_aug.augment(texts, n)
txtutils.get_metadata(
metadata=metadata,
function_name="merge_words",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def replace_bidirectional(
texts: Union[str, List[str]],
granularity: str = "all",
split_word: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Reverses each word (or part of the word) in each input text and uses
bidirectional marks to render the text in its original order. It reverses
each word separately which keeps the word order even when a line wraps
@param texts: a string or a list of text documents to be augmented
@param granularity: the level at which the reversal is applied; this must be either
'word' or 'all'
@param split_word: if true and granularity is 'word', reverses only the second
half of each word
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented texts
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
bidirectional_aug = a.BidirectionalAugmenter(granularity, split_word)
aug_texts = bidirectional_aug.augment(texts)
txtutils.get_metadata(
metadata=metadata,
function_name="replace_bidirectional",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def replace_fun_fonts(
texts: Union[str, List[str]],
aug_p: float = 0.3,
aug_min: int = 1,
aug_max: int = 10000,
granularity: str = "all",
vary_fonts: bool = False,
fonts_path: str = FUN_FONTS_PATH,
n: int = 1,
priority_words: Optional[List[str]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Replaces words or characters depending on the granularity with fun fonts applied
@param texts: a string or a list of text documents to be augmented
@param aug_p: probability of words to be augmented
@param aug_min: minimum # of words to be augmented
@param aug_max: maximum # of words to be augmented
@param granularity: the level at which the font is applied; this must be
either word, char, or all
@param vary_fonts: whether or not to switch font in each replacement
@param fonts_path: iopath uri where the fonts are stored
@param n: number of augmentations to be performed for each text
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
fun_fonts_aug = a.FunFontsAugmenter(
granularity, aug_min, aug_max, aug_p, vary_fonts, fonts_path, priority_words
)
aug_texts = fun_fonts_aug.augment(texts, n)
txtutils.get_metadata(
metadata=metadata,
function_name="replace_fun_fonts",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def replace_similar_chars(
texts: Union[str, List[str]],
aug_char_p: float = 0.3,
aug_word_p: float = 0.3,
min_char: int = 2,
aug_char_min: int = 1,
aug_char_max: int = 1000,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
mapping_path: Optional[str] = None,
priority_words: Optional[List[str]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Replaces letters in each text with similar characters
@param texts: a string or a list of text documents to be augmented
@param aug_char_p: probability of letters to be replaced in each word
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of letters in a word for a valid augmentation
@param aug_char_min: minimum # of letters to be replaced in each word
@param aug_char_max: maximum # of letters to be replaced in each word
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param mapping_path: iopath uri where the mapping is stored
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
char_aug = a.LetterReplacementAugmenter(
min_char,
aug_char_min,
aug_char_max,
aug_char_p,
aug_word_min,
aug_word_max,
aug_word_p,
mapping_path,
priority_words,
)
aug_texts = char_aug.augment(texts, n)
txtutils.get_metadata(
metadata=metadata,
function_name="replace_similar_chars",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def replace_similar_unicode_chars(
texts: Union[str, List[str]],
aug_char_p: float = 0.3,
aug_word_p: float = 0.3,
min_char: int = 2,
aug_char_min: int = 1,
aug_char_max: int = 1000,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
mapping_path: str = UNICODE_MAPPING_PATH,
priority_words: Optional[List[str]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Replaces letters in each text with visually similar Unicode characters
@param texts: a string or a list of text documents to be augmented
@param aug_char_p: probability of letters to be replaced in each word
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of letters in a word for a valid augmentation
@param aug_char_min: minimum # of letters to be replaced in each word
@param aug_char_max: maximum # of letters to be replaced in each word
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param mapping_path: iopath uri where the mapping is stored
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
unicode_aug = a.LetterReplacementAugmenter(
min_char,
aug_char_min,
aug_char_max,
aug_char_p,
aug_word_min,
aug_word_max,
aug_word_p,
mapping_path,
priority_words,
)
aug_texts = unicode_aug.augment(texts, n)
txtutils.get_metadata(
metadata=metadata,
function_name="replace_similar_unicode_chars",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def replace_text(
texts: Union[str, List[str]],
replace_text: Union[str, Dict[str, str]],
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Replaces the input text entirely with some specified text
@param texts: a string or a list of text documents to be augmented
@param replace_text: specifies the text to replace the input text with,
either as a string or a mapping from input text to new text
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: a string or a list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
text_aug = a.TextReplacementAugmenter()
aug_texts = text_aug.augment(texts, replace_text)
txtutils.get_metadata(
metadata=metadata,
function_name="replace_text",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def replace_upside_down(
texts: Union[str, List[str]],
aug_p: float = 0.3,
aug_min: int = 1,
aug_max: int = 1000,
granularity: str = "all",
n: int = 1,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Flips words in the text upside down depending on the granularity
@param texts: a string or a list of text documents to be augmented
@param aug_p: probability of words to be augmented
@param aug_min: minimum # of words to be augmented
@param aug_max: maximum # of words to be augmented
@param granularity: the level at which the flipping is applied; this must be
either word, char, or all
@param n: number of augmentations to be performed for each text
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
upside_down_aug = a.UpsideDownAugmenter(granularity, aug_min, aug_max, aug_p)
aug_texts = upside_down_aug.augment(texts, n)
txtutils.get_metadata(
metadata=metadata,
function_name="replace_upside_down",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def replace_words(
texts: Union[str, List[str]],
aug_word_p: float = 0.3,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
mapping: Optional[Union[str, Dict[str, Any]]] = None,
priority_words: Optional[List[str]] = None,
ignore_words: Optional[List[str]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Replaces words in each text based on a given mapping
@param texts: a string or a list of text documents to be augmented
@param aug_word_p: probability of words to be augmented
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param mapping: either a dictionary representing the mapping or an iopath uri where
the mapping is stored
@param priority_words: list of target words that the augmenter should prioritize to
augment first
@param ignore_words: list of words that the augmenter should not augment
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
word_aug = a.WordReplacementAugmenter(
aug_word_min, aug_word_max, aug_word_p, mapping, priority_words, ignore_words
)
aug_texts = word_aug.augment(texts, n)
txtutils.get_metadata(
metadata=metadata,
function_name="replace_words",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def simulate_typos(
texts: Union[str, List[str]],
aug_char_p: float = 0.3,
aug_word_p: float = 0.3,
min_char: int = 2,
aug_char_min: int = 1,
aug_char_max: int = 1,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
typo_type: str = "all",
misspelling_dict_path: Optional[str] = MISSPELLING_DICTIONARY_PATH,
max_typo_length: int = 1,
priority_words: Optional[List[str]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Simulates typos in each text using misspellings, keyboard distance, and swapping.
You can specify a typo_type: charmix, which does a combination of character-level
modifications (delete, insert, substitute, & swap); keyboard, which swaps characters
with those close to each other on the QWERTY keyboard; misspelling, which replaces
words with misspellings defined in a dictionary file; or all, which will apply a
random combination of all 4
@param texts: a string or a list of text documents to be augmented
@param aug_char_p: probability of letters to be replaced in each word;
This is only applicable for keyboard distance and swapping
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of letters in a word for a valid augmentation;
This is only applicable for keyboard distance and swapping
@param aug_char_min: minimum # of letters to be replaced/swapped in each word;
This is only applicable for keyboard distance and swapping
@param aug_char_max: maximum # of letters to be replaced/swapped in each word;
This is only applicable for keyboard distance and swapping
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param typo_type: the type of typos to apply to the text; valid values are
"misspelling", "keyboard", "charmix", or "all"
@param misspelling_dict_path: iopath uri where the misspelling dictionary is stored;
must be specified if typo_type is "misspelling" or "all", but otherwise can be
None
@param max_typo_length: the words in the misspelling dictionary will be checked for
matches in the mapping up to this length; i.e. if 'max_typo_length' is 3 then
every substring of 2 *and* 3 words will be checked
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
typo_aug = a.TypoAugmenter(
min_char,
aug_char_min,
aug_char_max,
aug_char_p,
aug_word_min,
aug_word_max,
aug_word_p,
typo_type,
misspelling_dict_path,
max_typo_length,
priority_words,
)
aug_texts = typo_aug.augment(texts, n)
txtutils.get_metadata(
metadata=metadata,
function_name="simulate_typos",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def split_words(
texts: Union[str, List[str]],
aug_word_p: float = 0.3,
min_char: int = 4,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
priority_words: Optional[List[str]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Splits words in the text into subwords
@param texts: a string or a list of text documents to be augmented
@param aug_word_p: probability of words to be augmented
@param min_char: minimum # of characters in a word for a split
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
split_aug = a.WordsAugmenter(
"split", min_char, aug_word_min, aug_word_max, aug_word_p, priority_words
)
aug_texts = split_aug.augment(texts, n)
txtutils.get_metadata(
metadata=metadata,
function_name="split_words",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
def swap_gendered_words(
texts: Union[str, List[str]],
aug_word_p: float = 0.3,
aug_word_min: int = 1,
aug_word_max: int = 1000,
n: int = 1,
mapping: Union[str, Dict[str, str]] = GENDERED_WORDS_MAPPING,
priority_words: Optional[List[str]] = None,
ignore_words: Optional[List[str]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Replaces words in each text based on a provided `mapping`, which can either be a dict
already constructed mapping words from one gender to another or a file path to a
dict. Note: the logic in this augmentation was originally written by Adina Williams
and has been used in influential work, e.g. https://arxiv.org/pdf/2005.00614.pdf
@param texts: a string or a list of text documents to be augmented
@param aug_word_p: probability of words to be augmented
@param aug_word_min: minimum # of words to be augmented
@param aug_word_max: maximum # of words to be augmented
@param n: number of augmentations to be performed for each text
@param mapping: a mapping of words from one gender to another; a mapping can be
supplied either directly as a dict or as a filepath to a json file containing the
dict
@param priority_words: list of target words that the augmenter should
prioritize to augment first
@param ignore_words: list of words that the augmenter should not augment
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
func_kwargs = txtutils.get_func_kwargs(metadata, locals())
mapping = txtutils.get_gendered_words_mapping(mapping)
word_aug = a.WordReplacementAugmenter(
aug_word_min, aug_word_max, aug_word_p, mapping, priority_words, ignore_words
)
aug_texts = word_aug.augment(texts, n)
txtutils.get_metadata(
metadata=metadata,
function_name="swap_gendered_words",
aug_texts=aug_texts,
**func_kwargs,
)
return aug_texts
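# Illustrative usage sketch of the functional API, assuming the default AugLy
# asset paths (misspelling dictionary, fonts, etc.) resolve locally.
if __name__ == "__main__":
    meta = []
    out = simulate_typos(
        ["The quick brown fox jumps over the lazy dog"],
        aug_word_p=0.5,
        typo_type="keyboard",
        metadata=meta,
    )
    print(out)
    print(meta[-1]["name"], meta[-1]["intensity"])
    # replace_bidirectional reverses each word and inserts bidirectional marks so
    # the rendered order appears unchanged
    print(replace_bidirectional("hello world", granularity="word"))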
|
AugLy-main
|
augly/text/functional.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from copy import deepcopy
from typing import Any, Dict, List, Optional, Union
from augly import utils
from augly.text import intensity as txtintensity
def get_func_kwargs(
metadata: Optional[List[Dict[str, Any]]], local_kwargs: Dict[str, Any], **kwargs
) -> Dict[str, Any]:
if metadata is None:
return {}
func_kwargs = deepcopy(local_kwargs)
func_kwargs.pop("metadata")
func_kwargs.update(**kwargs)
return func_kwargs
def get_metadata(
metadata: Optional[List[Dict[str, Any]]],
function_name: str,
texts: Optional[List[str]] = None,
aug_texts: Optional[Union[List[str], str]] = None,
**kwargs,
) -> None:
if metadata is None:
return
assert isinstance(
metadata, list
), "Expected `metadata` to be set to None or of type list"
assert (
texts is not None
), "Expected `texts` to be passed in if metadata was provided"
assert (
aug_texts is not None
), "Expected `aug_texts` to be passed in if metadata was provided"
metadata.append(
{
"name": function_name,
"input_type": "list" if isinstance(texts, list) else "string",
"src_length": len(texts) if isinstance(texts, list) else 1,
"dst_length": len(aug_texts) if isinstance(aug_texts, list) else 1,
**kwargs,
}
)
intensity_kwargs = {"metadata": metadata[-1], "texts": texts, **kwargs}
metadata[-1]["intensity"] = getattr(
txtintensity, f"{function_name}_intensity", lambda **_: 0.0
)(**intensity_kwargs)
def get_gendered_words_mapping(mapping: Union[str, Dict[str, str]]) -> Dict[str, str]:
"""
Note: The `swap_gendered_words` augmentation, including this logic, was originally
written by Adina Williams and has been used in influential work, e.g.
https://arxiv.org/pdf/2005.00614.pdf
"""
assert isinstance(
mapping, (str, Dict)
), "Mapping must be either a dict or filepath to a mapping of gendered words"
if isinstance(mapping, Dict):
return mapping
if isinstance(mapping, str):
with utils.pathmgr.open(mapping) as f:
return json.load(f)
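# Minimal sketch of the metadata entries that get_metadata appends; the intensity
# lookup falls back to 0.0 when no matching *_intensity function exists.
if __name__ == "__main__":
    meta = []
    get_metadata(
        metadata=meta,
        function_name="get_baseline",
        texts=["hello world"],
        aug_texts=["hello world"],
    )
    print(meta[-1])  # name, input_type, src/dst lengths, and intensity (0.0 here)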
|
AugLy-main
|
augly/text/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Any, Dict, List, Optional, Union
from augly.text.transforms import BaseTransform
"""
Composition Operators:
Compose: identical to the Compose object provided by the torchvision
library, this class provides a similar experience for applying multiple
transformations onto text
OneOf: the OneOf operator takes as input a list of transforms and
may apply (with probability p) one of the transforms in the list.
If a transform is applied, it is selected using the specified
probabilities of the individual transforms.
Example:
>>> Compose([
>>> InsertPunctuationChars(),
>>> ReplaceFunFonts(),
>>> OneOf([
>>> ReplaceSimilarChars(),
>>> SimulateTypos(),
>>> ]),
>>> ])
"""
class BaseComposition(BaseTransform):
def __init__(self, transforms: List[BaseTransform], p: float = 1.0):
"""
@param transforms: a list of transforms
@param p: the probability of the transform being applied; default value is 1.0
"""
for transform in transforms:
assert isinstance(
transform, BaseTransform
), "Expected instances of type 'BaseTransform' for parameter 'transforms'"
super().__init__(p)
self.transforms = transforms
class Compose(BaseComposition):
def __call__(
self,
texts: Union[str, List[str]],
seed: Optional[int] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
Applies the list of transforms in order to the text
@param texts: a string or a list of text documents to be augmented
@param seed: if provided, the random seed will be set to this before calling
the transform
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
if seed is not None:
random.seed(seed)
texts = [texts] if isinstance(texts, str) else texts
for transform in self.transforms:
texts = transform(texts, metadata=metadata)
return texts
class OneOf(BaseComposition):
def __init__(self, transforms: List[BaseTransform], p: float = 1.0):
"""
@param transforms: a list of transforms to select from; one of which will
be chosen to be applied to the text
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(transforms, p)
transform_probs = [t.p for t in transforms]
probs_sum = sum(transform_probs)
self.transform_probs = [t / probs_sum for t in transform_probs]
def __call__(
self,
texts: Union[str, List[str]],
force: bool = False,
seed: Optional[int] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
"""
@param texts: a string or a list of text documents to be augmented
@param force: if set to True, the transform will be applied. Otherwise,
application is determined by the probability set
@param seed: if provided, the random seed will be set to this before calling
the transform
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest length, etc. will be appended to
the inputted list. If set to None, no metadata will be appended or returned
@returns: the list of augmented text documents
"""
if seed is not None:
random.seed(seed)
texts = [texts] if isinstance(texts, str) else texts
if random.random() > self.p:
return texts
transform = random.choices(self.transforms, self.transform_probs)[0]
return transform(texts, force=True, metadata=metadata)
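# Small sketch of how OneOf turns its child transforms' `p` values into
# normalized selection probabilities.
if __name__ == "__main__":
    from augly.text.transforms import ReplaceSimilarChars, SimulateTypos
    one_of = OneOf([ReplaceSimilarChars(p=0.25), SimulateTypos(p=0.75)])
    print(one_of.transform_probs)  # [0.25, 0.75]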
|
AugLy-main
|
augly/text/composition.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Union
from augly.text.augmenters.utils import (
detokenize,
get_aug_idxes,
tokenize,
UPSIDE_DOWN_CHAR_MAPPING,
)
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
from nlpaug.augmenter.word import Augmenter # @manual
from nlpaug.util import Action, Method # @manual
def _flip(c: str) -> str:
if c in UPSIDE_DOWN_CHAR_MAPPING:
return UPSIDE_DOWN_CHAR_MAPPING[c]
else:
return c
class UpsideDownAugmenter(Augmenter):
"""Augmenter that flips the text"""
def __init__(self, granularity: str, aug_min: int, aug_max: int, aug_p: float):
assert granularity in [
"char",
"word",
"all",
], "Granularity must be either char, word, or all"
assert (
0 <= aug_min <= aug_max
), "aug_min must be non-negative and aug_max must be greater than or equal to aug_min"
assert 0 <= aug_p <= 1, "aug_p must be a value in the range [0, 1]"
self.granularity = granularity
super().__init__(
name="UpsideDownAugmenter",
action=Action.INSERT,
method=Method.WORD,
aug_min=aug_min,
aug_max=aug_max,
aug_p=aug_p,
)
@classmethod
def clean(cls, data: Union[List[str], str]) -> Union[str, List[str]]:
if isinstance(data, list):
return [d.strip() for d in data]
return data.strip()
@classmethod
def is_duplicate(cls, dataset: List[str], data: str) -> bool:
return data in dataset
def flip_text(self, text: str) -> str:
return "".join([_flip(c) for c in reversed(text)])
def insert(self, data: str) -> str:
if self.granularity == "all":
return self.flip_text(data)
tokens = tokenize(data)
if self.granularity == "word":
aug_word_cnt = self._generate_aug_cnt(
len(tokens), self.aug_min, self.aug_max, self.aug_p
)
aug_word_idxes = set(
get_aug_idxes(
self, tokens, list(range(len(tokens))), aug_word_cnt, Method.WORD
)
)
for i, token in enumerate(tokens):
if i in aug_word_idxes:
tokens[i] = self.flip_text(token)
elif self.granularity == "char":
all_chars = [char for token in tokens for char in list(token)]
aug_char_idxes = self.generate_aug_idxes(all_chars)
char_idx = 0
for t_i, token in enumerate(tokens):
chars = list(token)
for c_i, char in enumerate(chars):
if char_idx in aug_char_idxes:
chars[c_i] = _flip(char)
char_idx += 1
tokens[t_i] = "".join(chars)
return detokenize(tokens)
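# Illustrative usage sketch (hypothetical, not part of the upstream module); assumes
# nlpaug is installed, as it already must be for the imports above.
if __name__ == "__main__":
    aug = UpsideDownAugmenter(granularity="all", aug_min=1, aug_max=1000, aug_p=1.0)
    # flip_text reverses the string and maps each character through
    # UPSIDE_DOWN_CHAR_MAPPING, leaving unmapped characters untouched.
    print(aug.flip_text("Hello world"))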
|
AugLy-main
|
augly/text/augmenters/upside_down.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from typing import Any, Dict, List, Optional, Tuple, Union
from augly.text.augmenters.utils import detokenize, get_aug_idxes, tokenize
from augly.utils import pathmgr
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
from nlpaug.augmenter.word import WordAugmenter # @manual
from nlpaug.util import Action, Method # @manual
class WordReplacement:
def __init__(self, mapping: Optional[Union[str, Dict[str, Any]]]):
if isinstance(mapping, str):
local_mapping_path = pathmgr.get_local_path(mapping)
with open(local_mapping_path) as json_file:
self.mapping = {k.lower(): v for k, v in json.load(json_file).items()}
elif isinstance(mapping, Dict):
self.mapping = mapping
else:
self.mapping = {}
def replace(self, word: str) -> Tuple[str, bool]:
new_word = self.mapping.get(word, None) or self.mapping.get(word.lower(), None)
if new_word is not None and word[0].isupper():
new_word = new_word.capitalize()
return (new_word, True) if new_word else (word, False)
class WordReplacementAugmenter(WordAugmenter):
"""Augmenter that replaces words based on a given mapping"""
def __init__(
self,
aug_word_min: int,
aug_word_max: int,
aug_word_p: float,
mapping: Optional[Union[str, Dict[str, Any]]],
priority_words: Optional[List[str]],
ignore_words: Optional[List[str]],
):
super().__init__(
action=Action.SUBSTITUTE,
aug_min=aug_word_min,
aug_max=aug_word_max,
aug_p=aug_word_p,
)
self.word_mapping = self.get_mapping(mapping)
self.priority_words = (
set(priority_words) if priority_words is not None else priority_words
)
self.ignore_words = (
{word.lower() for word in ignore_words}
if ignore_words is not None
else set()
)
def get_mapping(
self, mapping: Optional[Union[str, Dict[str, Any]]]
) -> WordReplacement:
return WordReplacement(mapping)
def substitute(self, data: str) -> str:
"""
Returns a text where random words are replaced using the specified mapping
@param data: the text to which the word substitution will be applied
"""
results = []
tokens = tokenize(data)
aug_word_cnt = self._generate_aug_cnt(
len(tokens), self.aug_min, self.aug_max, self.aug_p
)
filtered_word_idxes = self.pre_skip_aug(tokens)
if self.priority_words is None:
self.priority_words = self.word_mapping.mapping.keys()
aug_word_idxes = set(
get_aug_idxes(
self,
tokens,
filtered_word_idxes,
aug_word_cnt,
Method.WORD,
)
)
if not aug_word_idxes:
return data
is_diff = False
for t_i, token in enumerate(tokens):
if t_i not in aug_word_idxes:
results.append(token)
continue
new_token, has_changed = self.word_mapping.replace(token)
is_diff = is_diff or has_changed
results.append(new_token)
return detokenize(results) if is_diff else data
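# Illustrative usage sketch (hypothetical, not part of the upstream module): the
# WordReplacement helper can be exercised directly with an inline mapping.
if __name__ == "__main__":
    wr = WordReplacement({"hello": "hi"})
    print(wr.replace("Hello"))  # ("Hi", True) -- the lowercase key matches; capitalization is kept
    print(wr.replace("world"))  # ("world", False) -- unmapped words pass through unchanged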
|
AugLy-main
|
augly/text/augmenters/word_replacement.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from typing import List, Optional
from augly.text.augmenters.utils import (
detokenize,
get_aug_idxes,
tokenize,
validate_augmenter_params,
)
from augly.utils import pathmgr
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
from nlpaug.augmenter.char import KeyboardAug, RandomCharAug # @manual
from nlpaug.augmenter.word import WordAugmenter # @manual
from nlpaug.util import Action, Method # @manual
class MisspellingReplacement:
def __init__(self, misspelling_dict_path: str):
local_misspelling_dict_path = pathmgr.get_local_path(misspelling_dict_path)
with open(local_misspelling_dict_path) as json_file:
self.dictionary = json.load(json_file)
def replace(self, word: str) -> Optional[List[str]]:
return self.dictionary.get(word, None) or self.dictionary.get(
word.lower(), None
)
class TypoAugmenter(WordAugmenter):
"""
Augmenter that replaces words with typos. You can specify a typo_type: charmix, which
does a combination of character-level modifications (delete, insert, substitute, &
swap); keyboard, which swaps characters with those close to each other on the QWERTY
keyboard; misspelling, which replaces words with misspellings defined in a dictionary
file; or all, which will apply a random combination of all 4
"""
def __init__(
self,
min_char: int,
aug_char_min: int,
aug_char_max: int,
aug_char_p: float,
aug_word_min: int,
aug_word_max: int,
aug_word_p: float,
typo_type: str,
misspelling_dict_path: Optional[str],
max_typo_length: int,
priority_words: Optional[List[str]],
):
validate_augmenter_params(
aug_char_min,
aug_char_max,
aug_char_p,
aug_word_min,
aug_word_max,
aug_word_p,
)
assert typo_type in [
"all",
"charmix",
"keyboard",
"misspelling",
], "Typo type must be one of: all, charmix, keyboard, misspelling"
super().__init__(
action=Action.SUBSTITUTE,
aug_min=aug_word_min,
aug_max=aug_word_max,
aug_p=aug_word_p,
)
self.augmenters, self.model = [], None
if typo_type in ["all", "charmix"]:
for action in [
Action.DELETE,
Action.INSERT,
Action.SUBSTITUTE,
Action.SWAP,
]:
self.augmenters.append(
RandomCharAug(
action=action,
min_char=min_char,
aug_char_min=aug_char_min,
aug_char_max=aug_char_max,
aug_char_p=aug_char_p,
reverse_tokenizer=detokenize,
tokenizer=tokenize,
),
)
if typo_type in ["all", "keyboard"]:
self.augmenters.append(
KeyboardAug(
min_char=min_char,
aug_char_min=aug_char_min,
aug_char_max=aug_char_max,
aug_char_p=aug_char_p,
include_upper_case=True,
reverse_tokenizer=detokenize,
tokenizer=tokenize,
),
)
if typo_type in ["all", "misspelling"]:
assert (
misspelling_dict_path is not None
), "'misspelling_dict_path' must be provided if 'typo_type' is 'all' or 'misspelling'"
assert max_typo_length >= 1, "Must set 'max_typo_length' >= 1"
self.max_typo_length = max_typo_length
self.model = self.get_model(misspelling_dict_path)
else:
self.max_typo_length = 1
self.priority_words = (
set(priority_words) if priority_words is not None else priority_words
)
def align_capitalization(self, src_token: str, dest_token: str) -> str:
if self.get_word_case(src_token) == "upper":
return dest_token.upper()
elif self.get_word_case(src_token) == "lower":
return dest_token.lower()
elif self.get_word_case(src_token) == "capitalize":
return dest_token.capitalize()
return dest_token
def get_model(self, misspelling_dict_path: str) -> MisspellingReplacement:
return MisspellingReplacement(misspelling_dict_path)
def substitute(self, data: str) -> str:
"""
Returns text where random words are typos
@param data: the text where the word substitutions will occur
"""
results = []
tokens = self.tokenizer(data)
aug_word_cnt = self._generate_aug_cnt(
len(tokens), self.aug_min, self.aug_max, self.aug_p
)
filtered_word_idxes = self.skip_aug(self.pre_skip_aug(tokens), tokens)
aug_word_idxes = set(
get_aug_idxes(self, tokens, filtered_word_idxes, aug_word_cnt, Method.WORD)
)
for t_i in range(1, self.max_typo_length + 1):
i = 0
while i <= len(tokens) - t_i:
if i not in aug_word_idxes:
results.append(tokens[i])
i += 1
continue
misspellings = (
self.model.replace(" ".join(tokens[i : i + t_i]))
if self.model
else None
)
if misspellings:
misspelling = self.sample(misspellings, 1)[0]
results.append(self.align_capitalization(tokens[i], misspelling))
i += t_i - 1
elif len(self.augmenters) > 0:
aug = self.sample(self.augmenters, 1)[0]
new_token = aug.augment(tokens[i])
results.append(self.align_capitalization(tokens[i], new_token))
else:
# If no misspelling is found in the dict & no other typo types are being
# used, don't change the token
results.append(tokens[i])
i += 1
if t_i > 1:
results.extend(tokens[-t_i + 1 :])
return detokenize(results)
def _tokenizer(self, text: str) -> List[str]:
return tokenize(text)
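# Illustrative usage sketch (hypothetical, not part of the upstream module): the
# MisspellingReplacement helper reads a JSON dictionary; here a temporary file with a
# made-up entry stands in for the real misspelling dictionary asset.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"because": ["becuase", "becasue"]}, f)
    model = MisspellingReplacement(f.name)
    print(model.replace("Because"))  # ['becuase', 'becasue'] -- falls back to the lowercase key
    print(model.replace("dog"))      # None -- no entry in the dictionary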
|
AugLy-main
|
augly/text/augmenters/typo.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.text.augmenters.utils import detokenize, tokenize
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
from nlpaug.augmenter.word import WordAugmenter # @manual
from nlpaug.util import Action # @manual
class BaselineAugmenter(WordAugmenter):
"""Baseline augmenter that serves as comparison with the original text"""
def __init__(self):
super().__init__(action=Action.SUBSTITUTE)
def substitute(self, data: str) -> str:
"""
Returns a text that should be the same as the original input text
@param data: the text that will be tokenized and detokenized
"""
return detokenize(tokenize(data))
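# Illustrative usage sketch (hypothetical, not part of the upstream module): the output
# should read the same as the input, modulo whitespace normalization around punctuation.
if __name__ == "__main__":
    aug = BaselineAugmenter()
    print(aug.substitute("Hello, world!"))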
|
AugLy-main
|
augly/text/augmenters/baseline.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from typing import List, Optional
from augly.text.augmenters.utils import (
get_aug_idxes,
LETTER_CHAR_MAPPING,
rejoin_words_and_whitespace,
split_words_on_whitespace,
validate_augmenter_params,
)
from augly.utils import pathmgr
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
from nlpaug.augmenter.char import CharAugmenter # @manual
from nlpaug.util import Action, Method # @manual
class CharReplacement:
def __init__(self, mapping_path: Optional[str]):
if mapping_path:
local_mapping_path = pathmgr.get_local_path(mapping_path)
with open(local_mapping_path) as json_file:
self.mapping = json.load(json_file)
else:
self.mapping = LETTER_CHAR_MAPPING
def replace(self, character: str) -> List[str]:
return (
self.mapping[character.lower()]
if character.lower() in self.mapping
else [character]
)
class LetterReplacementAugmenter(CharAugmenter):
"""Augmenter that replaces letters with similar mappings"""
def __init__(
self,
min_char: int,
aug_char_min: int,
aug_char_max: int,
aug_char_p: float,
aug_word_min: int,
aug_word_max: int,
aug_word_p: float,
mapping_path: Optional[str],
priority_words: Optional[List[str]],
):
validate_augmenter_params(
aug_char_min,
aug_char_max,
aug_char_p,
aug_word_min,
aug_word_max,
aug_word_p,
)
super().__init__(
action=Action.SUBSTITUTE,
min_char=min_char,
aug_char_min=aug_char_min,
aug_char_max=aug_char_max,
aug_char_p=aug_char_p,
aug_word_min=aug_word_min,
aug_word_max=aug_word_max,
aug_word_p=aug_word_p,
)
self.letter_mapping = self.get_mapping(mapping_path)
self.priority_words = (
set(priority_words) if priority_words is not None else priority_words
)
def get_mapping(self, mapping_path: Optional[str]) -> CharReplacement:
return CharReplacement(mapping_path)
def substitute(self, data: str) -> str:
"""
Returns a text where random letters are replaced by the specified mapping
@param data: the text to which the letter substitution will be applied
"""
tokens, whitespaces = split_words_on_whitespace(data)
aug_word_cnt = self._generate_aug_cnt(
len(tokens), self.aug_word_min, self.aug_word_max, self.aug_word_p
)
filtered_word_idxes = self.skip_aug(self.pre_skip_aug(tokens), tokens)
aug_word_idxes = set(
get_aug_idxes(self, tokens, filtered_word_idxes, aug_word_cnt, Method.WORD)
)
for t_i, token in enumerate(tokens):
if t_i not in aug_word_idxes:
continue
chars = list(token)
aug_char_cnt = self._generate_aug_cnt(
len(chars), self.aug_char_min, self.aug_char_max, self.aug_char_p
)
aug_char_idxes = (
None
if len(chars) < self.min_char
else set(
get_aug_idxes(
self, chars, list(range(len(chars))), aug_char_cnt, Method.CHAR
)
)
)
if not aug_char_idxes:
continue
for c_i, char in enumerate(chars):
if c_i in aug_char_idxes:
chars[c_i] = self.sample(self.letter_mapping.replace(char), 1)[0]
tokens[t_i] = "".join(chars)
return rejoin_words_and_whitespace(tokens, whitespaces)
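# Illustrative usage sketch (hypothetical, not part of the upstream module): the
# CharReplacement helper falls back to the built-in leet-speak mapping when no
# mapping file is given.
if __name__ == "__main__":
    cr = CharReplacement(None)
    print(cr.replace("a"))  # ['@', '4'] -- candidate replacements for 'a'
    print(cr.replace("q"))  # ['q'] -- characters without a mapping are returned as-is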
|
AugLy-main
|
augly/text/augmenters/letter_replacement.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import random
from typing import List, Optional, Union
from augly.text.augmenters.utils import detokenize, get_aug_idxes, tokenize
from augly.utils import pathmgr
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
from nlpaug import Augmenter # @manual
from nlpaug.util import Action, Method # @manual
class FunFontsAugmenter(Augmenter):
def __init__(
self,
granularity: str,
aug_min: int,
aug_max: int,
aug_p: float,
vary_fonts: bool,
fonts_path: str,
priority_words: Optional[List[str]],
):
assert granularity in [
"char",
"word",
"all",
], "Granularity must be either char, word, or all"
assert (
0 <= aug_min <= aug_max
), "aug_min must be non-negative and aug_max must be greater than or equal to aug_min"
assert 0 <= aug_p <= 1, "aug_p must be a value in the range [0, 1]"
super().__init__(
name="FunFontsAugmenter",
action=Action.SUBSTITUTE,
method=Method.WORD,
aug_min=aug_min,
aug_max=aug_max,
aug_p=aug_p,
)
self.fonts = self.load_fonts(fonts_path)
self.aug_p = aug_p
self.granularity = granularity
self.vary_fonts = vary_fonts
self.priority_words = (
set(priority_words) if priority_words is not None else priority_words
)
def load_fonts(self, fonts_path: str) -> List[Union[str, dict]]:
"""
Loads the fonts from a JSON file specified by an iopath URI
@returns: the list of fonts parsed from the file; each font is a str if the same
string is used for every replacement, or a Dict[str, str] if the font maps
every letter to a new font letter
"""
local_fonts_path = pathmgr.get_local_path(fonts_path)
with open(local_fonts_path, encoding="utf-8") as text_file:
font_dictionary = json.load(text_file)
return list(font_dictionary.values())
@classmethod
def clean(cls, data: Union[List[str], str]) -> Union[str, List[str]]:
if isinstance(data, list):
return [d.strip() for d in data]
return data.strip()
@classmethod
def is_duplicate(cls, dataset: List[str], data: str) -> bool:
return data in dataset
def apply_font(self, text: str, font: Union[str, dict], method: str) -> str:
assert (
method in Method.getall()
), "Expected 'method' to be a value defined in nlpaug.util.method.Method"
if isinstance(font, str):
return font.join(text) + font if method == Method.WORD else text + font
if isinstance(font, dict):
return (
"".join([font.get(char, char) for char in text])
if method == Method.WORD
else font.get(text, text)
)
def substitute(self, data: str) -> str:
tokens = tokenize(data)
font = random.sample(self.fonts, 1)[0]
results = []
if self.granularity == "word":
aug_word_cnt = self._generate_aug_cnt(
len(tokens), self.aug_min, self.aug_max, self.aug_p
)
aug_word_idxes = set(
get_aug_idxes(
self, tokens, list(range(len(tokens))), aug_word_cnt, Method.WORD
)
)
for i, token in enumerate(tokens):
if i not in aug_word_idxes:
results.append(token)
continue
if self.vary_fonts:
font = random.sample(self.fonts, 1)[0]
results.append(self.apply_font(token, font, Method.WORD))
elif self.granularity == "char":
all_chars = [char for token in tokens for char in list(token)]
aug_char_idxes = set(self.generate_aug_idxes(all_chars))
char_idx = 0
for token in tokens:
result = ""
chars = list(token)
for char in chars:
if self.vary_fonts:
font = random.sample(self.fonts, 1)[0]
if char_idx not in aug_char_idxes:
result += char
else:
result += self.apply_font(char, font, Method.CHAR)
char_idx += 1
results.append(result)
else:
if random.random() < self.aug_p:
for token in tokens:
results.append(self.apply_font(token, font, Method.WORD))
else:
results = tokens
return detokenize(results)
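# Illustrative usage sketch (hypothetical, not part of the upstream module): the real
# library ships a fonts JSON asset; here a temporary file with a single string-style
# "font" (a combining dot above) stands in for it.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"dotted": "\u0307"}, f)
    aug = FunFontsAugmenter(
        granularity="all",
        aug_min=1,
        aug_max=100,
        aug_p=1.0,
        vary_fonts=False,
        fonts_path=f.name,
        priority_words=None,
    )
    print(aug.substitute("hello world"))  # every word has the combining mark interleaved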
|
AugLy-main
|
augly/text/augmenters/fun_fonts.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from augly.text.augmenters.baseline import BaselineAugmenter
from augly.text.augmenters.bidirectional import BidirectionalAugmenter
from augly.text.augmenters.case import CaseAugmenter
from augly.text.augmenters.contraction import ContractionAugmenter
from augly.text.augmenters.fun_fonts import FunFontsAugmenter
from augly.text.augmenters.insert_text import InsertTextAugmenter
from augly.text.augmenters.insertion import InsertionAugmenter
from augly.text.augmenters.letter_replacement import LetterReplacementAugmenter
from augly.text.augmenters.text_replacement import TextReplacementAugmenter
from augly.text.augmenters.typo import TypoAugmenter
from augly.text.augmenters.upside_down import UpsideDownAugmenter
from augly.text.augmenters.word_replacement import WordReplacementAugmenter
from augly.text.augmenters.words_augmenter import WordsAugmenter
__all__ = [
"BaselineAugmenter",
"BidirectionalAugmenter",
"CaseAugmenter",
"ContractionAugmenter",
"FunFontsAugmenter",
"InsertTextAugmenter",
"InsertionAugmenter",
"LetterReplacementAugmenter",
"WordsAugmenter",
"TextReplacementAugmenter",
"TypoAugmenter",
"UpsideDownAugmenter",
"WordReplacementAugmenter",
]
|
AugLy-main
|
augly/text/augmenters/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import List, Optional
from augly.text.augmenters.utils import detokenize, get_aug_idxes, tokenize
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
from nlpaug.augmenter.word import WordAugmenter # @manual
from nlpaug.util import Method # @manual
class WordsAugmenter(WordAugmenter):
"""
Augmenter that performs a given action at the word level.
Valid actions are defined below as methods
"""
def __init__(
self,
action: str,
min_char: int,
aug_word_min: int,
aug_word_max: int,
aug_word_p: float,
priority_words: Optional[List[str]],
):
assert min_char >= 2, "Expected min_char to be greater than or equal to 2"
super().__init__(
action=action,
aug_min=aug_word_min,
aug_max=aug_word_max,
aug_p=aug_word_p,
)
self.min_char = min_char
self.priority_words = (
set(priority_words) if priority_words is not None else priority_words
)
def delete(self, data: str) -> str:
"""Augmenter that merges selected words with the following word"""
results = []
tokens = tokenize(data)
aug_word_cnt = self._generate_aug_cnt(
len(tokens), self.aug_min, self.aug_max, self.aug_p
)
# Skip last word in the sentence as merges occur with the following word
filtered_word_idxes = self.pre_skip_aug(tokens[:-1])
aug_word_idxes = set(
get_aug_idxes(
self,
tokens,
filtered_word_idxes,
aug_word_cnt,
Method.WORD,
self.min_char,
)
)
if not aug_word_idxes:
return data
t_i = 0
while t_i < len(tokens):
if t_i in aug_word_idxes and len(tokens[t_i + 1]) >= self.min_char:
results.append(tokens[t_i] + tokens[t_i + 1])
t_i += 1
else:
results.append(tokens[t_i])
t_i += 1
return detokenize(results)
def split(self, data: str) -> str:
"""Augmenter that splits words in two"""
results = []
tokens = tokenize(data)
aug_word_cnt = self._generate_aug_cnt(
len(tokens), self.aug_min, self.aug_max, self.aug_p
)
filtered_word_idxes = self.pre_skip_aug(tokens)
aug_word_idxes = set(
get_aug_idxes(
self,
tokens,
filtered_word_idxes,
aug_word_cnt,
Method.WORD,
self.min_char,
)
)
if not aug_word_idxes:
return data
for t_i, token in enumerate(tokens):
if t_i not in aug_word_idxes:
results.append(token)
continue
target_token = tokens[t_i]
split_position = random.randint(1, len(target_token) - 1)
first_token = target_token[:split_position]
second_token = target_token[split_position:]
results.extend([first_token, second_token])
return detokenize(results)
|
AugLy-main
|
augly/text/augmenters/words_augmenter.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Union
from augly.text.augmenters.utils import (
rejoin_words_and_whitespace,
split_words_on_whitespace,
)
POP_DIRECTIONAL = "\u202C"
RTL_OVERRIDE = "\u202E"
LTR_OVERRIDE = "\u202D"
class BidirectionalAugmenter:
"""
Reverses words in a string (or the whole string), using bidirectional
override marks so the rendered string is visually identical to the original.
Preserves whitespace, including newlines.
"""
def __init__(self, granularity: str, split_word: bool = False):
assert granularity in [
"word",
"all",
], "Must set 'granularity' to either 'word' or 'all'."
self.granularity = granularity
self.split_word = split_word
def rtl_flip(self, text: str, split_text: bool) -> str:
if split_text and len(text) > 1:
split_point = len(text) // 2
return (
text[:split_point]
+ RTL_OVERRIDE
+ text[split_point:][::-1]
+ POP_DIRECTIONAL
)
return RTL_OVERRIDE + text[::-1] + POP_DIRECTIONAL
def rtl_flip_per_word(self, text: str) -> str:
words, spaces = split_words_on_whitespace(text)
augmented_words = []
for word in words:
augmented_words.append(self.rtl_flip(word, self.split_word))
return LTR_OVERRIDE + rejoin_words_and_whitespace(augmented_words, spaces)
def augment(self, texts: Union[str, List[str]]) -> List[str]:
texts = texts if isinstance(texts, list) else [texts]
if self.granularity == "all":
return [self.rtl_flip(text, False) for text in texts]
# otherwise, self.granularity == "word"
return [self.rtl_flip_per_word(text) for text in texts]
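# Illustrative usage sketch (hypothetical, not part of the upstream module).
if __name__ == "__main__":
    aug = BidirectionalAugmenter(granularity="all")
    out = aug.augment("hello world")[0]
    # The text is reversed and wrapped in RTL-override/pop marks, so it renders
    # like the original even though the underlying characters are reordered.
    print(out == RTL_OVERRIDE + "dlrow olleh" + POP_DIRECTIONAL)  # True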
|
AugLy-main
|
augly/text/augmenters/bidirectional.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Union
class TextReplacementAugmenter:
"""
Replaces the input text entirely with some specified text
"""
def augment(
self,
texts: Union[str, List[str]],
replace_text: Union[str, Dict[str, str]],
) -> Union[str, List[str]]:
return (
[self.replace(text, replace_text) for text in texts]
if isinstance(texts, list)
else self.replace(texts, replace_text)
)
def replace(self, text: str, replace_text: Union[str, Dict[str, str]]) -> str:
return (
replace_text.get(text, text)
if isinstance(replace_text, dict)
else replace_text
)
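# Illustrative usage sketch (hypothetical, not part of the upstream module).
if __name__ == "__main__":
    aug = TextReplacementAugmenter()
    print(aug.augment("old text", "new text"))  # 'new text' -- a str replaces the whole input
    print(aug.augment(["a", "b"], {"a": "A"}))  # ['A', 'b'] -- a dict replaces only matching texts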
|
AugLy-main
|
augly/text/augmenters/text_replacement.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from random import Random
from typing import List, Optional, Union
class InsertTextAugmenter:
"""
Inserts some specified text into the input text a given number of times at a given
location
"""
def __init__(
self,
num_insertions: int = 1,
insertion_location: str = "random",
seed: Optional[int] = 10,
):
VALID_LOCATIONS = {"prepend", "append", "random"}
assert (
insertion_location in VALID_LOCATIONS
), f"{insertion_location} has to be one of {VALID_LOCATIONS}"
assert num_insertions >= 1, "num_insertions has to be a positive number"
self.num_insertions = num_insertions
self.insertion_location = insertion_location
self.seeded_random = Random(seed)
def augment(
self,
texts: Union[str, List[str]],
insert_text: List[str],
) -> List[str]:
transformed_texts = []
for text in texts:
transformed_text = text
for _ in range(self.num_insertions):
text_choice = self.seeded_random.choice(insert_text)
if self.insertion_location == "append":
transformed_text = f"{transformed_text} {text_choice}"
elif self.insertion_location == "prepend":
transformed_text = f"{text_choice} {transformed_text}"
else:
words = transformed_text.split()
random_loc = self.seeded_random.randint(0, len(words))
words.insert(random_loc, text_choice)
transformed_text = " ".join(words)
transformed_texts.append(transformed_text)
return transformed_texts
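# Illustrative usage sketch (hypothetical, not part of the upstream module): note that
# `texts` is iterated directly, so pass a list rather than a bare string.
if __name__ == "__main__":
    aug = InsertTextAugmenter(num_insertions=1, insertion_location="prepend")
    print(aug.augment(["good morning"], ["BREAKING:"]))  # ['BREAKING: good morning']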
|
AugLy-main
|
augly/text/augmenters/insert_text.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import re
from typing import List, Optional, Tuple
import regex
from augly.utils.libsndfile import install_libsndfile
install_libsndfile()
from nlpaug import Augmenter # @manual
from nlpaug.util import Method # @manual
LANGUAGES = {
"ar": "ar_AR",
"de": "de_DE",
"el": "el_GR",
"en": "en_XX",
"es": "es_XX",
"fr": "fr_XX",
"id": "id_ID",
"it": "it_IT",
"nl": "nl_NL",
"pl": "pl_PL",
"pt": "pt_XX",
"ro": "ro_RO",
"sv": "sv_SE",
"th": "th_TH",
"tr": "tr_TR",
"vi": "vi_VN",
}
LETTER_CHAR_MAPPING = {
"a": ["@", "4"],
"b": ["|3", "13", "!3"],
"c": ["(", "[", "{", "<"],
"d": ["|)"],
"e": ["3"],
"g": ["6", "9"],
"h": ["|-|", "/-/", ")-("],
"i": ["1", "7", "!", "|"],
"k": ["|<", "1<", "|(", "|{"],
"l": ["1", "7", "!", "|", "|_"],
"m": ["|V|", "IVI", "^^", "nn"],
"n": ["^"],
"o": ["D", "0", "()", "[]", "<>"],
"r": ["12", "I2", "|2", "/2"],
"s": ["$", "5", "z", "Z"],
"t": ["7", "+"],
"w": ["vv", "VV", "UU", "uu"],
"x": ["><", ")("],
"z": ["2"],
}
WORDNET_LANGUAGE_MAPPING = {
"ar": "arb",
"bg": "bul",
"ca": "cat",
"da": "dan",
"el": "ell",
"en": "eng",
"es": "spa",
"eu": "eus",
"fa": "fas",
"fi": "fin",
"fr": "fra",
"gl": "glg",
"he": "heb",
"hr": "hrv",
"id": "ind",
"it": "ita",
"ja": "jpn",
"ms": "zsm",
"nl": "nld",
"no": "nno",
"pl": "pol",
"pt": "por",
"sl": "slv",
"sv": "swe",
"th": "tha",
"zh": "cmn",
}
WORDNET_POS_MAPPING = {
"NN": ["n"],
"NNS": ["n"],
"NNP": ["n"],
"NNPS": ["n"],
"NP": ["n"],
"VB": ["v"],
"VBD": ["v"],
"VBG": ["v"],
"VBN": ["v"],
"VBZ": ["v"],
"VBP": ["v"],
"JJ": ["a", "s"],
"JJR": ["a", "s"],
"JJS": ["a", "s"],
"IN": ["a", "s"],
"RB": ["r"],
"RBR": ["r"],
"RBS": ["r"],
}
NEGATE_CONTRACTIONS = {
"aren't": "are",
"can't": "can",
"couldn't": "could",
"didn't": "did",
"doesn't": "does",
"hadn't": "had",
"haven't": "have",
"mustn't": "must",
"oughtn't": "ought",
"shouldn't": "should",
"shouldn't've": "should've",
"wasn't": "was",
"weren't": "were",
"won't": "will",
"wouldn't": "would",
}
PARENS_BRACKETS = [
(re.compile(r"\s([\[\(\{\<])\s"), r" \1"),
(re.compile(r"\s([\]\)\}\>])\s"), r"\1 "),
]
PUNCTUATION = [
(re.compile(r"\s([-])\s"), r"\1"), # Zero pad
(re.compile(r"(\s)?([#])\s"), r"\2"), # Hashtags
(re.compile(r"\s([,;:%])\s"), r"\1 "), # Right pad
(re.compile(r"([\$])\s([\d])"), r"\1\2"), # $ amounts
(re.compile(r"([\$])\s"), r"\1"), # Consecutive $ signs
(re.compile(r"(\s)?([\.\?\!])"), r"\2"), # End punctuation
]
QUOTES = [
(re.compile(r"([\'])\s(.*?)\s([\'])"), r"\1\2\3"),
(re.compile(r"([\"])\s(.*?)\s([\"])"), r"\1\2\3"),
(re.compile(r"\s(\')\s"), r"\1 "),
]
SPLIT_BY_WHITESPACE = re.compile(r"(\S+)")
TOKENIZER_REGEXPS = (
r"""
(?: [\w]+['][\w]+) # Contractions
|
(?:[+\-]?\d+[,/.:^]\d+) # Numbers (fractions, decimals, time, ratios)
|
(?:[\w_]+) # Words without punctuation
|
(?:\S) # Everything else
""",
)
TOKENIZER_REGEX = regex.compile(
r"""(%s)""" % "|".join(TOKENIZER_REGEXPS), regex.VERBOSE | regex.UNICODE
)
UPSIDE_DOWN_CHAR_MAPPING = dict(
zip(
"zyxwvutsrqponmlkjihgfedcbaZYXWVUTSRQPONMLKJIHGFEDCBA0987654321&_?!\"'.,;",
"zʎxʍʌnʇsɹbdouɯlʞɾᴉɥɓɟǝpɔqɐZ⅄XMΛՈꞱSᴚტԀONW⅂ꓘᒋIH⅁ℲƎᗡƆᗺⱯ068ㄥ9Ϛ߈Ɛᘔ⇂⅋‾¿¡„,˙'؛",
)
)
def tokenize(text: str) -> List[str]:
return TOKENIZER_REGEX.findall(text)
def detokenize(tokens: List[str]) -> str:
text = " ".join(tokens)
text = " " + text + " "
for regexp, substitution in PARENS_BRACKETS:
text = regexp.sub(substitution, text)
for regexp, substitution in PUNCTUATION:
text = regexp.sub(substitution, text)
for regexp, substitution in QUOTES:
text = regexp.sub(substitution, text)
return text.strip()
def split_words_on_whitespace(text: str) -> Tuple[List[str], List[str]]:
# Beginning and end are treated as whitespace even if they are empty strings
split_elements = SPLIT_BY_WHITESPACE.split(text)
# Return words and whitespace separately
return split_elements[1::2], split_elements[::2]
def rejoin_words_and_whitespace(words: List[str], whitespace: List[str]) -> str:
# The split regex returns one more whitespace element than words
assert len(whitespace) == len(words) + 1, "Input lengths do not match!"
# Add a dummy entry to 'words' so we can zip it easily, then drop it
# pyre-fixme[6]: For 2nd param expected `int` but got `Tuple[]`.
ordered_elements = sum(zip(whitespace, words + [""]), ())[:-1]
return "".join(ordered_elements)
def validate_augmenter_params(
aug_char_min: int,
aug_char_max: int,
aug_char_p: float,
aug_word_min: int,
aug_word_max: int,
aug_word_p: float,
) -> None:
assert aug_char_min >= 0, "aug_char_min must be non-negative"
assert aug_char_max >= 0, "aug_char_max must be non-negative"
assert 0 <= aug_char_p <= 1, "aug_char_p must be a value in the range [0, 1]"
assert aug_word_min >= 0, "aug_word_min must be non-negative"
assert aug_word_max >= 0, "aug_word_max must be non-negative"
assert 0 <= aug_word_p <= 1, "aug_word_p must be a value in the range [0,1]"
def get_aug_idxes(
augmenter: Augmenter,
tokens: List[str],
filtered_idxes: List[int],
aug_cnt: int,
mode: str,
min_char: Optional[int] = None,
) -> List[int]:
assert (
mode in Method.getall()
), "Expected 'mode' to be a value defined in nlpaug.util.method.Method"
priority_idxes = []
priority_words = getattr(augmenter, "priority_words", None)
ignore_words = getattr(augmenter, "ignore_words", set())
if mode == Method.WORD and priority_words is not None:
priority_words_set = set(priority_words)
for i, token in enumerate(tokens):
if token in priority_words_set and token.lower() not in ignore_words:
if min_char is None or len(token) >= min_char:
priority_idxes.append(i)
idxes = []
for i in filtered_idxes:
if i not in priority_idxes:
if (min_char is None or len(tokens[i]) >= min_char) and tokens[
i
].lower() not in ignore_words:
idxes.append(i)
if len(priority_idxes) + len(idxes) == 0:
return []
if len(priority_idxes) <= aug_cnt:
aug_idxes = priority_idxes
aug_cnt -= len(priority_idxes)
if len(idxes) < aug_cnt:
aug_cnt = len(idxes)
aug_idxes += augmenter.sample(idxes, aug_cnt)
else:
aug_idxes = augmenter.sample(priority_idxes, aug_cnt)
return aug_idxes
|
AugLy-main
|
augly/text/augmenters/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
from typing import List, Union
from augly.text.augmenters.utils import (
rejoin_words_and_whitespace,
split_words_on_whitespace,
)
# Not meant to be an exhaustive list
CHARACTER_TYPES = {
"zero_width": [
"\u200B", # Zero-Width Space
"\u200C", # Zero-Width Non-Joiner
"\u200D", # Zero-Width Joiner
"\u2060", # Word Joiner
"\u2061", # Function Application
"\u2062", # Invisible Times
"\u2063", # Invisible Separator
"\u2064", # Invisible Plus
],
"whitespace": [
" ", # Space
"\t", # Horizontal Tab
"\n", # Newline
"\r", # Carriage Return
"\v", # Vertical Tab
"\f", # Feed
],
"punctuation": [".", "?", "!", ",", ";", ":", "-", "'", "..."],
}
class InsertionAugmenter:
"""
Inserts various types of characters (including zero-width or punctuation),
which in turn breaks up the text.
The 'cadence' and 'vary_chars' options are used to adjust the frequency and
pattern of the characters, to avoid detection by automated deobfuscation techniques.
If 'cadence' is not an integer (say, 2.5), a char will be added
on *average* about every 2.5 chars.
"""
def __init__(
self,
char_type: str,
granularity: str,
cadence: float = 1.0,
vary_chars: bool = False,
):
char_types = set(CHARACTER_TYPES.keys())
assert char_type in char_types, f"Must set 'char_type' to one of: {char_types}"
assert granularity in {
"word",
"all",
}, "Must set 'granularity' to either 'word' or 'all'."
assert cadence >= 1.0, "Must set 'cadence' to be no less than 1.0."
self.char_set = CHARACTER_TYPES[char_type]
self.granularity = granularity
self.cadence = cadence
self.vary_chars = vary_chars
def insert_chars(self, text: str) -> str:
splits = int((len(text) + self.cadence - 1) // self.cadence)
split_text = [
text[math.ceil(i * self.cadence) : math.ceil((i + 1) * self.cadence)]
for i in range(splits)
]
if not self.vary_chars:
return random.choice(self.char_set).join(split_text)
separators = random.choices(self.char_set, k=len(split_text))
# pyre-fixme[6]: For 2nd param expected `int` but got `Tuple[]`.
return "".join(sum(zip(split_text, separators), ())[:-1])
def insert_chars_per_word(self, text: str) -> str:
words, spaces = split_words_on_whitespace(text)
augmented_words = []
for word in words:
augmented_words.append(self.insert_chars(word))
return rejoin_words_and_whitespace(augmented_words, spaces)
def augment(self, texts: Union[str, List[str]]) -> List[str]:
texts = texts if isinstance(texts, list) else [texts]
if self.granularity == "all":
return [self.insert_chars(text) for text in texts]
# otherwise, self.granularity == "word"
return [self.insert_chars_per_word(text) for text in texts]
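# Illustrative usage sketch (hypothetical, not part of the upstream module).
if __name__ == "__main__":
    aug = InsertionAugmenter(char_type="zero_width", granularity="word", cadence=2.0)
    out = aug.augment("hello world")[0]
    # The result looks identical to the input but is longer, because zero-width
    # characters are inserted roughly every two characters within each word.
    print(repr(out), len(out) > len("hello world"))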
|
AugLy-main
|
augly/text/augmenters/insertion.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List, Optional, Union
import numpy as np
from augly.text.augmenters.utils import (
rejoin_words_and_whitespace,
split_words_on_whitespace,
)
class CaseChanger:
def __init__(self, case: str, seed: Optional[int]):
self.rng = np.random.RandomState(seed) if seed is not None else np.random
self.case = case
def change(self, text: str, case: Optional[str] = None) -> str:
case = case or self.case
if case == "lower":
return text.lower()
elif case == "upper":
return text.upper()
elif case == "title":
return text.title()
return self.change(text, self.rng.choice(["lower", "upper", "title"]))
class CaseAugmenter:
"""
Augmenter that changes the case characters, words, or entire texts depending on the
given 'granularity'.
The 'cadence' option is used to adjust the frequency of the case changes, to avoid
detection by automated deobfuscation techniques. If 'cadence' is not an integer
(e.g. 2.5), the case will be changed on *average* about every 2.5 chars.
"""
def __init__(
self,
case: str,
granularity: str,
cadence: float = 1.0,
seed: Optional[int] = 10,
):
assert granularity in {
"char",
"word",
"all",
}, "Must set 'granularity' to 'char', 'word', or 'all'."
assert cadence >= 1.0, "Must set 'cadence' to be no less than 1.0."
assert case in {
"lower",
"upper",
"title",
"random",
}, "'case' must be one of: lower, upper, title, random"
self.case_changer = CaseChanger(case, seed)
self.granularity = granularity
self.cadence = cadence
def change_case_all(self, text: str) -> str:
return self.case_changer.change(text)
def change_case_chars(self, text: str) -> str:
num_change_case = int((len(text) + self.cadence - 1) // self.cadence)
change_case_idxs = {math.ceil(i * self.cadence) for i in range(num_change_case)}
augmented_chars = [
self.case_changer.change(str(c)) if i in change_case_idxs else str(c)
for i, c in enumerate(text)
]
return "".join(augmented_chars)
def change_case_words(self, text: str) -> str:
words, spaces = split_words_on_whitespace(text)
num_change_case = int((len(words) + self.cadence - 1) // self.cadence)
change_case_idxs = {math.ceil(i * self.cadence) for i in range(num_change_case)}
augmented_words = [
self.case_changer.change(word) if i in change_case_idxs else word
for i, word in enumerate(words)
]
return rejoin_words_and_whitespace(augmented_words, spaces)
def augment(self, texts: Union[str, List[str]]) -> List[str]:
texts = texts if isinstance(texts, list) else [texts]
if self.granularity == "all":
return [self.change_case_all(text) for text in texts]
elif self.granularity == "char":
return [self.change_case_chars(text) for text in texts]
# self.granularity == "word"
return [self.change_case_words(text) for text in texts]
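# Illustrative usage sketch (hypothetical, not part of the upstream module): with
# case="upper" the result is deterministic, so the expected output can be shown.
if __name__ == "__main__":
    aug = CaseAugmenter(case="upper", granularity="word", cadence=2.0)
    print(aug.augment("the quick brown fox"))  # ['THE quick BROWN fox']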
|
AugLy-main
|
augly/text/augmenters/case.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from typing import Any, Dict, List, Optional, Union
import numpy as np
from augly.text.augmenters.utils import detokenize, tokenize
from augly.utils import pathmgr
class ContractionMapping:
def __init__(self, mapping: Optional[Union[str, Dict[str, Any]]]):
if isinstance(mapping, str):
local_mapping_path = pathmgr.get_local_path(mapping)
with open(local_mapping_path) as json_file:
self.mapping = {k.lower(): v for k, v in json.load(json_file).items()}
elif isinstance(mapping, Dict):
self.mapping = mapping
else:
self.mapping = {}
def replace(self, text: str) -> Optional[str]:
new_text = self.mapping.get(text.lower(), None)
if new_text is not None and text[0].isupper():
new_text = new_text.capitalize()
return new_text
class ContractionAugmenter:
"""Augmenter that replaces eligible word sequences with contractions based on a given mapping"""
def __init__(
self,
aug_p: float,
mapping: Optional[Union[str, Dict[str, Any]]],
max_contraction_length: int = 2,
seed: Optional[int] = 10,
):
assert max_contraction_length >= 2, "Must set 'max_contraction_length' >= 2"
self.aug_p = aug_p
self.contraction_mapping = self.get_mapping(mapping)
self.max_contraction_length = max_contraction_length
self.rng = np.random.RandomState(seed) if seed is not None else np.random
def get_mapping(
self, mapping: Optional[Union[str, Dict[str, Any]]]
) -> ContractionMapping:
return ContractionMapping(mapping)
def substitute_contractions(self, text: str) -> str:
"""
Returns a text where eligible word sequences are replaced with contractions using the specified mapping
@param text: the text to which the word substitution will be applied
"""
results = []
tokens = tokenize(text)
for c_len in range(2, self.max_contraction_length + 1):
i = 0
while i <= len(tokens) - c_len:
result = tokens[i]
if self.rng.rand() <= self.aug_p:
contraction = self.contraction_mapping.replace(
" ".join(tokens[i : i + c_len])
)
if contraction is not None:
result = contraction
i += c_len - 1
results.append(result)
i += 1
results.extend(tokens[-c_len + 1 :])
return detokenize(results)
def augment(self, texts: Union[str, List[str]]) -> List[str]:
texts_list = [texts] if isinstance(texts, str) else texts
return [self.substitute_contractions(text) for text in texts_list]
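# Illustrative usage sketch (hypothetical, not part of the upstream module): an inline
# mapping stands in for the JSON contraction mapping the library normally ships.
if __name__ == "__main__":
    aug = ContractionAugmenter(aug_p=1.0, mapping={"it is": "it's"})
    print(aug.augment("It is raining"))  # ["It's raining"] -- aug_p=1.0 forces the substitution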
|
AugLy-main
|
augly/text/augmenters/contraction.py
|
"""
Task names and category groupings for the LegalBench tasks.
"""
TASKS = [
"abercrombie",
"canada_tax_court_outcomes",
"citation_prediction_classification",
"citation_prediction_open",
"consumer_contracts_qa",
"contract_nli_confidentiality_of_agreement",
"contract_nli_explicit_identification",
"contract_nli_inclusion_of_verbally_conveyed_information",
"contract_nli_limited_use",
"contract_nli_no_licensing",
"contract_nli_notice_on_compelled_disclosure",
"contract_nli_permissible_acquirement_of_similar_information",
"contract_nli_permissible_copy",
"contract_nli_permissible_development_of_similar_information",
"contract_nli_permissible_post-agreement_possession",
"contract_nli_return_of_confidential_information",
"contract_nli_sharing_with_employees",
"contract_nli_sharing_with_third-parties",
"contract_nli_survival_of_obligations",
"contract_qa",
"corporate_lobbying",
"cuad_affiliate_license-licensee",
"cuad_affiliate_license-licensor",
"cuad_anti-assignment",
"cuad_audit_rights",
"cuad_cap_on_liability",
"cuad_change_of_control",
"cuad_competitive_restriction_exception",
"cuad_covenant_not_to_sue",
"cuad_effective_date",
"cuad_exclusivity",
"cuad_expiration_date",
"cuad_governing_law",
"cuad_insurance",
"cuad_ip_ownership_assignment",
"cuad_irrevocable_or_perpetual_license",
"cuad_joint_ip_ownership",
"cuad_license_grant",
"cuad_liquidated_damages",
"cuad_minimum_commitment",
"cuad_most_favored_nation",
"cuad_no-solicit_of_customers",
"cuad_no-solicit_of_employees",
"cuad_non-compete",
"cuad_non-disparagement",
"cuad_non-transferable_license",
"cuad_notice_period_to_terminate_renewal",
"cuad_post-termination_services",
"cuad_price_restrictions",
"cuad_renewal_term",
"cuad_revenue-profit_sharing",
"cuad_rofr-rofo-rofn",
"cuad_source_code_escrow",
"cuad_termination_for_convenience",
"cuad_third_party_beneficiary",
"cuad_uncapped_liability",
"cuad_unlimited-all-you-can-eat-license",
"cuad_volume_restriction",
"cuad_warranty_duration",
"definition_classification",
"definition_extraction",
"diversity_1",
"diversity_2",
"diversity_3",
"diversity_4",
"diversity_5",
"diversity_6",
"function_of_decision_section",
"hearsay",
"insurance_policy_interpretation",
"jcrew_blocker",
"international_citizenship_questions",
"nys_judicial_ethics",
"learned_hands_benefits",
"learned_hands_business",
"learned_hands_consumer",
"learned_hands_courts",
"learned_hands_crime",
"learned_hands_divorce",
"learned_hands_domestic_violence",
"learned_hands_education",
"learned_hands_employment",
"learned_hands_estates",
"learned_hands_family",
"learned_hands_health",
"learned_hands_housing",
"learned_hands_immigration",
"learned_hands_torts",
"learned_hands_traffic",
"legal_reasoning_causality",
"maud_ability_to_consummate_concept_is_subject_to_mae_carveouts",
"maud_financial_point_of_view_is_the_sole_consideration",
"maud_accuracy_of_fundamental_target_rws_bringdown_standard",
"maud_accuracy_of_target_general_rw_bringdown_timing_answer",
"maud_accuracy_of_target_capitalization_rw_(outstanding_shares)_bringdown_standard_answer",
"maud_additional_matching_rights_period_for_modifications_(cor)",
"maud_application_of_buyer_consent_requirement_(negative_interim_covenant)",
"maud_buyer_consent_requirement_(ordinary_course)",
"maud_change_in_law__subject_to_disproportionate_impact_modifier",
"maud_changes_in_gaap_or_other_accounting_principles__subject_to_disproportionate_impact_modifier",
"maud_cor_permitted_in_response_to_intervening_event",
"maud_cor_permitted_with_board_fiduciary_determination_only",
"maud_cor_standard_(intervening_event)",
"maud_cor_standard_(superior_offer)",
"maud_definition_contains_knowledge_requirement_-_answer",
"maud_definition_includes_asset_deals",
"maud_definition_includes_stock_deals",
"maud_fiduciary_exception__board_determination_standard",
"maud_fiduciary_exception_board_determination_trigger_(no_shop)",
"maud_fls_(mae)_standard",
"maud_general_economic_and_financial_conditions_subject_to_disproportionate_impact_modifier",
"maud_includes_consistent_with_past_practice",
"maud_initial_matching_rights_period_(cor)",
"maud_initial_matching_rights_period_(ftr)",
"maud_intervening_event_-_required_to_occur_after_signing_-_answer",
"maud_knowledge_definition",
"maud_liability_standard_for_no-shop_breach_by_target_non-do_representatives",
"maud_ordinary_course_efforts_standard",
"maud_pandemic_or_other_public_health_event__subject_to_disproportionate_impact_modifier",
"maud_pandemic_or_other_public_health_event_specific_reference_to_pandemic-related_governmental_responses_or_measures",
"maud_relational_language_(mae)_applies_to",
"maud_specific_performance",
"maud_tail_period_length",
"maud_type_of_consideration",
"opp115_data_retention",
"opp115_data_security",
"opp115_do_not_track",
"opp115_first_party_collection_use",
"opp115_international_and_specific_audiences",
"opp115_policy_change",
"opp115_third_party_sharing_collection",
"opp115_user_access,_edit_and_deletion",
"opp115_user_choice_control",
"oral_argument_question_purpose",
"overruling",
"personal_jurisdiction",
"privacy_policy_entailment",
"privacy_policy_qa",
"proa",
"rule_qa",
"scalr",
"ssla_company_defendants",
"ssla_individual_defendants",
"ssla_plaintiff",
"sara_entailment",
"sara_numeric",
"successor_liability",
"supply_chain_disclosure_best_practice_accountability",
"supply_chain_disclosure_best_practice_audits",
"supply_chain_disclosure_best_practice_certification",
"supply_chain_disclosure_best_practice_training",
"supply_chain_disclosure_best_practice_verification",
"supply_chain_disclosure_disclosed_accountability",
"supply_chain_disclosure_disclosed_audits",
"supply_chain_disclosure_disclosed_certification",
"supply_chain_disclosure_disclosed_training",
"supply_chain_disclosure_disclosed_verification",
"telemarketing_sales_rule",
"textualism_tool_dictionaries",
"textualism_tool_plain",
"ucc_v_common_law",
"unfair_tos",
]
ISSUE_TASKS = [
"corporate_lobbying",
"learned_hands_benefits",
"learned_hands_business",
"learned_hands_consumer",
"learned_hands_courts",
"learned_hands_crime",
"learned_hands_divorce",
"learned_hands_domestic_violence",
"learned_hands_education",
"learned_hands_employment",
"learned_hands_estates",
"learned_hands_family",
"learned_hands_health",
"learned_hands_housing",
"learned_hands_immigration",
"learned_hands_torts",
"learned_hands_traffic",
]
RULE_TASKS = [
"rule_qa",
"international_citizenship_questions",
"nys_judicial_ethics",
"citation_prediction_classification",
"citation_prediction_open",
]
CONCLUSION_TASKS = [
"abercrombie",
"diversity_1",
"diversity_2",
"diversity_3",
"diversity_4",
"diversity_5",
"diversity_6",
"hearsay",
"personal_jurisdiction",
"successor_liability",
"telemarketing_sales_rule",
"ucc_v_common_law",
]
INTERPRETATION_TASKS = [
"consumer_contracts_qa",
"contract_nli_confidentiality_of_agreement",
"contract_nli_explicit_identification",
"contract_nli_inclusion_of_verbally_conveyed_information",
"contract_nli_limited_use",
"contract_nli_no_licensing",
"contract_nli_notice_on_compelled_disclosure",
"contract_nli_permissible_acquirement_of_similar_information",
"contract_nli_permissible_copy",
"contract_nli_permissible_development_of_similar_information",
"contract_nli_permissible_post-agreement_possession",
"contract_nli_return_of_confidential_information",
"contract_nli_sharing_with_employees",
"contract_nli_sharing_with_third-parties",
"contract_nli_survival_of_obligations",
"contract_qa",
"cuad_affiliate_license-licensee",
"cuad_affiliate_license-licensor",
"cuad_anti-assignment",
"cuad_audit_rights",
"cuad_cap_on_liability",
"cuad_change_of_control",
"cuad_competitive_restriction_exception",
"cuad_covenant_not_to_sue",
"cuad_effective_date",
"cuad_exclusivity",
"cuad_expiration_date",
"cuad_governing_law",
"cuad_insurance",
"cuad_ip_ownership_assignment",
"cuad_irrevocable_or_perpetual_license",
"cuad_joint_ip_ownership",
"cuad_license_grant",
"cuad_liquidated_damages",
"cuad_minimum_commitment",
"cuad_most_favored_nation",
"cuad_no-solicit_of_customers",
"cuad_no-solicit_of_employees",
"cuad_non-compete",
"cuad_non-disparagement",
"cuad_non-transferable_license",
"cuad_notice_period_to_terminate_renewal",
"cuad_post-termination_services",
"cuad_price_restrictions",
"cuad_renewal_term",
"cuad_revenue-profit_sharing",
"cuad_rofr-rofo-rofn",
"cuad_source_code_escrow",
"cuad_termination_for_convenience",
"cuad_third_party_beneficiary",
"cuad_uncapped_liability",
"cuad_unlimited-all-you-can-eat-license",
"cuad_volume_restriction",
"cuad_warranty_duration",
"insurance_policy_interpretation",
"jcrew_blocker",
"maud_ability_to_consummate_concept_is_subject_to_mae_carveouts",
"maud_financial_point_of_view_is_the_sole_consideration",
"maud_accuracy_of_fundamental_target_rws_bringdown_standard",
"maud_accuracy_of_target_general_rw_bringdown_timing_answer",
"maud_accuracy_of_target_capitalization_rw_(outstanding_shares)_bringdown_standard_answer",
"maud_additional_matching_rights_period_for_modifications_(cor)",
"maud_application_of_buyer_consent_requirement_(negative_interim_covenant)",
"maud_buyer_consent_requirement_(ordinary_course)",
"maud_change_in_law__subject_to_disproportionate_impact_modifier",
"maud_changes_in_gaap_or_other_accounting_principles__subject_to_disproportionate_impact_modifier",
"maud_cor_permitted_in_response_to_intervening_event",
"maud_cor_permitted_with_board_fiduciary_determination_only",
"maud_cor_standard_(intervening_event)",
"maud_cor_standard_(superior_offer)",
"maud_definition_contains_knowledge_requirement_-_answer",
"maud_definition_includes_asset_deals",
"maud_definition_includes_stock_deals",
"maud_fiduciary_exception__board_determination_standard",
"maud_fiduciary_exception_board_determination_trigger_(no_shop)",
"maud_fls_(mae)_standard",
"maud_general_economic_and_financial_conditions_subject_to_disproportionate_impact_modifier",
"maud_includes_consistent_with_past_practice",
"maud_initial_matching_rights_period_(cor)",
"maud_initial_matching_rights_period_(ftr)",
"maud_intervening_event_-_required_to_occur_after_signing_-_answer",
"maud_knowledge_definition",
"maud_liability_standard_for_no-shop_breach_by_target_non-do_representatives",
"maud_ordinary_course_efforts_standard",
"maud_pandemic_or_other_public_health_event__subject_to_disproportionate_impact_modifier",
"maud_pandemic_or_other_public_health_event_specific_reference_to_pandemic-related_governmental_responses_or_measures",
"maud_relational_language_(mae)_applies_to",
"maud_specific_performance",
"maud_tail_period_length",
"maud_type_of_consideration",
"opp115_data_retention",
"opp115_data_security",
"opp115_do_not_track",
"opp115_first_party_collection_use",
"opp115_international_and_specific_audiences",
"opp115_policy_change",
"opp115_third_party_sharing_collection",
"opp115_user_access,_edit_and_deletion",
"opp115_user_choice_control",
"privacy_policy_entailment",
"privacy_policy_qa",
"proa",
"ssla_company_defendants",
"ssla_individual_defendants",
"ssla_plaintiff",
"sara_entailment",
"sara_numeric",
"supply_chain_disclosure_best_practice_accountability",
"supply_chain_disclosure_best_practice_audits",
"supply_chain_disclosure_best_practice_certification",
"supply_chain_disclosure_best_practice_training",
"supply_chain_disclosure_best_practice_verification",
"supply_chain_disclosure_disclosed_accountability",
"supply_chain_disclosure_disclosed_audits",
"supply_chain_disclosure_disclosed_certification",
"supply_chain_disclosure_disclosed_training",
"supply_chain_disclosure_disclosed_verification",
"unfair_tos",
]
RHETORIC_TASKS = [
"canada_tax_court_outcomes",
"definition_classification",
"definition_extraction",
"function_of_decision_section",
"legal_reasoning_causality",
"oral_argument_question_purpose",
"overruling",
"scalr",
"textualism_tool_dictionaries",
"textualism_tool_plain",
]
|
legalbench-main
|
tasks.py
|
"""Functions for evaluating model outputs."""
from typing import List
import string
from sklearn.metrics import balanced_accuracy_score
from nltk.stem.porter import PorterStemmer
import re
import numpy as np
# These tasks are evaluated using exact-match balanced-accuracy
EXACT_MATCH_BALANCED_ACC_TASKS = [
"abercrombie",
"canada_tax_court_outcomes",
"citation_prediction_classification",
"consumer_contracts_qa",
"contract_nli_confidentiality_of_agreement",
"contract_nli_explicit_identification",
"contract_nli_inclusion_of_verbally_conveyed_information",
"contract_nli_limited_use",
"contract_nli_no_licensing",
"contract_nli_notice_on_compelled_disclosure",
"contract_nli_permissible_acquirement_of_similar_information",
"contract_nli_permissible_copy",
"contract_nli_permissible_development_of_similar_information",
"contract_nli_permissible_post-agreement_possession",
"contract_nli_return_of_confidential_information",
"contract_nli_sharing_with_employees",
"contract_nli_sharing_with_third-parties",
"contract_nli_survival_of_obligations",
"contract_qa",
"corporate_lobbying",
"cuad_affiliate_license-licensee",
"cuad_affiliate_license-licensor",
"cuad_anti-assignment",
"cuad_audit_rights",
"cuad_cap_on_liability",
"cuad_change_of_control",
"cuad_competitive_restriction_exception",
"cuad_covenant_not_to_sue",
"cuad_effective_date",
"cuad_exclusivity",
"cuad_expiration_date",
"cuad_governing_law",
"cuad_insurance",
"cuad_ip_ownership_assignment",
"cuad_irrevocable_or_perpetual_license",
"cuad_joint_ip_ownership",
"cuad_license_grant",
"cuad_liquidated_damages",
"cuad_minimum_commitment",
"cuad_most_favored_nation",
"cuad_no-solicit_of_customers",
"cuad_no-solicit_of_employees",
"cuad_non-compete",
"cuad_non-disparagement",
"cuad_non-transferable_license",
"cuad_notice_period_to_terminate_renewal",
"cuad_post-termination_services",
"cuad_price_restrictions",
"cuad_renewal_term",
"cuad_revenue-profit_sharing",
"cuad_rofr-rofo-rofn",
"cuad_source_code_escrow",
"cuad_termination_for_convenience",
"cuad_third_party_beneficiary",
"cuad_uncapped_liability",
"cuad_unlimited-all-you-can-eat-license",
"cuad_volume_restriction",
"cuad_warranty_duration",
"definition_classification",
"diversity_1",
"diversity_2",
"diversity_3",
"diversity_4",
"diversity_5",
"diversity_6",
"function_of_decision_section",
"hearsay",
"insurance_policy_interpretation",
"international_citizenship_questions",
"intra_rule_distinguishing",
"jcrew_blocker",
"learned_hands_benefits",
"learned_hands_business",
"learned_hands_consumer",
"learned_hands_courts",
"learned_hands_crime",
"learned_hands_divorce",
"learned_hands_domestic_violence",
"learned_hands_education",
"learned_hands_employment",
"learned_hands_estates",
"learned_hands_family",
"learned_hands_health",
"learned_hands_housing",
"learned_hands_immigration",
"learned_hands_torts",
"learned_hands_traffic",
"legal_reasoning_causality",
"maud_ability_to_consummate_concept_is_subject_to_mae_carveouts",
"maud_financial_point_of_view_is_the_sole_consideration",
"maud_accuracy_of_fundamental_target_rws_bringdown_standard",
"maud_accuracy_of_target_general_rw_bringdown_timing_answer",
"maud_accuracy_of_target_capitalization_rw_(outstanding_shares)_bringdown_standard_answer",
"maud_additional_matching_rights_period_for_modifications_(cor)",
"maud_application_of_buyer_consent_requirement_(negative_interim_covenant)",
"maud_buyer_consent_requirement_(ordinary_course)",
"maud_change_in_law__subject_to_disproportionate_impact_modifier",
"maud_changes_in_gaap_or_other_accounting_principles__subject_to_disproportionate_impact_modifier",
"maud_cor_permitted_in_response_to_intervening_event",
"maud_cor_permitted_with_board_fiduciary_determination_only",
"maud_cor_standard_(intervening_event)",
"maud_cor_standard_(superior_offer)",
"maud_definition_contains_knowledge_requirement_-_answer",
"maud_definition_includes_asset_deals",
"maud_definition_includes_stock_deals",
"maud_fiduciary_exception__board_determination_standard",
"maud_fiduciary_exception_board_determination_trigger_(no_shop)",
"maud_fls_(mae)_standard",
"maud_general_economic_and_financial_conditions_subject_to_disproportionate_impact_modifier",
"maud_includes_consistent_with_past_practice",
"maud_initial_matching_rights_period_(cor)",
"maud_initial_matching_rights_period_(ftr)",
"maud_intervening_event_-_required_to_occur_after_signing_-_answer",
"maud_knowledge_definition",
"maud_liability_standard_for_no-shop_breach_by_target_non-do_representatives",
"maud_ordinary_course_efforts_standard",
"maud_pandemic_or_other_public_health_event__subject_to_disproportionate_impact_modifier",
"maud_pandemic_or_other_public_health_event_specific_reference_to_pandemic-related_governmental_responses_or_measures",
"maud_relational_language_(mae)_applies_to",
"maud_specific_performance",
"maud_tail_period_length",
"maud_type_of_consideration",
"nys_judicial_ethics",
"opp115_data_retention",
"opp115_data_security",
"opp115_do_not_track",
"opp115_first_party_collection_use",
"opp115_international_and_specific_audiences",
"opp115_policy_change",
"opp115_third_party_sharing_collection",
"opp115_user_access,_edit_and_deletion",
"opp115_user_choice_control",
"oral_argument_question_purpose",
"overruling",
"personal_jurisdiction",
"privacy_policy_entailment",
"privacy_policy_qa",
"proa",
"sara_entailment",
"successor_liability",
"scalr",
"supply_chain_disclosure_best_practice_accountability",
"supply_chain_disclosure_best_practice_audits",
"supply_chain_disclosure_best_practice_certification",
"supply_chain_disclosure_best_practice_training",
"supply_chain_disclosure_best_practice_verification",
"supply_chain_disclosure_disclosed_accountability",
"supply_chain_disclosure_disclosed_audits",
"supply_chain_disclosure_disclosed_certification",
"supply_chain_disclosure_disclosed_training",
"supply_chain_disclosure_disclosed_verification",
"telemarketing_sales_rule",
"textualism_tool_dictionaries",
"textualism_tool_plain",
"ucc_v_common_law",
"unfair_tos",
]
# This task needs to be evaluated by hand.
MANUAL_EVAL_TASKS = ["rule_qa"]
def normalize(text: str, stem: bool) -> str:
"""
Normalizes strings.
Args:
- text: text to normalize
- stem: whether or not to apply a stemmer
Returns: normalized text
"""
# Remove punctuation
text = str(text).translate(str.maketrans("", "", string.punctuation))
# Remove extra spaces
text = text.strip()
# Make lower case
text = text.lower()
# Stem
if stem:
text = PorterStemmer().stem(text)
return text
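# Illustrative sketch (not part of the original module): normalize() strips
# punctuation, trims whitespace and lower-cases; with stem=True it additionally
# collapses related word forms via the Porter stemmer. The sample string is made up.
def _example_normalize():
    return normalize("  Overruling!  ", stem=False)  # -> "overruling"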
def evaluate(task: str, generations: List[str], answers: List[str]):
"""
Computes performance metric for task.
Args:
task: name of task
generations: list of LLM outputs
answers: list of correct answers (i.e., ground truth)
Returns: score for generations on given task
"""
if task in EXACT_MATCH_BALANCED_ACC_TASKS:
return evaluate_exact_match_balanced_accuracy(generations, answers)
elif task == "sara_numeric":
return evaluate_sara_numeric_acc(generations, answers)
elif task == "successor_liability":
return evaluate_successor_liability(generations, answers)
elif task == "citation_prediction_open":
return evaluate_citation_open(generations, answers)
elif task == "definition_extraction":
return evaluate_definition_extraction(generations, answers)
elif task.startswith("ssla"):
return evaluate_ssla(generations, answers)
elif task in MANUAL_EVAL_TASKS:
raise Exception("This task needs to be manually evaluated:", task)
else:
raise Exception(f"Unknown task: {task}")
def evaluate_exact_match_balanced_accuracy(generations: List[str], answers: List[str]):
"""
Evaluates exact match using balanced_accuracy.
"""
normalized_answers = [normalize(a, stem=False) for a in answers]
normalized_gens = [normalize(g, stem=False) for g in generations]
return balanced_accuracy_score(normalized_answers, normalized_gens)
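# Illustrative sketch (not part of the original module): a tiny worked example of
# the balanced-accuracy metric above. With gold labels ["Yes", "No", "Yes"] and
# generations ["yes", "no ", "No"], per-class recall is 1/2 for "yes" and 1/1 for
# "no", so the score is (0.5 + 1.0) / 2 = 0.75. The label strings are made up.
def _example_balanced_accuracy():
    return evaluate_exact_match_balanced_accuracy(
        generations=["yes", "no ", "No"], answers=["Yes", "No", "Yes"]
    )  # -> 0.75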
def evaluate_successor_liability(generations: List[str], answers: List[str]):
"""
For successor liability, we measure F1 over the predicted exceptions.
"""
CLASSES = [
"express agreement",
"fraudulent conveyance",
"de facto merger",
"mere continuation",
]
tp, fp, fn = 0, 0, 0
for i in range(len(generations)):
predictions = [c for c in CLASSES if c in str(generations[i])]
sample_answers = str(answers[i]).split(",")
for j in range(len(predictions)):
if predictions[j] in sample_answers:
index = sample_answers.index(predictions[j])
del sample_answers[index]
tp += 1
else:
fp += 1
fn += len(sample_answers)
f1 = 2 * tp / (2 * tp + fp + fn)
return f1
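# Illustrative sketch (not part of the original module): a worked F1 example for
# the exception-spotting task above. The generation mentions only "de facto
# merger" while the gold answer lists "de facto merger,mere continuation", so
# tp=1, fp=0, fn=1 and F1 = 2*1 / (2*1 + 0 + 1) ≈ 0.667. The sample text is made up.
def _example_successor_liability_f1():
    gens = ["The court applied the de facto merger exception."]
    answers = ["de facto merger,mere continuation"]
    return evaluate_successor_liability(gens, answers)  # ≈ 0.667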
def evaluate_sara_numeric_acc(generations: List[str], answers: List[str]):
"""
For sara_numeric, we evaluate the proportion of generations which are within 10%
of the correct answer.
"""
corrects = []
for i in range(len(generations)):
sentence = str(generations[i])
sentence = sentence.replace(",", "").replace(".", "")
prediction = re.search(r"\d+", sentence)
if prediction is None:
prediction = 0
else:
prediction = int(prediction.group())
answer = int(answers[i].replace("$", ""))
correct = int(abs(prediction / (answer + 1e-1) - 1.0) < 0.1)
corrects.append(correct)
return np.mean(corrects)
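# Illustrative sketch (not part of the original module): the within-10% rule
# above in action. The first integer found in the generation (11000) is compared
# against the gold amount (10500); |11000/10500 - 1| ≈ 0.048 < 0.1, so the sample
# counts as correct. The dollar amounts are made up.
def _example_sara_numeric():
    gens = ["The taxpayer owes $11,000 in total."]
    answers = ["$10500"]
    return evaluate_sara_numeric_acc(gens, answers)  # -> 1.0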
def evaluate_definition_extraction(generations: List[str], answers: List[str]):
"""
Evaluates definition extraction task.
Example
-----------------------------------
Extract the term being defined.
Sentence: Although the word “authorize” sometimes means simply “to permit,”
it ordinarily denotes affirmative enabling action. Black's Law Dictionary
122 (5th ed. 1979) defines “authorize” as “[t]o empower; to give a right
or authority to act.”
Possible correct answers:
- authorize
- authorized
For definition_extraction, we normalize both the generation and answers by
applying a stemmer. We then report accuracy over the number of times the generation
matches ground truth.
"""
total = 0
correct = 0
for i in range(len(generations)):
answers_list = answers[i].split(",")
generations_list = generations[i].split(",")
normalized_answers = [normalize(a, stem=True) for a in answers_list]
normalized_gens = [normalize(g, stem=True) for g in generations_list]
total += 1
for gen in normalized_gens:
if gen in normalized_answers:
correct += 1
break
return correct / total
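# Illustrative sketch (not part of the original module): assuming NLTK's
# PorterStemmer maps "authorize" and "authorized" to the same stem, the single
# generation below matches one of the comma-separated gold answers, giving an
# accuracy of 1.0. The strings follow the docstring example above.
def _example_definition_extraction():
    return evaluate_definition_extraction(
        generations=["authorized"], answers=["authorize,authorized"]
    )  # -> 1.0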
def evaluate_citation_open(generations: List[str], answers: List[str]):
"""
For the open citation prediction task, we only check if the correct case name is generated.
We verify this by stripping punctuation, and then checking if the answer is contained in the generation.
NOTE: we do not check if the reporter citation/circuit is correct!
"""
normalized_answers = [normalize(a, stem=False) for a in answers]
normalized_gens = [normalize(g, stem=False) for g in generations]
total, correct = 0, 0
for g, a in zip(normalized_gens, normalized_answers):
total += 1
if a in g:
correct += 1
return correct / total
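# Illustrative sketch (not part of the original module): the check above is a
# case-name containment test after punctuation is stripped, so a generation that
# embeds the gold case name in a longer sentence still counts. The citation text
# is made up.
def _example_citation_open():
    gens = ["The leading case is Miranda v. Arizona, 384 U.S. 436 (1966)."]
    answers = ["Miranda v. Arizona"]
    return evaluate_citation_open(gens, answers)  # -> 1.0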
def evaluate_ssla(generations: List[str], answers: List[str]):
"""
For SSLA evaluation tasks, we measure F1.
"""
tp = 0
fp = 0
fn = 0
for i in range(len(generations)):
answers_split = answers[i].split(",")
generations_split = str(generations[i]).split(",")
normalized_answers = [normalize(a, stem=False) for a in answers_split]
normalized_gens = [normalize(g, stem=False) for g in generations_split]
# For each answer, check if it is contained in any of the generations.
# If it is, delete that generation.
for a in normalized_answers:
found = False
for j in range(len(normalized_gens)):
if a in normalized_gens[j]:
tp += 1
found = True
break
if found:
del normalized_gens[j]
else:
fn += 1
fp += len(normalized_gens)
f1 = 2 * tp / (2 * tp + fp + fn)
return f1
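# Illustrative sketch (not part of the original module): a worked F1 example for
# the containment-based matching above. The gold answer "Acme Corp" is found in
# the first generated span (tp=1), the leftover span "John Smith" counts as a
# false positive, so F1 = 2*1 / (2*1 + 1 + 0) ≈ 0.667. The names and the
# comma-separated field format are assumptions for illustration.
def _example_ssla_f1():
    return evaluate_ssla(generations=["Acme Corp, John Smith"], answers=["Acme Corp"])  # ≈ 0.667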
|
legalbench-main
|
evaluation.py
|
import pandas as pd
from typing import List
def generate_prompts(prompt_template: str, data_df: pd.DataFrame) -> List[str]:
"""
Generates prompts for the rows in data_df using the template prompt_template.
Args:
prompt_template: a prompt template
data_df: pandas dataframe of samples to generate prompts for
Returns:
prompts: a list of prompts corresponding to the rows of data_df
"""
assert (
"{{" in prompt_template
), f"Prompt template has no fields to fill, {prompt_template}"
prompts = []
dicts = data_df.to_dict(orient="records")
for dd in dicts:
prompt = str(prompt_template)
for k, v in dd.items():
prompt = prompt.replace("{{" + k + "}}", str(v))
assert not "{{" in prompt, print(prompt)
prompts.append(prompt)
assert len(set(prompts)) == len(prompts), "Duplicated prompts detected"
return prompts
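# Illustrative sketch (not part of the original module): filling a two-field
# template for a single-row dataframe. The column names "clause" and "label" are
# made up for this example; any column referenced as {{name}} in the template is
# substituted row by row.
def _example_generate_prompts():
    df = pd.DataFrame([{"clause": "Party A shall indemnify Party B.", "label": "Yes"}])
    template = "Clause: {{clause}}\nIs this an indemnification clause? Answer: {{label}}"
    return generate_prompts(template, df)  # one fully substituted prompt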
|
legalbench-main
|
utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
Clinical-Trial-Parser-main
|
src/embedding/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# fastText options for training word embeddings
options = {
"model": "cbow",
"loss": "ns",
"dim": 100,
"ws": 5,
"neg": 5,
"minCount": 5,
"bucket": 2000000,
"t": 1e-5,
"minn": 0,
"maxn": 0,
"lr": 0.05,
"epoch": 20,
"thread": 12,
"verbose": 2,
}
# List of words whose nearest neighbors are computed to inspect the embedding quality
test_words = ["covid-19", "a1c", "cardiomyopathy", "obese", "hemiplegia", "tp53", "cd137", "<", "@NUMBER"]
# Output file extensions
FREQ = ".freq"
VEC = ".vec"
BIN = ".bin"
TMP = ".tmp"
# Vector is approximated by zero if its norm is less than EPS
EPS = 1.0e-6
|
Clinical-Trial-Parser-main
|
src/embedding/param.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Train word embeddings with FastText.
Run: python src/embedding/train_embeddings.py -i <input file> -o <output file>
"""
import os
import fasttext
import numpy as np
from argparse import ArgumentParser
import embedding.param as param
from text.transformer import Transformer
def similarity(v1, v2):
n1 = np.linalg.norm(v1)
n2 = np.linalg.norm(v2)
if n1 < param.EPS or n2 < param.EPS:
return 0
return np.dot(v1, v2) / (n1 * n2)
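# Illustrative sketch (not part of the original script): cosine similarity on toy
# vectors. Orthogonal vectors score 0, parallel vectors score 1, and vectors with
# a norm below param.EPS are treated as zero.
def _example_similarity():
    a = np.array([1.0, 0.0])
    return similarity(a, np.array([0.0, 2.0])), similarity(a, np.array([3.0, 0.0]))  # (0.0, 1.0)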
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"-i",
"--input",
dest="input",
required=True,
help="input file for training word embeddings",
metavar="INPUT",
)
parser.add_argument(
"-o",
"--output",
dest="output",
required=True,
help="output file for trained word embeddings",
metavar="OUTPUT",
)
return parser.parse_args()
def transform_data(input_file, output_file):
xfmr = Transformer()
line_cnt = 0
with open(output_file, "w", encoding="utf-8") as writer:
with open(input_file, "r", encoding="utf-8") as reader:
for line in reader:
writer.write(xfmr.transform(line) + "\n")
line_cnt += 1
print(f"Lines transformed: {line_cnt}")
def train_embeddings(input_file, output_file):
model = fasttext.train_unsupervised(input_file, **param.options)
model.save_model(output_file + param.BIN)
word_writer = open(output_file + param.FREQ, "w", encoding="utf-8")
vec_writer = open(output_file + param.VEC, "w", encoding="utf-8")
words, freqs = model.get_words(include_freq=True)
for w, f in zip(words, freqs):
word_writer.write(f"{w} {f:d}\n")
vec = " ".join(format(v, ".6f") for v in model.get_word_vector(w))
vec_writer.write(f"{w} {vec}\n")
word_writer.close()
vec_writer.close()
def test_embeddings(output_file):
model = fasttext.load_model(output_file + param.BIN)
for w in param.test_words:
nearest_neighbors(w, model)
print()
def nearest_neighbors(w1, model, top=40):
words, freqs = model.get_words(include_freq=True)
v1 = model.get_word_vector(w1)
scores = {}
for w2, f2 in zip(words, freqs):
v2 = model.get_word_vector(w2)
score = similarity(v1, v2)
if score > 0.5:
scores[(w2, f2)] = score
    if len(scores) == 0:
print(f"No nearest neighbors for '{w1}'")
return
for k, v in sorted(scores.items(), key=lambda item: item[1], reverse=True)[:top]:
print(f"{k[0]:20s} {v:.3f} {k[1]:7d}")
def main(input_file, output_file):
tmp_file = output_file + param.TMP
transform_data(input_file, tmp_file)
train_embeddings(tmp_file, output_file)
test_embeddings(output_file)
os.remove(tmp_file)
if __name__ == "__main__":
args = parse_args()
main(args.input, args.output)
|
Clinical-Trial-Parser-main
|
src/embedding/train_embeddings.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from text.transformer import Normalizer
from text.transformer import Transformer
class TestNormalizer(unittest.TestCase):
norm = Normalizer()
def test_less(self):
actual = self.norm.normalize("x <= 4")
expected = "x ≤ 4"
self.assertEqual(actual, expected)
def test_greater(self):
actual = self.norm.normalize("3>x>/=4 ")
expected = "3 > x ≥ 4"
self.assertEqual(actual, expected)
class TestTransformer(unittest.TestCase):
xfrm = Transformer()
def test_transform(self):
actual = self.xfrm.transform("Normal hear, gen 134dtt4d = /> organ. And/or (marrow): Leukocytes ≥3,000/μL.")
expected = "normal hear , gen @NUMBER dtt4d ≥ organ . and / or ( marrow ) : leukocytes ≥ @NUMBER / μl ."
self.assertEqual(actual, expected)
def test_empty(self):
actual = self.xfrm.transform("")
expected = ""
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
|
Clinical-Trial-Parser-main
|
src/text/transformer_test.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
Clinical-Trial-Parser-main
|
src/text/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import re
import nltk
# number or number range
def is_number(s):
try:
float(s.replace(",", "").replace("-", ""))
return True
except ValueError:
return False
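# Illustrative sketch (not part of the original module): is_number() accepts
# comma-grouped numbers and dash-joined ranges because both "," and "-" are
# stripped before the float() check, while ordinary tokens fall through to False.
def _example_is_number():
    return is_number("3,000"), is_number("2-4"), is_number("a1c")  # (True, True, False)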
class Normalizer:
def __init__(self):
self.less_pattern = re.compile(r"[\uFE64\uFF1C\u003C\u227A]")
self.greater_pattern = re.compile(r"[\uFE65\uFF1E\u003E\u227B]")
self.less_eq_pattern = re.compile(r"(<\s*/?\s*=|=\s*/?\s*<)")
self.greater_eq_pattern = re.compile(r"(>\s*/?\s*=|=\s*/?\s*>)")
self.comparison_pattern = re.compile(r"([=<>\u2264\u2265])")
self.space_pattern = re.compile(r"\s+")
def normalize(self, text):
text = text.strip('"')
text = re.sub(self.less_pattern, "<", text)
text = re.sub(self.greater_pattern, ">", text)
        text = text.replace("\u2266", "\u2264")  # map ≦ to ≤ (the raw-string form matched the literal text "\u2266", not the character)
        text = text.replace("\u2267", "\u2265")  # map ≧ to ≥
text = re.sub(self.less_eq_pattern, "\u2264", text)
text = re.sub(self.greater_eq_pattern, "\u2265", text)
text = re.sub(self.comparison_pattern, r" \g<1> ", text)
text = re.sub(self.space_pattern, " ", text)
text = text.strip().lower()
return text
class Tokenizer:
def __init__(self, mask_numbers=True):
self.mask_numbers = mask_numbers
self.subtoken_pattern1 = re.compile("(/)")
self.subtoken_pattern2 = re.compile(r"^(\d+)([a-z].*)")
def tokenize(self, text):
tokens = nltk.tokenize.word_tokenize(text)
tokens = [subtoken for token in tokens for subtoken in re.split(self.subtoken_pattern1, token)]
tokens = [subtoken for token in tokens for subtoken in re.split(self.subtoken_pattern2, token)]
        tokens = [token for token in tokens if token != ""]
if self.mask_numbers:
tokens = ["@NUMBER" if is_number(token) else token for token in tokens]
return tokens
class Transformer:
def __init__(self, mask_numbers=True, download=True):
self.normalizer = Normalizer()
self.tokenizer = Tokenizer(mask_numbers=mask_numbers)
if download:
nltk.download("punkt")
def tokenize(self, text):
text = self.normalizer.normalize(text)
tokens = self.tokenizer.tokenize(text)
return tokens
def transform(self, text):
tokens = self.tokenize(text)
return " ".join(tokens)
|
Clinical-Trial-Parser-main
|
src/text/transformer.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Process annotated eligibility criteria to a format suitable for training
and testing NER models with PyText. Duplicate samples are removed.
Run: python src/ie/process_ner_data.py -i <input file> -o <output file>
For example:
export PYTHONPATH="$(pwd)/src"
python src/ie/process_ner_data.py -i data/ner/medical_ner.tsv -t data/ner/train_processed_medical_ner.tsv -v data/ner/test_processed_medical_ner.tsv
"""
import random
import os.path
from argparse import ArgumentParser
from text.transformer import Transformer
# Text transformer for eligibility criteria
xfmr = Transformer()
# Ratio of samples assigned to the test set
test_ratio = 0.1
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"-i",
"--input",
dest="input",
required=True,
help="input file",
metavar="INPUT",
)
parser.add_argument(
"-t",
"--train",
dest="train",
required=True,
help="output train file",
metavar="TRAIN",
)
parser.add_argument(
"-v",
"--test",
dest="test",
required=True,
help="output test file",
metavar="TEST",
)
return parser.parse_args()
def transform(slots, text):
new_labels = []
new_text = []
previous_hi = 0
for slot in slots:
values = slot.split(":")
lo = int(values[0]) - 1
hi = int(values[1]) - 1
label = values[2]
if lo > previous_hi:
new_text.append(xfmr.transform(text[previous_hi:lo]))
new_labels.append("")
new_text.append(xfmr.transform(text[lo:hi]))
new_labels.append(label)
previous_hi = hi
if previous_hi < len(text):
new_text.append(xfmr.transform(text[previous_hi:]))
new_labels.append("")
new_text = [v.strip() for v in new_text]
text = ""
labels = ""
for i, v in enumerate(new_text):
label = new_labels[i]
if v == "":
if label == "":
continue
else:
print("bad transform:")
print("original: " + labels + "\t" + text)
print("next text: " + " ".join(new_text))
print("new labels: " + " ".join(new_labels))
exit(1)
if i > 0:
text += " "
if label == "":
text += v
else:
lo = len(text) + 1
text += v
hi = len(text) + 1
if labels != "":
labels += ","
labels += f"{lo}:{hi}:{label}"
return labels + "\t" + text
def main(input_file, train_file, test_file):
samples = set()
trials = set()
histo = dict()
line_cnt = 0
slot_cnt = 0
train_cnt = 0
test_cnt = 0
with open(input_file, "r", encoding="utf-8") as reader:
with open(train_file, "w", encoding="utf-8") as train_writer:
with open(test_file, "w", encoding="utf-8") as test_writer:
for line in reader:
line_cnt += 1
values = line.strip().split("\t")
sample = values[1] + "\t" + values[2]
trials.add(values[0])
if sample not in samples:
samples.add(sample)
slots = values[1].split(",")
slot_cnt += len(slots)
new_sample = transform(slots, values[2])
for slot in slots:
label = slot.split(":")[2]
histo[label] = histo.get(label, 0) + 1
if random.random() < test_ratio:
test_writer.write(new_sample + "\n")
test_cnt += 1
else:
train_writer.write(new_sample + "\n")
train_cnt += 1
print(f"Train count: {train_cnt}, test count: {test_cnt} ({100 * test_cnt / len(samples):.1f}%)")
print(f"Lines read: {line_cnt}, slots: {slot_cnt}, samples: {len(samples)}, trials: {len(trials)}")
print()
items = list(histo.items())
items.sort(key=lambda i: i[1], reverse=True)
print("label".ljust(22, " ") + "count")
tot = 0
for k, v in items:
print(f"{k:21s} {v:5d}")
tot += v
print("total".ljust(21, " ") + f"{tot:6d}")
if __name__ == "__main__":
args = parse_args()
main(args.input, args.train, args.test)
|
Clinical-Trial-Parser-main
|
src/ie/process_ner_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
Clinical-Trial-Parser-main
|
src/ie/__init__.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# Extract medical terms from clinical-trial eligibility criteria with a NER model.
#
# python src/ie/ner.py -m <model file> -i <input file> -o <output file>
import json
from itertools import groupby
from argparse import ArgumentParser
import numpy as np
from caffe2.python import workspace
from caffe2.python.predictor import predictor_exporter
from text.transformer import Transformer
xfmr = Transformer()
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"-m",
"--model",
dest="model",
required=True,
help="model bin for NER",
metavar="MODEL",
)
parser.add_argument(
"-i",
"--input",
dest="input",
required=True,
help="input file for embeddings",
metavar="INPUT",
)
parser.add_argument(
"-o",
"--output",
dest="output",
required=True,
help="output file for embeddings",
metavar="OUTPUT",
)
return parser.parse_args()
def tokenize(text, add_bos=False):
tokens = xfmr.tokenize(text)
if add_bos:
tokens = ["__BEGIN_OF_SENTENCE__"] + tokens
return tokens
class OfflinePredictor:
def __init__(self, model_file):
self.predict_net = predictor_exporter.prepare_prediction_net(
model_file, db_type="minidb"
)
def predict_pytext(self, tokens):
        tokens_vals = np.array([tokens], dtype=str)  # np.str was an alias for the builtin str and is removed in newer NumPy
tokens_lens = np.array([len(tokens)], dtype=np.int64)
workspace.FeedBlob("tokens_vals_str:value", tokens_vals)
workspace.FeedBlob("tokens_lens", tokens_lens)
workspace.RunNet(self.predict_net)
prediction = {
str(blob): workspace.blobs[blob]
for blob in self.predict_net.external_outputs
}
return prediction
def predictions_by_word(predictions, split_prediction_text):
word_prediction_array = []
for index, word in enumerate(split_prediction_text, start=0):
predictions_by_label = [
{"score": value_array[0][index][0], "label": label}
for label, value_array in predictions.items()
]
exps_sum = sum(
[np.exp(prediction["score"]) for prediction in predictions_by_label]
)
soft_maxed_predictions_by_label = [
{
"score": np.exp(prediction["score"]) / exps_sum,
"label": prediction["label"],
}
for prediction in predictions_by_label
]
best_prediction = max(soft_maxed_predictions_by_label, key=lambda x: x["score"])
word_prediction_array.append(
{
"word": word,
"label": best_prediction["label"],
"score": best_prediction["score"],
}
)
return word_prediction_array
def group_slots(word_prediction_array):
prev_prediction = word_prediction_array[0]
lumped_predictions = [[prev_prediction]]
del word_prediction_array[0]
for word_prediction in word_prediction_array:
if word_prediction["label"] == prev_prediction["label"]:
prediction_set = lumped_predictions.pop()
else:
prediction_set = []
prediction_set.append(word_prediction)
lumped_predictions.append(prediction_set)
prev_prediction = word_prediction
slots = []
for prediction_set in lumped_predictions:
slot_text = " ".join(map(lambda x: x["word"], prediction_set))
slot_score = np.mean(list(map(lambda x: float(x["score"]), prediction_set)))
slot_label = prediction_set[0]["label"]
slots.append({"text": slot_text, "score": slot_score, "label": slot_label})
grouped_slots = {}
for label, slot in groupby(slots, lambda x: x["label"]):
if label not in grouped_slots:
grouped_slots[label] = []
slot = list(slot)[0]
grouped_slots[label].append([slot["score"], slot["text"]])
return grouped_slots
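# Illustrative sketch (not part of the original script): group_slots merges
# consecutive words that share a label into a single slot whose score is the mean
# of the word scores. The words, labels and scores below are made up; real labels
# come from the model's output blobs.
def _example_group_slots():
    words = [
        {"word": "type", "label": "chronic_disease", "score": 0.90},
        {"word": "2", "label": "chronic_disease", "score": 0.80},
        {"word": "diabetes", "label": "chronic_disease", "score": 0.94},
        {"word": "and", "label": "word_scores:NoLabel", "score": 0.99},
    ]
    # -> {"chronic_disease": [[≈0.88, "type 2 diabetes"]], "word_scores:NoLabel": [[0.99, "and"]]}
    return group_slots(words)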
def predict(offline_predictor, prediction_text):
tokens = tokenize(prediction_text)
predictions = offline_predictor.predict_pytext(tokens)
word_prediction_array = predictions_by_word(predictions, tokens)
return group_slots(word_prediction_array)
def main(model_file, input_file, output_file):
offline_predictor = OfflinePredictor(model_file)
with open(input_file, "r") as reader:
with open(output_file, "w") as writer:
line = reader.readline().strip()
writer.write(line + "\tdetected_slots\n") # header
for line in reader:
fields = line.strip().split("\t")
if len(fields) != 3:
print(f"bad row: {fields}")
continue
grouped_slots = predict(offline_predictor, fields[2])
filtered_grouped_slots = {
slot_name: slots
for slot_name, slots in grouped_slots.items()
if slot_name != "word_scores:NoLabel"
}
writer.write("\t".join(fields + [json.dumps(filtered_grouped_slots)]) + "\n")
if __name__ == "__main__":
args = parse_args()
main(args.model, args.input, args.output)
|
Clinical-Trial-Parser-main
|
src/ie/ner.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import argparse
import datetime
import json
import numpy as np
import os
import time
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import timm
assert timm.__version__ == "0.3.2" # version check
import timm.optim.optim_factory as optim_factory
import util.misc as misc
from util.pos_embed import interpolate_pos_embed, interpolate_pos_embed_audio, interpolate_patch_embed_audio
from util.misc import NativeScalerWithGradNormCount as NativeScaler
import models_mae
from engine_pretrain import train_one_epoch
from dataset import AudiosetDataset
def get_args_parser():
parser = argparse.ArgumentParser('MAE pre-training', add_help=False)
parser.add_argument('--batch_size', default=4, type=int,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
parser.add_argument('--epochs', default=400, type=int)
parser.add_argument('--accum_iter', default=1, type=int,
help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
# Model parameters
parser.add_argument('--model', default='mae_vit_base_patch16', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input_size', default=224, type=int,
help='images input size')
parser.add_argument('--mask_ratio', default=0.8, type=float,
help='Masking ratio (percentage of removed patches).') # 0.75
#parser.add_argument('--norm_pix_loss', action='store_true',
# help='Use (per-patch) normalized pixels as targets for computing loss')
parser.add_argument('--norm_pix_loss', type=bool, default=False, help='Use (per-patch) normalized pixels as targets for computing loss')
# Optimizer parameters
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=1e-3, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0')
parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N',
help='epochs to warmup LR')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--output_dir', default='./output_dir',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default='./output_dir',
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
# For audioset
parser.add_argument('--audio_exp', type=bool, default=True, help='audio exp')
#parser.add_argument("--data_train", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/datafiles/train.json', help="training data json")
#parser.add_argument("--data_eval", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/datafiles/eval.json', help="validation data json")
parser.add_argument("--data_train", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/datafiles/train_video.json', help="training data json")
parser.add_argument("--data_eval", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/datafiles/eval_video.json', help="validation data json")
parser.add_argument("--label_csv", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/class_labels_indices.csv', help="csv with class labels")
parser.add_argument('--freqm', help='frequency mask max length', type=int, default=0) # pretraining 0
parser.add_argument('--timem', help='time mask max length', type=int, default=0) # pretraining 0
parser.add_argument("--mixup", type=float, default=0, help="how many (0-1) samples need to be mixup during training")
parser.add_argument("--dataset", type=str, default="audioset", help="dataset", choices=["audioset", "esc50", "speechcommands"])
parser.add_argument("--use_fbank", type=bool, default=False)
parser.add_argument("--fbank_dir", type=str, default="/checkpoint/berniehuang/ast/egs/esc50/data/ESC-50-master/fbank", help="fbank dir")
parser.add_argument("--alpha", type=float, default=0.0, help="contrastive loss weight")
parser.add_argument("--omega", type=float, default=1.0, help="reconstruction loss weight")
parser.add_argument('--mode', default=0, type=int,help='contrastive mode')
parser.add_argument('--save_every_epoch', default=20, type=int,help='save_every_epoch')
parser.add_argument('--use_custom_patch', type=bool, default=False, help='use custom patch and override timm PatchEmbed')
#parser.add_argument("--distributed", type=bool, default=True)
parser.add_argument('--roll_mag_aug', type=bool, default=False, help='use roll_mag_aug')
parser.add_argument('--split_pos', type=bool, default=False, help='use splitted pos emb')
parser.add_argument('--pos_trainable', type=bool, default=False, help='use trainable pos emb')
parser.add_argument('--use_nce', type=bool, default=False, help='use use_nce')
parser.add_argument('--load_video', type=bool, default=False, help='load video')
parser.add_argument('--decoder_mode', default=1, type=int,help='decoder mode 0: global attn 1: swined local attn')
# remove for A-MAE
#parser.add_argument('--v_weight', default=1.0, type=float, help='reconstruction weight for the visual part')
#parser.add_argument('--video_only', type=bool, default=False, help='video_only pre-training')
#parser.add_argument('--cl', type=bool, default=False, help='use pre-text curriculum')
#parser.add_argument('--n_frm', default=4, type=int,help='how many frames to encode, at least 2 as temporal kernel stride is 2')
#parser.add_argument('--depth_av', default=3, type=int,help='depth of multimodal fusion encoder')
parser.add_argument('--mask_t_prob', default=0.7, type=float, help='ratio of masking time')
parser.add_argument('--mask_f_prob', default=0.3, type=float, help='ratio of masking freq')
parser.add_argument('--mask_2d', type=bool, default=False, help='use 2d masking')
parser.add_argument('--init_audio_with_video_mae', type=bool, default=False, help='init_audio_with_video_mae')
parser.set_defaults(audio_exp=True)
parser.add_argument('--no_shift', type=bool, default=False, help='no_shift')
# set norm_pix_loss=True for normal training, norm_pix_loss=False for visualization
parser.set_defaults(norm_pix_loss=True)
return parser
def main(args):
misc.init_distributed_mode(args)
print('======================= starting pretrain =======================')
print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
print("{}".format(args).replace(', ', ',\n'))
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + misc.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# simple augmentation
if not args.audio_exp:
transform_train = transforms.Compose([
transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3), # 3 is bicubic
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
else:
norm_stats = {'audioset':[-4.2677393, 4.5689974], 'esc50':[-6.6268077, 5.358466], 'speechcommands':[-6.845978, 5.5654526]}
target_length = {'audioset':1024, 'esc50':512, 'speechcommands':128}
multilabel_dataset = {'audioset': True, 'esc50': False, 'k400': False, 'speechcommands': True}
audio_conf = {'num_mel_bins': 128,
'target_length': target_length[args.dataset],
'freqm': args.freqm,
'timem': args.timem,
'mixup': args.mixup,
'dataset': args.dataset,
'mode':'train',
'mean':norm_stats[args.dataset][0],
'std':norm_stats[args.dataset][1],
'multilabel':multilabel_dataset[args.dataset],
'noise':False}
dataset_train = AudiosetDataset(args.data_train, label_csv=args.label_csv, audio_conf=audio_conf, roll_mag_aug=args.roll_mag_aug,
load_video=args.load_video)
#print(dataset_train)
if True: # args.distributed:
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
# define the model
if args.audio_exp:
model = models_mae.__dict__[args.model](norm_pix_loss=args.norm_pix_loss,
in_chans=1, audio_exp=True,
img_size=(target_length[args.dataset],128),
alpha=args.alpha, mode=args.mode, use_custom_patch=args.use_custom_patch,
split_pos=args.split_pos, pos_trainable=args.pos_trainable, use_nce=args.use_nce,
decoder_mode=args.decoder_mode,
mask_2d=args.mask_2d, mask_t_prob=args.mask_t_prob, mask_f_prob=args.mask_f_prob,
no_shift=args.no_shift,
# remove for A-MAE
#v_weight=args.v_weight, n_frm=args.n_frm, video_only=args.video_only, cl=args.cl, depth_av=args.depth_av,
)
else:
model = models_mae.__dict__[args.model](norm_pix_loss=args.norm_pix_loss)
model.to(device)
model_without_ddp = model
print("Model = %s" % str(model_without_ddp))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
print('use distributed!!')
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
# following timm: set wd as 0 for bias and norm layers
param_groups = optim_factory.add_weight_decay(model_without_ddp, args.weight_decay)
optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
print(optimizer)
loss_scaler = NativeScaler()
misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
model, data_loader_train,
optimizer, device, epoch, loss_scaler,
log_writer=log_writer,
args=args
)
if args.output_dir and (epoch % args.save_every_epoch == 0 or epoch + 1 == args.epochs):
misc.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch,}
if args.output_dir and misc.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
args = get_args_parser()
args = args.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
AudioMAE-main
|
main_pretrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# A script to run multinode training with submitit.
# --------------------------------------------------------
import argparse
import os
import uuid
from pathlib import Path
#import main_finetune as classification
import main_finetune_as as classification
import submitit
def parse_args():
classification_parser = classification.get_args_parser()
parser = argparse.ArgumentParser("Submitit for MAE finetune", parents=[classification_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs")
parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
#import main_finetune as classification
import main_finetune_as as classification
self._setup_gpu_args()
classification.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.log_dir = self.args.output_dir
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min,
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="mae")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
# print("Submitted job_id:", job.job_id)
print(job.job_id)
if __name__ == "__main__":
main()
|
AudioMAE-main
|
submitit_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
from functools import partial
import torch
import torch.nn as nn
import numpy as np
import timm.models.vision_transformer
from timm.models.vision_transformer import PatchEmbed, Block
from util.patch_embed import PatchEmbed_new, PatchEmbed3D_new
class VisionTransformer(timm.models.vision_transformer.VisionTransformer):
""" Vision Transformer with support for global average pooling
"""
def __init__(self, global_pool=False, mask_2d=True, use_custom_patch=False, **kwargs):
super(VisionTransformer, self).__init__(**kwargs)
self.global_pool = global_pool
if self.global_pool:
norm_layer = kwargs['norm_layer']
embed_dim = kwargs['embed_dim']
self.fc_norm = norm_layer(embed_dim)
del self.norm # remove the original norm
self.mask_2d = mask_2d
self.use_custom_patch = use_custom_patch
num_heads=12
depth=12
mlp_ratio=4
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
x = x + self.pos_embed[:, 1:, :]
cls_token = self.cls_token + self.pos_embed[:, :1, :]
cls_tokens = cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
if self.global_pool:
x = x[:, 1:, :].mean(dim=1) # global pool without cls token
outcome = self.fc_norm(x)
else:
x = self.norm(x)
outcome = x[:, 0]
return outcome
def random_masking(self, x, mask_ratio):
"""
Perform per-sample random masking by per-sample shuffling.
Per-sample shuffling is done by argsort random noise.
x: [N, L, D], sequence
"""
N, L, D = x.shape # batch, length, dim
len_keep = int(L * (1 - mask_ratio))
noise = torch.rand(N, L, device=x.device) # noise in [0, 1]
# sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=1)
# keep the first subset
ids_keep = ids_shuffle[:, :len_keep]
x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
# generate the binary mask: 0 is keep, 1 is remove
mask = torch.ones([N, L], device=x.device)
mask[:, :len_keep] = 0
# unshuffle to get the binary mask
mask = torch.gather(mask, dim=1, index=ids_restore)
return x_masked, mask, ids_restore
def random_masking_2d(self, x, mask_t_prob, mask_f_prob):
"""
        2D: Spectrogram (masking t and f under mask_t_prob and mask_f_prob)
Perform per-sample random masking by per-sample shuffling.
Per-sample shuffling is done by argsort random noise.
x: [N, L, D], sequence
"""
N, L, D = x.shape # batch, length, dim
if self.use_custom_patch:
# # for AS
T=101 #64,101
F=12 #8,12
# # for ESC
# T=50
# F=12
# for SPC
# T=12
# F=12
else:
# ## for AS
T=64
F=8
# ## for ESC
#T=32
#F=8
## for SPC
# T=8
# F=8
# mask T
x = x.reshape(N, T, F, D)
len_keep_T = int(T * (1 - mask_t_prob))
noise = torch.rand(N, T, device=x.device) # noise in [0, 1]
# sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove
ids_keep = ids_shuffle[:, :len_keep_T]
index = ids_keep.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, F, D)
#x_masked = torch.gather(x, dim=1, index=index)
#x_masked = x_masked.reshape(N,len_keep_T*F,D)
x = torch.gather(x, dim=1, index=index) # N, len_keep_T(T'), F, D
# mask F
#x = x.reshape(N, T, F, D)
x = x.permute(0,2,1,3) # N T' F D => N F T' D
len_keep_F = int(F * (1 - mask_f_prob))
noise = torch.rand(N, F, device=x.device) # noise in [0, 1]
# sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove
ids_keep = ids_shuffle[:, :len_keep_F]
#index = ids_keep.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, T, D)
index = ids_keep.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, len_keep_T, D)
x_masked = torch.gather(x, dim=1, index=index)
x_masked = x_masked.permute(0,2,1,3) # N F' T' D => N T' F' D
#x_masked = x_masked.reshape(N,len_keep*T,D)
x_masked = x_masked.reshape(N,len_keep_F*len_keep_T,D)
return x_masked, None, None
def forward_features_mask(self, x, mask_t_prob, mask_f_prob):
B = x.shape[0] #4,1,1024,128
x = self.patch_embed(x) # 4, 512, 768
x = x + self.pos_embed[:, 1:, :]
        if self.mask_2d:  # dispatch on the mask_2d flag set in __init__; the bound method itself is always truthy
x, mask, ids_restore = self.random_masking_2d(x, mask_t_prob, mask_f_prob)
else:
x, mask, ids_restore = self.random_masking(x, mask_t_prob)
cls_token = self.cls_token + self.pos_embed[:, :1, :]
cls_tokens = cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = self.pos_drop(x)
# apply Transformer blocks
for blk in self.blocks:
x = blk(x)
if self.global_pool:
x = x[:, 1:, :].mean(dim=1) # global pool without cls token
outcome = self.fc_norm(x)
else:
x = self.norm(x)
outcome = x[:, 0]
return outcome
# overwrite original timm
def forward(self, x, v=None, mask_t_prob=0.0, mask_f_prob=0.0):
if mask_t_prob > 0.0 or mask_f_prob > 0.0:
x = self.forward_features_mask(x, mask_t_prob=mask_t_prob, mask_f_prob=mask_f_prob)
else:
x = self.forward_features(x)
x = self.head(x)
return x
def vit_small_patch16(**kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base_patch16(**kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_large_patch16(**kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_huge_patch14(**kwargs):
model = VisionTransformer(
patch_size=14, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
|
AudioMAE-main
|
models_vit.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import argparse
import datetime
import json
import numpy as np
import os
import time
from pathlib import Path
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import timm
assert timm.__version__ == "0.3.2" # version check
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
import util.lr_decay as lrd
import util.misc as misc
from util.datasets import build_dataset
from util.pos_embed import interpolate_pos_embed
from util.misc import NativeScalerWithGradNormCount as NativeScaler
import models_vit
from engine_finetune import train_one_epoch, evaluate
from dataset import AudiosetDataset
from timm.models.vision_transformer import PatchEmbed
def get_args_parser():
parser = argparse.ArgumentParser('MAE fine-tuning for image classification', add_help=False)
parser.add_argument('--batch_size', default=64, type=int,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--accum_iter', default=1, type=int,
help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
# Model parameters
parser.add_argument('--model', default='vit_base_patch16', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input_size', default=224, type=int,
help='images input size')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
# Optimizer parameters
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=1e-3, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--layer_decay', type=float, default=0.75,
help='layer-wise lr decay from ELECTRA/BEiT')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
help='Color jitter factor (enabled only when not using Auto/RandAug)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--global_pool', action='store_true')
parser.set_defaults(global_pool=True)
parser.add_argument('--cls_token', action='store_false', dest='global_pool',
help='Use class token instead of global pool for classification')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--nb_classes', default=1000, type=int,
help='number of the classification types')
parser.add_argument('--output_dir', default='./output_dir',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default='./output_dir',
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
                        help='Enable distributed evaluation (recommended during training for faster monitoring)')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
# For audioset
parser.add_argument('--audio_exp', action='store_true', help='audio exp')
parser.add_argument("--data_train", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/datafiles/train.json', help="training data json")
parser.add_argument("--data_eval", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/datafiles/train.json', help="validation data json")
parser.add_argument("--label_csv", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/class_labels_indices.csv', help="csv with class labels")
parser.add_argument('--freqm', help='frequency mask max length', type=int, default=96)
parser.add_argument('--timem', help='time mask max length', type=int, default=24)
#parser.add_argument("--mixup", type=float, default=0, help="how many (0-1) samples need to be mixup during training")
parser.add_argument("--dataset", type=str, default="audioset", help="the dataset used", choices=["audioset", "esc50", "speechcommands"])
parser.add_argument("--use_fbank", type=bool, default=False)
parser.add_argument("--fbank_dir", type=str, default="/checkpoint/berniehuang/ast/egs/esc50/data/ESC-50-master/fbank", help="fbank dir")
parser.set_defaults(audio_exp=True)
return parser
def main(args):
misc.init_distributed_mode(args)
print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
print("{}".format(args).replace(', ', ',\n'))
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + misc.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
if not args.audio_exp:
dataset_train = build_dataset(is_train=True, args=args)
dataset_val = build_dataset(is_train=False, args=args)
else:
norm_stats = {'audioset':[-4.2677393, 4.5689974], 'esc50':[-6.6268077, 5.358466], 'speechcommands':[-6.845978, 5.5654526]}
target_length = {'audioset':1024, 'esc50':512, 'speechcommands':128}
multilabel_dataset = {'audioset': True, 'esc50': False, 'k400': False, 'speechcommands': True}
audio_conf_train = {'num_mel_bins': 128,
'target_length': target_length[args.dataset],
'freqm': args.freqm,
'timem': args.timem,
'mixup': args.mixup,
'dataset': args.dataset,
'mode':'train',
'mean':norm_stats[args.dataset][0],
'std':norm_stats[args.dataset][1],
'multilabel':multilabel_dataset[args.dataset],
'noise':False}
audio_conf_val = {'num_mel_bins': 128,
'target_length': target_length[args.dataset],
'freqm': 0,
'timem': 0,
'mixup': 0,
'dataset': args.dataset,
'mode':'val',
'mean':norm_stats[args.dataset][0],
'std':norm_stats[args.dataset][1],
'multilabel':multilabel_dataset[args.dataset],
'noise':False}
dataset_train = AudiosetDataset(args.data_train, label_csv=args.label_csv, audio_conf=audio_conf_train, use_fbank=args.use_fbank, fbank_dir=args.fbank_dir)
dataset_val = AudiosetDataset(args.data_eval, label_csv=args.label_csv, audio_conf=audio_conf_val, use_fbank=args.use_fbank, fbank_dir=args.fbank_dir)
if True: #args.distributed:
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None and not args.eval:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = models_vit.__dict__[args.model](
num_classes=args.nb_classes,
drop_path_rate=args.drop_path,
global_pool=args.global_pool,
)
if args.audio_exp:
img_size=(target_length[args.dataset],128) # 1024, 128
in_chans=1
if args.use_custom_patch:
model.patch_embed = PatchEmbed_new(img_size=img_size, patch_size=16, in_chans=1, embed_dim=768, stride=10)
model.pos_embed = nn.Parameter(torch.zeros(1, 1212 + 1, 768), requires_grad=False) # fixed sin-cos embedding
else:
model.patch_embed = PatchEmbed(img_size, 16, in_chans, 768) # no overlap. stride=img_size=16
num_patches = model.patch_embed.num_patches
num_patches = 512 # assume audioset, 1024//16=64, 128//16=8, 512=64x8
model.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, 768), requires_grad=False) # fixed sin-cos embedding
if args.finetune and not args.eval:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load pre-trained checkpoint from: %s" % args.finetune)
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
if 0: #not args.audio_exp:
interpolate_pos_embed(model, checkpoint_model)
else: # override for audio_exp for now
# imgnet: 14,14
# audioset size: (8,64)
# esc size: (8,32)
# only needed when pre-train/fine-tune shapes mismatch, e.g. when loading an imgnet-pretrained model
#interpolate_patch_embed_audio(model, checkpoint_model, orig_channel=3, new_channel=1)
# when imgnet-pt(224/16,224/16) to audioset(128//16,1024//16)
if args.use_custom_patch:
# imgnet-pt
#interpolate_pos_embed_audio(model, checkpoint_model, orig_size=(14,14), new_size=(12,101))
if args.source_custom_patch:
pass
else:
interpolate_pos_embed_audio(model, checkpoint_model, orig_size=(8,64), new_size=(12,101))
else:
# imgnet-pt
#interpolate_pos_embed_audio(model, checkpoint_model, orig_size=(14,14), new_size=(8,64))
if args.source_custom_patch:
# don't do this
#interpolate_pos_embed_audio(model, checkpoint_model, orig_size=(12,101), new_size=(8,64))
pass
else:
# with 16 stride, don't need to do anything
#interpolate_pos_embed_audio(model, checkpoint_model, orig_size=(8,64), new_size=(8,64))
pass
# when audioset-pt(128/16,1024/16) to audioset-ft(128//10,1024//10) (change to overlap) # try avoiding this
#interpolate_pos_embed_audio(model, checkpoint_model, orig_size=(8,64), new_size=(12,101))
# load pre-trained model
msg = model.load_state_dict(checkpoint_model, strict=False)
print(msg)
#if args.global_pool:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
#else:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
# manually initialize fc layer
trunc_normal_(model.head.weight, std=2e-5)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
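# linear lr scaling rule: the absolute lr is derived from the base lr scaled by effective batch size / 256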
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
# build optimizer with layer-wise lr decay (lrd)
param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay,
no_weight_decay_list=model_without_ddp.no_weight_decay(),
layer_decay=args.layer_decay
)
optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
#optimizer = torch.optim.Adam(param_groups, args.lr, weight_decay=5e-7, betas=(0.95, 0.999))
loss_scaler = NativeScaler()
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
exit(0)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, mixup_fn,
log_writer=log_writer,
args=args
)
if args.output_dir:
misc.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch)
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats["acc1"])
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and misc.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
args = get_args_parser()
args = args.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
AudioMAE-main
|
main_finetune_esc.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy
import util.misc as misc
import util.lr_sched as lr_sched
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
mixup_fn: Optional[Mixup] = None, log_writer=None,
args=None):
model.train(True)
metric_logger = misc.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 20
accum_iter = args.accum_iter
optimizer.zero_grad()
if log_writer is not None:
print('log_dir: {}'.format(log_writer.log_dir))
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# we use a per iteration (instead of per epoch) lr scheduler
if data_iter_step % accum_iter == 0:
lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
outputs = model(samples)
loss = criterion(outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
loss /= accum_iter
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=False,
update_grad=(data_iter_step + 1) % accum_iter == 0)
if (data_iter_step + 1) % accum_iter == 0:
optimizer.zero_grad()
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
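# param groups can carry different lrs (e.g. with layer-wise lr decay), so track the min/max across groups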
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
loss_value_reduce = misc.all_reduce_mean(loss_value)
if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
""" We use epoch_1000x as the x-axis in tensorboard.
This calibrates different curves when batch size changes.
"""
epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
log_writer.add_scalar('lr', max_lr, epoch_1000x)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = misc.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
AudioMAE-main
|
engine_finetune.py
|
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# AST: https://github.com/YuanGongND/ast
# --------------------------------------------------------
import csv, os, sys
import json
import torchaudio
import numpy as np
import torch
import torch.nn.functional
from torch.utils.data import Dataset, Sampler
from torch.utils.data import DistributedSampler, WeightedRandomSampler
import torch.distributed as dist
import random
import math
class DistributedSamplerWrapper(DistributedSampler):
def __init__(
self, sampler, dataset,
num_replicas=None,
rank=None,
shuffle: bool = True):
super(DistributedSamplerWrapper, self).__init__(
dataset, num_replicas, rank, shuffle)
# source: @awaelchli https://github.com/PyTorchLightning/pytorch-lightning/issues/3238
self.sampler = sampler
def __iter__(self):
if self.sampler.generator is None:
self.sampler.generator = torch.Generator()
self.sampler.generator.manual_seed(self.seed + self.epoch)
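# lazily create and seed the wrapped sampler's generator from (seed, epoch) so all ranks draw the same weighted ordering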
indices = list(self.sampler)
if self.epoch == 0:
print(f"\n DistributedSamplerWrapper : {indices[:10]} \n\n")
indices = indices[self.rank:self.total_size:self.num_replicas]
return iter(indices)
class DistributedWeightedSampler(Sampler):
#dataset_train, samples_weight, num_replicas=num_tasks, rank=global_rank
def __init__(self, dataset, weights, num_replicas=None, rank=None, replacement=True, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.replacement = replacement
self.weights = torch.from_numpy(weights)
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
# # get targets (you can alternatively pass them in __init__, if this op is expensive)
# targets = self.dataset.targets
# # select only the wanted targets for this subsample
# targets = torch.tensor(targets)[indices]
# assert len(targets) == self.num_samples
# # randomly sample this subset, producing balanced classes
# weights = self.calculate_weights(targets)
weights = self.weights[indices]
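# draw num_samples indices from this rank's shard, weighted by the per-sample weights (class balancing)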
subsample_balanced_indices = torch.multinomial(weights, self.num_samples, self.replacement)
# now map these target indices back to the original dataset index...
dataset_indices = torch.tensor(indices)[subsample_balanced_indices]
return iter(dataset_indices.tolist())
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
def make_index_dict(label_csv):
index_lookup = {}
with open(label_csv, 'r') as f:
csv_reader = csv.DictReader(f)
line_count = 0
for row in csv_reader:
index_lookup[row['mid']] = row['index']
line_count += 1
return index_lookup
def make_name_dict(label_csv):
name_lookup = {}
with open(label_csv, 'r') as f:
csv_reader = csv.DictReader(f)
line_count = 0
for row in csv_reader:
name_lookup[row['index']] = row['display_name']
line_count += 1
return name_lookup
def lookup_list(index_list, label_csv):
label_list = []
table = make_name_dict(label_csv)
for item in index_list:
label_list.append(table[item])
return label_list
class AudiosetDataset(Dataset):
def __init__(self, dataset_json_file, audio_conf, label_csv=None, use_fbank=False, fbank_dir=None, roll_mag_aug=False, load_video=False, mode='train'):
"""
Dataset that manages audio recordings
:param audio_conf: Dictionary containing the audio loading and preprocessing settings
:param dataset_json_file: path to the json file listing the audio clips and their labels
"""
self.datapath = dataset_json_file
with open(dataset_json_file, 'r') as fp:
data_json = json.load(fp)
self.use_fbank = use_fbank
self.fbank_dir = fbank_dir
self.data = data_json['data']
self.audio_conf = audio_conf
print('---------------the {:s} dataloader---------------'.format(self.audio_conf.get('mode')))
if 'multilabel' in self.audio_conf.keys():
self.multilabel = self.audio_conf['multilabel']
else:
self.multilabel = False
print(f'multilabel: {self.multilabel}')
self.melbins = self.audio_conf.get('num_mel_bins')
self.freqm = self.audio_conf.get('freqm')
self.timem = self.audio_conf.get('timem')
print('using following mask: {:d} freq, {:d} time'.format(self.audio_conf.get('freqm'), self.audio_conf.get('timem')))
self.mixup = self.audio_conf.get('mixup')
print('using mix-up with rate {:f}'.format(self.mixup))
self.dataset = self.audio_conf.get('dataset')
self.norm_mean = self.audio_conf.get('mean')
self.norm_std = self.audio_conf.get('std')
print('Dataset: {}, mean {:.3f} and std {:.3f}'.format(self.dataset, self.norm_mean, self.norm_std))
self.noise = self.audio_conf.get('noise')
if self.noise:
print('now use noise augmentation')
self.index_dict = make_index_dict(label_csv)
self.label_num = len(self.index_dict)
self.roll_mag_aug=roll_mag_aug
print(f'number of classes: {self.label_num}')
print(f'size of dataset {self.__len__()}')
def _roll_mag_aug(self, waveform):
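# roll the waveform by a random circular shift and scale its magnitude by a factor drawn from Beta(10, 10) + 0.5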
waveform=waveform.numpy()
idx=np.random.randint(len(waveform))
rolled_waveform=np.roll(waveform,idx)
mag = np.random.beta(10, 10) + 0.5
return torch.Tensor(rolled_waveform*mag)
def _wav2fbank(self, filename, filename2=None):
if filename2 is None:
waveform, sr = torchaudio.load(filename)
waveform = waveform - waveform.mean()
if self.roll_mag_aug:
waveform = self._roll_mag_aug(waveform)
# mixup
else:
waveform1, sr = torchaudio.load(filename)
waveform2, _ = torchaudio.load(filename2)
waveform1 = waveform1 - waveform1.mean()
waveform2 = waveform2 - waveform2.mean()
if self.roll_mag_aug:
waveform1 = self._roll_mag_aug(waveform1)
waveform2 = self._roll_mag_aug(waveform2)
if waveform1.shape[1] != waveform2.shape[1]:
if waveform1.shape[1] > waveform2.shape[1]:
# padding
temp_wav = torch.zeros(1, waveform1.shape[1])
temp_wav[0, 0:waveform2.shape[1]] = waveform2
waveform2 = temp_wav
else:
# cutting
waveform2 = waveform2[0, 0:waveform1.shape[1]]
# sample lambda from a beta distribution
mix_lambda = np.random.beta(10, 10)
mix_waveform = mix_lambda * waveform1 + (1 - mix_lambda) * waveform2
waveform = mix_waveform - mix_waveform.mean()
# fbank shape is [n_frames, num_mel_bins], e.g. 498x128 or 998x128 depending on clip length
fbank = torchaudio.compliance.kaldi.fbank(waveform, htk_compat=True, sample_frequency=sr, use_energy=False,
window_type='hanning', num_mel_bins=self.melbins, dither=0.0, frame_shift=10)
# target_length in frames, e.g. 512 for ESC-50, 1024 for AudioSet
target_length = self.audio_conf.get('target_length')
n_frames = fbank.shape[0]
p = target_length - n_frames
# cut and pad
if p > 0:
m = torch.nn.ZeroPad2d((0, 0, 0, p))
fbank = m(fbank)
elif p < 0:
fbank = fbank[0:target_length, :]
if filename2 is None:
return fbank, 0
else:
return fbank, mix_lambda
def _fbank(self, filename, filename2=None):
if filename2 is None:
fn1 = os.path.join(self.fbank_dir, os.path.basename(filename).replace('.wav','.npy'))
fbank = np.load(fn1)
return torch.from_numpy(fbank), 0
else:
fn1 = os.path.join(self.fbank_dir, os.path.basename(filename).replace('.wav','.npy'))
fn2 = os.path.join(self.fbank_dir, os.path.basename(filename2).replace('.wav','.npy'))
# sample lambda from a beta distribution
mix_lambda = np.random.beta(10, 10)
fbank = mix_lambda * np.load(fn1) + (1-mix_lambda) * np.load(fn2)
return torch.from_numpy(fbank), mix_lambda
def __getitem__(self, index):
"""
returns: fbank, label_indices, wav_path
where fbank is a FloatTensor of size (1, target_length, num_mel_bins)
label_indices is a multi-hot FloatTensor (or an int class index for single-label fine-tuning)
wav_path is the path of the underlying audio file
"""
# do mix-up for this sample (controlled by the given mixup rate)
if random.random() < self.mixup: # for audio_exp, when using mixup, assume multilabel
datum = self.data[index]
# find another sample to mix, also do balance sampling
# sampling the other clip from the multinomial (weighted) distribution was found to hurt performance
# mix_sample_idx = np.random.choice(len(self.data), p=self.sample_weight_file)
# sample the other sample from the uniform distribution
mix_sample_idx = random.randint(0, len(self.data)-1)
mix_datum = self.data[mix_sample_idx]
# get the mixed fbank
if not self.use_fbank:
fbank, mix_lambda = self._wav2fbank(datum['wav'], mix_datum['wav'])
else:
fbank, mix_lambda = self._fbank(datum['wav'], mix_datum['wav'])
# initialize the label
label_indices = np.zeros(self.label_num)
# add sample 1 labels
for label_str in datum['labels'].split(','):
label_indices[int(self.index_dict[label_str])] += mix_lambda
# add sample 2 labels
for label_str in mix_datum['labels'].split(','):
label_indices[int(self.index_dict[label_str])] += 1.0-mix_lambda
label_indices = torch.FloatTensor(label_indices)
# if not do mixup
else:
datum = self.data[index]
label_indices = np.zeros(self.label_num)
if not self.use_fbank:
fbank, mix_lambda = self._wav2fbank(datum['wav'])
else:
fbank, mix_lambda = self._fbank(datum['wav'])
for label_str in datum['labels'].split(','):
label_indices[int(self.index_dict[label_str])] = 1.0
if self.multilabel:
label_indices = torch.FloatTensor(label_indices)
else:
# remark : for ft cross-ent
label_indices = int(self.index_dict[label_str])
# SpecAug for training (not for eval)
freqm = torchaudio.transforms.FrequencyMasking(self.freqm)
timem = torchaudio.transforms.TimeMasking(self.timem)
fbank = fbank.transpose(0,1).unsqueeze(0) # 1, 128, 1024 (...,freq,time)
if self.freqm != 0:
fbank = freqm(fbank)
if self.timem != 0:
fbank = timem(fbank) # (..., freq, time)
fbank = torch.transpose(fbank.squeeze(), 0, 1) # time, freq
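# normalize with the dataset-level mean/std (note the std is scaled by 2)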
fbank = (fbank - self.norm_mean) / (self.norm_std * 2)
if self.noise: # default is False; set True for speechcommands (spc)
fbank = fbank + torch.rand(fbank.shape[0], fbank.shape[1]) * np.random.rand() / 10
fbank = torch.roll(fbank, np.random.randint(-10, 10), 0)
# the output fbank shape is [time_frame_num, frequency_bins], e.g., [1024, 128]
return fbank.unsqueeze(0), label_indices, datum['wav']
def __len__(self):
return len(self.data)
|
AudioMAE-main
|
dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import math
import sys
from typing import Iterable
import torch
import util.misc as misc
import util.lr_sched as lr_sched
def train_one_epoch(model: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler,
log_writer=None,
args=None):
model.train(True)
metric_logger = misc.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 200
accum_iter = args.accum_iter
optimizer.zero_grad()
if log_writer is not None:
print('log_dir: {}'.format(log_writer.log_dir))
# set model epoch
model.epoch = epoch
for data_iter_step, (samples, _labels, _vids) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# we use a per iteration (instead of per epoch) lr scheduler
if data_iter_step % accum_iter == 0:
lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
#print(samples.shape)# 64x3x224x224 for img, 64x1x512x128 for audio
samples = samples.to(device, non_blocking=True)
# comment out when not debugging
# from fvcore.nn import FlopCountAnalysis, parameter_count_table
# if data_iter_step == 1:
# flops = FlopCountAnalysis(model, samples)
# print(flops.total())
# print(parameter_count_table(model))
with torch.cuda.amp.autocast():
loss_a, _, _, _ = model(samples, mask_ratio=args.mask_ratio)
loss_value = loss_a.item()
loss_total = loss_a
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
#loss /= accum_iter
loss_total = loss_total / accum_iter
loss_scaler(loss_total, optimizer, parameters=model.parameters(),
update_grad=(data_iter_step + 1) % accum_iter == 0)
if (data_iter_step + 1) % accum_iter == 0:
optimizer.zero_grad()
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
lr = optimizer.param_groups[0]["lr"]
metric_logger.update(lr=lr)
loss_value_reduce = misc.all_reduce_mean(loss_value)
if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
""" We use epoch_1000x as the x-axis in tensorboard.
This calibrates different curves when batch size changes.
"""
epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
log_writer.add_scalar('lr', lr, epoch_1000x)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
AudioMAE-main
|
engine_pretrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import math
import sys
from typing import Iterable, Optional
import numpy as np
import torch
from timm.data import Mixup
from timm.utils import accuracy
import util.misc as misc
import util.lr_sched as lr_sched
from util.stat import calculate_stats, concat_all_gather
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
mixup_fn: Optional[Mixup] = None, log_writer=None,
args=None):
model.train(True)
metric_logger = misc.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 500
accum_iter = args.accum_iter
optimizer.zero_grad()
if log_writer is not None:
print('log_dir: {}'.format(log_writer.log_dir))
for data_iter_step, (samples, targets, _vids) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# we use a per iteration (instead of per epoch) lr scheduler
if data_iter_step % accum_iter == 0:
lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
with torch.cuda.amp.autocast():
outputs = model(samples, mask_t_prob=args.mask_t_prob, mask_f_prob=args.mask_f_prob)
loss = criterion(outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
loss /= accum_iter
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=False,
update_grad=(data_iter_step + 1) % accum_iter == 0)
if (data_iter_step + 1) % accum_iter == 0:
optimizer.zero_grad()
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
loss_value_reduce = misc.all_reduce_mean(loss_value)
if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
""" We use epoch_1000x as the x-axis in tensorboard.
This calibrates different curves when batch size changes.
"""
epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
log_writer.add_scalar('lr', max_lr, epoch_1000x)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device, dist_eval=False):
criterion = torch.nn.BCEWithLogitsLoss()
metric_logger = misc.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
outputs=[]
targets=[]
vids=[]
for batch in metric_logger.log_every(data_loader, 300, header):
images = batch[0]
target = batch[1]
vid = batch[2]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
# remark:
# 1. use concat_all_gather together with --dist_eval for faster eval by distributing the load over gpus
# 2. otherwise, comment out the concat_all_gather calls and remove --dist_eval (each gpu then evaluates on its own)
if dist_eval:
output = concat_all_gather(output)
target = concat_all_gather(target)
outputs.append(output)
targets.append(target)
vids.append(vid)
outputs=torch.cat(outputs).cpu().numpy()
targets=torch.cat(targets).cpu().numpy()
vids = [j for sub in vids for j in sub]
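# save raw outputs, targets and clip ids for offline analysis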
np.save('inf_output.npy', {'vids':vids, 'embs_527':outputs, 'targets':targets})
stats = calculate_stats(outputs, targets)
AP = [stat['AP'] for stat in stats]
mAP = np.mean([stat['AP'] for stat in stats])
print("mAP: {:.6f}".format(mAP))
return {"mAP": mAP, "AP": AP}
|
AudioMAE-main
|
engine_finetune_as.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# A script to run multinode training with submitit.
# --------------------------------------------------------
import argparse
import os
import uuid
from pathlib import Path
import main_linprobe as classification
import submitit
def parse_args():
classification_parser = classification.get_args_parser()
parser = argparse.ArgumentParser("Submitit for MAE linear probe", parents=[classification_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs")
parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
# Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main_linprobe as classification
self._setup_gpu_args()
classification.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.log_dir = self.args.output_dir
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min,
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="mae")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
# print("Submitted job_id:", job.job_id)
print(job.job_id)
if __name__ == "__main__":
main()
|
AudioMAE-main
|
submitit_linprobe.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
from functools import partial
from json import encoder
import torch
import torch.nn as nn
#from timm.models.vision_transformer import PatchEmbed, Block
from timm.models.vision_transformer import Block
from util.pos_embed import get_2d_sincos_pos_embed, get_2d_sincos_pos_embed_flexible, get_1d_sincos_pos_embed_from_grid
from util.misc import concat_all_gather
from util.patch_embed import PatchEmbed_new, PatchEmbed_org
from timm.models.swin_transformer import SwinTransformerBlock
class MaskedAutoencoderViT(nn.Module):
""" Masked Autoencoder with VisionTransformer backbone
"""
def __init__(self, img_size=224, patch_size=16, stride=10, in_chans=3,
embed_dim=1024, depth=24, num_heads=16,
decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False,
audio_exp=False, alpha=0.0, temperature=.2, mode=0, contextual_depth=8,
use_custom_patch=False, split_pos=False, pos_trainable=False, use_nce=False, beta=4.0, decoder_mode=0,
mask_t_prob=0.6, mask_f_prob=0.5, mask_2d=False,
epoch=0, no_shift=False,
):
super().__init__()
self.audio_exp=audio_exp
self.embed_dim = embed_dim
self.decoder_embed_dim = decoder_embed_dim
# --------------------------------------------------------------------------
# MAE encoder specifics
if use_custom_patch:
print(f'Use custom patch_emb with patch size: {patch_size}, stride: {stride}')
self.patch_embed = PatchEmbed_new(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, stride=stride)
else:
self.patch_embed = PatchEmbed_org(img_size, patch_size, in_chans, embed_dim)
self.use_custom_patch = use_custom_patch
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
#self.split_pos = split_pos # not useful
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=pos_trainable) # fixed sin-cos embedding
self.encoder_depth = depth
self.contextual_depth = contextual_depth
self.blocks = nn.ModuleList([
Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# --------------------------------------------------------------------------
# MAE decoder specifics
self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)
self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))
self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim), requires_grad=pos_trainable) # fixed sin-cos embedding
self.no_shift=no_shift
self.decoder_mode = decoder_mode
if self.use_custom_patch: # overlapped patches as in AST. Similar performance yet compute heavy
window_size= (6,6)
feat_size = (102,12)
else:
window_size= (4,4)
feat_size = (64,8)
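# decoder_mode 1 builds the decoder from shifted-window (Swin) Transformer blocks; other modes use standard ViT blocks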
if self.decoder_mode == 1:
decoder_modules = []
for index in range(16):
if self.no_shift:
shift_size = (0,0)
else:
if (index % 2) == 0:
shift_size = (0,0)
else:
shift_size = (2,0)
#shift_size = tuple([0 if ((index % 2) == 0) else w // 2 for w in window_size])
decoder_modules.append(
SwinTransformerBlock(
dim=decoder_embed_dim,
num_heads=16,
feat_size=feat_size,
window_size=window_size,
shift_size=shift_size,
mlp_ratio=mlp_ratio,
drop=0.0,
drop_attn=0.0,
drop_path=0.0,
extra_norm=False,
sequential_attn=False,
norm_layer=norm_layer, #nn.LayerNorm,
)
)
self.decoder_blocks = nn.ModuleList(decoder_modules)
else:
# Transformer decoder
self.decoder_blocks = nn.ModuleList([
Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer)
for i in range(decoder_depth)])
self.decoder_norm = norm_layer(decoder_embed_dim)
self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size**2 * in_chans, bias=True) # decoder to patch
# --------------------------------------------------------------------------
self.norm_pix_loss = norm_pix_loss
self.patch_size=patch_size
self.stride=stride
# audio exps
self.alpha = alpha
self.T = temperature
self.mode = mode
self.use_nce = use_nce
self.beta = beta
self.log_softmax=nn.LogSoftmax(dim=-1)
self.mask_t_prob=mask_t_prob
self.mask_f_prob=mask_f_prob
self.mask_2d=mask_2d
self.epoch = epoch
self.initialize_weights()
def initialize_weights(self):
# initialization
# initialize (and freeze) pos_embed by sin-cos embedding
if self.audio_exp:
pos_embed = get_2d_sincos_pos_embed_flexible(self.pos_embed.shape[-1], self.patch_embed.patch_hw, cls_token=True)
else:
pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
if self.audio_exp:
decoder_pos_embed = get_2d_sincos_pos_embed_flexible(self.decoder_pos_embed.shape[-1], self.patch_embed.patch_hw, cls_token=True)
else:
decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))
# initialize patch_embed like nn.Linear (instead of nn.Conv2d)
w = self.patch_embed.proj.weight.data
torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
# timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
torch.nn.init.normal_(self.cls_token, std=.02)
torch.nn.init.normal_(self.mask_token, std=.02)
# initialize nn.Linear and nn.LayerNorm
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
# we use xavier_uniform following official JAX ViT:
torch.nn.init.xavier_uniform_(m.weight)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def patchify(self, imgs):
"""
imgs: (N, 3, H, W)
x: (N, L, patch_size**2 *3)
L = (H/p)*(W/p)
"""
p = self.patch_embed.patch_size[0]
#assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
if self.audio_exp:
if self.use_custom_patch: # overlapped patch
h,w = self.patch_embed.patch_hw
# todo: fixed h/w patch size and stride size. Make hw custom in the future
x = imgs.unfold(2, self.patch_size, self.stride).unfold(3, self.patch_size, self.stride) # n,1,H,W -> n,1,h,w,p,p
x = x.reshape(shape=(imgs.shape[0], h*w, p**2 * 1))
#x = imgs.reshape(shape=(imgs.shape[0], 1, h, p, w, p))
#x = torch.einsum('nchpwq->nhwpqc', x)
#x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 1))
else:
h = imgs.shape[2] // p
w = imgs.shape[3] // p
#h,w = self.patch_embed.patch_hw
x = imgs.reshape(shape=(imgs.shape[0], 1, h, p, w, p))
x = torch.einsum('nchpwq->nhwpqc', x)
x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 1))
else:
h = w = imgs.shape[2] // p
x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
x = torch.einsum('nchpwq->nhwpqc', x)
x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3))
return x
def unpatchify(self, x):
"""
x: (N, L, patch_size**2 *3)
specs: (N, 1, H, W)
"""
p = self.patch_embed.patch_size[0]
h = 1024//p
w = 128//p
x = x.reshape(shape=(x.shape[0], h, w, p, p, 1))
x = torch.einsum('nhwpqc->nchpwq', x)
specs = x.reshape(shape=(x.shape[0], 1, h * p, w * p))
return specs
def random_masking(self, x, mask_ratio):
"""
Perform per-sample random masking by per-sample shuffling.
Per-sample shuffling is done by argsort random noise.
x: [N, L, D], sequence
"""
N, L, D = x.shape # batch, length, dim
len_keep = int(L * (1 - mask_ratio))
noise = torch.rand(N, L, device=x.device) # noise in [0, 1]
# sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=1)
# keep the first subset
ids_keep = ids_shuffle[:, :len_keep]
x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
# generate the binary mask: 0 is keep, 1 is remove
mask = torch.ones([N, L], device=x.device)
mask[:, :len_keep] = 0
# unshuffle to get the binary mask
mask = torch.gather(mask, dim=1, index=ids_restore)
return x_masked, mask, ids_restore
def random_masking_2d(self, x, mask_t_prob, mask_f_prob):
"""
2D: Spectrogram (masking t and f under mask_t_prob and mask_f_prob)
Perform per-sample random masking by per-sample shuffling.
Per-sample shuffling is done by argsort random noise.
x: [N, L, D], sequence
"""
N, L, D = x.shape # batch, length, dim
if self.use_custom_patch: # overlapped patch
T=101
F=12
else:
T=64
F=8
#x = x.reshape(N, T, F, D)
len_keep_t = int(T * (1 - mask_t_prob))
len_keep_f = int(F * (1 - mask_f_prob))
# noise for mask in time
noise_t = torch.rand(N, T, device=x.device) # noise in [0, 1]
# sort noise for each sample along time
ids_shuffle_t = torch.argsort(noise_t, dim=1) # ascend: small is keep, large is remove
ids_restore_t = torch.argsort(ids_shuffle_t, dim=1)
ids_keep_t = ids_shuffle_t[:,:len_keep_t]
# noise mask in freq
noise_f = torch.rand(N, F, device=x.device) # noise in [0, 1]
ids_shuffle_f = torch.argsort(noise_f, dim=1) # ascend: small is keep, large is remove
ids_restore_f = torch.argsort(ids_shuffle_f, dim=1)
ids_keep_f = ids_shuffle_f[:,:len_keep_f] #
# generate the binary mask: 0 is keep, 1 is remove
# mask in freq
mask_f = torch.ones(N, F, device=x.device)
mask_f[:,:len_keep_f] = 0
mask_f = torch.gather(mask_f, dim=1, index=ids_restore_f).unsqueeze(1).repeat(1,T,1) # N,T,F
# mask in time
mask_t = torch.ones(N, T, device=x.device)
mask_t[:,:len_keep_t] = 0
mask_t = torch.gather(mask_t, dim=1, index=ids_restore_t).unsqueeze(1).repeat(1,F,1).permute(0,2,1) # N,T,F
mask = 1-(1-mask_t)*(1-mask_f) # N, T, F
# get masked x
id2res=torch.Tensor(list(range(N*T*F))).reshape(N,T,F).to(x.device)
id2res = id2res + 999*mask # add a large value for masked elements
id2res2 = torch.argsort(id2res.flatten(start_dim=1))
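# argsort puts unmasked positions (small ids) first, so the leading len_keep_t*len_keep_f entries index the kept patches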
ids_keep=id2res2.flatten(start_dim=1)[:,:len_keep_f*len_keep_t]
x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
ids_restore = torch.argsort(id2res2.flatten(start_dim=1))
mask = mask.flatten(start_dim=1)
return x_masked, mask, ids_restore
def forward_encoder(self, x, mask_ratio, mask_2d=False):
# embed patches
x = self.patch_embed(x)
# add pos embed w/o cls token
x = x + self.pos_embed[:, 1:, :]
# masking: length -> length * mask_ratio
if mask_2d:
x, mask, ids_restore = self.random_masking_2d(x, mask_t_prob=self.mask_t_prob, mask_f_prob=self.mask_f_prob)
else:
x, mask, ids_restore = self.random_masking(x, mask_ratio)
# append cls token
cls_token = self.cls_token + self.pos_embed[:, :1, :]
cls_tokens = cls_token.expand(x.shape[0], -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# apply Transformer blocks
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
#emb = self.encoder_emb(x)
return x, mask, ids_restore, None
def forward_encoder_no_mask(self, x):
# embed patches
x = self.patch_embed(x)
# add pos embed w/o cls token
x = x + self.pos_embed[:, 1:, :]
# masking: length -> length * mask_ratio
#x, mask, ids_restore = self.random_masking(x, mask_ratio)
# append cls token
cls_token = self.cls_token + self.pos_embed[:, :1, :]
cls_tokens = cls_token.expand(x.shape[0], -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# apply Transformer blocks
contextual_embs=[]
for n, blk in enumerate(self.blocks):
x = blk(x)
if n > self.contextual_depth:
contextual_embs.append(self.norm(x))
#x = self.norm(x)
contextual_emb = torch.stack(contextual_embs,dim=0).mean(dim=0)
return contextual_emb
def forward_decoder(self, x, ids_restore):
# embed tokens
x = self.decoder_embed(x)
# append mask tokens to sequence
mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1) # no cls token
x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) # unshuffle
x = torch.cat([x[:, :1, :], x_], dim=1) # append cls token
# add pos embed
x = x + self.decoder_pos_embed
if self.decoder_mode != 0:
B,L,D=x.shape
x = x[:,1:,:]
if self.use_custom_patch:
x = x.reshape(B,101,12,D)
x = torch.cat([x,x[:,-1,:].unsqueeze(1)],dim=1) # hack: pad the time axis from 101 to 102 rows so the sequence partitions evenly for the Swin decoder
x = x.reshape(B,1224,D)
if self.decoder_mode > 3: # mvit
x = self.decoder_blocks(x)
else:
# apply Transformer blocks
for blk in self.decoder_blocks:
x = blk(x)
x = self.decoder_norm(x)
# predictor projection
pred = self.decoder_pred(x)
# remove cls token
if self.decoder_mode != 0:
if self.use_custom_patch:
pred = pred.reshape(B,102,12,256)
pred = pred[:,:101,:,:]
pred = pred.reshape(B,1212,256)
else:
pred = pred
else:
pred = pred[:, 1:, :]
return pred, None, None #emb, emb_pixel
def forward_loss(self, imgs, pred, mask, norm_pix_loss=False):
"""
imgs: [N, 3, H, W]
pred: [N, L, p*p*3]
mask: [N, L], 0 is keep, 1 is remove,
"""
target = self.patchify(imgs)
if norm_pix_loss:
mean = target.mean(dim=-1, keepdim=True)
var = target.var(dim=-1, keepdim=True)
target = (target - mean) / (var + 1.e-6)**.5
loss = (pred - target) ** 2
loss = loss.mean(dim=-1) # [N, L], mean loss per patch
loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches
return loss
def forward(self, imgs, mask_ratio=0.8):
emb_enc, mask, ids_restore, _ = self.forward_encoder(imgs, mask_ratio, mask_2d=self.mask_2d)
pred, _, _ = self.forward_decoder(emb_enc, ids_restore) # [N, L, p*p*3]
loss_recon = self.forward_loss(imgs, pred, mask, norm_pix_loss=self.norm_pix_loss)
loss_contrastive = torch.FloatTensor([0.0]).cuda()
return loss_recon, pred, mask, loss_contrastive
def mae_vit_small_patch16_dec512d8b(**kwargs):
model = MaskedAutoencoderViT(
patch_size=16, embed_dim=384, depth=12, num_heads=6,
decoder_embed_dim=512, decoder_num_heads=16,
mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def mae_vit_base_patch16_dec512d8b(**kwargs):
model = MaskedAutoencoderViT(
patch_size=16, embed_dim=768, depth=12, num_heads=12,
decoder_embed_dim=512, decoder_num_heads=16,
mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def mae_vit_large_patch16_dec512d8b(**kwargs):
model = MaskedAutoencoderViT(
patch_size=16, embed_dim=1024, depth=24, num_heads=16,
decoder_embed_dim=512, decoder_num_heads=16,
mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def mae_vit_huge_patch14_dec512d8b(**kwargs):
model = MaskedAutoencoderViT(
patch_size=14, embed_dim=1280, depth=32, num_heads=16,
decoder_embed_dim=512, decoder_num_heads=16,
mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
# set recommended archs
mae_vit_base_patch16 = mae_vit_base_patch16_dec512d8b # decoder: 512 dim, 8 blocks
mae_vit_large_patch16 = mae_vit_large_patch16_dec512d8b # decoder: 512 dim, 8 blocks
mae_vit_huge_patch14 = mae_vit_huge_patch14_dec512d8b # decoder: 512 dim, 8 blocks
mae_vit_small_patch16 = mae_vit_small_patch16_dec512d8b # decoder: 512 dim, 8 blocks
|
AudioMAE-main
|
models_mae.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import argparse
import datetime
import json
import numpy as np
import os
import time
from pathlib import Path
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import timm
assert timm.__version__ == "0.3.2" # version check
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.models.layers import to_2tuple
import util.lr_decay as lrd
import util.misc as misc
from util.datasets import build_dataset
from util.pos_embed import interpolate_pos_embed, interpolate_pos_embed_audio, interpolate_patch_embed_audio, interpolate_pos_embed_img2audio
from util.misc import NativeScalerWithGradNormCount as NativeScaler
import models_vit
from engine_finetune_as import train_one_epoch, evaluate #, train_one_epoch_av, evaluate_av
from dataset import AudiosetDataset, DistributedWeightedSampler, DistributedSamplerWrapper
from timm.models.vision_transformer import PatchEmbed
from torch.utils.data import WeightedRandomSampler
def get_args_parser():
parser = argparse.ArgumentParser('MAE fine-tuning for image classification', add_help=False)
parser.add_argument('--batch_size', default=4, type=int,
help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
parser.add_argument('--epochs', default=60, type=int)
parser.add_argument('--accum_iter', default=1, type=int,
help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
# Model parameters
parser.add_argument('--model', default='vit_base_patch16', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input_size', default=224, type=int,
help='images input size')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
# Optimizer parameters
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--weight_decay', type=float, default=0.0005,
help='weight decay (default: 0.0005)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=2e-3, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--layer_decay', type=float, default=0.75,
help='layer-wise lr decay from ELECTRA/BEiT')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0')
parser.add_argument('--warmup_epochs', type=int, default=4, metavar='N',
help='epochs to warmup LR')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
help='Color jitter factor (enabled only when not using Auto/RandAug)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.5,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--global_pool', action='store_true')
parser.set_defaults(global_pool=True)
parser.add_argument('--cls_token', action='store_false', dest='global_pool',
help='Use class token instead of global pool for classification')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--nb_classes', default=527, type=int,
help='number of the classification types')
parser.add_argument('--output_dir', default='./output_dir',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default='./output_dir',
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
help='Enabling distributed evaluation (recommended during training for faster monitoring)')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
# For audioset
parser.add_argument('--audio_exp', action='store_true', help='audio exp')
parser.add_argument("--data_train", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/datafiles/train_video.json', help="training data json")
parser.add_argument("--data_eval", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/datafiles/eval_video.json', help="validation data json")
parser.add_argument("--label_csv", type=str, default='/checkpoint/berniehuang/ast/egs/audioset/data/class_labels_indices.csv', help="csv with class labels")
parser.add_argument("--weight_csv", type=str, default='/checkpoint/berniehuang/mae/data/audioset/weight_train_all.csv', help="weight file")
parser.add_argument('--freqm', help='frequency mask max length', type=int, default=192)
parser.add_argument('--timem', help='time mask max length', type=int, default=48)
#parser.add_argument("--mixup", type=float, default=0, help="how many (0-1) samples need to be mixup during training")
parser.add_argument("--dataset", type=str, default="audioset", help="the dataset used", choices=["audioset", "esc50", "speechcommands", "k400"])
parser.add_argument("--use_fbank", type=bool, default=False)
parser.add_argument("--use_soft", type=bool, default=False)
parser.add_argument("--fbank_dir", type=str, default="/checkpoint/berniehuang/ast/egs/esc50/data/ESC-50-master/fbank", help="fbank dir")
parser.set_defaults(audio_exp=True)
#parser.add_argument("--distributed", type=bool, default=True)
parser.add_argument('--first_eval_ep', default=0, type=int, help='do eval after first_eval_ep')
parser.add_argument('--use_custom_patch', type=bool, default=False, help='use custom patch with overlapping and override timm PatchEmbed')
parser.add_argument('--source_custom_patch', type=bool, default=False, help='the pre-trained model already use custom patch')
parser.add_argument('--roll_mag_aug', type=bool, default=False, help='use roll_mag_aug')
parser.add_argument('--mask_t_prob', default=0.0, type=float, help='T masking ratio (percentage of removed patches).') #
parser.add_argument('--mask_f_prob', default=0.0, type=float, help='F masking ratio (percentage of removed patches).') #
#parser.add_argument('--split_pos', type=bool, default=False, help='use splitted pos emb')
parser.add_argument('--weight_sampler', type=bool, default=False, help='use weight_sampler')
parser.add_argument('--epoch_len', default=200000, type=int, help='num of samples/epoch with weight_sampler')
parser.add_argument('--distributed_wrapper', type=bool, default=False, help='use distributedwrapper for weighted sampler')
parser.add_argument('--replacement', type=bool, default=False, help='sample with replacement in the weighted sampler')
parser.add_argument('--mask_2d', type=bool, default=True, help='use 2d masking')
parser.add_argument('--load_video', type=bool, default=False, help='load video')
parser.add_argument('--av_fusion', type=bool, default=False, help='use audio-visual fusion')
parser.add_argument('--n_frm', default=6, type=int, help='num of frames for video')
parser.add_argument('--replace_with_mae', type=bool, default=False, help='replace_with_mae')
parser.add_argument('--load_imgnet_pt', type=bool, default=False, help='when fine-tuning from an imgnet-pretrained checkpoint: if set, use it to initialize the audio branch; otherwise keep the audio branch randomly initialized')
return parser
class PatchEmbed_new(nn.Module):
""" Flexible Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, stride=10):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
stride = to_2tuple(stride)
self.img_size = img_size
self.patch_size = patch_size
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride) # with overlapped patches
#self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
#self.patch_hw = (img_size[1] // patch_size[1], img_size[0] // patch_size[0])
#self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
_, _, h, w = self.get_output_shape(img_size) # n, emb_dim, h, w
self.patch_hw = (h, w)
self.num_patches = h*w
def get_output_shape(self, img_size):
# todo: don't be lazy..
return self.proj(torch.randn(1,1,img_size[0],img_size[1])).shape
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
#assert H == self.img_size[0] and W == self.img_size[1], \
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
x = x.flatten(2).transpose(1, 2)
return x
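# Illustrative sketch (not part of the original script, assuming the torch import at
# the top of this file): shape check for the overlapped patch embedding above on a
# 1024x128 log-mel spectrogram. Kernel 16 with stride 10 yields a 101x12 grid, i.e.
# 1212 patches, matching the 1212+1 positional embedding set up in main().
def _sketch_patch_embed_shapes():
    emb = PatchEmbed_new(img_size=(1024, 128), patch_size=16, in_chans=1, embed_dim=768, stride=10)
    x = torch.randn(2, 1, 1024, 128)   # (batch, channel, time, mel)
    out = emb(x)                       # -> (2, 1212, 768)
    assert emb.num_patches == 1212 and out.shape == (2, 1212, 768)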
def main(args):
misc.init_distributed_mode(args)
print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
print("{}".format(args).replace(', ', ',\n'))
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + misc.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
if not args.audio_exp:
dataset_train = build_dataset(is_train=True, args=args)
dataset_val = build_dataset(is_train=False, args=args)
else:
norm_stats = {'audioset':[-4.2677393, 4.5689974], 'k400':[-4.2677393, 4.5689974],
'esc50':[-6.6268077, 5.358466], 'speechcommands':[-6.845978, 5.5654526]}
target_length = {'audioset':1024, 'k400':1024, 'esc50':512, 'speechcommands':128}
multilabel_dataset = {'audioset': True, 'esc50': False, 'k400': False, 'speechcommands': True}
audio_conf_train = {'num_mel_bins': 128,
'target_length': target_length[args.dataset],
'freqm': 48,
'timem': 192,
'mixup': args.mixup,
'dataset': args.dataset,
'mode':'train',
'mean':norm_stats[args.dataset][0],
'std':norm_stats[args.dataset][1],
'noise':False,
'multilabel':multilabel_dataset[args.dataset],
}
audio_conf_val = {'num_mel_bins': 128,
'target_length': target_length[args.dataset],
'freqm': 0,
'timem': 0,
'mixup': 0,
'dataset': args.dataset,
'mode':'val',
'mean':norm_stats[args.dataset][0],
'std':norm_stats[args.dataset][1],
'noise':False,
'multilabel':multilabel_dataset[args.dataset],
}
dataset_train = AudiosetDataset(args.data_train, label_csv=args.label_csv, audio_conf=audio_conf_train,
use_fbank=args.use_fbank, fbank_dir=args.fbank_dir,
roll_mag_aug=args.roll_mag_aug, load_video=args.load_video, mode='train')
dataset_val = AudiosetDataset(args.data_eval, label_csv=args.label_csv, audio_conf=audio_conf_val,
use_fbank=args.use_fbank, fbank_dir=args.fbank_dir,
roll_mag_aug=False, load_video=args.load_video, mode='eval')
if True: #args.distributed:
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
num_nodes = int(os.environ.get('num_nodes', 1))
ddp = int(os.environ.get('DDP', 1))
num_nodes = max(ddp, num_nodes)
rank = int(os.environ.get('NODE_RANK', 0))
print(f"num_nodes:{num_nodes}, rank:{rank}, ddp:{ddp}, num_tasks:{num_tasks}, global_rank:{global_rank}")
# num_nodes:1, rank:0, ddp:1, num_tasks:8, global_rank:0 (sbatch)
if args.weight_sampler:
samples_weight = np.loadtxt(args.weight_csv, delimiter=',')
if args.distributed_wrapper:
print('use distributed_wrapper sampler')
epoch_len=args.epoch_len #200000 #=> 250000
#epoch_len=21000 # AS-20K
# replacement should be False
sampler_train = DistributedSamplerWrapper(
sampler=WeightedRandomSampler(samples_weight, num_samples=epoch_len, replacement=args.replacement),
dataset=range(epoch_len),
num_replicas=num_tasks, #num_nodes, #num_tasks?
rank=global_rank, #rank, # global_rank?
)
else:
#sampler_train = WeightedRandomSampler(samples_weight, len(samples_weight), replacement=True)
sampler_train = DistributedWeightedSampler(dataset_train, samples_weight, num_replicas=num_tasks, rank=global_rank, replacement=args.replacement)
else:
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None and not args.eval:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = models_vit.__dict__[args.model](
num_classes=args.nb_classes,
drop_path_rate=args.drop_path,
global_pool=args.global_pool,
mask_2d=args.mask_2d,
use_custom_patch=args.use_custom_patch,
## remove video part for A-MAE
#load_video=args.load_video,
# n_frm=args.n_frm,
#split_pos=args.split_pos,
#av_fusion=args.av_fusion,
)
if args.audio_exp:
img_size=(target_length[args.dataset],128) # 1024, 128
in_chans=1
emb_dim = 768
if args.model == "vit_small_patch16":
emb_dim = 384
if args.use_custom_patch:
model.patch_embed = PatchEmbed_new(img_size=img_size, patch_size=16, in_chans=1, embed_dim=emb_dim, stride=10)
model.pos_embed = nn.Parameter(torch.zeros(1, 1212 + 1, emb_dim), requires_grad=False) # fixed sin-cos embedding
else:
model.patch_embed = PatchEmbed_new(img_size=img_size, patch_size=(16,16), in_chans=1, embed_dim=emb_dim, stride=16) # no overlap. stride=img_size=16
num_patches = model.patch_embed.num_patches
#num_patches = 512 # assume audioset, 1024//16=64, 128//16=8, 512=64x8
model.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, emb_dim), requires_grad=False) # fixed sin-cos embedding
#if args.finetune and not args.eval:
if args.finetune:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load pre-trained checkpoint from: %s" % args.finetune)
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
if not args.eval:
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# load pre-trained model
msg = model.load_state_dict(checkpoint_model, strict=False)
print(msg)
# manually initialize fc layer
if not args.eval:
trunc_normal_(model.head.weight, std=2e-5)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
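# Linear lr scaling rule with illustrative numbers: blr=1e-3, batch_size=8,
# accum_iter=4 and 8 GPUs give eff_batch_size = 8*4*8 = 256, so lr stays at 1e-3;
# doubling the number of GPUs would double the actual lr to 2e-3.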
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
# build optimizer with layer-wise lr decay (lrd)
param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay,
no_weight_decay_list=model_without_ddp.no_weight_decay(),
layer_decay=args.layer_decay
)
optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
loss_scaler = NativeScaler()
if args.use_soft:
criterion = SoftTargetCrossEntropy()
else:
criterion = nn.BCEWithLogitsLoss() # works better
print("criterion = %s" % str(criterion))
misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
if args.eval:
test_stats = evaluate(data_loader_val, model, device, args.dist_eval)
with open('aps.txt', 'w') as fp:
aps=test_stats['AP']
aps=[str(ap) for ap in aps]
fp.write('\n'.join(aps))
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['mAP']:.4f}")
exit(0)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_mAP = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
# if args.load_video:
# train_stats = train_one_epoch_av(
# model, criterion, data_loader_train,
# optimizer, device, epoch, loss_scaler,
# args.clip_grad, mixup_fn,
# log_writer=log_writer,
# args=args
# )
# else:
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, mixup_fn,
log_writer=log_writer,
args=args
)
if args.output_dir:
misc.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch)
if epoch >= args.first_eval_ep:
test_stats = evaluate(data_loader_val, model, device, args.dist_eval)
print(f"mAP of the network on the {len(dataset_val)} test images: {test_stats['mAP']:.4f}")
max_mAP = max(max_mAP, test_stats["mAP"])
print(f'Max mAP: {max_mAP:.4f}')
else:
test_stats = {'mAP': 0.0}
print('epoch < first_eval_ep, skipping evaluation')
if log_writer is not None:
log_writer.add_scalar('perf/mAP', test_stats['mAP'], epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and misc.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
args = get_args_parser()
args = args.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
AudioMAE-main
|
main_finetune_as.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# A script to run multinode training with submitit.
# --------------------------------------------------------
import argparse
import os
import uuid
from pathlib import Path
import main_pretrain as trainer
import submitit
def parse_args():
trainer_parser = trainer.get_args_parser()
parser = argparse.ArgumentParser("Submitit for MAE pretrain", parents=[trainer_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs")
parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
# Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main_pretrain as trainer
self._setup_gpu_args()
trainer.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.log_dir = self.args.output_dir
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="mae")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
# print("Submitted job_id:", job.job_id)
print(job.job_id)
if __name__ == "__main__":
main()
|
AudioMAE-main
|
submitit_pretrain.py
|
import logging
import math
from copy import deepcopy
from typing import Tuple, Optional, List, Union, Any, Type
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from .layers import DropPath, to_2tuple
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = to_2tuple(drop)
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor:
"""Permutes a tensor from the shape (B, C, H, W) to (B, H, W, C). """
return x.permute(0, 2, 3, 1)
def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor:
"""Permutes a tensor from the shape (B, H, W, C) to (B, C, H, W). """
return x.permute(0, 3, 1, 2)
def window_partition(x, window_size: Tuple[int, int]):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]):
"""
Args:
windows: (num_windows * B, window_size[0], window_size[1], C)
window_size (Tuple[int, int]): Window size
img_size (Tuple[int, int]): Image size
Returns:
x: (B, H, W, C)
"""
H, W = img_size
B = int(windows.shape[0] / (H * W / window_size[0] / window_size[1]))
x = windows.view(B, H // window_size[0], W // window_size[1], window_size[0], window_size[1], -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
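# Illustrative sketch (not part of the original module): window_partition and
# window_reverse are exact inverses, which the shifted-window attention below
# relies on when scattering windows back into the feature map.
def _sketch_window_roundtrip():
    x = torch.randn(2, 8, 8, 96)                  # (B, H, W, C)
    windows = window_partition(x, (4, 4))         # -> (2 * 4 windows, 4, 4, 96)
    assert windows.shape == (8, 4, 4, 96)
    y = window_reverse(windows, (4, 4), (8, 8))   # back to (B, H, W, C)
    assert torch.equal(x, y)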
class WindowMultiHeadAttention(nn.Module):
r"""This class implements window-based Multi-Head-Attention with log-spaced continuous position bias.
Args:
dim (int): Number of input features
window_size (int): Window size
num_heads (int): Number of attention heads
drop_attn (float): Dropout rate of attention map
drop_proj (float): Dropout rate after projection
meta_hidden_dim (int): Number of hidden features in the two layer MLP meta network
sequential_attn (bool): If true sequential self-attention is performed
"""
def __init__(
self,
dim: int,
num_heads: int,
window_size: Tuple[int, int],
drop_attn: float = 0.0,
drop_proj: float = 0.0,
meta_hidden_dim: int = 384, # FIXME what's the optimal value?
sequential_attn: bool = False,
) -> None:
super(WindowMultiHeadAttention, self).__init__()
assert dim % num_heads == 0, \
"The number of input features (in_features) are not divisible by the number of heads (num_heads)."
self.in_features: int = dim
self.window_size: Tuple[int, int] = window_size
self.num_heads: int = num_heads
self.sequential_attn: bool = sequential_attn
self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True)
self.attn_drop = nn.Dropout(drop_attn)
self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True)
self.proj_drop = nn.Dropout(drop_proj)
# meta network for positional encodings
self.meta_mlp = Mlp(
2, # x, y
hidden_features=meta_hidden_dim,
out_features=num_heads,
act_layer=nn.ReLU,
drop=0.1 # FIXME should there be stochasticity, appears to 'overfit' without?
)
self.register_parameter("tau", torch.nn.Parameter(torch.ones(num_heads)))
self._make_pair_wise_relative_positions()
def _make_pair_wise_relative_positions(self) -> None:
"""Method initializes the pair-wise relative positions to compute the positional biases."""
device = self.tau.device
coordinates = torch.stack(torch.meshgrid([
torch.arange(self.window_size[0], device=device),
torch.arange(self.window_size[1], device=device)]), dim=0).flatten(1)
relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :]
relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float()
relative_coordinates_log = torch.sign(relative_coordinates) * torch.log(
1.0 + relative_coordinates.abs())
self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False)
def update_input_size(self, new_window_size: Tuple[int, int], **kwargs: Any) -> None:
"""Updates the window size and, with it, the pair-wise relative positions.
Args:
new_window_size (Tuple[int, int]): New window size
kwargs (Any): Unused
"""
# Set the new window size and recompute the pair-wise relative positions
self.window_size: Tuple[int, int] = new_window_size
self._make_pair_wise_relative_positions()
def _relative_positional_encodings(self) -> torch.Tensor:
"""Method computes the relative positional encodings
Returns:
relative_position_bias (torch.Tensor): Relative positional encodings
(1, number of heads, window size ** 2, window size ** 2)
"""
window_area = self.window_size[0] * self.window_size[1]
relative_position_bias = self.meta_mlp(self.relative_coordinates_log)
relative_position_bias = relative_position_bias.transpose(1, 0).reshape(
self.num_heads, window_area, window_area
)
relative_position_bias = relative_position_bias.unsqueeze(0)
return relative_position_bias
def _forward_sequential(
self,
x: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
"""
# FIXME TODO figure out 'sequential' attention mentioned in paper (should reduce GPU memory)
assert False, "not implemented"
def _forward_batch(
self,
x: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""This function performs standard (non-sequential) scaled cosine self-attention.
"""
Bw, L, C = x.shape
qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
query, key, value = qkv.unbind(0)
# compute attention map with scaled cosine attention
denom = torch.norm(query, dim=-1, keepdim=True) @ torch.norm(key, dim=-1, keepdim=True).transpose(-2, -1)
attn = query @ key.transpose(-2, -1) / denom.clamp(min=1e-6)
attn = attn / self.tau.clamp(min=0.01).reshape(1, self.num_heads, 1, 1)
attn = attn + self._relative_positional_encodings()
if mask is not None:
# Apply mask if utilized
num_win: int = mask.shape[0]
attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L)
attn = attn + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, L, L)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
""" Forward pass.
Args:
x (torch.Tensor): Input tensor of the shape (B * windows, N, C)
mask (Optional[torch.Tensor]): Attention mask for the shift case
Returns:
Output tensor of the shape [B * windows, N, C]
"""
if self.sequential_attn:
return self._forward_sequential(x, mask)
else:
return self._forward_batch(x, mask)
class SwinTransformerBlock(nn.Module):
r"""This class implements the Swin transformer block.
Args:
dim (int): Number of input channels
num_heads (int): Number of attention heads to be utilized
feat_size (Tuple[int, int]): Input resolution
window_size (Tuple[int, int]): Window size to be utilized
shift_size (int): Shifting size to be used
mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels
drop (float): Dropout in input mapping
drop_attn (float): Dropout rate of attention map
drop_path (float): Dropout in main path
extra_norm (bool): Insert extra norm on 'main' branch if True
sequential_attn (bool): If true sequential self-attention is performed
norm_layer (Type[nn.Module]): Type of normalization layer to be utilized
"""
def __init__(
self,
dim: int,
num_heads: int,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
shift_size: Tuple[int, int] = (0, 0),
mlp_ratio: float = 4.0,
drop: float = 0.0,
drop_attn: float = 0.0,
drop_path: float = 0.0,
extra_norm: bool = False,
sequential_attn: bool = False,
norm_layer: Type[nn.Module] = nn.LayerNorm,
) -> None:
super(SwinTransformerBlock, self).__init__()
self.dim: int = dim
self.feat_size: Tuple[int, int] = feat_size
self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size)
self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size))
self.window_area = self.window_size[0] * self.window_size[1]
self.num_heads = num_heads
# attn branch
self.attn = WindowMultiHeadAttention(
dim=dim,
num_heads=num_heads,
window_size=self.window_size,
drop_attn=drop_attn,
drop_proj=drop,
sequential_attn=sequential_attn,
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity()
# mlp branch
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
drop=drop,
out_features=dim,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity()
# Extra main branch norm layer mentioned for Huge/Giant models in V2 paper.
# Also being used as final network norm and optional stage ending norm while still in a C-last format.
self.norm3 = norm_layer(dim) if extra_norm else nn.Identity()
self._make_attention_mask()
def _calc_window_shift(self, target_window_size):
window_size = [f if f <= w else w for f, w in zip(self.feat_size, target_window_size)]
shift_size = [0 if f <= w else s for f, w, s in zip(self.feat_size, window_size, self.target_shift_size)]
return tuple(window_size), tuple(shift_size)
def _make_attention_mask(self) -> None:
"""Method generates the attention mask used in shift case."""
# Make masks for shift case
if any(self.shift_size):
# calculate attention mask for SW-MSA
H, W = self.feat_size
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
cnt = 0
for h in (
slice(0, -self.window_size[0]),
slice(-self.window_size[0], -self.shift_size[0]),
slice(-self.shift_size[0], None)):
for w in (
slice(0, -self.window_size[1]),
slice(-self.window_size[1], -self.shift_size[1]),
slice(-self.shift_size[1], None)):
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_area)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask, persistent=False)
def update_input_size(self, new_window_size: Tuple[int, int], new_feat_size: Tuple[int, int]) -> None:
"""Method updates the image resolution to be processed and window size and so the pair-wise relative positions.
Args:
new_window_size (int): New window size
new_feat_size (Tuple[int, int]): New input resolution
"""
# Update input resolution
self.feat_size: Tuple[int, int] = new_feat_size
self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(new_window_size))
self.window_area = self.window_size[0] * self.window_size[1]
self.attn.update_input_size(new_window_size=self.window_size)
self._make_attention_mask()
def _shifted_window_attn(self, x):
H, W = self.feat_size
B, L, C = x.shape
x = x.view(B, H, W, C)
# cyclic shift
sh, sw = self.shift_size
do_shift: bool = any(self.shift_size)
if do_shift:
# FIXME PyTorch XLA needs cat impl, roll not lowered
# x = torch.cat([x[:, sh:], x[:, :sh]], dim=1)
# x = torch.cat([x[:, :, sw:], x[:, :, :sw]], dim=2)
x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2))
# partition windows
x_windows = window_partition(x, self.window_size) # num_windows * B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C)
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # num_windows * B, window_size * window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
x = window_reverse(attn_windows, self.window_size, self.feat_size) # B H' W' C
# reverse cyclic shift
if do_shift:
# FIXME PyTorch XLA needs cat impl, roll not lowered
# x = torch.cat([x[:, -sh:], x[:, :-sh]], dim=1)
# x = torch.cat([x[:, :, -sw:], x[:, :, :-sw]], dim=2)
x = torch.roll(x, shifts=(sh, sw), dims=(1, 2))
x = x.view(B, L, C)
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass.
Args:
x (torch.Tensor): Input tensor of the shape [B, H*W, C]
Returns:
output (torch.Tensor): Output tensor of the shape [B, H*W, C]
"""
# NOTE post-norm branches (op -> norm -> drop)
x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x)))
x = x + self.drop_path2(self.norm2(self.mlp(x)))
x = self.norm3(x) # main-branch norm enabled for some blocks / stages (every 6 for Huge/Giant)
#print(x.shape, self.num_heads, self.window_size, self.feat_size) #
return x
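# Illustrative sketch (not part of the original module): run one shifted
# SwinTransformerBlock on a flattened 8x8 feature map; dim/head counts are
# arbitrary and only shapes are checked.
def _sketch_swin_block():
    block = SwinTransformerBlock(
        dim=96, num_heads=3, feat_size=(8, 8), window_size=(4, 4), shift_size=(2, 2))
    x = torch.randn(2, 8 * 8, 96)   # (B, H*W, C)
    out = block(x)
    assert out.shape == (2, 64, 96)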
|
AudioMAE-main
|
timm_patch/swin_transformer.py
|
""" Layer/Module Helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
from itertools import repeat
import torch
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 1 and TORCH_MINOR < 8:
from torch._six import container_abcs
else:
import collections.abc as container_abcs
#from torch._six import container_abcs
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
|
AudioMAE-main
|
timm_patch/helpers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# ELECTRA https://github.com/google-research/electra
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import json
def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=[], layer_decay=.75):
"""
Parameter groups for layer-wise lr decay
Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
"""
param_group_names = {}
param_groups = {}
num_layers = len(model.blocks) + 1
layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1))
for n, p in model.named_parameters():
if not p.requires_grad:
continue
# no decay: all 1D parameters and model specific ones
if p.ndim == 1 or n in no_weight_decay_list:
g_decay = "no_decay"
this_decay = 0.
else:
g_decay = "decay"
this_decay = weight_decay
layer_id = get_layer_id_for_vit(n, num_layers)
group_name = "layer_%d_%s" % (layer_id, g_decay)
if group_name not in param_group_names:
this_scale = layer_scales[layer_id]
param_group_names[group_name] = {
"lr_scale": this_scale,
"weight_decay": this_decay,
"params": [],
}
param_groups[group_name] = {
"lr_scale": this_scale,
"weight_decay": this_decay,
"params": [],
}
param_group_names[group_name]["params"].append(n)
param_groups[group_name]["params"].append(p)
# print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))
return list(param_groups.values())
def get_layer_id_for_vit(name, num_layers):
"""
Assign a parameter with its layer id
Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
"""
if name in ['cls_token', 'pos_embed']:
return 0
elif name.startswith('patch_embed'):
return 0
elif name.startswith('blocks'):
return int(name.split('.')[1]) + 1
else:
return num_layers
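# Illustrative sketch (not part of the original helper): how parameter names map to
# layer ids, and the lr scales they receive for a 12-block ViT with layer_decay=0.75.
def _sketch_layer_decay():
    num_layers = 12 + 1
    scales = [0.75 ** (num_layers - i) for i in range(num_layers + 1)]
    assert get_layer_id_for_vit('cls_token', num_layers) == 0
    assert get_layer_id_for_vit('patch_embed.proj.weight', num_layers) == 0
    assert get_layer_id_for_vit('blocks.3.attn.qkv.weight', num_layers) == 4
    assert get_layer_id_for_vit('head.weight', num_layers) == num_layers
    assert scales[0] == 0.75 ** 13 and scales[-1] == 1.0  # deepest decay for patch embed, none for the head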
|
AudioMAE-main
|
util/lr_decay.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import builtins
import datetime
import os
import time
from collections import defaultdict, deque
from pathlib import Path
import torch
import torch.distributed as dist
try:
    from torch import inf
except ImportError:  # torch._six was removed in PyTorch 2.x
    from torch._six import inf
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
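# Illustrative sketch (not part of the original utilities): SmoothedValue keeps a
# sliding window for median/avg, while total/count track the global average.
def _sketch_smoothed_value():
    v = SmoothedValue(window_size=2)
    for x in (1.0, 10.0, 4.0):
        v.update(x)
    assert v.avg == 7.0          # mean over the last two values (10, 4)
    assert v.global_avg == 5.0   # (1 + 10 + 4) / 3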
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
builtin_print = builtins.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
force = force or (get_world_size() > 8)
if is_master or force:
now = datetime.datetime.now().time()
builtin_print('[{}] '.format(now), end='') # print with time stamp
builtin_print(*args, **kwargs)
builtins.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
setup_for_distributed(is_master=True) # hack
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
if loss_scaler is not None:
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
save_on_master(to_save, checkpoint_path)
else:
client_state = {'epoch': epoch}
model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
def load_model(args, model_without_ddp, optimizer, loss_scaler):
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval):
optimizer.load_state_dict(checkpoint['optimizer'])
args.start_epoch = checkpoint['epoch'] + 1
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
def all_reduce_mean(x):
world_size = get_world_size()
if world_size > 1:
x_reduce = torch.tensor(x).cuda()
dist.all_reduce(x_reduce)
x_reduce /= world_size
return x_reduce.item()
else:
return x
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
def merge_vmae_to_avmae(avmae_state_dict, vmae_ckpt):
#keys_to_copy=['pos_embed','patch_embed']
#replaced=0
vmae_ckpt['cls_token'] = vmae_ckpt['cls_token_v']
vmae_ckpt['mask_token'] = vmae_ckpt['mask_token_v']
# pos_emb % not trainable, use default
pos_embed_v = vmae_ckpt['pos_embed_v'] #1,589,768
pos_embed = pos_embed_v[:,1:,:] #1,588,768
cls_embed = pos_embed_v[:,0,:].unsqueeze(1)
pos_embed = pos_embed.reshape(1, 2, 14, 14,768).sum(dim=1) # 1, 14, 14, 768
print("Position interpolate from 14,14 to 64,8")
pos_embed = pos_embed.permute(0, 3, 1, 2) # 1, 14,14,768 -> 1,768,14,14
pos_embed = torch.nn.functional.interpolate(
pos_embed, size=(64, 8), mode='bicubic', align_corners=False)
pos_embed = pos_embed.permute(0, 2, 3, 1).flatten(1, 2) # 1, 14, 14, 768 => 1, 196,768
pos_embed = torch.cat((cls_embed, pos_embed), dim=1)
assert(vmae_ckpt['pos_embed'].shape == pos_embed.shape)
vmae_ckpt['pos_embed'] = pos_embed
# patch_emb
# aggregate 3 channels in video-rgb ckpt to 1 channel for audio
v_weight = vmae_ckpt['patch_embed_v.proj.weight'] # 768,3,2,16,16
new_proj_weight = torch.nn.Parameter(v_weight.sum(dim=2).sum(dim=1).unsqueeze(1))
assert(new_proj_weight.shape == vmae_ckpt['patch_embed.proj.weight'].shape)
vmae_ckpt['patch_embed.proj.weight'] = new_proj_weight
vmae_ckpt['patch_embed.proj.bias'] = vmae_ckpt['patch_embed_v.proj.bias']
# hack
vmae_ckpt['norm.weight'] = vmae_ckpt['norm_v.weight']
vmae_ckpt['norm.bias'] = vmae_ckpt['norm_v.bias']
# replace transformer encoder
for k,v in vmae_ckpt.items():
if k.startswith('blocks.'):
kk = k.replace('blocks.','blocks_v.')
vmae_ckpt[k] = vmae_ckpt[kk]
elif k.startswith('blocks_v.'):
pass
else:
# keys outside the transformer blocks are left unchanged
print(k)
|
AudioMAE-main
|
util/misc.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# LARS optimizer, implementation from MoCo v3:
# https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
import torch
class LARS(torch.optim.Optimizer):
"""
LARS optimizer, no rate scaling or weight decay for parameters <= 1D.
"""
def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient)
super().__init__(params, defaults)
@torch.no_grad()
def step(self):
for g in self.param_groups:
for p in g['params']:
dp = p.grad
if dp is None:
continue
if p.ndim > 1: # if not normalization gamma/beta or bias
dp = dp.add(p, alpha=g['weight_decay'])
param_norm = torch.norm(p)
update_norm = torch.norm(dp)
one = torch.ones_like(param_norm)
q = torch.where(param_norm > 0.,
torch.where(update_norm > 0,
(g['trust_coefficient'] * param_norm / update_norm), one),
one)
dp = dp.mul(q)
param_state = self.state[p]
if 'mu' not in param_state:
param_state['mu'] = torch.zeros_like(p)
mu = param_state['mu']
mu.mul_(g['momentum']).add_(dp)
p.add_(mu, alpha=-g['lr'])
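# Illustrative sketch (not part of the original optimizer): a single LARS step on
# one weight matrix; 1D parameters (biases, norm scales) skip the trust-ratio
# scaling and weight decay by design.
def _sketch_lars_step():
    w = torch.nn.Parameter(torch.randn(8, 8))
    opt = LARS([w], lr=0.1, weight_decay=1e-4, momentum=0.9)
    loss = (w ** 2).sum()
    loss.backward()
    opt.step()
    opt.zero_grad()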
|
AudioMAE-main
|
util/lars.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
import os
import PIL
from torchvision import datasets, transforms
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
print(dataset)
return dataset
def build_transform(is_train, args):
mean = IMAGENET_DEFAULT_MEAN
std = IMAGENET_DEFAULT_STD
# train transform
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation='bicubic',
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
return transform
# eval transform
t = []
if args.input_size <= 224:
crop_pct = 224 / 256
else:
crop_pct = 1.0
size = int(args.input_size / crop_pct)
t.append(
transforms.Resize(size, interpolation=PIL.Image.BICUBIC), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
|
AudioMAE-main
|
util/datasets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torchvision import transforms
from torchvision.transforms import functional as F
class RandomResizedCrop(transforms.RandomResizedCrop):
"""
RandomResizedCrop for matching TF/TPU implementation: no for-loop is used.
This may lead to results different with torchvision's version.
Following BYOL's TF code:
https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206
"""
@staticmethod
def get_params(img, scale, ratio):
width, height = F.get_image_size(img) if hasattr(F, 'get_image_size') else F._get_image_size(img)  # newer torchvision renamed the private helper
area = height * width
target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
log_ratio = torch.log(torch.tensor(ratio))
aspect_ratio = torch.exp(
torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
).item()
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
w = min(w, width)
h = min(h, height)
i = torch.randint(0, height - h + 1, size=(1,)).item()
j = torch.randint(0, width - w + 1, size=(1,)).item()
return i, j, h, w
|
AudioMAE-main
|
util/crop.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# Position embedding utils
# --------------------------------------------------------
import numpy as np
import torch
# --------------------------------------------------------
# 2D sine-cosine position embedding
# References:
# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
"""
grid_size: int of the grid height and width
return:
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
"""
grid_h = np.arange(grid_size, dtype=np.float32)
grid_w = np.arange(grid_size, dtype=np.float32)
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_size, grid_size])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed
def get_2d_sincos_pos_embed_flexible(embed_dim, grid_size, cls_token=False):
"""
grid_size: (height, width) tuple of the grid
return:
pos_embed: [grid_size[0]*grid_size[1], embed_dim] or [1+grid_size[0]*grid_size[1], embed_dim] (w/ or w/o cls_token)
"""
grid_h = np.arange(grid_size[0], dtype=np.float32)
grid_w = np.arange(grid_size[1], dtype=np.float32)
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_size[0], grid_size[1]])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
assert embed_dim % 2 == 0
# use half of dimensions to encode grid_h
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
return emb
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
"""
assert embed_dim % 2 == 0
omega = np.arange(embed_dim // 2, dtype=np.float64)  # np.float was removed in NumPy >= 1.24
omega /= embed_dim / 2.
omega = 1. / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
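# Illustrative sketch (not part of the original utilities): the flexible variant
# accepts a rectangular grid, e.g. the 64x8 patch grid of a 1024x128 spectrogram
# with 16x16 patches, giving 512 position vectors (+1 for the cls token).
def _sketch_sincos_shapes():
    square = get_2d_sincos_pos_embed(768, 14, cls_token=True)
    assert square.shape == (14 * 14 + 1, 768)
    audio = get_2d_sincos_pos_embed_flexible(768, (64, 8), cls_token=True)
    assert audio.shape == (64 * 8 + 1, 768)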
# --------------------------------------------------------
# Interpolate position embeddings for high-resolution
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
def interpolate_pos_embed(model, checkpoint_model):
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
def interpolate_pos_embed_img2audio(model, checkpoint_model, orig_size, new_size):
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
#orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
#new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size[0], orig_size[1], new_size[0], new_size[1]))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size[0], orig_size[1], embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size[0], new_size[1]), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
def interpolate_pos_embed_audio(model, checkpoint_model, orig_size, new_size):
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size[0], orig_size[1], new_size[0], new_size[1]))
#extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
cls_token = pos_embed_checkpoint[:,0,:].unsqueeze(1)
pos_tokens = pos_embed_checkpoint[:,1:,:] # remove
pos_tokens = pos_tokens.reshape(-1, orig_size[0], orig_size[1], embedding_size) #.permute(0, 3, 1, 2)
#pos_tokens = torch.nn.functional.interpolate(
# pos_tokens, size=(new_size[0], new_size[1]), mode='bicubic', align_corners=False)
#pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
pos_tokens = pos_tokens[:,:,:new_size[1],:] # assume only time diff
pos_tokens = pos_tokens.flatten(1, 2)
new_pos_embed = torch.cat((cls_token, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
def interpolate_patch_embed_audio(model, checkpoint_model, orig_channel, new_channel=1, kernel_size=(16,16), stride=(16,16), padding=(0,0)):
if orig_channel != new_channel:
if 'patch_embed.proj.weight' in checkpoint_model:
# aggregate 3 channels in rgb ckpt to 1 channel for audio
new_proj_weight = torch.nn.Parameter(torch.sum(checkpoint_model['patch_embed.proj.weight'], dim=1).unsqueeze(1))
checkpoint_model['patch_embed.proj.weight'] = new_proj_weight
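# Illustrative sketch (not part of the original utilities): the RGB->mono conversion
# above simply sums the three input channels of the patch-embedding kernel, so a
# (768, 3, 16, 16) ImageNet weight becomes (768, 1, 16, 16) for spectrogram input.
def _sketch_rgb_to_mono_kernel():
    ckpt = {'patch_embed.proj.weight': torch.randn(768, 3, 16, 16)}
    interpolate_patch_embed_audio(model=None, checkpoint_model=ckpt, orig_channel=3, new_channel=1)
    assert ckpt['patch_embed.proj.weight'].shape == (768, 1, 16, 16)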
|
AudioMAE-main
|
util/pos_embed.py
|
import numpy as np
from scipy import stats
from sklearn import metrics
import torch
def d_prime(auc):
standard_normal = stats.norm()
d_prime = standard_normal.ppf(auc) * np.sqrt(2.0)
return d_prime
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
def calculate_stats(output, target):
"""Calculate statistics including mAP, AUC, etc.
Args:
output: 2d array, (samples_num, classes_num)
target: 2d array, (samples_num, classes_num)
Returns:
stats: list of statistic of each class.
"""
classes_num = target.shape[-1]
stats = []
# Accuracy, only used for single-label classification such as esc-50, not for multiple label one such as AudioSet
acc = metrics.accuracy_score(np.argmax(target, 1), np.argmax(output, 1))
# Class-wise statistics
for k in range(classes_num):
# Average precision
avg_precision = metrics.average_precision_score(
target[:, k], output[:, k], average=None)
# AUC
auc = metrics.roc_auc_score(target[:, k], output[:, k], average=None)
# Precisions, recalls
(precisions, recalls, thresholds) = metrics.precision_recall_curve(
target[:, k], output[:, k])
# FPR, TPR
(fpr, tpr, thresholds) = metrics.roc_curve(target[:, k], output[:, k])
save_every_steps = 1000 # Sample statistics to reduce size
stats_dict = {'precisions': precisions[0::save_every_steps],
'recalls': recalls[0::save_every_steps],
'AP': avg_precision,
'fpr': fpr[0::save_every_steps],
'fnr': 1. - tpr[0::save_every_steps],
'auc': auc,
# note acc is not class-wise, this is just to keep consistent with other metrics
'acc': acc
}
stats.append(stats_dict)
return stats
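# Illustrative sketch (not part of the original utilities): mAP is the mean of the
# per-class 'AP' entries returned by calculate_stats. Targets are constructed so
# that every class has both positives and negatives.
def _sketch_calculate_stats():
    rng = np.random.RandomState(0)
    output = rng.rand(100, 5)
    target = ((np.arange(100)[:, None] + np.arange(5)[None, :]) % 2).astype(float)
    per_class = calculate_stats(output, target)
    mAP = np.mean([s['AP'] for s in per_class])
    assert len(per_class) == 5 and 0.0 <= mAP <= 1.0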
|
AudioMAE-main
|
util/stat.py
|
import torch
import torch.nn as nn
from timm.models.layers import to_2tuple
class PatchEmbed_org(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_hw = (img_size[1] // patch_size[1], img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
#assert H == self.img_size[0] and W == self.img_size[1], \
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class PatchEmbed_new(nn.Module):
""" Flexible Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, stride=10):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
stride = to_2tuple(stride)
self.img_size = img_size
self.patch_size = patch_size
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride) # with overlapped patches
#self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
#self.patch_hw = (img_size[1] // patch_size[1], img_size[0] // patch_size[0])
#self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
_, _, h, w = self.get_output_shape(img_size) # n, emb_dim, h, w
self.patch_hw = (h, w)
self.num_patches = h*w
def get_output_shape(self, img_size):
# todo: don't be lazy..
return self.proj(torch.randn(1,1,img_size[0],img_size[1])).shape
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
#assert H == self.img_size[0] and W == self.img_size[1], \
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
#x = self.proj(x).flatten(2).transpose(1, 2)
x = self.proj(x) # 32, 1, 1024, 128 -> 32, 768, 101, 12
x = x.flatten(2) # 32, 768, 101, 12 -> 32, 768, 1212
x = x.transpose(1, 2) # 32, 768, 1212 -> 32, 1212, 768
return x
class PatchEmbed3D_new(nn.Module):
""" Flexible Image to Patch Embedding
"""
def __init__(self, video_size=(16,224,224), patch_size=(2,16,16), in_chans=3, embed_dim=768, stride=(2,16,16)):
super().__init__()
self.video_size = video_size
self.patch_size = patch_size
self.in_chans = in_chans
self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=stride)
_, _, t, h, w = self.get_output_shape(video_size) # n, emb_dim, h, w
self.patch_thw = (t, h, w)
self.num_patches = t*h*w
def get_output_shape(self, video_size):
# todo: don't be lazy..
return self.proj(torch.randn(1, self.in_chans, video_size[0], video_size[1], video_size[2])).shape
def forward(self, x):
B, C, T, H, W = x.shape
x = self.proj(x) # 32, 3, 16, 224, 224 -> 32, 768, 8, 14, 14
x = x.flatten(2) # 32, 768, 1568
x = x.transpose(1, 2) # 32, 768, 1568 -> 32, 1568, 768
return x
if __name__ == '__main__':
#patch_emb = PatchEmbed_new(img_size=224, patch_size=16, in_chans=1, embed_dim=64, stride=(16,16))
#input = torch.rand(8,1,1024,128)
#output = patch_emb(input)
#print(output.shape) # (8,512,64)
patch_emb = PatchEmbed3D_new(video_size=(6,224,224), patch_size=(2,16,16), in_chans=3, embed_dim=768, stride=(2,16,16))
input = torch.rand(8,3,6,224,224)
output = patch_emb(input)
print(output.shape) # (8, 588, 768): 3*14*14 = 588 patches of dim 768
|
AudioMAE-main
|
util/patch_embed.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate with half-cycle cosine after warmup"""
if epoch < args.warmup_epochs:
lr = args.lr * epoch / args.warmup_epochs
else:
lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
(1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
for param_group in optimizer.param_groups:
if "lr_scale" in param_group:
param_group["lr"] = lr * param_group["lr_scale"]
else:
param_group["lr"] = lr
return lr
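# Worked example (illustrative values, not from the original file): with
# args.lr=1e-3, args.min_lr=1e-5, args.warmup_epochs=10, args.epochs=100:
#   epoch 5  -> 1e-3 * 5 / 10                                = 5.0e-4  (linear warmup)
#   epoch 10 -> 1e-5 + (1e-3 - 1e-5) * 0.5 * (1 + cos(0))    = 1.0e-3  (cosine start)
#   epoch 55 -> 1e-5 + (1e-3 - 1e-5) * 0.5 * (1 + cos(pi/2)) ≈ 5.05e-4
#   as epoch approaches args.epochs the cosine term tends to -1 and lr -> args.min_lr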
|
AudioMAE-main
|
util/lr_sched.py
|
# -*- coding: utf-8 -*-
# modified from Yuan Gong@MIT's gen_weight_file
import argparse
import json
import numpy as np
import sys, os, csv
def make_index_dict(label_csv):
index_lookup = {}
with open(label_csv, 'r') as f:
csv_reader = csv.DictReader(f)
line_count = 0
for row in csv_reader:
index_lookup[row['mid']] = row['index']
line_count += 1
return index_lookup
def gen_weight(json_file, label_file, output_file):
index_dict = make_index_dict(label_file)
label_count = np.zeros(527)
with open(json_file, 'r', encoding='utf8') as fp:
data = json.load(fp)['data']
for sample in data:
sample_labels = sample['labels'].split(',')
for label in sample_labels:
label_idx = int(index_dict[label])
label_count[label_idx] = label_count[label_idx] + 1
label_weight = 1000.0 / (label_count + 0.01)
#label_weight = 1000.0 / (label_count + 100)
sample_weight = np.zeros(len(data))
for i, sample in enumerate(data):
sample_labels = sample['labels'].split(',')
for label in sample_labels:
label_idx = int(index_dict[label])
# sum the weights of all classes that appear in the sample; note AudioSet is a multi-label classification task
sample_weight[i] += label_weight[label_idx]
np.savetxt(output_file, sample_weight, delimiter=',')
print(label_weight)
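# Hedged usage sketch (assumed downstream use, not part of this script): the
# per-sample weights written to `output_file` are typically loaded into a
# sampler so that rare classes are drawn more often during training, e.g.
#   import torch
#   weights = np.loadtxt('./weight_train_all.csv', delimiter=',')
#   sampler = torch.utils.data.WeightedRandomSampler(
#       torch.from_numpy(weights), num_samples=len(weights), replacement=True)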
if __name__ == '__main__':
#args = parser.parse_args()
json_file='/checkpoint/berniehuang/ast/egs/audioset/data/datafiles/train_all.json'
label_file='/checkpoint/berniehuang/ast/egs/audioset/data/class_labels_indices.csv'
output_file='./weight_train_all.csv'
gen_weight(json_file,label_file,output_file)
|
AudioMAE-main
|
dataset/audioset/gen_weight.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import torch.nn as nn
import torchvision.models as models
def get_backbone(is_pretrained: bool, arch: str = "resnet18"):
# Define the backbone
if arch == "resnet18":
backbone = models.resnet18(pretrained=is_pretrained)
out_feature_num = backbone.fc.in_features
layers = list(backbone.children())[:-1]
elif arch == "mobilenet":
backbone = models.mobilenet_v2(pretrained=is_pretrained)
out_feature_num = backbone.classifier[-1].in_features
layers = list(backbone.children())[:-1] + [nn.AdaptiveAvgPool2d((1, 1))]
elif arch == "regnet":
backbone = models.regnet_y_400mf(
pretrained=is_pretrained
) # models.squeezenet1_1(pretrained=is_pretrained)
out_feature_num = backbone.fc.in_features
layers = list(backbone.children())[:-1]
else:
raise ValueError(f"{arch=}")
backbone = nn.Sequential(*layers)
return backbone, out_feature_num
def get_classifier(in_features: int, num_target_classes: int):
classifier = nn.Linear(in_features, num_target_classes)
return classifier
def get_cf_predictor(num_filters: int, cf_vector_dim: int):
# Define the cf vector predictor
cf_layers = nn.Sequential(
nn.BatchNorm1d(num_filters),
nn.Linear(num_filters, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, cf_vector_dim),
)
return cf_layers
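# Hedged usage sketch (mirrors how lit_utils.LitModel wires these helpers
# together; variable names are illustrative):
#   backbone, feat_dim = get_backbone(is_pretrained=True, arch="resnet18")
#   classifier = get_classifier(feat_dim, num_target_classes=20)
#   cf_head = get_cf_predictor(feat_dim, cf_vector_dim=64)
#   z = backbone(images).flatten(1)   # (B, feat_dim)
#   logits = classifier(z)            # (B, 20)
#   cf_vec = cf_head(z)               # (B, 64)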
|
collaborative_image_understanding-main
|
src/architecture_utils.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
from typing import Tuple
import hydra
import pandas as pd
from tqdm import tqdm
from data_utils import DataHelper
logger = logging.getLogger(__name__)
def convert_to_dataframe(
json_path: str, pkl_path: str, data_downloader: DataHelper
) -> pd.DataFrame:
if not data_downloader.is_exist(pkl_path):
logger.info(f"convert_to_dataframe: not exist {pkl_path=}")
data_downloader.save_df_as_pkl(json_path, pkl_path)
df = data_downloader.read_pickle(pkl_path)
return df
def download_category(
url_reviews: str, url_meta: str, dst_dir: str, data_downloader: DataHelper
) -> Tuple[str, str]:
dst_review, dst_meta = [
osp.join(dst_dir, osp.basename(url)).replace(".gz", "")
for url in (url_reviews, url_meta)
]
for url, dst in [(url_reviews, dst_review), (url_meta, dst_meta)]:
logger.info(f"download_categroy: {url=}")
data_downloader.download_url(url, f"{dst}.gz")
data_downloader.ungzip_file(path_src=f"{dst}.gz", path_dst=dst)
return dst_review, dst_meta
def get_download_url(base_url: str, category: str) -> Tuple[str, str]:
url_reviews = f"{base_url}/reviews_{category}_5.json.gz"
url_meta = f"{base_url}/meta_{category}.json.gz"
return url_reviews, url_meta
def filter_meta_df(df_meta: pd.DataFrame, df_review: pd.DataFrame):
# Keep only meta with reviews
meta_df_filtered = df_meta[df_meta["asin"].isin(df_review["asin"].unique())]
# Keep only items with an image
no_img = meta_df_filtered["imUrl"].str.contains("no-img-sm").astype(bool)
meta_df_filtered = meta_df_filtered[~no_img]
meta_df_filtered = meta_df_filtered.dropna(subset=["imUrl"])
return meta_df_filtered
@hydra.main(
config_path="../configs",
config_name="download_amazon_sets",
)
def download_data(cfg):
logger.info(cfg)
logger.info(f"{os.getcwd()=}")
data_helper = DataHelper(is_debug=cfg.is_debug, is_override=cfg.is_override)
dst_dir = cfg.data_dir
categories = cfg.category_list
for i, category in enumerate(categories):
t0 = time.time()
# Download category data
t1 = time.time()
url_reviews, url_meta = get_download_url(cfg.base_url, category)
dst_review, dst_meta = download_category(
url_reviews, url_meta, dst_dir, data_helper
)
logger.info(f"Download category data in {time.time()-t1:.2f} sec")
# Parse json
t1 = time.time()
df_review = convert_to_dataframe(
dst_review, osp.join(dst_dir, f"reviews_{category}.pkl"), data_helper
)
df_meta = convert_to_dataframe(
dst_meta, osp.join(dst_dir, f"meta_{category}.pkl"), data_helper
)
logger.info(f"Load df in {time.time()-t1:.2f} sec")
# Create output directory for images
category_dir = osp.join(dst_dir, category)
data_helper.create_dir(category_dir)
logger.info(f"{category_dir=}")
# Filter the images to download
meta_df_filt = filter_meta_df(df_meta, df_review)
img_urls = meta_df_filt.drop_duplicates(subset="imUrl", keep="first")["imUrl"]
# Download
logger.info(f"{len(img_urls)=}")
t1 = time.time()
dst_exists = data_helper.list_files_in_dir(category_dir)
dst_imgs = [
osp.join(category_dir, osp.basename(img_url)) for img_url in tqdm(img_urls)
]
logger.info(f"{len(dst_exsits)=} in {time.time()-t1:.2f} sec")
# Filter out images that already exist on disk
dst_imgs_filt, img_urls_filt = [], []
for dst, url in zip(dst_imgs, img_urls):
if osp.basename(dst) not in dst_exists:
img_urls_filt.append(url)
dst_imgs_filt.append(dst)
assert len(img_urls_filt) == len(dst_imgs_filt)
logger.info(f"Post filtering. {len(img_urls_filt)=}")
for img_url, dst_img in tqdm(
zip(img_urls_filt, dst_imgs_filt), total=len(img_urls_filt)
):
data_helper.download_url(img_url, dst_img)
logger.info(
f"[{i}/{len(categories)-1}] Finish {category} in {time.time() -t0:.2f} sec"
)
if __name__ == "__main__":
download_data()
|
collaborative_image_understanding-main
|
src/main_download_amazon_sets.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from sklearn.metrics import average_precision_score
import os.path as osp
from architecture_utils import get_backbone, get_cf_predictor, get_classifier
logger = logging.getLogger(__name__)
class LitModel(pl.LightningModule):
def __init__(
self,
num_target_classes: int,
cf_vector_dim: int,
cfg,
pos_weight=None,
out_dir: str = ".",
):
super().__init__()
self.cfg = cfg
self.num_target_classes = num_target_classes
self.save_hyperparameters()
self.map_best = 0
pos_weight = torch.tensor(pos_weight) if pos_weight is not None else None
self.criterion = nn.BCEWithLogitsLoss(reduction="none", pos_weight=pos_weight)
# Define the architecture
self.backbone, out_feature_num = get_backbone(cfg["is_pretrained"], cfg["arch"])
self.classifier = get_classifier(out_feature_num, num_target_classes)
self.cf_layers = get_cf_predictor(out_feature_num, cf_vector_dim)
# Save best prediction paths
self.preds_path = osp.join(out_dir, "preds.npy")
self.labels_path = osp.join(out_dir, "labels.npy")
def criterion_cf(
self,
pred: torch.Tensor,
target: torch.Tensor,
cf_topk_loss_ratio: float,
cf_confidence: torch.Tensor,
):
# The per-item loss has length equal to the batch size; the mean is taken over the cf vector dimension (e.g. 64).
loss_reduction_none = nn.functional.mse_loss(
pred, target, reduction="none"
).mean(dim=-1) + nn.functional.l1_loss(pred, target, reduction="none").mean(
dim=-1
)
loss_reduction_none = torch.exp(loss_reduction_none) - 1.0
loss_reduction_none = (
cf_confidence * loss_reduction_none / (cf_confidence.sum())
)
# Keep only the num_cf_items items with the lowest loss
if cf_topk_loss_ratio < 1.0:
num_cf_items = int(cf_topk_loss_ratio * len(loss_reduction_none))
loss = torch.topk(
loss_reduction_none, k=num_cf_items, largest=False, sorted=False
)[0].sum()
else:
loss = loss_reduction_none.sum()
return loss
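# Note on criterion_cf (added descriptive comment): per item i the loss is
# c_i * (exp(MSE_i + L1_i) - 1) / sum(c), with c the CF-confidence vector;
# during training only the cf_topk_loss_ratio fraction of items with the
# lowest loss is summed, down-weighting items whose CF vectors the image
# cannot explain well.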
def criterion_cf_triplet(self, cf_hat: torch.Tensor, cf_hat_pos: torch.Tensor):
cf_hat_neg = torch.roll(cf_hat_pos, shifts=1, dims=0)
loss = nn.functional.triplet_margin_loss(
cf_hat, cf_hat_pos, cf_hat_neg, margin=0.2, p=2
)
return loss
def forward(self, x):
z = self.backbone(x).flatten(1)
y_hat = self.classifier(z)
cf_hat = self.cf_layers(z)
return y_hat, cf_hat
def get_embeddings(self, x):
z = self.backbone(x).flatten(1)
return z
def _loss_helper(self, batch, phase: str):
assert phase in ["train", "val"]
(
imgs,
imgs_pos,
cf_vectors,
labels,
is_labeled,
cf_confidence,
) = batch
y_hat, cf_hat = self(imgs)
# Compute classification loss
loss_classification = self.criterion(y_hat.squeeze(), labels.float().squeeze())
loss_classification = loss_classification[is_labeled].mean()
# Compute CF loss
cf_topk_loss_ratio = self.cfg["cf_topk_loss_ratio"] if phase == "train" else 1.0
if self.cfg.cf_loss_type == "exp":
loss_cf = self.criterion_cf(
cf_hat,
cf_vectors,
cf_topk_loss_ratio,
cf_confidence,
)
elif self.cfg.cf_loss_type == "triplet":
_, cf_hat_pos = self(imgs_pos)
loss_cf = self.criterion_cf_triplet(cf_hat.squeeze(), cf_hat_pos.squeeze())
else:
raise ValueError(f"{self.cfg.cf_loss_type=}")
# Combine loss
loss = (
self.cfg["label_weight"] * loss_calssification
+ self.cfg["cf_weight"] * loss_cf
)
res_dict = {
f"loss/{phase}": loss.detach(),
f"loss_classification/{phase}": loss_calssification.detach(),
f"loss_cf/{phase}": loss_cf.detach(),
}
self.log_dict(
res_dict,
prog_bar=True,
logger=True,
on_step=False,
on_epoch=True,
)
preds = torch.sigmoid(y_hat)
res_dict["labels"] = labels.cpu().detach().numpy()
res_dict["preds"] = preds.cpu().detach().numpy()
res_dict["loss"] = loss
return res_dict
def epoch_end_helper(self, outputs, phase: str):
assert phase in ["train", "val"]
loss = torch.mean(torch.stack([out["loss"] for out in outputs])).item()
preds = np.vstack([out["preds"] for out in outputs])
labels = np.vstack([out["labels"] for out in outputs])
# Metrics
ap = average_precision_score(labels, preds)
self.log_dict(
{
f"ap/{phase}": ap,
},
logger=True,
on_epoch=True,
on_step=False,
prog_bar=False,
)
loss, ap = np.round([loss, ap], 3)
logger.info(
f"[{self.current_epoch}/{self.cfg['epochs'] - 1}] {phase} epoch end. {[loss, ap]=}"
)
if phase == "val" and ap > self.map_best:
self.map_best = ap
np.save(self.preds_path, preds)
np.save(self.labels_path, labels)
def training_step(self, batch, batch_idx):
return self._loss_helper(batch, phase="train")
def training_epoch_end(self, outputs):
self.epoch_end_helper(outputs, "train")
def validation_step(self, batch, batch_idx, stage=None):
return self._loss_helper(batch, phase="val")
def validation_epoch_end(self, outputs):
self.epoch_end_helper(outputs, "val")
def configure_optimizers(self):
optimizer = torch.optim.SGD(
self.parameters(),
self.cfg["lr"],
weight_decay=self.cfg["weight_decay"],
)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=self.cfg["milestones"]
)
return [optimizer], [lr_scheduler]
class LitModelCFBased(LitModel):
def __init__(
self,
num_target_classes: int,
cf_vector_dim: int,
cfg,
pos_weight=None,
):
# pl.LightningModule().__init__()
cfg["is_pretrained"] = False
cfg["arch"] = "resnet18"
super().__init__(num_target_classes, cf_vector_dim, cfg, pos_weight)
# Define the backbone
layer_dims = torch.linspace(cf_vector_dim, num_target_classes, 3).int()
self.model = nn.Sequential(
nn.BatchNorm1d(layer_dims[0]),
nn.Linear(layer_dims[0], layer_dims[1]),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(layer_dims[1], layer_dims[2]),
)
logger.info(self)
def get_embeddings(self, x):
z = self.model[0](x)
z = self.model[1](z)
return z
def forward(self, x):
return self.model(x)
def _loss_helper(self, batch, phase: str):
assert phase in ["train", "val"]
cf_vectors, labels = batch
y_hat = self(cf_vectors)
# Compute classification loss
loss = self.criterion(y_hat.squeeze(), labels.float().squeeze()).mean()
res_dict = {
f"loss/{phase}": loss.detach(),
}
self.log_dict(
res_dict,
prog_bar=True,
logger=True,
on_step=False,
on_epoch=True,
)
preds = torch.sigmoid(y_hat)
res_dict["labels"] = labels.cpu().detach().numpy()
res_dict["preds"] = preds.cpu().detach().numpy()
res_dict["loss"] = loss
return res_dict
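# Hedged usage sketch (assumed training entry point, not shown in this file;
# uses the pre-2.0 PyTorch Lightning API implied by the *_epoch_end hooks):
#   model = LitModel(num_target_classes, cf_vector_dim=64, cfg=cfg, pos_weight=pos_weight)
#   trainer = pl.Trainer(max_epochs=cfg["epochs"], gpus=1)
#   trainer.fit(model, train_dataloader, val_dataloader)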
|
collaborative_image_understanding-main
|
src/lit_utils.py
|
# Copyright (c) 2015-present, Meta Platforms, Inc. and affiliates.
# All rights reserved.
import logging
import os
import os.path as osp
import time
from itertools import chain
import imghdr
import hydra
import numpy as np
import pandas as pd
from omegaconf import DictConfig
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from torchvision.datasets.folder import default_loader as img_loader
from tqdm import tqdm
logger = logging.getLogger(__name__)
label_ignore_list = ["International"]
def merge_label_hierarchy(label_list: list, category: str) -> list:
# label_list: list of list.
label_processed_list = []
for label in label_list:
# Keep only labels that belong to the top-level category
if label[0] == category:
# Remove labels with one letter
label = [label_i for label_i in label if len(label_i) > 1]
label_joined = [" + ".join(label[:i]) for i in range(1, len(label))]
if any(
[label_ignore in label_joined for label_ignore in label_ignore_list]
):
continue
# Join hierarchy
label_processed_list += label_joined
label_processed_list = np.unique(label_processed_list)
return label_processed_list
def remove_downlevel_hierarchy(labels: list, label_mapper: dict) -> list:
outs = []
for label in labels:
if label in label_mapper.keys():
outs.append(label_mapper[label])
else:
outs.append(label)
return outs
def keep_only_exists_images(df: pd.DataFrame) -> pd.DataFrame:
df["img_exists"] = df["img_path"].apply(
lambda img_path: imghdr.what(img_path) is not None
if osp.exists(img_path)
else False
)
logger.info(f"Img exsists {df.img_exists.sum()}/{len(df)}")
return df[df["img_exists"] == True]
def keep_categories_with_min_samples(
df_input: pd.DataFrame, num_samples_threshold: int
):
df = df_input.copy()
min_num_samples = 0
n_iter = 0
while min_num_samples <= num_samples_threshold:
label_count = pd.value_counts(
list(chain.from_iterable(df["merged_labels"].tolist()))
)
min_num_samples = label_count.min()
logger.info(
f"[{n_iter}] {len(label_count)=} {(label_count<num_samples_threshold).sum()=}"
)
logger.info(f"\n{label_count[label_count<num_samples_threshold]}")
label_mapper = {
label: " + ".join(label.split(" + ")[:-1])
for label in label_count[label_count <= num_samples_threshold].index
}
df["merged_labels"] = df["merged_labels"].apply(
lambda labels: remove_downlevel_hierarchy(labels, label_mapper=label_mapper)
)
# Keep unique categories
df["merged_labels"] = df["merged_labels"].apply(
lambda labels: np.unique(labels).tolist()
)
n_iter += 1
return df, label_count
def execute_train_test_split(
df: pd.DataFrame, train_set_ratio: float, min_label_count_thresh: int
):
n_iter = 0
min_label_count = 0
while min_label_count < min_label_count_thresh:
df_train, df_test = train_test_split(df, train_size=train_set_ratio)
train_labels = np.array(df_train.label_vec.to_list())
test_labels = np.array(df_test.label_vec.to_list())
min_label_count = min(
train_labels.sum(axis=0).min(), test_labels.sum(axis=0).min()
)
logger.info(f"[{n_iter}] train-test split {min_label_count=}")
n_iter += 1
return df_train, df_test
@hydra.main(
config_path="../configs",
config_name="process_labels_amazon",
)
def process_labels(cfg: DictConfig):
out_dir = os.getcwd()
logger.info(cfg)
logger.info(os.getcwd())
# Load df
t0 = time.time()
meta_path = osp.join(cfg.data_dir, f"meta_{cfg.category}.pkl")
meta_df = pd.read_pickle(meta_path)
logger.info(f"Loadded meta_df in {time.time() -t0:.2f} sec. {len(meta_df)=}")
t0 = time.time()
review_path = osp.join(cfg.data_dir, f"reviews_{cfg.category}.pkl")
review_df = pd.read_pickle(review_path)
logger.info(f"Loaded review_df in {time.time() -t0:.2f} sec. {len(review_df)=}")
# Keep only items with reviews
t0 = time.time()
asin = review_df.drop_duplicates(subset=["asin"])["asin"].tolist()
meta_df = meta_df[meta_df["asin"].isin(asin)]
logger.info(f"Item with reviews {time.time() -t0:.2f} sec. {len(meta_df)=}")
# Add image paths
meta_df["img_path"] = meta_df["imUrl"].apply(
lambda x: osp.join(cfg.data_dir, cfg.category, osp.basename(str(x)))
)
# Keep only items with images
df = keep_only_exists_images(meta_df)[["asin", "img_path", "categories"]]
# Find the top-level label name by the most frequent occurrence
toplevel_all_labels = [sublist[0][0] for sublist in df["categories"].tolist()]
toplevel_label = max(set(toplevel_all_labels), key=toplevel_all_labels.count)
# Merge label hierarchy:
# For example: [Clothing, Shoes & Jewelry + Girls + Clothing + Swim] -> [Clothing, Shoes & Jewelry + Girls + Clothing]
df["merged_labels"] = df["categories"].apply(
lambda x: merge_label_hierarchy(x, category=toplevel_label)
)
# Count number of samples for each category: remove downlevel category if there are not enough samples
df, label_count = keep_categories_with_min_samples(df, cfg.num_samples_threshold)
# Remove the top category, it is 1 for all.
df["merged_labels"] = df["merged_labels"].apply(
lambda labels: [label for label in labels if label != toplevel_label]
)
# Encode to Multilabel vector
mlb = MultiLabelBinarizer()
df["label_vec"] = mlb.fit_transform(df["merged_labels"].tolist()).tolist()
logger.info(f"\n{df.head()}")
# Save results
out_path = osp.join(out_dir, "label_count.csv")
label_count.to_csv(out_path, header=False)
out_path = osp.join(out_dir, "df_w_labels.pkl")
df = df.reset_index()
df.to_pickle(out_path)
logger.info(f"Save to {out_path}")
out_path = osp.join(out_dir, "label_mapper.csv")
pd.DataFrame(mlb.classes_).to_csv(out_path, header=False)
logger.info(f"Save to {out_path}")
# Train-test split
df_train, df_test = execute_train_test_split(
df, cfg.train_set_ratio, cfg.min_label_count
)
# Save train
out_path = osp.join(out_dir, "df_train.pkl")
df_train = df_train.reset_index()
df_train.to_pickle(out_path)
logger.info(f"Save to {out_path}")
out_path = osp.join(out_dir, "..", "df_train.pkl")
df_train.to_pickle(out_path)
logger.info(f"Save to {out_path}")
# Save test
out_path = osp.join(out_dir, "df_test.pkl")
df_test = df_test.reset_index()
df_test.to_pickle(out_path)
logger.info(f"Save to {out_path}")
out_path = osp.join(out_dir, "..", "df_test.pkl")
df_test.to_pickle(out_path)
logger.info(f"Save to {out_path}")
logger.info("Finish")
if __name__ == "__main__":
process_labels()
|
collaborative_image_understanding-main
|
src/main_process_labels_amazon.py
|