# Copyright (c) 2021-present, Facebook, Inc.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskPoleBalance-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_rewards(env):
terminated = False
while not terminated:
obs, reward, terminated, truncated, info = env.step([1, 0, 0])
if terminated:
assert reward == 0
else:
assert reward == 1
# --- end of bipedal-skills-main/tests/test_polebalance.py ---
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import numpy as np
import bisk
@pytest.fixture
def env():
env = gym.make('BiskButterflies-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_fixed_policy(env):
    # fmt: off
policy = []
# Drop to floor
for i in range(5):
policy.append([0, 0, 0])
# Go in a circle
for d in (-1,1):
for i in range(15):
policy.append([d,0,0])
for i in range(15):
policy.append([0,d,0])
# And the other way
for d in (1,-1):
for i in range(15):
policy.append([d,0,0])
for i in range(15):
policy.append([0,d,0])
retrn = 0
for action in policy:
obs, reward, terminated, truncated, info = env.step(action)
assert (not terminated and not truncated)
retrn += reward
assert retrn == 4
# --- end of bipedal-skills-main/tests/test_butterflies.py ---
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import numpy as np
import bisk
@pytest.fixture
def env():
env = gym.make('BiskGoToTargets-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_scripted_policy(env):
for i in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, 0])
retrn = 0
while not (terminated or truncated):
target = obs['targets'][:2]
dx, dy = 0, 0
if np.abs(target[0]) > np.abs(target[1]):
dx = np.sign(target[0])
else:
dy = np.sign(target[1])
obs, reward, terminated, truncated, info = env.step([dx, dy, 0])
retrn += reward
assert not terminated
assert truncated
assert retrn == 20
# --- end of bipedal-skills-main/tests/test_gototargets.py ---
# Copyright (c) 2021-present, Facebook, Inc.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
from bisk.features.joints import JointsFeaturizer
from bisk.single_robot import BiskSingleRobotEnv
def test_names_walker():
env = BiskSingleRobotEnv('walker')
ftzr = JointsFeaturizer(env.p, 'walker', 'robot')
assert ftzr.observation_space.shape == (60,)
assert ftzr().shape == ftzr.observation_space.shape
    # fmt: off
assert ftzr.feature_names() == [
'rootz:p',
'rootx:p',
'rooty:p',
'right_hip:p',
'right_knee:p',
'right_ankle:p',
'left_hip:p',
'left_knee:p',
'left_ankle:p',
'rootz:v',
'rootx:v',
'rooty:v',
'right_hip:v',
'right_knee:v',
'right_ankle:v',
'left_hip:v',
'left_knee:v',
'left_ankle:v',
'torso:crx',
'torso:cry',
'torso:crz',
'torso:ctx',
'torso:cty',
'torso:ctz',
'right_thigh:crx',
'right_thigh:cry',
'right_thigh:crz',
'right_thigh:ctx',
'right_thigh:cty',
'right_thigh:ctz',
'right_leg:crx',
'right_leg:cry',
'right_leg:crz',
'right_leg:ctx',
'right_leg:cty',
'right_leg:ctz',
'right_foot:crx',
'right_foot:cry',
'right_foot:crz',
'right_foot:ctx',
'right_foot:cty',
'right_foot:ctz',
'left_thigh:crx',
'left_thigh:cry',
'left_thigh:crz',
'left_thigh:ctx',
'left_thigh:cty',
'left_thigh:ctz',
'left_leg:crx',
'left_leg:cry',
'left_leg:crz',
'left_leg:ctx',
'left_leg:cty',
'left_leg:ctz',
'left_foot:crx',
'left_foot:cry',
'left_foot:crz',
'left_foot:ctx',
'left_foot:cty',
'left_foot:ctz',
]
    # fmt: on
env.close()
def test_exclude_walker():
env = BiskSingleRobotEnv('walker')
ftzr = JointsFeaturizer(
env.p, 'walker', 'robot', exclude='.*/(left|right)_foot'
)
assert ftzr.observation_space.shape == (48,)
assert ftzr().shape == ftzr.observation_space.shape
    # fmt: off
assert ftzr.feature_names() == [
'rootz:p',
'rootx:p',
'rooty:p',
'right_hip:p',
'right_knee:p',
'right_ankle:p',
'left_hip:p',
'left_knee:p',
'left_ankle:p',
'rootz:v',
'rootx:v',
'rooty:v',
'right_hip:v',
'right_knee:v',
'right_ankle:v',
'left_hip:v',
'left_knee:v',
'left_ankle:v',
'torso:crx',
'torso:cry',
'torso:crz',
'torso:ctx',
'torso:cty',
'torso:ctz',
'right_thigh:crx',
'right_thigh:cry',
'right_thigh:crz',
'right_thigh:ctx',
'right_thigh:cty',
'right_thigh:ctz',
'right_leg:crx',
'right_leg:cry',
'right_leg:crz',
'right_leg:ctx',
'right_leg:cty',
'right_leg:ctz',
'left_thigh:crx',
'left_thigh:cry',
'left_thigh:crz',
'left_thigh:ctx',
'left_thigh:cty',
'left_thigh:ctz',
'left_leg:crx',
'left_leg:cry',
'left_leg:crz',
'left_leg:ctx',
'left_leg:cty',
'left_leg:ctz',
]
    # fmt: on
env.close()
# --- end of bipedal-skills-main/tests/test_features_joints.py ---
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskHurdles-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_render(env):
img = env.render(width=480, height=480)
assert img.shape == (480, 480, 3)
# --- end of bipedal-skills-main/tests/test_render.py ---
# Copyright (c) 2021-present, Facebook, Inc.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskLimbo-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_reward_clear(env):
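    # 'upf' is the per-step control that exactly counteracts gravity on the
    # test cube: its weight (mass * 9.81) divided by the gear ratio of the
    # z actuator. The same constant reappears in the other obstacle tests.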
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, _ = env.reset(seed=0)
# Cross bar
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, 0])
if next_obs['next_bar'][0] > obs['next_bar'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_bar'][2] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
# Cross bar again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_bar'][0] > obs['next_bar'][0]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
# Cross next bar, receive reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_bar'][0] > obs['next_bar'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
def test_reward_stuck(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
# Go up so that we'll be stuck at the first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Low threshold, remaining momentum will bring us to the right height
if obs['observation'][2] - obs['next_bar'][1] >= -0.6:
break
# Go forward -- should be stuck at first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert reward == 0
# --- end of bipedal-skills-main/tests/test_limbo.py ---
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskStairs-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_reward_clear(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Cross first step
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_steps'][0] > obs['next_steps'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_steps'][2] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
# Cross step again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, 1])
if next_obs['next_steps'][0] > obs['next_steps'][0]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
# Cross next step, receive reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, 1])
if next_obs['next_steps'][0] > obs['next_steps'][0]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
def test_reward_full(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
terminated, truncated = False, False
ret = 0
while not (terminated or truncated):
obs, reward, terminated, truncated, info = env.step([1, 0, 1])
ret += reward
assert ret == 20
def test_reward_stuck(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
# Go forward -- should be stuck at first step
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert reward == 0
# --- end of bipedal-skills-main/tests/test_stairs.py ---
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import numpy as np
import pytest
import bisk
def test_walker_fallover():
env = gym.make('BiskGoalWall-v1', robot='walker')
env.reset(seed=0)
while True:
obs, reward, terminated, truncated, info = env.step(
np.zeros(env.action_space.shape)
)
if terminated or truncated:
break
assert terminated
assert not truncated
env.close()
def test_walker_continuous():
env = gym.make('BiskGoalWallC-v1', robot='walker')
env.reset(seed=0)
while True:
obs, reward, terminated, truncated, info = env.step(
np.zeros(env.action_space.shape)
)
if terminated or truncated:
break
assert truncated
assert not terminated
env.close()
def test_humanoidcmupc_fallover():
env = gym.make('BiskGoalWall-v1', robot='humanoidcmupc')
env.reset(seed=0)
while True:
obs, reward, terminated, truncated, info = env.step(
np.zeros(env.action_space.shape)
)
if terminated or truncated:
break
assert terminated
assert not truncated
env.close()
def test_humanoidcmupc_continuous():
env = gym.make('BiskGoalWallC-v1', robot='humanoidcmupc')
env.reset(seed=0)
while True:
obs, reward, terminated, truncated, info = env.step(
np.zeros(env.action_space.shape)
)
if terminated or truncated:
break
assert truncated
assert not terminated
env.close()
# --- end of bipedal-skills-main/tests/test_fallover.py ---
# Copyright (c) 2021-present, Facebook, Inc.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskHurdlesLimbo-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_reward_clear(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
assert obs['next_obstacle'][0] == 0
# Cross hurdle
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
assert obs['next_obstacle'][0] == 1
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_obstacle'][3] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
assert obs['next_obstacle'][0] == 0
# Cross hurdle again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
assert obs['next_obstacle'][0] == 1
# Go up so that we'll be stuck at the first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Low threshold, remaining momentum will bring us to the right height
if obs['observation'][2] - obs['next_obstacle'][1] >= -0.6:
break
# Cross next bar, receive reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
assert obs['next_obstacle'][0] == 0
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_obstacle'][3] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
assert obs['next_obstacle'][0] == 1
# Cross bar again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
assert obs['next_obstacle'][0] == 0
def test_reward_stuck1(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
# Go forward -- should be stuck at first hurdle
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert reward == 0
def test_reward_stuck2(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Cross hurdle
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_obstacle'][1] > obs['next_obstacle'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
# Go up so that we'll be stuck at the first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Low threshold, remaining momentum will bring us to the right height
if obs['observation'][2] - obs['next_obstacle'][2] >= -0.6:
break
# Go forward -- should be stuck at first bar
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([1, 0, upf])
assert reward == 0
# --- end of bipedal-skills-main/tests/test_hurdleslimbo.py ---
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
env = gym.make('BiskGaps-v1', robot='testcube')
obs, _ = env.reset(seed=0)
yield env
env.close()
def test_reward_cross(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Go to platform
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][1] > obs['next_gap_platform'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
# Go back
for _ in range(64):
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
if obs['next_gap_platform'][2] == 0:
break
obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
for _ in range(4):
obs, reward, terminated, truncated, info = env.step([0, 0, upf])
# Go to platform again, receive no reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][1] > obs['next_gap_platform'][1]:
assert reward == 0
break
else:
assert reward == 0
obs = next_obs
obs = next_obs
# Go to next platform, receive reward
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][1] > obs['next_gap_platform'][1]:
assert reward == 1
break
else:
assert reward == 0
obs = next_obs
def test_touch_gap(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Go to gap
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][0] > obs['next_gap_platform'][0]:
break
obs = next_obs
# Go down into gap
for _ in range(8):
obs, reward, terminated, truncated, info = env.step([-1, 0, -0.6])
if terminated:
break
assert reward == -1
    assert terminated
def test_touch_platform(env):
upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
env.p.named.model.actuator_gear['robot/z'][0]
)
obs, reward, terminated, truncated, info = env.step([0, 0, 1])
# Go to platform
for _ in range(64):
next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
if next_obs['next_gap_platform'][1] > obs['next_gap_platform'][1]:
break
obs = next_obs
# Go down on platform
for _ in range(8):
obs, reward, terminated, truncated, info = env.step([0, 0, -1])
assert reward == 0
    assert not terminated
# --- end of bipedal-skills-main/tests/test_gaps.py ---
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
__version__ = "2.0"
from gym.envs.registration import register
from bisk.base import BiskEnv
from bisk.single_robot import BiskSingleRobotEnv
def register_all(robot, shaped, fallover):
fallover_text = 'C' if fallover else ''
shaped_text = 'Shaped' if shaped else ''
register(
id=f'BiskGoToTargets{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.gototarget:BiskGoToTargetEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'tolerance': 0.5,
'goal_area': 8.0,
'num_targets': 2,
'goal_switch_steps': 1,
},
max_episode_steps=1000,
)
register(
id=f'BiskGoToSphere{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.gototarget:BiskGoToTargetEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'tolerance': 0.5,
'goal_area': 4.0,
'num_targets': 1,
'single_target': True,
'on_circle': True,
},
max_episode_steps=1000,
)
register(
id=f'BiskHurdles{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.hurdles:BiskHurdlesEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'max_height': 0.3,
'fixed_height': False,
},
max_episode_steps=1000,
)
register(
id=f'BiskLimbo{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.limbo:BiskLimboEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'notouch': False,
'min_height': 'auto',
'fixed_height': False,
},
max_episode_steps=1000,
)
register(
id=f'BiskHurdlesLimbo{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.hurdleslimbo:BiskHurdlesLimboEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'notouch': False,
'min_bar_height': 'auto',
'max_hurdle_height': 0.3,
'fixed_height': False,
},
max_episode_steps=1000,
)
register(
id=f'BiskGaps{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.gaps:BiskGapsEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'max_size': 2.5,
'min_gap': 0.2,
'max_gap': 0.7,
'fixed_size': False,
},
max_episode_steps=1000,
)
register(
id=f'BiskStairs{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.stairs:BiskStairsEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'step_height': 0.2,
'step_length_min': 0.5,
'step_length_max': 1.0,
'num_flights': 2,
},
max_episode_steps=1000,
)
register(
id=f'BiskStairsCont{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.stairs:BiskStairsEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'step_height': 0.2,
'step_length_min': 0.5,
'step_length_max': 1.0,
'num_flights': 10,
},
max_episode_steps=1000,
)
register(
id=f'BiskGoalWall{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.goalwall:BiskGoalWallEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
'init_distance': 2.5,
'touch_ball_reward': 0,
},
max_episode_steps=250,
)
register(
id=f'BiskVelocityControl{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.velocitycontrol:BiskVelocityControlEnv',
kwargs={
'robot': robot,
'features': 'joints',
            'allow_fallover': fallover,
'shaped': shaped,
},
max_episode_steps=1000,
)
if shaped:
register(
id=f'BiskRunDir{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.rundir:BiskRunDirEnv',
kwargs={
'robot': robot,
'features': 'joints',
                'allow_fallover': fallover,
'heading_deg': 0,
},
max_episode_steps=1000,
)
if not shaped:
register(
id=f'BiskPoleBalance{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.polebalance:BiskPoleBalanceEnv',
kwargs={
'robot': robot,
'features': 'joints',
                'allow_fallover': fallover,
'pole_mass': 0.5,
'pole_length': 0.5,
'n_poles': 1,
},
max_episode_steps=1000,
)
register(
id=f'BiskButterflies{shaped_text}{fallover_text}{robot}-v1',
entry_point=f'bisk.tasks.butterflies:BiskButterfliesEnv',
kwargs={
'robot': robot,
'features': 'joints',
                'allow_fallover': fallover,
'shaped': shaped,
'goal_area': 4,
'n_butterflies': 10,
'zoff': 1.6,
},
max_episode_steps=1000,
)
for robot in (
'',
'HalfCheetah',
'Walker',
'Humanoid',
'HumanoidPC',
'HumanoidCMUPC',
'HumanoidAMASSPC',
):
for shaped in (False, True):
for fallover in (False, True):
register_all(robot, shaped, fallover)
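# Usage sketch (assumption: the package and MuJoCo are installed). IDs compose
# as f'Bisk{Task}{Shaped?}{C?}{Robot}-v1'; the robot-less variants take the
# robot as a gym.make kwarg, exactly as the test suite does.
if __name__ == '__main__':
    import gym
    env = gym.make('BiskHurdles-v1', robot='testcube')
    obs, _ = env.reset(seed=0)
    print(env.action_space, list(obs.keys()))
    env.close()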
# --- end of bipedal-skills-main/bisk/__init__.py ---
# gym.utils.seeding from gym 0.18.3
# Released under an MIT license
# (https://github.com/openai/gym/blob/0.18.3/LICENSE.md)
#
# This is provided for consistency as seeding changed with gym 0.26.
import hashlib
import numpy as np
import os
import random as _random
import struct
import sys
from gym import error
def np_random(seed=None):
if seed is not None and not (isinstance(seed, int) and 0 <= seed):
raise error.Error('Seed must be a non-negative integer or omitted, not {}'.format(seed))
seed = create_seed(seed)
rng = np.random.RandomState()
rng.seed(_int_list_from_bigint(hash_seed(seed)))
return rng, seed
def hash_seed(seed=None, max_bytes=8):
"""Any given evaluation is likely to have many PRNG's active at
once. (Most commonly, because the environment is running in
multiple processes.) There's literature indicating that having
linear correlations between seeds of multiple PRNG's can correlate
the outputs:
http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
http://dl.acm.org/citation.cfm?id=1276928
Thus, for sanity we hash the seeds before using them. (This scheme
is likely not crypto-strength, but it should be good enough to get
rid of simple correlations.)
Args:
seed (Optional[int]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the hashed seed.
"""
if seed is None:
seed = create_seed(max_bytes=max_bytes)
hash = hashlib.sha512(str(seed).encode('utf8')).digest()
return _bigint_from_bytes(hash[:max_bytes])
def create_seed(a=None, max_bytes=8):
"""Create a strong random seed. Otherwise, Python 2 would seed using
the system time, which might be non-robust especially in the
presence of concurrency.
Args:
a (Optional[int, str]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the seed.
"""
# Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
if a is None:
a = _bigint_from_bytes(os.urandom(max_bytes))
elif isinstance(a, str):
a = a.encode('utf8')
a += hashlib.sha512(a).digest()
a = _bigint_from_bytes(a[:max_bytes])
elif isinstance(a, int):
a = a % 2**(8 * max_bytes)
else:
raise error.Error('Invalid type for seed: {} ({})'.format(type(a), a))
return a
# TODO: don't hardcode sizeof_int here
def _bigint_from_bytes(bytes):
sizeof_int = 4
padding = sizeof_int - len(bytes) % sizeof_int
bytes += b'\0' * padding
int_count = int(len(bytes) / sizeof_int)
unpacked = struct.unpack("{}I".format(int_count), bytes)
accum = 0
for i, val in enumerate(unpacked):
accum += 2 ** (sizeof_int * 8 * i) * val
return accum
def _int_list_from_bigint(bigint):
# Special case 0
if bigint < 0:
raise error.Error('Seed must be non-negative, not {}'.format(bigint))
elif bigint == 0:
return [0]
ints = []
while bigint > 0:
bigint, mod = divmod(bigint, 2 ** 32)
ints.append(mod)
return ints
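if __name__ == '__main__':
    # Minimal sketch: np_random returns an (rng, seed) pair, and identical
    # seeds yield identical streams -- the property this legacy shim keeps
    # stable across gym versions.
    rng_a, seed_a = np_random(42)
    rng_b, seed_b = np_random(42)
    assert seed_a == seed_b == 42
    assert rng_a.uniform() == rng_b.uniform()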
# --- end of bipedal-skills-main/bisk/legacy_seeding.py ---
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import Iterable, List
import gym
import numpy as np
from bisk.base import BiskEnv
from bisk.features import make_featurizer
from bisk.helpers import (add_ball, add_box, add_capsule, add_fwd_corridor,
add_robot, root_with_floor)
log = logging.getLogger(__name__)
class BiskSingleRobotEnv(BiskEnv):
def __init__(
self, robot: str, features: str = 'joints', allow_fallover: bool = False
):
super().__init__()
self.allow_fallover = allow_fallover
self.robot = robot.lower()
self.world_scale = 1.0
if self.robot.startswith('humanoidcmu') or self.robot.startswith(
'humanoidamass'
):
            # The CMU/AMASS robots are about 1.3 times the size of the
            # default Humanoid
self.world_scale = 1.3
root = root_with_floor()
_, robot_pos = add_robot(
root, robot, 'robot', init=getattr(self, 'init_robot', None)
)
frameskip = 5
fs = root.find('numeric', 'robot/frameskip')
if fs is not None:
frameskip = int(fs.data[0])
self.init_sim(root, frameskip)
if self.robot.startswith('halfcheetah'):
# qpos is x_pos, z_pos, y_rot
self.init_qpos[0] = robot_pos[0]
self.init_qpos[1] = robot_pos[2]
elif self.robot.startswith('walker'):
# qpos is z_pos, x_pos, y_rot
self.init_qpos[0] = robot_pos[2]
self.init_qpos[1] = robot_pos[0]
elif self.robot.startswith('humanoidcmu') or self.robot.startswith(
'humanoidamass'
):
# initialize to upright position
self.init_qpos[0:3] = [0.0, 0.0, 1.2]
self.init_qpos[3:7] = [0.859, 1.0, 1.0, 0.859]
else:
# TODO Verify that this actually corresponds to the torso position?
self.init_qpos[0:3] = robot_pos
self.featurizer = self.make_featurizer(features)
self.observation_space = self.featurizer.observation_space
@property
def is_2d(self):
        # TODO: something more proper? But it needs to be callable from
        # init_sim, i.e. before the simulator instance is constructed.
return (
self.robot.startswith('halfcheetah')
or self.robot.startswith('walker')
or self.robot == 'testcube2d'
)
@property
def robot_pos(self) -> np.ndarray:
if self.robot.startswith('humanoidcmu') or self.robot.startswith(
'humanoidamass'
):
return self.p.named.data.xpos['robot/lowerneck']
else:
return self.p.named.data.xpos['robot/torso']
@property
def robot_speed(self) -> np.ndarray:
return self.p.named.data.subtree_linvel['robot/torso']
def make_featurizer(self, features: str):
return make_featurizer(features, self.p, self.robot, 'robot')
def reset_state(self):
noise = 0.1
qpos = self.init_qpos + self.np_random.uniform(
low=-noise, high=noise, size=self.p.model.nq
)
qvel = self.init_qvel + noise * self.np_random.standard_normal(self.p.model.nv)
self.p.data.qpos[:] = qpos
self.p.data.qvel[:] = qvel
self.featurizer.reset()
def get_observation(self):
return self.featurizer()
def fell_over(self) -> bool:
if self.robot.startswith('humanoid'):
zpos = self.robot_pos[2]
return bool(zpos < 0.9)
elif self.robot.startswith('halfcheetah'):
# Orientation pointing upwards and body almost on the ground
up = self.p.named.data.xmat['robot/torso', 'zz']
zpos = self.p.named.data.qpos['robot/rootz']
if up < -0.8 and zpos < 0.12:
return True
elif self.robot.startswith('walker'):
zpos = self.p.named.data.qpos['robot/rootz']
r = self.p.named.data.qpos['robot/rooty']
if zpos < 0.9 or r < -1.4 or r > 1.4:
return True
return False
def step(self, action):
obs, reward, terminated, truncated, info = super().step(action)
if not self.allow_fallover and self.fell_over():
terminated = True
info['fell_over'] = True
return obs, reward, terminated, truncated, info
def add_box(
self,
root: 'dm_control.mjcf.RootElement',
name: str,
size: Iterable[float],
rgba: Iterable[float] = None,
with_body: bool = False,
**kwargs,
) -> 'dm_control.mjcf.Element':
size = [s * self.world_scale for s in size]
if 'pos' in kwargs:
kwargs['pos'] = [p * self.world_scale for p in kwargs['pos']]
return add_box(root, name, size, rgba, with_body, **kwargs)
def add_capsule(
self,
root: 'dm_control.mjcf.RootElement',
name: str,
rgba: Iterable[float] = None,
with_body: bool = False,
**kwargs,
) -> 'dm_control.mjcf.Element':
if 'fromto' in kwargs:
kwargs['fromto'] = [p * self.world_scale for p in kwargs['fromto']]
if 'size' in kwargs:
kwargs['size'] = [p * self.world_scale for p in kwargs['size']]
if 'pos' in kwargs:
kwargs['pos'] = [p * self.world_scale for p in kwargs['pos']]
return add_capsule(root, name, rgba, with_body, **kwargs)
def add_fwd_corridor(self, root: 'dm_control.mjcf.RootElement', W=4):
return add_fwd_corridor(root, W * self.world_scale)
class BiskSingleRobotWithBallEnv(BiskSingleRobotEnv):
def __init__(
self, robot: str, features: str = 'joints', allow_fallover: bool = False
):
super().__init__(robot, features, allow_fallover)
self.ball_qpos_idx: List[int] = []
self.ball_qvel_idx: List[int] = []
if self.is_2d:
for j in ['ball-x', 'ball-z', 'ball-ry']:
qppos = self.p.named.model.jnt_qposadr[j]
self.ball_qpos_idx.append(qppos)
qvpos = self.p.named.model.jnt_dofadr[j]
self.ball_qvel_idx.append(qvpos)
else:
qppos = self.p.named.model.jnt_qposadr['ball']
for i in range(3):
self.ball_qpos_idx.append(qppos + i)
qvpos = self.p.named.model.jnt_dofadr['ball']
for i in range(6):
self.ball_qvel_idx.append(qvpos + i)
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(len(self.ball_qpos_idx) + len(self.ball_qvel_idx),),
dtype=np.float32,
)
self.observation_space = gym.spaces.Dict(
[('ball', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: 'mjcf.RootElement', frameskip: int = 5):
ball_size = 0.15 * self.world_scale
add_ball(
root,
'ball',
size=ball_size,
mass=0.1 * self.world_scale,
twod=self.is_2d,
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
# Small noise for ball
noise = 0.01
qpos = self.init_qpos + self.np_random.uniform(
low=-noise, high=noise, size=self.p.model.nq
)
qvel = self.init_qvel + noise * self.np_random.standard_normal(self.p.model.nv)
self.p.data.qpos[self.ball_qpos_idx] = qpos[self.ball_qpos_idx]
self.p.data.qvel[self.ball_qvel_idx] = qvel[self.ball_qvel_idx]
def get_observation(self):
ball_qpos = self.p.data.qpos[self.ball_qpos_idx].ravel().copy()
ball_qvel = self.p.data.qvel[self.ball_qvel_idx]
# Ball X/Y position is relative to robot's torso
ball_qpos[0] -= self.robot_pos[0]
if not self.is_2d:
ball_qpos[1] -= self.robot_pos[1]
else:
# Normalize Y rotation to [-pi,pi], as MuJoCo produces large values
# occasionally.
ball_qpos[2] = np.arctan2(
np.sin(ball_qpos[2]), np.cos(ball_qpos[2])
)
return {
'observation': super().get_observation(),
'ball': np.concatenate([ball_qpos, ball_qvel]).astype(np.float32),
}
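# Minimal usage sketch (assumes the bundled walker asset and MuJoCo),
# mirroring tests/test_features_joints.py: build a bare single-robot env and
# inspect the featurized observation.
if __name__ == '__main__':
    env = BiskSingleRobotEnv('walker')
    obs, _ = env.reset(seed=0)
    print(obs.shape, env.robot_pos)
    env.close()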
# --- end of bipedal-skills-main/bisk/single_robot.py ---
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from typing import Tuple, Iterable
import logging
import numpy as np
from gym.utils import seeding
log = logging.getLogger(__name__)
FANCY_SKYBOX = False
def root_with_floor() -> 'dm_control.mjcf.RootElement':
'''
Constructs a root element with the commonly used checkered floor.
'''
from dm_control import mjcf
root = mjcf.RootElement()
if FANCY_SKYBOX:
root.asset.add(
'texture',
type='skybox',
file=f'{asset_path()}/rainbow.png',
gridsize=[3, 4],
gridlayout='.U..LFRB.D..',
)
else:
root.asset.add(
'texture',
type='skybox',
builtin='gradient',
width=800,
height=800,
rgb1=[0.3, 0.5, 0.7],
rgb2=[0, 0, 0],
)
root.asset.add(
'texture',
name='tex_plane',
builtin='checker',
width=100,
height=100,
rgb1=[0.2, 0.3, 0.4],
rgb2=[0.1, 0.15, 0.2],
type='2d',
)
root.asset.add(
'material',
name='mat_plane',
reflectance=0.5,
shininess=1,
specular=1,
texrepeat=[1, 1],
texuniform=True,
texture='tex_plane',
)
root.worldbody.add(
'geom',
name='floor',
type='plane',
size=[100, 100, 100],
rgba=[0.8, 0.9, 0.8, 1.0],
conaffinity=1,
condim=3,
material='mat_plane',
pos=[0, 0, 0],
)
root.worldbody.add(
'light',
cutoff=100,
diffuse=[1, 1, 1],
dir=[0, 0, -1.3],
directional=True,
exponent=1,
pos=[0, 0, 1.3],
specular=[0.1, 0.1, 0.1],
)
return root
def asset_path() -> str:
return os.path.join(os.path.dirname(__file__), 'assets')
def add_robot(
root: 'dm_control.mjcf.RootElement', kind: str, name: str, xyoff=None,
init=None,
) -> Tuple['dm_control.mjcf.Element', np.ndarray]:
'''
Add a robot to the root element.
    Returns the attachment frame and the original position of the robot's
    torso. If the robot requires a fresh freejoint, its original position is
    returned (so that qpos can be initialized accordingly); otherwise,
    (0, 0, 0) is returned.
'''
from dm_control import mjcf
rm = mjcf.from_path(f'{asset_path()}/{kind.lower()}.xml')
if init is not None:
init(rm, name)
rm.model = name
torso = rm.find('body', 'torso')
if torso is None:
torso = rm.find('body', 'root')
pos = torso.pos
# Use a [0,0,0] torso pos when attaching the frame and rather set
# the default qpos manually later. dm_control's attachment frame
# logic (apparently?) resets the frame of reference of the freejoint.
torso.pos = [0, 0, 0]
if pos is None:
pos = torso.pos
if xyoff is not None:
pos[0] += xyoff[0]
pos[1] += xyoff[1]
root_joint = torso.find('joint', 'root')
if root_joint and (
root_joint.tag == 'freejoint' or root_joint.type == 'free'
):
root_joint.remove()
needs_freejoint = True
else:
needs_freejoint = False
af = root.attach(rm)
if needs_freejoint:
af.add('freejoint')
return af, pos
def add_box(
root: 'dm_control.mjcf.RootElement',
name: str,
size: Iterable[float],
rgba: Iterable[float] = None,
with_body: bool = False,
**kwargs,
) -> 'dm_control.mjcf.Element':
if rgba is None:
rgba = np.array([0.8, 0.9, 0.8, 1])
body = root.worldbody
if with_body:
body = root.worldbody.add('body', name=name)
box = body.add(
'geom',
type='box',
name=name,
condim=3,
size=size,
rgba=rgba,
**kwargs,
)
return body if with_body else box
def add_capsule(
root: 'dm_control.mjcf.RootElement',
name: str,
rgba: Iterable[float] = None,
with_body: bool = False,
**kwargs,
) -> 'dm_control.mjcf.Element':
if rgba is None:
rgba = np.array([0.8, 0.9, 0.8, 1])
body = root.worldbody
if with_body:
body = root.worldbody.add('body', name=name)
box = body.add(
'geom',
type='capsule',
name=name,
condim=3,
rgba=rgba,
**kwargs,
)
return body if with_body else box
def add_fwd_corridor(root: 'dm_control.mjcf.RootElement', W=4):
WH = 2
wall_alpha = 0.0 # for debugging
# Change rendering of floor to fit the intended path
floor = root.find('geom', 'floor')
floor.size = [100, W, 1]
floor.pos = [100 - W, 0, 0]
# Add border walls
root.worldbody.add(
'geom',
type='plane',
name='wall_left',
xyaxes=[1, 0, 0, 0, 0, 1],
size=[100, WH, 1],
pos=[100 - W, W, WH],
rgba=[0, 0, 0, wall_alpha],
)
root.worldbody.add(
'geom',
type='plane',
name='wall_right',
xyaxes=[-1, 0, 0, 0, 0, 1],
size=[100, WH, 1],
pos=[100 - W, -W, WH],
rgba=[0, 0, 0, wall_alpha],
)
root.worldbody.add(
'geom',
type='plane',
name='wall_back',
xyaxes=[0, 1, 0, 0, 0, 1],
size=[W, WH, 1],
pos=[-4, 0, WH],
rgba=[0, 0, 0, wall_alpha],
)
# The ball element follows the element definitions in quadruped.xml from
# dm_control:
# https://github.com/deepmind/dm_control/blob/33cea51/dm_control/suite/quadruped.xml
def add_ball(root: 'dm_control.mjcf.RootElement',
name: str,
size: float,
mass: float,
twod: bool = False,
**kwargs) -> 'dm_control.mjcf.Element':
root.asset.add('texture',
name='ball',
builtin='checker',
mark='cross',
width=151,
height=151,
rgb1=[0.1, 0.1, 0.1],
rgb2=[0.9, 0.9, 0.9],
markrgb=[1, 1, 1])
root.asset.add('material', name='ball', texture='ball')
ball = root.worldbody.add('body', name=name, pos=[0, 0, 0])
if twod:
ball.add('joint',
name='ball-x',
type='slide',
damping=0,
axis=[1, 0, 0],
pos=[0, 0, 0],
range=[-1000, 1000])
ball.add('joint',
name='ball-z',
type='slide',
damping=0,
axis=[0, 0, 1],
pos=[0, 0, 0],
range=[-1000, 1000])
ball.add('joint',
name='ball-ry',
type='hinge',
damping=0,
axis=[0, 1, 0],
pos=[0, 0, 0],
range=[-np.pi, np.pi])
else:
ball.add('freejoint', name=name)
ball.add('geom',
type='sphere',
name=name,
size=[size],
mass=mass,
condim=6,
friction=[0.7, 0.005, 0.005],
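             # negative solref values are MuJoCo's convention for specifying
             # stiffness and damping directly rather than as time constants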
solref=[-10000, -30],
material='ball',
priority=1)
return ball
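if __name__ == '__main__':
    # Minimal sketch (assumes dm_control and the bundled walker asset): build
    # a floor scene, attach a robot, and instantiate physics. The returned
    # torso position is what callers feed into init_qpos.
    from dm_control import mjcf
    root = root_with_floor()
    frame, torso_pos = add_robot(root, 'walker', 'robot')
    physics = mjcf.Physics.from_mjcf_model(root)
    print(torso_pos, physics.model.nq)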
# --- end of bipedal-skills-main/bisk/helpers.py ---
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import Optional
import gym
import numpy as np
from bisk import legacy_seeding as seeding
log = logging.getLogger(__name__)
class BiskEnv(gym.Env):
metadata = {'render_modes': ['rgb_array']}
def __init__(self):
# This is a stub; run init_sim() with a root element to set up the
# environment.
self.metadata = dict(**BiskEnv.metadata)
self.p = None
        self.np_random, _ = seeding.np_random(None)
def init_sim(self, root: 'mjcf.RootElement', frameskip: int = 5):
from dm_control import mjcf
if self.p is not None:
raise RuntimeError('Simulation already initialized')
self.p = mjcf.Physics.from_mjcf_model(root)
self.model = root
self.frameskip = frameskip
self.post_init()
def post_init(self):
self.init_qpos = self.p.data.qpos.ravel().copy()
self.init_qvel = self.p.data.qvel.ravel().copy()
# Expose all actuators
self.action_space = gym.spaces.Box(
self.p.model.actuator_ctrlrange[:, 0].astype(np.float32),
self.p.model.actuator_ctrlrange[:, 1].astype(np.float32),
dtype=np.float32,
)
# Leave observation space undefined in the base environment
self.metadata['render_fps'] = 1 / (self.p.model.opt.timestep * self.frameskip)
@property
def dt(self):
return self.p.model.opt.timestep * self.frameskip
def reset_state(self):
pass
def get_observation(self):
raise NotImplementedError()
def reset(self, seed: Optional[int] = None, options: Optional[dict] = None):
if seed is not None:
self.np_random, seed = seeding.np_random(seed)
if self.action_space is not None:
self.action_space.seed(seed)
if self.observation_space is not None:
self.observation_space.seed(seed)
# Disable contacts during reset to prevent potentially large contact
# forces that can be applied during initial positioning of bodies in
# reset_state().
with self.p.model.disable('contact'):
self.p.reset()
self.reset_state()
self.step_simulation()
return self.get_observation(), {}
def render(self, mode='rgb_array', **kwargs):
width = kwargs.get('width', 480)
height = kwargs.get('height', 480)
camera = kwargs.get('camera', 0)
flags = kwargs.get('flags', {})
return self.p.render(
width=width,
height=height,
camera_id=camera,
render_flag_overrides=flags,
)
def apply_action(self, action):
self.p.set_control(action)
def on_step_single_frame(self):
pass
def step_simulation(self):
from dm_control.mujoco.wrapper.mjbindings import mjlib
for _ in range(self.frameskip):
self.p.step()
self.on_step_single_frame()
# Call mj_rnePostConstraint to populate cfrc_ext (not done automatically
# in MuJoCo 2.0 unless the model defines the proper sensors)
mjlib.mj_rnePostConstraint(self.p.model.ptr, self.p.data.ptr)
# Same for subtree_linvel
mjlib.mj_subtreeVel(self.p.model.ptr, self.p.data.ptr)
def step(self, action):
from dm_control.rl.control import PhysicsError
self.apply_action(action)
try:
self.step_simulation()
except PhysicsError as e:
log.exception(e)
            return self.get_observation(), -1, True, False, {'physics_error': True}
return self.get_observation(), 0, False, False, {}
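if __name__ == '__main__':
    # Contract sketch: reset() returns (obs, info) and step() returns the
    # gym>=0.26 style five-tuple, as exercised by the test suite.
    import bisk  # noqa: F401 -- registers the Bisk* environments
    env = gym.make('BiskPoleBalance-v1', robot='testcube')
    obs, info = env.reset(seed=0)
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    print(reward, terminated, truncated)
    env.close()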
# --- end of bipedal-skills-main/bisk/base.py ---
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import gym
import numpy as np
from dm_control.utils import rewards
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskVelocityControlEnv(BiskSingleRobotEnv):
'''
Track a randomly changing velocity. From MoCapAct:
https://github.com/microsoft/MoCapAct/blob/e11713c/mocapact/tasks/velocity_control.py
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
max_speed: float = 4.5,
reward_margin: float = 0.75,
direction_exponent: float = 1.0,
steps_before_changing_velocity: int = 166,
**kwargs
):
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.max_speed = max_speed
self.reward_margin = reward_margin
self.direction_exponent = direction_exponent
self.steps_before_changing_velocity = steps_before_changing_velocity
obs_base = self.featurizer.observation_space
obs_task = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('target', obs_task), ('observation', obs_base)]
)
def sample_move_speed(self):
self.move_speed = self.np_random.uniform(high=self.max_speed)
if self.is_2d:
# Go forward or backward
self.move_angle = self.np_random.choice([0, np.pi])
else:
self.move_angle = self.np_random.uniform(high=2 * np.pi)
self.move_speed_counter = 0
def reset_state(self):
super().reset_state()
self.sample_move_speed()
def get_observation(self):
sin, cos = np.sin(self.move_angle), np.cos(self.move_angle)
phase = self.move_speed_counter / self.steps_before_changing_velocity
return {
'observation': super().get_observation(),
'target': np.array([self.move_speed, sin, cos, phase]),
}
def step(self, action):
obs, reward, terminated, truncated, info = super().step(action)
xvel, yvel = self.robot_speed[:2]
speed = np.linalg.norm([xvel, yvel])
speed_error = self.move_speed - speed
speed_reward = np.exp(-((speed_error / self.reward_margin) ** 2))
if np.isclose(xvel, 0.0) and np.isclose(yvel, 0.0):
dot = 0.0
angle_reward = 1.0
else:
direction = np.array([xvel, yvel])
direction /= np.linalg.norm(direction)
direction_tgt = np.array(
[np.cos(self.move_angle), np.sin(self.move_angle)]
)
dot = direction_tgt.dot(direction)
angle_reward = ((dot + 1) / 2) ** self.direction_exponent
speed_match = np.abs(speed_error) < 0.1
angle_match = dot > np.cos(np.deg2rad(15))
score = 1.0 if speed_match and angle_match else 0.0
if self.shaped:
reward = speed_reward * angle_reward
else:
reward = score
info['score'] = score
self.move_speed_counter += 1
if self.move_speed_counter >= self.steps_before_changing_velocity:
self.sample_move_speed()
obs = self.get_observation()
if info.get('fell_over', False):
terminated = True
reward = -1
return obs, reward, terminated, truncated, info
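# Worked example of the shaped reward above (plain numbers, no simulation):
# moving at 2 m/s along the requested heading when 3 m/s is asked for.
if __name__ == '__main__':
    move_speed, reward_margin, direction_exponent = 3.0, 0.75, 1.0
    speed_error = move_speed - 2.0
    speed_reward = np.exp(-((speed_error / reward_margin) ** 2))  # ~0.169
    angle_reward = ((1.0 + 1) / 2) ** direction_exponent  # dot=1: aligned
    print(speed_reward * angle_reward)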
# --- end of bipedal-skills-main/bisk/tasks/velocitycontrol.py ---
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import Dict, List, Union
import gym
import numpy as np
from dm_control import mjcf
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskHurdlesLimboEnv(BiskSingleRobotEnv):
'''
Alternating hurdles and limbo bars.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
notouch: bool,
min_bar_height: Union[float, str],
max_hurdle_height: float,
fixed_height: bool,
**kwargs,
):
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.notouch = notouch
self.fixed_height = fixed_height
self.max_obstacles_cleared = 0
if min_bar_height == 'auto':
if self.robot.startswith('humanoid'):
self.min_bar_height = 1.0
elif self.robot.startswith('walker'):
self.min_bar_height = 1.2
else:
self.min_bar_height = 1.0
else:
self.min_bar_height = float(min_bar_height)
self.max_hurdle_height = max_hurdle_height
self.robot_geoms: List[int] = []
for g in self.p.named.model.body_geomadr.axes.row.names:
if g.startswith('robot/'):
self.robot_geoms.append(self.p.named.model.body_geomadr[g])
self.bar_geoms: Dict[int, int] = {}
for i, g in enumerate(self.p.named.model.geom_bodyid.axes.row.names):
if g.startswith('bar-'):
self.bar_geoms[i] = int(g.split('-')[1])
self.bar_geom_ids = set(self.bar_geoms.keys())
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('next_obstacle', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
# 200 obstacles should be enough for everybody
self.n_obstacles = 200
for i in range(self.n_obstacles):
if i % 2 == 0:
b = self.add_box(
root, f'hurdle-{i}', size=[0.05, W, 0.1], pos=[2, 0, 0.2]
)
else:
b = self.add_capsule(
root,
f'bar-{i}',
fromto=[2.025, -W, 0.1, 2.025, W, 0.1],
size=[0.1],
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_obstacles_cleared = 0
xpos = 1
intervals = self.np_random.uniform(3, 6, size=(self.n_obstacles,)) * self.world_scale
assert self.n_obstacles % 2 == 0
if self.fixed_height:
bar_heights = (
np.zeros(self.n_obstacles // 2)
+ self.min_bar_height * self.world_scale
)
hurdle_heights = (
np.zeros(self.n_obstacles // 2)
+ self.max_hurdle_height * self.world_scale
)
else:
bar_heights = (
self.np_random.uniform(
self.min_bar_height,
self.min_bar_height + 0.3,
size=(self.n_obstacles // 2,),
)
* self.world_scale
)
hurdle_heights = (
self.np_random.uniform(
0.1, self.max_hurdle_height, size=(self.n_obstacles // 2,)
)
* self.world_scale
)
self.obstacle_pos = []
self.obstacle_type = []
nm = self.p.named.model
for i in range(self.n_obstacles):
xpos += intervals[i]
self.obstacle_pos.append(xpos)
self.obstacle_type.append(i % 2)
if i % 2 == 0:
nm.geom_size[f'hurdle-{i}'][2] = hurdle_heights[i // 2]
nm.geom_pos[f'hurdle-{i}'][0] = xpos
nm.geom_pos[f'hurdle-{i}'][2] = (
hurdle_heights[i // 2] / 2 + 0.01
)
else:
nm.geom_pos[f'bar-{i}'][0] = xpos
nm.geom_pos[f'bar-{i}'][2] = (
bar_heights[i // 2] + nm.geom_size[f'bar-{i}'][0]
)
nm.geom_rgba[f'bar-{i}'] = [0.8, 0.9, 0.8, 1]
self.bar_hit = [False] * self.n_obstacles
self.new_bars_hit = set()
def get_observation(self):
no = self.next_obstacle_index()
if no < len(self.obstacle_pos):
next_obstacle_type = self.obstacle_type[no]
xpos = self.robot_pos[0]
nm = self.p.named.model
if next_obstacle_type == 0:
next_obstacle_d = nm.geom_pos[f'hurdle-{no}'][0] - xpos
next_obstacle_h = nm.geom_pos[f'hurdle-{no}'][2] * 2
else:
next_obstacle_d = nm.geom_pos[f'bar-{no}'][0] - xpos
next_obstacle_h = (
nm.geom_pos[f'bar-{no}'][2] + nm.geom_size[f'bar-{no}'][0]
)
else:
next_obstacle_d = 10.0
next_obstacle_h = 0.1
next_obstacle_cleared = no < self.max_obstacles_cleared
return {
'observation': super().get_observation(),
'next_obstacle': np.array(
[
next_obstacle_type,
next_obstacle_d,
next_obstacle_h,
not next_obstacle_cleared,
],
dtype=np.float32
),
}
def next_obstacle_index(self):
xpos = self.robot_pos[0]
return bisect_left(self.obstacle_pos, xpos)
def on_step_single_frame(self):
contact = self.p.data.contact
for i, c in enumerate(contact.geom1):
if contact.dist[i] > 0:
continue
if c not in self.bar_geom_ids:
continue
bar = self.bar_geoms[c]
self.new_bars_hit.add(bar)
for i, c in enumerate(contact.geom2):
if contact.dist[i] > 0:
continue
if c not in self.bar_geom_ids:
continue
bar = self.bar_geoms[c]
self.new_bars_hit.add(bar)
def step_simulation(self):
super().step_simulation()
self.max_obstacles_cleared = max(
self.max_obstacles_cleared, self.next_obstacle_index()
)
def step(self, action):
self.new_bars_hit = set()
mobefore = self.max_obstacles_cleared
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_obstacles_cleared > mobefore else 0
touched = False
for hit in self.new_bars_hit:
if not self.bar_hit[hit] and self.notouch:
touched = True
if self.notouch:
marked = [0.8, 0.0, 0.0, 1.0]
self.p.named.model.geom_rgba[f'bar-{hit}'] = marked
score -= 1
self.bar_hit[hit] = True
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
reward = -1
terminated = True
return obs, reward, terminated, truncated, info
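# Sketch of the clearing logic above: bisect_left over the sorted obstacle
# x-positions gives the index of the next obstacle ahead of the robot, and
# reward is paid only when that index exceeds its previous maximum.
if __name__ == '__main__':
    obstacle_pos = [3.0, 7.5, 11.0]
    for x in (0.0, 4.0, 12.0):
        print(x, bisect_left(obstacle_pos, x))  # -> 0, 1, 3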
# --- end of bipedal-skills-main/bisk/tasks/hurdleslimbo.py ---
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import List
import gym
import numpy as np
from dm_control import mjcf
from dm_control.mujoco.wrapper.mjbindings import mjlib
from bisk.features import make_featurizer
from bisk.helpers import add_capsule, asset_path
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskButterfliesEnv(BiskSingleRobotEnv):
'''
Chasing butterflies. This is similar to more classic food gathering tasks,
but in three dimensions. The (humanoid) robot is equipped with a dip net and
has to collect as many "butterflies" (spheres floating in the air) as
possible within an episode. The butterflies are projected to a sphere around
the robot's head, and a fixed-size long/lat grid contains distances to the
closest butterflies in that direction.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
goal_area: int,
n_butterflies: int,
zoff: int,
shaped: bool = False,
**kwargs,
):
self.n_butterflies = n_butterflies
super().__init__(robot, features, allow_fallover, **kwargs)
self.goal_area = goal_area
self.shaped = shaped
self.zoff = zoff
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(10 * 10,), # 10x10 long/lat grid
dtype=np.float32,
)
self.observation_space = gym.spaces.Dict(
[('butterflies', obs_env), ('observation', obs_base)]
)
def make_featurizer(self, features: str):
return make_featurizer(
features, self.p, self.robot, 'robot', exclude=r'robot/net'
)
def init_robot(self, robot: 'mjcf.RootElement', name: str):
size = 0.01 * self.world_scale
if self.robot in {'humanoid', 'humanoidpc'}:
raise NotImplementedError()
elif self.robot in {'humanoidcmupc', 'humanoidamasspc'}:
hand = robot.find('body', 'lhand')
handg = hand.find('geom', 'lhand')
zpos = handg.size[0]
net = hand.add(
'body',
name='net',
pos=handg.pos + [0, 0, -zpos / 2],
xyaxes=[0, -1, 0, 0, 0, 1],
)
dclass = handg.dclass.dclass
elif self.robot == 'testcube':
torso = robot.find('body', 'torso')
torsog = torso.find('geom', 'torso')
zpos = torsog.size[2]
net = torso.add('body', name='net', pos=[0, 0, zpos])
if torsog.dclass:
dclass = torsog.dclass.dclass
else:
dclass = None
else:
raise NotImplementedError(
'Humanoid robot required for BiskButterfliesEnv'
)
robot.asset.add(
'texture',
name='tex_net',
type='2d',
file=f'{asset_path()}/net.png',
)
robot.asset.add(
'material',
name='mat_net',
reflectance=0.5,
shininess=0.2,
specular=1,
texrepeat=[10, 10],
texuniform=False,
texture='tex_net',
)
net_length = 0.5 * self.world_scale
net_mass = 0.01
if self.robot == 'testcube':
net_radius = 0.5 * self.world_scale
else:
net_radius = 0.15 * self.world_scale
net.add(
'geom',
name='net_handle_geom',
type='capsule',
fromto=[0, 0, 0, 0, 0, net_length],
size=[size],
mass=net_mass,
dclass=dclass,
)
net.add(
'geom',
name='net_geom',
type='ellipsoid',
pos=[0, 0, net_length + net_radius],
size=(net_radius, net_radius, 1e-3),
xyaxes=[1, 0, 0, 0, 0, 1],
mass=net_mass,
dclass=dclass,
contype=3, # collide with body and butterflies
material='mat_net',
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
try:
from matplotlib import pyplot as plt
cmap = plt.get_cmap('rainbow')
        except ImportError:
cmap = lambda x: [1, 0, 0, 1]
root.asset.add(
'material',
name='butterfly',
reflectance=0,
shininess=0,
specular=0,
emission=0.5,
)
for i in range(self.n_butterflies):
root.worldbody.add(
'geom',
name=f'butterfly_{i}',
type='sphere',
pos=(i, 0.0, 1.0),
size=(0.1,),
rgba=cmap(i / self.n_butterflies),
material='butterfly',
gap=1.0, # high gap so that there won't be actual contact forces
conaffinity=2, # only collide with the net
)
super().init_sim(root, frameskip)
def get_observation(self):
nd = self.p.named.data
bf_geoms = [
idx
for idx, name in enumerate(nd.geom_xpos.axes.row.names)
if name.startswith('butterfly')
and not self.butterflies_caught[int(name.split('_')[1])]
]
grid = np.zeros((10, 10), dtype=np.float32)
if len(bf_geoms) == 0:
return {
'observation': super().get_observation(),
'butterflies': grid.flatten(),
}
try:
bf_rpos = np.dot(
nd.geom_xpos[bf_geoms] - nd.xpos['robot/head'],
nd.xmat['robot/head'].reshape(3, 3),
)
except KeyError:
bf_rpos = np.dot(
nd.geom_xpos[bf_geoms] - nd.xpos['robot/torso'],
nd.xmat['robot/torso'].reshape(3, 3),
)
bf_dist = np.linalg.norm(bf_rpos, axis=1)
bf_npos = np.divide(bf_rpos, bf_dist.reshape(bf_rpos.shape[0], 1)).T
lat = np.rad2deg(np.arccos(bf_npos[1]))
lon = np.rad2deg(np.arctan2(bf_npos[0], bf_npos[2]))
lat10 = np.floor(lat / 18).astype(np.int32)
lon10 = np.floor((lon + 180) / 36).astype(np.int32)
expdist = np.exp(-bf_dist)
for i, (x, y) in enumerate(zip(lat10, lon10)):
grid[x][y] = max(grid[x][y], expdist[i])
return {
'observation': super().get_observation(),
'butterflies': grid.flatten(),
}
def reset_state(self):
super().reset_state()
poss = self.np_random.uniform(-1.0, 1.0, size=(self.n_butterflies, 3))
scale = (
np.asarray([self.goal_area, self.goal_area, 0.5]) * self.world_scale
)
off = np.asarray([0, 0, self.zoff * self.world_scale])
if self.robot == 'testcube':
off[2] = 1
for i in range(self.n_butterflies):
self.p.named.model.geom_pos[f'butterfly_{i}'] = (
poss[i] * scale + off
)
self.p.named.model.geom_rgba[f'butterfly_{i}'][3] = 1
self.butterflies_caught = np.zeros(self.n_butterflies, dtype=np.int32)
def on_step_single_frame(self):
contact = self.p.data.contact
gnames = self.p.named.model.geom_type.axes.row.names
for c1, c2 in zip(contact.geom1, contact.geom2):
if not (
gnames[c1].startswith('butterfly_')
and gnames[c2] == 'robot/net_geom'
):
continue
id = int(gnames[c1].split('_')[1])
if self.butterflies_caught[id] == 0:
log.debug(f'contact: {gnames[c1]} - {gnames[c2]}')
self.butterflies_caught[id] = 1
self.p.named.model.geom_rgba[gnames[c1]][3] = 0.1
def step(self, action):
bfs_caught_before = self.butterflies_caught.sum()
obs, reward, terminated, truncated, info = super().step(action)
bfs_caught_after = self.butterflies_caught.sum()
score = bfs_caught_after - bfs_caught_before
info['score'] = score
# TODO: what's a good shaped reward here?
# Based on distance to the closest butterfly?
info['shaped_reward'] = score
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
terminated = True
reward = -1
        return obs, reward, terminated, truncated, info
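# Sketch of the observation encoding above: a unit direction in head
# coordinates maps to one cell of the 10x10 latitude/longitude grid, which
# stores exp(-distance) to the nearest butterfly in that direction.
if __name__ == '__main__':
    v = np.array([0.0, 0.0, 1.0])  # straight ahead
    lat = np.rad2deg(np.arccos(v[1]))         # 90 degrees
    lon = np.rad2deg(np.arctan2(v[0], v[2]))  # 0 degrees
    print(int(lat // 18), int((lon + 180) // 36))  # grid cell (5, 5)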
# --- end of bipedal-skills-main/bisk/tasks/butterflies.py ---
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import List
import gym
import numpy as np
from dm_control import mjcf
from dm_control.mujoco.wrapper.mjbindings import mjlib
from bisk.features import make_featurizer
from bisk.helpers import add_capsule
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskPoleBalanceEnv(BiskSingleRobotEnv):
'''
Classic pole balancing, but with robots. The pole is attached to a suitable
point (top of the robot's torso or head) with 3 degrees of freedom for
rotation. If its angle surpasses a threshold, the episode ends.
If n_poles > 1, multiple poles will be stacked on top of each other, and
    each connection point will again have 3 degrees of freedom.
For 2D robots (HalfCheetah, Walker), the pole has just one degree of
freedom (rotation around Y).
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
pole_mass: float,
pole_length: float,
n_poles: int,
**kwargs,
):
self.pole_mass = pole_mass
self.pole_length = pole_length
self.n_poles = n_poles
super().__init__(robot, features, allow_fallover, **kwargs)
self.pole_qpos_idx: List[int] = []
self.pole_qvel_idx: List[int] = []
if self.robot in {'halfcheetah', 'walker'}:
for i in range(self.n_poles):
qppos = self.p.named.model.jnt_qposadr[f'robot/pole-{i}_rot']
self.pole_qpos_idx.append(qppos)
qvpos = self.p.named.model.jnt_dofadr[f'robot/pole-{i}_rot']
self.pole_qvel_idx.append(qvpos)
else:
for i in range(self.n_poles):
for j in range(4):
qppos = (
j
+ self.p.named.model.jnt_qposadr[f'robot/pole-{i}_rot']
)
self.pole_qpos_idx.append(qppos)
for j in range(3):
qvpos = (
j + self.p.named.model.jnt_dofadr[f'robot/pole-{i}_rot']
)
self.pole_qvel_idx.append(qvpos)
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(len(self.pole_qpos_idx) + len(self.pole_qvel_idx),),
dtype=np.float32,
)
self.observation_space = gym.spaces.Dict(
[
('pole', obs_env),
('observation', obs_base),
]
)
def make_featurizer(self, features: str):
return make_featurizer(
features, self.p, self.robot, 'robot', exclude=r'robot/pole'
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
try:
from matplotlib import pyplot as plt
cmap = plt.get_cmap('rainbow')
        except Exception:
cmap = lambda x: [1, 0, 0, 1]
size = 0.05 * self.world_scale
if self.robot in {'humanoid', 'humanoidpc'}:
size = 0.02 * self.world_scale
head = root.find('body', 'robot/head')
headg = head.find('geom', 'head')
zpos = headg.size[0]
pole = head.add('body', name='pole-0', pos=[0, 0, zpos])
elif self.robot in {'humanoidcmupc', 'humanoidamasspc'}:
size = 0.02 * self.world_scale
head = root.find('body', 'robot/head')
headg = head.find('geom', 'head')
zpos = headg.size[0]
pole = head.add(
'body',
name='pole-0',
pos=headg.pos + [0, zpos, 0],
xyaxes=[1, 0, 0, 0, 0, -1],
)
elif self.robot in {'halfcheetah'}:
torso = root.find('body', 'robot/torso')
pole = torso.add('body', name='pole-0', pos=[0, 0, 0])
elif self.robot in {'walker'}:
torso = root.find('body', 'robot/torso')
torsog = torso.find('geom', 'torso')
pole = torso.add('body', name='pole-0', pos=[0, 0, torsog.size[1]])
else:
try:
torso = root.find('body', 'robot/torso')
zpos = torso.find('geom', 'torso').size[2]
pole = torso.add('body', name='pole-0', pos=[0, 0, zpos])
            except Exception:
raise NotImplementedError(
f'Don\'t know how to place poles on a {self.robot} robot'
)
if self.robot in {'halfcheetah', 'walker'}:
# HalfCheetah model is defined in radians
limit = np.pi if self.robot == 'halfcheetah' else 180
pole.add(
'joint',
name='pole-0_rot',
type='hinge',
damping=0.1,
stiffness=0,
axis='0 1 0',
pos=[0, 0, 0],
range=[-limit, limit],
)
else:
pole.add(
'joint',
name='pole-0_rot',
damping=0.1,
type='ball',
pos=[0, 0, 0],
range=[0, 90],
)
pole.add(
'geom',
name='pole-0_geom',
type='capsule',
fromto=[0, 0, 0, 0, 0, self.pole_length * self.world_scale],
size=[size],
mass=self.pole_mass * self.world_scale,
rgba=cmap(0),
)
for i in range(1, self.n_poles):
pole = pole.add(
'body', name=f'pole-{i}', pos=[0, 0, self.pole_length * self.world_scale]
)
if self.robot in {'halfcheetah', 'walker'}:
limit = np.pi if self.robot == 'halfcheetah' else 180
pole.add(
'joint',
name=f'pole-{i}_rot',
type='hinge',
damping=0.1,
stiffness=0,
axis='0 1 0',
pos=[0, 0, 0],
range=[-limit, limit],
)
else:
pole.add(
'joint',
name=f'pole-{i}_rot',
type='ball',
damping=0.1,
pos=[0, 0, 0],
range=[0, 90],
)
pole.add(
'geom',
name=f'pole-{i}_geom',
type='capsule',
fromto=[0, 0, 0, 0, 0, self.pole_length * self.world_scale],
size=[size],
mass=self.pole_mass * self.world_scale,
rgba=cmap((i + 1) / self.n_poles),
)
super().init_sim(root, frameskip)
def get_observation(self):
pole_qpos = self.p.data.qpos[self.pole_qpos_idx]
        pole_qvel = self.p.data.qvel[self.pole_qvel_idx]
return {
'observation': super().get_observation(),
'pole': np.concatenate([pole_qpos, pole_qvel]).astype(np.float32),
}
def reset_state(self):
super().reset_state()
# Small noise for pole
noise = 0.01
qpos = self.init_qpos + self.np_random.uniform(
low=-noise, high=noise, size=self.p.model.nq
)
qvel = self.init_qvel + noise * self.np_random.standard_normal(self.p.model.nv)
self.p.data.qpos[self.pole_qpos_idx] = qpos[self.pole_qpos_idx]
self.p.data.qvel[self.pole_qvel_idx] = qvel[self.pole_qvel_idx]
def step(self, action):
obs, reward, terminated, truncated, info = super().step(action)
reward = 1.0
# Failure is defined as the z range of bottom and top of pole tower
# falls below 20% of total length.
xpos = self.p.named.data.xpos
xquat = self.p.named.data.xquat
t = np.zeros(3)
mjlib.mju_rotVecQuat(
t,
np.array([0.0, 0.0, -self.pole_length / 2]) * self.world_scale,
xquat['robot/pole-0'],
)
bottom_z = xpos['robot/pole-0'][2] + t[2]
mjlib.mju_rotVecQuat(
t,
np.array([0.0, 0.0, self.pole_length / 2]) * self.world_scale,
xquat[f'robot/pole-{self.n_poles-1}'],
)
top_z = xpos[f'robot/pole-{self.n_poles-1}'][2] + t[2]
zthresh = 0.8 * self.n_poles * self.pole_length * self.world_scale
if top_z - bottom_z < zthresh:
terminated = True
score = 1 if not terminated else 0
info['score'] = score
reward = score
if info.get('fell_over', False):
terminated = True
reward = -1
return obs, reward, terminated, truncated, info
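# Observation-layout sketch, assuming the 'BiskPoleBalance-v1' registration
# from this package with its default single pole. For 3D robots each pole
# contributes a quaternion (4 qpos entries) plus an angular velocity (3 qvel
# entries); for the 2D robots (halfcheetah, walker) it is one hinge angle
# plus one velocity, so the 'pole' entry below has shape (2,).
if __name__ == '__main__':
    import gym
    import bisk  # noqa: F401

    env = gym.make('BiskPoleBalance-v1', robot='walker')
    obs, _ = env.reset(seed=0)
    print('pole obs shape:', obs['pole'].shape)
    env.close()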
|
bipedal-skills-main
|
bisk/tasks/polebalance.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import Dict, List, Union
import gym
import numpy as np
from dm_control import mjcf
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskLimboEnv(BiskSingleRobotEnv):
'''
A limbo "dance" environment. There are bunch of geoms along the way which
the robot has to crouch under. Proper limbo posture is not enforced.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
notouch: bool,
min_height: Union[float, str],
fixed_height: bool,
**kwargs,
):
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.notouch = notouch
self.fixed_height = fixed_height
self.max_bars_cleared = 0
if min_height == 'auto':
if self.robot.startswith('humanoid'):
self.min_height = 1.0
elif self.robot.startswith('walker'):
self.min_height = 1.2
else:
self.min_height = 1.0
else:
self.min_height = float(min_height)
self.robot_geoms: List[int] = []
for g in self.p.named.model.body_geomadr.axes.row.names:
if g.startswith('robot/'):
self.robot_geoms.append(self.p.named.model.body_geomadr[g])
self.bar_geoms: Dict[int, int] = {}
for i, g in enumerate(self.p.named.model.geom_bodyid.axes.row.names):
if g.startswith('bar-'):
self.bar_geoms[i] = int(g.split('-')[1])
self.bar_geom_ids = set(self.bar_geoms.keys())
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('next_bar', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
# 200 bars should be enough for everybody
self.n_bars = 200
for i in range(self.n_bars):
b = self.add_capsule(
root,
f'bar-{i}',
fromto=[2.025, -W, 0.1, 2.025, W, 0.1],
size=[0.1],
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_bars_cleared = 0
xpos = 1
intervals = (
self.np_random.uniform(3, 6, size=(self.n_bars,)) * self.world_scale
)
if self.fixed_height:
heights = np.zeros(self.n_bars) + self.min_height * self.world_scale
else:
heights = (
self.np_random.uniform(
self.min_height, self.min_height + 0.3, size=(self.n_bars,)
)
* self.world_scale
)
self.bar_pos = []
nm = self.p.named.model
for i in range(self.n_bars):
xpos += intervals[i]
self.bar_pos.append(xpos)
nm.geom_pos[f'bar-{i}'][0] = xpos
nm.geom_pos[f'bar-{i}'][2] = (
heights[i] + nm.geom_size[f'bar-{i}'][0]
)
nm.geom_rgba[f'bar-{i}'] = [0.8, 0.9, 0.8, 1]
self.bar_hit = [False] * self.n_bars
self.new_bars_hit = set()
def get_observation(self):
nb = self.next_bar_index()
if nb < len(self.bar_pos):
xpos = self.robot_pos[0]
nm = self.p.named.model
next_bar_d = nm.geom_pos[f'bar-{nb}'][0] - xpos
next_bar_h = (
nm.geom_pos[f'bar-{nb}'][2] + nm.geom_size[f'bar-{nb}'][0]
)
else:
next_bar_d = 1.0
next_bar_h = 2.0
next_bar_cleared = nb < self.max_bars_cleared
return {
'observation': super().get_observation(),
'next_bar': np.array(
[next_bar_d, next_bar_h, not next_bar_cleared],
dtype=np.float32
),
}
def next_bar_index(self):
xpos = self.robot_pos[0]
return bisect_left(self.bar_pos, xpos)
def on_step_single_frame(self):
contact = self.p.data.contact
for i, c in enumerate(contact.geom1):
if contact.dist[i] > 0:
continue
if c not in self.bar_geom_ids:
continue
bar = self.bar_geoms[c]
self.new_bars_hit.add(bar)
for i, c in enumerate(contact.geom2):
if contact.dist[i] > 0:
continue
if c not in self.bar_geom_ids:
continue
bar = self.bar_geoms[c]
self.new_bars_hit.add(bar)
def step_simulation(self):
super().step_simulation()
self.max_bars_cleared = max(
self.max_bars_cleared, self.next_bar_index()
)
def step(self, action):
self.new_bars_hit = set()
mbbefore = self.max_bars_cleared
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_bars_cleared > mbbefore else 0
touched = False
for hit in self.new_bars_hit:
if not self.bar_hit[hit] and self.notouch:
touched = True
if self.notouch:
marked = [0.8, 0.0, 0.0, 1.0]
self.p.named.model.geom_rgba[f'bar-{hit}'] = marked
score -= 1
self.bar_hit[hit] = True
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
reward = -1
terminated = True
return obs, reward, terminated, truncated, info
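# Quick look at the task-specific observation, assuming the 'BiskLimbo-v1'
# registration from this package: 'next_bar' packs [distance to the next
# bar, bar height, 1.0 if that bar has not been cleared yet].
if __name__ == '__main__':
    import gym
    import bisk  # noqa: F401

    env = gym.make('BiskLimbo-v1', robot='testcube')
    obs, _ = env.reset(seed=0)
    d, h, todo = obs['next_bar']
    print(f'next bar in {d:.2f}m at height {h:.2f}m, uncleared={bool(todo)}')
    env.close()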
|
bipedal-skills-main
|
bisk/tasks/limbo.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
import gym
import numpy as np
from dm_control import mjcf
from dm_control.utils import rewards
from bisk.single_robot import BiskSingleRobotWithBallEnv
log = logging.getLogger(__name__)
class BiskGoalWallEnv(BiskSingleRobotWithBallEnv):
'''
Goal wall shooting. In the dense-reward setting we allow for falling over
since the reward is the negative distance to the closest goal.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
init_distance: float,
touch_ball_reward: float,
**kwargs,
):
self.init_distance = init_distance
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.touch_ball_reward = touch_ball_reward
if self.touch_ball_reward > 0:
self.observation_space = gym.spaces.Dict(
[
('ball', self.observation_space.spaces['ball']),
(
'touched_ball',
gym.spaces.Box(
low=0, high=1, shape=(1,), dtype=np.float32
),
),
(
'observation',
self.observation_space.spaces['observation'],
),
]
)
self.ball_geom = self.p.named.model.body_geomadr['ball']
self.wall_geom = self.p.named.model.geom_type.axes.row.names.index(
'wall'
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
# Add wall
W = 3 * self.world_scale
WH = 1 * self.world_scale
WD = (4 + self.init_distance) * self.world_scale
root.asset.add(
'material',
name='mat_wall',
reflectance=0.5,
shininess=1,
emission=0.5,
specular=1,
)
root.worldbody.add(
'geom',
type='plane',
name='wall',
material='mat_wall',
xyaxes=[0, -1, 0, 0, 0, 1],
size=[W, WH, 1],
pos=[WD, 0, WH],
rgba=[0, 0.5, 0.1, 1],
)
# Add a visual marker
root.asset.add(
'texture',
name='tex_dnc',
builtin='checker',
width=50,
height=50,
rgb1=[0, 0, 0],
rgb2=[1, 0.8, 0],
type='2d',
)
root.asset.add(
'material',
name='mat_dnc',
reflectance=0.5,
shininess=1,
specular=1,
texrepeat=[1, 10],
texuniform=False,
texture='tex_dnc',
)
root.worldbody.add(
'site',
type='box',
name='line',
size=[0.1, W, 0.01],
pos=[1.5 + self.init_distance, 0, 0.02],
material='mat_dnc',
)
# rgba=[1, 0, 0, 0.3])
# Add goals on wall
if self.is_2d:
GP = WH
root.worldbody.add(
'site',
type='ellipsoid',
name='goal',
material='mat_wall',
size=[
0.01 * self.world_scale,
0.4 * self.world_scale,
0.4 * self.world_scale,
],
pos=[WD, 0, GP],
rgba=[1, 1, 1, 1],
)
root.worldbody.add(
'site',
type='ellipsoid',
name='goalb',
material='mat_wall',
size=[
0.005 * self.world_scale,
0.45 * self.world_scale,
0.45 * self.world_scale,
],
pos=[WD, 0, GP],
rgba=[1, 0, 0, 1],
)
else:
root.worldbody.add(
'site',
type='ellipsoid',
name='goal1',
material='mat_wall',
size=[
0.01 * self.world_scale,
0.4 * self.world_scale,
0.4 * self.world_scale,
],
pos=[WD, -1, WH - 0.35],
rgba=[1, 1, 1, 1],
)
root.worldbody.add(
'site',
type='ellipsoid',
name='goal1b',
material='mat_wall',
size=[
0.005 * self.world_scale,
0.45 * self.world_scale,
0.45 * self.world_scale,
],
pos=[WD, -1, WH - 0.35],
rgba=[1, 0, 0, 1],
)
root.worldbody.add(
'site',
type='ellipsoid',
name='goal2',
material='mat_wall',
size=[
0.01 * self.world_scale,
0.4 * self.world_scale,
0.4 * self.world_scale,
],
pos=[WD, 1, WH + 0.35],
rgba=[1, 1, 1, 1],
)
root.worldbody.add(
'site',
type='ellipsoid',
name='goal2b',
material='mat_wall',
size=[
0.005 * self.world_scale,
0.45 * self.world_scale,
0.45 * self.world_scale,
],
pos=[WD, 1, WH + 0.35],
rgba=[1, 0, 0, 1],
)
# This is the camera we'll use by default
euler = [80, -5, 0]
if root.compiler.angle == 'radian':
euler = [np.deg2rad(e) for e in euler]
root.worldbody.add(
'camera',
name='sideline',
mode='fixed',
pos=[WD / 3, -9 * self.world_scale, 2 * self.world_scale],
euler=euler,
)
super().init_sim(root, frameskip)
def get_observation(self):
obs = super().get_observation()
if self.touch_ball_reward > 0:
obs['touched_ball'] = np.array([float(self.ball_touched)],
dtype=np.float32)
return obs
def reset_state(self) -> None:
super().reset_state()
# Place ball
ball_size = self.p.named.model.geom_size['ball'][0]
if self.is_2d:
self.p.named.data.qpos['ball-x'] += self.init_distance
self.p.named.data.qpos['ball-z'] += ball_size + 0.1
else:
self.p.named.data.qpos['ball'][0] += self.init_distance
self.p.named.data.qpos['ball'][2] += ball_size + 0.1
self.ball_yz = None
self.ball_touched = False
def on_step_single_frame(self):
contact = self.p.data.contact
ball_wall = np.in1d(contact.geom1, self.wall_geom) & np.in1d(
contact.geom2, self.ball_geom
)
touching = contact.dist <= 0
if np.any(ball_wall & touching):
if self.is_2d:
self.ball_yz = [0, self.p.named.data.qpos['ball-z'][0]]
else:
self.ball_yz = self.p.named.data.qpos['ball'][1:3].copy()
if not self.ball_touched:
for c in contact:
names = self.p.named.model.name_geomadr.axes.row.names
if (
names[c.geom1].startswith('ball')
and names[c.geom2].startswith('robot')
and c.dist < 0
):
self.ball_touched = True
def step(self, action):
self.ball_yz = None
btbefore = self.ball_touched
obs, reward, terminated, truncated, info = super().step(action)
goal_hit = None
goal_dists = []
goal_sizes = []
if self.ball_yz is not None:
if self.is_2d:
goals = ('goal',)
else:
goals = ('goal1', 'goal2')
for g in goals:
d = np.linalg.norm(
self.ball_yz - self.p.named.data.site_xpos[g][1:3]
)
goal_dists.append(d)
goal_sizes.append(self.p.named.model.site_size[g][2])
if d <= self.p.named.model.site_size[g][2]:
goal_hit = g
break
score = 0
if goal_hit == 'goal' or goal_hit == 'goal1':
score = 1
elif goal_hit == 'goal2':
score = 2
info['score'] = score
        shaped_reward = 0
        for i, (d, s) in enumerate(zip(goal_dists, goal_sizes)):
            # Scale with the goal's score (goal2 counts double) and keep the
            # best candidate instead of overwriting it on each iteration.
            shaped_reward = max(
                shaped_reward, (i + 1) * rewards.tolerance(d, (0, s), margin=s)
            )
info['shaped_reward'] = shaped_reward
reward = info['shaped_reward'] if self.shaped else score
if self.touch_ball_reward > 0 and self.ball_touched != btbefore:
reward += self.touch_ball_reward
# Zero reward if we're beyond the line
lpos = self.p.named.data.site_xpos['line', 'x']
if self.robot_pos[0] > lpos:
reward = 0
# Once we've hit the wall we're done
if self.ball_yz is not None:
terminated = True
if info.get('fell_over', False):
reward = -1
terminated = True
return obs, reward, terminated, truncated, info
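# Scoring sketch, assuming the 'BiskGoalWall-v1' registration from this
# package: the episode ends when the ball hits the wall; the low goal
# ('goal1', or 'goal' in 2D) scores 1, the high goal ('goal2') scores 2,
# and reward is zeroed while the robot is past the do-not-cross line.
if __name__ == '__main__':
    import gym
    import bisk  # noqa: F401

    env = gym.make('BiskGoalWall-v1', robot='testcube')
    obs, _ = env.reset(seed=0)
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    print('score:', info['score'])
    env.close()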
|
bipedal-skills-main
|
bisk/tasks/goalwall.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
import gym
import numpy as np
from dm_control import mjcf
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskHurdlesEnv(BiskSingleRobotEnv):
'''
Jump over hurdles to progress in X-direction.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
max_height: float,
fixed_height: bool,
**kwargs,
):
super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.max_height = max_height
self.fixed_height = fixed_height
self.max_hurdles_cleared = 0
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('next_hurdle', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
# 200 hurdles should be enough for everybody
self.n_hurdles = 200
for i in range(self.n_hurdles):
b = self.add_box(
root, f'hurdle-{i}', size=[0.05, W, 0.1], pos=[2, 0, 0.2]
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_hurdles_cleared = 0
xpos = 1
intervals = (
self.np_random.uniform(3, 6, size=(self.n_hurdles,))
* self.world_scale
)
if self.fixed_height:
heights = (
np.zeros(self.n_hurdles) + self.max_height * self.world_scale
)
else:
heights = (
self.np_random.uniform(
0.1, self.max_height, size=(self.n_hurdles,)
)
* self.world_scale
)
self.hurdle_pos = []
for i in range(self.n_hurdles):
xpos += intervals[i]
self.hurdle_pos.append(xpos)
self.p.named.model.geom_size[f'hurdle-{i}'][2] = heights[i]
self.p.named.model.geom_pos[f'hurdle-{i}'][0] = xpos
self.p.named.model.geom_pos[f'hurdle-{i}'][2] = (
heights[i] / 2 + 0.01
)
def get_observation(self):
nh = self.next_hurdle_index()
if nh < len(self.hurdle_pos):
xpos = self.robot_pos[0]
nm = self.p.named.model
next_hurdle_d = nm.geom_pos[f'hurdle-{nh}'][0] - xpos
next_hurdle_h = nm.geom_size[f'hurdle-{nh}'][2] * 2
else:
next_hurdle_d = 10.0
next_hurdle_h = 0.1
next_hurdle_cleared = nh < self.max_hurdles_cleared
return {
'observation': super().get_observation(),
'next_hurdle': np.array(
[next_hurdle_d, next_hurdle_h, not next_hurdle_cleared],
dtype=np.float32
),
}
def next_hurdle_index(self):
xpos = self.robot_pos[0]
return bisect_left(self.hurdle_pos, xpos)
def step_simulation(self):
super().step_simulation()
self.max_hurdles_cleared = max(
self.max_hurdles_cleared, self.next_hurdle_index()
)
def step(self, action):
mhbefore = self.max_hurdles_cleared
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_hurdles_cleared > mhbefore else 0
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
terminated = True
reward = -1
return obs, reward, terminated, truncated, info
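# Reward-semantics sketch, assuming the 'BiskHurdles-v1' registration from
# this package with sparse rewards: reward is 1 exactly when a new hurdle
# is cleared, so the return equals the number of hurdles cleared (a naive
# forward policy like the one below may well score 0).
if __name__ == '__main__':
    import gym
    import bisk  # noqa: F401

    env = gym.make('BiskHurdles-v1', robot='testcube')
    obs, _ = env.reset(seed=0)
    retrn = 0
    for _ in range(100):
        obs, reward, terminated, truncated, info = env.step([1, 0, 0])
        retrn += reward
        if terminated or truncated:
            break
    print('hurdles cleared:', retrn)
    env.close()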
|
bipedal-skills-main
|
bisk/tasks/hurdles.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
from typing import List
import gym
import numpy as np
from dm_control import mjcf
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskStairsEnv(BiskSingleRobotEnv):
'''
    Go up and down flights of fixed-height, variable-length stairs.
'''
def __init__(
self,
robot: str,
features: str,
shaped: bool,
step_height: float,
step_length_min: float,
step_length_max: float,
num_flights: int,
**kwargs,
):
self.step_height = step_height
self.step_length_min = step_length_min
self.step_length_max = step_length_max
self.num_flights = num_flights
super().__init__(robot, f'{features}-relz', **kwargs)
self.shaped = shaped
self.max_steps_cleared = 0
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[
('next_steps', obs_env),
('observation', obs_base),
]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
self.flight_steps = 10
self.n_steps = self.flight_steps * self.num_flights
self.start_pos = 3.0
self.top_width = 3.0
color1 = [0.8, 0.9, 0.8, 1.0]
color2 = [0.6, 0.6, 0.6, 1.0]
length = 0.5
xpos = self.start_pos + length / 2
step = 0
for flight in range(self.num_flights):
if flight % 2 == 0:
h2 = self.step_height / 2
for i in range(self.flight_steps):
self.add_box(
root,
f'step-{step}',
size=[length / 2, W, h2],
pos=[xpos, 0, h2],
rgba=color1 if i % 2 == 0 else color2,
)
step += 1
h2 += self.step_height / 2
xpos += length
h2 = self.flight_steps * self.step_height / 2
self.add_box(
root,
f'top-{step}',
size=[self.top_width, W, h2],
pos=[xpos + self.top_width - length / 2, 0, h2],
rgba=color1,
)
xpos += self.top_width * 2
else:
for i in range(self.flight_steps):
self.add_box(
root,
f'step-{step}',
size=[length / 2, W, h2],
pos=[xpos, 0, h2],
rgba=color1 if i % 2 == 1 else color2,
)
step += 1
h2 -= self.step_height / 2
xpos += length
xpos += self.top_width * 4
euler = [80, 0, 0]
if root.compiler.angle == 'radian':
euler = [np.deg2rad(e) for e in euler]
root.worldbody.add(
'camera',
name='stairs_side',
mode='trackcom',
pos=[0, -6, 1],
euler=euler,
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_steps_cleared = 0
self.step_pos: List[float] = []
lengths = (
self.np_random.uniform(
self.step_length_min,
self.step_length_max,
size=(self.n_steps + 1,),
)
* self.world_scale
)
nm = self.p.named.model
xpos = self.start_pos + lengths[0] / 2
step = 0
for flight in range(self.num_flights):
if flight % 2 == 0:
for _ in range(self.flight_steps):
nm.geom_size[f'step-{step}'][0] = lengths[step] / 2
nm.geom_pos[f'step-{step}'][0] = xpos
self.step_pos.append(xpos)
xpos += lengths[step] / 2 + lengths[step + 1] / 2
step += 1
nm.geom_pos[f'top-{step}'][0] = (
xpos + self.top_width - lengths[step] / 2
)
xpos += self.top_width * 2
else:
for _ in range(self.flight_steps):
nm.geom_size[f'step-{step}'][0] = lengths[step] / 2
nm.geom_pos[f'step-{step}'][0] = xpos
self.step_pos.append(xpos)
xpos += lengths[step] / 2 + lengths[step + 1] / 2
step += 1
xpos += self.top_width * 4
# Custom fall-over detection because we want to use the featurizer's
# relative Z position.
def fell_over(self) -> bool:
if self.robot.startswith('humanoidcmu') or self.robot.startswith(
'humanoidamass'
):
            # XXX We'd like the relative position of the lower neck, not the torso
# (which is at the center of the robot for this one).
abs_zpos = self.p.named.data.xpos['robot/torso', 'z']
zpos = self.featurizer.relz()
zdiff = abs_zpos - zpos
return bool(self.robot_pos[2] - zdiff < 0.9)
elif self.robot.startswith('humanoid'):
zpos = self.featurizer.relz()
return bool(zpos < 0.9)
elif self.robot.startswith('halfcheetah'):
# Orientation pointing upwards and body almost on the ground
up = self.p.named.data.xmat['robot/torso', 'zz']
zpos = self.featurizer.relz()
if up < -0.8 and zpos < 0.12:
return True
elif self.robot.startswith('walker'):
zpos = self.featurizer.relz()
r = self.p.named.data.qpos['robot/rooty']
if zpos < 0.9 or r < -1.4 or r > 1.4:
return True
return False
def get_observation(self):
ns = self.next_step_index()
xpos = self.robot_pos[0]
nm = self.p.named.model
if ns < len(self.step_pos):
next_step_d1 = nm.geom_pos[f'step-{ns}'][0] - xpos
if ns + 1 < len(self.step_pos):
next_step_d2 = nm.geom_pos[f'step-{(ns+1)}'][0] - xpos
else:
next_step_d2 = 10.0
else:
next_step_d1 = 10.0
next_step_d2 = 20.0
next_step_cleared = ns < self.max_steps_cleared
return {
'next_steps': np.array(
[next_step_d1, next_step_d2, not next_step_cleared],
dtype=np.float32,
),
'observation': super().get_observation(),
}
def next_step_index(self):
xpos = self.robot_pos[0]
return bisect_left(self.step_pos, xpos)
def step_simulation(self):
super().step_simulation()
self.max_steps_cleared = max(
self.max_steps_cleared, self.next_step_index()
)
def step(self, action):
msbefore = self.max_steps_cleared
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_steps_cleared > msbefore else 0
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
reward = -1
terminated = True
return obs, reward, terminated, truncated, info
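# Observation sketch, assuming the 'BiskStairs-v1' registration from this
# package: 'next_steps' packs [distance to the next step, distance to the
# step after it, 1.0 if the next step is still uncleared].
if __name__ == '__main__':
    import gym
    import bisk  # noqa: F401

    env = gym.make('BiskStairs-v1', robot='testcube')
    obs, _ = env.reset(seed=0)
    d1, d2, todo = obs['next_steps']
    print(f'next steps at {d1:.2f}m and {d2:.2f}m, uncleared={bool(todo)}')
    env.close()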
|
bipedal-skills-main
|
bisk/tasks/stairs.py
|
bipedal-skills-main
|
bisk/tasks/__init__.py
|
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import gym
import numpy as np
from dm_control import mjcf
from dm_control.utils import rewards
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskGoToTargetEnv(BiskSingleRobotEnv):
'''
Simple 1D/2D navigation, a port of dm_control's GoToTarget task.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
tolerance: float,
goal_area: float,
num_targets: int = 1,
goal_switch_steps: int = 10,
single_target: bool = False,
on_circle: bool = False,
**kwargs,
):
self.shaped = shaped
self.goal_area = goal_area
self.tolerance = tolerance
self.goals = np.zeros((num_targets, 2))
self.goal_switch_steps = goal_switch_steps
self.on_circle = on_circle
self.steps_to_switch = 0
self.single_target = single_target
super().__init__(robot, features, allow_fallover, **kwargs)
obs_base = self.featurizer.observation_space
if self.is_2d:
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(1 * num_targets,),
dtype=np.float32,
)
else:
obs_env = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(3 * num_targets,),
dtype=np.float32,
)
self.observation_space = gym.spaces.Dict(
[('targets', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
for i in range(self.goals.shape[0]):
root.worldbody.add(
'site',
name=f'target_{i}',
type='sphere',
pos=(0.0, 0.0, 1.0),
size=(0.1,),
rgba=(0.9, 0.6, 0.6, 1.0 if i == 0 else 0.2),
)
root.worldbody.add(
'site',
name=f'target_tolerance_{i}',
type='ellipsoid',
pos=(0.0, 0.0, 1.0),
size=(self.tolerance, self.tolerance, 1e-3),
rgba=(0.9, 0.6, 0.6, 0.2 if i == 0 else 0.05),
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.sample_goal(all=True)
def get_observation(self):
if self.is_2d:
targets = self.goals[:, 0:1] - self.robot_pos[0:1]
else:
targets = np.zeros((self.goals.shape[0], 3))
targets[:, :2] = self.goals - self.robot_pos[:2]
targets = np.dot(
targets, self.p.named.data.xmat['robot/torso'].reshape(3, 3)
)
return {
'observation': super().get_observation(),
'targets': targets.flatten().astype(np.float32),
}
def step(self, action):
obs, reward, terminated, truncated, info = super().step(action)
dist = np.linalg.norm(self.goals[0] - self.robot_pos[:2])
if dist < self.tolerance:
score = 1
self.steps_to_switch -= 1
else:
score = 0
info['score'] = score
info['distance'] = dist
info['shaped_reward'] = -0.1 * (
1
- rewards.tolerance(
dist,
(0, 0),
margin=self.goal_area / 2,
)
)
reward = info['shaped_reward'] if self.shaped else score
if self.steps_to_switch <= 0:
self.sample_goal()
obs = self.get_observation()
if info.get('fell_over', False):
terminated = True
reward = -1
if score == 1 and self.single_target:
terminated = True
return obs, reward, terminated, truncated, info
def sample_goal(self, all: bool = False):
if all:
if self.on_circle:
self.goals = self.np_random.standard_normal(self.goals.shape)
self.goals /= np.maximum(
np.linalg.norm(self.goals, axis=1).reshape(-1, 1), 1e-5
)
self.goals *= self.goal_area
else:
self.goals = self.np_random.uniform(
-self.goal_area, self.goal_area, size=self.goals.shape
)
else:
self.goals = np.roll(self.goals, -1, axis=0)
if self.on_circle:
self.goals[-1] = self.np_random.standard_normal(2)
self.goals[-1] /= np.maximum(
np.linalg.norm(self.goals[-1]), 1e-5
)
self.goals[-1] *= self.goal_area
else:
self.goals[-1] = self.np_random.uniform(
-self.goal_area, self.goal_area, size=(2,)
)
if self.is_2d:
self.goals[:, 1] = 0
for i in range(self.goals.shape[0]):
self.p.named.model.site_pos[f'target_{i}'][0:2] = self.goals[i]
self.p.named.model.site_pos[f'target_{i}'][2] = 0
self.p.named.model.site_pos[f'target_tolerance_{i}'][
0:2
] = self.goals[i]
self.p.named.model.site_pos[f'target_tolerance_{i}'][2] = 0
self.steps_to_switch = self.goal_switch_steps
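# Standalone check of the on_circle sampling above: Gaussian samples are
# normalized row-wise and scaled, which places every goal exactly on a
# circle of radius goal_area. Pure numpy; independent of the simulator.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    goal_area = 4.0
    goals = rng.standard_normal((5, 2))
    goals /= np.maximum(np.linalg.norm(goals, axis=1).reshape(-1, 1), 1e-5)
    goals *= goal_area
    print(np.linalg.norm(goals, axis=1))  # all ~4.0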
|
bipedal-skills-main
|
bisk/tasks/gototarget.py
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from bisk.single_robot import BiskSingleRobotEnv
class BiskRunDirEnv(BiskSingleRobotEnv):
'''
Dense-reward task: move at a specific angle.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
heading_deg: float,
**kwargs):
super().__init__(robot, features, allow_fallover, **kwargs)
heading = np.deg2rad(heading_deg)
# torso orientation: X/Y are switched
self.dir = np.asarray([np.sin(heading), np.cos(heading), 0])
def step(self, action):
pos_before = self.robot_pos.copy()
obs, reward, terminated, truncated, info = super().step(action)
pos_after = self.robot_pos
displacement = pos_after - pos_before
rdir = np.dot(self.dir, self.p.named.data.xmat['robot/torso'].reshape(3,3))
reward = np.dot(rdir, displacement)
if info.get('fell_over', False):
terminated = True
reward = -1
return obs, reward, terminated, truncated, info
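# Worked example of the heading convention above: with the sin/cos swap,
# heading_deg=0 maps to [0, 1, 0] and heading_deg=90 to [1, 0, 0] before
# the displacement is rotated into the torso frame.
if __name__ == '__main__':
    for deg in (0, 90, 180):
        heading = np.deg2rad(deg)
        print(deg, np.round([np.sin(heading), np.cos(heading), 0.0], 3))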
|
bipedal-skills-main
|
bisk/tasks/rundir.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from bisect import bisect_left
import gym
import numpy as np
from dm_control import mjcf
from bisk.helpers import asset_path
from bisk.single_robot import BiskSingleRobotEnv
log = logging.getLogger(__name__)
class BiskGapsEnv(BiskSingleRobotEnv):
'''
Jump over gaps to progress in X-direction.
'''
def __init__(
self,
robot: str,
features: str,
allow_fallover: bool,
shaped: bool,
max_size: float,
min_gap: float,
max_gap: float,
fixed_size: bool,
**kwargs,
):
        super().__init__(robot, features, allow_fallover, **kwargs)
self.shaped = shaped
self.max_size = max(0.5, max_size)
self.fixed_size = fixed_size
self.min_gap = min_gap
self.max_gap = max_gap
obs_base = self.featurizer.observation_space
obs_env = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(3,), dtype=np.float32
)
self.observation_space = gym.spaces.Dict(
[('next_gap_platform', obs_env), ('observation', obs_base)]
)
def init_sim(self, root: mjcf.RootElement, frameskip: int = 5):
W = 8
self.add_fwd_corridor(root, W)
root.find('geom', 'floor').remove()
# Base platform
H = 0.1
root.asset.add(
'material',
name='mat_base',
reflectance=0.5,
shininess=1,
specular=1,
texrepeat=[1, 1],
texuniform=True,
texture='tex_plane',
)
root.asset.add(
'texture',
name='tex_lava',
type='2d',
file=f'{asset_path()}/lava.png',
)
root.asset.add(
'material',
name='mat_gaps',
reflectance=0.5,
shininess=1,
specular=1,
texrepeat=[1, 1],
texuniform=True,
texture='tex_lava',
)
self.add_box(
root,
            'base',
size=[(W + 4) / 2, W, H],
pos=[(-W + 4) / 2, 0, -H],
conaffinity=1,
material='mat_base',
)
# 200 platforms should be enough for everybody
self.n_platforms = 200
root.asset.add(
'material',
name='mat_platform',
reflectance=0.5,
shininess=1,
specular=1,
)
for i in range(self.n_platforms):
o = (i % 2) * 0.1
self.add_box(
root,
f'platform-{i}',
size=[1, W, H],
pos=[2, 0, -H],
material='mat_platform',
rgba=[0.2 + o, 0.3 + o, 0.4 + o, 1.0],
)
# Gaps are placed 5cm below
g = self.add_box(
root,
f'gap-{i}',
size=[1, W, H],
pos=[2, 0, -H - 0.05],
material='mat_gaps',
)
super().init_sim(root, frameskip)
def reset_state(self):
super().reset_state()
self.max_platforms_reached = 0
xpos = 4 * self.world_scale
if self.fixed_size:
gaps = np.zeros(self.n_platforms) + self.min_gap * self.world_scale
sizes = (
np.zeros(self.n_platforms) + self.max_size * self.world_scale
)
else:
if self.robot.startswith('quadruped'):
gaps = (
self.np_random.uniform(0.8, 1.55, size=(self.n_platforms,))
* self.world_scale
)
ms = max(self.max_size * 2, 2.0)
sizes = (
self.np_random.uniform(2.0, ms, size=(self.n_platforms,))
* self.world_scale
)
elif self.robot.startswith('humanoid'):
gaps = (
self.np_random.uniform(
self.min_gap, self.max_gap, size=(self.n_platforms,)
)
* self.world_scale
)
sizes = (
self.np_random.uniform(
1.0, self.max_size, size=(self.n_platforms,)
)
* self.world_scale
)
else:
gaps = (
self.np_random.uniform(
self.min_gap, self.max_gap, size=(self.n_platforms,)
)
* self.world_scale
)
sizes = (
self.np_random.uniform(
0.5, self.max_size, size=(self.n_platforms,)
)
* self.world_scale
)
self.gap_starts = []
self.platform_starts = []
for i in range(self.n_platforms):
self.gap_starts.append(xpos)
self.p.named.model.geom_size[f'gap-{i}'][0] = gaps[i] / 2
self.p.named.model.geom_pos[f'gap-{i}'][0] = xpos + gaps[i] / 2
xpos += gaps[i]
self.platform_starts.append(xpos)
self.p.named.model.geom_size[f'platform-{i}'][0] = sizes[i] / 2
self.p.named.model.geom_pos[f'platform-{i}'][0] = (
xpos + sizes[i] / 2
)
xpos += sizes[i]
def next_gap_platform_index(self):
xpos = self.robot_pos[0]
nxp = bisect_left(self.platform_starts, xpos)
nxg = bisect_left(self.gap_starts, xpos)
return nxg, nxp
def get_observation(self):
nxg, nxp = self.next_gap_platform_index()
xpos = self.robot_pos[0]
if nxg < len(self.gap_starts):
next_gap_d = self.gap_starts[nxg] - xpos
else:
next_gap_d = 1.0
if nxp < len(self.platform_starts):
next_platform_d = self.platform_starts[nxp] - xpos
else:
next_platform_d = 1.0
next_platform_reached = nxp < self.max_platforms_reached
return {
'observation': super().get_observation(),
'next_gap_platform': np.array(
[next_gap_d, next_platform_d, not next_platform_reached],
dtype=np.float32
),
}
def on_step_single_frame(self):
for c in self.p.data.contact:
names = self.p.named.model.name_geomadr.axes.row.names
nams = sorted([names[c.geom1], names[c.geom2]])
if nams[0].startswith('gap') and nams[1].startswith('robot/'):
self.touched_gap = True
break
def step_simulation(self):
super().step_simulation()
self.max_platforms_reached = max(
self.max_platforms_reached, self.next_gap_platform_index()[1]
)
def step(self, action):
mpbefore = self.max_platforms_reached
self.touched_gap = False
xpos1 = self.robot_pos[0]
obs, reward, terminated, truncated, info = super().step(action)
xpos2 = self.robot_pos[0]
score = 1 if self.max_platforms_reached > mpbefore else 0
info['score'] = score
info['shaped_reward'] = xpos2 - xpos1
reward = info['shaped_reward'] if self.shaped else score
if info.get('fell_over', False):
terminated = True
reward = -1
if self.touched_gap:
terminated = True
reward = -1
info['score'] -= 1
return obs, reward, terminated, truncated, info
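# Observation/penalty sketch, assuming the 'BiskGaps-v1' registration from
# this package: touching a gap geom ends the episode with reward -1, so an
# agent has to clear each gap fully to keep collecting platform rewards.
if __name__ == '__main__':
    import gym
    import bisk  # noqa: F401

    env = gym.make('BiskGaps-v1', robot='testcube')
    obs, _ = env.reset(seed=0)
    d_gap, d_platform, todo = obs['next_gap_platform']
    print(f'next gap in {d_gap:.2f}m, next platform in {d_platform:.2f}m')
    env.close()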
|
bipedal-skills-main
|
bisk/tasks/gaps.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from bisk.features.base import Featurizer
_registry = {}
def register_featurizer(name, cls):
global _registry
_registry[name] = cls
def make_featurizer(
features: str,
p: 'dm_control.mujoco.Physics',
robot: str,
prefix: str = 'robot',
*args,
**kwargs,
) -> Featurizer:
global _registry
if features == 'joints':
from bisk.features.joints import JointsFeaturizer
return JointsFeaturizer(p, robot, prefix, *args, **kwargs)
elif features == 'joints-relz':
from bisk.features.joints import JointsRelZFeaturizer
return JointsRelZFeaturizer(p, robot, prefix, *args, **kwargs)
elif features in _registry:
return _registry[features](p, robot, prefix, *args, **kwargs)
else:
raise ValueError(f'Unknown feature set {features}')
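# Sketch of plugging in a custom featurizer via the registry above. The
# TorsoZFeaturizer below is hypothetical; any class implementing the
# Featurizer interface (observation_space plus __call__) works.
if __name__ == '__main__':
    import gym
    import numpy as np

    class TorsoZFeaturizer(Featurizer):
        def __init__(self, p, robot, prefix='robot', *args, **kwargs):
            super().__init__(p, robot, prefix)
            self.observation_space = gym.spaces.Box(
                low=-np.inf, high=np.inf, shape=(1,), dtype=np.float32
            )

        def __call__(self) -> np.ndarray:
            # Height of the torso body in world coordinates.
            z = self.p.named.data.xpos[f'{self.prefix}/torso', 'z']
            return np.array([z], dtype=np.float32)

    register_featurizer('torso-z', TorsoZFeaturizer)
    # make_featurizer('torso-z', physics, 'walker') now resolves to it.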
|
bipedal-skills-main
|
bisk/features/__init__.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import re
from typing import List, Set
import gym
import numpy as np
from bisk.features.base import Featurizer
class JointsFeaturizer(Featurizer):
'''
Featurizes joint observations (qpos, qvel) as well
as contact forces (clipped to [-1,1]).
'''
def __init__(
self, p: 'dm_control.mujoco.Physics', robot: str, prefix: str, exclude: str = None
):
super().__init__(p, robot, prefix, exclude)
self.qpos_idx: List[int] = []
self.qvel_idx: List[int] = []
for jn in self.p.named.model.jnt_type.axes.row.names:
if not jn.startswith(f'{self.prefix}/'):
continue
if exclude is not None and re.match(exclude, jn) is not None:
continue
typ = self.p.named.model.jnt_type[jn]
qpos_adr = self.p.named.model.jnt_qposadr[jn]
for i in range(self.n_qpos[typ]):
self.qpos_idx.append(qpos_adr + i)
qvel_adr = self.p.named.model.jnt_dofadr[jn]
for i in range(self.n_qvel[typ]):
self.qvel_idx.append(qvel_adr + i)
self.cfrc_idx = [
r
for r, k in enumerate(self.p.named.data.cfrc_ext.axes.row.names)
if k.startswith(f'{self.prefix}/')
and k != f'{self.prefix}/'
and (exclude is None or re.match(exclude, k) is None)
]
n_obs = len(self.qpos_idx) + len(self.qvel_idx) + len(self.cfrc_idx) * 6
self.observation_space = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(n_obs,), dtype=np.float32
)
def __call__(self) -> np.ndarray:
qpos = self.p.data.qpos[self.qpos_idx]
qvel = self.p.data.qvel[self.qvel_idx]
cfrc_ext = self.p.data.cfrc_ext[self.cfrc_idx]
return np.concatenate(
[qpos, qvel, np.clip(cfrc_ext.flat, -1, 1)]
).astype(np.float32)
def feature_names(self) -> List[str]:
names: List[str] = []
qp = self.qpos_names()
names += [qp[i] for i in self.qpos_idx]
qv = self.qvel_names()
names += [qv[i] for i in self.qvel_idx]
cn = self.cfrc_ext_names()
for i in self.cfrc_idx:
names += cn[i]
for i in range(len(names)):
names[i] = names[i].replace(f'{self.prefix}/', '')
return names
class JointsRelZFeaturizer(JointsFeaturizer):
'''
    JointsFeaturizer that reports the robot's Z position as relative to the
closest surface underneath it.
'''
def __init__(
self,
        p: 'dm_control.mujoco.Physics',
robot: str,
prefix: str = 'robot',
exclude: str = None,
):
super().__init__(p, robot, prefix, exclude)
self.robot_geoms: Set[int] = set()
        for i, name in enumerate(self.p.named.model.geom_bodyid.axes.row.names):
            if name.startswith(f'{self.prefix}/'):
self.robot_geoms.add(i)
# XXX Hacky lookup of z feature
names = [':pz', 'slidez:p', 'rootz:p', 'root:pz']
for name in names:
try:
self.zpos_idx = self.feature_names().index(name)
break
            except ValueError:
                continue
def relz(self):
from dm_control.mujoco.wrapper.mjbindings import mjlib
# Find closest non-robot geom from torso downwards
pos = self.p.named.data.xpos[f'{self.prefix}/torso'].copy()
dir = np.array([0.0, 0.0, -1.0])
excl = self.p.named.model.geom_bodyid[f'{self.prefix}/torso']
id = np.array([0], dtype=np.int32)
while True:
d = mjlib.mj_ray(
self.p.model.ptr, self.p.data.ptr, pos, dir, None, 1, excl, id
)
if d < 0.0: # No geom hit
break
pos += dir * d
if id[0] not in self.robot_geoms:
break
excl = self.p.model.geom_bodyid[id[0]]
return self.p.named.data.xpos[f'{self.prefix}/torso', 'z'] - pos[2]
def __call__(self) -> np.ndarray:
obs = super().__call__()
rz = self.relz()
obs[self.zpos_idx] = rz
return obs
|
bipedal-skills-main
|
bisk/features/joints.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import Dict, List
import gym
import numpy as np
log = logging.getLogger(__name__)
class Featurizer:
n_qpos: Dict[int, int] = { # qpos entries per joint type
0: 7, # mjenums.mjtJoint.mjJNT_FREE
1: 4, # mjenums.mjtJoint.mjJNT_BALL
2: 1, # mjenums.mjtJoint.mjJNT_SLIDE
3: 1, # mjenums.mjtJoint.mjJNT_HINGE
}
n_qvel: Dict[int, int] = { # qvel entries per joint type
0: 6, # mjenums.mjtJoint.mjJNT_FREE
1: 3, # mjenums.mjtJoint.mjJNT_BALL
2: 1, # mjenums.mjtJoint.mjJNT_SLIDE
3: 1, # mjenums.mjtJoint.mjJNT_HINGE
}
def __init__(
self,
p: 'dm_control.mujoco.Physics',
robot: str,
prefix: str = 'robot',
exclude: str = None,
):
self.p = p
self.prefix = prefix
self.observation_space: gym.spaces.Box = None
def reset(self):
pass
def __call__(self) -> np.ndarray:
raise NotImplementedError()
def set_frame_of_reference(self):
raise NotImplementedError()
def feature_names(self) -> List[str]:
raise NotImplementedError()
def qpos_names(self) -> List[str]:
names = ['' for i in range(len(self.p.data.qpos))]
for jn in self.p.named.model.jnt_type.axes.row.names:
typ = self.p.named.model.jnt_type[jn]
adr = self.p.named.model.jnt_qposadr[jn]
if typ == 0:
names[adr + 0] = f'{jn}:px'
names[adr + 1] = f'{jn}:py'
names[adr + 2] = f'{jn}:pz'
names[adr + 3] = f'{jn}:ow'
names[adr + 4] = f'{jn}:ox'
names[adr + 5] = f'{jn}:oy'
names[adr + 6] = f'{jn}:oz'
elif typ == 1:
names[adr + 0] = f'{jn}:ow'
names[adr + 1] = f'{jn}:ox'
names[adr + 2] = f'{jn}:oy'
names[adr + 3] = f'{jn}:oz'
elif typ == 2 or typ == 3:
names[adr] = f'{jn}:p'
else:
raise ValueError(f'Unknown joint type {typ}')
return names
def qvel_names(self) -> List[str]:
names = ['' for i in range(len(self.p.data.qvel))]
for jn in self.p.named.model.jnt_type.axes.row.names:
typ = self.p.named.model.jnt_type[jn]
adr = self.p.named.model.jnt_dofadr[jn]
if typ == 0:
names[adr + 0] = f'{jn}:lvx'
names[adr + 1] = f'{jn}:lvy'
names[adr + 2] = f'{jn}:lvz'
names[adr + 3] = f'{jn}:avx'
names[adr + 4] = f'{jn}:avy'
names[adr + 5] = f'{jn}:avz'
elif typ == 1:
names[adr + 0] = f'{jn}:avx'
names[adr + 1] = f'{jn}:avy'
names[adr + 2] = f'{jn}:avz'
elif typ == 2 or typ == 3:
names[adr] = f'{jn}:v'
else:
raise ValueError(f'Unknown joint type {typ}')
return names
def cfrc_ext_names(self) -> List[List[str]]:
names: List[List[str]] = []
for cn in self.p.named.data.cfrc_ext.axes.row.names:
names.append(
[f'{cn}:c{n}' for n in ['rx', 'ry', 'rz', 'tx', 'ty', 'tz']]
)
return names
def sensor_names(self) -> List[str]:
from dm_control.mujoco.wrapper.mjbindings import enums as mjenums
names = ['' for i in range(len(self.p.data.sensordata))]
for sn in self.p.named.model.sensor_adr.axes.row.names:
typ = self.p.named.model.sensor_type[sn]
adr = self.p.named.model.sensor_adr[sn]
if typ == mjenums.mjtSensor.mjSENS_GYRO:
feats = ['avx', 'avy', 'avz']
elif (
typ == mjenums.mjtSensor.mjSENS_VELOCIMETER
or typ == mjenums.mjtSensor.mjSENS_SUBTREELINVEL
):
feats = ['lvx', 'lvy', 'lvz']
elif typ == mjenums.mjtSensor.mjSENS_ACCELEROMETER:
feats = ['lax', 'lay', 'laz']
elif (
typ == mjenums.mjtSensor.mjSENS_FRAMEPOS
or typ == mjenums.mjtSensor.mjSENS_SUBTREECOM
):
feats = ['px', 'py', 'pz']
elif typ == mjenums.mjtSensor.mjSENS_JOINTPOS:
feats = ['']
elif typ == mjenums.mjtSensor.mjSENS_JOINTVEL:
feats = ['']
elif typ == mjenums.mjtSensor.mjSENS_FORCE:
feats = ['fx', 'fy', 'fz']
elif typ == mjenums.mjtSensor.mjSENS_TORQUE:
feats = ['tx', 'ty', 'tz']
elif typ == mjenums.mjtSensor.mjSENS_RANGEFINDER:
feats = ['d']
elif typ == mjenums.mjtSensor.mjSENS_TOUCH:
feats = ['f']
else:
raise ValueError(f'Unsupported sensor type: {typ}')
for i, f in enumerate(feats):
names[adr + i] = f'{sn}:{f}'
return names
|
bipedal-skills-main
|
bisk/features/base.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
import gym
from dm_control import _render
from dm_control.viewer import gui, renderer, viewer, views
import bisk
parser = argparse.ArgumentParser()
parser.add_argument('task')
parser.add_argument('robot')
args = parser.parse_args()
env_name = {
'hurdles': 'BiskHurdles-v1',
'limbo': 'BiskLimbo-v1',
'hurdleslimbo': 'BiskHurdlesLimbo-v1',
'gaps': 'BiskGaps-v1',
'stairs': 'BiskStairs-v1',
'goalwall': 'BiskGoalWall-v1',
'polebalance': 'BiskPoleBalance-v1',
'gototarget': 'BiskGoToTarget-v1',
'butterflies': 'BiskButterflies-v1',
}[args.task.lower()]
env = gym.make(env_name, robot=args.robot)
print(
f'timestep {env.p.model.opt.timestep}s x frameskip {env.frameskip} = dt {env.dt}s'
)
width = 480
height = 480
title = f'{args.task} - {args.robot}'
render_surface = None
_MAX_FRONTBUFFER_SIZE = 2048
render_surface = _render.Renderer(
max_width=_MAX_FRONTBUFFER_SIZE, max_height=_MAX_FRONTBUFFER_SIZE
)
ren = renderer.OffScreenRenderer(env.p.model, render_surface)
viewer_layout = views.ViewportLayout()
viewport = renderer.Viewport(width, height)
window = gui.RenderWindow(width, height, title)
vw = viewer.Viewer(viewport, window.mouse, window.keyboard)
ren.components += viewer_layout
vw.initialize(env.p, ren, touchpad=False)
step = 0
def tick():
global step
global obs
if step == 0:
        obs, _ = env.reset(seed=0)
#env.p.named.data.qvel['ball'][0:3] = [10, 3, 4]
a = env.action_space.sample()
a *= 0
'''
if step < 1:
a[2] = 1
elif step < 100:
a[0] = 1
else:
a[2] = -1
'''
    obs, r, terminated, truncated, i = env.step(a)
    d = terminated or truncated
step += 1
if step > 200 or d:
print(r)
print(f'reset after {step} steps')
step = 0
time.sleep(0.05)
vw.render()
def _tick():
viewport.set_size(*window.shape)
tick()
return ren.pixels
window.event_loop(tick_func=_tick)
window.close()
|
bipedal-skills-main
|
exp/testgui.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import yaml
def load_configs(file_path, ws_dir):
with open(file_path, 'r') as f:
config_dict = yaml.load(f, Loader=yaml.FullLoader)
config_dict['log_dir'] = os.path.join(ws_dir, config_dict['log_dir'])
config_dict['data']['raw_dataset_dir'] = os.path.join(ws_dir, config_dict['data']['raw_dataset_dir'])
config_dict['data']['dataset_dir'] = os.path.join(ws_dir, config_dict['data']['dataset_dir'])
config_dict['data']['bin_path'] = os.path.join(ws_dir, config_dict['data']['bin_path'])
config_dict['data']['smpl_path'] = os.path.join(ws_dir, config_dict['data']['smpl_path'])
config_dict['data']['uv_info'] = os.path.join(ws_dir, config_dict['data']['uv_info'])
config_dict['data']['resample_idxs_path'] = os.path.join(ws_dir, config_dict['data']['resample_idxs_path'])
config_dict['data']['train_bin_path'] = os.path.join(ws_dir, config_dict['data']['train_bin_path'])
config_dict['data']['interp_bin_path'] = os.path.join(ws_dir, config_dict['data']['interp_bin_path'])
config_dict['data']['extrap_bin_path'] = os.path.join(ws_dir, config_dict['data']['extrap_bin_path'])
if 'type' not in config_dict['data']:
config_dict['data']['type'] = 'CAPE'
if 'separate_detail' not in config_dict['data']:
config_dict['data']['separate_detail'] = True
return config_dict
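# Usage sketch; the YAML file and workspace directory are placeholders.
if __name__ == '__main__':
    cfg = load_configs('configs/example.yaml', '/path/to/workspace')
    print(cfg['log_dir'], cfg['data']['type'])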
|
AutoAvatar-main
|
utils/configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import torch
import pickle
from pytorch3d.io import load_ply
# Classes -------------------------------------------------------------------------------------------------------
class DFaustJson():
"""
DFaust .bin Structure:
'subject'
'seqs'
for seq in seqs:
'id'
'seq_name'
'frames'
for frame in frames:
'ply_path'
'poses'
"""
def __init__(self, bin_path=None):
self.data = None
if bin_path is not None:
self.load_bin_file(bin_path)
def load_bin_file(self, bin_path):
with open(bin_path, 'rb') as f:
self.data = pickle.load(f)
def dump_bin_file(self, bin_path):
with open(bin_path, 'wb') as f:
pickle.dump(self.data, f)
def append_frames(self, frames, ply_path, poses):
frames.append({
'ply_path': ply_path,
'poses': poses
})
return frames
def append_seqs(self, seqs, seq_name, frames):
seqs.append({
'id': len(seqs),
'seq_name': seq_name,
'frames': frames
})
return seqs
def set_data(self, subject, seqs):
self.data = {
'subject': subject,
'seqs': seqs
}
def num_of_seqs(self):
return len(self.data['seqs'])
def num_of_frames(self):
count = 0
for seq in self.data['seqs']:
count += len(seq['frames'])
return count
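# Sketch of building a .bin file with the structure documented in the class
# docstring; the subject id, sequence name, ply path and pose vector below
# are placeholders.
if __name__ == '__main__':
    dfaust = DFaustJson()
    frames = dfaust.append_frames([], 'seq_00/frame_000.ply', poses=[0.0] * 72)
    seqs = dfaust.append_seqs([], 'running_on_spot', frames)
    dfaust.set_data('50002', seqs)
    print(dfaust.num_of_seqs(), dfaust.num_of_frames())  # 1 1
    # dfaust.dump_bin_file('dfaust_50002.bin')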
|
AutoAvatar-main
|
utils/DFaust.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.renderer import (
PerspectiveCameras,
AmbientLights,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
HardPhongShader,
TexturesVertex,
rasterize_meshes
)
from pytorch3d.structures import Meshes
from pytorch3d.io import load_obj
from numba import jit
import copy
import open3d as o3d
# Functions -----------------------------------------------------------------------------------------------------
def render_mesh(verts, faces, R, t, f, image_size=(512, 512), colors=None, simplify_mesh=False):
"""
:param verts: (N, 3)
:param faces: (F, 3)
"""
device = verts.device
f_th = torch.tensor(f, dtype=torch.float32, device=device)[None]
image_size_th = torch.tensor(image_size, dtype=torch.int32, device=device)[None]
cameras = PerspectiveCameras(focal_length=f_th, R=R[None], T=t[None], device=device, image_size=image_size_th)
raster_settings = RasterizationSettings(
image_size=image_size,
blur_radius=0.0,
faces_per_pixel=1,
)
lights = AmbientLights(device=device)
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=cameras,
raster_settings=raster_settings
),
shader=HardPhongShader(
device=device,
cameras=cameras,
lights=lights
)
)
if not simplify_mesh:
if colors is not None:
mesh = Meshes(verts=verts[None], faces=faces[None], textures=TexturesVertex(colors[None]))
else:
mesh = Meshes(verts=verts[None], faces=faces[None])
normals = (mesh.verts_normals_padded() + 1) / 2
mesh = Meshes(verts=verts[None], faces=faces[None], textures=TexturesVertex(normals))
else:
if colors is None:
mesh = Meshes(verts=verts[None], faces=faces[None])
normals = (mesh.verts_normals_padded() + 1) / 2
colors = normals[0]
mesh_o3d = o3d.geometry.TriangleMesh()
mesh_o3d.vertices = o3d.utility.Vector3dVector(verts.cpu().numpy())
mesh_o3d.triangles = o3d.utility.Vector3iVector(faces.cpu().numpy())
mesh_o3d.vertex_colors = o3d.utility.Vector3dVector(colors.cpu().numpy())
mesh_o3d = mesh_o3d.simplify_quadric_decimation(int(faces.shape[0] * 0.1))
verts, faces, colors = torch.from_numpy(np.asarray(mesh_o3d.vertices)), torch.from_numpy(np.asarray(mesh_o3d.triangles)), torch.from_numpy(np.asarray(mesh_o3d.vertex_colors))
mesh = Meshes(verts=verts[None].float(), faces=faces[None], textures=TexturesVertex(colors[None].float())).to(device)
images = renderer(mesh)[0, ..., :3].clip(min=0, max=1)
return images
def parse_uv_info(obj_path):
verts, faces_tuple, aux_tuple = load_obj(obj_path)
faces = faces_tuple.verts_idx.numpy()
faces_uv = faces_tuple.textures_idx.numpy()
verts_uv = aux_tuple.verts_uvs.numpy()
verts_uv = verts_uv * 2 - 1 #(1 - verts_uv) * 2 - 1
N = verts.shape[0]
F = faces.shape[0]
M = verts_uv.shape[0]
assert faces_uv.shape == (F, 3)
print(N, F, M)
v2uv = np.zeros((N, 10), dtype=np.int32) - 1
v2uv_count = np.zeros((N,), dtype=np.int32)
@jit(nopython=True)
def func(faces, faces_uv, v2uv, v2uv_count):
for i in range(F):
for k in range(3):
v = faces[i, k]
uv = faces_uv[i, k]
included = False
for j in range(10):
if v2uv[v, j] == uv:
included = True
break
if not included:
v2uv[v, v2uv_count[v]] = uv
v2uv_count[v] += 1
for i in range(N):
for k in range(10):
if v2uv[i, k] == -1:
v2uv[i, k] = v2uv[i, 0]
return v2uv, v2uv_count
v2uv, v2uv_count = func(faces, faces_uv, v2uv, v2uv_count)
print(np.amin(v2uv_count), np.amax(v2uv_count))
v2uv = v2uv[:, :np.amax(v2uv_count)]
return verts_uv, faces_uv, v2uv, faces
def compute_per_pixel_verts_idx_bary_weights(verts_uv, faces_uv, v2uv, uv_size):
# Compute uv2v
N, K = v2uv.shape
M = verts_uv.shape[0]
uv2v = torch.zeros((M,), dtype=torch.long) - 1
for i in range(K):
uv2v[v2uv[:, i]] = torch.arange(N)
# Rasterization
verts_uv = -verts_uv
verts_uv_ = torch.cat([verts_uv, torch.ones((M, 1), dtype=torch.float)], dim=-1)
meshes = Meshes(verts=verts_uv_[None].cuda(), faces=faces_uv[None].cuda())
pix_to_face, _, barycentric, _ = rasterize_meshes(meshes, uv_size, faces_per_pixel=1) #, blur_radius=0.0001, clip_barycentric_coords=True)
assert pix_to_face.shape == (1, uv_size, uv_size, 1) and barycentric.shape == (1, uv_size, uv_size, 1, 3)
faces_uv_ = torch.cat([-torch.ones((1, 3), dtype=torch.long), faces_uv], dim=0) # (1 + F, 3)
pix_to_uv = faces_uv_[pix_to_face[0, ..., 0] + 1]
assert pix_to_uv.shape == (uv_size, uv_size, 3)
uv2v_ = torch.cat([-torch.ones((1,), dtype=torch.long), uv2v], dim=0) # (1 + M,)
pix_to_v = uv2v_[pix_to_uv + 1]
assert pix_to_v.shape == (uv_size, uv_size, 3)
return pix_to_v, barycentric[0, ..., 0, :]
# Classes -------------------------------------------------------------------------------------------------------
class UVRender(nn.Module):
def __init__(self, args, verts_uv, faces_uv, v2uv):
super().__init__()
self.args = copy.deepcopy(args)
self.register_buffer('verts_uv', verts_uv)
self.register_buffer('faces_uv', faces_uv)
self.register_buffer('v2uv', v2uv)
pix_to_v, bary_w = compute_per_pixel_verts_idx_bary_weights(verts_uv, faces_uv, v2uv, args['model']['uv_size'])
self.register_buffer('pix_to_v', pix_to_v)
self.register_buffer('bary_w', bary_w)
def to_uv(self, verts):
"""
:param verts: (B, N, C)
"""
B, N, C = verts.shape
verts_ = torch.cat([torch.zeros((B, 1, C), dtype=torch.float, device=verts.device), verts], dim=1) # (B, 1 + N, C)
pix_verts = verts_[:, self.pix_to_v + 1, :] # (B, H, W, 3, C)
verts_uv = (pix_verts * self.bary_w[None, ..., None]).sum(dim=-2) # (B, H, W, C)
assert verts_uv.shape == (B, self.args['model']['uv_size'], self.args['model']['uv_size'], C)
return verts_uv.permute(0, 3, 1, 2).contiguous()
def from_uv(self, verts_uv):
"""
:param verts_uv: (B, C, H, W)
"""
B, C, H, W = verts_uv.shape
N, K = self.v2uv.shape
grid = self.verts_uv[self.v2uv][None].expand(B, N, K, 2).contiguous()
verts = F.grid_sample(verts_uv, grid, mode='bilinear', align_corners=False) # (B, C, N, K)
assert verts.shape == (B, C, N, K)
verts = verts.mean(dim=-1).permute(0, 2, 1).contiguous()
return verts
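# Smoke-test sketch for render_mesh on a single triangle; the camera
# extrinsics and focal length below are arbitrary illustration values.
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    verts = torch.tensor(
        [[0.0, 0.0, 2.0], [0.1, 0.0, 2.0], [0.0, 0.1, 2.0]], device=device
    )
    faces = torch.tensor([[0, 1, 2]], device=device)
    R = torch.eye(3, device=device)
    t = torch.zeros(3, device=device)
    images = render_mesh(verts, faces, R, t, f=(500.0, 500.0))
    print(images.shape)  # (512, 512, 3)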
|
AutoAvatar-main
|
utils/render.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
def load_components(model, ckpt_dir, ckpt_itr, name):
state_dict = model.state_dict()
ckpt_state_dict = torch.load(os.path.join(ckpt_dir, 'ckpt', 'dyn_net_%06d.pth' % ckpt_itr), map_location='cpu')
ckpt_state_dict = {key: value for key, value in ckpt_state_dict.items() if name in key}
state_dict.update(ckpt_state_dict)
model.load_state_dict(state_dict)
|
AutoAvatar-main
|
utils/io.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# import openvdb as vdb
import numpy as np
import os
import torch
import torch.nn.functional as F
import math
from skimage import measure
def build_smooth_conv3D(in_channels=1, out_channels=1, kernel_size=3, padding=1):
smooth_conv = torch.nn.Conv3d(
in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, padding=padding
)
smooth_conv.weight.data = torch.ones(
(kernel_size, kernel_size, kernel_size),
dtype=torch.float32
).reshape(in_channels, out_channels, kernel_size, kernel_size, kernel_size) / (kernel_size**3)
smooth_conv.bias.data = torch.zeros(out_channels)
return smooth_conv
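# Quick check that build_smooth_conv3D is a box (mean) filter: a constant
# volume stays constant away from the zero-padded borders.
if __name__ == '__main__':
    _vol = torch.ones(1, 1, 8, 8, 8)
    _out = build_smooth_conv3D()(_vol)
    print(float(_out[0, 0, 4, 4, 4]))  # ~1.0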
def reconstruction(net, cuda, calib_tensor,
resolution, b_min, b_max,
use_octree=False, num_samples=10000, transform=None, thresh=0.5, texture_net = None, poses=None, shapes=None):
'''
Reconstruct meshes from sdf predicted by the network.
    :param net: a BasePixImpNet object. Call the image filter beforehand.
:param cuda: cuda device
:param calib_tensor: calibration tensor
:param resolution: resolution of the grid cell
:param b_min: bounding box corner [x_min, y_min, z_min]
:param b_max: bounding box corner [x_max, y_max, z_max]
:param use_octree: whether to use octree acceleration
:param num_samples: how many points to query each gpu iteration
:return: marching cubes results.
'''
# Then we define the lambda function for cell evaluation
    color_flag = texture_net is not None
def eval_func(points):
samples = points.t().unsqueeze(0).to(cuda)
# pred = net.query(samples, calib_tensor)[0][0]
pred = net(samples, poses, shapes)[0]
return pred
def batch_eval(points, num_samples=num_samples):
num_pts = points.shape[1]
sdf = []
num_batches = num_pts // num_samples
for i in range(num_batches):
sdf.append(
eval_func(points[:, i * num_samples:i * num_samples + num_samples])
)
if num_pts % num_samples:
sdf.append(
eval_func(points[:, num_batches * num_samples:])
)
if num_pts == 0:
return None
sdf = torch.cat(sdf)
return sdf
# Then we evaluate the grid
max_level = int(math.log2(resolution))
sdf = eval_progressive(batch_eval, 4, max_level, cuda, b_min, b_max, thresh)
# calculate matrix
mat = np.eye(4)
length = b_max - b_min
mat[0, 0] = length[0] / sdf.shape[0]
mat[1, 1] = length[1] / sdf.shape[1]
mat[2, 2] = length[2] / sdf.shape[2]
mat[0:3, 3] = b_min
# Finally we do marching cubes
try:
verts, faces, normals, values = measure.marching_cubes(sdf, thresh, gradient_direction='ascent')
    except Exception:
        print('error: marching cubes failed')
return -1
# grid = vdb.FloatGrid(1.0)
# grid.copyFromArray(sdf)
# verts, quads = grid.convertToQuads()
# faces = np.zeros((quads.shape[0] * 2, 3), dtype=np.uint32)
# faces[:quads.shape[0], :] = quads[:, [0, 2, 1]]
# faces[quads.shape[0]:, :] = quads[:, [0, 3, 2]]
# verts = np.zeros((10, 3), dtype=np.float32)
# faces = np.zeros((10, 3), dtype=np.int32)
# transform verts into world coordinate system
verts = np.matmul(mat[:3, :3], verts.T) + mat[:3, 3:4]
verts = verts.T
if np.linalg.det(mat) > 0:
faces = faces[:,[0,2,1]]
if color_flag:
torch_verts = torch.Tensor(verts).unsqueeze(0).permute(0,2,1).to(cuda)
with torch.no_grad():
_, last_layer_feature, point_local_feat = net.query(torch_verts, calib_tensor, return_last_layer_feature=True)
vertex_colors = texture_net.query(point_local_feat, last_layer_feature)
vertex_colors = vertex_colors.squeeze(0).permute(1,0).detach().cpu().numpy()
return verts, faces, vertex_colors #, normals, values, vertex_colors
else:
return verts, faces #, normals, values
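# Illustrative sketch (an assumption, not from the original repo): any callable
# with the signature net(samples, poses, shapes) -> (B, N) occupancy values can
# drive `reconstruction`. Here an analytic sphere of radius 0.5 is meshed on a
# 64^3 grid; values above thresh=0 count as inside.
def _example_reconstruction(device=torch.device('cpu')):
    def sphere_net(samples, poses, shapes):
        # samples: (1, N, 3) world-space points -> (1, N) occupancy
        return 0.5 - samples.norm(dim=-1)
    out = reconstruction(sphere_net, device, None, 64,
                         np.array([-1.0, -1.0, -1.0]), np.array([1.0, 1.0, 1.0]),
                         thresh=0.0)
    verts, faces = out  # out == -1 only if marching cubes fails
    return verts, faces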
def eval_progressive(batch_eval, min_level, max_level, cuda, b_min, b_max, thresh=0.5):
steps = [i for i in range(min_level, max_level+1)]
b_min = torch.tensor(b_min).to(cuda)
b_max = torch.tensor(b_max).to(cuda)
# init
smooth_conv3x3 = build_smooth_conv3D(in_channels=1, out_channels=1, kernel_size=3, padding=1).to(cuda)
arrange = torch.linspace(0, 2**steps[-1], 2**steps[0]+1).long().to(cuda)
coords = torch.stack(torch.meshgrid([
arrange, arrange, arrange
])) # [3, 2**step+1, 2**step+1, 2**step+1]
coords = coords.view(3, -1).t() # [N, 3]
calculated = torch.zeros(
(2**steps[-1]+1, 2**steps[-1]+1, 2**steps[-1]+1), dtype=torch.bool
).to(cuda)
    grid8_offsets = torch.stack(torch.meshgrid([
        torch.tensor([-1, 0, 1]), torch.tensor([-1, 0, 1]), torch.tensor([-1, 0, 1])
    ])).int().to(cuda).view(3, -1).t() # [27, 3]
with torch.no_grad():
for step in steps:
resolution = 2**step + 1
stride = 2**(steps[-1]-step)
if step == steps[0]:
coords2D = coords.float() / (2**steps[-1]+1) * (b_max - b_min) + b_min
sdf_all = batch_eval(
coords2D.t(),
).view(resolution, resolution, resolution)
coords_accum = coords / stride
coords_accum = coords_accum.long()
calculated[coords[:, 0], coords[:, 1], coords[:, 2]] = True
else:
valid = F.interpolate(
(sdf_all>thresh).view(1, 1, *sdf_all.size()).float(),
size=resolution, mode="trilinear", align_corners=True
)[0, 0]
sdf_all = F.interpolate(
sdf_all.view(1, 1, *sdf_all.size()),
size=resolution, mode="trilinear", align_corners=True
)[0, 0]
coords_accum *= 2
is_boundary = (valid > 0.0) & (valid < 1.0)
is_boundary = smooth_conv3x3(is_boundary.float().view(1, 1, *is_boundary.size()))[0, 0] > 0
is_boundary[coords_accum[:, 0], coords_accum[:, 1], coords_accum[:, 2]] = False
# coords = is_boundary.nonzero() * stride
coords = torch.nonzero(is_boundary) * stride
coords2D = coords.float() / (2**steps[-1]+1) * (b_max - b_min) + b_min
# coords2D = coords.float() / (2**steps[-1]+1)
sdf = batch_eval(
coords2D.t(),
) #[N]
if sdf is None:
continue
if sdf is not None:
sdf_all[is_boundary] = sdf
voxels = coords / stride
voxels = voxels.long()
coords_accum = torch.cat([
voxels,
coords_accum
], dim=0).unique(dim=0)
calculated[coords[:, 0], coords[:, 1], coords[:, 2]] = True
for n_iter in range(14):
sdf_valid = valid[voxels[:, 0], voxels[:, 1], voxels[:, 2]]
idxs_danger = ((sdf_valid==1) & (sdf<thresh)) | ((sdf_valid==0) & (sdf>thresh)) #[N,]
coords_danger = coords[idxs_danger, :] #[N, 3]
if coords_danger.size(0) == 0:
break
                    coords_around = coords_danger.int() + grid8_offsets.view(-1, 1, 3) * stride
                    coords_around = coords_around.reshape(-1, 3).long()
                    coords_around = coords_around.unique(dim=0)
                    coords_around[:, 0] = coords_around[:, 0].clamp(0, calculated.size(0)-1)
                    coords_around[:, 1] = coords_around[:, 1].clamp(0, calculated.size(1)-1)
                    coords_around[:, 2] = coords_around[:, 2].clamp(0, calculated.size(2)-1)
                    coords = coords_around[
                        ~calculated[coords_around[:, 0], coords_around[:, 1], coords_around[:, 2]]
                    ]
if coords.size(0) == 0:
break
coords2D = coords.float() / (2**steps[-1]+1) * (b_max - b_min) + b_min
# coords2D = coords.float() / (2**steps[-1]+1)
sdf = batch_eval(
coords2D.t(),
) #[N]
voxels = coords / stride
voxels = voxels.long()
sdf_all[voxels[:, 0], voxels[:, 1], voxels[:, 2]] = sdf
coords_accum = torch.cat([
voxels,
coords_accum
], dim=0).unique(dim=0)
calculated[coords[:, 0], coords[:, 1], coords[:, 2]] = True
return sdf_all.data.cpu().numpy()
|
AutoAvatar-main
|
utils/implicit.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pickle
import os
import torch
import torch.nn.functional as F
from pytorch3d.ops import norm_laplacian, sample_points_from_meshes, knn_points, knn_gather
from pytorch3d.structures import Meshes, Pointclouds, utils as struct_utils
from pytorch3d.io import load_ply
from pytorch3d import _C
from pytorch3d.renderer import TexturesVertex
from pytorch3d.transforms import axis_angle_to_quaternion
import smplx
from smplx.utils import SMPLOutput
from smplx.lbs import blend_shapes, vertices2joints, batch_rodrigues, batch_rigid_transform
# Functions -----------------------------------------------------------------------------------------------------
def load_smpl(args):
if args['data']['type'] == 'CAPE':
ply_path = os.path.join(args['data']['raw_dataset_dir'], 'minimal_body_shape', args['data']['subject'], '%s_minimal.ply' % args['data']['subject'])
    elif args['data']['type'] == 'DFaust':
        ply_path = os.path.join(args['data']['dataset_dir'], 'smpl_poses', args['data']['subject'], 'v_template.ply')
    else:
        raise ValueError("Unknown dataset type '%s'!" % args['data']['type'])
v_template, _ = load_ply(ply_path)
smpl_model = MySMPL(args['data']['smpl_path'], v_template=v_template)
return smpl_model
def taubin_smoothing(
meshes: Meshes, lambd: float = 0.53, mu: float = -0.53, num_iter: int = 10
) -> Meshes:
"""
Taubin smoothing [1] is an iterative smoothing operator for meshes.
At each iteration
verts := (1 - λ) * verts + λ * L * verts
verts := (1 - μ) * verts + μ * L * verts
This function returns a new mesh with smoothed vertices.
Args:
meshes: Meshes input to be smoothed
lambd, mu: float parameters for Taubin smoothing,
lambd > 0, mu < 0
num_iter: number of iterations to execute smoothing
Returns:
mesh: Smoothed input Meshes
    [1] Curve and Surface Smoothing without Shrinkage,
        Gabriel Taubin, ICCV 1995
"""
verts = meshes.verts_packed() # V x 3
edges = meshes.edges_packed() # E x 3
for _ in range(num_iter):
L = norm_laplacian(verts, edges)
total_weight = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
verts = (1 - lambd) * verts + lambd * torch.mm(L, verts) / (total_weight + 1e-10)
# pyre-ignore
L = norm_laplacian(verts, edges)
total_weight = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
verts = (1 - mu) * verts + mu * torch.mm(L, verts) / (total_weight + 1e-10)
verts_list = struct_utils.packed_to_list(
verts, meshes.num_verts_per_mesh().tolist()
)
mesh = Meshes(verts=list(verts_list), faces=meshes.faces_list())
return mesh
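# Minimal sketch (illustration only): denoise a jittered copy of a mesh. Any
# pytorch3d Meshes object works here, e.g. one built from load_ply above.
def _example_taubin(meshes: Meshes) -> Meshes:
    noisy = meshes.offset_verts(0.01 * torch.randn_like(meshes.verts_packed()))
    return taubin_smoothing(noisy, lambd=0.53, mu=-0.53, num_iter=10)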
def compute_adjacent_matrix(parents, n_rings):
"""
:param parents: (J,)
"""
J = parents.shape[0]
W = torch.zeros(J, J - 1)
for i in range(J - 1):
W[i + 1, i] += 1.0
parent = parents[i+1]
for j in range(n_rings):
W[parent, i] += 1.0
if parent == 0:
break
parent = parents[parent]
# W /= W.sum(0, keepdim=True) + 1e-16
return W
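# Toy example (illustration only): a 4-joint kinematic chain 0 -> 1 -> 2 -> 3.
# Column i of W collects joint i+1 and, with n_rings=1, one parent above it.
def _example_adjacent_matrix():
    parents = torch.tensor([-1, 0, 1, 2])
    W = compute_adjacent_matrix(parents, n_rings=1)
    assert W.shape == (4, 3)  # (J, J - 1): the root has no column of its own
    return W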
def sample_igr_pts(verts, faces, bbmin, bbmax, args):
"""
:param verts: (B, N, 3)
:param faces: (B, F, 3)
:param bbmin / bbmax: (B, 3)
"""
B, N, _ = verts.shape
meshes = Meshes(verts=verts, faces=faces)
if not args['model']['use_detail']:
surf_pts, surf_normals = sample_points_from_meshes(meshes, num_samples=args['train']['n_pts_scan'], return_normals=True)
else:
normals = meshes.verts_normals_padded()
meshes = Meshes(verts=verts, faces=faces, textures=TexturesVertex(normals))
surf_pts, surf_normals = sample_points_from_meshes(meshes, num_samples=args['train']['n_pts_scan'], return_textures=True)
surf_normals = F.normalize(surf_normals, p=2, dim=-1)
igr_pts = surf_pts[:, :args['train']['n_pts_scan_igr']] + torch.normal(0, args['train']['pts_igr_sigma'], (B, args['train']['n_pts_scan_igr'], 3), device=verts.device)
igr_pts = torch.minimum(torch.maximum(igr_pts, bbmin[:, None].expand(B, args['train']['n_pts_scan_igr'], 3)), bbmax[:, None].expand(B, args['train']['n_pts_scan_igr'], 3))
bbox_pts = torch.rand((B, args['train']['n_pts_bbox_igr'], 3), device=verts.device) * (bbmax - bbmin)[:, None] + bbmin[:, None]
rand_pts = torch.cat([igr_pts, bbox_pts], dim=1)
return surf_pts, surf_normals, igr_pts, bbox_pts, rand_pts
def _point_to_edge_distance(
point: torch.Tensor, s0, s1
) -> torch.Tensor:
"""
    Computes the euclidean distance of points to edge segments and the
    corresponding closest points. Modified from https://github.com/facebookresearch/pytorch3d/issues/613
    Args:
        point: FloatTensor of shape (P, 3)
        s0, s1: FloatTensors of shape (P, 3), the segment start and end points
    Returns:
        dist: FloatTensor of shape (P,), euclidean point-to-segment distances
        x: FloatTensor of shape (P, 3), the closest point on each segment
If a, b are the start and end points of the segments, we
parametrize a point p as
x(t) = a + t * (b - a)
To find t which describes p we minimize (x(t) - p) ^ 2
Note that p does not need to live in the space spanned by (a, b)
"""
s01 = s1 - s0
norm_s01 = (s01 * s01).sum(dim=-1)
same_edge = norm_s01 < 1e-8
t = torch.where(same_edge, torch.ones_like(norm_s01) * 0.5, (s01 * (point - s0)).sum(dim=-1) / norm_s01)
t = torch.clamp(t, min=0.0, max=1.0)[..., None]
x = s0 + t * s01
dist = ((x - point) * (x - point)).sum(dim=-1).sqrt()
return dist, x
def _point_to_bary(point: torch.Tensor, a, b, c) -> torch.Tensor:
"""
Computes the barycentric coordinates of point wrt triangle (tri)
Note that point needs to live in the space spanned by tri = (a, b, c),
i.e. by taking the projection of an arbitrary point on the space spanned by
tri. Modified from https://github.com/facebookresearch/pytorch3d/issues/613
    Args:
        point: FloatTensor of shape (P, 3)
        a, b, c: FloatTensors of shape (P, 3), the triangle vertices
    Returns:
        bary: FloatTensor of shape (P, 3)
"""
assert point.dim() == 2 and point.shape[1] == 3
P, _ = point.shape
assert a.shape == (P, 3) and b.shape == (P, 3) and c.shape == (P, 3)
v0 = b - a
v1 = c - a
v2 = point - a
d00 = (v0 * v0).sum(dim=-1)
d01 = (v0 * v1).sum(dim=-1)
d11 = (v1 * v1).sum(dim=-1)
d20 = (v2 * v0).sum(dim=-1)
d21 = (v2 * v1).sum(dim=-1)
denom = d00 * d11 - d01 * d01 + 1e-8
s2 = (d11 * d20 - d01 * d21) / denom
s3 = (d00 * d21 - d01 * d20) / denom
s1 = 1.0 - s2 - s3
bary = torch.stack([s1, s2, s3], dim=-1)
return bary
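# Sanity sketch (illustrative): the barycentric coordinates of a triangle's
# centroid are (1/3, 1/3, 1/3).
def _example_point_to_bary():
    a = torch.tensor([[0.0, 0.0, 0.0]])
    b = torch.tensor([[1.0, 0.0, 0.0]])
    c = torch.tensor([[0.0, 1.0, 0.0]])
    centroid = (a + b + c) / 3.0
    return _point_to_bary(centroid, a, b, c)  # ~[[1/3, 1/3, 1/3]]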
def proj_pts_to_mesh(pts, verts, faces, verts_feat=None, scale=1000, return_idxs=False):
"""
:param pts: (B, M, 3)
:param verts: (B, N, 3)
:param faces: (B, F, 3)
:param verts_feat: (B, N, C)
"""
B, M, _ = pts.shape
N = verts.shape[1]
F = faces.shape[1]
pts = pts * scale
verts = verts * scale
meshes = Meshes(verts=verts, faces=faces)
pcls = Pointclouds(pts)
assert len(meshes) == B and len(pcls) == B
# packed representation for pointclouds
points = pcls.points_packed() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
max_points = pcls.num_points_per_cloud().max().item()
assert torch.allclose(points, pts.view(-1, 3))
# packed representation for faces
verts_packed = meshes.verts_packed()
faces_packed = meshes.faces_packed()
tris = verts_packed[faces_packed] # (T, 3, 3)
tris_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_tris = meshes.num_faces_per_mesh().max().item()
verts_normals_packed = meshes.verts_normals_packed()
faces_normals_packed = meshes.faces_normals_packed()
assert torch.allclose(verts_packed, verts.view(-1, 3)) #and torch.allclose(faces_packed, faces.view(-1, 3))
dists, idxs = _C.point_face_dist_forward(
points, points_first_idx, tris, tris_first_idx, max_points, 1e-3
)
pts_faces_normals = faces_normals_packed[idxs] # (P, 3)
pts_verts_normals = verts_normals_packed[faces_packed][idxs] # (P, 3, 3)
pts_tris = tris[idxs] # (P, 3, 3)
# Project pts to the plane of its closest triangle
v, v0, v1, v2 = points, pts_tris[:, 0], pts_tris[:, 1], pts_tris[:, 2]
sd = -((v0 - v) * pts_faces_normals).sum(dim=-1, keepdim=True)
v_proj = -sd * pts_faces_normals + v
# Check v_proj outside triangle
inside = torch.isclose(sd[:, 0].abs(), dists.sqrt(), atol=1e-5)
outside = torch.logical_not(inside)
# Project pts to triangle edges
if outside.sum().item() > 0:
e01_dist, e01_v_proj = _point_to_edge_distance(v[outside], v0[outside], v1[outside])
e02_dist, e02_v_proj = _point_to_edge_distance(v[outside], v0[outside], v2[outside])
e12_dist, e12_v_proj = _point_to_edge_distance(v[outside], v1[outside], v2[outside])
e_dist = torch.stack([e01_dist, e02_dist, e12_dist], dim=0) # (3, P_)
e_v_proj = torch.stack([e01_v_proj, e02_v_proj, e12_v_proj], dim=0) # (3, P_, 3)
e_min_idxs = torch.argmin(e_dist, dim=0) # (P_,)
v_proj_out = torch.gather(e_v_proj, dim=0, index=e_min_idxs[None, :, None].expand(1, e_dist.shape[1], 3))[0]
v_proj[outside] = v_proj_out
# Compute barycentric coordinates
bary = _point_to_bary(v_proj, v0, v1, v2) # (P, 3)
pts_normals = (pts_verts_normals * bary[..., None]).sum(dim=-2)
sd = torch.norm(v - v_proj + 1e-8, dim=-1, p=2) * ((v - v_proj) * pts_normals).sum(dim=-1).sign()
# Test
if not torch.allclose(sd.abs(), dists.sqrt(), atol=1e-3):
print('sd:', (sd.abs() - dists.sqrt()).abs().max(), ((sd.abs() - dists.sqrt()).abs() > 1e-3).sum())
# v_proj_rec = (pts_tris * bary[..., None]).sum(dim=-2)
# if not torch.allclose(v_proj, v_proj_rec, atol=1e-3):
# print('v_proj:', (v_proj - v_proj_rec).abs().max())
# if sd.isnan().sum().item() > 0:
# print(sd.isnan().sum(), '/', sd.shape)
if verts_feat is not None:
C = verts_feat.shape[-1]
assert verts_feat.shape == (B, N, C)
verts_feat_packed = verts_feat.view(-1, C)
pts_verts_feat = verts_feat_packed[faces_packed][idxs] # (P, 3, C)
pts_feat = (pts_verts_feat * bary[..., None]).sum(dim=-2)
pts_feat = pts_feat.view(B, M, C)
else:
pts_feat = None
if not return_idxs:
return sd.view(B, M) / scale, v_proj.view(B, M, 3) / scale, faces_packed[idxs].reshape(B, M, 3), bary.view(B, M, 3), pts_feat
else:
return sd.view(B, M) / scale, v_proj.view(B, M, 3) / scale, faces_packed[idxs].reshape(B, M, 3), bary.view(B, M, 3), pts_feat, idxs.view(B, M)
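# Illustrative sketch (assumption: a recent pytorch3d whose point_face_dist_forward
# takes the extra min_triangle_area argument, as used above): project a point onto
# a single triangle and read back the signed distance and barycentric weights.
def _example_proj_pts_to_mesh():
    verts = torch.tensor([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])
    faces = torch.tensor([[[0, 1, 2]]])
    pts = torch.tensor([[[0.25, 0.25, 0.1]]])
    sd, v_proj, _, bary, _ = proj_pts_to_mesh(pts, verts, faces)
    return sd, v_proj, bary  # sd ~ 0.1: the point lies 0.1 above the triangle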
def proj_pts_to_mesh_sample(pts, verts, faces, verts_feat=None, n_sample=100000):
"""
:param pts: (B, M, 3)
:param verts: (B, N, 3)
:param faces: (B, F, 3)
:param verts_feat: (B, N, C)
"""
B, M, _ = pts.shape
F = faces.shape[1]
K = n_sample
if verts_feat is None:
verts_feat = torch.zeros_like(verts)
C = verts_feat.shape[-1]
meshes = Meshes(verts=verts, faces=faces, textures=TexturesVertex(verts_feat))
pts_v, pts_v_normals, pts_v_feat = sample_points_from_meshes(meshes, num_samples=n_sample, return_normals=True, return_textures=True)
assert pts_v.shape == (B, K, 3) and pts_v_normals.shape == (B, K, 3) and pts_v_feat.shape == (B, K, C)
# KNN
_, idx, nn = knn_points(pts, pts_v, K=1, return_nn=True)
assert torch.allclose(nn, knn_gather(pts_v, idx)) and idx.shape == (B, M, 1)
nn_normals = knn_gather(pts_v_normals, idx)
nn_feat = knn_gather(pts_v_feat, idx)
assert nn.shape == (B, M, 1, 3) and nn_normals.shape == (B, M, 1, 3) and nn_feat.shape == (B, M, 1, C)
nn, nn_normals, nn_feat = nn[:, :, 0], nn_normals[:, :, 0], nn_feat[:, :, 0]
sd = torch.norm(pts - nn + 1e-8, dim=-1, p=2) * ((pts - nn) * nn_normals).sum(dim=-1).sign()
return sd, nn, nn_normals, nn_feat
def compute_signed_dst(pts, verts, faces, scale=1000):
"""
:param pts: (B, M, 3)
:param verts: (B, N, 3)
:param faces: (B, F, 3)
"""
B, M, _ = pts.shape
F = faces.shape[1]
pts = pts * scale
verts = verts * scale
meshes = Meshes(verts=verts, faces=faces)
pcls = Pointclouds(pts)
assert len(meshes) == B and len(pcls) == B
# packed representation for pointclouds
points = pcls.points_packed() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
max_points = pcls.num_points_per_cloud().max().item()
assert torch.allclose(points, pts.view(-1, 3))
# packed representation for faces
verts_packed = meshes.verts_packed()
faces_packed = meshes.faces_packed()
tris = verts_packed[faces_packed] # (T, 3, 3)
tris_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_tris = meshes.num_faces_per_mesh().max().item()
verts_normals_packed = meshes.verts_normals_packed()
faces_normals_packed = meshes.faces_normals_packed()
assert torch.allclose(verts_packed, verts.view(-1, 3)) and torch.allclose(faces_packed, faces.view(-1, 3))
    dists, idxs = _C.point_face_dist_forward(
        points, points_first_idx, tris, tris_first_idx, max_points, 1e-3
    )
pts_faces_normals = faces_normals_packed[idxs] # (P, 3)
pts_verts_normals = verts_normals_packed[faces_packed][idxs] # (P, 3, 3)
pts_tris = tris[idxs] # (P, 3, 3)
verts_normals = meshes.verts_normals_padded()
_, nn, _, nn_normals = proj_pts_to_mesh_sample(pts, verts, faces, verts_feat=verts_normals, n_sample=100000)
sd = dists.sqrt() * ((pts - nn) * nn_normals).sum(dim=-1).sign()
return sd.view(B, M) / scale
def scan_to_pred_errors(verts_scan, faces_scan, verts_pred, faces_pred):
"""
:param verts_scan: (B, N_s, 3)
:param faces_scan: (B, F_s, 3)
:param verts_pred: (B, N_p, 3)
:param faces_pred: (B, F_p, 3)
"""
B, N_s, _ = verts_scan.shape
N_p = verts_pred.shape[1]
assert verts_scan.shape == (B, N_s, 3) and verts_pred.shape == (B, N_p, 3)
meshes = Meshes(verts=verts_scan, faces=faces_scan)
normals_scan = meshes.verts_normals_padded()
meshes = Meshes(verts=verts_pred, faces=faces_pred)
normals_pred = meshes.verts_normals_padded()
assert normals_pred.shape == (B, N_p, 3)
sd_err, _, _, _, normals_proj = proj_pts_to_mesh(verts_scan, verts_pred, faces_pred, normals_pred)
cos_err = F.cosine_similarity(normals_scan, normals_proj, dim=-1)
assert sd_err.shape == (B, N_s) and cos_err.shape == (B, N_s)
return sd_err, cos_err
def proj_pts_to_uv(pts, verts, faces, verts_uv, faces_uv, uv_feat=None):
"""
:param pts: (B, M, 3)
:param verts: (B, N, 3)
:param faces: (B, F, 3)
:param verts_uv: (B, N_, 2)
:param faces_uv: (B, F, 3)
:param uv_feat: (B, C, H, W)
"""
B, M, _ = pts.shape
N = verts.shape[1]
F_ = faces.shape[1]
N_ = verts_uv.shape[1]
assert pts.shape == (B, M, 3) and verts.shape == (B, N, 3) and faces.shape == (B, F_, 3) and verts_uv.shape == (B, N_, 2) and faces_uv.shape == (B, F_, 3)
sd, v_proj, _, bary_w, _, pts_faces_idxs = proj_pts_to_mesh(pts, verts, faces, return_idxs=True)
pts_faces_idxs_packed = pts_faces_idxs.view(B * M,) # (P,)
verts_uv_ = torch.cat([verts_uv, torch.zeros_like(verts_uv[:, :, :1])], dim=-1) # (B, N_, 3)
meshes = Meshes(verts=verts_uv_, faces=faces_uv)
verts_packed = meshes.verts_packed()
faces_packed = meshes.faces_packed()
pts_verts_uv = verts_packed[faces_packed][pts_faces_idxs_packed][:, :, :2] # (P, 3, 2)
pts_uv = (pts_verts_uv * bary_w.view(B * M, 3, 1)).sum(dim=-2)
pts_uv = pts_uv.view(B, M, 1, 2)
_, C, H, W = uv_feat.shape
assert uv_feat.shape == (B, C, H, W)
# pts_feat = F.grid_sample(uv_feat, pts_uv, mode='bilinear', align_corners=False) # (B, C, M, 1)
grid_sample = MyGridSample.apply
pts_feat = grid_sample(pts_uv, uv_feat) # (B, C, M, 1)
assert pts_feat.shape == (B, C, M, 1)
pts_feat = pts_feat.permute(0, 2, 1, 3).squeeze(-1).contiguous()
assert pts_feat.shape == (B, M, C)
return sd, v_proj, pts_feat, pts_uv.view(B, M, 2)
def lbs(
betas,
pose,
v_template,
shapedirs,
posedirs,
J_regressor,
parents,
lbs_weights,
pose2rot: bool = True,
):
''' Performs Linear Blend Skinning with the given shape and pose parameters
Parameters
----------
betas : torch.tensor BxNB
The tensor of shape parameters
pose : torch.tensor Bx(J + 1) * 3
The pose parameters in axis-angle format
v_template torch.tensor BxVx3
The template mesh that will be deformed
    shapedirs : torch.tensor Vx3xNB
        The tensor of PCA shape displacements
posedirs : torch.tensor Px(V * 3)
The pose PCA coefficients
J_regressor : torch.tensor JxV
The regressor array that is used to calculate the joints from
the position of the vertices
parents: torch.tensor J
The array that describes the kinematic tree for the model
lbs_weights: torch.tensor N x V x (J + 1)
The linear blend skinning weights that represent how much the
rotation matrix of each part affects each vertex
pose2rot: bool, optional
Flag on whether to convert the input pose tensor to rotation
matrices. The default value is True. If False, then the pose tensor
should already contain rotation matrices and have a size of
Bx(J + 1)x9
    Returns
    -------
    verts: torch.tensor BxVx3
        The vertices of the mesh after applying the shape and pose
        displacements.
    joints: torch.tensor BxJx3
        The joints of the model
    A: torch.tensor Bx(J + 1)x4x4
        The per-joint rigid transformation matrices (also returned by this
        variant, unlike the stock smplx implementation)
    '''
batch_size = max(betas.shape[0], pose.shape[0])
device, dtype = betas.device, betas.dtype
# Add shape contribution
v_shaped = v_template + blend_shapes(betas, shapedirs)
# Get the joints
# NxJx3 array
J = vertices2joints(J_regressor, v_shaped)
# 3. Add pose blend shapes
# N x J x 3 x 3
ident = torch.eye(3, dtype=dtype, device=device)
if pose2rot:
rot_mats = batch_rodrigues(pose.view(-1, 3)).view(
[batch_size, -1, 3, 3])
pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
# (N x P) x (P, V * 3) -> N x V x 3
pose_offsets = torch.matmul(
pose_feature, posedirs).view(batch_size, -1, 3)
else:
pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
rot_mats = pose.view(batch_size, -1, 3, 3)
pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
posedirs).view(batch_size, -1, 3)
v_posed = pose_offsets + v_shaped
# 4. Get the global joint location
J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
# 5. Do skinning:
# W is N x V x (J + 1)
W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
# (N x V x (J + 1)) x (N x (J + 1) x 16)
num_joints = J_regressor.shape[0]
T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
.view(batch_size, -1, 4, 4)
homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
dtype=dtype, device=device)
v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
verts = v_homo[:, :, :3, 0]
return verts, J_transformed, A
# Classes -------------------------------------------------------------------------------------------------------
class CAPEJson():
"""
CAPE .bin Structure:
'subject'
'cloth_type'
'seqs'
for seq in seqs:
'id'
'seq_name': longlong_athletics_trial1
'frames'
for frame in frames:
'npz_path'
'smooth_mesh_path': New field!!!
"""
def __init__(self, bin_path=None):
self.data = None
if bin_path is not None:
self.load_bin_file(bin_path)
def load_bin_file(self, bin_path):
with open(bin_path, 'rb') as f:
self.data = pickle.load(f)
def dump_bin_file(self, bin_path):
with open(bin_path, 'wb') as f:
pickle.dump(self.data, f)
def append_frames(self, frames, npz_path):
frames.append({
'npz_path': npz_path
})
return frames
def append_seqs(self, seqs, seq_name, frames):
seqs.append({
'id': len(seqs),
'seq_name': seq_name,
'frames': frames
})
return seqs
def set_data(self, subject, cloth_type, seqs):
self.data = {
'subject': subject,
'cloth_type': cloth_type,
'seqs': seqs
}
def num_of_seqs(self):
return len(self.data['seqs'])
def num_of_frames(self):
count = 0
for seq in self.data['seqs']:
count += len(seq['frames'])
return count
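# Minimal sketch (illustration only; the subject id and npz paths are placeholders):
# assemble an index for one sequence with two frames and query its counters.
def _example_capejson():
    cj = CAPEJson()
    frames = cj.append_frames([], 'seq/frame_0001.npz')
    frames = cj.append_frames(frames, 'seq/frame_0002.npz')
    seqs = cj.append_seqs([], 'longlong_athletics_trial1', frames)
    cj.set_data('00000', 'longlong', seqs)
    assert cj.num_of_seqs() == 1 and cj.num_of_frames() == 2
    return cj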
class MySMPL(smplx.SMPLLayer):
def __init__(
self, model_path: str,
kid_template_path: str = '',
data_struct = None,
create_betas: bool = True,
betas = None,
num_betas: int = 10,
create_global_orient: bool = True,
global_orient = None,
create_body_pose: bool = True,
body_pose = None,
create_transl: bool = True,
transl = None,
dtype=torch.float32,
batch_size: int = 1,
joint_mapper=None,
gender: str = 'neutral',
age: str = 'adult',
vertex_ids = None,
v_template = None,
**kwargs
) -> None:
super().__init__(model_path=model_path, kid_template_path=kid_template_path, data_struct=data_struct, betas=betas, num_betas=num_betas,
global_orient=global_orient, body_pose=body_pose, transl=transl, dtype=dtype, batch_size=batch_size, joint_mapper=joint_mapper,
gender=gender, age=age, vertex_ids=vertex_ids, v_template=v_template, **kwargs)
self.register_buffer('pose_cano', torch.zeros((1, self.NUM_BODY_JOINTS * 3), dtype=dtype))
self.faces = self.faces_tensor
def forward(
self,
poses,
betas = None,
body_pose = None,
global_orient = None,
transl = None,
return_verts=True,
return_full_pose: bool = False,
pose2rot: bool = True,
**kwargs
) -> SMPLOutput:
''' Forward pass for the SMPL model
Parameters
----------
        poses: torch.tensor, shape Bx75
            Concatenation of [transl (3), global_orient (3), body_pose (69)].
            This variant derives all pose inputs from this tensor; the
            keyword arguments below are kept for interface compatibility
            and are overwritten internally.
        global_orient: torch.tensor, optional, shape Bx3
            If given, ignore the member variable and use it as the global
            rotation of the body. Useful if someone wishes to predict this
            with an external model. (default=None)
        betas: torch.tensor, optional, shape BxN_b
            If given, ignore the member variable `betas` and use it
            instead. For example, it can be used if shape parameters
            `betas` are predicted from some external model.
            (default=None)
        body_pose: torch.tensor, optional, shape Bx(J*3)
            If given, ignore the member variable `body_pose` and use it
            instead. For example, it can be used if the pose of the body
            joints is predicted from some external model. It should be a
            tensor that contains joint rotations in axis-angle format.
            (default=None)
        transl: torch.tensor, optional, shape Bx3
            If given, ignore the member variable `transl` and use it
            instead. For example, it can be used if the translation
            `transl` is predicted from some external model.
            (default=None)
return_verts: bool, optional
Return the vertices. (default=True)
return_full_pose: bool, optional
Returns the full axis-angle pose vector (default=False)
        Returns
        -------
        output: SMPLOutput
            Contains the posed vertices and joints; the per-joint rigid
            transforms are attached as `output.A`.
        '''
transl, global_orient, body_pose = poses[:, :3], poses[:, 3:6], poses[:, 6:]
apply_trans = True
full_pose = torch.cat([global_orient, body_pose], dim=1)
batch_size = poses.shape[0]
betas = torch.zeros([batch_size, self.num_betas], dtype=self.dtype, device=poses.device)
vertices, joints, A = lbs(betas, full_pose, self.v_template,
self.shapedirs, self.posedirs,
self.J_regressor, self.parents,
self.lbs_weights, pose2rot=pose2rot)
joints = self.vertex_joint_selector(vertices, joints)
# Map the joints to the current dataset
if self.joint_mapper is not None:
joints = self.joint_mapper(joints)
if apply_trans:
joints += transl.unsqueeze(dim=1)
vertices += transl.unsqueeze(dim=1)
output = SMPLOutput(vertices=vertices if return_verts else None,
global_orient=global_orient,
body_pose=body_pose,
joints=joints,
betas=betas,
full_pose=full_pose if return_full_pose else None)
output.A = A
return output
@classmethod
def compute_poses_quat(cls, poses):
"""
:param poses: (B, 69)
"""
B, _ = poses.shape
J = cls.NUM_BODY_JOINTS
poses = poses.view(B, J, 3)
poses_quat = axis_angle_to_quaternion(poses)
assert poses_quat.shape == (B, J, 4)
return poses_quat
SMPL_JOINT_NAMES = [
'pelvis',
'left_hip',
'right_hip',
'spine1',
'left_knee',
'right_knee',
'spine2',
'left_ankle',
'right_ankle',
'spine3',
'left_foot',
'right_foot',
'neck',
'left_collar',
'right_collar',
'head',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hand',
'right_hand'
]
def batched_gradient(features):
"""
Compute gradient of a batch of feature maps
:param features: a 3D tensor for a batch of feature maps, dim: (N, C, H, W)
    :return: gradient maps of input features, dim: (N, 2*C, H, W); the border rows and columns are replicate-padded
(N, 0:C, H, W) = dI/dx, (N, C:2C, H, W) = dI/dy
"""
H = features.size(-2)
W = features.size(-1)
C = features.size(1)
N = features.size(0)
grad_x = (features[:, :, :, 2:] - features[:, :, :, :W - 2]) / 2.0
grad_x = F.pad(grad_x, (1, 1, 0, 0), mode='replicate')
grad_y = (features[:, :, 2:, :] - features[:, :, :H - 2, :]) / 2.0
grad_y = F.pad(grad_y, (0, 0, 1, 1), mode='replicate')
grad = torch.cat([grad_x.view(N, C, H, W), grad_y.view(N, C, H, W)], dim=1)
return grad
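# Sanity sketch (illustrative): for a horizontal ramp I(x, y) = x the central
# difference dI/dx is 1 everywhere (replicate padding keeps the borders at 1)
# and dI/dy is 0.
def _example_batched_gradient():
    H = W = 5
    ramp = torch.arange(W, dtype=torch.float32).view(1, 1, 1, W).expand(1, 1, H, W).contiguous()
    grad = batched_gradient(ramp)  # (1, 2, H, W)
    assert torch.allclose(grad[:, 0], torch.ones(1, H, W))
    assert torch.allclose(grad[:, 1], torch.zeros(1, H, W))
    return grad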
class MyGridSample(torch.autograd.Function):
@staticmethod
def forward(ctx, grid, feat):
vert_feat = F.grid_sample(feat, grid, mode='bilinear', padding_mode='zeros', align_corners=True).detach()
ctx.save_for_backward(feat, grid)
return vert_feat
@staticmethod
def backward(ctx, grad_output):
feat, grid = ctx.saved_tensors
# Gradient for grid
N, C, H, W = feat.shape
_, Hg, Wg, _ = grid.shape
feat_grad = batched_gradient(feat) # dim: (N, 2*C, H, W)
grid_grad = F.grid_sample(feat_grad, grid, mode='bilinear', padding_mode='zeros', align_corners=True) # dim: (N, 2*C, Hg, Wg)
grid_grad = grid_grad.view(N, 2, C, Hg, Wg).permute(0, 3, 4, 2, 1).contiguous() # dim: (N, Hg, Wg, C, 2)
grad_output_perm = grad_output.permute(0, 2, 3, 1).contiguous() # dim: (N, Hg, Wg, C)
grid_grad = torch.bmm(grad_output_perm.view(N * Hg * Wg, 1, C),
grid_grad.view(N * Hg * Wg, C, 2)).view(N, Hg, Wg, 2)
grid_grad[:, :, :, 0] = grid_grad[:, :, :, 0] * (W - 1) / 2
grid_grad[:, :, :, 1] = grid_grad[:, :, :, 1] * (H - 1) / 2
# Gradient for feat
feat_d = feat.detach()
feat_d.requires_grad = True
grid_d = grid.detach()
grid_d.requires_grad = True
with torch.enable_grad():
vert_feat = F.grid_sample(feat_d, grid_d, mode='bilinear', padding_mode='zeros', align_corners=True)
vert_feat.backward(grad_output.detach())
feat_grad = feat_d.grad
return grid_grad, feat_grad
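# Sketch (illustrative): the custom Function matches F.grid_sample exactly in
# the forward pass; only its backward differs, using the finite-difference
# image gradients from batched_gradient for d(output)/d(grid).
def _example_my_grid_sample():
    feat = torch.randn(2, 8, 16, 16)
    grid = torch.rand(2, 4, 4, 2) * 2 - 1  # normalized sample locations in [-1, 1]
    out = MyGridSample.apply(grid, feat)
    ref = F.grid_sample(feat, grid, mode='bilinear', padding_mode='zeros', align_corners=True)
    assert torch.allclose(out, ref)
    return out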
|
AutoAvatar-main
|
utils/CAPE.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules import padding
from torch.nn.modules.module import Module
class Embedder(nn.Module):
def __init__(self, nch, n_freq):
super().__init__()
self.nch = nch
self.n_freq = n_freq
self.out_ch = nch
self.freq_fn = [(1, lambda x: x)]
for i in range(n_freq):
for fn in [torch.sin, torch.cos]:
self.freq_fn.append((2 ** i, fn))
self.out_ch += nch
def forward(self, x):
out = torch.cat([fn(x * freq) for freq, fn in self.freq_fn], dim=-1)
assert out.shape[-1] == self.out_ch
return out
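# Sketch (illustrative): NeRF-style positional encoding. The output keeps the
# identity channels and appends sin/cos at n_freq octaves, so
# out_ch == nch * (1 + 2 * n_freq).
def _example_embedder():
    emb = Embedder(nch=3, n_freq=4)
    y = emb(torch.randn(5, 3))
    assert y.shape == (5, emb.out_ch) and emb.out_ch == 3 * (1 + 2 * 4)
    return y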
class MLP(nn.Module):
def __init__(self, nchs, skips, act, w_norm, act_last, w_norm_last, init_zero_last=False):
super().__init__()
self.nchs = copy.deepcopy(nchs)
self.skips = copy.deepcopy(skips)
self.mlp = nn.ModuleList()
for i in range(len(nchs) - 1):
in_ch = nchs[i] if i not in skips else nchs[i] + nchs[0]
out_ch = nchs[i + 1]
if i < len(nchs) - 2:
layer = nn.utils.weight_norm(nn.Linear(in_ch, out_ch)) if w_norm else nn.Linear(in_ch, out_ch)
else:
assert i == len(nchs) - 2
layer = nn.utils.weight_norm(nn.Linear(in_ch, out_ch)) if w_norm_last else nn.Linear(in_ch, out_ch)
if init_zero_last:
torch.nn.init.zeros_(layer.weight)
if hasattr(layer, 'bias') and layer.bias is not None:
torch.nn.init.zeros_(layer.bias)
self.mlp.append(layer)
if act == 'softplus':
self.act = nn.Softplus(beta=100, threshold=20)
elif act == 'linear':
self.act = nn.Identity()
else:
            raise NotImplementedError('Unsupported activation type \'%s\'!' % act)
if act_last == 'softplus':
self.act_last = nn.Softplus(beta=100, threshold=20)
elif act_last == 'linear':
self.act_last = nn.Identity()
else:
            raise NotImplementedError('Unsupported activation type \'%s\'!' % act_last)
def forward(self, x):
x_ = x
for i in range(len(self.mlp)):
if i in self.skips:
x_ = torch.cat([x_, x], dim=-1)
x_ = self.mlp[i](x_)
x_ = self.act(x_) if i < len(self.mlp) - 1 else self.act_last(x_)
return x_
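# Sketch (illustrative): a small softplus MLP with a skip connection feeding
# the raw input back in at layer 2, in the style of IGR/SDF decoders.
def _example_mlp():
    mlp = MLP(nchs=[3, 64, 64, 1], skips=[2], act='softplus', w_norm=True,
              act_last='linear', w_norm_last=False)
    return mlp(torch.randn(10, 3))  # (10, 1)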
class Conv2dBias(nn.Conv2d):
def __init__(self, in_ch, out_ch, kernel_size, size, stride, padding, use_bias=True, *args, **kwargs):
super().__init__(in_ch, out_ch, bias=False, kernel_size=kernel_size, stride=stride, padding=padding, *args, **kwargs)
self.use_bias = use_bias
if self.use_bias:
self.register_parameter('bias', nn.Parameter(torch.zeros(1, out_ch, size, size), requires_grad=True))
def forward(self, x):
out = F.conv2d(x, self.weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
if self.use_bias:
out = out + self.bias
return out
class ConvDownBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
assert size % 2 == 0
self.conv1 = Conv2dBias(in_ch, in_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv2dBias(in_ch, out_ch, kernel_size=kernel_size, size=size//2, stride=2, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=2, padding=0, bias=False)
def forward(self, x):
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
class ConvUpBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
assert size % 2 == 0
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
self.conv1 = Conv2dBias(in_ch, out_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv2dBias(out_ch, out_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
x = self.upsample(x)
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
class ConvBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
assert size % 2 == 0
self.conv1 = Conv2dBias(in_ch, in_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv2dBias(in_ch, out_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
class Conv1dBias(nn.Conv1d):
def __init__(self, in_ch, out_ch, kernel_size, size, stride, padding, use_bias=True, *args, **kwargs):
super().__init__(in_ch, out_ch, bias=False, kernel_size=kernel_size, stride=stride, padding=padding, *args, **kwargs)
self.use_bias = use_bias
if self.use_bias:
self.register_parameter('bias', nn.Parameter(torch.zeros(1, out_ch, size), requires_grad=True))
def forward(self, x):
out = F.conv1d(x, self.weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
if self.use_bias:
out = out + self.bias
return out
class Conv1dDownBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
# assert size % 2 == 0
size_half = size // 2 if size % 2 == 0 else (size + 1) // 2
self.conv1 = Conv1dBias(in_ch, in_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv1dBias(in_ch, out_ch, kernel_size=kernel_size, size=size_half, stride=2, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv1d(in_ch, out_ch, kernel_size=1, stride=2, padding=0, bias=False)
def forward(self, x):
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
class Conv1dBlock(nn.Module):
def __init__(self, in_ch, out_ch, size, kernel_size=3, padding=1):
super().__init__()
assert size % 2 == 0
self.conv1 = Conv1dBias(in_ch, in_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.conv2 = Conv1dBias(in_ch, out_ch, kernel_size=kernel_size, size=size, stride=1, padding=padding, use_bias=True)
self.lrelu = nn.LeakyReLU(0.2)
self.conv_skip = nn.Conv1d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
x_skip = self.conv_skip(x)
x_ = self.conv1(x)
x_ = self.lrelu(x_)
x_ = self.conv2(x_)
x_ = self.lrelu(x_)
out = x_ + x_skip
return out
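# Sketch (illustrative): the residual blocks halve (down) and double (up) the
# spatial size. `size` must match the resolution at which the biased convs run:
# the input size for down blocks, the upsampled size for up blocks, because
# Conv2dBias learns an untied per-pixel bias of that exact shape.
def _example_conv_blocks():
    x = torch.randn(2, 16, 32, 32)
    h = ConvDownBlock(16, 32, size=32)(x)  # (2, 32, 16, 16)
    y = ConvUpBlock(32, 16, size=32)(h)    # (2, 16, 32, 32)
    assert h.shape == (2, 32, 16, 16) and y.shape == (2, 16, 32, 32)
    return y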
|
AutoAvatar-main
|
models/nets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
import copy
import shutil, inspect
import torch
import torch.nn.functional as F
import torch.optim as optim
import pytorch_lightning as pl
from pytorch3d.io import save_ply
import utils.CAPE as cape_utils
from utils.implicit import reconstruction
from models.std.nets import DynNet
import models.std.visual as visual
class Implicit_Trainbox(pl.LightningModule):
def __init__(self, args, log_dir, resolution, recurrent=True, eval_frames=None):
super().__init__()
self.args = copy.deepcopy(args)
self.log_dir = log_dir
self.resolution = resolution
self.recurrent = recurrent
self.eval_frames = eval_frames
if not os.path.exists(log_dir):
os.mkdir(log_dir)
if not os.path.exists(os.path.join(log_dir, 'ckpt')):
os.mkdir(os.path.join(log_dir, 'ckpt'))
if not os.path.exists(os.path.join(log_dir, 'net_def')):
os.mkdir(os.path.join(log_dir, 'net_def'))
if not os.path.exists(os.path.join(log_dir, 'mesh')):
os.mkdir(os.path.join(log_dir, 'mesh'))
shutil.copy(os.path.realpath(__file__), os.path.join(log_dir, 'net_def'))
shutil.copy(inspect.getfile(DynNet), os.path.join(log_dir, 'net_def'))
self.dyn_net = DynNet(args)
self.itr = 0
def save_ckpt(self):
torch.save(self.dyn_net.state_dict(), os.path.join(self.log_dir, 'ckpt', 'dyn_net_%06d.pth' % self.itr))
def load_ckpt(self, itr, log_dir):
self.dyn_net.load_state_dict(torch.load(os.path.join(log_dir, 'ckpt', 'dyn_net_%06d.pth' % itr), map_location='cpu'))
def preprocess(self, batch):
verts_detail, faces_detail, verts_smt, faces_smt, poses = batch['verts_detail'], batch['faces_detail'], batch['verts_smt'], batch['faces_smt'], batch['poses']
B, T, _ = poses.shape
N = self.dyn_net.smpl_model.v_template.shape[0]
verts_smpl = self.dyn_net.smpl_model(poses.view(B * T, 75)).vertices.view(B, T, N, 3)
return verts_detail, faces_detail, verts_smt, faces_smt, poses, verts_smpl
def train_or_valid_step(self, batch, batch_idx, is_train):
verts_detail_all, faces_detail_all, verts_smt_all, faces_smt_all, poses_all, verts_smpl_all = self.preprocess(batch)
T = self.args['model']['n_batch_frames']
T_hist = self.args['model']['n_hist_frames']
T_next = T - T_hist
K = self.args['model']['ob_vals'][-1]
assert T_next == 1
sd_errs = []
cos_errs = []
obsdf_rollout = None
loss_surf_sdf = 0
loss_surf_grad = 0
loss_igr = 0
loss_o = 0
end_idx = self.args['train']['n_rollout']
if self.eval_frames is not None and batch_idx + T_hist == self.eval_frames[0]:
end_idx = poses_all.shape[1] - T + 1
for i in range(end_idx):
verts_detail = verts_detail_all[i:i+T]
faces_detail = faces_detail_all[i:i+T]
verts_smt = verts_smt_all[i:i+T]
faces_smt = faces_smt_all[i:i+T]
poses = poses_all[:, i:i+T]
verts_smpl = verts_smpl_all[:, i:i+T]
N = verts_smpl.shape[2]
B = poses.shape[0]
if poses.shape[1] < T:
break
verts = verts_smt
faces = faces_smt
verts_gt = verts_smt if not self.args['model']['use_detail'] else verts_detail
faces_gt = faces_smt if not self.args['model']['use_detail'] else faces_detail
bbmin = verts_smpl.min(dim=2)[0] - 0.1
bbmax = verts_smpl.max(dim=2)[0] + 0.1
surf_pts, surf_normals, igr_pts, bbox_pts, rand_pts = cape_utils.sample_igr_pts(verts_gt[-1], faces_gt[-1], bbmin[:, -1], bbmax[:, -1], self.args)
if self.args['model']['stage'] == 'shape_enc_dec':
obsdf, _ = self.dyn_net.shapes_to_obsdf(verts[-1], poses[:, -1], mode='meshes', faces=faces[-1])
assert obsdf.shape == (B, N, K)
obsdf = obsdf[:, None]
if self.args['model']['stage'] == 'auto_regr':
if obsdf_rollout is None:
obsdf = [self.dyn_net.shapes_to_obsdf(verts[j], poses[:, j], mode='meshes', faces=faces[j])[0] for j in range(T_hist)]
obsdf = torch.stack(obsdf, dim=1)
assert obsdf.shape == (B, T_hist, N, K)
else:
obsdf = obsdf_rollout.detach()
shapes = self.dyn_net(obsdf, poses)
if i + 1 < end_idx and self.recurrent:
with torch.no_grad():
obsdf_new, _ = self.dyn_net.shapes_to_obsdf(shapes, poses[:, -1], mode='nets')
obsdf_rollout = torch.cat([obsdf[:, 1:], obsdf_new[:, None]], dim=1).detach()
assert obsdf_rollout.shape == (B, T_hist, N, K)
if self.eval_frames is None:
# Losses
surf_sdf, surf_sdf_grad = self.dyn_net.query_sdf_with_grad(surf_pts, poses[:, -1], shapes)
rand_sdf, rand_sdf_grad = self.dyn_net.query_sdf_with_grad(rand_pts, poses[:, -1], shapes)
bbox_sdf = rand_sdf[:, self.args['train']['n_pts_scan_igr']:]
assert bbox_sdf.shape == (B, self.args['train']['n_pts_bbox_igr'])
loss_surf_sdf += surf_sdf.abs().mean() / self.args['train']['n_rollout']
loss_surf_grad += torch.norm(surf_sdf_grad - surf_normals, p=2, dim=-1).mean() / self.args['train']['n_rollout']
loss_igr += (torch.norm(rand_sdf_grad, p=2, dim=-1) - 1).pow(2).mean() / self.args['train']['n_rollout']
loss_o += torch.exp(-50.0 * torch.abs(bbox_sdf)).mean() / self.args['train']['n_rollout']
else:
out_dir = os.path.join(self.log_dir, 'mesh', 'batch_%06d' % batch_idx)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(os.path.join(out_dir, 'gt')):
os.mkdir(os.path.join(out_dir, 'gt'))
if not os.path.exists(os.path.join(out_dir, 'pred')):
os.mkdir(os.path.join(out_dir, 'pred'))
if i == 0:
for j in range(T_hist):
save_ply(os.path.join(out_dir, 'gt', 'gt_%06d.ply' % j), verts_gt[j][0], faces_gt[j][0])
save_ply(os.path.join(out_dir, 'gt', 'gt_%06d.ply' % (i + T_hist)), verts_gt[-1][0], faces_gt[-1][0])
out = reconstruction(self.dyn_net.query_sdf_nets, poses.device, None,
self.resolution, bbmin[0, -1].cpu().numpy(), bbmax[0, -1].cpu().numpy(),
use_octree=False, num_samples=4096, transform=None, thresh=0, texture_net = None, poses=poses[:1, -1], shapes=shapes[:1])
verts_out, faces_out = out
verts_out, faces_out = torch.from_numpy(verts_out).float().to(poses.device), torch.from_numpy(faces_out.astype(np.int32)).long().to(poses.device)
save_ply(os.path.join(out_dir, 'pred', 'pred_%06d.ply' % (i + T_hist)), verts_out, faces_out)
sd_err, cos_err = cape_utils.scan_to_pred_errors(verts_gt[-1], faces_gt[-1], verts_out[None], faces_out[None])
sd_errs.append(sd_err.cpu())
cos_errs.append(cos_err.cpu())
if self.eval_frames is not None:
with open(os.path.join(out_dir, 'errs.bin'), 'wb') as f:
pickle.dump({'sd_errs': sd_errs, 'cos_errs': cos_errs}, f)
visual.render_meshes(out_dir, start_i=T_hist)
os.system('bash models/std/videos.sh %s %s' % (out_dir, str(T_hist)))
loss = loss_surf_sdf + loss_surf_grad + loss_igr * self.args['train']['lambda_igr'] + loss_o * self.args['train']['lambda_o']
res_dict = {
'verts': verts,
'faces': faces,
'verts_gt': verts_gt,
'faces_gt': faces_gt,
'verts_smpl': verts_smpl,
'poses': poses,
'shapes': shapes,
'bbmin': bbmin,
'bbmax': bbmax,
'loss_surf_sdf': loss_surf_sdf,
'loss_surf_grad': loss_surf_grad,
'loss_igr': loss_igr,
'loss_o': loss_o,
'loss': loss
}
return res_dict
def training_step(self, batch, batch_idx):
res_dict = self.train_or_valid_step(batch, batch_idx, True)
# log
prefix = 'Train'
self.log('%s/loss' % prefix, res_dict['loss'])
self.log('%s/loss_surf_sdf' % prefix, res_dict['loss_surf_sdf'])
self.log('%s/loss_surf_grad' % prefix, res_dict['loss_surf_grad'])
self.log('%s/loss_igr' % prefix, res_dict['loss_igr'])
self.log('%s/loss_o' % prefix, res_dict['loss_o'])
# checkpoint
self.itr += 1
if self.itr % self.args['train']['ckpt_step'] == 0:
self.save_ckpt()
return res_dict['loss']
def validation_step(self, batch, batch_idx):
if self.eval_frames is not None and batch_idx + self.args['model']['n_hist_frames'] not in self.eval_frames:
return
res_dict = self.train_or_valid_step(batch, batch_idx, False)
# log
prefix = 'Valid'
self.log('%s/loss' % prefix, res_dict['loss'])
self.log('%s/loss_surf_sdf' % prefix, res_dict['loss_surf_sdf'])
self.log('%s/loss_surf_grad' % prefix, res_dict['loss_surf_grad'])
self.log('%s/loss_igr' % prefix, res_dict['loss_igr'])
self.log('%s/loss_o' % prefix, res_dict['loss_o'])
if self.eval_frames is None:
self.compute_meshes(res_dict, batch, batch_idx)
    def configure_optimizers(self):
        # Both stages ('shape_enc_dec' and 'auto_regr') currently share the
        # same optimizer settings.
        optimizer = optim.Adam(self.dyn_net.parameters(), lr=self.args['train']['lr'])
        return optimizer
def compute_meshes(self, res_dict, batch, batch_idx):
verts, faces, verts_gt, faces_gt, verts_smpl, poses, shapes, bbmin, bbmax = res_dict['verts'], res_dict['faces'], res_dict['verts_gt'], res_dict['faces_gt'], \
res_dict['verts_smpl'], res_dict['poses'], res_dict['shapes'], res_dict['bbmin'], res_dict['bbmax']
T = self.args['model']['n_batch_frames']
T_hist = self.args['model']['n_hist_frames']
T_next = T - T_hist
if not os.path.exists(os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr)):
os.mkdir(os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr))
out_dir = os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr, 'batch_%06d' % batch_idx)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
for i in range(T_hist):
save_ply(os.path.join(out_dir, 'hist_%d.ply' % i), verts[i][0], faces[i][0])
save_ply(os.path.join(out_dir, 'gt.ply'), verts_gt[-1][0], faces_gt[-1][0])
out = reconstruction(self.dyn_net.query_sdf_nets, poses.device, None,
self.resolution, bbmin[0, -1].cpu().numpy(), bbmax[0, -1].cpu().numpy(),
use_octree=False, num_samples=4096, transform=None, thresh=0, texture_net = None, poses=poses[:1, -1], shapes=shapes[:1])
if out != -1:
verts_out, faces_out = out
save_ply(os.path.join(out_dir, 'pred.ply'),
torch.from_numpy(verts_out).float().contiguous(), torch.from_numpy(faces_out.astype(np.int32)).contiguous().long())
def test_step(self, batch, batch_idx):
self.validation_step(batch, batch_idx)
|
AutoAvatar-main
|
models/std/trainbox.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.io import load_ply
from pytorch3d.renderer import look_at_view_transform
from utils.render import render_mesh
def render_meshes(data_dir, start_i=3, gpu_id=0, simplify_mesh=True):
if not os.path.exists(os.path.join(data_dir, 'gt_imgs')):
os.mkdir(os.path.join(data_dir, 'gt_imgs'))
if not os.path.exists(os.path.join(data_dir, 'pred_imgs')):
os.mkdir(os.path.join(data_dir, 'pred_imgs'))
# if not os.path.exists(os.path.join(data_dir, 'pred_cano_imgs')):
# os.mkdir(os.path.join(data_dir, 'pred_cano_imgs'))
if not os.path.exists(os.path.join(data_dir, 'errs_imgs')):
os.mkdir(os.path.join(data_dir, 'errs_imgs'))
# pred_names = sorted(os.listdir(os.path.join(data_dir, 'pred_cano')))
# for i, pred_name in enumerate(pred_names):
# verts, faces = load_ply(os.path.join(data_dir, 'pred_cano', pred_name))
# if i == 0:
# center = verts.median(dim=0)[0]
# t = center.clone()
# t[2] += 9
# R, t = look_at_view_transform(eye=t[None], at=center[None])
# image = render_mesh(verts.cuda(gpu_id), faces.cuda(gpu_id), R[0].cuda(gpu_id), t[0].cuda(gpu_id), 9, simplify_mesh=simplify_mesh)
# plt.imsave(os.path.join(data_dir, 'pred_cano_imgs', '%06d.jpg' % (i + start_i)), image.cpu().numpy())
pred_names = sorted(os.listdir(os.path.join(data_dir, 'pred')))
for i, pred_name in enumerate(pred_names):
verts, faces = load_ply(os.path.join(data_dir, 'pred', pred_name))
if i == 0:
center = verts.median(dim=0)[0]
t = center.clone()
t[2] += 9
R, t = look_at_view_transform(eye=t[None], at=center[None])
image = render_mesh(verts.cuda(gpu_id), faces.cuda(gpu_id), R[0].cuda(gpu_id), t[0].cuda(gpu_id), 9, simplify_mesh=simplify_mesh)
plt.imsave(os.path.join(data_dir, 'pred_imgs', '%06d.jpg' % (i + start_i)), image.cpu().numpy())
gt_names = sorted(os.listdir(os.path.join(data_dir, 'gt')))
with open(os.path.join(data_dir, 'errs.bin'), 'rb') as f:
data = pickle.load(f)
sd_errs, cos_errs = data['sd_errs'], data['cos_errs']
for i, gt_name in enumerate(gt_names):
verts, faces = load_ply(os.path.join(data_dir, 'gt', gt_name))
image = render_mesh(verts.cuda(gpu_id), faces.cuda(gpu_id), R[0].cuda(gpu_id), t[0].cuda(gpu_id), 9)
plt.imsave(os.path.join(data_dir, 'gt_imgs', '%06d.jpg' % i), image.cpu().numpy())
if i < start_i:
continue
sd_err = sd_errs[i - start_i][0]
assert sd_err.shape == (verts.shape[0],)
max_dst = 0.1
sd_err_nc = (sd_err / max_dst).clip(min=-1, max=1)
colors = torch.zeros((verts.shape[0], 3))
colors[sd_err_nc < 0] = (1 - sd_err_nc[sd_err_nc < 0].abs())[:, None] * torch.tensor([1, 1, 1])[None] + \
sd_err_nc[sd_err_nc < 0].abs()[:, None] * torch.tensor([1, 0, 0])[None]
colors[sd_err_nc >= 0] = (1 - sd_err_nc[sd_err_nc >= 0])[:, None] * torch.tensor([1, 1, 1])[None] + \
sd_err_nc[sd_err_nc >= 0][:, None] * torch.tensor([0, 1, 1])[None]
image = render_mesh(verts.cuda(gpu_id), faces.cuda(gpu_id), R[0].cuda(gpu_id), t[0].cuda(gpu_id), 9, colors=colors.cuda(gpu_id))
plt.imsave(os.path.join(data_dir, 'errs_imgs', '%06d.jpg' % i), image.cpu().numpy())
|
AutoAvatar-main
|
models/std/visual.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from pytorch3d.ops import knn_points, knn_gather
from pytorch3d.transforms import axis_angle_to_matrix
from pytorch3d.transforms.rotation_conversions import matrix_to_axis_angle
import utils.CAPE as cape_utils
from utils.render import *
from models.nets import *
class DynNet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.smpl_model = cape_utils.load_smpl(args)
self.register_buffer('v_template', self.smpl_model.v_template, persistent=False)
self.register_buffer('faces', self.smpl_model.faces, persistent=False)
mask_ids = ['left_wrist', 'right_wrist', 'left_hand', 'right_hand', 'left_ankle', 'right_ankle', 'left_foot', 'right_foot', 'head']
mask_ids = [cape_utils.SMPL_JOINT_NAMES.index(e) for e in mask_ids]
head_hands_feet_mask = self.smpl_model.lbs_weights[:, mask_ids].sum(dim=-1) # (N,)
head_hands_feet_mask[head_hands_feet_mask < 2e-2] = 0
head_hands_feet_mask = (head_hands_feet_mask * 10).clip(max=1)
self.register_buffer('head_hands_feet_mask', head_hands_feet_mask, persistent=False)
W = cape_utils.compute_adjacent_matrix(self.smpl_model.parents, 4)
self.register_buffer('W', W, persistent=False) # (J + 1, J)
data = np.load(args['data']['uv_info'])
verts_uv, faces_uv, v2uv = torch.from_numpy(data['verts_uv']), torch.from_numpy(data['faces_uv']).long(), torch.from_numpy(data['v2uv']).long()
self.geo_fn = UVRender(args, verts_uv, faces_uv, v2uv)
self.register_buffer('head_hands_feet_mask_uv', self.geo_fn.to_uv(head_hands_feet_mask[None, :, None].cuda()), persistent=False)
data = np.load(args['data']['resample_idxs_path'])
self.resample_idxs = data['idxs']
self.shape_enc_dec = ShapeEncDec(args)
if args['model']['stage'] == 'auto_regr':
self.dynamics_net = DynamicsNet(args)
def compute_poses_feat(self, poses):
"""
:param poses: (B, 69)
"""
B = poses.shape[0]
J = self.smpl_model.NUM_BODY_JOINTS
N = self.smpl_model.get_num_verts()
assert poses.shape == (B, 69)
poses_quat = self.smpl_model.compute_poses_quat(poses) # (B, J, 4)
assert poses_quat.shape == (B, J, 4)
lbs_w = self.smpl_model.lbs_weights[None].expand(B, N, J + 1)
lbs_w = torch.einsum('bvj,jl->bvl', lbs_w, self.W)
assert lbs_w.shape == (B, N, J)
poses_feat = poses_quat[:, None] * lbs_w[..., None]
assert poses_feat.shape == (B, N, J, 4)
return poses_feat
def normalize_sd_delta(self, sd_delta):
sd_delta_nc = torch.sign(sd_delta) * (sd_delta.abs() * 1000 + 1).log() * 0.25
return sd_delta_nc
def normalize_globalRt(self, pts, poses):
"""
:param pts: (B, M, 3)
:param poses: (B, 75)
"""
B, M, _ = pts.shape
assert poses.shape == (B, 75)
smpl_out = self.smpl_model(poses)
root_T_inv = torch.linalg.inv(smpl_out.A[:, 0]) # (B, 4, 4)
pts_nc = pts - poses[:, None, :3]
pts_nc_homo = torch.ones((B, M, 1), dtype=torch.float, device=pts.device)
pts_nc_homo = torch.cat([pts_nc, pts_nc_homo], dim=-1)
pts_nc = torch.bmm(root_T_inv, pts_nc_homo.transpose(-2, -1)).transpose(-2, -1)[..., :3].contiguous()
assert pts_nc.shape == (B, M, 3)
return pts_nc
def query_sdf_nets(self, pts, poses, shapes, force_coarse=False):
"""
:param pts: (B, M, 3)
:param poses: (B, 75)
:param shapes: (B, N, C)
"""
B, M, _ = pts.shape
_, N, C = shapes.shape
assert poses.shape == (B, 75) and shapes.shape == (B, N, C) and N == self.smpl_model.get_num_verts()
verts = self.smpl_model(poses).vertices
assert verts.shape == (B, N, 3)
# Normalize global Rt
verts = self.normalize_globalRt(verts, poses)
pts = self.normalize_globalRt(pts, poses)
# MLP decode
# SMPL resample
meshes = Meshes(verts=verts, faces=self.faces[None].expand(B, -1, -1))
normals = meshes.verts_normals_padded()
assert normals.shape == (B, N, 3)
verts_ori = verts.clone()
shapes_ori = shapes.clone()
verts = verts[:, self.resample_idxs]
normals = normals[:, self.resample_idxs]
shapes = shapes[:, self.resample_idxs]
N_ = verts.shape[1]
assert verts.shape == (B, N_, 3) and normals.shape == (B, N_, 3) and shapes.shape == (B, N_, C)
# KNN
K = 20
C_s = 64
C_ = 128
_, idx, pts_nn = knn_points(pts, verts, K=K, return_nn=True)
assert torch.allclose(pts_nn, knn_gather(verts, idx))
normals_nn = knn_gather(normals, idx)
shapes_nn = knn_gather(shapes, idx)
assert pts_nn.shape == (B, M, K, 3) and normals_nn.shape == (B, M, K, 3) and shapes_nn.shape == (B, M, K, C)
pts_nn = pts_nn - pts[:, :, None]
# Proj pts to mesh
_, pts_proj, _, _, shapes_proj = cape_utils.proj_pts_to_mesh(pts, verts_ori, self.faces[None].expand(B, -1, -1).contiguous(), shapes_ori)
assert pts_proj.shape == (B, M, 3) and shapes_proj.shape == (B, M, C)
pts_proj = pts_proj - pts
# Aggregate
feat_nn = self.shape_enc_dec.pts_mlp(
torch.cat([
self.shape_enc_dec.pts_emb(pts_nn.view(B * M * K, 3)),
self.shape_enc_dec.pts_emb(normals_nn.view(B * M * K, 3)),
shapes_nn.view(B * M * K, C)[:, :C_s]
], dim=-1)
).view(B, M, K, C_)
feat_proj = self.shape_enc_dec.proj_pts_mlp(
torch.cat([
self.shape_enc_dec.pts_emb(pts_proj.view(B * M, 3)),
shapes_proj.view(B * M, C)[:, :C_s]
], dim=-1)
).view(B, M, 1, C_)
feat = torch.cat([feat_nn, feat_proj], dim=-2)
assert feat.shape == (B, M, K + 1, C_)
w = self.shape_enc_dec.weights_fc(feat.view(B * M * (K + 1), C_)).view(B, M, K + 1, 1)
w = torch.softmax(w, dim=-2)
feat = (feat * w).sum(dim=-2)
assert feat.shape == (B, M, C_)
sdf = self.shape_enc_dec.sdf_mlp(feat.view(B * M, C_)).view(B, M)
return sdf
def compute_obpts(self, poses):
"""
:param poses: (B, 75)
"""
B = poses.shape[0]
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
verts_smpl = self.smpl_model(poses).vertices
meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B, -1, -1))
normals_smpl = meshes.verts_normals_padded() # (B, N, 3)
offset = torch.linspace(*self.args['model']['ob_vals'], device=poses.device)[None, None, :, None] * normals_smpl[:, :, None, :] # (B, N, K, 3)
obpts = offset + verts_smpl[:, :, None]
return obpts
def shapes_to_obsdf(self, shapes, poses, mode='nets', faces=None):
"""
:param shapes: (B, N, C)
:param poses: (B, 75)
"""
B = poses.shape[0]
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
C = shapes.shape[-1]
assert poses.shape == (B, 75) and shapes.shape[0] == B
# Compute observer pts
obpts = self.compute_obpts(poses)
assert obpts.shape == (B, N, K, 3)
# Query sdf
if mode == 'meshes':
assert C == 3 and faces is not None
sdf, _, _, _, _ = cape_utils.proj_pts_to_mesh(obpts.view(B, N * K, 3), shapes, faces)
sdf = sdf.view(B, N, K)
elif mode == 'nets':
assert shapes.shape == (B, N, C)
sdf = self.query_sdf_nets(obpts.view(B, N * K, 3), poses, shapes, force_coarse=True)
sdf = sdf.view(B, N, K)
return sdf, obpts
def query_sdf_with_grad(self, pts, poses, shapes):
B, M, _ = pts.shape
C = shapes.shape[-1]
N = self.smpl_model.get_num_verts()
assert pts.shape == (B, M, 3) and poses.shape == (B, 75) and shapes.shape == (B, N, C)
with torch.enable_grad():
pts.requires_grad_(True)
sdf = self.query_sdf_nets(pts, poses, shapes)
assert sdf.shape == (B, M)
sdf_grad = autograd.grad([sdf.sum()], [pts], retain_graph=True, create_graph=True)[0]
assert sdf_grad.shape == (B, M, 3)
return sdf, sdf_grad
def enc_shapes_to_sdf(self, obsdf, poses):
"""
:param obsdf: (B, T, N, K)
:param poses: (B, T, 75)
"""
B, T, _ = poses.shape
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
H = W = self.args['model']['uv_size']
assert obsdf.shape == (B, T, N, K) and poses.shape == (B, T, 75)
# Compute obpts_uv
obpts = self.compute_obpts(poses.view(B * T, 75))
assert obpts.shape == (B * T, N, K, 3)
obpts = self.normalize_globalRt(obpts.view(B * T, N * K, 3), poses.view(B * T, 75))
obpts_uv = self.geo_fn.to_uv(obpts.view(B * T, N, K * 3))
assert obpts_uv.shape == (B * T, K * 3, H, W)
# Compute obsdf_uv
obsdf_uv = self.geo_fn.to_uv(obsdf.view(B * T, N, K))
assert obsdf_uv.shape == (B * T, K, H, W)
# Net forward
in_feat = torch.cat([obpts_uv, obsdf_uv * 20], dim=1)
shapes_uv = self.shape_enc_dec.shape_enc(in_feat)
C = shapes_uv.shape[1]
feat_uv_ = shapes_uv * (1 - self.head_hands_feet_mask_uv) + self.shape_enc_dec.uv_bias * self.head_hands_feet_mask_uv
shapes = self.geo_fn.from_uv(feat_uv_)
assert shapes.shape == (B * T, N, C) and shapes_uv.shape == (B * T, C, H, W)
shapes = shapes.view(B, T, N, C)
shapes_uv = shapes_uv.view(B, T, C, H, W)
return shapes, shapes_uv
def forward(self, obsdf, poses):
"""
:param obsdf: (B, T, N, K)
:param poses: (B, T_, 75)
"""
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
H = W = self.args['model']['uv_size']
if self.args['model']['stage'] == 'shape_enc_dec':
B, T, _ = poses.shape
assert obsdf.shape == (B, T, N, K) and poses.shape == (B, T, 75) and T == 1
shapes, shapes_uv = self.enc_shapes_to_sdf(obsdf, poses)
shapes = shapes.squeeze(1)
C = shapes.shape[-1]
assert shapes.shape == (B, N, C)
elif self.args['model']['stage'] == 'auto_regr':
B, T_, _ = poses.shape
T = obsdf.shape[1]
J_ = self.smpl_model.NUM_BODY_JOINTS + 1
J = self.smpl_model.NUM_BODY_JOINTS
assert obsdf.shape == (B, T, N, K) and poses.shape == (B, T_, 75) and T_ - T == 1 and T == self.args['model']['n_hist_frames']
poses_ref = poses[:, -1:].expand(B, T_, 75).contiguous()
# Compute obpts_uv
obpts = self.compute_obpts(poses.view(B * T_, 75))
assert obpts.shape == (B * T_, N, K, 3)
obpts = self.normalize_globalRt(obpts.view(B * T_, N * K, 3), poses_ref.view(B * T_, 75))
obpts_uv = self.geo_fn.to_uv(obpts.view(B * T_, N, K * 3))
assert obpts_uv.shape == (B * T_, K * 3, H, W)
obpts_uv = obpts_uv.view(B, T_ * K * 3, H, W)
# Compute poses velocity
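            # Pose "velocity": translational delta plus the relative rotation
            # R_t @ R_{t-1}^{-1} between consecutive frames, mapped back to
            # axis-angle. (For rotation matrices the inverse equals the
            # transpose; torch.linalg.inv is used here and is equivalent.)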
poses_prev = poses[:, :-1].clone()
poses_last = poses[:, 1:].clone()
poses_vel = torch.zeros_like(poses_last)
assert poses_prev.shape == (B, T, 75) and poses_last.shape == (B, T, 75) and poses_vel.shape == (B, T, 75)
poses_vel[..., :3] = poses_last[..., :3] - poses_prev[..., :3]
rot_prev = axis_angle_to_matrix(poses_prev[..., 3:].reshape(B * T * J_, 3))
rot_last = axis_angle_to_matrix(poses_last[..., 3:].reshape(B * T * J_, 3))
rot_vel = torch.bmm(rot_last, torch.linalg.inv(rot_prev))
assert rot_vel.shape == (B * T * J_, 3, 3)
poses_vel[..., 3:] = matrix_to_axis_angle(rot_vel).view(B, T, J_ * 3)
poses_vel_feat = self.compute_poses_feat(poses_vel[..., 6:].reshape(B * T, 69))
assert poses_vel_feat.shape == (B * T, N, J, 4)
poses_vel_feat = torch.cat([poses_vel_feat.view(B * T, N, J * 4), poses_vel[..., :6].reshape(B * T, 1, 6).expand(B * T, N, 6)], dim=-1)
assert poses_vel_feat.shape == (B * T, N, J * 4 + 6)
poses_vel_feat_uv = self.geo_fn.to_uv(poses_vel_feat)
assert poses_vel_feat_uv.shape == (B * T, J * 4 + 6, H, W)
poses_vel_feat_uv = self.dynamics_net.local_poses_vel_conv_block(poses_vel_feat_uv).view(B, T * 32, H, W)
poses_vel_feat_uv = self.dynamics_net.temp_poses_vel_conv_block(poses_vel_feat_uv)
assert poses_vel_feat_uv.shape == (B, 32, H, W)
# Compute pose_feat
pose_feat = self.compute_poses_feat(poses[:, -1, 6:].clone())
assert pose_feat.shape == (B, N, J, 4)
pose_feat_uv = self.geo_fn.to_uv(pose_feat.view(B, N, J * 4))
assert pose_feat_uv.shape == (B, J * 4, H, W)
pose_feat_uv = self.dynamics_net.local_pose_conv_block(pose_feat_uv)
assert pose_feat_uv.shape == (B, 32, H, W)
# Compute obsdf_feat_uv
obsdf_delta = obsdf[:, 1:] - obsdf[:, :-1]
assert obsdf_delta.shape == (B, T - 1, N, K)
obsdf_delta = self.normalize_sd_delta(obsdf_delta)
obsdf_delta = obsdf_delta.permute(0, 2, 1, 3).contiguous()
assert obsdf_delta.shape == (B, N, T - 1, K)
obsdf_feat = torch.cat([obsdf_delta.view(B, N, (T - 1) * K), obsdf[:, -1] * 20], dim=-1)
assert obsdf_feat.shape == (B, N, T * K)
obsdf_feat_uv = self.geo_fn.to_uv(obsdf_feat)
assert obsdf_feat_uv.shape == (B, T * K, H, W)
# Unet forward
feat_uv = torch.cat([obpts_uv, poses_vel_feat_uv, pose_feat_uv, obsdf_feat_uv], dim=1)
shapes_uv_delta = self.dynamics_net.unet(feat_uv)
_, shapes_uv_prev = self.enc_shapes_to_sdf(obsdf[:, -1:], poses[:, -2:-1])
shapes_uv = shapes_uv_prev[:, 0] + shapes_uv_delta
C = shapes_uv.shape[1]
feat_uv_ = shapes_uv * (1 - self.head_hands_feet_mask_uv) + self.shape_enc_dec.uv_bias * self.head_hands_feet_mask_uv
shapes = self.geo_fn.from_uv(feat_uv_)
assert shapes.shape == (B, N, C)
return shapes
class ShapeEncDec(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.sdf_mlp = MLP([128, 128, 1], [-1], 'softplus', True, 'linear', False)
self.pts_emb = Embedder(3, 4)
self.proj_pts_mlp = MLP([64 + self.pts_emb.out_ch, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.pts_mlp = MLP([64 + self.pts_emb.out_ch * 2, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.weights_fc = nn.Linear(128, 1)
self.shape_enc = ShapeEnc(args)
self.register_parameter('uv_bias', nn.Parameter(torch.normal(0, 0.01, (1, 64, 256, 256), dtype=torch.float), requires_grad=True))
class DynamicsNet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.local_pose_conv_block = ConvBlock(92, 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.local_poses_vel_conv_block = ConvBlock(98, 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.temp_poses_vel_conv_block = ConvBlock(32 * args['model']['n_hist_frames'], 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.unet = Unet(args)
class ShapeEnc(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.conv_in = ConvBlock(args['model']['ob_vals'][-1] * 4, 64, 256)
self.conv0 = ConvDownBlock(64, 64, 256)
self.conv1 = ConvDownBlock(64, 64, 128)
self.conv2 = ConvUpBlock(64, 64, 128)
self.conv3 = ConvUpBlock(64, 64, 256)
self.conv_out = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, padding=0),
nn.Tanh()
)
def forward(self, x):
x = self.conv_in(x)
x0 = self.conv0(x)
x1 = self.conv1(x0)
x2 = self.conv2(x1) + x0
x3 = self.conv3(x2) + x
out = self.conv_out(x3)
return out
class Unet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.conv_in = ConvBlock((args['model']['n_hist_frames'] + args['model']['n_batch_frames'] * 3) * args['model']['ob_vals'][-1] + 64, 64, 256)
self.conv_down0 = ConvDownBlock(64, 128, 256)
self.conv_down1 = ConvDownBlock(128, 256, 128)
self.conv_down2 = ConvDownBlock(256, 256, 64)
self.conv_down3 = ConvDownBlock(256, 256, 32)
self.conv_up3 = ConvUpBlock(256, 256, 32)
self.conv_up2 = ConvUpBlock(256, 256, 64)
self.conv_up1 = ConvUpBlock(256, 128, 128)
self.conv_up0 = ConvUpBlock(128, 64, 256)
self.conv_out = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, padding=0),
nn.Tanh()
)
torch.nn.init.zeros_(self.conv_out[0].weight)
if hasattr(self.conv_out[0], 'bias') and self.conv_out[0].bias is not None:
torch.nn.init.zeros_(self.conv_out[0].bias)
def forward(self, x):
x = self.conv_in(x)
x0 = self.conv_down0(x)
x1 = self.conv_down1(x0)
x2 = self.conv_down2(x1)
x3 = self.conv_down3(x2)
y3 = self.conv_up3(x3) + x2
y2 = self.conv_up2(y3) + x1
y1 = self.conv_up1(y2) + x0
y0 = self.conv_up0(y1) + x
out = self.conv_out(y0)
return out
|
AutoAvatar-main
|
models/std/nets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
import copy
import shutil, inspect
import torch
import torch.nn.functional as F
import torch.optim as optim
import pytorch_lightning as pl
from pytorch3d.io import save_ply
import time
import utils.CAPE as cape_utils
from utils.implicit import reconstruction
from models.PosedDecKNN_dPoses_dHs.nets import DynNet
import models.std.visual as visual
class Implicit_Trainbox(pl.LightningModule):
def __init__(self, args, log_dir, resolution, recurrent=True, eval_frames=None, pose_model=None):
super().__init__()
self.args = copy.deepcopy(args)
self.log_dir = log_dir
self.resolution = resolution
self.recurrent = recurrent
self.eval_frames = eval_frames
self.pose_model = pose_model
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(os.path.join(log_dir, 'ckpt')):
os.mkdir(os.path.join(log_dir, 'ckpt'))
if not os.path.exists(os.path.join(log_dir, 'net_def')):
os.mkdir(os.path.join(log_dir, 'net_def'))
if not os.path.exists(os.path.join(log_dir, 'mesh')):
os.mkdir(os.path.join(log_dir, 'mesh'))
shutil.copy(os.path.realpath(__file__), os.path.join(log_dir, 'net_def'))
shutil.copy(inspect.getfile(DynNet), os.path.join(log_dir, 'net_def'))
self.dyn_net = DynNet(args, eval_frames)
self.itr = 0
def save_ckpt(self):
torch.save(self.dyn_net.state_dict(), os.path.join(self.log_dir, 'ckpt', 'dyn_net_%06d.pth' % self.itr))
def load_ckpt(self, itr, log_dir):
self.dyn_net.load_state_dict(torch.load(os.path.join(log_dir, 'ckpt', 'dyn_net_%06d.pth' % itr), map_location='cpu'))
def preprocess(self, batch):
verts_detail, faces_detail, verts_smt, faces_smt, poses = batch['verts_detail'], batch['faces_detail'], batch['verts_smt'], batch['faces_smt'], batch['poses']
B, T, _ = poses.shape
N = self.dyn_net.smpl_model.v_template.shape[0]
verts_smpl = self.dyn_net.smpl_model(poses.view(B * T, 75)).vertices.view(B, T, N, 3)
if len(verts_detail) == 0:
verts_detail = [verts_smpl[:, i].contiguous() for i in range(T)]
faces_detail = [self.dyn_net.smpl_model.faces.to(verts_smpl.device)[None].expand(B, -1, -1).contiguous() for i in range(T)]
verts_smt = verts_detail
faces_smt = faces_detail
if not self.args['data']['separate_detail']:
verts_smt = verts_detail
faces_smt = faces_detail
return verts_detail, faces_detail, verts_smt, faces_smt, poses, verts_smpl
def train_or_valid_step(self, batch, batch_idx, is_train):
verts_detail_all, faces_detail_all, verts_smt_all, faces_smt_all, poses_all, verts_smpl_all = self.preprocess(batch)
T = self.args['model']['n_batch_frames']
T_hist = self.args['model']['n_hist_frames']
T_next = T - T_hist
K = self.args['model']['ob_vals'][-1]
assert T_next == 1
iter_times = []
sd_errs = []
cos_errs = []
obsdf_rollout = None
shapes_uv_init = None
loss_surf_sdf = 0
loss_surf_grad = 0
loss_igr = 0
loss_o = 0
end_idx = self.args['train']['n_rollout']
if self.eval_frames is not None and batch_idx + T_hist == self.eval_frames[0]:
end_idx = poses_all.shape[1] - T + 1
for i in range(end_idx):
verts_detail = verts_detail_all[i:i+T]
faces_detail = faces_detail_all[i:i+T]
verts_smt = verts_smt_all[i:i+T]
faces_smt = faces_smt_all[i:i+T]
poses = poses_all[:, i:i+T]
verts_smpl = verts_smpl_all[:, i:i+T]
N = verts_smpl.shape[2]
B = poses.shape[0]
if poses.shape[1] < T:
break
verts = verts_smt
faces = faces_smt
verts_gt = verts_smt if not self.args['model']['use_detail'] else verts_detail
faces_gt = faces_smt if not self.args['model']['use_detail'] else faces_detail
bbmin = verts_smpl.min(dim=2)[0] - 0.1
bbmax = verts_smpl.max(dim=2)[0] + 0.1
surf_pts, surf_normals, igr_pts, bbox_pts, rand_pts = cape_utils.sample_igr_pts(verts_gt[-1], faces_gt[-1], bbmin[:, -1], bbmax[:, -1], self.args)
# start_time = time.time()
if self.args['model']['stage'] == 'shape_enc_dec':
obsdf, _ = self.dyn_net.shapes_to_obsdf(verts[-1], poses[:, -1], mode='meshes', faces=faces[-1])
assert obsdf.shape == (B, N, 1)
obsdf = obsdf[:, None]
if self.args['model']['stage'] == 'auto_regr':
if obsdf_rollout is None:
if 'verts_init' not in batch:
obsdf = [self.dyn_net.shapes_to_obsdf(verts[j], poses[:, j], mode='meshes', faces=faces[j])[0] for j in range(T_hist)]
obsdf = torch.stack(obsdf, dim=1)
assert obsdf.shape == (B, T_hist, N, 1)
else:
_, shapes_uv_pose = self.pose_model(None, poses[:, :-1].reshape(B * T_hist, 1, 75))
obsdf, _ = self.pose_model.shapes_to_obsdf(torch.zeros((B * T_hist, 0, 0), device=poses.device), poses[:, :-1].reshape(B * T_hist, 75), mode='nets', shapes_uv=shapes_uv_pose)
assert obsdf.shape == (B * T_hist, N, 1)
obsdf = obsdf.view(B, T_hist, N, 1).contiguous()
# obsdf = self.dyn_net.shapes_to_obsdf(batch['verts_init'], batch['poses_init'], mode='meshes', faces=batch['faces_init'])[0]
# assert obsdf.shape == (B, N, 1)
# obsdf = obsdf[:, None].expand(B, T_hist, N, 1).contiguous()
else:
obsdf = obsdf_rollout.detach()
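                    # Autoregressive rollout: after the first window, the
                    # history observations come from the model's own detached
                    # predictions rather than from ground-truth meshes.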
shapes, shapes_uv = self.dyn_net(obsdf, poses)
if self.eval_frames is not None:
if shapes_uv_init is None:
shapes_uv_init = shapes_uv
else:
shapes_uv = shapes_uv * (1 - self.dyn_net.head_hands_feet_mask_uv) + shapes_uv_init * self.dyn_net.head_hands_feet_mask_uv
if i + 1 < end_idx and self.recurrent:
with torch.no_grad():
obsdf_new, _ = self.dyn_net.shapes_to_obsdf(shapes, poses[:, -1], mode='nets', shapes_uv=shapes_uv)
obsdf_rollout = torch.cat([obsdf[:, 1:], obsdf_new[:, None]], dim=1).detach()
assert obsdf_rollout.shape == (B, T_hist, N, 1)
if self.eval_frames is None:
# Losses
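                # IGR-style objective (cf. Gropp et al., 2020): zero SDF on
                # surface samples, surface gradients matched to normals, a
                # unit-gradient (eikonal) term on random samples, and an
                # off-surface term penalizing near-zero SDF inside the bbox.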
surf_sdf, surf_sdf_grad = self.dyn_net.query_sdf_with_grad(surf_pts, poses[:, -1], shapes_uv)
rand_sdf, rand_sdf_grad = self.dyn_net.query_sdf_with_grad(rand_pts, poses[:, -1], shapes_uv)
bbox_sdf = rand_sdf[:, self.args['train']['n_pts_scan_igr']:]
assert bbox_sdf.shape == (B, self.args['train']['n_pts_bbox_igr'])
loss_surf_sdf += surf_sdf.abs().mean() / self.args['train']['n_rollout']
loss_surf_grad += torch.norm(surf_sdf_grad - surf_normals, p=2, dim=-1).mean() / self.args['train']['n_rollout']
loss_igr += (torch.norm(rand_sdf_grad, p=2, dim=-1) - 1).pow(2).mean() / self.args['train']['n_rollout']
loss_o += torch.exp(-50.0 * torch.abs(bbox_sdf)).mean() / self.args['train']['n_rollout']
else:
out_dir = os.path.join(self.log_dir, 'mesh', 'batch_%06d' % batch_idx)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(os.path.join(out_dir, 'gt')):
os.mkdir(os.path.join(out_dir, 'gt'))
if not os.path.exists(os.path.join(out_dir, 'pred')):
os.mkdir(os.path.join(out_dir, 'pred'))
if not os.path.exists(os.path.join(out_dir, 'poses')):
os.mkdir(os.path.join(out_dir, 'poses'))
with open(os.path.join(out_dir, 'poses', 'poses_%06d.bin' % (i + T_hist)), 'wb') as f:
pickle.dump({'poses': poses.cpu()}, f)
if i == 0:
for j in range(T_hist):
save_ply(os.path.join(out_dir, 'gt', 'gt_%06d.ply' % j), verts_gt[j][0], faces_gt[j][0])
save_ply(os.path.join(out_dir, 'gt', 'gt_%06d.ply' % (i + T_hist)), verts_gt[-1][0], faces_gt[-1][0])
out = reconstruction(self.dyn_net.query_sdf_nets, poses.device, None,
self.resolution, bbmin[0, -1].cpu().numpy(), bbmax[0, -1].cpu().numpy(),
use_octree=False, num_samples=4096, transform=None, thresh=0, texture_net = None, poses=poses[:1, -1], shapes=shapes_uv[:1])
verts_out, faces_out = out
verts_out, faces_out = torch.from_numpy(verts_out).float().to(poses.device), torch.from_numpy(faces_out.astype(np.int32)).long().to(poses.device)
save_ply(os.path.join(out_dir, 'pred', 'pred_%06d.ply' % (i + T_hist)), verts_out, faces_out)
sd_err, cos_err = cape_utils.scan_to_pred_errors(verts_gt[-1], faces_gt[-1], verts_out[None], faces_out[None])
sd_errs.append(sd_err.cpu())
cos_errs.append(cos_err.cpu())
# iter_time = time.time() - start_time
# iter_times.append(iter_time)
# print('time:', iter_time)
# print('mean time:', np.array(iter_times[1:-1]).mean())
# input('pause')
if self.eval_frames is not None:
with open(os.path.join(out_dir, 'errs.bin'), 'wb') as f:
pickle.dump({'sd_errs': sd_errs, 'cos_errs': cos_errs}, f)
visual.render_meshes(out_dir, start_i=T_hist, simplify_mesh=False)
os.system('bash models/std/videos.sh %s %s' % (out_dir, str(T_hist)))
loss = loss_surf_sdf + loss_surf_grad + loss_igr * self.args['train']['lambda_igr'] + loss_o * self.args['train']['lambda_o']
res_dict = {
'verts': verts,
'faces': faces,
'verts_gt': verts_gt,
'faces_gt': faces_gt,
'verts_smpl': verts_smpl,
'poses': poses,
'shapes': shapes_uv,
'bbmin': bbmin,
'bbmax': bbmax,
'loss_surf_sdf': loss_surf_sdf,
'loss_surf_grad': loss_surf_grad,
'loss_igr': loss_igr,
'loss_o': loss_o,
'loss': loss
}
return res_dict
def training_step(self, batch, batch_idx):
res_dict = self.train_or_valid_step(batch, batch_idx, True)
# log
prefix = 'Train'
self.log('%s/loss' % prefix, res_dict['loss'])
self.log('%s/loss_surf_sdf' % prefix, res_dict['loss_surf_sdf'])
self.log('%s/loss_surf_grad' % prefix, res_dict['loss_surf_grad'])
self.log('%s/loss_igr' % prefix, res_dict['loss_igr'])
self.log('%s/loss_o' % prefix, res_dict['loss_o'])
# checkpoint
self.itr += 1
if self.itr % self.args['train']['ckpt_step'] == 0:
self.save_ckpt()
return res_dict['loss']
def validation_step(self, batch, batch_idx):
if self.eval_frames is not None and batch_idx + self.args['model']['n_hist_frames'] not in self.eval_frames:
return
res_dict = self.train_or_valid_step(batch, batch_idx, False)
# log
prefix = 'Valid'
self.log('%s/loss' % prefix, res_dict['loss'])
self.log('%s/loss_surf_sdf' % prefix, res_dict['loss_surf_sdf'])
self.log('%s/loss_surf_grad' % prefix, res_dict['loss_surf_grad'])
self.log('%s/loss_igr' % prefix, res_dict['loss_igr'])
self.log('%s/loss_o' % prefix, res_dict['loss_o'])
if self.eval_frames is None:
self.compute_meshes(res_dict, batch, batch_idx)
def configure_optimizers(self):
if self.args['model']['stage'] == 'shape_enc_dec':
optimizer = optim.Adam(self.dyn_net.parameters(), lr=self.args['train']['lr'])
        elif self.args['model']['stage'] == 'auto_regr':  # and not self.args['model']['use_detail']:
optimizer = optim.Adam(self.dyn_net.parameters(), lr=self.args['train']['lr'])
# elif self.args['model']['use_detail']:
# optimizer = optim.Adam(self.dyn_net.detail_dec.parameters(), lr=self.args['train']['lr'])
return optimizer
def compute_meshes(self, res_dict, batch, batch_idx):
verts, faces, verts_gt, faces_gt, verts_smpl, poses, shapes, bbmin, bbmax = res_dict['verts'], res_dict['faces'], res_dict['verts_gt'], res_dict['faces_gt'], \
res_dict['verts_smpl'], res_dict['poses'], res_dict['shapes'], res_dict['bbmin'], res_dict['bbmax']
T = self.args['model']['n_batch_frames']
T_hist = self.args['model']['n_hist_frames']
T_next = T - T_hist
if not os.path.exists(os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr)):
os.mkdir(os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr))
out_dir = os.path.join(self.log_dir, 'mesh', 'itr_%06d' % self.itr, 'batch_%06d' % batch_idx)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
for i in range(T_hist):
save_ply(os.path.join(out_dir, 'hist_%d.ply' % i), verts[i][0], faces[i][0])
save_ply(os.path.join(out_dir, 'gt.ply'), verts_gt[-1][0], faces_gt[-1][0])
out = reconstruction(self.dyn_net.query_sdf_nets, poses.device, None,
self.resolution, bbmin[0, -1].cpu().numpy(), bbmax[0, -1].cpu().numpy(),
use_octree=False, num_samples=4096, transform=None, thresh=0, texture_net = None, poses=poses[:1, -1], shapes=shapes[:1])
if out != -1:
verts_out, faces_out = out
save_ply(os.path.join(out_dir, 'pred.ply'),
torch.from_numpy(verts_out).float().contiguous(), torch.from_numpy(faces_out.astype(np.int32)).contiguous().long())
def test_step(self, batch, batch_idx):
self.validation_step(batch, batch_idx)
|
AutoAvatar-main
|
models/PosedDecKNN_dPoses_dHs/trainbox.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from pytorch3d.ops import knn_points, knn_gather
from pytorch3d.transforms import axis_angle_to_matrix
from pytorch3d.transforms.rotation_conversions import matrix_to_axis_angle
import utils.CAPE as cape_utils
from utils.render import *
from models.nets import *
class DynNet(nn.Module):
def __init__(self, args, eval_frames):
super().__init__()
self.args = copy.deepcopy(args)
self.eval_frames = eval_frames
self.smpl_model = cape_utils.load_smpl(args)
self.register_buffer('v_template', self.smpl_model.v_template, persistent=False)
self.register_buffer('faces', self.smpl_model.faces, persistent=False)
mask_ids = ['left_wrist', 'right_wrist', 'left_hand', 'right_hand', 'left_ankle', 'right_ankle', 'left_foot', 'right_foot', 'head']
mask_ids = [cape_utils.SMPL_JOINT_NAMES.index(e) for e in mask_ids]
head_hands_feet_mask = self.smpl_model.lbs_weights[:, mask_ids].sum(dim=-1) # (N,)
head_hands_feet_mask[head_hands_feet_mask < 2e-2] = 0
head_hands_feet_mask = (head_hands_feet_mask * 10).clip(max=1)
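        # Soft vertex mask for regions dominated by head/hands/feet skinning
        # weights; in UV space these regions are later blended with a learned
        # static bias so the networks need not model their dynamics.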
self.register_buffer('head_hands_feet_mask', head_hands_feet_mask, persistent=False)
mask_ids = ['left_ankle', 'right_ankle', 'left_foot', 'right_foot']
mask_ids = [cape_utils.SMPL_JOINT_NAMES.index(e) for e in mask_ids]
feet_mask = self.smpl_model.lbs_weights[:, mask_ids].sum(dim=-1) # (N,)
self.register_buffer('feet_mask', feet_mask, persistent=False)
W = cape_utils.compute_adjacent_matrix(self.smpl_model.parents, 1)
self.register_buffer('W', W, persistent=False) # (J + 1, J)
data = np.load(args['data']['uv_info'])
verts_uv, faces_uv, v2uv = torch.from_numpy(data['verts_uv']), torch.from_numpy(data['faces_uv']).long(), torch.from_numpy(data['v2uv']).long()
self.geo_fn = UVRender(args, verts_uv, faces_uv, v2uv)
self.register_buffer('head_hands_feet_mask_uv', self.geo_fn.to_uv(head_hands_feet_mask[None, :, None].cuda()), persistent=False)
data = np.load(args['data']['resample_idxs_path'])
self.resample_idxs = data['idxs']
self.shape_enc_dec = ShapeEncDec(args)
if args['model']['stage'] == 'auto_regr':
self.dynamics_net = DynamicsNet(args)
# if args['model']['use_detail']:
# self.detail_dec = DetailDec(args)
def compute_poses_feat(self, poses):
"""
:param poses: (B, 69)
"""
B = poses.shape[0]
J = self.smpl_model.NUM_BODY_JOINTS
N = self.smpl_model.get_num_verts()
assert poses.shape == (B, 69)
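        # Per-vertex pose features: joint rotation quaternions weighted by
        # LBS skinning weights, spread across neighboring joints via the
        # adjacency matrix W so each vertex sees the poses that move it.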
poses_quat = self.smpl_model.compute_poses_quat(poses) # (B, J, 4)
assert poses_quat.shape == (B, J, 4)
lbs_w = self.smpl_model.lbs_weights[None].expand(B, N, J + 1)
lbs_w = torch.einsum('bvj,jl->bvl', lbs_w, self.W)
assert lbs_w.shape == (B, N, J)
poses_feat = poses_quat[:, None] * lbs_w[..., None]
assert poses_feat.shape == (B, N, J, 4)
return poses_feat
def normalize_sd_delta(self, sd_delta):
sd_delta_nc = torch.sign(sd_delta) * (sd_delta.abs() * 1000 + 1).log() * 0.25
return sd_delta_nc
def normalize_globalRt(self, pts, poses):
"""
:param pts: (B, M, 3)
:param poses: (B, 75)
"""
B, M, _ = pts.shape
assert poses.shape == (B, 75)
smpl_out = self.smpl_model(poses)
root_T_inv = torch.linalg.inv(smpl_out.A[:, 0]) # (B, 4, 4)
pts_nc = pts - poses[:, None, :3]
pts_nc_homo = torch.ones((B, M, 1), dtype=torch.float, device=pts.device)
pts_nc_homo = torch.cat([pts_nc, pts_nc_homo], dim=-1)
pts_nc = torch.bmm(root_T_inv, pts_nc_homo.transpose(-2, -1)).transpose(-2, -1)[..., :3].contiguous()
assert pts_nc.shape == (B, M, 3)
return pts_nc
def query_sdf_nets(self, pts, poses, shapes, force_coarse=False):
"""
:param pts: (B, M, 3)
:param poses: (B, 75)
:param shapes: (B, C, H, W)
"""
B, M, _ = pts.shape
# _, N, C = shapes.shape
C = shapes.shape[1]
N = self.smpl_model.get_num_verts()
H = W = self.args['model']['uv_size']
# assert poses.shape == (B, 75) and shapes.shape == (B, N, C) and N == self.smpl_model.get_num_verts()
assert poses.shape == (B, 75) and shapes.shape == (B, C, H, W)
verts = self.smpl_model(poses).vertices
assert verts.shape == (B, N, 3)
shapes = self.geo_fn.from_uv(shapes)
# shapes_bias = self.geo_fn.from_uv(self.shape_enc_dec.uv_bias)
# shapes = shapes * (1 - self.head_hands_feet_mask[None, :, None]) + shapes_bias * self.head_hands_feet_mask[None, :, None]
assert shapes.shape == (B, N, C)
# Normalize global Rt
verts = self.normalize_globalRt(verts, poses)
pts = self.normalize_globalRt(pts, poses)
# MLP decode
# SMPL resample
meshes = Meshes(verts=verts, faces=self.faces[None].expand(B, -1, -1))
normals = meshes.verts_normals_padded()
assert normals.shape == (B, N, 3)
verts_ori = verts.clone()
shapes_ori = shapes.clone()
verts = verts[:, self.resample_idxs]
normals = normals[:, self.resample_idxs]
shapes = shapes[:, self.resample_idxs]
N_ = verts.shape[1]
assert verts.shape == (B, N_, 3) and normals.shape == (B, N_, 3) and shapes.shape == (B, N_, C)
# KNN
K = 20
C_s = 64
C_ = 128
_, idx, pts_nn = knn_points(pts, verts, K=K, return_nn=True)
assert torch.allclose(pts_nn, knn_gather(verts, idx))
normals_nn = knn_gather(normals, idx)
shapes_nn = knn_gather(shapes, idx)
assert pts_nn.shape == (B, M, K, 3) and normals_nn.shape == (B, M, K, 3) and shapes_nn.shape == (B, M, K, C)
pts_nn = pts[:, :, None] - pts_nn
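            # Roughly rotation-invariant query encoding: per neighbor, keep
            # only the cosine to the neighbor's normal and the distance,
            # instead of the raw 3D offsets used in models/std/nets.py.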
cos_nn = torch.cosine_similarity(pts_nn, normals_nn, dim=-1)
len_nn = torch.norm(pts_nn, p=2, dim=-1)
assert cos_nn.shape == (B, M, K) and len_nn.shape == (B, M, K)
x = torch.cat([cos_nn[..., None], len_nn[..., None]], dim=-1)
assert x.shape == (B, M, K, 2)
# Proj pts to mesh
# sd, pts_proj, shapes_proj, pts_uv = cape_utils.proj_pts_to_uv(pts, verts_ori, self.faces[None].expand(B, -1, -1).contiguous(),
# self.geo_fn.verts_uv[None].expand(B, -1, -1),
# self.geo_fn.faces_uv[None].expand(B, -1, -1), shapes_ori)
# assert sd.shape == (B, M) and pts_proj.shape == (B, M, 3) and shapes_proj.shape == (B, M, C) and pts_uv.shape == (B, M, 2)
# x = torch.cat([sd[..., None], pts_uv], dim=-1)
# assert x.shape == (B, M, 3)
# pts_proj = pts_proj - pts
# Aggregate
feat_nn = self.shape_enc_dec.pts_mlp(
torch.cat([
# self.shape_enc_dec.pts_emb(pts_nn.view(B * M * K, 3)),
# self.shape_enc_dec.pts_emb(normals_nn.view(B * M * K, 3)),
self.shape_enc_dec.pts_emb(x.view(B * M * K, 2)),
shapes_nn.view(B * M * K, C)[:, :C_s]
], dim=-1)
).view(B, M, K, C_)
# feat_proj = self.shape_enc_dec.proj_pts_mlp(
# torch.cat([
# self.shape_enc_dec.pts_emb(x.view(B * M, 3)),
# shapes_proj.view(B * M, C)[:, :C_s]
# ], dim=-1)
# ).view(B, M, 1, C_)
        feat = feat_nn  # torch.cat([feat_nn, feat_proj], dim=-2)
        assert feat.shape == (B, M, K, C_)  # (B, M, K + 1, C_)
        w = self.shape_enc_dec.weights_fc(feat.view(B * M * K, C_)).view(B, M, K, 1)  # .view(B * M * (K + 1), C_)).view(B, M, K + 1, 1)
w = torch.softmax(w, dim=-2)
feat = (feat * w).sum(dim=-2)
assert feat.shape == (B, M, C_)
sdf = self.shape_enc_dec.sdf_mlp(feat.view(B * M, C_)).view(B, M)
# # Check weights
# print(x[0, 0])
# print(w[0, 0])
# input('pause')
# sdf = self.shape_enc_dec.sdf_mlp(feat_proj).view(B, M)
# mask feet
if self.eval_frames is not None:
pts_feet_mask = knn_gather(self.feet_mask[None, self.resample_idxs, None].expand(B, -1, -1), idx)[:, :, 0, 0]
assert pts_feet_mask.shape == (B, M)
abs_mask = (cos_nn[..., 0] > 0).long() * (len_nn[..., :4].mean(dim=-1) > 0.04)
sdf_abs = sdf.clone()
sdf_abs = sdf_abs.abs() * abs_mask + sdf_abs * (1 - abs_mask)
sdf = sdf * (1 - pts_feet_mask) + sdf_abs * pts_feet_mask
# if self.args['model']['use_detail'] and not force_coarse:
# # Aggregate
# feat_nn = self.detail_dec.pts_mlp(
# torch.cat([
# self.detail_dec.pts_emb(pts_nn.view(B * M * K, 3)),
# self.detail_dec.pts_emb(normals_nn.view(B * M * K, 3)),
# shapes_nn.view(B * M * K, C)[:, C_s:]
# ], dim=-1)
# ).view(B, M, K, C_)
# feat_proj = self.detail_dec.proj_pts_mlp(
# torch.cat([
# self.detail_dec.pts_emb(pts_proj.view(B * M, 3)),
# shapes_proj.view(B * M, C)[:, C_s:]
# ], dim=-1)
# ).view(B, M, 1, C_)
# feat = torch.cat([feat_nn, feat_proj], dim=-2)
# assert feat.shape == (B, M, K + 1, C_)
# w = self.detail_dec.weights_fc(feat.view(B * M * (K + 1), C_)).view(B, M, K + 1, 1)
# w = torch.softmax(w, dim=-2)
# feat = (feat * w).sum(dim=-2)
# assert feat.shape == (B, M, C_)
# sdf_delta = self.detail_dec.sdf_mlp(feat.view(B * M, C_)).view(B, M)
# sdf = sdf + sdf_delta
return sdf
def compute_obpts(self, poses):
"""
:param poses: (B, 75)
"""
B = poses.shape[0]
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
verts_smpl = self.smpl_model(poses).vertices
meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B, -1, -1))
normals_smpl = meshes.verts_normals_padded() # (B, N, 3)
offset = torch.linspace(*self.args['model']['ob_vals'], device=poses.device)[None, None, :, None] * normals_smpl[:, :, None, :] # (B, N, K, 3)
obpts = offset + verts_smpl[:, :, None]
return obpts
def shapes_to_obsdf(self, shapes, poses, mode='nets', faces=None, shapes_uv=None):
"""
:param shapes: (B, N, C)
:param poses: (B, 75)
"""
B = poses.shape[0]
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
C = shapes.shape[-1]
assert poses.shape == (B, 75) and shapes.shape[0] == B
# Compute observer pts
obpts = self.compute_obpts(poses)
assert obpts.shape == (B, N, K, 3)
# Query sdf
def query(obpts, clip):
B, N, K, _ = obpts.shape
if mode == 'meshes':
assert C == 3 and faces is not None
sdf, _, _, _, _ = cape_utils.proj_pts_to_mesh(obpts.view(B, N * K, 3), shapes, faces)
sdf = sdf.view(B, N, K)
elif mode == 'nets':
assert shapes_uv is not None
sdf = self.query_sdf_nets(obpts.view(B, N * K, 3), poses, shapes_uv, force_coarse=True)
sdf = sdf.view(B, N, K)
if clip:
thres = (self.args['model']['ob_vals'][1] - self.args['model']['ob_vals'][0]) / (K - 1)
assert thres > 0
sdf = sdf.clip(min=-thres, max=thres)
return sdf
sdf = query(obpts, False)
verts_smpl = self.smpl_model(poses).vertices
meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B, -1, -1))
normals_smpl = meshes.verts_normals_padded() # (B, N, 3)
offsets = torch.linspace(*self.args['model']['ob_vals'], device=poses.device)[None, None, :].expand(B, N, K).contiguous()
idxs = torch.arange(0, K, 1, device=poses.device).long()[None, None, :].expand(B, N, K).contiguous()
idxs[sdf < 0] += 777
idxs_pos = torch.min(idxs, dim=-1)[0].clip(max=K-1)
idxs_neg = (idxs_pos - 1).clip(min=0)
offsets_pos = torch.gather(offsets, -1, idxs_pos[..., None])
offsets_neg = torch.gather(offsets, -1, idxs_neg[..., None])
assert offsets_pos.shape == (B, N, 1) and offsets_neg.shape == (B, N, 1)
sdf_pos = torch.gather(sdf, -1, idxs_pos[..., None])
sdf_neg = torch.gather(sdf, -1, idxs_neg[..., None])
assert sdf_pos.shape == (B, N, 1) and sdf_neg.shape == (B, N, 1)
# binary search
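        # The first sample with sdf >= 0 and its predecessor bracket the zero
        # crossing along each normal ray; two bisection iterations below
        # tighten the bracket before the final interpolation.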
for i in range(2):
offsets_mid = (offsets_neg + offsets_pos) / 2
obpts_mid = offsets_mid[..., None] * normals_smpl[:, :, None, :] + verts_smpl[:, :, None]
sdf_mid = query(obpts_mid, False)
assert sdf_mid.shape == (B, N, 1)
offsets_neg_new = offsets_neg.clone()
offsets_pos_new = offsets_pos.clone()
offsets_neg_new[sdf_mid <= 0] = offsets_mid[sdf_mid <= 0]
offsets_pos_new[sdf_mid > 0] = offsets_mid[sdf_mid > 0]
offsets_neg = offsets_neg_new.contiguous()
offsets_pos = offsets_pos_new.contiguous()
sdf_neg_new = sdf_neg.clone()
sdf_pos_new = sdf_pos.clone()
sdf_neg_new[sdf_mid <= 0] = sdf_mid[sdf_mid <= 0]
sdf_pos_new[sdf_mid > 0] = sdf_mid[sdf_mid > 0]
sdf_neg = sdf_neg_new.contiguous()
sdf_pos = sdf_pos_new.contiguous()
# offsets_surf = (offsets_neg + offsets_pos) / 2
# Interpolation
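        # Blend the bracketing offsets with weights proportional to the
        # opposite sample's |sdf| (linear interpolation of the zero crossing);
        # fall back to the midpoint (0.5/0.5) where no sign change exists or
        # the denominator underflows.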
zero_mask = idxs_neg != idxs_pos
w = sdf_neg.abs() + sdf_pos.abs()
zero_mask = (zero_mask.long() * (w.squeeze(-1) > 1e-10).long()).bool()
w_neg = torch.zeros_like(sdf_neg) + 0.5
w_neg[zero_mask] = sdf_pos[zero_mask].abs() / w[zero_mask]
w_pos = torch.zeros_like(sdf_pos) + 0.5
w_pos[zero_mask] = sdf_neg[zero_mask].abs() / w[zero_mask]
offsets_surf = w_neg * offsets_neg + w_pos * offsets_pos
return offsets_surf, obpts
def query_sdf_with_grad(self, pts, poses, shapes):
B, M, _ = pts.shape
C = shapes.shape[1]
N = self.smpl_model.get_num_verts()
H = W = self.args['model']['uv_size']
assert pts.shape == (B, M, 3) and poses.shape == (B, 75) and shapes.shape == (B, C, H, W)
with torch.enable_grad():
pts.requires_grad_(True)
sdf = self.query_sdf_nets(pts, poses, shapes)
assert sdf.shape == (B, M)
sdf_grad = autograd.grad([sdf.sum()], [pts], retain_graph=True, create_graph=True)[0]
assert sdf_grad.shape == (B, M, 3)
return sdf, sdf_grad
def enc_shapes_to_sdf(self, obsdf, poses):
"""
        :param obsdf: (B, T, N, 1)
:param poses: (B, T, 75)
"""
B, T, _ = poses.shape
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
H = W = self.args['model']['uv_size']
assert obsdf.shape == (B, T, N, 1) and poses.shape == (B, T, 75)
# Compute obpts_uv
verts_smpl = self.smpl_model(poses.view(B * T, 75)).vertices
assert verts_smpl.shape == (B * T, N, 3)
verts_smpl = self.normalize_globalRt(verts_smpl, poses.view(B * T, 75))
meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B * T, -1, -1))
normals_smpl = meshes.verts_normals_padded() # (B * T, N, 3)
obpts = torch.cat([verts_smpl, normals_smpl], dim=-1)
assert obpts.shape == (B * T, N, 6)
obpts_uv = self.geo_fn.to_uv(obpts)
assert obpts_uv.shape == (B * T, 6, H, W)
# Compute obsdf_uv
obsdf_uv = self.geo_fn.to_uv(obsdf.view(B * T, N, 1))
assert obsdf_uv.shape == (B * T, 1, H, W)
# Net forward
in_feat = torch.cat([obpts_uv, obsdf_uv * 20], dim=1)
shapes_uv = self.shape_enc_dec.shape_enc(in_feat)
C = shapes_uv.shape[1]
feat_uv_ = shapes_uv * (1 - self.head_hands_feet_mask_uv) + self.shape_enc_dec.uv_bias * self.head_hands_feet_mask_uv
shapes = self.geo_fn.from_uv(feat_uv_)
assert shapes.shape == (B * T, N, C) and shapes_uv.shape == (B * T, C, H, W)
shapes = shapes.view(B, T, N, C)
shapes_uv = shapes_uv.view(B, T, C, H, W)
return shapes, shapes_uv
def pose_temp_deri(self, poses):
"""
:param poses: (B, T_, 75)
"""
B, T_, _ = poses.shape
T = T_ - 1
J_ = self.smpl_model.NUM_BODY_JOINTS + 1
poses_prev = poses[:, :-1].clone()
poses_last = poses[:, 1:].clone()
poses_vel = torch.zeros_like(poses_last)
assert poses_prev.shape == (B, T, 75) and poses_last.shape == (B, T, 75) and poses_vel.shape == (B, T, 75)
poses_vel[..., :3] = poses_last[..., :3] - poses_prev[..., :3]
rot_prev = axis_angle_to_matrix(poses_prev[..., 3:].reshape(B * T * J_, 3))
rot_last = axis_angle_to_matrix(poses_last[..., 3:].reshape(B * T * J_, 3))
rot_vel = torch.bmm(rot_last, torch.linalg.inv(rot_prev))
assert rot_vel.shape == (B * T * J_, 3, 3)
poses_vel[..., 3:] = matrix_to_axis_angle(rot_vel).view(B, T, J_ * 3)
return poses_vel
def forward(self, obsdf, poses):
"""
        :param obsdf: (B, T, N, 1)
:param poses: (B, T_, 75)
"""
N = self.smpl_model.get_num_verts()
K = self.args['model']['ob_vals'][-1]
H = W = self.args['model']['uv_size']
if self.args['model']['stage'] == 'shape_enc_dec':
B, T, _ = poses.shape
assert obsdf.shape == (B, T, N, 1) and poses.shape == (B, T, 75) and T == 1
shapes, shapes_uv = self.enc_shapes_to_sdf(obsdf, poses)
shapes = shapes.squeeze(1)
C = shapes.shape[-1]
assert shapes.shape == (B, N, C)
elif self.args['model']['stage'] == 'auto_regr':
B, T_, _ = poses.shape
T = obsdf.shape[1]
n_H = self.args['model']['n_H']
J_ = self.smpl_model.NUM_BODY_JOINTS + 1
J = self.smpl_model.NUM_BODY_JOINTS
assert obsdf.shape == (B, T, N, 1) and poses.shape == (B, T_, 75) and T_ - T == 1 and T == self.args['model']['n_hist_frames']
poses_ref = poses[:, -1:].expand(B, T_, 75).contiguous()
# # Compute obpts_uv
# verts_smpl = self.smpl_model(poses.view(B * T_, 75)).vertices
# assert verts_smpl.shape == (B * T_, N, 3)
# verts_smpl = self.normalize_globalRt(verts_smpl, poses_ref.view(B * T_, 75))
# meshes = Meshes(verts=verts_smpl, faces=self.faces[None].expand(B * T_, -1, -1))
# normals_smpl = meshes.verts_normals_padded() # (B * T_, N, 3)
# obpts = torch.cat([verts_smpl, normals_smpl], dim=-1)
# assert obpts.shape == (B * T_, N, 6)
# obpts_uv = self.geo_fn.to_uv(obpts)
# assert obpts_uv.shape == (B * T_, 6, H, W)
# obpts_uv = obpts_uv.view(B, T_ * 6, H, W)
# Compute poses velocity
# poses_prev = poses[:, :-1].clone()
# poses_last = poses[:, 1:].clone()
# poses_vel = torch.zeros_like(poses_last)
# assert poses_prev.shape == (B, T, 75) and poses_last.shape == (B, T, 75) and poses_vel.shape == (B, T, 75)
# poses_vel[..., :3] = poses_last[..., :3] - poses_prev[..., :3]
# rot_prev = axis_angle_to_matrix(poses_prev[..., 3:].reshape(B * T * J_, 3))
# rot_last = axis_angle_to_matrix(poses_last[..., 3:].reshape(B * T * J_, 3))
# rot_vel = torch.bmm(rot_last, torch.linalg.inv(rot_prev))
# assert rot_vel.shape == (B * T * J_, 3, 3)
# poses_vel[..., 3:] = matrix_to_axis_angle(rot_vel).view(B, T, J_ * 3)
# poses_vel_feat = self.compute_poses_feat(poses_vel[..., 6:].reshape(B * T, 69))
# assert poses_vel_feat.shape == (B * T, N, J, 4)
# poses_vel_feat = torch.cat([poses_vel_feat.view(B * T, N, J * 4), poses_vel[..., :6].reshape(B * T, 1, 6).expand(B * T, N, 6)], dim=-1)
# assert poses_vel_feat.shape == (B * T, N, J * 4 + 6)
# poses_vel_feat_uv = self.geo_fn.to_uv(poses_vel_feat)
# assert poses_vel_feat_uv.shape == (B * T, J * 4 + 6, H, W)
# poses_vel_feat_uv = self.dynamics_net.local_poses_vel_conv_block(poses_vel_feat_uv).view(B, T * 32, H, W)
# poses_vel_feat_uv = self.dynamics_net.temp_poses_vel_conv_block(poses_vel_feat_uv)
# assert poses_vel_feat_uv.shape == (B, 32, H, W)
pose_vel = self.pose_temp_deri(poses)
assert pose_vel.shape == (B, T, 75)
pose_vel = pose_vel.view(B * T, 75)
# pose_acc = self.pose_temp_deri(pose_vel)
# pose_vel = pose_vel[:, -1]
# pose_acc = pose_acc[:, -1]
# pose_vel
pose_vel_feat = self.compute_poses_feat(pose_vel[:, 6:].clone())
assert pose_vel_feat.shape == (B * T, N, J, 4)
pose_vel_feat = torch.cat([pose_vel_feat.view(B * T, N, J * 4), pose_vel[:, None, :6].expand(B * T, N, 6)], dim=-1)
assert pose_vel_feat.shape == (B * T, N, J * 4 + 6)
pose_vel_feat_uv = self.geo_fn.to_uv(pose_vel_feat)
assert pose_vel_feat_uv.shape == (B * T, J * 4 + 6, H, W)
pose_vel_feat_uv = self.dynamics_net.local_pose_vel_conv_block(pose_vel_feat_uv)
assert pose_vel_feat_uv.shape == (B * T, 32, H, W)
pose_vel_feat_uv = pose_vel_feat_uv.view(B, T * 32, H, W)
# # pose_acc
# pose_acc_feat = self.compute_poses_feat(pose_acc[:, 6:].clone())
# assert pose_acc_feat.shape == (B, N, J, 4)
# pose_acc_feat = torch.cat([pose_acc_feat.view(B, N, J * 4), pose_acc[:, None, :6].expand(B, N, 6)], dim=-1)
# assert pose_acc_feat.shape == (B, N, J * 4 + 6)
# pose_acc_feat_uv = self.geo_fn.to_uv(pose_acc_feat)
# assert pose_acc_feat_uv.shape == (B, J * 4 + 6, H, W)
# pose_acc_feat_uv = self.dynamics_net.local_pose_acc_conv_block(pose_acc_feat_uv)
# assert pose_acc_feat_uv.shape == (B, 32, H, W)
# Compute pose_feat
pose_feat = self.compute_poses_feat(poses[:, -1, 6:].clone())
assert pose_feat.shape == (B, N, J, 4)
pose_feat_uv = self.geo_fn.to_uv(pose_feat.view(B, N, J * 4))
assert pose_feat_uv.shape == (B, J * 4, H, W)
pose_feat_uv = self.dynamics_net.local_pose_conv_block(pose_feat_uv)
assert pose_feat_uv.shape == (B, 32, H, W)
# Compute obsdf_feat_uv
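            # History observation features: log-compressed frame-to-frame SDF
            # deltas (T-1 channels) concatenated with the latest SDF scaled
            # by 20 to bring it to a comparable magnitude.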
obsdf_delta = obsdf[:, 1:] - obsdf[:, :-1]
assert obsdf_delta.shape == (B, T - 1, N, 1)
# np.save('/mnt/ImpDyn_ws/logs/tmp/obsdf0.npy', obsdf.detach().cpu().numpy())
# print(obsdf_delta.min(), obsdf_delta.max())
# plt.hist(obsdf_delta.detach().view(-1).cpu().numpy())
# plt.savefig('/mnt/ImpDyn_ws/logs/tmp/obsdf_delta.jpg')
obsdf_delta = self.normalize_sd_delta(obsdf_delta)
# print(obsdf_delta.min(), obsdf_delta.max())
# plt.hist(obsdf_delta.detach().view(-1).cpu().numpy())
# plt.savefig('/mnt/ImpDyn_ws/logs/tmp/obsdf_delta_nc.jpg')
# input('pause')
obsdf_delta = obsdf_delta.permute(0, 2, 1, 3).contiguous()
assert obsdf_delta.shape == (B, N, T - 1, 1)
obsdf_feat = torch.cat([obsdf_delta.view(B, N, T - 1), obsdf[:, -1] * 20], dim=-1)
assert obsdf_feat.shape == (B, N, T)
obsdf_feat_uv = self.geo_fn.to_uv(obsdf_feat)
assert obsdf_feat_uv.shape == (B, T, H, W)
# obsdf_feat = obsdf.permute(0, 2, 1, 3).contiguous() * 20
# assert obsdf_feat.shape == (B, N, T, 1)
# obsdf_feat_uv = self.geo_fn.to_uv(obsdf_feat.view(B, N, T)[:, :, -n_H:].contiguous())
# assert obsdf_feat_uv.shape == (B, n_H, H, W)
# Unet forward
feat_uv = torch.cat([pose_vel_feat_uv, pose_feat_uv, obsdf_feat_uv], dim=1)
# shapes_uv_delta = self.dynamics_net.unet(feat_uv)
# _, shapes_uv_prev = self.enc_shapes_to_sdf(obsdf[:, -1:], poses[:, -2:-1])
# shapes_uv = shapes_uv_prev[:, 0] + shapes_uv_delta
shapes_uv = self.dynamics_net.unet(feat_uv)
C = shapes_uv.shape[1]
# feat_uv_ = shapes_uv * (1 - self.head_hands_feet_mask_uv) + self.shape_enc_dec.uv_bias * self.head_hands_feet_mask_uv
# if self.args['model']['use_detail']:
# feat = self.detail_dec.unet(feat_uv_)
# feat_uv_ = torch.cat([feat_uv_, feat], dim=1)
# C = feat_uv_.shape[1]
shapes = self.geo_fn.from_uv(shapes_uv)
assert shapes.shape == (B, N, C)
return shapes, shapes_uv
class ShapeEncDec(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.sdf_mlp = MLP([128, 128, 1], [-1], 'softplus', True, 'linear', False)
self.pts_emb = Embedder(2, 4)
# self.proj_pts_mlp = MLP([64 + self.pts_emb.out_ch, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.pts_mlp = MLP([64 + self.pts_emb.out_ch, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.weights_fc = nn.Linear(128, 1)
# self.shape_enc = ShapeEnc(args)
# self.register_parameter('uv_bias', nn.Parameter(torch.normal(0, 0.01, (1, 64, 256, 256), dtype=torch.float), requires_grad=True))
class DynamicsNet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.local_pose_conv_block = ConvBlock(92, 32, args['model']['uv_size'], kernel_size=1, padding=0)
# self.local_poses_vel_conv_block = ConvBlock(98, 32, args['model']['uv_size'], kernel_size=1, padding=0)
# self.temp_poses_vel_conv_block = ConvBlock(32 * args['model']['n_hist_frames'], 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.local_pose_vel_conv_block = ConvBlock(98, 32, args['model']['uv_size'], kernel_size=1, padding=0)
# self.local_pose_acc_conv_block = ConvBlock(98, 32, args['model']['uv_size'], kernel_size=1, padding=0)
self.unet = Unet(args)
class DetailDec(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.sdf_mlp = MLP([128, 128, 1], [-1], 'softplus', True, 'linear', False, init_zero_last=True)
self.pts_emb = Embedder(3, 6)
self.proj_pts_mlp = MLP([64 + self.pts_emb.out_ch, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.pts_mlp = MLP([64 + self.pts_emb.out_ch * 2, 128, 128], [-1], 'softplus', True, 'softplus', True)
self.weights_fc = nn.Linear(128, 1)
self.unet = DetailUnet(args)
class ShapeEnc(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.conv_in = ConvBlock(7, 64, 256)
self.conv0 = ConvDownBlock(64, 64, 256)
self.conv1 = ConvDownBlock(64, 64, 128)
self.conv2 = ConvUpBlock(64, 64, 128)
self.conv3 = ConvUpBlock(64, 64, 256)
self.conv_out = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, padding=0),
nn.Tanh()
)
def forward(self, x):
x = self.conv_in(x)
x0 = self.conv0(x)
x1 = self.conv1(x0)
x2 = self.conv2(x1) + x0
x3 = self.conv3(x2) + x
out = self.conv_out(x3)
return out
class Unet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
# self.conv_in = ConvBlock(args['model']['n_hist_frames'] + 64, 64, 256)
self.conv_in = ConvBlock(args['model']['n_hist_frames'] + 32 * args['model']['n_batch_frames'], 64, 256)
self.conv_down0 = ConvDownBlock(64, 128, 256)
self.conv_down1 = ConvDownBlock(128, 256, 128)
self.conv_down2 = ConvDownBlock(256, 256, 64)
self.conv_down3 = ConvDownBlock(256, 256, 32)
self.conv_up3 = ConvUpBlock(256, 256, 32)
self.conv_up2 = ConvUpBlock(256, 256, 64)
self.conv_up1 = ConvUpBlock(256, 128, 128)
self.conv_up0 = ConvUpBlock(128, 64, 256)
self.conv_out = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, padding=0),
nn.Tanh()
)
torch.nn.init.zeros_(self.conv_out[0].weight)
if hasattr(self.conv_out[0], 'bias') and self.conv_out[0].bias is not None:
torch.nn.init.zeros_(self.conv_out[0].bias)
def forward(self, x):
x = self.conv_in(x)
x0 = self.conv_down0(x)
x1 = self.conv_down1(x0)
x2 = self.conv_down2(x1)
x3 = self.conv_down3(x2)
y3 = self.conv_up3(x3) + x2
y2 = self.conv_up2(y3) + x1
y1 = self.conv_up1(y2) + x0
y0 = self.conv_up0(y1) + x
out = self.conv_out(y0)
return out
class DetailUnet(nn.Module):
def __init__(self, args):
super().__init__()
self.args = copy.deepcopy(args)
self.conv0 = ConvDownBlock(64, 64, 256)
self.conv1 = ConvDownBlock(64, 64, 128)
self.conv2 = ConvUpBlock(64, 64, 128)
self.conv3 = ConvUpBlock(64, 64, 256)
def forward(self, x):
x0 = self.conv0(x)
x1 = self.conv1(x0)
x2 = self.conv2(x1) + x0
x3 = self.conv3(x2) + x
return x3
|
AutoAvatar-main
|
models/PosedDecKNN_dPoses_dHs/nets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import torch
from torch.utils.data import DataLoader
import pickle
import datetime
import shutil
import glob
import random
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from utils.configs import *
from utils.io import *
import utils.CAPE as cape_utils
import utils.DFaust as dfaust_utils
from data.DFaust_dataset import DFaustDataset
from models.PosedDecKNN_dPoses_dHs.trainbox import Implicit_Trainbox
np.random.seed(777)
torch.random.manual_seed(777)
torch.cuda.manual_seed_all(777)
random.seed(777)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
from argparse import ArgumentParser
parser = ArgumentParser(description='Test AutoAvatar.')
parser.add_argument('--ws_dir', required=True, help='path of work space directory')
parser.add_argument('--ckpt_dir', required=True, help='path of checkpoint directory')
parser.add_argument('--ckpt_itr', default=7500, type=int)
parser.add_argument('--gpu_id', default=0, type=int)
parser.add_argument('--resolution', default=256, type=int, help='marching cube resolution')
parser.add_argument('--data_mode', default='extrap', type=str, help='which split to test. choose from ["extrap", "interp", "train"]')
cmd_args = parser.parse_args()
gpu_id = cmd_args.gpu_id
resolution = cmd_args.resolution
data_mode = cmd_args.data_mode
eval_frames = [3]  # alternative: list(range(3, 99999, 20))
ckpt_dir = cmd_args.ckpt_dir
#'/mnt/ImpDyn_ws/logs/Feb27_00-22-01_04s_50002_v2_PosedDecKNN_dPoses_dHs_AutoRegr_Rollout2'
#'/mnt/ImpDyn_ws/logs/May26_20-56-44_04s_50002_v2_PosedDecKNN_dPoses_dHs_HalfSub_AutoRegr_Rollout2'
#'/mnt/ImpDyn_ws/logs/Mar03_10-44-09_04s_50004_v2_PosedDecKNN_dPoses_dHs_AutoRegr_Rollout8'
#'/mnt/ImpDyn_ws/logs/Mar03_10-42-05_04s_50002_v2_PosedDecKNN_dPoses_dHs_AutoRegr_Rollout8'
#'/mnt/ImpDyn_ws/logs/Feb27_20-50-42_04s_50004_v2_PosedDecKNN_dPoses_dHs_AutoRegr_Rollout2'
#'/mnt/ImpDyn_ws/logs/Feb27_09-57-21_04s_50004_v2_PosedDecKNN_dPoses_dHs_AutoRegr'#Feb27_20-50-42_04s_50004_v2_PosedDecKNN_dPoses_dHs_AutoRegr_Rollout2'
#'/mnt/ImpDyn_ws/logs/Feb23_12-43-09_04s_50002_v2_PosedDecKNN_Dyna_dHs_AutoRegr_Rollout2'
#'/mnt/ImpDyn_ws/logs/Feb19_01-26-38_04s_50002_NA_PosedDecKNN_Dyna_Hs_AutoRegr'
ckpt_itr = cmd_args.ckpt_itr
#90000#7500
#105000
configs_path = glob.glob(os.path.join(ckpt_dir, 'net_def', '*.yaml'))[0]
args = load_configs(configs_path, cmd_args.ws_dir)
dev_tag = '72s'
subject_tag = args['data']['subject'] + '_' + args['data']['cloth_type']
current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
log_name = current_time + '_' + dev_tag + '_' + subject_tag + '_' + args['name'] + '_' + args['version'] + '_' + data_mode
args['log_dir'] = os.path.join(cmd_args.ws_dir, 'logs_test')
args['train']['n_rollout'] = 32
with open(args['data']['interp_bin_path'], 'rb') as f:
interp_list = pickle.load(f)
with open(args['data']['extrap_bin_path'], 'rb') as f:
extrap_list = pickle.load(f)
if data_mode == 'extrap':
seqs_list = extrap_list
elif data_mode == 'interp':
seqs_list = interp_list
elif data_mode == 'train':
seqs_list = [4]
if not os.path.exists(os.path.join(args['log_dir'], log_name)):
os.makedirs(os.path.join(args['log_dir'], log_name))
for seq_idx in seqs_list:
log_dir = os.path.join(args['log_dir'], log_name, 'seq_%03d' % seq_idx)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
dfaust_json = dfaust_utils.DFaustJson(args['data']['bin_path'])
validset = DFaustDataset(args, dfaust_json, [seq_idx], eval_frames=eval_frames)
valid_loader = DataLoader(validset, batch_size=1, shuffle=False, num_workers=4)
logger = TensorBoardLogger(log_dir, name='')
trainbox = Implicit_Trainbox(args, log_dir, resolution, eval_frames=eval_frames)
if ckpt_dir is not None:
trainbox.load_ckpt(ckpt_itr, ckpt_dir)
shutil.copy(os.path.realpath(__file__), os.path.join(log_dir, 'net_def'))
shutil.copy(configs_path, os.path.join(log_dir, 'net_def'))
train_params = {
'max_steps': 10,
'gpus': [gpu_id],
'logger': logger,
'max_epochs': 200000,
'log_every_n_steps': 50,
}
if 'check_val_every_n_epoch' in args['train']:
train_params['check_val_every_n_epoch'] = args['train']['check_val_every_n_epoch']
else:
train_params['val_check_interval'] = args['train']['ckpt_step']
trainer = Trainer(**train_params)
trainer.test(trainbox, valid_loader)
|
AutoAvatar-main
|
exps/PosedDecKNN_dPoses_dHs/implicit_eval_dfaust.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import torch
from torch.utils.data import DataLoader
import pickle
import datetime
import shutil
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from utils.configs import *
from utils.io import *
import utils.CAPE as cape_utils
import utils.DFaust as dfaust_utils
from data.DFaust_dataset import DFaustDataset
from models.PosedDecKNN_dPoses_dHs.trainbox import Implicit_Trainbox
np.random.seed(777)
torch.random.manual_seed(777)
def train(configs_path, args, log_name, gpu_id, resolution, max_steps, ckpt_dir, ckpt_itr, ShapeEncDec_ckpt_dir, ShapeEncDec_ckpt_itr, coarse_ckpt_dir=None, coarse_ckpt_itr=None):
dfaust_json = dfaust_utils.DFaustJson(args['data']['bin_path'])
with open(args['data']['train_bin_path'], 'rb') as f:
train_list = pickle.load(f)
with open(args['data']['interp_bin_path'], 'rb') as f:
interp_list = pickle.load(f)
with open(args['data']['extrap_bin_path'], 'rb') as f:
extrap_list = pickle.load(f)
trainset = DFaustDataset(args, dfaust_json, train_list)
validset = DFaustDataset(args, dfaust_json, extrap_list + interp_list, gap=10)
train_loader = DataLoader(trainset, batch_size=1, shuffle=True, num_workers=8)
valid_loader = DataLoader(validset, batch_size=1, shuffle=False, num_workers=4)
logger = TensorBoardLogger(args['log_dir'], name=log_name)
log_dir = os.path.join(args['log_dir'], log_name)
trainbox = Implicit_Trainbox(args, log_dir, resolution)
if ckpt_dir is not None:
trainbox.load_ckpt(ckpt_itr, ckpt_dir)
if ShapeEncDec_ckpt_dir is not None:
load_components(trainbox.dyn_net, ShapeEncDec_ckpt_dir, ShapeEncDec_ckpt_itr, 'shape_enc_dec')
if coarse_ckpt_dir is not None:
load_components(trainbox.dyn_net, coarse_ckpt_dir, coarse_ckpt_itr, 'shape_enc_dec')
load_components(trainbox.dyn_net, coarse_ckpt_dir, coarse_ckpt_itr, 'dynamics_net')
shutil.copy(os.path.realpath(__file__), os.path.join(log_dir, 'net_def'))
shutil.copy(configs_path, os.path.join(log_dir, 'net_def'))
train_params = {
'max_steps': max_steps,
'gpus': [gpu_id],
'logger': logger,
'max_epochs': 200000,
'log_every_n_steps': 50,
}
if 'check_val_every_n_epoch' in args['train']:
train_params['check_val_every_n_epoch'] = args['train']['check_val_every_n_epoch']
else:
train_params['val_check_interval'] = args['train']['ckpt_step']
trainer = Trainer(**train_params)
trainer.fit(trainbox, train_loader, valid_loader)
from argparse import ArgumentParser
parser = ArgumentParser(description='Train AutoAvatar.')
parser.add_argument('--ws_dir', required=True, help='path of work space directory')
parser.add_argument('--configs_path', required=True, help='path of configs file')
parser.add_argument('--configs_path_rollout', required=True, help='path of configs file')
parser.add_argument('--gpu_id', default=0, type=int)
parser.add_argument('--resolution', default=128, type=int, help='marching cube resolution')
parser.add_argument('--max_steps', default=90000, type=int, help='max training steps for the base training stage')
parser.add_argument('--max_steps_rollout', default=7500, type=int, help='max training steps for the rollout fine-tuning stage')
cmd_args = parser.parse_args()
ShapeEncDec_ckpt_dir = None
ShapeEncDec_ckpt_itr = None
gpu_id = cmd_args.gpu_id
resolution = cmd_args.resolution
max_steps = cmd_args.max_steps
configs_path = cmd_args.configs_path
args = load_configs(configs_path, cmd_args.ws_dir)
dev_tag = '04s'
subject_tag = args['data']['subject'] + '_' + args['data']['cloth_type']
current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
log_name = current_time + '_' + dev_tag + '_' + subject_tag + '_' + args['name'] + '_' + args['version']
log_dir = os.path.join(args['log_dir'], log_name)
train(configs_path, args, log_name, gpu_id, resolution, max_steps + 5, None, None, ShapeEncDec_ckpt_dir, ShapeEncDec_ckpt_itr)
ckpt_dir = log_dir
ckpt_itr = max_steps
gpu_id = cmd_args.gpu_id
resolution = cmd_args.resolution
max_steps = cmd_args.max_steps_rollout
configs_path = cmd_args.configs_path_rollout
args = load_configs(configs_path, cmd_args.ws_dir)
dev_tag = '04s'
subject_tag = args['data']['subject'] + '_' + args['data']['cloth_type']
current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
log_name = current_time + '_' + dev_tag + '_' + subject_tag + '_' + args['name'] + '_' + args['version']
log_dir = os.path.join(args['log_dir'], log_name)
train(configs_path, args, log_name, gpu_id, resolution, max_steps + 5, ckpt_dir, ckpt_itr, None, None)
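# Two-stage schedule: the first train() call fits the model with the base
# configs for max_steps iterations; the second call resumes from that
# checkpoint and fine-tunes with the rollout configs (longer autoregressive
# unrolling, fewer steps).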
|
AutoAvatar-main
|
exps/PosedDecKNN_dPoses_dHs/implicit_train_dfaust.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import torch
from torch.utils.data import Dataset
from pytorch3d.io import load_ply
import utils.CAPE as cape_utils
class CAPEDataset(Dataset):
def __init__(self, args, cape_json, seq_list, skip=1, gap=1, eval_frames=None) -> None:
super().__init__()
self.args = copy.deepcopy(args)
self.cape_json = cape_json
self.seq_list = seq_list
self.skip = skip
self.gap = gap
self.eval_frames = eval_frames
self.n_frames = args['model']['n_batch_frames']
self.n_rollout = args['train']['n_rollout']
self.raw_dataset_dir = args['data']['raw_dataset_dir']
self.dataset_dir = args['data']['dataset_dir']
self.smooth_tag = args['data']['smooth_tag']
self.faces = torch.from_numpy(np.load(os.path.join(args['data']['raw_dataset_dir'], 'misc', 'smpl_tris.npy')).astype(np.int32)).long()
self.samples = [[], []]
for seq_idx in seq_list:
seq = cape_json.data['seqs'][seq_idx]
seq_len = len(seq['frames'])
if eval_frames is None:
frame_idxs = list(range(0, seq_len - (self.n_frames - 2 + self.n_rollout) * skip, gap))
else:
frame_idxs = list(range(0, seq_len - self.n_frames + 1, 1))
self.samples[0] += [seq_idx] * len(frame_idxs)
self.samples[1] += frame_idxs
assert len(self.samples[0]) == len(self.samples[1])
def __len__(self):
return len(self.samples[0])
def __getitem__(self, index):
seq_idx, frame_idx = self.samples[0][index], self.samples[1][index]
end_idx = frame_idx + (self.n_frames + self.n_rollout - 1) * self.skip
if self.eval_frames is not None:
if frame_idx + self.args['model']['n_hist_frames'] == self.eval_frames[0]:
end_idx = len(self.cape_json.data['seqs'][seq_idx]['frames'])
else:
end_idx = min(end_idx, len(self.cape_json.data['seqs'][seq_idx]['frames']))
verts_list = []
faces_list = []
poses_list = []
verts_smt_list = []
faces_smt_list = []
for i in range(frame_idx, end_idx, self.skip):
frame = self.cape_json.data['seqs'][seq_idx]['frames'][i]
npz_path = os.path.join(self.raw_dataset_dir, frame['npz_path'])
data = np.load(npz_path)
verts, rot, transl = data['v_posed'], data['pose'], data['transl']
poses = np.concatenate([transl, rot], axis=0)
assert poses.shape == (75,)
ply_path = os.path.join(self.dataset_dir, self.smooth_tag, self.cape_json.data['subject'],
self.cape_json.data['seqs'][seq_idx]['seq_name'], npz_path.split('/')[-1][:-4] + '_smt.ply')
verts_smt, faces_smt = load_ply(ply_path)
verts_list.append(torch.from_numpy(verts).float())
faces_list.append(self.faces.clone())
poses_list.append(torch.from_numpy(poses).float())
verts_smt_list.append(verts_smt)
faces_smt_list.append(faces_smt)
poses_list = torch.stack(poses_list, dim=0)
return {'verts_detail': verts_list, 'faces_detail': faces_list, 'verts_smt': verts_smt_list, 'faces_smt': faces_smt_list, 'poses': poses_list}
|
AutoAvatar-main
|
data/CAPE_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import h5py
import sys
import os
import copy
import pickle
import yaml
import smplx
import open3d as o3d
from tqdm import tqdm
from pytorch3d.io import save_ply, load_obj, load_ply
from pytorch3d.transforms import axis_angle_to_matrix
from pytorch3d.transforms.rotation_conversions import matrix_to_axis_angle
from pytorch3d.ops import knn_points
from human_body_prior.tools.omni_tools import copy2cpu as c2c
from human_body_prior.body_model.body_model import BodyModel
import utils.DFaust as dfaust_utils
import utils.CAPE as cape_utils
from utils.configs import *
def generate_DFaust_SMPLH(data_dir, smpl_dir, out_dir, subject, subject_gender, gpu_id=0):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(os.path.join(out_dir, subject)):
os.mkdir(os.path.join(out_dir, subject))
bm_fname = os.path.join(smpl_dir, 'smplh/%s/model.npz' % subject_gender)
dmpl_fname = os.path.join(smpl_dir, 'dmpls/%s/model.npz' % subject_gender)
num_betas = 16 # number of body parameters
num_dmpls = 8 # number of DMPL parameters
bm = BodyModel(bm_fname=bm_fname, num_betas=num_betas, num_dmpls=num_dmpls, dmpl_fname=dmpl_fname)#.cuda(gpu_id)
faces = c2c(bm.f)
npz_files = sorted(os.listdir(os.path.join(data_dir, 'DFaust_67', subject)))
for npz_file in npz_files:
if '_poses' not in npz_file:
continue
if not os.path.exists(os.path.join(out_dir, subject, npz_file[:-4])):
os.mkdir(os.path.join(out_dir, subject, npz_file[:-4]))
bdata = np.load(os.path.join(data_dir, 'DFaust_67', subject, npz_file))
time_length = len(bdata['trans'])
body_parms = {
'root_orient': torch.Tensor(bdata['poses'][:, :3]),#.cuda(gpu_id), # controls the global root orientation
'pose_body': torch.Tensor(bdata['poses'][:, 3:66]),#.cuda(gpu_id), # controls the body
'pose_hand': torch.Tensor(bdata['poses'][:, 66:]),#.cuda(gpu_id), # controls the finger articulation
'trans': torch.Tensor(bdata['trans']),#.cuda(gpu_id), # controls the global body position
'betas': torch.Tensor(np.repeat(bdata['betas'][:num_betas][np.newaxis], repeats=time_length, axis=0)),#.cuda(gpu_id), # controls the body shape. Body shape is static
'dmpls': torch.Tensor(bdata['dmpls'][:, :num_dmpls]),#.cuda(gpu_id) # controls soft tissue dynamics
}
body_pose_beta = bm(**{k:v for k,v in body_parms.items() if k in ['pose_body', 'betas', 'root_orient', 'trans']})
for i in tqdm(range(time_length)):
verts = c2c(body_pose_beta.v[i])
verts = torch.from_numpy(verts)
verts_ = verts.clone()
verts[:, 1] = verts_[:, 2]
verts[:, 2] = -verts_[:, 1]
save_ply(os.path.join(out_dir, subject, npz_file[:-4], '%06d.ply' % i), verts, torch.from_numpy(faces))
def smplh_to_smpl(data_dir, subject, smpl_model_path, gpu_id=0):
if not os.path.exists(os.path.join(data_dir, 'smpl_poses')):
os.mkdir(os.path.join(data_dir, 'smpl_poses'))
if not os.path.exists(os.path.join(data_dir, 'smpl_poses', subject)):
os.mkdir(os.path.join(data_dir, 'smpl_poses', subject))
with open('data/smplh2smpl.yaml', 'r') as f:
default_configs = yaml.load(f, Loader=yaml.FullLoader)
seqs = sorted(os.listdir(os.path.join(data_dir, 'smplh_meshes', subject)))
for seq in seqs:
if not os.path.exists(os.path.join(data_dir, 'smpl_poses', subject, seq)):
os.mkdir(os.path.join(data_dir, 'smpl_poses', subject, seq))
configs = copy.deepcopy(default_configs)
configs['body_model']['folder'] = smpl_model_path
configs['datasets']['mesh_folder']['data_folder'] = os.path.join(data_dir, 'smplh_meshes', subject, seq)
configs['output_folder'] = os.path.join(data_dir, 'smpl_poses', subject, seq)
with open('tmp/configs.yaml', 'w') as f:
yaml.dump(configs, f)
        os.system('cd external/smplx && python -m transfer_model --exp-cfg tmp/configs.yaml')
def DFaust_smplh_to_smpl(dataset_dir, smpl_dir, out_dir, subject, subject_gender, gpu_id=0):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(os.path.join(out_dir, subject)):
os.mkdir(os.path.join(out_dir, subject))
bm_fname = os.path.join(smpl_dir, 'smplh/%s/model.npz' % subject_gender)
dmpl_fname = os.path.join(smpl_dir, 'dmpls/%s/model.npz' % subject_gender)
num_betas = 10 # number of body parameters
num_dmpls = 8 # number of DMPL parameters
bm = BodyModel(bm_fname=bm_fname, num_betas=num_betas, num_dmpls=num_dmpls, dmpl_fname=dmpl_fname)#.cuda(gpu_id)
faces = c2c(bm.f)
npz_files = sorted(os.listdir(os.path.join(dataset_dir, 'DFaust_67', subject)))
for npz_file in npz_files:
if '_poses' not in npz_file:
continue
bdata = np.load(os.path.join(dataset_dir, 'DFaust_67', subject, npz_file))
time_length = len(bdata['trans'])
body_parms = {
'root_orient': torch.Tensor(bdata['poses'][:, :3]),#.cuda(gpu_id), # controls the global root orientation
'pose_body': torch.Tensor(bdata['poses'][:, 3:66]),#.cuda(gpu_id), # controls the body
'pose_hand': torch.Tensor(bdata['poses'][:, 66:]),#.cuda(gpu_id), # controls the finger articulation
'trans': torch.Tensor(bdata['trans']),#.cuda(gpu_id), # controls the global body position
'betas': torch.Tensor(np.repeat(bdata['betas'][:num_betas][np.newaxis], repeats=time_length, axis=0)),#.cuda(gpu_id), # controls the body shape. Body shape is static
'dmpls': torch.Tensor(bdata['dmpls'][:, :num_dmpls]),#.cuda(gpu_id) # controls soft tissue dynamics
}
body_pose_beta = bm(**{k:v for k,v in body_parms.items() if k in ['pose_body', 'betas', 'root_orient', 'trans']})
root_joints = body_pose_beta.Jtr[:, 0]
smpl_poses = torch.Tensor(bdata['poses'][:, 3:72])
global_orient = torch.Tensor(bdata['poses'][:, :3])
transls = torch.Tensor(bdata['trans'])
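        # Convert z-up (SMPL-H/AMASS) coordinates to y-up: flip_yz_mat maps
        # (x, y, z) -> (x, z, -y). The global orientation is rotated by this
        # flip, and the translation is recomputed so that the flipped root
        # joint position is reproduced while the pose-dependent root offset
        # (root joint minus translation) is left unchanged.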
flip_yz_mat = torch.tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]]).float()[None].expand(time_length, 3, 3)
global_rotmat = axis_angle_to_matrix(global_orient)
global_rotmat = torch.bmm(flip_yz_mat, global_rotmat)
global_orient = matrix_to_axis_angle(global_rotmat)
root_joints_yup = torch.bmm(flip_yz_mat, root_joints[..., None])[..., 0]
root_joints_notransl = root_joints - transls
transls = root_joints_yup - root_joints_notransl
poses = torch.cat([transls, global_orient, smpl_poses], dim=-1)
assert poses.shape == (time_length, 75)
np.savez_compressed(os.path.join(out_dir, subject, npz_file), poses=poses.numpy())
# Save template
shape_data = np.load(os.path.join(dataset_dir, 'DFaust_67', subject, 'shape.npz'))
betas = torch.Tensor(shape_data['betas'][:10]).unsqueeze(0)
body_pose_beta = bm(betas=betas)
verts = c2c(body_pose_beta.v[0])
verts = torch.from_numpy(verts)
# verts_ = verts.clone()
# verts[:, 1] = verts_[:, 2]
# verts[:, 2] = -verts_[:, 1]
save_ply(os.path.join(out_dir, subject, 'v_template.ply'), verts, torch.from_numpy(faces))
def DFaust_parse_raw(dataset_dir, subject):
dfaust_json = dfaust_utils.DFaustJson()
seq_names = sorted(os.listdir(os.path.join(dataset_dir, 'scans', subject)))
seqs = []
for seq_name in seq_names:
ply_names = sorted(os.listdir(os.path.join(dataset_dir, 'scans', subject, seq_name)))
poses = np.load(os.path.join(dataset_dir, 'smpl_poses', subject, '%s_%s_poses.npz' % (subject, seq_name)))['poses']
pre_idx = None
frames = []
for i, ply_name in enumerate(ply_names):
idx = int(ply_name.split('.')[-2])
if pre_idx is not None and idx != pre_idx + 1:
seqs = dfaust_json.append_seqs(seqs, seq_name, frames)
frames = []
frames = dfaust_json.append_frames(frames, os.path.join('scans', subject, seq_name, ply_name), poses[i])
pre_idx = idx
seqs = dfaust_json.append_seqs(seqs, seq_name, frames)
dfaust_json.set_data(subject, seqs)
dfaust_json.dump_bin_file(os.path.join(dataset_dir, '%s_raw.bin' % subject))
print(dfaust_json.num_of_seqs())
print(dfaust_json.num_of_frames())
for seq in dfaust_json.data['seqs']:
print(seq['id'], seq['seq_name'])
def split_train_test(dataset_dir, tag, bin_path, subject, interp_acts, extrap_acts):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
dfaust_json_new = dfaust_utils.DFaustJson()
seqs_new = []
train_list = []
interp_list = []
extrap_list = []
for seq in dfaust_json.data['seqs']:
if seq['id'] in extrap_acts[0]:
assert seq['seq_name'] in extrap_acts[1]
seqs_new = dfaust_json_new.append_seqs(seqs_new, seq['seq_name'], copy.deepcopy(seq['frames']))
extrap_list.append(seqs_new[-1]['id'])
elif seq['id'] in interp_acts[0]:
assert seq['seq_name'] in interp_acts[1]
half_len = len(seq['frames']) // 2
seqs_new = dfaust_json_new.append_seqs(seqs_new, seq['seq_name'], copy.deepcopy(seq['frames'][:half_len]))
train_list.append(seqs_new[-1]['id'])
seqs_new = dfaust_json_new.append_seqs(seqs_new, seq['seq_name'], copy.deepcopy(seq['frames'][half_len:]))
interp_list.append(seqs_new[-1]['id'])
else:
seqs_new = dfaust_json_new.append_seqs(seqs_new, seq['seq_name'], copy.deepcopy(seq['frames']))
train_list.append(seqs_new[-1]['id'])
dfaust_json_new.set_data(subject, seqs_new)
dfaust_json_new.dump_bin_file(os.path.join(dataset_dir, '%s_%s.bin' % (subject, tag)))
print(dfaust_json_new.num_of_seqs())
print(dfaust_json_new.num_of_frames())
with open(os.path.join(dataset_dir, '%s_%s_train.bin' % (subject, tag)), 'wb') as f:
pickle.dump(train_list, f)
with open(os.path.join(dataset_dir, '%s_%s_interp.bin' % (subject, tag)), 'wb') as f:
pickle.dump(interp_list, f)
with open(os.path.join(dataset_dir, '%s_%s_extrap.bin' % (subject, tag)), 'wb') as f:
pickle.dump(extrap_list, f)
print(train_list)
print(interp_list)
print(extrap_list)
def add_transl(dataset_dir, bin_path, subject, smpl_path):
smpl_model = smplx.SMPLLayer(model_path=smpl_path)
dfaust_json = dfaust_utils.DFaustJson(bin_path)
betas = []
for seq in tqdm(dfaust_json.data['seqs']):
bdata = np.load(os.path.join(dataset_dir, 'DFaust_67', subject, '%s_%s_poses.npz' % (subject, seq['seq_name'])))
for i in range(len(seq['frames'])):
frame = seq['frames'][i]
idx = int(frame['pose_path'].split('/')[-1][:-4])
with open(os.path.join(dataset_dir, frame['pose_path']), 'rb') as f:
data = pickle.load(f)
verts_smpl_ref, _, _ = load_obj(os.path.join(dataset_dir, frame['pose_path'][:-4] + '.obj'))
body_pose = data['full_pose'][0].detach().cpu()[None, 1:]
global_orient = data['full_pose'][0].detach().cpu()[None, 0]
verts_smpl = smpl_model(betas=data['betas'].detach().cpu(), body_pose=body_pose, global_orient=global_orient).vertices[0]
transl = (verts_smpl_ref - verts_smpl).mean(dim=0)
rot = matrix_to_axis_angle(data['full_pose'][0].detach().cpu())
assert rot.shape == (24, 3)
poses = np.concatenate([transl, rot.view(72).numpy()], axis=0)
assert poses.shape == (75,)
frame['poses'] = poses
betas.append(data['betas'].detach().cpu())
betas = torch.cat(betas, dim=0).mean(dim=0)[None]
v_template = smpl_model(betas=betas).vertices[0]
save_ply(os.path.join(dataset_dir, 'smpl_poses', subject, 'v_template.ply'), v_template, smpl_model.faces_tensor)
dfaust_json.dump_bin_file(bin_path)
def simplify_scans(ws_dir, dataset_dir, bin_path, config_path):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
smpl_model = cape_utils.load_smpl(load_configs(config_path, ws_dir)).cuda()
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple')):
os.mkdir(os.path.join(dataset_dir, 'scans_simple'))
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'])):
os.mkdir(os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject']))
for seq in tqdm(dfaust_json.data['seqs']):
mesh_dir = os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'], seq['seq_name'])
if not os.path.exists(mesh_dir):
os.mkdir(mesh_dir)
for i in tqdm(range(len(seq['frames']))):
frame = seq['frames'][i]
ply_path = os.path.join(dataset_dir, frame['ply_path'])
verts, faces = load_ply(ply_path)
verts, faces = verts.cuda(), faces.cuda()
poses = torch.from_numpy(frame['poses']).float().cuda()[None]
verts_smpl = smpl_model(poses).vertices[0]
bbmin = verts_smpl.min(dim=0)[0][None] - 0.1
bbmax = verts_smpl.max(dim=0)[0][None] + 0.1
mask_min = (verts > bbmin).long().cumprod(dim=-1)[:, -1]
mask_max = (verts < bbmax).long().cumprod(dim=-1)[:, -1]
verts_mask = mask_min * mask_max
faces_mask = verts_mask[faces[:, 0]] * verts_mask[faces[:, 1]] * verts_mask[faces[:, 2]]
faces_val = faces[faces_mask.bool()]
verts_idxs_new2old = torch.arange(0, verts.shape[0]).long()[verts_mask.bool()]
verts_idxs_old2new = torch.zeros_like(verts_mask) - 1
verts_idxs_old2new[verts_idxs_new2old] = torch.arange(0, verts_idxs_new2old.shape[0]).long().cuda()
faces = verts_idxs_old2new[faces_val]
verts = verts[verts_idxs_new2old]
mesh_o3d = o3d.geometry.TriangleMesh()
mesh_o3d.vertices = o3d.utility.Vector3dVector(verts.cpu().numpy())
mesh_o3d.triangles = o3d.utility.Vector3iVector(faces.cpu().numpy())
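            # Quadric decimation to 0.075 * 0.67 ~= 5% of the original face
            # count; simplify_scans_2nd below halves the result once more.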
mesh_o3d = mesh_o3d.simplify_quadric_decimation(int(faces.shape[0] * 0.075 * 0.67))
verts, faces = torch.from_numpy(np.asarray(mesh_o3d.vertices)), torch.from_numpy(np.asarray(mesh_o3d.triangles))
save_ply(os.path.join(mesh_dir, ply_path.split('/')[-1]), verts, faces)
def simplify_scans_2nd(dataset_dir, bin_path):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple_2nd')):
os.mkdir(os.path.join(dataset_dir, 'scans_simple_2nd'))
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple_2nd', dfaust_json.data['subject'])):
os.mkdir(os.path.join(dataset_dir, 'scans_simple_2nd', dfaust_json.data['subject']))
for seq in tqdm(dfaust_json.data['seqs']):
mesh_dir = os.path.join(dataset_dir, 'scans_simple_2nd', dfaust_json.data['subject'], seq['seq_name'])
if not os.path.exists(mesh_dir):
os.mkdir(mesh_dir)
for i in tqdm(range(len(seq['frames']))):
frame = seq['frames'][i]
ply_path = os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'], seq['seq_name'], frame['ply_path'].split('/')[-1])
verts, faces = load_ply(ply_path)
mesh_o3d = o3d.geometry.TriangleMesh()
mesh_o3d.vertices = o3d.utility.Vector3dVector(verts.cpu().numpy())
mesh_o3d.triangles = o3d.utility.Vector3iVector(faces.cpu().numpy())
mesh_o3d = mesh_o3d.simplify_quadric_decimation(int(faces.shape[0] * 0.5))
verts, faces = torch.from_numpy(np.asarray(mesh_o3d.vertices)), torch.from_numpy(np.asarray(mesh_o3d.triangles))
save_ply(os.path.join(mesh_dir, ply_path.split('/')[-1]), verts, faces)
def filter_outlier_verts(ws_dir, dataset_dir, bin_path, config_path):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
smpl_model = cape_utils.load_smpl(load_configs(config_path, ws_dir)).cuda()
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple')):
os.mkdir(os.path.join(dataset_dir, 'scans_simple'))
if not os.path.exists(os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'])):
os.mkdir(os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject']))
for seq in tqdm(dfaust_json.data['seqs']):
mesh_dir = os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'], seq['seq_name'])
if not os.path.exists(mesh_dir):
os.mkdir(mesh_dir)
for i in tqdm(range(len(seq['frames']))):
frame = seq['frames'][i]
ply_path = os.path.join(dataset_dir, 'scans_simple', dfaust_json.data['subject'], seq['seq_name'], frame['ply_path'].split('/')[-1])
verts, faces = load_ply(ply_path)
verts, faces = verts.cuda(), faces.cuda()
poses = torch.from_numpy(frame['poses']).float().cuda()[None]
verts_smpl = smpl_model(poses).vertices[0]
dst, _, _ = knn_points(verts[None], verts_smpl[None], K=1, return_nn=True)
verts_mask = (dst.sqrt() < 0.1)[0, ..., 0]
if (~verts_mask).sum().item() > 0:
verts_mask = verts_mask.long()
faces_mask = verts_mask[faces[:, 0]] * verts_mask[faces[:, 1]] * verts_mask[faces[:, 2]]
faces_val = faces[faces_mask.bool()]
verts_idxs_new2old = torch.arange(0, verts.shape[0]).long()[verts_mask.bool()]
verts_idxs_old2new = torch.zeros_like(verts_mask) - 1
verts_idxs_old2new[verts_idxs_new2old] = torch.arange(0, verts_idxs_new2old.shape[0]).long().cuda()
faces = verts_idxs_old2new[faces_val]
verts = verts[verts_idxs_new2old]
save_ply(os.path.join(mesh_dir, ply_path.split('/')[-1]), verts, faces)
def save_registered_mesh(dataset_dir, subject, h5py_path):
if not os.path.exists(os.path.join(dataset_dir, 'reg_meshes')):
os.mkdir(os.path.join(dataset_dir, 'reg_meshes'))
if not os.path.exists(os.path.join(dataset_dir, 'reg_meshes', subject)):
os.mkdir(os.path.join(dataset_dir, 'reg_meshes', subject))
seq_names = sorted(os.listdir(os.path.join(dataset_dir, 'scans', subject)))
for seq_name in tqdm(seq_names):
ply_names = sorted(os.listdir(os.path.join(dataset_dir, 'scans', subject, seq_name)))
mesh_dir = os.path.join(dataset_dir, 'reg_meshes', subject, seq_name)
if not os.path.exists(mesh_dir):
os.mkdir(mesh_dir)
sidseq = subject + '_' + seq_name
with h5py.File(h5py_path, 'r') as f:
if sidseq not in f:
print('Sequence %s from subject %s not in %s' % (seq_name, subject, h5py_path))
f.close()
sys.exit(1)
verts_seq = np.array(f[sidseq]).astype(np.float32).transpose([2, 0, 1])
faces = np.array(f['faces']).astype(np.float32)
for i, ply_name in tqdm(enumerate(ply_names)):
verts = verts_seq[i]
save_ply(os.path.join(mesh_dir, ply_name), torch.from_numpy(verts), torch.from_numpy(faces))
def add_idx(bin_path):
dfaust_json = dfaust_utils.DFaustJson(bin_path)
count = 0
for seq in tqdm(dfaust_json.data['seqs']):
for i in tqdm(range(len(seq['frames']))):
frame = seq['frames'][i]
frame['z_id'] = count
count += 1
dfaust_json.dump_bin_file(bin_path)
print(count)
if __name__ == '__main__':
# """
# generate_DFaust_SMPLH('/mnt/ImpDyn_ws/DFaust',
# '/mnt/ImpDyn_ws/SMPL',
# '/mnt/ImpDyn_ws/DFaust/smplh_meshes',
# '50002', 'male', gpu_id=0)
# """
# """
# smplh_to_smpl('/mnt/ImpDyn_ws/DFaust',
# '50002',
# '/mnt/ImpDyn_ws/SMPL/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
# """
# """
# DFaust_parse_raw('/mnt/ImpDyn_ws/DFaust', '50002')
# """
# """
# # 50002: interp (1st half train): seq_000_chicken_wings, seq_014_running_on_spot; extrap: seq_004_jumping_jacks, seq_015_shake_arms
# split_train_test('/mnt/ImpDyn_ws/DFaust', 'v1', '/mnt/ImpDyn_ws/DFaust/50002_raw.bin', '50002',
# ([0, 14], ['chicken_wings', 'running_on_spot']), ([4, 15], ['jumping_jacks', 'shake_arms']))
# """
# """
# add_transl('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', '50002',
# '/mnt/ImpDyn_ws/SMPL/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
# """
# """
# simplify_scans('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# simplify_scans_2nd('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# """
# """
# filter_outlier_verts('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# """
# """
# save_registered_mesh('/mnt/ImpDyn_ws/DFaust', '50002', '/mnt/ImpDyn_ws/DFaust/registrations_m.hdf5')
# """
# """
# add_idx('/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# """
# # New process ---------------------
# DFaust_smplh_to_smpl('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/SMPL', '/mnt/ImpDyn_ws/DFaust/smpl_poses', '50002', 'male', gpu_id=0)
# DFaust_parse_raw('/mnt/ImpDyn_ws/DFaust', '50002')
# # 50002: interp (1st half train): seq_000_chicken_wings, seq_014_running_on_spot; extrap: seq_009_one_leg_jump, seq_010_one_leg_jump
# split_train_test('/mnt/ImpDyn_ws/DFaust', 'v2', '/mnt/ImpDyn_ws/DFaust/50002_raw.bin', '50002',
# ([0, 14], ['chicken_wings', 'running_on_spot']), ([9, 10], ['one_leg_jump', 'one_leg_jump']))
# """
# simplify_scans('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# simplify_scans_2nd('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# filter_outlier_verts('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# save_registered_mesh('/mnt/ImpDyn_ws/DFaust', '50002', '/mnt/ImpDyn_ws/DFaust/registrations_m.hdf5')
# """
# add_idx('/mnt/ImpDyn_ws/DFaust/50002_v2.bin')
# # ---------------------------------
# """
# generate_DFaust_SMPLH('/mnt/ImpDyn_ws/DFaust',
# '/mnt/ImpDyn_ws/SMPL',
# '/mnt/ImpDyn_ws/DFaust/smplh_meshes',
# '50004', 'female', gpu_id=0)
# """
# """
# smplh_to_smpl('/mnt/ImpDyn_ws/DFaust',
# '50004',
# '/mnt/ImpDyn_ws/SMPL/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
# """
# """
# DFaust_parse_raw('/mnt/ImpDyn_ws/DFaust', '50004')
# """
# """
# # 50004: interp (2nd half train): seq_000_chicken_wings, seq_014_running_on_spot; extrap: seq_004_jumping_jacks, seq_015_shake_arms
# split_train_test('/mnt/ImpDyn_ws/DFaust', 'v1', '/mnt/ImpDyn_ws/DFaust/50004_raw.bin', '50004',
# ([0, 18], ['chicken_wings', 'running_on_spot']), ([3, 19], ['jumping_jacks', 'shake_arms']))
# """
# """
# add_transl('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50004_v1.bin', '50004',
# '/mnt/ImpDyn_ws/SMPL/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
# """
# """
# simplify_scans('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50004_v1.bin', 'configs/DispInput/DFaust_50004/AutoRegr.yaml')
# # simplify_scans_2nd('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# """
# # New process ---------------------
# DFaust_smplh_to_smpl('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/SMPL', '/mnt/ImpDyn_ws/DFaust/smpl_poses', '50004', 'female', gpu_id=0)
# DFaust_parse_raw('/mnt/ImpDyn_ws/DFaust', '50004')
# # 50004: interp (1st half train): seq_000_chicken_wings, seq_018_running_on_spot; extrap: seq_016_one_leg_loose
# split_train_test('/mnt/ImpDyn_ws/DFaust', 'v2', '/mnt/ImpDyn_ws/DFaust/50004_raw.bin', '50004',
# ([0, 18], ['chicken_wings', 'running_on_spot']), ([16], ['one_leg_loose']))
# """
# simplify_scans('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin', 'configs/DispInput/DFaust_50002/AutoRegr.yaml')
# simplify_scans_2nd('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50002_v1.bin')
# """
# filter_outlier_verts('/mnt/ImpDyn_ws/DFaust', '/mnt/ImpDyn_ws/DFaust/50004_v2.bin', 'configs/DispInput/DFaust_50004/AutoRegr.yaml')
# """
# save_registered_mesh('/mnt/ImpDyn_ws/DFaust', '50004', '/mnt/ImpDyn_ws/DFaust/registrations_f.hdf5')
# """
# add_idx('/mnt/ImpDyn_ws/DFaust/50004_v2.bin')
from argparse import ArgumentParser
parser = ArgumentParser(description='Process DFaust data.')
parser.add_argument('--ws_dir', required=True, help='path of work space directory')
args = parser.parse_args()
# New process ---------------------
DFaust_smplh_to_smpl(
os.path.join(args.ws_dir, 'DFaust'),
os.path.join(args.ws_dir, 'SMPL'),
os.path.join(args.ws_dir, 'DFaust', 'smpl_poses'),
'50002', 'male', gpu_id=0
)
DFaust_parse_raw(os.path.join(args.ws_dir, 'DFaust'), '50002')
# 50002: interp (1st half train): seq_000_chicken_wings, seq_014_running_on_spot; extrap: seq_009_one_leg_jump, seq_010_one_leg_jump
split_train_test(
os.path.join(args.ws_dir, 'DFaust'),
'v2',
os.path.join(args.ws_dir, 'DFaust', '50002_raw.bin'),
'50002',
([0, 14], ['chicken_wings', 'running_on_spot']),
([9, 10], ['one_leg_jump', 'one_leg_jump'])
)
simplify_scans(
args.ws_dir,
os.path.join(args.ws_dir, 'DFaust'),
os.path.join(args.ws_dir, 'DFaust', '50002_v2.bin'),
'configs/PosedDecKNN_dPoses_dHs/AutoRegr.yaml'
)
filter_outlier_verts(
args.ws_dir,
os.path.join(args.ws_dir, 'DFaust'),
os.path.join(args.ws_dir, 'DFaust', '50002_v2.bin'),
'configs/PosedDecKNN_dPoses_dHs/AutoRegr.yaml'
)
# save_registered_mesh('/mnt/ImpDyn_ws/DFaust', '50002', '/mnt/ImpDyn_ws/DFaust/registrations_m.hdf5')
add_idx(os.path.join(args.ws_dir, 'DFaust', '50002_v2.bin'))
# ---------------------------------
|
AutoAvatar-main
|
data/DFaust_generate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import pickle
import torch
from torch.utils.data import Dataset
from pytorch3d.io import load_ply
import utils.CAPE as cape_utils
class DFaustDataset(Dataset):
def __init__(self, args, dfaust_json, seq_list, skip=1, gap=1, eval_frames=None, no_mesh=False) -> None:
super().__init__()
self.args = copy.deepcopy(args)
self.dfaust_json = dfaust_json
self.seq_list = seq_list
self.skip = skip
self.gap = gap
self.eval_frames = eval_frames
self.no_mesh = no_mesh
self.n_frames = args['model']['n_batch_frames']
self.n_rollout = args['train']['n_rollout']
self.raw_dataset_dir = args['data']['raw_dataset_dir']
self.dataset_dir = args['data']['dataset_dir']
self.smooth_tag = args['data']['smooth_tag']
# self.faces = torch.from_numpy(np.load(os.path.join(args['data']['raw_dataset_dir'], 'misc', 'smpl_tris.npy')).astype(np.int32)).long()
self.samples = [[], []]
for seq_idx in seq_list:
seq = dfaust_json.data['seqs'][seq_idx]
seq_len = len(seq['frames'])
if eval_frames is None:
frame_idxs = list(range(0, seq_len - (self.n_frames - 2 + self.n_rollout) * skip, gap))
else:
frame_idxs = list(range(0, seq_len - self.n_frames + 1, 1))
self.samples[0] += [seq_idx] * len(frame_idxs)
self.samples[1] += frame_idxs
assert len(self.samples[0]) == len(self.samples[1])
def __len__(self):
return len(self.samples[0])
def __getitem__(self, index):
seq_idx, frame_idx = self.samples[0][index], self.samples[1][index]
end_idx = frame_idx + (self.n_frames + self.n_rollout - 1) * self.skip
if self.eval_frames is not None:
if frame_idx + self.args['model']['n_hist_frames'] == self.eval_frames[0]:
end_idx = len(self.dfaust_json.data['seqs'][seq_idx]['frames'])
else:
end_idx = min(end_idx, len(self.dfaust_json.data['seqs'][seq_idx]['frames']))
verts_list = []
faces_list = []
poses_list = []
verts_smt_list = []
faces_smt_list = []
z_ids_list = []
for i in range(frame_idx, end_idx, self.skip):
frame = self.dfaust_json.data['seqs'][seq_idx]['frames'][i]
ply_path = os.path.join(self.raw_dataset_dir, frame['ply_path'])
ply_path_ = os.path.join(self.dataset_dir, 'scans_simple', self.dfaust_json.data['subject'],
self.dfaust_json.data['seqs'][seq_idx]['seq_name'], ply_path.split('/')[-1])
if not self.no_mesh:
verts, faces = load_ply(ply_path_)
else:
verts, faces = torch.zeros((0, 3), dtype=torch.float32), torch.zeros((0, 3), dtype=torch.long)
poses = frame['poses']
assert poses.shape == (75,)
if not self.args['data']['separate_detail']:
verts_smt, faces_smt = verts.clone(), faces.clone()
else:
ply_path_ = os.path.join(self.dataset_dir, self.smooth_tag, self.dfaust_json.data['subject'],
self.dfaust_json.data['seqs'][seq_idx]['seq_name'], ply_path.split('/')[-1][:-4] + '_smt.ply')
verts_smt, faces_smt = load_ply(ply_path_)
z_ids_list.append(torch.tensor(frame['z_id']).long())
verts_list.append(verts)
faces_list.append(faces)
poses_list.append(torch.from_numpy(poses).float())
verts_smt_list.append(verts_smt)
faces_smt_list.append(faces_smt)
z_ids_list = torch.stack(z_ids_list, dim=0)
poses_list = torch.stack(poses_list, dim=0)
return {'verts_detail': verts_list, 'faces_detail': faces_list, 'verts_smt': verts_smt_list, 'faces_smt': faces_smt_list, 'poses': poses_list, 'z_ids': z_ids_list}
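def collate_mesh_batch(batch):
    # A minimal collate_fn sketch (not part of the original repo; the name is
    # hypothetical): scans have varying vertex counts, so the per-frame mesh
    # lists are kept as plain Python lists and only the fixed-size pose / z_id
    # tensors are stacked. Stacking assumes every sample in the batch shares
    # the same clip length, which holds for training (eval_frames=None).
    return {
        'verts_detail': [b['verts_detail'] for b in batch],
        'faces_detail': [b['faces_detail'] for b in batch],
        'verts_smt': [b['verts_smt'] for b in batch],
        'faces_smt': [b['faces_smt'] for b in batch],
        'poses': torch.stack([b['poses'] for b in batch], dim=0),
        'z_ids': torch.stack([b['z_ids'] for b in batch], dim=0),
    }
# Usage sketch:
#   loader = torch.utils.data.DataLoader(
#       DFaustDataset(args, dfaust_json, train_seq_list), batch_size=2,
#       shuffle=True, collate_fn=collate_mesh_batch)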
|
AutoAvatar-main
|
data/DFaust_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import pickle
import utils.CAPE as cape_utils
def CAPE_parse_raw(raw_dataset_dir, out_dir, subject, cloth_type):
if not os.path.exists(out_dir):
os.mkdir(out_dir)
cape_json = cape_utils.CAPEJson()
seqs = []
txt_path = os.path.join(raw_dataset_dir, 'seq_lists', 'seq_list_%s.txt' % subject)
with open(txt_path, 'r') as f:
for line in f:
if cloth_type not in line:
continue
seq_name = line.strip().split()[0]
seq_dir = os.path.join(raw_dataset_dir, 'sequences', subject, seq_name)
npz_files = sorted(os.listdir(seq_dir))
pre_idx = None
frames = []
for i, npz_file in enumerate(npz_files):
idx = int(npz_file.strip().split('.')[1])
if pre_idx is not None and idx != pre_idx + 1:
seqs = cape_json.append_seqs(seqs, seq_name, frames)
frames = []
frames = cape_json.append_frames(frames, os.path.join('sequences', subject, seq_name, npz_file))
pre_idx = idx
seqs = cape_json.append_seqs(seqs, seq_name, frames)
cape_json.set_data(subject, cloth_type, seqs)
cape_json.dump_bin_file(os.path.join(out_dir, '%s_%s.bin' % (subject, cloth_type)))
print(cape_json.num_of_seqs())
print(cape_json.num_of_frames())
def split_train_test(out_dir, tag, bin_path, interp_acts, extrap_acts, test_trial):
def act_in_acts(query_act, acts):
for act in acts:
if act in query_act:
return True
return False
cape_json = cape_utils.CAPEJson(bin_path)
train_list = []
interp_list = []
extrap_list = []
for seq in cape_json.data['seqs']:
if act_in_acts(seq['seq_name'], extrap_acts):
extrap_list.append(seq['id'])
elif act_in_acts(seq['seq_name'], interp_acts):
if test_trial in seq['seq_name']:
interp_list.append(seq['id'])
else:
train_list.append(seq['id'])
else:
train_list.append(seq['id'])
with open(os.path.join(out_dir, '%s_train.bin' % tag), 'wb') as f:
pickle.dump(train_list, f)
with open(os.path.join(out_dir, '%s_interp.bin' % tag), 'wb') as f:
pickle.dump(interp_list, f)
with open(os.path.join(out_dir, '%s_extrap.bin' % tag), 'wb') as f:
pickle.dump(extrap_list, f)
print(train_list)
print(interp_list)
print(extrap_list)
if __name__ == '__main__':
# CAPE_parse_raw('/mnt/Datasets/CAPE/cape_release', '/mnt/Datasets/CAPE', '03375', 'longlong')
# split_train_test('/mnt/Datasets/CAPE/', '03375_longlong', '/mnt/Datasets/CAPE/03375_longlong.bin',
# ['box', 'swim', 'twist_tilt'], ['athletics', 'frisbee', 'volleyball'], 'trial1')
CAPE_parse_raw('/mnt/Datasets/CAPE/cape_release', '/mnt/Datasets/CAPE', '00134', 'shortlong')
|
AutoAvatar-main
|
data/CAPE_generate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import copy
import pickle
import torch
from torch.utils.data import Dataset
from pytorch3d.io import load_ply
import utils.CAPE as cape_utils
class AistDataset(Dataset):
def __init__(self, args, dfaust_json, seq_dir, skip=1, gap=1, eval_frames=None) -> None:
super().__init__()
self.args = copy.deepcopy(args)
self.dfaust_json = dfaust_json
self.seq_dir = seq_dir
self.skip = skip
self.gap = gap
self.eval_frames = eval_frames
self.n_frames = args['model']['n_batch_frames']
self.n_rollout = args['train']['n_rollout']
self.raw_dataset_dir = args['data']['raw_dataset_dir']
self.dataset_dir = args['data']['dataset_dir']
self.smooth_tag = args['data']['smooth_tag']
# self.faces = torch.from_numpy(np.load(os.path.join(args['data']['raw_dataset_dir'], 'misc', 'smpl_tris.npy')).astype(np.int32)).long()
self.frame_list = sorted(os.listdir(seq_dir))
def __len__(self):
return 1
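    # The whole sequence is exposed as a single sample: the pose stream comes
    # from the npz frames in seq_dir, while the mesh and pose that seed the
    # autoregressive rollout are borrowed from the first frame of the scan
    # data referenced by dfaust_json.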
def __getitem__(self, index):
verts_list = []
faces_list = []
poses_list = []
verts_smt_list = []
faces_smt_list = []
for i in range(index, len(self.frame_list), self.skip):
data = np.load(os.path.join(self.seq_dir, self.frame_list[i]))
rot, transl = data['pose'], data['transl']
poses = np.concatenate([transl, rot], axis=0)
assert poses.shape == (75,)
poses_list.append(torch.from_numpy(poses).float())
poses_list = torch.stack(poses_list, dim=0)
frame = self.dfaust_json.data['seqs'][0]['frames'][0]
ply_path = os.path.join(self.raw_dataset_dir, frame['ply_path'])
ply_path_ = os.path.join(self.dataset_dir, 'scans_simple_2nd', self.dfaust_json.data['subject'],
self.dfaust_json.data['seqs'][0]['seq_name'], ply_path.split('/')[-1])
verts_init, faces_init = load_ply(ply_path_)
poses_init = torch.from_numpy(frame['poses']).float()
assert poses_init.shape == (75,)
z_ids_list = torch.zeros(poses_list.shape[0]).long()
return {'verts_detail': verts_list, 'faces_detail': faces_list, 'verts_smt': verts_smt_list, 'faces_smt': faces_smt_list, 'poses': poses_list,
'verts_init': verts_init, 'faces_init': faces_init, 'poses_init': poses_init, 'z_ids': z_ids_list}
|
AutoAvatar-main
|
data/Aist_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
import operator
from datetime import date
import torch
import torch.nn as nn
#from torch.utils.tensorboard import SummaryWriter
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from data.data_loader import build_data_loader
from utils.config import setup
import utils.saver as saver
from utils.progress import AverageMeter, ProgressMeter, accuracy
import utils.comm as comm
import utils.logging as logging
from evaluate import attentive_nas_eval as attentive_nas_eval
from sampler.attentive_nas_sampler import ArchSampler as ArchSampler
from solver import build_optimizer, build_lr_scheduler
import utils.loss_ops as loss_ops
import models
from copy import deepcopy
import numpy as np
import joblib
from sklearn.ensemble import RandomForestRegressor
parser = argparse.ArgumentParser(description='AttentiveNAS Training')
parser.add_argument('--config-file', default=None, type=str,
help='training configuration')
parser.add_argument('--machine-rank', default=0, type=int,
help='machine rank, distributed setting')
parser.add_argument('--num-machines', default=1, type=int,
help='number of nodes, distributed setting')
parser.add_argument('--dist-url', default="tcp://127.0.0.1:10001", type=str,
help='init method, distributed setting')
logger = logging.get_logger(__name__)
def build_args_and_env(run_args):
assert run_args.config_file and os.path.isfile(run_args.config_file), 'cannot locate config file'
args = setup(run_args.config_file)
args.config_file = run_args.config_file
#load config
assert args.distributed and args.multiprocessing_distributed, 'only support DDP training'
args.distributed = True
args.machine_rank = run_args.machine_rank
args.num_nodes = run_args.num_machines
args.dist_url = run_args.dist_url
args.models_save_dir = os.path.join(args.models_save_dir, args.exp_name)
if not os.path.exists(args.models_save_dir):
os.makedirs(args.models_save_dir)
#backup config file
saver.copy_file(args.config_file, '{}/{}'.format(args.models_save_dir, os.path.basename(args.config_file)))
args.checkpoint_save_path = os.path.join(
args.models_save_dir, 'attentive_nas.pth.tar'
)
args.logging_save_path = os.path.join(
args.models_save_dir, f'stdout.log'
)
return args
def main():
run_args = parser.parse_args()
args = build_args_and_env(run_args)
random.seed(args.seed)
torch.manual_seed(args.seed)
#cudnn.deterministic = True
#warnings.warn('You have chosen to seed training. '
# 'This will turn on the CUDNN deterministic setting, '
# 'which can slow down your training considerably! '
# 'You may see unexpected behavior when restarting '
# 'from checkpoints.')
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.num_nodes
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
raise NotImplementedError
assert args.world_size > 1, 'only support ddp training'
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu # local rank, local machine cuda id
args.local_rank = args.gpu
args.batch_size = args.batch_size_per_gpu
args.batch_size_total = args.batch_size * args.world_size
#rescale base lr
args.lr_scheduler.base_lr = args.lr_scheduler.base_lr * (max(1, args.batch_size_total // 256))
# set random seed, make sure all random subgraph generated would be the same
random.seed(args.seed)
torch.manual_seed(args.seed)
    if args.gpu is not None:  # gpu id 0 is valid, so compare against None explicitly
torch.cuda.manual_seed(args.seed)
global_rank = args.gpu + args.machine_rank * ngpus_per_node
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=global_rank
)
# Setup logging format.
logging.setup_logging(args.logging_save_path, 'w')
logger.info(f"Use GPU: {args.gpu}, machine rank {args.machine_rank}, num_nodes {args.num_nodes}, \
gpu per node {ngpus_per_node}, world size {args.world_size}")
# synchronize is needed here to prevent a possible timeout after calling
# init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
args.rank = comm.get_rank() # global rank
args.local_rank = args.gpu
torch.cuda.set_device(args.gpu)
# build model
logger.info("=> creating model '{}'".format(args.arch))
model = models.model_factory.create_model(args)
model.cuda(args.gpu)
#build arch sampler
arch_sampler = None
if getattr(args, 'sampler', None):
arch_sampler = ArchSampler(
args.sampler.arch_to_flops_map_file_path, args.sampler.discretize_step, model, None
)
# use sync batchnorm
if getattr(args, 'sync_bn', False):
model.apply(
lambda m: setattr(m, 'need_sync', True))
model = comm.get_parallel_model(model, args.gpu) #local rank
logger.info(model)
criterion = loss_ops.CrossEntropyLossSmooth(args.label_smoothing).cuda(args.gpu)
soft_criterion = loss_ops.KLLossSoft().cuda(args.gpu)
if not getattr(args, 'inplace_distill', True):
soft_criterion = None
## load dataset, train_sampler: distributed
train_loader, val_loader, train_sampler = build_data_loader(args)
args.n_iters_per_epoch = len(train_loader)
logger.info( f'building optimizer and lr scheduler, \
local rank {args.gpu}, global rank {args.rank}, world_size {args.world_size}')
optimizer = build_optimizer(args, model)
lr_scheduler = build_lr_scheduler(args, optimizer)
# optionally resume from a checkpoint
if args.resume:
saver.load_checkpoints(args, model, optimizer, lr_scheduler, logger)
logger.info(args)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
args.curr_epoch = epoch
logger.info('Training lr {}'.format(lr_scheduler.get_lr()[0]))
# train for one epoch
acc1, acc5 = train_epoch(epoch, model, train_loader, optimizer, criterion, args, \
arch_sampler=arch_sampler, soft_criterion=soft_criterion, lr_scheduler=lr_scheduler)
if comm.is_master_process() or args.distributed:
# validate supernet model
validate(
train_loader, val_loader, model, criterion, args
)
if comm.is_master_process():
# save checkpoints
saver.save_checkpoint(
args.checkpoint_save_path,
model,
optimizer,
lr_scheduler,
args,
epoch,
)
def train_epoch(
epoch,
model,
train_loader,
optimizer,
criterion,
args,
arch_sampler=None,
soft_criterion=None,
lr_scheduler=None,
):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
model.train()
end = time.time()
num_updates = epoch * len(train_loader)
for batch_idx, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# total subnets to be sampled
num_subnet_training = max(2, getattr(args, 'num_arch_training', 2))
optimizer.zero_grad()
### compute gradients using sandwich rule ###
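        # Sandwich rule: accumulate gradients from (1) the largest subnet,
        # trained against the ground-truth labels and whose logits are cached
        # as soft targets, then (2) the smallest subnet plus several sampled
        # subnets, trained by inplace distillation against those cached
        # logits. A single optimizer.step() applies the summed gradients.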
# step 1 sample the largest network, apply regularization to only the largest network
drop_connect_only_last_two_stages = getattr(args, 'drop_connect_only_last_two_stages', True)
model.module.sample_max_subnet()
model.module.set_dropout_rate(args.dropout, args.drop_connect, drop_connect_only_last_two_stages) #dropout for supernet
output = model(images)
loss = criterion(output, target)
loss.backward()
with torch.no_grad():
soft_logits = output.clone().detach()
#step 2. sample the smallest network and several random networks
sandwich_rule = getattr(args, 'sandwich_rule', True)
model.module.set_dropout_rate(0, 0, drop_connect_only_last_two_stages) #reset dropout rate
for arch_id in range(1, num_subnet_training):
if arch_id == num_subnet_training-1 and sandwich_rule:
model.module.sample_min_subnet()
else:
# attentive sampling with training loss as the surrogate performance metric
if arch_sampler is not None:
sampling_method = args.sampler.method
if sampling_method in ['bestup', 'worstup']:
target_flops = arch_sampler.sample_one_target_flops()
candidate_archs = arch_sampler.sample_archs_according_to_flops(
target_flops, n_samples=args.sampler.num_trials
)
my_pred_accs = []
for arch in candidate_archs:
model.module.set_active_subnet(**arch)
with torch.no_grad():
my_pred_accs.append(-1.0 * criterion(model(images), target))
if sampling_method == 'bestup':
idx, _ = max(enumerate(my_pred_accs), key=operator.itemgetter(1))
else:
idx, _ = min(enumerate(my_pred_accs), key=operator.itemgetter(1))
model.module.set_active_subnet(**candidate_archs[idx]) #reset
else:
raise NotImplementedError
else:
model.module.sample_active_subnet()
            # calculating loss
output = model(images)
if soft_criterion:
loss = soft_criterion(output, soft_logits)
else:
assert not args.inplace_distill
loss = criterion(output, target)
loss.backward()
        # clip gradients if specified
if getattr(args, 'grad_clip_value', None):
torch.nn.utils.clip_grad_value_(model.parameters(), args.grad_clip_value)
optimizer.step()
#accuracy measured on the local batch
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
corr1, corr5, loss = acc1*args.batch_size, acc5*args.batch_size, loss.item()*args.batch_size #just in case the batch size is different on different nodes
stats = torch.tensor([corr1, corr5, loss, args.batch_size], device=args.gpu)
dist.barrier() # synchronizes all processes
dist.all_reduce(stats, op=torch.distributed.ReduceOp.SUM)
corr1, corr5, loss, batch_size = stats.tolist()
acc1, acc5, loss = corr1/batch_size, corr5/batch_size, loss/batch_size
losses.update(loss, batch_size)
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
else:
losses.update(loss.item(), images.size(0))
top1.update(acc1, images.size(0))
top5.update(acc5, images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
num_updates += 1
if lr_scheduler is not None:
lr_scheduler.step()
if batch_idx % args.print_freq == 0:
progress.display(batch_idx, logger)
return top1.avg, top5.avg
def validate(
train_loader,
val_loader,
model,
criterion,
args,
distributed = True,
):
subnets_to_be_evaluated = {
'attentive_nas_min_net': {},
'attentive_nas_max_net': {},
}
acc1_list, acc5_list = attentive_nas_eval.validate(
subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
bn_calibration = True,
)
if __name__ == '__main__':
main()
|
AttentiveNAS-main
|
train_attentive_nas.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import sys
from datetime import date
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import models
from utils.config import setup
from utils.flops_counter import count_net_flops_and_params
import utils.comm as comm
import utils.saver as saver
from data.data_loader import build_data_loader
from utils.progress import AverageMeter, ProgressMeter, accuracy
import argparse
parser = argparse.ArgumentParser(description='Test AttentiveNas Models')
parser.add_argument('--config-file', default='./configs/eval_attentive_nas_models.yml')
parser.add_argument('--model', default='a0', type=str, choices=['a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6'])
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
run_args = parser.parse_args()
if __name__ == '__main__':
args = setup(run_args.config_file)
args.model = run_args.model
args.gpu = run_args.gpu
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
args.__dict__['active_subnet'] = args.__dict__['pareto_models'][args.model]
print(args.active_subnet)
train_loader, val_loader, train_sampler = build_data_loader(args)
## init static attentivenas model with weights inherited from the supernet
model = models.model_factory.create_model(args)
model.to(args.gpu)
model.eval()
# bn running stats calibration following Slimmable (https://arxiv.org/abs/1903.05134)
# please consider trying a different random seed if you see a small accuracy drop
with torch.no_grad():
model.reset_running_stats_for_calibration()
for batch_idx, (images, _) in enumerate(train_loader):
if batch_idx >= args.post_bn_calibration_batch_num:
break
images = images.cuda(args.gpu, non_blocking=True)
model(images) #forward only
model.eval()
with torch.no_grad():
criterion = nn.CrossEntropyLoss().cuda()
from evaluate.imagenet_eval import validate_one_subnet
acc1, acc5, loss, flops, params = validate_one_subnet(val_loader, model, criterion, args)
print(acc1, acc5, flops, params)
|
AttentiveNAS-main
|
test_attentive_nas.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation adapted from Slimmable - https://github.com/JiahuiYu/slimmable_networks
import torch
class CrossEntropyLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification """
def forward(self, output, target):
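        # `target` is a soft distribution over classes ([batch, n_class]); the
        # bmm of (N, 1, C) with (N, C, 1) computes the per-sample cross-entropy
        # -sum_c target_c * log_prob_c in a single batched op.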
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
cross_entropy_loss = -torch.bmm(target, output_log_prob)
return cross_entropy_loss.mean()
class KLLossSoft(torch.nn.modules.loss._Loss):
""" inplace distillation for image classification
output: output logits of the student network
target: output logits of the teacher network
T: temperature
    KL(p||q) = E_p[log p] - E_p[log q]
"""
def forward(self, output, soft_logits, target=None, temperature=1., alpha=0.9):
output, soft_logits = output / temperature, soft_logits / temperature
soft_target_prob = torch.nn.functional.softmax(soft_logits, dim=1)
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
kd_loss = -torch.sum(soft_target_prob * output_log_prob, dim=1)
if target is not None:
n_class = output.size(1)
target = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
ce_loss = -torch.bmm(target, output_log_prob).squeeze()
            loss = alpha * temperature * temperature * kd_loss + (1.0 - alpha) * ce_loss
else:
loss = kd_loss
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
class CrossEntropyLossSmooth(torch.nn.modules.loss._Loss):
def __init__(self, label_smoothing=0.1):
super(CrossEntropyLossSmooth, self).__init__()
self.eps = label_smoothing
""" label smooth """
def forward(self, output, target):
n_class = output.size(1)
one_hot = torch.zeros_like(output).scatter(1, target.view(-1, 1), 1)
target = one_hot * (1 - self.eps) + self.eps / n_class
output_log_prob = torch.nn.functional.log_softmax(output, dim=1)
target = target.unsqueeze(1)
output_log_prob = output_log_prob.unsqueeze(2)
loss = -torch.bmm(target, output_log_prob)
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
return loss
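if __name__ == '__main__':
    # A minimal sanity check of the three losses (a sketch, not part of the
    # training pipeline). Assumed shapes: logits [batch, n_class], hard labels
    # [batch]. Note that KLLossSoft computes the soft cross-entropy
    # H(p_teacher, p_student), which equals KL(p_teacher || p_student) up to
    # the teacher entropy, a constant w.r.t. the student.
    torch.manual_seed(0)
    student = torch.randn(4, 10)
    teacher = torch.randn(4, 10)
    labels = torch.randint(0, 10, (4,))
    soft_targets = torch.nn.functional.softmax(teacher, dim=1)
    print('soft CE :', CrossEntropyLossSoft()(student, soft_targets).item())
    print('KD      :', KLLossSoft()(student, teacher).item())
    print('smoothed:', CrossEntropyLossSmooth(0.1)(student, labels).item())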
|
AttentiveNAS-main
|
utils/loss_ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import builtins
import decimal
import functools
import logging
import os
import sys
from .comm import is_master_process as is_master_proc
def _suppress_print():
"""
Suppresses printing from the current process.
"""
def print_pass(*objects, sep=" ", end="\n", file=sys.stdout, flush=False):
pass
builtins.print = print_pass
def setup_logging(save_path, mode='a'):
"""
Sets up the logging for multiple processes. Only enable the logging for the
master process, and suppress logging for the non-master processes.
"""
if is_master_proc():
# Enable logging for the master process.
logging.root.handlers = []
else:
# Suppress logging for non-master processes.
_suppress_print()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.propagate = False
print_plain_formatter = logging.Formatter(
"[%(asctime)s]: %(message)s",
datefmt="%m/%d %H:%M:%S",
)
fh_plain_formatter = logging.Formatter("%(message)s")
if is_master_proc():
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
ch.setFormatter(print_plain_formatter)
logger.addHandler(ch)
if save_path is not None and is_master_proc():
fh = logging.FileHandler(save_path, mode=mode)
fh.setLevel(logging.DEBUG)
fh.setFormatter(fh_plain_formatter)
logger.addHandler(fh)
def get_logger(name):
"""
Retrieve the logger with the specified name or, if name is None, return a
logger which is the root logger of the hierarchy.
Args:
name (string): name of the logger.
"""
return logging.getLogger(name)
|
AttentiveNAS-main
|
utils/logging.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# implementation adapted from Slimmable: https://github.com/JiahuiYu/slimmable_networks
"""config utilities for yml file."""
import os
import sys
import yaml
class LoaderMeta(type):
"""Constructor for supporting `!include`.
"""
def __new__(mcs, __name__, __bases__, __dict__):
"""Add include constructer to class."""
# register the include constructor on the class
cls = super().__new__(mcs, __name__, __bases__, __dict__)
cls.add_constructor('!include', cls.construct_include)
return cls
class Loader(yaml.SafeLoader, metaclass=LoaderMeta):
"""YAML Loader with `!include` constructor.
"""
def __init__(self, stream):
try:
self._root = os.path.split(stream.name)[0]
except AttributeError:
self._root = os.path.curdir
super().__init__(stream)
def construct_include(self, node):
"""Include file referenced at node."""
filename = os.path.abspath(
os.path.join(self._root, self.construct_scalar(node)))
extension = os.path.splitext(filename)[1].lstrip('.')
with open(filename, 'r') as f:
if extension in ('yaml', 'yml'):
return yaml.load(f, Loader)
else:
return ''.join(f.readlines())
class AttrDict(dict):
"""Dict as attribute trick.
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, dict):
self.__dict__[key] = AttrDict(value)
elif isinstance(value, list):
                if value and isinstance(value[0], dict):
self.__dict__[key] = [AttrDict(item) for item in value]
else:
self.__dict__[key] = value
def yaml(self):
"""Convert object to yaml dict and return.
"""
yaml_dict = {}
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, AttrDict):
yaml_dict[key] = value.yaml()
elif isinstance(value, list):
                if value and isinstance(value[0], AttrDict):
new_l = []
for item in value:
new_l.append(item.yaml())
yaml_dict[key] = new_l
else:
yaml_dict[key] = value
else:
yaml_dict[key] = value
return yaml_dict
def __repr__(self):
"""Print all variables.
"""
ret_str = []
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, AttrDict):
ret_str.append('{}:'.format(key))
child_ret_str = value.__repr__().split('\n')
for item in child_ret_str:
ret_str.append(' ' + item)
elif isinstance(value, list):
                if value and isinstance(value[0], AttrDict):
ret_str.append('{}:'.format(key))
for item in value:
# treat as AttrDict above
child_ret_str = item.__repr__().split('\n')
for item in child_ret_str:
ret_str.append(' ' + item)
else:
ret_str.append('{}: {}'.format(key, value))
else:
ret_str.append('{}: {}'.format(key, value))
return '\n'.join(ret_str)
class Config(AttrDict):
def __init__(self, filename=None):
with open(filename, 'r') as f:
cfg_dict = yaml.load(f, Loader)
super(Config, self).__init__(cfg_dict)
def setup(config_file):
assert os.path.isfile(config_file), 'cannot locate {}'.format(config_file)
return Config(config_file)
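if __name__ == '__main__':
    # A quick self-check (a sketch; the real configs live under ./configs):
    # write a small yml file, load it through setup(), and read values back
    # via the AttrDict attribute interface.
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as f:
        f.write('exp_name: demo\noptimizer:\n  method: sgd\n  lr: 0.1\n')
        tmp_path = f.name
    cfg = setup(tmp_path)
    print(cfg.exp_name, cfg.optimizer.method, cfg.optimizer.lr)  # demo sgd 0.1
    os.remove(tmp_path)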
|
AttentiveNAS-main
|
utils/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import logging
import pickle
import torch
import torch.nn as nn
import torch.distributed as dist
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_my_model(model):
if isinstance(model, nn.DataParallel):
return model.module
return model
def is_master_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def get_parallel_model(model, device):
if get_world_size() >= 1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[device], find_unused_parameters=True
)
else:
raise NotImplementedError
return model
def reduce_eval_results(summary, gpu):
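    # dist.all_gather requires equal-sized tensors on every rank, so pad the
    # summary string to a fixed 2000 characters, encode it as a tensor of
    # character codes, gather from all ranks, then decode and eval() each
    # rank's summary back into a Python object.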
summary = summary + "".join([" "] * (2000-len(summary)))
#send summary to rank 0
summary = torch.tensor([ord(c) for c in summary]).cuda(gpu)
summary_list = [torch.zeros_like(summary) for _ in range(dist.get_world_size())]
dist.all_gather(summary_list, summary)
group = []
for _i in range(dist.get_world_size()):
s = "".join([chr(c) for c in summary_list[_i]])
group.append(eval(s))
return group
|
AttentiveNAS-main
|
utils/comm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from copy import deepcopy
import torch
import os
import shutil
import joblib
def copy_file(source_path, target_path):
shutil.copyfile(source_path, target_path)
def save_acc_predictor(args, acc_predictor):
args.curr_acc_predictor_path = os.path.join(args.models_save_dir, f'acc_predictor_{args.curr_epoch}.joblib')
with open(args.curr_acc_predictor_path, 'wb') as fp:
joblib.dump(acc_predictor, fp)
def load_acc_predictor(args, predictor_saved_path=None):
if predictor_saved_path is None:
predictor_saved_path = args.curr_acc_predictor_path
with open(predictor_saved_path, 'rb') as fp:
acc_predictor = joblib.load(fp)
return acc_predictor
def save_checkpoint(save_path, model, optimizer, lr_scheduler, args, epoch, is_best=False):
save_state = {
'epoch': epoch + 1,
'args': args,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict()
}
best_model_path = os.path.join(
os.path.dirname(save_path),
'best_{}'.format(os.path.basename(save_path))
)
with open(save_path, 'wb') as f:
torch.save(save_state, f, _use_new_zipfile_serialization=False)
if is_best:
copy_file(save_path, best_model_path)
def load_checkpoints(args, model, optimizer=None, lr_scheduler=None, logger=None):
resume_path = args.resume
assert os.path.isfile(resume_path), "=> no checkpoint found at '{}'".format(resume_path)
with open(resume_path, 'rb') as f:
checkpoint = torch.load(f, map_location=torch.device('cpu'))
if logger:
logger.info("=> loading checkpoint '{}'".format(resume_path))
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
resume_with_a_different_optimizer = getattr(args, 'resume_with_a_different_optimizer', False)
resume_with_a_different_lr_scheduler = getattr(args, 'resume_with_a_different_lr_scheduler', False)
if optimizer and not resume_with_a_different_optimizer:
optimizer.load_state_dict(checkpoint['optimizer'])
if lr_scheduler and not resume_with_a_different_optimizer and not resume_with_a_different_lr_scheduler:
# use lr_scheduler settings defined in args
skip_keys = list(args.lr_scheduler.__dict__.keys()) + ['clamp_lr']
for k in skip_keys:
if k in checkpoint['lr_scheduler']:
checkpoint['lr_scheduler'].pop(k)
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
elif lr_scheduler is not None:
# reset lr_scheduler start epoch only
lr_scheduler.step(checkpoint['lr_scheduler']['last_epoch'])
if logger:
logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(resume_path, checkpoint['epoch']))
del checkpoint
|
AttentiveNAS-main
|
utils/saver.py
|
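A small standalone sketch of the checkpoint layout that save_checkpoint writes and load_checkpoints expects; the toy model and file path are illustrative, and the 'args' entry is omitted here because it comes from the training configuration.

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

save_state = {
    'epoch': 1,
    'state_dict': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'lr_scheduler': lr_scheduler.state_dict(),
}
torch.save(save_state, '/tmp/toy_ckpt.pth.tar')

checkpoint = torch.load('/tmp/toy_ckpt.pth.tar', map_location='cpu')
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])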
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation adapted from OFA - https://github.com/mit-han-lab/once-for-all
import torch
import torch.nn as nn
import copy
multiply_adds = 1
def count_convNd(m, _, y):
cin = m.in_channels
kernel_ops = m.weight.size()[2] * m.weight.size()[3]
ops_per_element = kernel_ops
output_elements = y.nelement()
    # output_elements = batch x c_out x oH x oW
total_ops = cin * output_elements * ops_per_element // m.groups
m.total_ops = torch.Tensor([int(total_ops)])
def count_linear(m, _, __):
total_ops = m.in_features * m.out_features
m.total_ops = torch.Tensor([int(total_ops)])
register_hooks = {
nn.Conv1d: count_convNd,
nn.Conv2d: count_convNd,
nn.Conv3d: count_convNd,
######################################
nn.Linear: count_linear,
######################################
nn.Dropout: None,
nn.Dropout2d: None,
nn.Dropout3d: None,
nn.BatchNorm2d: None,
}
def profile(model, input_size=(1, 3, 224, 224), custom_ops=None):
handler_collection = []
custom_ops = {} if custom_ops is None else custom_ops
def add_hooks(m_):
if len(list(m_.children())) > 0:
return
m_.register_buffer('total_ops', torch.zeros(1))
m_.register_buffer('total_params', torch.zeros(1))
for p in m_.parameters():
m_.total_params += torch.Tensor([p.numel()])
m_type = type(m_)
fn = None
if m_type in custom_ops:
fn = custom_ops[m_type]
elif m_type in register_hooks:
fn = register_hooks[m_type]
else:
# print("Not implemented for ", m_)
pass
if fn is not None:
# print("Register FLOP counter for module %s" % str(m_))
_handler = m_.register_forward_hook(fn)
handler_collection.append(_handler)
original_device = model.parameters().__next__().device
training = model.training
model.eval()
model.apply(add_hooks)
x = torch.zeros(input_size).to(original_device)
with torch.no_grad():
model(x)
total_ops = 0
total_params = 0
for m in model.modules():
if len(list(m.children())) > 0: # skip for non-leaf module
continue
total_ops += m.total_ops
total_params += m.total_params
total_ops = total_ops.item()
total_params = total_params.item()
model.train(training)
model.to(original_device)
for handler in handler_collection:
handler.remove()
return total_ops, total_params
def count_net_flops_and_params(net, data_shape=(1, 3, 224, 224)):
if isinstance(net, nn.DataParallel):
net = net.module
net = copy.deepcopy(net)
flop, nparams = profile(net, data_shape)
return flop /1e6, nparams /1e6
|
AttentiveNAS-main
|
utils/flops_counter.py
|
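A quick hand check (with assumed layer sizes, not from the repository) of the per-layer MAC formula that count_convNd applies: c_in times the number of output elements times the kernel area, divided by the group count.

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
y = conv(torch.zeros(1, 3, 32, 32))
# count_convNd: total_ops = c_in * y.nelement() * k_h * k_w // groups
total_ops = 3 * y.nelement() * 3 * 3 // conv.groups
assert y.nelement() == 1 * 16 * 32 * 32
assert total_ops == 3 * 16 * 32 * 32 * 9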
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
import torch
import torch.nn as nn
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch, logger=None):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
if logger is None:
print('\t'.join(entries))
else:
logger.info('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum() #sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
|
AttentiveNAS-main
|
utils/progress.py
|
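A toy check of the accuracy helper and AverageMeter above; it assumes the repository root is on PYTHONPATH so that utils.progress is importable.

import torch
from utils.progress import AverageMeter, accuracy

output = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # logits, 3 samples
target = torch.tensor([1, 0, 0])
top1, = accuracy(output, target, topk=(1,))   # predictions [1, 0, 1] -> 2/3
assert abs(top1.item() - 200.0 / 3) < 1e-3

meter = AverageMeter('Acc@1', ':6.2f')
meter.update(top1.item(), n=target.size(0))
print(meter)   # Acc@1  66.67 ( 66.67)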
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation adapted from OFA: https://github.com/mit-han-lab/once-for-all
import copy
import random
import collections
import math
import torch
import torch.nn as nn
from .modules.dynamic_layers import DynamicMBConvLayer, DynamicConvBnActLayer, DynamicLinearLayer, DynamicShortcutLayer
from .modules.static_layers import MobileInvertedResidualBlock
from .modules.nn_utils import make_divisible, int2list
from .modules.nn_base import MyNetwork
from .attentive_nas_static_model import AttentiveNasStaticModel
class AttentiveNasDynamicModel(MyNetwork):
def __init__(self, supernet, n_classes=1000, bn_param=(0., 1e-5)):
super(AttentiveNasDynamicModel, self).__init__()
self.supernet = supernet
self.n_classes = n_classes
self.use_v3_head = getattr(self.supernet, 'use_v3_head', False)
self.stage_names = ['first_conv', 'mb1', 'mb2', 'mb3', 'mb4', 'mb5', 'mb6', 'mb7', 'last_conv']
self.width_list, self.depth_list, self.ks_list, self.expand_ratio_list = [], [], [], []
for name in self.stage_names:
block_cfg = getattr(self.supernet, name)
self.width_list.append(block_cfg.c)
if name.startswith('mb'):
self.depth_list.append(block_cfg.d)
self.ks_list.append(block_cfg.k)
self.expand_ratio_list.append(block_cfg.t)
self.resolution_list = self.supernet.resolutions
self.cfg_candidates = {
'resolution': self.resolution_list ,
'width': self.width_list,
'depth': self.depth_list,
'kernel_size': self.ks_list,
'expand_ratio': self.expand_ratio_list
}
#first conv layer, including conv, bn, act
out_channel_list, act_func, stride = \
self.supernet.first_conv.c, self.supernet.first_conv.act_func, self.supernet.first_conv.s
self.first_conv = DynamicConvBnActLayer(
in_channel_list=int2list(3), out_channel_list=out_channel_list,
kernel_size=3, stride=stride, act_func=act_func,
)
# inverted residual blocks
self.block_group_info = []
blocks = []
_block_index = 0
feature_dim = out_channel_list
for stage_id, key in enumerate(self.stage_names[1:-1]):
block_cfg = getattr(self.supernet, key)
width = block_cfg.c
n_block = max(block_cfg.d)
act_func = block_cfg.act_func
ks = block_cfg.k
expand_ratio_list = block_cfg.t
use_se = block_cfg.se
self.block_group_info.append([_block_index + i for i in range(n_block)])
_block_index += n_block
output_channel = width
for i in range(n_block):
stride = block_cfg.s if i == 0 else 1
if min(expand_ratio_list) >= 4:
expand_ratio_list = [_s for _s in expand_ratio_list if _s >= 4] if i == 0 else expand_ratio_list
mobile_inverted_conv = DynamicMBConvLayer(
in_channel_list=feature_dim,
out_channel_list=output_channel,
kernel_size_list=ks,
expand_ratio_list=expand_ratio_list,
stride=stride,
act_func=act_func,
use_se=use_se,
channels_per_group=getattr(self.supernet, 'channels_per_group', 1)
)
shortcut = DynamicShortcutLayer(feature_dim, output_channel, reduction=stride)
blocks.append(MobileInvertedResidualBlock(mobile_inverted_conv, shortcut))
feature_dim = output_channel
self.blocks = nn.ModuleList(blocks)
last_channel, act_func = self.supernet.last_conv.c, self.supernet.last_conv.act_func
if not self.use_v3_head:
self.last_conv = DynamicConvBnActLayer(
in_channel_list=feature_dim, out_channel_list=last_channel,
kernel_size=1, act_func=act_func,
)
else:
expand_feature_dim = [f_dim * 6 for f_dim in feature_dim]
self.last_conv = nn.Sequential(collections.OrderedDict([
('final_expand_layer', DynamicConvBnActLayer(
feature_dim, expand_feature_dim, kernel_size=1, use_bn=True, act_func=act_func)
),
('pool', nn.AdaptiveAvgPool2d((1,1))),
('feature_mix_layer', DynamicConvBnActLayer(
in_channel_list=expand_feature_dim, out_channel_list=last_channel,
kernel_size=1, act_func=act_func, use_bn=False,)
),
]))
        # classifier head (dynamic linear layer)
self.classifier = DynamicLinearLayer(
in_features_list=last_channel, out_features=n_classes, bias=True
)
# set bn param
self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
# runtime_depth
self.runtime_depth = [len(block_idx) for block_idx in self.block_group_info]
self.zero_residual_block_bn_weights()
self.active_dropout_rate = 0
self.active_drop_connect_rate = 0
self.active_resolution = 224
def zero_residual_block_bn_weights(self):
with torch.no_grad():
for m in self.modules():
if isinstance(m, MobileInvertedResidualBlock):
if isinstance(m.mobile_inverted_conv, DynamicMBConvLayer) and m.shortcut is not None:
m.mobile_inverted_conv.point_linear.bn.bn.weight.zero_()
@staticmethod
def name():
return 'AttentiveNasModel'
def forward(self, x):
# resize input to target resolution first
if x.size(-1) != self.active_resolution:
x = torch.nn.functional.interpolate(x, size=self.active_resolution, mode='bicubic')
# first conv
x = self.first_conv(x)
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
x = self.blocks[idx](x)
x = self.last_conv(x)
x = x.mean(3, keepdim=True).mean(2, keepdim=True) # global average pooling
x = torch.squeeze(x)
if self.active_dropout_rate > 0 and self.training:
x = torch.nn.functional.dropout(x, p = self.active_dropout_rate)
x = self.classifier(x)
return x
@property
def module_str(self):
_str = self.first_conv.module_str + '\n'
_str += self.blocks[0].module_str + '\n'
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
_str += self.blocks[idx].module_str + '\n'
if not self.use_v3_head:
_str += self.last_conv.module_str + '\n'
else:
_str += self.last_conv.final_expand_layer.module_str + '\n'
_str += self.last_conv.feature_mix_layer.module_str + '\n'
_str += self.classifier.module_str + '\n'
return _str
@property
def config(self):
return {
'name': AttentiveNasDynamicModel.__name__,
'bn': self.get_bn_param(),
'first_conv': self.first_conv.config,
'blocks': [
block.config for block in self.blocks
],
'last_conv': self.last_conv.config if not self.use_v3_head else None,
'final_expand_layer': self.last_conv.final_expand_layer if self.use_v3_head else None,
'feature_mix_layer': self.last_conv.feature_mix_layer if self.use_v3_head else None,
'classifier': self.classifier.config,
'resolution': self.active_resolution
}
@staticmethod
def build_from_config(config):
raise ValueError('do not support this function')
""" set, sample and get active sub-networks """
def set_active_subnet(self, resolution=224, width=None, depth=None, kernel_size=None, expand_ratio=None, **kwargs):
assert len(depth) == len(kernel_size) == len(expand_ratio) == len(width) - 2
#set resolution
self.active_resolution = resolution
# first conv
self.first_conv.active_out_channel = width[0]
for stage_id, (c, k, e, d) in enumerate(zip(width[1:-1], kernel_size, expand_ratio, depth)):
start_idx, end_idx = min(self.block_group_info[stage_id]), max(self.block_group_info[stage_id])
for block_id in range(start_idx, start_idx+d):
block = self.blocks[block_id]
#block output channels
block.mobile_inverted_conv.active_out_channel = c
if block.shortcut is not None:
block.shortcut.active_out_channel = c
#dw kernel size
block.mobile_inverted_conv.active_kernel_size = k
                # depthwise expansion ratio
block.mobile_inverted_conv.active_expand_ratio = e
        # number of repeats for the IR blocks in each stage
for i, d in enumerate(depth):
self.runtime_depth[i] = min(len(self.block_group_info[i]), d)
#last conv
if not self.use_v3_head:
self.last_conv.active_out_channel = width[-1]
else:
# default expansion ratio: 6
self.last_conv.final_expand_layer.active_out_channel = width[-2] * 6
self.last_conv.feature_mix_layer.active_out_channel = width[-1]
def get_active_subnet_settings(self):
r = self.active_resolution
width, depth, kernel_size, expand_ratio= [], [], [], []
#first conv
width.append(self.first_conv.active_out_channel)
for stage_id in range(len(self.block_group_info)):
start_idx = min(self.block_group_info[stage_id])
block = self.blocks[start_idx] #first block
width.append(block.mobile_inverted_conv.active_out_channel)
kernel_size.append(block.mobile_inverted_conv.active_kernel_size)
expand_ratio.append(block.mobile_inverted_conv.active_expand_ratio)
depth.append(self.runtime_depth[stage_id])
if not self.use_v3_head:
width.append(self.last_conv.active_out_channel)
else:
width.append(self.last_conv.feature_mix_layer.active_out_channel)
return {
'resolution': r,
'width': width,
'kernel_size': kernel_size,
'expand_ratio': expand_ratio,
'depth': depth,
}
def set_dropout_rate(self, dropout=0, drop_connect=0, drop_connect_only_last_two_stages=True):
self.active_dropout_rate = dropout
for idx, block in enumerate(self.blocks):
if drop_connect_only_last_two_stages:
if idx not in self.block_group_info[-1] + self.block_group_info[-2]:
continue
this_drop_connect_rate = drop_connect * float(idx) / len(self.blocks)
block.drop_connect_rate = this_drop_connect_rate
def sample_min_subnet(self):
return self._sample_active_subnet(min_net=True)
def sample_max_subnet(self):
return self._sample_active_subnet(max_net=True)
def sample_active_subnet(self, compute_flops=False):
cfg = self._sample_active_subnet(
False, False
)
if compute_flops:
cfg['flops'] = self.compute_active_subnet_flops()
return cfg
def sample_active_subnet_within_range(self, targeted_min_flops, targeted_max_flops):
while True:
cfg = self._sample_active_subnet()
cfg['flops'] = self.compute_active_subnet_flops()
if cfg['flops'] >= targeted_min_flops and cfg['flops'] <= targeted_max_flops:
return cfg
def _sample_active_subnet(self, min_net=False, max_net=False):
sample_cfg = lambda candidates, sample_min, sample_max: \
min(candidates) if sample_min else (max(candidates) if sample_max else random.choice(candidates))
cfg = {}
# sample a resolution
cfg['resolution'] = sample_cfg(self.cfg_candidates['resolution'], min_net, max_net)
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
cfg[k] = []
for vv in self.cfg_candidates[k]:
cfg[k].append(sample_cfg(int2list(vv), min_net, max_net))
self.set_active_subnet(
cfg['resolution'], cfg['width'], cfg['depth'], cfg['kernel_size'], cfg['expand_ratio']
)
return cfg
def mutate_and_reset(self, cfg, prob=0.1, keep_resolution=False):
cfg = copy.deepcopy(cfg)
pick_another = lambda x, candidates: x if len(candidates) == 1 else random.choice([v for v in candidates if v != x])
# sample a resolution
r = random.random()
if r < prob and not keep_resolution:
cfg['resolution'] = pick_another(cfg['resolution'], self.cfg_candidates['resolution'])
# sample channels, depth, kernel_size, expand_ratio
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
for _i, _v in enumerate(cfg[k]):
r = random.random()
if r < prob:
cfg[k][_i] = pick_another(cfg[k][_i], int2list(self.cfg_candidates[k][_i]))
self.set_active_subnet(
cfg['resolution'], cfg['width'], cfg['depth'], cfg['kernel_size'], cfg['expand_ratio']
)
return cfg
def crossover_and_reset(self, cfg1, cfg2, p=0.5):
def _cross_helper(g1, g2, prob):
assert type(g1) == type(g2)
if isinstance(g1, int):
return g1 if random.random() < prob else g2
elif isinstance(g1, list):
return [v1 if random.random() < prob else v2 for v1, v2 in zip(g1, g2)]
else:
raise NotImplementedError
cfg = {}
cfg['resolution'] = cfg1['resolution'] if random.random() < p else cfg2['resolution']
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
cfg[k] = _cross_helper(cfg1[k], cfg2[k], p)
self.set_active_subnet(
cfg['resolution'], cfg['width'], cfg['depth'], cfg['kernel_size'], cfg['expand_ratio']
)
return cfg
def get_active_subnet(self, preserve_weight=True):
with torch.no_grad():
first_conv = self.first_conv.get_active_subnet(3, preserve_weight)
blocks = []
input_channel = first_conv.out_channels
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
stage_blocks = []
for idx in active_idx:
stage_blocks.append(MobileInvertedResidualBlock(
self.blocks[idx].mobile_inverted_conv.get_active_subnet(input_channel, preserve_weight),
self.blocks[idx].shortcut.get_active_subnet(input_channel, preserve_weight) if self.blocks[idx].shortcut is not None else None
))
input_channel = stage_blocks[-1].mobile_inverted_conv.out_channels
blocks += stage_blocks
if not self.use_v3_head:
last_conv = self.last_conv.get_active_subnet(input_channel, preserve_weight)
in_features = last_conv.out_channels
else:
final_expand_layer = self.last_conv.final_expand_layer.get_active_subnet(input_channel, preserve_weight)
feature_mix_layer = self.last_conv.feature_mix_layer.get_active_subnet(input_channel*6, preserve_weight)
in_features = feature_mix_layer.out_channels
last_conv = nn.Sequential(
final_expand_layer,
nn.AdaptiveAvgPool2d((1,1)),
feature_mix_layer
)
classifier = self.classifier.get_active_subnet(in_features, preserve_weight)
_subnet = AttentiveNasStaticModel(
first_conv, blocks, last_conv, classifier, self.active_resolution, use_v3_head=self.use_v3_head
)
_subnet.set_bn_param(**self.get_bn_param())
return _subnet
def get_active_net_config(self):
raise NotImplementedError
def compute_active_subnet_flops(self):
def count_conv(c_in, c_out, size_out, groups, k):
kernel_ops = k**2
output_elements = c_out * size_out**2
ops = c_in * output_elements * kernel_ops / groups
return ops
def count_linear(c_in, c_out):
return c_in * c_out
total_ops = 0
c_in = 3
size_out = self.active_resolution // self.first_conv.stride
c_out = self.first_conv.active_out_channel
total_ops += count_conv(c_in, c_out, size_out, 1, 3)
c_in = c_out
# mb blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
block = self.blocks[idx]
c_middle = make_divisible(round(c_in * block.mobile_inverted_conv.active_expand_ratio), 8)
# 1*1 conv
if block.mobile_inverted_conv.inverted_bottleneck is not None:
total_ops += count_conv(c_in, c_middle, size_out, 1, 1)
# dw conv
stride = 1 if idx > active_idx[0] else block.mobile_inverted_conv.stride
if size_out % stride == 0:
size_out = size_out // stride
else:
size_out = (size_out +1) // stride
total_ops += count_conv(c_middle, c_middle, size_out, c_middle, block.mobile_inverted_conv.active_kernel_size)
# 1*1 conv
c_out = block.mobile_inverted_conv.active_out_channel
total_ops += count_conv(c_middle, c_out, size_out, 1, 1)
#se
if block.mobile_inverted_conv.use_se:
num_mid = make_divisible(c_middle // block.mobile_inverted_conv.depth_conv.se.reduction, divisor=8)
total_ops += count_conv(c_middle, num_mid, 1, 1, 1) * 2
if block.shortcut and c_in != c_out:
total_ops += count_conv(c_in, c_out, size_out, 1, 1)
c_in = c_out
if not self.use_v3_head:
c_out = self.last_conv.active_out_channel
total_ops += count_conv(c_in, c_out, size_out, 1, 1)
else:
c_expand = self.last_conv.final_expand_layer.active_out_channel
c_out = self.last_conv.feature_mix_layer.active_out_channel
total_ops += count_conv(c_in, c_expand, size_out, 1, 1)
total_ops += count_conv(c_expand, c_out, 1, 1, 1)
# n_classes
total_ops += count_linear(c_out, self.n_classes)
return total_ops / 1e6
def load_weights_from_pretrained_models(self, checkpoint_path):
with open(checkpoint_path, 'rb') as f:
checkpoint = torch.load(f, map_location='cpu')
assert isinstance(checkpoint, dict)
pretrained_state_dicts = checkpoint['state_dict']
for k, v in self.state_dict().items():
name = 'module.' + k if not k.startswith('module') else k
v.copy_(pretrained_state_dicts[name])
|
AttentiveNAS-main
|
models/attentive_nas_dynamic_model.py
|
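A standalone sketch of the per-dimension sampling rule inside _sample_active_subnet: the smallest subnet takes the minimum of every candidate list, the largest takes the maximum, and otherwise a uniform random choice is made. The candidate widths below are hypothetical.

import random

def sample_cfg(candidates, sample_min=False, sample_max=False):
    if sample_min:
        return min(candidates)
    if sample_max:
        return max(candidates)
    return random.choice(candidates)

widths = [16, 24, 32]                               # hypothetical candidate list
assert sample_cfg(widths, sample_min=True) == 16    # min subnet
assert sample_cfg(widths, sample_max=True) == 32    # max subnet
assert sample_cfg(widths) in widths                 # randomly sampled subnet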
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .model_factory import *
|
AttentiveNAS-main
|
models/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import math
from .modules.static_layers import set_layer_from_config, MBInvertedConvLayer, ConvBnActLayer, ShortcutLayer, LinearLayer, MobileInvertedResidualBlock, IdentityLayer
from .modules.nn_utils import make_divisible
from .modules.nn_base import MyNetwork
class AttentiveNasStaticModel(MyNetwork):
def __init__(self, first_conv, blocks, last_conv, classifier, resolution, use_v3_head=True):
super(AttentiveNasStaticModel, self).__init__()
self.first_conv = first_conv
self.blocks = nn.ModuleList(blocks)
self.last_conv = last_conv
self.classifier = classifier
self.resolution = resolution #input size
self.use_v3_head = use_v3_head
def forward(self, x):
# resize input to target resolution first
if x.size(-1) != self.resolution:
x = torch.nn.functional.interpolate(x, size=self.resolution, mode='bicubic')
x = self.first_conv(x)
for block in self.blocks:
x = block(x)
x = self.last_conv(x)
if not self.use_v3_head:
x = x.mean(3, keepdim=True).mean(2, keepdim=True) # global average pooling
x = torch.squeeze(x)
x = self.classifier(x)
return x
@property
def module_str(self):
_str = self.first_conv.module_str + '\n'
for block in self.blocks:
_str += block.module_str + '\n'
#_str += self.last_conv.module_str + '\n'
_str += self.classifier.module_str
return _str
@property
def config(self):
return {
'name': AttentiveNasStaticModel.__name__,
'bn': self.get_bn_param(),
'first_conv': self.first_conv.config,
'blocks': [
block.config for block in self.blocks
],
#'last_conv': self.last_conv.config,
'classifier': self.classifier.config,
'resolution': self.resolution
}
def weight_initialization(self):
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
@staticmethod
def build_from_config(config):
raise NotImplementedError
def reset_running_stats_for_calibration(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.SyncBatchNorm):
m.training = True
m.momentum = None # cumulative moving average
m.reset_running_stats()
|
AttentiveNAS-main
|
models/attentive_nas_static_model.py
|
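A minimal sketch of the BN calibration reset performed by reset_running_stats_for_calibration: switching the layer to training mode with momentum=None makes the running statistics a cumulative moving average over the calibration batches. The batch sizes here are hypothetical.

import torch
import torch.nn as nn

bn = nn.BatchNorm2d(8)
bn.training = True
bn.momentum = None          # cumulative moving average
bn.reset_running_stats()
with torch.no_grad():
    for _ in range(4):      # hypothetical calibration batches
        bn(torch.randn(16, 8, 4, 4))
print(bn.running_mean)      # statistics re-estimated from the 4 batches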
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .attentive_nas_dynamic_model import AttentiveNasDynamicModel
def create_model(args, arch=None):
n_classes = int(getattr(args, 'n_classes', 1000))
bn_momentum = getattr(args, 'bn_momentum', 0.1)
bn_eps = getattr(args, 'bn_eps', 1e-5)
dropout = getattr(args, 'dropout', 0)
drop_connect = getattr(args, 'drop_connect', 0)
if arch is None:
arch = args.arch
if arch == 'attentive_nas_dynamic_model':
model = AttentiveNasDynamicModel(
args.supernet_config,
n_classes = n_classes,
bn_param = (bn_momentum, bn_eps),
)
elif arch == 'attentive_nas_static_model':
supernet = AttentiveNasDynamicModel(
args.supernet_config,
n_classes = n_classes,
bn_param = (bn_momentum, bn_eps),
)
# load from pretrained models
supernet.load_weights_from_pretrained_models(args.pareto_models.supernet_checkpoint_path)
# subsample a static model with weights inherited from the supernet dynamic model
supernet.set_active_subnet(
resolution=args.active_subnet.resolution,
width = args.active_subnet.width,
depth = args.active_subnet.depth,
kernel_size = args.active_subnet.kernel_size,
expand_ratio = args.active_subnet.expand_ratio
)
model = supernet.get_active_subnet()
# house-keeping stuff
model.set_bn_param(momentum=bn_momentum, eps=bn_eps)
del supernet
else:
raise ValueError(arch)
return model
|
AttentiveNAS-main
|
models/model_factory.py
|
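A sketch of the argument object that create_model reads; only the scalar fields are shown here because args.supernet_config comes from the training YAML files and is not reproduced in this dump.

from types import SimpleNamespace

args = SimpleNamespace(
    arch='attentive_nas_dynamic_model',
    n_classes=1000, bn_momentum=0.1, bn_eps=1e-5,
    dropout=0.2, drop_connect=0.2,
    # supernet_config=...  (stage widths/depths/kernels from the YAML config)
)
# create_model(args) would then build the dynamic supernet with these BN
# parameters; the getattr defaults above cover any field left unset.
assert getattr(args, 'bn_momentum', 0.1) == 0.1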
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F  # used by ShortcutLayer.forward (avg_pool2d)
from .nn_utils import get_same_padding, build_activation, make_divisible, drop_connect
from .nn_base import MyModule
from .activations import *
def set_layer_from_config(layer_config):
if layer_config is None:
return None
name2layer = {
ConvBnActLayer.__name__: ConvBnActLayer,
IdentityLayer.__name__: IdentityLayer,
LinearLayer.__name__: LinearLayer,
MBInvertedConvLayer.__name__: MBInvertedConvLayer,
}
layer_name = layer_config.pop('name')
layer = name2layer[layer_name]
return layer.build_from_config(layer_config)
class SELayer(nn.Module):
REDUCTION = 4
def __init__(self, channel):
super(SELayer, self).__init__()
self.channel = channel
self.reduction = SELayer.REDUCTION
num_mid = make_divisible(self.channel // self.reduction, divisor=8)
self.fc = nn.Sequential(OrderedDict([
('reduce', nn.Conv2d(self.channel, num_mid, 1, 1, 0, bias=True)),
('relu', nn.ReLU(inplace=True)),
('expand', nn.Conv2d(num_mid, self.channel, 1, 1, 0, bias=True)),
('h_sigmoid', Hsigmoid(inplace=True)),
]))
def forward(self, x):
#x: N, C, H, W
y = x.mean(3, keepdim=True).mean(2, keepdim=True) # N, C, 1, 1
y = self.fc(y)
return x * y
class ConvBnActLayer(MyModule):
def __init__(self, in_channels, out_channels,
kernel_size=3, stride=1, dilation=1, groups=1, bias=False,
use_bn=True, act_func='relu'):
super(ConvBnActLayer, self).__init__()
# default normal 3x3_Conv with bn and relu
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.groups = groups
self.bias = bias
self.use_bn = use_bn
self.act_func = act_func
pad = get_same_padding(self.kernel_size)
self.conv = nn.Conv2d(in_channels, out_channels, self.kernel_size,
stride, pad, dilation=dilation, groups=groups, bias=bias
)
if self.use_bn:
self.bn = nn.BatchNorm2d(out_channels)
self.act = build_activation(self.act_func, inplace=True)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.act:
x = self.act(x)
return x
@property
def module_str(self):
if isinstance(self.kernel_size, int):
kernel_size = (self.kernel_size, self.kernel_size)
else:
kernel_size = self.kernel_size
if self.groups == 1:
if self.dilation > 1:
conv_str = '%dx%d_DilatedConv' % (kernel_size[0], kernel_size[1])
else:
conv_str = '%dx%d_Conv' % (kernel_size[0], kernel_size[1])
else:
if self.dilation > 1:
conv_str = '%dx%d_DilatedGroupConv' % (kernel_size[0], kernel_size[1])
else:
conv_str = '%dx%d_GroupConv' % (kernel_size[0], kernel_size[1])
conv_str += '_O%d' % self.out_channels
return conv_str
@property
def config(self):
return {
'name': ConvBnActLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'kernel_size': self.kernel_size,
'stride': self.stride,
'dilation': self.dilation,
'groups': self.groups,
'bias': self.bias,
'use_bn': self.use_bn,
'act_func': self.act_func,
}
@staticmethod
def build_from_config(config):
return ConvBnActLayer(**config)
class IdentityLayer(MyModule):
def __init__(self, ):
super(IdentityLayer, self).__init__()
def forward(self, x):
return x
@property
def module_str(self):
return 'Identity'
@property
def config(self):
return {
'name': IdentityLayer.__name__,
}
@staticmethod
def build_from_config(config):
return IdentityLayer(**config)
class LinearLayer(MyModule):
def __init__(self, in_features, out_features, bias=True):
super(LinearLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
#self.dropout_rate = dropout_rate
#if self.dropout_rate > 0:
# self.dropout = nn.Dropout(self.dropout_rate, inplace=True)
#else:
# self.dropout = None
self.linear = nn.Linear(in_features, out_features, bias)
def forward(self, x):
#if dropout is not None:
# x = self.dropout(x)
return self.linear(x)
@property
def module_str(self):
return '%dx%d_Linear' % (self.in_features, self.out_features)
@property
def config(self):
return {
'name': LinearLayer.__name__,
'in_features': self.in_features,
'out_features': self.out_features,
'bias': self.bias,
#'dropout_rate': self.dropout_rate,
}
@staticmethod
def build_from_config(config):
return LinearLayer(**config)
class ShortcutLayer(MyModule):
def __init__(self, in_channels, out_channels, reduction=1):
super(ShortcutLayer, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.reduction = reduction
self.conv = nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False)
def forward(self, x):
if self.reduction > 1:
padding = 0 if x.size(-1) % 2 == 0 else 1
x = F.avg_pool2d(x, self.reduction, padding=padding)
if self.in_channels != self.out_channels:
x = self.conv(x)
return x
@property
def module_str(self):
if self.in_channels == self.out_channels and self.reduction == 1:
conv_str = 'IdentityShortcut'
else:
if self.reduction == 1:
conv_str = '%d-%d_Shortcut' % (self.in_channels, self.out_channels)
else:
conv_str = '%d-%d_R%d_Shortcut' % (self.in_channels, self.out_channels, self.reduction)
return conv_str
@property
def config(self):
return {
'name': ShortcutLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'reduction': self.reduction,
}
@staticmethod
def build_from_config(config):
return ShortcutLayer(**config)
class MBInvertedConvLayer(MyModule):
def __init__(self, in_channels, out_channels,
kernel_size=3, stride=1, expand_ratio=6, mid_channels=None, act_func='relu6', use_se=False, channels_per_group=1):
super(MBInvertedConvLayer, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.expand_ratio = expand_ratio
self.mid_channels = mid_channels
self.act_func = act_func
self.use_se = use_se
self.channels_per_group = channels_per_group
if self.mid_channels is None:
feature_dim = round(self.in_channels * self.expand_ratio)
else:
feature_dim = self.mid_channels
if self.expand_ratio == 1:
self.inverted_bottleneck = None
else:
self.inverted_bottleneck = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
('bn', nn.BatchNorm2d(feature_dim)),
('act', build_activation(self.act_func, inplace=True)),
]))
assert feature_dim % self.channels_per_group == 0
active_groups = feature_dim // self.channels_per_group
pad = get_same_padding(self.kernel_size)
depth_conv_modules = [
('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=active_groups, bias=False)),
('bn', nn.BatchNorm2d(feature_dim)),
('act', build_activation(self.act_func, inplace=True))
]
if self.use_se:
depth_conv_modules.append(('se', SELayer(feature_dim)))
self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))
self.point_linear = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
('bn', nn.BatchNorm2d(out_channels)),
]))
def forward(self, x):
if self.inverted_bottleneck:
x = self.inverted_bottleneck(x)
x = self.depth_conv(x)
x = self.point_linear(x)
return x
@property
def module_str(self):
if self.mid_channels is None:
expand_ratio = self.expand_ratio
else:
expand_ratio = self.mid_channels // self.in_channels
layer_str = '%dx%d_MBConv%d_%s' % (self.kernel_size, self.kernel_size, expand_ratio, self.act_func.upper())
if self.use_se:
layer_str = 'SE_' + layer_str
layer_str += '_O%d' % self.out_channels
return layer_str
@property
def config(self):
return {
'name': MBInvertedConvLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'kernel_size': self.kernel_size,
'stride': self.stride,
'expand_ratio': self.expand_ratio,
'mid_channels': self.mid_channels,
'act_func': self.act_func,
'use_se': self.use_se,
'channels_per_group': self.channels_per_group,
}
@staticmethod
def build_from_config(config):
return MBInvertedConvLayer(**config)
class MobileInvertedResidualBlock(MyModule):
def __init__(self, mobile_inverted_conv, shortcut, drop_connect_rate=0):
super(MobileInvertedResidualBlock, self).__init__()
self.mobile_inverted_conv = mobile_inverted_conv
self.shortcut = shortcut
self.drop_connect_rate = drop_connect_rate
def forward(self, x):
in_channel = x.size(1)
if self.mobile_inverted_conv is None: # or isinstance(self.mobile_inverted_conv, ZeroLayer):
res = x
elif self.shortcut is None: # or isinstance(self.shortcut, ZeroLayer):
res = self.mobile_inverted_conv(x)
else:
im = self.shortcut(x)
x = self.mobile_inverted_conv(x)
if self.drop_connect_rate > 0 and in_channel == im.size(1) and self.shortcut.reduction == 1:
x = drop_connect(x, p=self.drop_connect_rate, training=self.training)
res = x + im
return res
@property
def module_str(self):
return '(%s, %s)' % (
self.mobile_inverted_conv.module_str if self.mobile_inverted_conv is not None else None,
self.shortcut.module_str if self.shortcut is not None else None
)
@property
def config(self):
return {
'name': MobileInvertedResidualBlock.__name__,
'mobile_inverted_conv': self.mobile_inverted_conv.config if self.mobile_inverted_conv is not None else None,
'shortcut': self.shortcut.config if self.shortcut is not None else None,
}
@staticmethod
def build_from_config(config):
mobile_inverted_conv = set_layer_from_config(config['mobile_inverted_conv'])
shortcut = set_layer_from_config(config['shortcut'])
return MobileInvertedResidualBlock(mobile_inverted_conv, shortcut)
|
AttentiveNAS-main
|
models/modules/static_layers.py
|
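A standalone sketch of the squeeze-and-excitation gating that SELayer implements: global average pooling, a reduce/expand bottleneck, a hard sigmoid, then channel-wise rescaling. Note that for 16 channels the real layer's bottleneck width is make_divisible(16 // 4, divisor=8) == 8.

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(2, 16, 8, 8)
y = x.mean(3, keepdim=True).mean(2, keepdim=True)   # squeeze: N, C, 1, 1
fc = nn.Sequential(nn.Conv2d(16, 8, 1), nn.ReLU(inplace=True), nn.Conv2d(8, 16, 1))
gate = F.relu6(fc(y) + 3.0) / 6.0                   # Hsigmoid excitation
out = x * gate                                      # channel-wise rescale
assert out.shape == x.shape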
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import math
import torch
import torch.nn as nn
try:
    from fvcore.common.file_io import PathManager
except ImportError:
    # fall back to the plain file API in load_weights_from_pretrained_models
    PathManager = None
class MyModule(nn.Module):
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
class MyNetwork(MyModule):
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
def zero_last_gamma(self):
raise NotImplementedError
""" implemented methods """
def set_bn_param(self, momentum, eps):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.SyncBatchNorm):
if momentum is not None:
m.momentum = float(momentum)
else:
m.momentum = None
m.eps = float(eps)
return
def get_bn_param(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.SyncBatchNorm):
return {
'momentum': m.momentum,
'eps': m.eps,
}
return None
def init_model(self, model_init):
""" Conv2d, BatchNorm2d, BatchNorm1d, Linear, """
for m in self.modules():
if isinstance(m, nn.Conv2d):
if model_init == 'he_fout':
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif model_init == 'he_fin':
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
else:
raise NotImplementedError
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
stdv = 1. / math.sqrt(m.weight.size(1))
m.weight.data.uniform_(-stdv, stdv)
if m.bias is not None:
m.bias.data.zero_()
def get_parameters(self, keys=None, mode='include', exclude_set=None):
if exclude_set is None:
exclude_set = {}
if keys is None:
for name, param in self.named_parameters():
if name not in exclude_set:
yield param
elif mode == 'include':
for name, param in self.named_parameters():
flag = False
for key in keys:
if key in name:
flag = True
break
if flag and name not in exclude_set:
yield param
elif mode == 'exclude':
for name, param in self.named_parameters():
flag = True
for key in keys:
if key in name:
flag = False
break
if flag and name not in exclude_set:
yield param
else:
raise ValueError('do not support: %s' % mode)
def weight_parameters(self, exclude_set=None):
return self.get_parameters(exclude_set=exclude_set)
def load_weights_from_pretrained_models(self, checkpoint_path, load_from_ema=False):
try:
with PathManager.open(checkpoint_path, 'rb') as f:
checkpoint = torch.load(f, map_location='cpu')
except:
with open(checkpoint_path, 'rb') as f:
checkpoint = torch.load(f, map_location='cpu')
assert isinstance(checkpoint, dict)
pretrained_state_dicts = checkpoint['state_dict']
if load_from_ema and 'state_dict_ema' in checkpoint:
pretrained_state_dicts = checkpoint['state_dict_ema']
for k, v in self.state_dict().items():
name = k
if not load_from_ema:
name = 'module.' + k if not k.startswith('module') else k
v.copy_(pretrained_state_dicts[name])
|
AttentiveNAS-main
|
models/modules/nn_base.py
|
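A standalone sketch of the substring filtering used by MyNetwork.get_parameters: 'include' mode yields parameters whose name contains any key, 'exclude' mode yields the rest. The key below is hypothetical; in the real network the keys would be module-name fragments such as 'bn'.

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
keys = ['1.']  # Sequential names its children '0', '1', ...; '1.' is the BN
included = [n for n, _ in model.named_parameters() if any(k in n for k in keys)]
excluded = [n for n, _ in model.named_parameters() if not any(k in n for k in keys)]
assert included == ['1.weight', '1.bias']   # BatchNorm parameters
assert excluded == ['0.weight', '0.bias']   # Conv parameters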
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch
import torch.nn as nn
import torch.nn.functional as F
# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3., inplace=self.inplace) / 6.
#class Swish(nn.Module):
# def __init__(self, inplace=True):
# super(Swish, self).__init__()
# self.inplace = inplace
#
# def forward(self, x):
# return x * torch.sigmoid(x)
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3., inplace=self.inplace) / 6.
|
AttentiveNAS-main
|
models/modules/activations.py
|
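A quick numeric check (assuming torch >= 1.6) that Hswish and Hsigmoid above match PyTorch's built-in hard activations, which use the same relu6(x + 3) / 6 form.

import torch
import torch.nn.functional as F

x = torch.linspace(-5, 5, steps=11)
assert torch.allclose(x * F.relu6(x + 3.0) / 6.0, F.hardswish(x))   # Hswish
assert torch.allclose(F.relu6(x + 3.0) / 6.0, F.hardsigmoid(x))     # Hsigmoid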
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from collections import OrderedDict
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .static_layers import MBInvertedConvLayer, ConvBnActLayer, LinearLayer, SELayer, ShortcutLayer
from .dynamic_ops import DynamicSeparableConv2d, DynamicPointConv2d, DynamicBatchNorm2d, DynamicLinear, DynamicSE
from .nn_utils import int2list, get_net_device, copy_bn, build_activation, make_divisible
from .nn_base import MyModule, MyNetwork
class DynamicMBConvLayer(MyModule):
def __init__(self, in_channel_list, out_channel_list,
kernel_size_list=3, expand_ratio_list=6, stride=1, act_func='relu6', use_se=False, channels_per_group=1):
super(DynamicMBConvLayer, self).__init__()
self.in_channel_list = int2list(in_channel_list)
self.out_channel_list = int2list(out_channel_list)
self.kernel_size_list = int2list(kernel_size_list, 1)
self.expand_ratio_list = int2list(expand_ratio_list, 1)
self.stride = stride
self.act_func = act_func
self.use_se = use_se
self.channels_per_group = channels_per_group
# build modules
max_middle_channel = round(max(self.in_channel_list) * max(self.expand_ratio_list))
if max(self.expand_ratio_list) == 1:
self.inverted_bottleneck = None
else:
self.inverted_bottleneck = nn.Sequential(OrderedDict([
('conv', DynamicPointConv2d(max(self.in_channel_list), max_middle_channel)),
('bn', DynamicBatchNorm2d(max_middle_channel)),
('act', build_activation(self.act_func, inplace=True)),
]))
self.depth_conv = nn.Sequential(OrderedDict([
('conv', DynamicSeparableConv2d(max_middle_channel, self.kernel_size_list, stride=self.stride, channels_per_group=self.channels_per_group)),
('bn', DynamicBatchNorm2d(max_middle_channel)),
('act', build_activation(self.act_func, inplace=True))
]))
if self.use_se:
self.depth_conv.add_module('se', DynamicSE(max_middle_channel))
self.point_linear = nn.Sequential(OrderedDict([
('conv', DynamicPointConv2d(max_middle_channel, max(self.out_channel_list))),
('bn', DynamicBatchNorm2d(max(self.out_channel_list))),
]))
self.active_kernel_size = max(self.kernel_size_list)
self.active_expand_ratio = max(self.expand_ratio_list)
self.active_out_channel = max(self.out_channel_list)
def forward(self, x):
in_channel = x.size(1)
if self.inverted_bottleneck is not None:
self.inverted_bottleneck.conv.active_out_channel = \
make_divisible(round(in_channel * self.active_expand_ratio), 8)
self.depth_conv.conv.active_kernel_size = self.active_kernel_size
self.point_linear.conv.active_out_channel = self.active_out_channel
if self.inverted_bottleneck is not None:
x = self.inverted_bottleneck(x)
x = self.depth_conv(x)
x = self.point_linear(x)
return x
@property
def module_str(self):
if self.use_se:
return 'SE(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)
else:
return '(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)
@property
def config(self):
return {
'name': DynamicMBConvLayer.__name__,
'in_channel_list': self.in_channel_list,
'out_channel_list': self.out_channel_list,
'kernel_size_list': self.kernel_size_list,
'expand_ratio_list': self.expand_ratio_list,
'stride': self.stride,
'act_func': self.act_func,
'use_se': self.use_se,
'channels_per_group': self.channels_per_group,
}
@staticmethod
def build_from_config(config):
return DynamicMBConvLayer(**config)
############################################################################################
def get_active_subnet(self, in_channel, preserve_weight=True):
middle_channel = make_divisible(round(in_channel * self.active_expand_ratio), 8)
channels_per_group = self.depth_conv.conv.channels_per_group
# build the new layer
sub_layer = MBInvertedConvLayer(
in_channel, self.active_out_channel, self.active_kernel_size, self.stride, self.active_expand_ratio,
act_func=self.act_func, mid_channels=middle_channel, use_se=self.use_se, channels_per_group=channels_per_group
)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
# copy weight from current layer
if sub_layer.inverted_bottleneck is not None:
sub_layer.inverted_bottleneck.conv.weight.data.copy_(
self.inverted_bottleneck.conv.conv.weight.data[:middle_channel, :in_channel, :, :]
)
copy_bn(sub_layer.inverted_bottleneck.bn, self.inverted_bottleneck.bn.bn)
sub_layer.depth_conv.conv.weight.data.copy_(
self.depth_conv.conv.get_active_filter(middle_channel, self.active_kernel_size).data
)
copy_bn(sub_layer.depth_conv.bn, self.depth_conv.bn.bn)
if self.use_se:
se_mid = make_divisible(middle_channel // SELayer.REDUCTION, divisor=8)
sub_layer.depth_conv.se.fc.reduce.weight.data.copy_(
self.depth_conv.se.fc.reduce.weight.data[:se_mid, :middle_channel, :, :]
)
sub_layer.depth_conv.se.fc.reduce.bias.data.copy_(self.depth_conv.se.fc.reduce.bias.data[:se_mid])
sub_layer.depth_conv.se.fc.expand.weight.data.copy_(
self.depth_conv.se.fc.expand.weight.data[:middle_channel, :se_mid, :, :]
)
sub_layer.depth_conv.se.fc.expand.bias.data.copy_(self.depth_conv.se.fc.expand.bias.data[:middle_channel])
sub_layer.point_linear.conv.weight.data.copy_(
self.point_linear.conv.conv.weight.data[:self.active_out_channel, :middle_channel, :, :]
)
copy_bn(sub_layer.point_linear.bn, self.point_linear.bn.bn)
return sub_layer
def re_organize_middle_weights(self, expand_ratio_stage=0):
raise NotImplementedError
#importance = torch.sum(torch.abs(self.point_linear.conv.conv.weight.data), dim=(0, 2, 3))
#if expand_ratio_stage > 0:
# sorted_expand_list = copy.deepcopy(self.expand_ratio_list)
# sorted_expand_list.sort(reverse=True)
# target_width = sorted_expand_list[expand_ratio_stage]
# target_width = round(max(self.in_channel_list) * target_width)
# importance[target_width:] = torch.arange(0, target_width - importance.size(0), -1)
#
#sorted_importance, sorted_idx = torch.sort(importance, dim=0, descending=True)
#self.point_linear.conv.conv.weight.data = torch.index_select(
# self.point_linear.conv.conv.weight.data, 1, sorted_idx
#)
#
#adjust_bn_according_to_idx(self.depth_conv.bn.bn, sorted_idx)
#self.depth_conv.conv.conv.weight.data = torch.index_select(
# self.depth_conv.conv.conv.weight.data, 0, sorted_idx
#)
#if self.use_se:
# # se expand: output dim 0 reorganize
# se_expand = self.depth_conv.se.fc.expand
# se_expand.weight.data = torch.index_select(se_expand.weight.data, 0, sorted_idx)
# se_expand.bias.data = torch.index_select(se_expand.bias.data, 0, sorted_idx)
# # se reduce: input dim 1 reorganize
# se_reduce = self.depth_conv.se.fc.reduce
# se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 1, sorted_idx)
# # middle weight reorganize
# se_importance = torch.sum(torch.abs(se_expand.weight.data), dim=(0, 2, 3))
# se_importance, se_idx = torch.sort(se_importance, dim=0, descending=True)
# se_expand.weight.data = torch.index_select(se_expand.weight.data, 1, se_idx)
# se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 0, se_idx)
# se_reduce.bias.data = torch.index_select(se_reduce.bias.data, 0, se_idx)
#
## TODO if inverted_bottleneck is None, the previous layer should be reorganized accordingly
#if self.inverted_bottleneck is not None:
# adjust_bn_according_to_idx(self.inverted_bottleneck.bn.bn, sorted_idx)
# self.inverted_bottleneck.conv.conv.weight.data = torch.index_select(
# self.inverted_bottleneck.conv.conv.weight.data, 0, sorted_idx
# )
# return None
#else:
# return sorted_idx
class DynamicConvBnActLayer(MyModule):
def __init__(self, in_channel_list, out_channel_list, kernel_size=3, stride=1, dilation=1,
use_bn=True, act_func='relu6'):
super(DynamicConvBnActLayer, self).__init__()
self.in_channel_list = int2list(in_channel_list)
self.out_channel_list = int2list(out_channel_list)
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.use_bn = use_bn
self.act_func = act_func
self.conv = DynamicPointConv2d(
max_in_channels=max(self.in_channel_list), max_out_channels=max(self.out_channel_list),
kernel_size=self.kernel_size, stride=self.stride, dilation=self.dilation,
)
if self.use_bn:
self.bn = DynamicBatchNorm2d(max(self.out_channel_list))
if self.act_func is not None:
self.act = build_activation(self.act_func, inplace=True)
self.active_out_channel = max(self.out_channel_list)
def forward(self, x):
self.conv.active_out_channel = self.active_out_channel
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.act_func is not None:
x = self.act(x)
return x
@property
def module_str(self):
return 'DyConv(O%d, K%d, S%d)' % (self.active_out_channel, self.kernel_size, self.stride)
@property
def config(self):
return {
'name': DynamicConvBnActLayer.__name__,
'in_channel_list': self.in_channel_list,
'out_channel_list': self.out_channel_list,
'kernel_size': self.kernel_size,
'stride': self.stride,
'dilation': self.dilation,
'use_bn': self.use_bn,
'act_func': self.act_func,
}
@staticmethod
def build_from_config(config):
return DynamicConvBnActLayer(**config)
def get_active_subnet(self, in_channel, preserve_weight=True):
sub_layer = ConvBnActLayer(
in_channel, self.active_out_channel, self.kernel_size, self.stride, self.dilation,
use_bn=self.use_bn, act_func=self.act_func
)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
sub_layer.conv.weight.data.copy_(self.conv.conv.weight.data[:self.active_out_channel, :in_channel, :, :])
if self.use_bn:
copy_bn(sub_layer.bn, self.bn.bn)
return sub_layer
class DynamicLinearLayer(MyModule):
def __init__(self, in_features_list, out_features, bias=True):
super(DynamicLinearLayer, self).__init__()
self.in_features_list = int2list(in_features_list)
self.out_features = out_features
self.bias = bias
#self.dropout_rate = dropout_rate
#
#if self.dropout_rate > 0:
# self.dropout = nn.Dropout(self.dropout_rate, inplace=True)
#else:
# self.dropout = None
self.linear = DynamicLinear(
max_in_features=max(self.in_features_list), max_out_features=self.out_features, bias=self.bias
)
def forward(self, x):
#if self.dropout is not None:
# x = self.dropout(x)
return self.linear(x)
@property
def module_str(self):
return 'DyLinear(%d)' % self.out_features
@property
def config(self):
return {
'name': DynamicLinear.__name__,
'in_features_list': self.in_features_list,
'out_features': self.out_features,
'bias': self.bias
}
@staticmethod
def build_from_config(config):
return DynamicLinearLayer(**config)
def get_active_subnet(self, in_features, preserve_weight=True):
#sub_layer = LinearLayer(in_features, self.out_features, self.bias, dropout_rate=self.dropout_rate)
sub_layer = LinearLayer(in_features, self.out_features, self.bias)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
sub_layer.linear.weight.data.copy_(self.linear.linear.weight.data[:self.out_features, :in_features])
if self.bias:
sub_layer.linear.bias.data.copy_(self.linear.linear.bias.data[:self.out_features])
return sub_layer
class DynamicShortcutLayer(MyModule):
def __init__(self, in_channel_list, out_channel_list, reduction=1):
super(DynamicShortcutLayer, self).__init__()
self.in_channel_list = int2list(in_channel_list)
self.out_channel_list = int2list(out_channel_list)
self.reduction = reduction
self.conv = DynamicPointConv2d(
max_in_channels=max(self.in_channel_list), max_out_channels=max(self.out_channel_list),
kernel_size=1, stride=1,
)
self.active_out_channel = max(self.out_channel_list)
def forward(self, x):
in_channel = x.size(1)
#identity mapping
if in_channel == self.active_out_channel and self.reduction == 1:
return x
#average pooling, if size doesn't match
if self.reduction > 1:
padding = 0 if x.size(-1) % 2 == 0 else 1
x = F.avg_pool2d(x, self.reduction, padding=padding)
#1*1 conv, if #channels doesn't match
if in_channel != self.active_out_channel:
self.conv.active_out_channel = self.active_out_channel
x = self.conv(x)
return x
@property
def module_str(self):
return 'DyShortcut(O%d, R%d)' % (self.active_out_channel, self.reduction)
@property
def config(self):
return {
'name': DynamicShortcutLayer.__name__,
'in_channel_list': self.in_channel_list,
'out_channel_list': self.out_channel_list,
'reduction': self.reduction,
}
@staticmethod
def build_from_config(config):
return DynamicShortcutLayer(**config)
def get_active_subnet(self, in_channel, preserve_weight=True):
sub_layer = ShortcutLayer(
in_channel, self.active_out_channel, self.reduction
)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
sub_layer.conv.weight.data.copy_(self.conv.conv.weight.data[:self.active_out_channel, :in_channel, :, :])
return sub_layer
|
AttentiveNAS-main
|
models/modules/dynamic_layers.py
|
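A standalone sketch of the weight-slicing trick behind the dynamic layers above: a sub-network's convolution is just the top-left slab of the full weight tensor, selected at forward time. The channel counts here are hypothetical.

import torch
import torch.nn as nn
import torch.nn.functional as F

full = nn.Conv2d(32, 64, kernel_size=1, bias=False)   # max_in=32, max_out=64
x = torch.randn(1, 16, 8, 8)                          # active in_channels = 16
w = full.weight[:24, :16, :, :].contiguous()          # active out_channels = 24
y = F.conv2d(x, w, None, 1, 0)
assert y.shape == (1, 24, 8, 8)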
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
AttentiveNAS-main
|
models/modules/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch.nn as nn
from .activations import *
def make_divisible(v, divisor=8, min_value=1):
"""
forked from slim:
https://github.com/tensorflow/models/blob/\
0344c5503ee55e24f0de7f37336a6e08f10976fd/\
research/slim/nets/mobilenet/mobilenet.py#L62-L69
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def sub_filter_start_end(kernel_size, sub_kernel_size):
center = kernel_size // 2
dev = sub_kernel_size // 2
start, end = center - dev, center + dev + 1
assert end - start == sub_kernel_size
return start, end
def get_net_device(net):
return net.parameters().__next__().device
def int2list(val, repeat_time=1):
if isinstance(val, list):
return val
elif isinstance(val, tuple):
return list(val)
else:
return [val for _ in range(repeat_time)]
def get_same_padding(kernel_size):
if isinstance(kernel_size, tuple):
assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size
p1 = get_same_padding(kernel_size[0])
p2 = get_same_padding(kernel_size[1])
return p1, p2
assert isinstance(kernel_size, int), 'kernel size should be either `int` or `tuple`'
assert kernel_size % 2 > 0, 'kernel size should be odd number'
return kernel_size // 2
def copy_bn(target_bn, src_bn):
feature_dim = target_bn.num_features
target_bn.weight.data.copy_(src_bn.weight.data[:feature_dim])
target_bn.bias.data.copy_(src_bn.bias.data[:feature_dim])
target_bn.running_mean.data.copy_(src_bn.running_mean.data[:feature_dim])
target_bn.running_var.data.copy_(src_bn.running_var.data[:feature_dim])
def build_activation(act_func, inplace=True):
if act_func == 'relu':
return nn.ReLU(inplace=inplace)
elif act_func == 'relu6':
return nn.ReLU6(inplace=inplace)
elif act_func == 'tanh':
return nn.Tanh()
elif act_func == 'sigmoid':
return nn.Sigmoid()
elif act_func == 'h_swish':
return Hswish(inplace=inplace)
elif act_func == 'h_sigmoid':
return Hsigmoid(inplace=inplace)
elif act_func == 'swish':
return MemoryEfficientSwish()
elif act_func is None:
return None
else:
raise ValueError('do not support: %s' % act_func)
def drop_connect(inputs, p, training):
"""Drop connect.
Args:
        inputs (tensor: BCHW): Input of this structure.
p (float: 0.0~1.0): Probability of drop connection.
training (bool): The running mode.
Returns:
output: Output after drop connection.
"""
assert 0 <= p <= 1, 'p must be in range of [0,1]'
if not training:
return inputs
batch_size = inputs.shape[0]
keep_prob = 1.0 - p
# generate binary_tensor mask according to probability (p for 0, 1-p for 1)
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
|
AttentiveNAS-main
|
models/modules/nn_utils.py
|
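Hand checks of the helpers above, assuming the repository root is on PYTHONPATH so the module is importable.

from models.modules.nn_utils import make_divisible, sub_filter_start_end

assert make_divisible(37) == 40              # nearest multiple of 8, no >10% drop
assert make_divisible(32) == 32
assert make_divisible(4, divisor=8) == 8     # rounded up, never below min_value
assert sub_filter_start_end(7, 3) == (2, 5)  # centered 3x3 slice of a 7x7 kernel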
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch
from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm
import torch.distributed as dist
from .nn_utils import get_same_padding, make_divisible, sub_filter_start_end
from .static_layers import SELayer
class DynamicSeparableConv2d(nn.Module):
KERNEL_TRANSFORM_MODE = None # None or 1
def __init__(self, max_in_channels, kernel_size_list, stride=1, dilation=1, channels_per_group=1):
super(DynamicSeparableConv2d, self).__init__()
self.max_in_channels = max_in_channels
self.channels_per_group = channels_per_group
assert self.max_in_channels % self.channels_per_group == 0
self.kernel_size_list = kernel_size_list
self.stride = stride
self.dilation = dilation
self.conv = nn.Conv2d(
self.max_in_channels, self.max_in_channels, max(self.kernel_size_list), self.stride,
groups=self.max_in_channels // self.channels_per_group, bias=False,
)
self._ks_set = list(set(self.kernel_size_list))
self._ks_set.sort() # e.g., [3, 5, 7]
if self.KERNEL_TRANSFORM_MODE is not None:
# register scaling parameters
# 7to5_matrix, 5to3_matrix
scale_params = {}
for i in range(len(self._ks_set) - 1):
ks_small = self._ks_set[i]
ks_larger = self._ks_set[i + 1]
param_name = '%dto%d' % (ks_larger, ks_small)
scale_params['%s_matrix' % param_name] = Parameter(torch.eye(ks_small ** 2))
for name, param in scale_params.items():
self.register_parameter(name, param)
self.active_kernel_size = max(self.kernel_size_list)
def get_active_filter(self, in_channel, kernel_size):
out_channel = in_channel
max_kernel_size = max(self.kernel_size_list)
start, end = sub_filter_start_end(max_kernel_size, kernel_size)
filters = self.conv.weight[:out_channel, :in_channel, start:end, start:end]
if self.KERNEL_TRANSFORM_MODE is not None and kernel_size < max_kernel_size:
start_filter = self.conv.weight[:out_channel, :in_channel, :, :] # start with max kernel
for i in range(len(self._ks_set) - 1, 0, -1):
src_ks = self._ks_set[i]
if src_ks <= kernel_size:
break
target_ks = self._ks_set[i - 1]
start, end = sub_filter_start_end(src_ks, target_ks)
_input_filter = start_filter[:, :, start:end, start:end]
_input_filter = _input_filter.contiguous()
_input_filter = _input_filter.view(_input_filter.size(0), _input_filter.size(1), -1)
_input_filter = _input_filter.view(-1, _input_filter.size(2))
_input_filter = F.linear(
_input_filter, self.__getattr__('%dto%d_matrix' % (src_ks, target_ks)),
)
_input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks ** 2)
_input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks, target_ks)
start_filter = _input_filter
filters = start_filter
return filters
def forward(self, x, kernel_size=None):
if kernel_size is None:
kernel_size = self.active_kernel_size
in_channel = x.size(1)
assert in_channel % self.channels_per_group == 0
filters = self.get_active_filter(in_channel, kernel_size).contiguous()
padding = get_same_padding(kernel_size)
y = F.conv2d(
x, filters, None, self.stride, padding, self.dilation, in_channel // self.channels_per_group
)
return y
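# Hedged usage sketch (illustrative addition): a single DynamicSeparableConv2d owns
# the weights of the largest kernel; smaller kernels are center crops of the same
# tensor, so one module serves every kernel size in kernel_size_list. Shapes below
# are assumptions chosen for the example.
def _demo_dynamic_separable_conv():
    conv = DynamicSeparableConv2d(max_in_channels=16, kernel_size_list=[3, 5, 7])
    x = torch.randn(2, 8, 32, 32)  # only 8 of the 16 max channels are active
    y5 = conv(x, kernel_size=5)    # 5x5 center crop of the stored 7x7 weights
    y3 = conv(x, kernel_size=3)    # 3x3 center crop; same-padding keeps 32x32
    assert y5.shape == y3.shape == (2, 8, 32, 32)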
class DynamicPointConv2d(nn.Module):
def __init__(self, max_in_channels, max_out_channels, kernel_size=1, stride=1, dilation=1):
super(DynamicPointConv2d, self).__init__()
self.max_in_channels = max_in_channels
self.max_out_channels = max_out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.conv = nn.Conv2d(
self.max_in_channels, self.max_out_channels, self.kernel_size, stride=self.stride, bias=False,
)
self.active_out_channel = self.max_out_channels
def forward(self, x, out_channel=None):
if out_channel is None:
out_channel = self.active_out_channel
in_channel = x.size(1)
filters = self.conv.weight[:out_channel, :in_channel, :, :].contiguous()
padding = get_same_padding(self.kernel_size)
y = F.conv2d(x, filters, None, self.stride, padding, self.dilation, 1)
return y
class DynamicLinear(nn.Module):
def __init__(self, max_in_features, max_out_features, bias=True):
super(DynamicLinear, self).__init__()
self.max_in_features = max_in_features
self.max_out_features = max_out_features
self.bias = bias
self.linear = nn.Linear(self.max_in_features, self.max_out_features, self.bias)
self.active_out_features = self.max_out_features
def forward(self, x, out_features=None):
if out_features is None:
out_features = self.active_out_features
in_features = x.size(1)
weight = self.linear.weight[:out_features, :in_features].contiguous()
bias = self.linear.bias[:out_features] if self.bias else None
y = F.linear(x, weight, bias)
return y
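# Hedged usage sketch (illustrative addition): DynamicLinear (like DynamicPointConv2d
# above) slices the largest layer's weight, so one parameter tensor backs every
# (in_features, out_features) sub-layer.
def _demo_dynamic_linear():
    fc = DynamicLinear(max_in_features=64, max_out_features=32)
    x = torch.randn(4, 48)          # only 48 of the 64 max input features active
    y = fc(x, out_features=16)      # uses weight[:16, :48] and bias[:16]
    assert y.shape == (4, 16)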
class AllReduce(Function):
@staticmethod
def forward(ctx, input):
input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())]
        # Use all_gather instead of all_reduce since we don't trust in-place operations.
dist.all_gather(input_list, input, async_op=False)
inputs = torch.stack(input_list, dim=0)
return torch.sum(inputs, dim=0)
@staticmethod
def backward(ctx, grad_output):
dist.all_reduce(grad_output, async_op=False)
return grad_output
class DynamicBatchNorm2d(nn.Module):
'''
    1. doesn't accumulate BN statistics (momentum=0.)
    2. calculates BN statistics of all subnets after training
    3. BN weights are shared
https://arxiv.org/abs/1903.05134
https://detectron2.readthedocs.io/_modules/detectron2/layers/batch_norm.html
'''
#SET_RUNNING_STATISTICS = False
def __init__(self, max_feature_dim):
super(DynamicBatchNorm2d, self).__init__()
self.max_feature_dim = max_feature_dim
self.bn = nn.BatchNorm2d(self.max_feature_dim)
        #self.exponential_average_factor = 0  # doesn't accumulate BN stats
self.need_sync = False
        # reserved for tracking BN statistics of the largest and the smallest subnetworks
self.bn_tracking = nn.ModuleList(
[
nn.BatchNorm2d(self.max_feature_dim, affine=False),
nn.BatchNorm2d(self.max_feature_dim, affine=False)
]
)
def forward(self, x):
feature_dim = x.size(1)
if not self.training:
raise ValueError('DynamicBN only supports training')
bn = self.bn
# need_sync
if not self.need_sync:
return F.batch_norm(
x, bn.running_mean[:feature_dim], bn.running_var[:feature_dim], bn.weight[:feature_dim],
bn.bias[:feature_dim], bn.training or not bn.track_running_stats,
bn.momentum, bn.eps,
)
else:
assert dist.get_world_size() > 1, 'SyncBatchNorm requires >1 world size'
B, C = x.shape[0], x.shape[1]
mean = torch.mean(x, dim=[0, 2, 3])
meansqr = torch.mean(x * x, dim=[0, 2, 3])
assert B > 0, 'does not support zero batch size'
vec = torch.cat([mean, meansqr], dim=0)
vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
invstd = torch.rsqrt(var + bn.eps)
scale = bn.weight[:feature_dim] * invstd
bias = bn.bias[:feature_dim] - mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
#if bn.num_features == feature_dim or DynamicBatchNorm2d.SET_RUNNING_STATISTICS:
# return bn(x)
#else:
# exponential_average_factor = 0.0
# if bn.training and bn.track_running_stats:
# # TODO: if statement only here to tell the jit to skip emitting this when it is None
# if bn.num_batches_tracked is not None:
# bn.num_batches_tracked += 1
# if bn.momentum is None: # use cumulative moving average
# exponential_average_factor = 1.0 / float(bn.num_batches_tracked)
# else: # use exponential moving average
# exponential_average_factor = bn.momentum
# return F.batch_norm(
# x, bn.running_mean[:feature_dim], bn.running_var[:feature_dim], bn.weight[:feature_dim],
# bn.bias[:feature_dim], bn.training or not bn.track_running_stats,
# exponential_average_factor, bn.eps,
# )
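# Hedged usage sketch (illustrative addition): in the common single-process path the
# module normalizes the first feature_dim channels with slices of the max-width BN
# parameters; eval mode is deliberately unsupported (see the ValueError above).
def _demo_dynamic_bn():
    bn = DynamicBatchNorm2d(max_feature_dim=16)
    bn.train()
    x = torch.randn(4, 8, 5, 5)     # 8 of 16 max features active
    assert bn(x).shape == x.shape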
class DynamicSE(SELayer):
def __init__(self, max_channel):
super(DynamicSE, self).__init__(max_channel)
def forward(self, x):
in_channel = x.size(1)
num_mid = make_divisible(in_channel // self.reduction, divisor=8)
y = x.mean(3, keepdim=True).mean(2, keepdim=True)
# reduce
reduce_conv = self.fc.reduce
reduce_filter = reduce_conv.weight[:num_mid, :in_channel, :, :].contiguous()
reduce_bias = reduce_conv.bias[:num_mid] if reduce_conv.bias is not None else None
y = F.conv2d(y, reduce_filter, reduce_bias, 1, 0, 1, 1)
# relu
y = self.fc.relu(y)
# expand
expand_conv = self.fc.expand
expand_filter = expand_conv.weight[:in_channel, :num_mid, :, :].contiguous()
expand_bias = expand_conv.bias[:in_channel] if expand_conv.bias is not None else None
y = F.conv2d(y, expand_filter, expand_bias, 1, 0, 1, 1)
# hard sigmoid
y = self.fc.h_sigmoid(y)
return x * y
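# Hedged usage sketch (illustrative addition): the SE reduce/expand filters are
# sliced to the active channel count, so one DynamicSE serves every width; the
# squeeze width num_mid is re-derived from the active input on each forward.
def _demo_dynamic_se():
    se = DynamicSE(max_channel=64)
    x = torch.randn(2, 32, 8, 8)    # 32 of 64 max channels active
    assert se(x).shape == x.shape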
|
AttentiveNAS-main
|
models/modules/dynamic_ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR, WarmupLinearDecayLR, ConstantLR
def build_optimizer(args, model):
"""
Build an optimizer from config.
"""
no_wd_params, wd_params = [], []
for name, param in model.named_parameters():
if param.requires_grad:
if ".bn" in name or ".bias" in name:
no_wd_params.append(param)
else:
wd_params.append(param)
no_wd_params = nn.ParameterList(no_wd_params)
wd_params = nn.ParameterList(wd_params)
weight_decay_weight = args.weight_decay_weight
weight_decay_bn_bias = args.weight_decay_bn_bias
base_lr = args.lr_scheduler.base_lr
params_group = [
{"params": wd_params, "weight_decay": float(weight_decay_weight), 'group_name':'weight'},
{"params": no_wd_params, "weight_decay": float(weight_decay_bn_bias), 'group_name':'bn_bias'},
]
if args.optimizer.method == 'sgd':
momentum = args.optimizer.momentum
nesterov = args.optimizer.nesterov
optimizer = torch.optim.SGD(
params_group,
lr = base_lr,
momentum = momentum,
nesterov = nesterov,
)
else:
raise ValueError(f'no optimizer {args.optimizer.method}')
return optimizer
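# Hedged usage sketch (illustrative; the args layout below is an assumption inferred
# from the attribute accesses above). Parameters whose names contain ".bn" or
# ".bias" get their own weight decay; note the split matches by *name*, so BN
# modules must be named accordingly to land in the no-decay group.
def _demo_build_optimizer():
    from types import SimpleNamespace
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    args = SimpleNamespace(
        weight_decay_weight=1e-5,
        weight_decay_bn_bias=0.0,
        lr_scheduler=SimpleNamespace(base_lr=0.1),
        optimizer=SimpleNamespace(method='sgd', momentum=0.9, nesterov=True),
    )
    opt = build_optimizer(args, model)
    assert len(opt.param_groups) == 2  # 'weight' group and 'bn_bias' group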
def build_lr_scheduler(args, optimizer):
if not hasattr(args, 'max_iters'):
        # important house-keeping: derive the total iteration count
args.max_iters = args.n_iters_per_epoch * args.epochs
if getattr(args, 'warmup_iters', None) is None:
args.warmup_iters = args.n_iters_per_epoch * args.warmup_epochs
warmup_iters = args.warmup_iters
warmup_lr = float(getattr(args.lr_scheduler, 'warmup_lr', 0.001))
warmup_method = getattr(args.lr_scheduler, 'warmup_method', 'linear')
clamp_lr_percent = float(getattr(args.lr_scheduler, 'clamp_lr_percent', 0.))
clamp_lr = args.lr_scheduler.base_lr * clamp_lr_percent
if args.lr_scheduler.method == 'warmup_cosine_lr':
return WarmupCosineLR(
optimizer,
args.max_iters,
warmup_factor = warmup_lr,
warmup_iters = warmup_iters,
warmup_method = warmup_method,
clamp_lr = clamp_lr,
)
elif args.lr_scheduler.method == 'warmup_exp_decay_lr':
decay_cycle_iters = int(args.lr_scheduler.lr_decay_cycle * args.n_iters_per_epoch)
total_decay_iters = args.n_iters_per_epoch * (args.epochs - args.warmup_epochs)
milestones = [ warmup_iters + (idx + 1) * decay_cycle_iters \
for idx in range(total_decay_iters // decay_cycle_iters)]
return WarmupMultiStepLR(
optimizer,
milestones,
gamma=args.lr_scheduler.lr_decay_rate_per_cycle,
warmup_factor = warmup_lr,
warmup_iters = warmup_iters,
warmup_method = warmup_method,
clamp_lr = clamp_lr,
)
elif args.lr_scheduler.method == 'warmup_linear_lr':
decay_cycle_iters = args.n_iters_per_epoch
milestones = [ warmup_iters + (idx + 1) * decay_cycle_iters \
for idx in range(args.epochs - args.warmup_epochs)]
return WarmupLinearDecayLR(
optimizer,
milestones,
warmup_factor = warmup_lr,
warmup_iters = warmup_iters,
warmup_method = warmup_method,
clamp_lr = clamp_lr,
)
elif args.lr_scheduler.method == 'constant_lr':
return ConstantLR(
optimizer
)
else:
raise NotImplementedError
|
AttentiveNAS-main
|
solver/build.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import math
from bisect import bisect_right
from typing import List
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(self,
optimizer,
max_iters,
warmup_factor = 0.001,
warmup_iters = 1000,
warmup_method = 'linear',
last_epoch = -1,
clamp_lr = 0.
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.clamp_lr = clamp_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [ max( self.clamp_lr if self.last_epoch > self.warmup_iters else 0.,
base_lr * warmup_factor * 0.5 * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))
) for base_lr in self.base_lrs ]
def _compute_values(self):
return self.get_lr()
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma = 0.1,
warmup_factor = 0.001,
warmup_iters = 1000,
warmup_method = "linear",
last_epoch= -1,
clamp_lr = 0.
):
        if list(milestones) != sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. Got {}".format(milestones)
            )
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.clamp_lr = clamp_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [ max( self.clamp_lr if self.last_epoch > self.warmup_iters else 0.,
base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
) for base_lr in self.base_lrs ]
def _compute_values(self):
# The new interface
return self.get_lr()
class WarmupLinearDecayLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
warmup_factor = 0.001,
warmup_iters = 1000,
warmup_method = "linear",
last_epoch= -1,
clamp_lr = 0.
):
        if list(milestones) != sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. Got {}".format(milestones)
            )
self.milestones = milestones
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.clamp_lr = clamp_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [ max( self.clamp_lr if self.last_epoch > self.warmup_iters else 0.,
base_lr * warmup_factor * (1.0 - 1.0 * bisect_right(self.milestones, self.last_epoch) / len(self.milestones))
) for base_lr in self.base_lrs ]
def _compute_values(self):
# The new interface
return self.get_lr()
def _get_warmup_factor_at_iter(method, iter, warmup_iters, warmup_factor):
if iter >= warmup_iters:
return 1.0
if method == 'constant':
return warmup_factor
elif method == 'linear':
alpha = float(iter) / float(warmup_iters)
return warmup_factor * (1. - alpha) + alpha
else:
raise ValueError("Unknown warmup method: {}".format(method))
class ConstantLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(self,
optimizer,
last_epoch= -1,
):
super().__init__(optimizer, last_epoch)
def get_lr(self):
return [ base_lr for base_lr in self.base_lrs ]
def _compute_values(self):
return self.get_lr()
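# Hedged worked example (illustrative addition): with linear warmup over 1000
# iterations and warmup_factor=0.001, the multiplier ramps from 0.001 at iter 0
# through 0.5005 at iter 500 to 1.0 once warmup ends.
def _demo_warmup_factor():
    assert _get_warmup_factor_at_iter('linear', 0, 1000, 0.001) == 0.001
    assert abs(_get_warmup_factor_at_iter('linear', 500, 1000, 0.001) - 0.5005) < 1e-9
    assert _get_warmup_factor_at_iter('linear', 1000, 1000, 0.001) == 1.0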
|
AttentiveNAS-main
|
solver/lr_scheduler.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .build import build_optimizer, build_lr_scheduler
|
AttentiveNAS-main
|
solver/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import time
from utils.progress import AverageMeter, ProgressMeter, accuracy
from utils.flops_counter import count_net_flops_and_params
import models
def log_helper(summary, logger=None):
if logger:
logger.info(summary)
else:
print(summary)
def validate_one_subnet(
val_loader,
subnet,
criterion,
args,
logger=None,
):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
log_helper('evaluating...', logger)
#evaluation
end = time.time()
subnet.cuda(args.gpu)
subnet.eval() # freeze again all running stats
for batch_idx, (images, target) in enumerate(val_loader):
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = subnet(images)
loss = criterion(output, target).item()
# measure accuracy
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.size(0)
if args.distributed and getattr(args, 'distributed_val', True):
corr1, corr5, loss = acc1 * batch_size, acc5 * batch_size, loss * batch_size
stats = torch.tensor([corr1, corr5, loss, batch_size], device=args.gpu)
dist.barrier() # synchronizes all processes
dist.all_reduce(stats, op=torch.distributed.ReduceOp.SUM)
corr1, corr5, loss, batch_size = stats.tolist()
acc1, acc5, loss = corr1 / batch_size, corr5 / batch_size, loss/batch_size
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
losses.update(loss, batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
if batch_idx % args.print_freq == 0:
progress.display(batch_idx, logger)
log_helper(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}, Top1: {top1.sum}/{top1.count}'
.format(top1=top1, top5=top5), logger)
# compute flops
if getattr(subnet, 'module', None):
resolution = subnet.module.resolution
else:
resolution = subnet.resolution
data_shape = (1, 3, resolution, resolution)
flops, params = count_net_flops_and_params(subnet, data_shape)
return float(top1.avg), float(top5.avg), float(losses.avg), flops, params
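# Hedged worked example of the distributed reduction above (illustrative numbers):
# rank 0 sees acc1 = 50.0 on a batch of 2 (corr1 = 100.0), rank 1 sees acc1 = 100.0
# on a batch of 2 (corr1 = 200.0); after all_reduce the global top-1 accuracy is
# (100.0 + 200.0) / (2 + 2) = 75.0, i.e. a batch-size-weighted average.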
|
AttentiveNAS-main
|
evaluate/imagenet_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import time
from utils.progress import AverageMeter, ProgressMeter, accuracy
import utils.comm as comm
from .imagenet_eval import validate_one_subnet, log_helper
def validate(
subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
bn_calibration=True,
):
supernet = model.module \
if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
results = []
top1_list, top5_list = [], []
with torch.no_grad():
for net_id in subnets_to_be_evaluated:
if net_id == 'attentive_nas_min_net':
supernet.sample_min_subnet()
elif net_id == 'attentive_nas_max_net':
supernet.sample_max_subnet()
elif net_id.startswith('attentive_nas_random_net'):
supernet.sample_active_subnet()
else:
supernet.set_active_subnet(
subnets_to_be_evaluated[net_id]['resolution'],
subnets_to_be_evaluated[net_id]['width'],
subnets_to_be_evaluated[net_id]['depth'],
subnets_to_be_evaluated[net_id]['kernel_size'],
subnets_to_be_evaluated[net_id]['expand_ratio'],
)
subnet = supernet.get_active_subnet()
subnet_cfg = supernet.get_active_subnet_settings()
subnet.cuda(args.gpu)
if bn_calibration:
subnet.eval()
subnet.reset_running_stats_for_calibration()
                # re-estimate BN running mean and variance on training batches
                logger.info('Calibrating BN running statistics')
for batch_idx, (images, _) in enumerate(train_loader):
if batch_idx >= args.post_bn_calibration_batch_num:
break
if getattr(args, 'use_clean_images_for_subnet_training', False):
_, images = images
images = images.cuda(args.gpu, non_blocking=True)
subnet(images) #forward only
acc1, acc5, loss, flops, params = validate_one_subnet(
val_loader, subnet, criterion, args, logger
)
top1_list.append(acc1)
top5_list.append(acc5)
summary = str({
'net_id': net_id,
'mode': 'evaluate',
'epoch': getattr(args, 'curr_epoch', -1),
'acc1': acc1,
'acc5': acc5,
'loss': loss,
'flops': flops,
'params': params,
**subnet_cfg
})
if args.distributed and getattr(args, 'distributed_val', True):
logger.info(summary)
results += [summary]
else:
group = comm.reduce_eval_results(summary, args.gpu)
results += group
for rec in group:
logger.info(rec)
return results
|
AttentiveNAS-main
|
evaluate/attentive_nas_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
import atexit
import os
import random
import copy
def count_helper(v, flops, m):
if flops not in m:
m[flops] = {}
if v not in m[flops]:
m[flops][v] = 0
m[flops][v] += 1
def round_flops(flops, step):
return int(round(flops / step) * step)
def convert_count_to_prob(m):
if isinstance(m[list(m.keys())[0]], dict):
for k in m:
convert_count_to_prob(m[k])
else:
t = sum(m.values())
for k in m:
m[k] = 1.0 * m[k] / t
def sample_helper(flops, m):
keys = list(m[flops].keys())
probs = list(m[flops].values())
return random.choices(keys, weights=probs)[0]
def build_transition_prob_matrix(file_handler, step):
    # initialize
prob_map = {}
prob_map['discretize_step'] = step
for k in ['flops', 'resolution', 'width', 'depth', 'kernel_size', 'expand_ratio']:
prob_map[k] = {}
cc = 0
for line in file_handler:
vals = eval(line.strip())
# discretize
flops = round_flops(vals['flops'], step)
prob_map['flops'][flops] = prob_map['flops'].get(flops, 0) + 1
# resolution
r = vals['resolution']
count_helper(r, flops, prob_map['resolution'])
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
for idx, v in enumerate(vals[k]):
if idx not in prob_map[k]:
prob_map[k][idx] = {}
count_helper(v, flops, prob_map[k][idx])
cc += 1
# convert count to probability
for k in ['flops', 'resolution', 'width', 'depth', 'kernel_size', 'expand_ratio']:
convert_count_to_prob(prob_map[k])
prob_map['n_observations'] = cc
return prob_map
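# Hedged usage sketch (illustrative; the one-dict-literal-per-line record format is
# an assumption matching the eval() parser above).
def _demo_build_transition_prob_matrix():
    lines = [
        "{'flops': 412, 'resolution': 192, 'width': [16], 'depth': [2], "
        "'kernel_size': [3], 'expand_ratio': [4]}",
        "{'flops': 598, 'resolution': 224, 'width': [24], 'depth': [3], "
        "'kernel_size': [5], 'expand_ratio': [6]}",
    ]
    m = build_transition_prob_matrix(lines, step=100)
    assert m['n_observations'] == 2
    assert abs(sum(m['flops'].values()) - 1.0) < 1e-9  # {400: 0.5, 600: 0.5}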
class ArchSampler():
def __init__(self, arch_to_flops_map_file_path, discretize_step, model, acc_predictor=None):
super(ArchSampler, self).__init__()
with open(arch_to_flops_map_file_path, 'r') as fp:
            self.prob_map = build_transition_prob_matrix(fp, discretize_step)
self.discretize_step = discretize_step
self.model = model
self.acc_predictor = acc_predictor
self.min_flops = min(list(self.prob_map['flops'].keys()))
self.max_flops = max(list(self.prob_map['flops'].keys()))
        self.curr_sample_pool = None  # TODO: architecture samples could be generated asynchronously
def sample_one_target_flops(self, flops_uniform=False):
f_vals = list(self.prob_map['flops'].keys())
f_probs = list(self.prob_map['flops'].values())
if flops_uniform:
return random.choice(f_vals)
else:
return random.choices(f_vals, weights=f_probs)[0]
def sample_archs_according_to_flops(self, target_flops, n_samples=1, max_trials=100, return_flops=True, return_trials=False):
archs = []
#for _ in range(n_samples):
while len(archs) < n_samples:
for _trial in range(max_trials+1):
arch = {}
arch['resolution'] = sample_helper(target_flops, self.prob_map['resolution'])
for k in ['width', 'kernel_size', 'depth', 'expand_ratio']:
arch[k] = []
for idx in sorted(list(self.prob_map[k].keys())):
arch[k].append(sample_helper(target_flops, self.prob_map[k][idx]))
if self.model:
self.model.set_active_subnet(**arch)
flops = self.model.compute_active_subnet_flops()
if return_flops:
arch['flops'] = flops
if round_flops(flops, self.discretize_step) == target_flops:
break
else:
raise NotImplementedError
            # accept the sample anyway, even if no trial matched the target FLOPs bucket
archs.append(arch)
return archs
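# Hedged usage sketch (illustrative; the record format is an assumption matching the
# parser above). With model=None only FLOPs-bucket sampling is usable, since the
# rejection sampling path needs the supernet to measure a candidate's FLOPs.
def _demo_arch_sampler():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fp:
        fp.write("{'flops': 412, 'resolution': 192, 'width': [16], 'depth': [2], "
                 "'kernel_size': [3], 'expand_ratio': [4]}\n")
        path = fp.name
    sampler = ArchSampler(path, discretize_step=100, model=None)
    assert sampler.sample_one_target_flops() == 400  # the only discretized bucket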
|
AttentiveNAS-main
|
sampler/attentive_nas_sampler.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
""" Auto Augment
Implementation adapted from timm: https://github.com/rwightman/pytorch-image-models
"""
import random
import math
from PIL import Image, ImageOps, ImageEnhance
import PIL
_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
_HPARAMS_DEFAULT = dict(
translate_const=250,
img_mean=_FILL,
)
_RANDOM_INTERPOLATION = (Image.NEAREST, Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
interpolation = kwargs.pop('resample', Image.NEAREST)
if isinstance(interpolation, (list, tuple)):
return random.choice(interpolation)
else:
return interpolation
def _check_args_tf(kwargs):
if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
kwargs.pop('fillcolor')
kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)
def shear_y(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)
def translate_x_rel(img, pct, **kwargs):
pixels = pct * img.size[0]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_rel(img, pct, **kwargs):
pixels = pct * img.size[1]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def translate_x_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def rotate(img, degrees, **kwargs):
_check_args_tf(kwargs)
if _PIL_VER >= (5, 2):
return img.rotate(degrees, **kwargs)
elif _PIL_VER >= (5, 0):
w, h = img.size
post_trans = (0, 0)
rotn_center = (w / 2.0, h / 2.0)
angle = -math.radians(degrees)
matrix = [
round(math.cos(angle), 15),
round(math.sin(angle), 15),
0.0,
round(-math.sin(angle), 15),
round(math.cos(angle), 15),
0.0,
]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
matrix[2], matrix[5] = transform(
-rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
else:
return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
return ImageOps.autocontrast(img)
def invert(img, **__):
return ImageOps.invert(img)
def equalize(img, **__):
return ImageOps.equalize(img)
def solarize(img, thresh, **__):
return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
lut = []
for i in range(256):
if i < thresh:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
else:
return img
def posterize(img, bits_to_keep, **__):
if bits_to_keep >= 8:
return img
bits_to_keep = max(1, bits_to_keep) # prevent all 0 images
return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
return ImageEnhance.Sharpness(img).enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level):
# range [-30, 30]
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate(level)
return (level,)
def _enhance_level_to_arg(level):
# range [0.1, 1.9]
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
# range [-0.3, 0.3]
level = (level / _MAX_LEVEL) * 0.3
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg(level, translate_const):
level = (level / _MAX_LEVEL) * float(translate_const)
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg2(level):
level = (level / _MAX_LEVEL) * float(_HPARAMS_DEFAULT['translate_const'])
level = _randomly_negate(level)
return (level,)
def _translate_rel_level_to_arg(level):
# range [-0.45, 0.45]
level = (level / _MAX_LEVEL) * 0.45
level = _randomly_negate(level)
return (level,)
# def level_to_arg(hparams):
# return {
# 'AutoContrast': lambda level: (),
# 'Equalize': lambda level: (),
# 'Invert': lambda level: (),
# 'Rotate': _rotate_level_to_arg,
# # FIXME these are both different from original impl as I believe there is a bug,
# # not sure what is the correct alternative, hence 2 options that look better
# 'Posterize': lambda level: (int((level / _MAX_LEVEL) * 4) + 4,), # range [4, 8]
# 'Posterize2': lambda level: (4 - int((level / _MAX_LEVEL) * 4),), # range [4, 0]
# 'Solarize': lambda level: (int((level / _MAX_LEVEL) * 256),), # range [0, 256]
# 'SolarizeAdd': lambda level: (int((level / _MAX_LEVEL) * 110),), # range [0, 110]
# 'Color': _enhance_level_to_arg,
# 'Contrast': _enhance_level_to_arg,
# 'Brightness': _enhance_level_to_arg,
# 'Sharpness': _enhance_level_to_arg,
# 'ShearX': _shear_level_to_arg,
# 'ShearY': _shear_level_to_arg,
# 'TranslateX': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
# 'TranslateY': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
# 'TranslateXRel': lambda level: _translate_rel_level_to_arg(level),
# 'TranslateYRel': lambda level: _translate_rel_level_to_arg(level),
# }
NAME_TO_OP = {
'AutoContrast': auto_contrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Posterize2': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x_abs,
'TranslateY': translate_y_abs,
'TranslateXRel': translate_x_rel,
'TranslateYRel': translate_y_rel,
}
def pass_fn(input):
return ()
def _conversion0(input):
return (int((input / _MAX_LEVEL) * 4) + 4,)
def _conversion1(input):
return (4 - int((input / _MAX_LEVEL) * 4),)
def _conversion2(input):
return (int((input / _MAX_LEVEL) * 256),)
def _conversion3(input):
return (int((input / _MAX_LEVEL) * 110),)
class AutoAugmentOp:
def __init__(self, name, prob, magnitude, hparams={}):
self.aug_fn = NAME_TO_OP[name]
# self.level_fn = level_to_arg(hparams)[name]
if name == 'AutoContrast' or name == 'Equalize' or name == 'Invert':
self.level_fn = pass_fn
elif name == 'Rotate':
self.level_fn = _rotate_level_to_arg
elif name == 'Posterize':
self.level_fn = _conversion0
elif name == 'Posterize2':
self.level_fn = _conversion1
elif name == 'Solarize':
self.level_fn = _conversion2
elif name == 'SolarizeAdd':
self.level_fn = _conversion3
elif name == 'Color' or name == 'Contrast' or name == 'Brightness' or name == 'Sharpness':
self.level_fn = _enhance_level_to_arg
elif name == 'ShearX' or name == 'ShearY':
self.level_fn = _shear_level_to_arg
elif name == 'TranslateX' or name == 'TranslateY':
self.level_fn = _translate_abs_level_to_arg2
elif name == 'TranslateXRel' or name == 'TranslateYRel':
self.level_fn = _translate_rel_level_to_arg
else:
print("{} not recognized".format({}))
self.prob = prob
self.magnitude = magnitude
# If std deviation of magnitude is > 0, we introduce some randomness
# in the usually fixed policy and sample magnitude from normal dist
# with mean magnitude and std-dev of magnitude_std.
# NOTE This is being tested as it's not in paper or reference impl.
self.magnitude_std = 0.5 # FIXME add arg/hparam
self.kwargs = {
'fillcolor': hparams['img_mean'] if 'img_mean' in hparams else _FILL,
'resample': hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION
}
def __call__(self, img):
if self.prob < random.random():
return img
magnitude = self.magnitude
if self.magnitude_std and self.magnitude_std > 0:
magnitude = random.gauss(magnitude, self.magnitude_std)
magnitude = min(_MAX_LEVEL, max(0, magnitude))
level_args = self.level_fn(magnitude)
return self.aug_fn(img, *level_args, **self.kwargs)
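# Hedged usage sketch (illustrative addition): a single op applies its PIL transform
# with probability `prob` at strength `magnitude` (jittered by magnitude_std, then
# clamped to [0, _MAX_LEVEL]); geometry-preserving ops keep the image size.
def _demo_auto_augment_op():
    img = Image.new('RGB', (32, 32), color=(128, 128, 128))
    op = AutoAugmentOp('Rotate', prob=1.0, magnitude=9)
    assert op(img).size == img.size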
def auto_augment_policy_v0(hparams=_HPARAMS_DEFAULT):
# ImageNet policy from TPU EfficientNet impl, cannot find
# a paper reference.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy_original(hparams=_HPARAMS_DEFAULT):
# ImageNet policy from https://arxiv.org/abs/1805.09501
policy = [
[('Posterize', 0.4, 8), ('Rotate', 0.6, 9)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
[('Posterize', 0.6, 7), ('Posterize', 0.6, 6)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
[('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
[('Posterize', 0.8, 5), ('Equalize', 1.0, 2)],
[('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
[('Equalize', 0.6, 8), ('Posterize', 0.4, 6)],
[('Rotate', 0.8, 8), ('Color', 0.4, 0)],
[('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
[('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Rotate', 0.8, 8), ('Color', 1.0, 2)],
[('Color', 0.8, 8), ('Solarize', 0.8, 7)],
[('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
[('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
[('Color', 0.4, 0), ('Equalize', 0.6, 3)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
]
pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy(name='v0', hparams=_HPARAMS_DEFAULT):
if name == 'original':
return auto_augment_policy_original(hparams)
elif name == 'v0':
return auto_augment_policy_v0(hparams)
else:
print("Unknown auto_augmentation policy {}".format(name))
raise AssertionError()
class AutoAugment:
def __init__(self, policy):
self.policy = policy
def __call__(self, img):
sub_policy = random.choice(self.policy)
for op in sub_policy:
img = op(img)
return img
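# Hedged usage sketch (illustrative addition): compose the v0 policy into a callable
# that applies one randomly chosen sub-policy (a pair of ops) per image; this is the
# object that the data-transform pipeline inserts before ToTensor.
def _demo_auto_augment():
    aa = AutoAugment(auto_augment_policy('v0'))
    img = Image.new('RGB', (224, 224), color=(128, 128, 128))
    assert aa(img).size == (224, 224)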
|
AttentiveNAS-main
|
data/auto_augment_tf.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from __future__ import print_function
import torch
import torchvision.transforms.functional as F
from torchvision import datasets, transforms
from torch.utils.data import Dataset
import math
import sys
import random
from PIL import Image
from torch.utils.data.distributed import DistributedSampler
import os
from .data_transform import get_data_transform
def build_data_loader(args):
if args.dataset == 'imagenet':
return build_default_imagenet_data_loader(args)
else:
raise NotImplementedError
def build_default_imagenet_data_loader(args):
traindir = os.path.join(args.dataset_dir, "train")
valdir = os.path.join(args.dataset_dir, "val")
#build transforms
train_transform = get_data_transform(args, is_training=True, augment=args.augment)
test_transform = get_data_transform(args, is_training=False, augment=args.augment)
#build datasets
if not getattr(args, 'data_loader_cross_validation', False):
train_dataset = datasets.ImageFolder(traindir, train_transform)
val_dataset = datasets.ImageFolder(valdir, test_transform)
#else:
# my_dataset = datasets.ImageFolder(traindir)
# train_dataset, val_dataset = torch.utils.data.random_split(
# my_dataset, [args.data_split_ntrain, args.data_split_nval], generator=torch.Generator().manual_seed(args.data_split_seed)
# )
# train_dataset = MyDataset( train_dataset, train_transform)
# val_dataset = MyDataset(val_dataset, test_transform)
#build data loaders
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
sampler=train_sampler,
drop_last = getattr(args, 'drop_last', True),
num_workers=args.data_loader_workers_per_gpu,
pin_memory=True,
)
if args.distributed and getattr(args, 'distributed_val', True):
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
else:
val_sampler = None
eval_batch_size = min(args.batch_size, 16) \
if not getattr(args, 'eval_only', False) else args.batch_size
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=eval_batch_size,
shuffle=False,
num_workers=args.data_loader_workers_per_gpu,
drop_last=False,
pin_memory=True,
sampler=val_sampler,
)
return train_loader, val_loader, train_sampler
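# Hedged usage sketch (illustrative; the attribute names mirror the accesses above,
# and the dataset path is a placeholder):
#   args.dataset = 'imagenet'; args.dataset_dir = '/path/to/imagenet'
#   args.augment = 'default'; args.batch_size = 256; args.distributed = False
#   args.data_loader_workers_per_gpu = 8
#   train_loader, val_loader, train_sampler = build_data_loader(args)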
|
AttentiveNAS-main
|
data/data_loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
AttentiveNAS-main
|
data/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
from .auto_augment_tf import (
auto_augment_policy,
AutoAugment,
)
IMAGENET_PIXEL_MEAN = [123.675, 116.280, 103.530]
IMAGENET_PIXEL_STD = [58.395, 57.12, 57.375]
def get_data_transform(args, is_training, augment):
train_crop_size = getattr(args, 'train_crop_size', 224)
test_scale = getattr(args, 'test_scale', 256)
test_crop_size = getattr(args, 'test_crop_size', 224)
interpolation = Image.BICUBIC
if getattr(args, 'interpolation', None) and args.interpolation == 'bilinear':
interpolation = Image.BILINEAR
da_args = {
'train_crop_size': train_crop_size,
'test_scale': test_scale,
'test_crop_size': test_crop_size,
'interpolation': interpolation
}
if augment == 'default':
return build_default_transform(is_training, **da_args)
elif augment == 'auto_augment_tf':
policy = getattr(args, 'auto_augment_policy', 'v0')
return build_imagenet_auto_augment_tf_transform(is_training, policy=policy, **da_args)
else:
raise ValueError(augment)
def get_normalize():
normalize = transforms.Normalize(
mean=torch.Tensor(IMAGENET_PIXEL_MEAN) / 255.0,
std=torch.Tensor(IMAGENET_PIXEL_STD) / 255.0,
)
return normalize
def build_default_transform(
is_training, train_crop_size=224, test_scale=256, test_crop_size=224, interpolation=Image.BICUBIC
):
normalize = get_normalize()
if is_training:
ret = transforms.Compose(
[
transforms.RandomResizedCrop(train_crop_size, interpolation=interpolation),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
else:
ret = transforms.Compose(
[
transforms.Resize(test_scale, interpolation=interpolation),
transforms.CenterCrop(test_crop_size),
transforms.ToTensor(),
normalize,
]
)
return ret
def build_imagenet_auto_augment_tf_transform(
is_training, policy='v0', train_crop_size=224, test_scale=256, test_crop_size=224, interpolation=Image.BICUBIC
):
normalize = get_normalize()
img_size = train_crop_size
aa_params = {
"translate_const": int(img_size * 0.45),
"img_mean": tuple(round(x) for x in IMAGENET_PIXEL_MEAN),
}
aa_policy = AutoAugment(auto_augment_policy(policy, aa_params))
if is_training:
ret = transforms.Compose(
[
transforms.RandomResizedCrop(train_crop_size, interpolation=interpolation),
transforms.RandomHorizontalFlip(),
aa_policy,
transforms.ToTensor(),
normalize,
]
)
else:
ret = transforms.Compose(
[
transforms.Resize(test_scale, interpolation=interpolation),
transforms.CenterCrop(test_crop_size),
transforms.ToTensor(),
normalize,
]
)
return ret
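# Hedged usage sketch (illustrative addition): the default eval transform resizes
# the shorter side to test_scale, center-crops to test_crop_size, and normalizes
# with the ImageNet mean/std defined above.
def _demo_default_transform():
    t = build_default_transform(is_training=False, test_scale=256, test_crop_size=224)
    img = Image.new('RGB', (300, 400))
    assert tuple(t(img).shape) == (3, 224, 224)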
|
AttentiveNAS-main
|
data/data_transform.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import distutils.command.build
import distutils.util
import fnmatch
import glob
import io
import os
import sys
from pathlib import Path
import setuptools
from setuptools.command.build_py import build_py as build_py_orig
from setuptools.dist import Distribution
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
"--package-dir",
help="Source directory of package files.",
default="bazel-bin/package.runfiles/CompilerGym",
)
argparser.add_argument(
"--get-wheel-filename",
action="store_true",
help="Print only output filename without building it.",
)
argparser.add_argument(
"--build-dir",
help="Path to build dir. This is where this script copies files from the source before making the wheel package.",
default="build",
)
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
sys.path.insert(0, str((Path(args.package_dir) / "compiler_gym").absolute()))
import config # noqa: E402
with open("VERSION") as f:
version = f.read().strip()
with open("README.md") as f:
# Force UTF-8 file encoding to support non-ascii characters in the readme.
with io.open("README.md", encoding="utf-8") as f:
long_description = f.read()
with open("compiler_gym/requirements.txt") as f:
requirements = [ln.split("#")[0].rstrip() for ln in f.readlines()]
# When building a bdist_wheel we need to set the appropriate tags: this package
# includes compiled binaries, and does not include compiled python extensions.
try:
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
class bdist_wheel(_bdist_wheel):
def finalize_options(self):
_bdist_wheel.finalize_options(self)
self.root_is_pure = False
def get_tag(self):
python, abi, plat = _bdist_wheel.get_tag(self)
python, abi = "py3", "none"
return python, abi, plat
except ImportError:
bdist_wheel = None
class build(distutils.command.build.build):
def initialize_options(self):
distutils.command.build.build.initialize_options(self)
self.build_base = args.build_dir
# Add files that should be excluded from the package.
# The argument exclude_package_data of setuptools.setup(...)
# does not work with py files. They have to be excluded here.
excluded = [
str(Path(args.package_dir) / "compiler_gym/envs/llvm/make_specs.py"),
str(Path(args.package_dir) / "compiler_gym/bin/random_eval.py"),
]
class build_py(build_py_orig):
def find_package_modules(self, package, package_dir):
modules = super().find_package_modules(package, package_dir)
res = [
(pkg, mod, file)
for (pkg, mod, file) in modules
if not any(fnmatch.fnmatchcase(file, pat=pattern) for pattern in excluded)
]
return res
def wheel_filename(**kwargs):
# create a fake distribution from arguments
dist = Distribution(attrs=kwargs)
# finalize bdist_wheel command
bdist_wheel_cmd = dist.get_command_obj("bdist_wheel")
bdist_wheel_cmd.ensure_finalized()
# assemble wheel file name
distname = bdist_wheel_cmd.wheel_dist_name
tag = "-".join(bdist_wheel_cmd.get_tag())
return f"{distname}-{tag}.whl"
setup_kwargs = {
"name": "compiler_gym",
"version": version,
"description": "Reinforcement learning environments for compiler research",
"author": "Facebook AI Research",
"long_description": long_description,
"long_description_content_type": "text/markdown",
"url": "https://github.com/facebookresearch/CompilerGym",
"license": "MIT",
"packages": [
"compiler_gym.bin",
"compiler_gym.datasets",
"compiler_gym.envs.gcc.datasets",
"compiler_gym.envs.gcc.service",
"compiler_gym.envs.gcc",
"compiler_gym.envs.loop_tool",
"compiler_gym.envs.loop_tool.service",
"compiler_gym.envs",
"compiler_gym.envs",
"compiler_gym.errors",
"compiler_gym.leaderboard",
"compiler_gym.service.proto",
"compiler_gym.service.runtime",
"compiler_gym.service",
"compiler_gym.spaces",
"compiler_gym.third_party.autophase",
"compiler_gym.third_party.gccinvocation",
"compiler_gym.third_party.inst2vec",
"compiler_gym.third_party",
"compiler_gym.util.flags",
"compiler_gym.util",
"compiler_gym.views",
"compiler_gym.wrappers",
"compiler_gym",
],
"package_dir": {
"": args.package_dir,
},
"package_data": {
"compiler_gym": [
"envs/gcc/service/compiler_gym-gcc-service",
"envs/loop_tool/service/compiler_gym-loop_tool-service",
"third_party/csmith/csmith/bin/csmith",
"third_party/csmith/csmith/include/csmith-2.3.0/*.h",
"third_party/inst2vec/*.pickle",
]
},
"install_requires": requirements,
"include_package_data": True,
"python_requires": ">=3.7",
"classifiers": [
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Compilers",
],
"cmdclass": {"bdist_wheel": bdist_wheel, "build": build, "build_py": build_py},
"platforms": [distutils.util.get_platform()],
"zip_safe": False,
}
if config.enable_llvm_env:
setup_kwargs["packages"].extend(
[
"compiler_gym.envs.llvm.datasets",
"compiler_gym.envs.llvm.service",
"compiler_gym.envs.llvm",
"compiler_gym.third_party.llvm",
"compiler_gym.third_party.autophase",
]
)
setup_kwargs["package_data"]["compiler_gym"].extend(
[
"envs/llvm/service/compiler_gym-llvm-service",
"envs/llvm/service/compute_observation",
"envs/llvm/service/llvm-extract-one",
"envs/llvm/service/libLLVMPolly.so",
"third_party/cbench/benchmarks.txt",
"third_party/cbench/cbench-v*/crc32.bc",
]
)
if config.enable_mlir_env:
setup_kwargs["packages"].extend(
[
"compiler_gym.envs.mlir.datasets",
"compiler_gym.envs.mlir.service",
"compiler_gym.envs.mlir",
]
)
setup_kwargs["package_data"]["compiler_gym"].extend(
["envs/mlir/service/compiler_gym-mlir-service"]
)
original_cwd = os.getcwd()
try:
os.chdir(os.path.join(args.package_dir, "compiler_gym"))
setup_kwargs["package_data"]["compiler_gym"].extend(
glob.glob("envs/mlir/service/llvm/**", recursive=True)
)
setup_kwargs["package_data"]["compiler_gym"].extend(
glob.glob("envs/mlir/service/google_benchmark/**", recursive=True)
)
finally:
os.chdir(original_cwd)
if args.get_wheel_filename:
# Instead of generating the wheel file,
# print its filename.
file_name = wheel_filename(**setup_kwargs)
sys.stdout.write(file_name)
else:
setuptools.setup(**setup_kwargs)
|
CompilerGym-development
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import compiler_gym
from compiler_gym.envs import LlvmEnv
from tests.test_main import main
def test_compiler_gym_make():
"""Test that compiler_gym.make() is equivalent to gym.make()."""
with compiler_gym.make("llvm-v0") as env:
assert isinstance(env, LlvmEnv)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/make_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/envs."""
import gym
import pytest
from flaky import flaky
from compiler_gym.envs import llvm
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.service.connection import CompilerGymServiceConnection
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_benchmark_constructor_arg(env: LlvmEnv):
env.close() # Fixture only required to pull in dataset.
with gym.make("llvm-v0", benchmark="cbench-v1/dijkstra") as env:
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
def test_benchmark_setter(env: LlvmEnv):
env.benchmark = "benchmark://cbench-v1/dijkstra"
assert env.benchmark != "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
def test_benchmark_set_in_reset(env: LlvmEnv):
env.reset(benchmark="benchmark://cbench-v1/dijkstra")
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
@pytest.mark.parametrize("reward_space", ["IrInstructionCount", "ObjectTextSizeBytes"])
def test_reward_space_setter(env: LlvmEnv, reward_space: str):
env.reward_space = reward_space
assert env.reward_space == reward_space
env.reset()
assert env.reward_space == reward_space
@pytest.mark.parametrize("reward_space", ["IrInstructionCount", "ObjectTextSizeBytes"])
def test_reward_space_set_in_reset(env: LlvmEnv, reward_space: str):
env.reset(reward_space=reward_space)
assert env.reward_space == reward_space
env.reset()
assert env.reward_space == reward_space
@pytest.mark.parametrize(
"observation_space", ["IrInstructionCount", "ObjectTextSizeBytes"]
)
def test_observation_space_setter(env: LlvmEnv, observation_space: str):
env.observation_space = observation_space
assert env.observation_space_spec == observation_space
env.reset()
assert env.observation_space_spec == observation_space
@pytest.mark.parametrize(
"observation_space", ["IrInstructionCount", "ObjectTextSizeBytes"]
)
def test_observation_space_set_in_reset(env: LlvmEnv, observation_space: str):
env.reset(observation_space=observation_space)
assert env.observation_space_spec == observation_space
env.reset()
assert env.observation_space_spec == observation_space
def test_uri_substring_no_match(env: LlvmEnv):
env.reset(benchmark="benchmark://cbench-v1/crc32")
assert env.benchmark == "benchmark://cbench-v1/crc32"
with pytest.raises(LookupError):
env.reset(benchmark="benchmark://cbench-v1/crc3")
with pytest.raises(LookupError):
env.reset(benchmark="benchmark://cbench-v1/cr")
def test_uri_substring_candidate_no_match_infer_scheme(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
assert env.benchmark == "benchmark://cbench-v1/crc32"
with pytest.raises(LookupError):
env.reset(benchmark="cbench-v1/crc3")
with pytest.raises(LookupError):
env.reset(benchmark="cbench-v1/cr")
def test_reset_to_force_benchmark(env: LlvmEnv):
"""Test that calling reset() with a benchmark forces that benchmark to
be used for every subsequent episode.
"""
env.reset(benchmark="benchmark://cbench-v1/crc32")
assert env.benchmark == "benchmark://cbench-v1/crc32"
for _ in range(10):
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
def test_unset_forced_benchmark(env: LlvmEnv):
"""Test that setting benchmark "unsets" the previous benchmark."""
env.reset(benchmark="benchmark://cbench-v1/dijkstra")
with pytest.warns(
UserWarning,
match=r"Changing the benchmark has no effect until reset\(\) is called",
):
env.benchmark = "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
def test_change_benchmark_mid_episode(env: LlvmEnv):
"""Test that changing the benchmark while in an episode has no effect until
the next call to reset()."""
env.reset(benchmark="benchmark://cbench-v1/crc32")
assert env.benchmark == "benchmark://cbench-v1/crc32"
with pytest.warns(
UserWarning,
match=r"Changing the benchmark has no effect until reset\(\) is called",
):
env.benchmark = "benchmark://cbench-v1/dijkstra"
assert env.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
def test_set_benchmark_invalid_type(env: LlvmEnv):
with pytest.raises(TypeError) as ctx:
env.benchmark = 10
assert str(ctx.value) == "Expected a Benchmark or str, received: 'int'"
def test_gym_make_kwargs():
"""Test that passing kwargs to gym.make() are forwarded to environment
constructor.
"""
with gym.make(
"llvm-v0", observation_space="Autophase", reward_space="IrInstructionCount"
) as env:
assert env.observation_space_spec.id == "Autophase"
assert env.reward_space.name == "IrInstructionCount"
def test_step_session_id_not_found(env: LlvmEnv):
"""Test that step() recovers gracefully from an unknown session error from
the service."""
env._session_id = 15 # pylint: disable=protected-access
observation, reward, done, info = env.step(0)
assert done
assert info["error_details"] == "Session not found: 15"
assert observation is None
assert reward is None
assert not env.in_episode
@pytest.fixture(scope="function")
def remote_env() -> LlvmEnv:
"""A test fixture that yields a connection to a remote service."""
service = CompilerGymServiceConnection(llvm.LLVM_SERVICE_BINARY)
try:
with LlvmEnv(service=service.connection.url) as env:
yield env
finally:
service.close()
@flaky # step() can fail.
def test_switch_default_reward_space_in_episode(env: LlvmEnv):
"""Test that switching reward space during an episode resets the cumulative
episode reward.
"""
env.reset(reward_space=None)
_, _, done, info = env.step(0)
assert not done, info
assert env.episode_reward is None
env.reward_space = "IrInstructionCount"
assert env.episode_reward == 0
_, _, done, info = env.step(0)
assert not done, info
assert env.episode_reward is not None
@flaky # step() can fail.
def test_set_same_default_reward_space_in_episode(env: LlvmEnv):
"""Test that setting the reward space during an episode does not reset the
cumulative episode reward if the reward space is unchanged.
"""
env.reset(reward_space="IrInstructionCount")
env.episode_reward = 10
# No change to the reward space.
env.reward_space = "IrInstructionCount"
assert env.episode_reward == 10
# Change in reward space.
env.reward_space = "IrInstructionCountOz"
assert env.episode_reward == 0
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/compiler_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the copy() and deepcopy() operators on ClientServiceCompilerEnv."""
from copy import copy, deepcopy
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_forbidden_shallow_copy(env: LlvmEnv):
"""Test that shallow copy operator is explicitly forbidden."""
with pytest.raises(
TypeError,
match=r"^ClientServiceCompilerEnv instances do not support shallow copies. Use deepcopy\(\)",
):
copy(env)
def test_deep_copy(env: LlvmEnv):
"""Test that deep copy creates an independent copy."""
env.reset()
with deepcopy(env) as cpy:
assert cpy.state == env.state
env.step(env.action_space.sample())
assert cpy.state != env.state
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/env_copy_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym:validate."""
import gym
import pytest
from compiler_gym import CompilerEnvState, validate_states
from tests.test_main import main
@pytest.mark.parametrize("inorder", (False, True))
@pytest.mark.parametrize("nproc", (1, 2))
def test_validate_states_lambda_callback(inorder, nproc):
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
commandline="opt input.bc -o output.bc",
)
results = list(
validate_states(
make_env=lambda: gym.make("llvm-v0"),
states=[state],
inorder=inorder,
nproc=nproc,
)
)
assert len(results) == 1
assert results[0].okay()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/validate_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pkg_resources
import compiler_gym
from compiler_gym.util.runfiles_path import runfiles_path
from packaging import version
from tests.pytest_plugins.common import bazel_only, install_test_only
from tests.test_main import main
def test_version_dunder():
assert isinstance(compiler_gym.__version__, str)
def test_version_dunder_format():
version.parse(compiler_gym.__version__)
@install_test_only
def test_setuptools_version():
version = pkg_resources.require("compiler_gym")[0].version
assert version == compiler_gym.__version__
@bazel_only
def test_expected_version():
"""Test that embedded compiler gym version matches VERSION file."""
with open(runfiles_path("VERSION")) as f:
version = f.read().strip()
assert version == compiler_gym.__version__
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/version_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym:compiler_env_state."""
import json
from io import StringIO
from pathlib import Path
import pytest
import requests
from pydantic import ValidationError as PydanticValidationError
from compiler_gym import CompilerEnvState, CompilerEnvStateWriter
from compiler_gym.compiler_env_state import CompilerEnvStateReader
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_state_from_dict_empty():
with pytest.raises(PydanticValidationError):
CompilerEnvState(**{})
def test_state_invalid_walltime():
with pytest.raises(PydanticValidationError, match="Walltime cannot be negative"):
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=-1,
reward=1.5,
commandline="",
)
def test_state_to_json_from_dict():
original_state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=100,
reward=1.5,
commandline="-a -b -c",
)
state_from_dict = CompilerEnvState(**json.loads(original_state.json()))
assert state_from_dict.benchmark == "benchmark://cbench-v0/foo"
assert state_from_dict.walltime == 100
assert state_from_dict.reward == 1.5
assert state_from_dict.commandline == "-a -b -c"
def test_state_to_json_from_dict_no_reward():
original_state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=100, commandline="-a -b -c"
)
state_from_dict = CompilerEnvState(**json.loads(original_state.json()))
assert state_from_dict.benchmark == "benchmark://cbench-v0/foo"
assert state_from_dict.walltime == 100
assert state_from_dict.reward is None
assert state_from_dict.commandline == "-a -b -c"
def test_state_equality_different_types():
state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
)
assert not state == 5 # noqa testing __eq__
assert state != 5 # testing __ne__
def test_state_equality_same():
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
)
assert a == b # testing __eq__
assert not a != b # noqa testing __ne__
def test_state_equality_different_walltime():
"""Test that walltime is not compared."""
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=5, commandline="-a -b -c"
)
assert a == b # testing __eq__
assert not a != b # noqa testing __ne__
def test_state_equality_one_sided_reward():
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=5, commandline="-a -b -c"
)
assert a == b # testing __eq__
assert b == a # testing __eq__
assert not a != b # noqa testing __ne__
assert not b != a # noqa testing __ne__
def test_state_equality_equal_reward():
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
assert a == b # testing __eq__
assert b == a # testing __eq__
assert not a != b # noqa testing __ne__
assert not b != a # noqa testing __ne__
def test_state_equality_unequal_reward():
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=3,
)
assert not a == b # noqa testing __eq__
    assert not b == a # noqa testing __eq__
assert a != b # testing __ne__
assert b != a # testing __ne__
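# Illustrative sketch (not part of the original suite): the equality rules
# exercised above, condensed into one helper. Assumes only the
# CompilerEnvState API used elsewhere in this file.
def _state_equality_rules_demo():
    base = dict(benchmark="benchmark://cbench-v0/foo", commandline="-a -b -c")
    # Walltime is excluded from equality comparisons ...
    assert CompilerEnvState(walltime=1, **base) == CompilerEnvState(walltime=9, **base)
    # ... and a state without a reward compares equal to one with a reward.
    assert CompilerEnvState(walltime=1, **base) == CompilerEnvState(
        walltime=1, reward=2, **base
    )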
def test_compiler_env_state_writer():
buf = StringIO()
writer = CompilerEnvStateWriter(buf)
writer.write_state(
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
),
flush=True,
)
assert buf.getvalue() == (
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
)
def test_compiler_env_state_writer_no_header():
buf = StringIO()
writer = CompilerEnvStateWriter(buf, header=False)
writer.write_state(
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
),
flush=True,
)
assert buf.getvalue() == "benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
@pytest.mark.parametrize("flush", [False, True])
def test_compiler_env_state_writer_with_statement(tmpwd: Path, flush: bool):
path = Path("results.csv")
assert not path.is_file() # Sanity check.
f = open(path, "w")
with CompilerEnvStateWriter(f) as writer:
writer.write_state(
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
),
flush=flush,
)
assert f.closed
with open(path) as f:
assert f.read() == (
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
)
def test_compiler_env_state_reader():
buf = StringIO(
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
)
reader = CompilerEnvStateReader(buf)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_compiler_env_state_reader_no_header():
buf = StringIO("benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n")
reader = CompilerEnvStateReader(buf)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_compiler_env_state_reader_with_header():
buf = StringIO(
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
)
reader = CompilerEnvStateReader(buf)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_compiler_env_state_reader_with_header_out_of_order_columns():
buf = StringIO(
"commandline,reward,benchmark,walltime\n"
"-a -b -c,2.0,benchmark://cbench-v0/foo,5.0\n"
)
reader = CompilerEnvStateReader(buf)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_compiler_env_state_reader_empty_input():
buf = StringIO("")
reader = CompilerEnvStateReader(buf)
assert list(reader) == []
def test_compiler_env_state_reader_header_only():
buf = StringIO("benchmark,reward,walltime,commandline\n")
reader = CompilerEnvStateReader(buf)
assert list(reader) == []
def test_state_from_csv_invalid_format():
buf = StringIO("abcdef\n")
reader = CompilerEnvStateReader(buf)
with pytest.raises(
ValueError, match=r"Expected 4 columns in the first row of CSV: \['abcdef'\]"
):
next(iter(reader))
def test_state_serialize_deserialize_equality():
original_state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=100,
reward=1.5,
commandline="-a -b -c",
)
buf = StringIO()
CompilerEnvStateWriter(buf).write_state(original_state)
buf.seek(0) # Rewind the buffer for reading.
state_from_csv = next(iter(CompilerEnvStateReader(buf)))
assert state_from_csv.benchmark == "benchmark://cbench-v0/foo"
assert state_from_csv.walltime == 100
assert state_from_csv.reward == 1.5
assert state_from_csv.commandline == "-a -b -c"
def test_state_serialize_deserialize_equality_no_reward():
original_state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=100, commandline="-a -b -c"
)
buf = StringIO()
CompilerEnvStateWriter(buf).write_state(original_state)
buf.seek(0) # Rewind the buffer for reading.
state_from_csv = next(iter(CompilerEnvStateReader(buf)))
assert state_from_csv.benchmark == "benchmark://cbench-v0/foo"
assert state_from_csv.walltime == 100
assert state_from_csv.reward is None
assert state_from_csv.commandline == "-a -b -c"
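# Round-trip sketch (not part of the original suite), assuming the writer
# emits its header only once per stream: write several states to one buffer
# and read them all back in order.
def _round_trip_many_demo():
    states = [
        CompilerEnvState(
            benchmark=f"benchmark://cbench-v0/p{i}", walltime=i + 1, commandline="-a"
        )
        for i in range(3)
    ]
    buf = StringIO()
    writer = CompilerEnvStateWriter(buf)
    for state in states:
        writer.write_state(state)
    buf.seek(0)  # Rewind the buffer for reading.
    assert list(CompilerEnvStateReader(buf)) == states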
def test_read_paths_stdin(monkeypatch):
monkeypatch.setattr(
"sys.stdin",
StringIO(
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
),
)
reader = CompilerEnvStateReader.read_paths(["-"])
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_read_paths_file(tmp_path):
    csv_path = f"{tmp_path}/test.csv"
    with open(csv_path, "w") as csv_file:
        csv_file.write(
            "benchmark,reward,walltime,commandline\n"
            "benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
        )
    reader = CompilerEnvStateReader.read_paths([csv_path])
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_read_paths_url(monkeypatch):
urls = ["https://compilergym.ai/benchmarktest.csv"]
class MockResponse:
def __init__(self, text, status_code):
self.text = text
self.status_code = status_code
def ok_mock_response(*args, **kwargs):
return MockResponse(
(
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
),
200,
)
monkeypatch.setattr(requests, "get", ok_mock_response)
reader = CompilerEnvStateReader.read_paths(urls)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def bad_mock_response(*args, **kwargs):
return MockResponse("", 404)
monkeypatch.setattr(requests, "get", bad_mock_response)
with pytest.raises(requests.exceptions.InvalidURL):
reader = CompilerEnvStateReader.read_paths(urls)
list(reader)
def test_read_paths_bad_inputs():
bad_dirs = [
"/fake/directory/file.csv",
"fake/directory/file.csv",
"https://www.compilergym.ai/benchmark",
"htts://www.compilergym.ai/benchmark.csv",
"htts://www.compilergym.ai/benchmark",
]
with pytest.raises(FileNotFoundError):
reader = CompilerEnvStateReader.read_paths(bad_dirs)
list(reader)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/compiler_env_state_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:random_search."""
import tempfile
from pathlib import Path
import gym
from compiler_gym.random_search import random_search, replay_actions_from_logs
from tests.pytest_plugins.common import set_command_line_flags
from tests.test_main import main
def make_env():
env = gym.make("llvm-autophase-ic-v0")
env.benchmark = "cbench-v1/dijkstra"
return env
def test_random_search_smoke_test():
with tempfile.TemporaryDirectory() as tmp:
outdir = Path(tmp)
set_command_line_flags(["argv0"])
env = random_search(
make_env=make_env,
outdir=outdir,
patience=50,
total_runtime=3,
nproc=1,
skip_done=False,
)
env.close()
assert (outdir / "random_search.json").is_file()
assert (outdir / "random_search_progress.csv").is_file()
assert (outdir / "random_search_best_actions.txt").is_file()
assert (outdir / "optimized.bc").is_file()
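        # Replaying the best actions found by the search produces the
        # progress and commandline artifacts asserted below.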
with make_env() as env:
replay_actions_from_logs(env, Path(outdir))
assert (outdir / "random_search_best_actions_progress.csv").is_file()
assert (outdir / "random_search_best_actions_commandline.txt").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/random_search_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from getpass import getuser
from typing import List, Optional
import pytest
from compiler_gym.util import debug_util as dbg
def main(extra_pytest_args: Optional[List[str]] = None, debug_level: int = 1):
"""The main entry point for the pytest runner.
An example file which uses this:
        from compiler_gym.util.test_main import main
        def test_foo():
            assert 1 + 1 == 2
        if __name__ == "__main__":
            main()
In the above, the single test_foo test will be executed.
:param extra_pytest_args: A list of additional command line options to pass
to pytest.
:param debug_level: The debug level to use to run tests. Higher levels are
more verbose and may be useful for diagnosing test failures. Normally
CompilerGym executes with a debug level of 0.
"""
dbg.set_debug_level(debug_level)
# Keep test data isolated from user data.
os.environ[
"COMPILER_GYM_SITE_DATA"
] = f"/tmp/compiler_gym_{getuser()}/tests/site_data"
os.environ["COMPILER_GYM_CACHE"] = f"/tmp/compiler_gym_{getuser()}/tests/cache"
pytest_args = sys.argv + [
# Run pytest verbosely to print out test names to provide context in
# case of failures.
"-vv",
# Disable "Module already imported" warnings. See:
# https://docs.pytest.org/en/latest/how-to/usage.html#calling-pytest-from-python-code
"-W",
"ignore:Module already imported:pytest.PytestWarning",
# Disable noisy "Flaky tests passed" messages.
"--no-success-flaky-report",
]
# Support for sharding. If a py_test target has the shard_count attribute
# set (in the range [1,50]), then the pytest-shard module is used to divide
# the tests among the shards. See https://pypi.org/project/pytest-shard/
sharded_test = os.environ.get("TEST_TOTAL_SHARDS")
if sharded_test:
num_shards = int(os.environ["TEST_TOTAL_SHARDS"])
shard_index = int(os.environ["TEST_SHARD_INDEX"])
pytest_args += [f"--shard-id={shard_index}", f"--num-shards={num_shards}"]
else:
pytest_args += ["-p", "no:pytest-shard"]
pytest_args += extra_pytest_args or []
returncode = pytest.main(pytest_args)
# By default pytest will fail with an error if no tests are collected.
# Disable that behavior here (with a warning) since there are legitimate
# cases where we may want to run a test file with no tests in it. For
# example, when running on a continuous integration service where all the
# tests are marked with the @skip_on_ci decorator.
if returncode == pytest.ExitCode.NO_TESTS_COLLECTED.value:
print(
"WARNING: The test suite was empty. Is that intended?",
file=sys.stderr,
)
returncode = 0
sys.exit(returncode)
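# Usage sketch (hypothetical shard counts; Bazel normally sets these): run a
# test file in two shards by hand with the environment variables read above:
#
#   TEST_TOTAL_SHARDS=2 TEST_SHARD_INDEX=0 python tests/foo_test.py
#   TEST_TOTAL_SHARDS=2 TEST_SHARD_INDEX=1 python tests/foo_test.py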
|
CompilerGym-development
|
tests/test_main.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym:validation_result."""
import json
import pytest
from compiler_gym import CompilerEnvState, ValidationError, ValidationResult
from tests.test_main import main
def test_validation_error_equality():
e1 = ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
e2 = ValidationError( # Same as e1
type="Syntax Error",
data={"data": [1, 2, 3]},
)
e3 = ValidationError( # Different "type"
type="Foobar",
data={"data": [1, 2, 3]},
)
e4 = ValidationError( # Different "data" dict
type="Syntax Error",
data={"data": [1, 2, 3, 4]},
)
assert e1 == e2
assert e1 != e3
assert e3 != e4
def test_validation_error_json():
"""Check that JSON serialize/de-serialize produces equivalent errors."""
error = ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
assert ValidationError(**json.loads(error.json())) == error
def test_validation_result_json():
result = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
],
)
assert ValidationResult(**json.loads(result.json())) == result
def test_validation_result_equality_different_states():
a = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test/a",
commandline="test",
walltime=1,
),
walltime=3,
)
b = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test/b",
commandline="test",
walltime=1,
),
walltime=3,
)
assert a != b
def test_validation_result_equality_different_walltimes():
a = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
)
b = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=10,
),
walltime=10,
)
assert a == b
def test_validation_result_equality_different_errors_order():
a = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
),
ValidationError(
type="Runtime Error",
data={"a": "b"},
),
],
)
b = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Runtime Error",
data={"a": "b"},
),
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
),
],
)
assert a == b
def test_validation_result_join_no_inputs():
with pytest.raises(ValueError, match=r"^No states to join$"):
ValidationResult.join([])
def test_validation_result_join_one_input():
result = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
],
)
joined_result = ValidationResult.join([result])
assert result == joined_result
def test_validation_result_join_two_inputs_different_errors():
a = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
],
)
b = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=10,
),
walltime=3,
errors=[
ValidationError(
type="Type Error",
data={"a": "b"},
)
],
)
c = ValidationResult.join([a, b])
assert c == ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=10,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
),
ValidationError(
type="Type Error",
data={"a": "b"},
),
],
)
# Test walltime, which is excluded from equality comparisons.
assert c.walltime == 6
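    # join() concatenates the input errors and sums the walltimes; here the
    # joined state matches b's (walltime=10).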
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/validation_result_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import CommandlineWithTerminalAction, ConstrainedCommandline
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_commandline_with_terminal_action(env: LlvmEnv):
mem2reg_unwrapped_index = env.action_space["-mem2reg"]
env = CommandlineWithTerminalAction(env)
mem2reg_index = env.action_space["-mem2reg"]
reg2mem_index = env.action_space["-reg2mem"]
assert mem2reg_index == mem2reg_unwrapped_index
env.reset()
_, _, done, info = env.step(mem2reg_index)
assert not done, info
_, _, done, info = env.multistep([reg2mem_index, reg2mem_index])
assert not done, info
assert env.actions == [mem2reg_index, reg2mem_index, reg2mem_index]
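    # The wrapper appends a terminal action after the existing flags;
    # selecting it ends the episode and reports it in the info dict.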
_, _, done, info = env.step(len(env.action_space.flags) - 1)
assert done
assert "terminal_action" in info
def test_commandline_with_terminal_action_fork(env: LlvmEnv):
env = CommandlineWithTerminalAction(env)
assert env.unwrapped.action_space != env.action_space # Sanity check.
with env.fork() as fkd:
assert fkd.action_space == env.action_space
_, _, done, _ = env.step(len(env.action_space.flags) - 1)
assert done
_, _, done, _ = fkd.step(len(env.action_space.flags) - 1)
assert done
def test_constrained_action_space(env: LlvmEnv):
mem2reg_index = env.action_space["-mem2reg"]
reg2mem_index = env.action_space["-reg2mem"]
env = ConstrainedCommandline(env=env, flags=["-mem2reg", "-reg2mem"])
assert env.action_space.n == 2
assert env.action_space.flags == ["-mem2reg", "-reg2mem"]
assert env.action(0) == mem2reg_index
assert env.action([0, 1]) == [mem2reg_index, reg2mem_index]
env.reset()
env.step(0)
env.multistep([1, 1])
assert env.actions == [0, 1, 1]
def test_constrained_action_space_fork(env: LlvmEnv):
mem2reg_index = env.action_space["-mem2reg"]
reg2mem_index = env.action_space["-reg2mem"]
env = ConstrainedCommandline(env=env, flags=["-mem2reg", "-reg2mem"])
fkd = env.fork()
try:
assert fkd.action_space.n == 2
assert fkd.action_space.flags == ["-mem2reg", "-reg2mem"]
assert fkd.action(0) == mem2reg_index
assert fkd.action([0, 1]) == [mem2reg_index, reg2mem_index]
fkd.reset()
fkd.step(0)
fkd.multistep([1, 1])
assert fkd.actions == [0, 1, 1]
finally:
fkd.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/commandline_wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.wrappers.llvm."""
import numpy as np
import pytest
from flaky import flaky
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.wrappers import RuntimePointEstimateReward
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_invalid_runtime_count(env: LlvmEnv):
env = RuntimePointEstimateReward(env, runtime_count=-10)
with pytest.raises(
ValueError, match="runtimes_per_observation_count must be >= 1. Received: -10"
):
env.reset()
def test_invalid_warmup_count(env: LlvmEnv):
env = RuntimePointEstimateReward(env, warmup_count=-10)
with pytest.raises(
ValueError,
match="warmup_runs_count_per_runtime_observation must be >= 0. Received: -10",
):
env.reset()
def test_reward_range(env: LlvmEnv):
env = RuntimePointEstimateReward(env, runtime_count=3)
assert env.reward_range == (-float("inf"), float("inf"))
def test_reward_range_not_runnable_benchmark(env: LlvmEnv):
env = RuntimePointEstimateReward(env, runtime_count=3)
with pytest.raises(
BenchmarkInitError, match=r"^Benchmark is not runnable: benchmark://npb-v0/1$"
):
env.reset(benchmark="benchmark://npb-v0/1")
@flaky # Runtime can fail
def test_fork(env: LlvmEnv):
env = RuntimePointEstimateReward(env)
with env.fork() as fkd:
assert fkd.reward_space_spec.name == "runtime"
@pytest.mark.parametrize("runtime_count", [1, 3, 5])
@pytest.mark.parametrize("warmup_count", [0, 1, 3])
@pytest.mark.parametrize("estimator", [np.median, min])
@flaky # Runtime can fail
def test_reward_values(env: LlvmEnv, runtime_count, warmup_count, estimator):
env = RuntimePointEstimateReward(
env, runtime_count=runtime_count, warmup_count=warmup_count, estimator=estimator
)
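    # runtime_count: runtime measurements per reward; warmup_count: warmup
    # runs before each measurement; estimator: reduces the runtime samples
    # to a point estimate (e.g. np.median or min).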
env.reset()
assert env.reward_space_spec.runtime_count == runtime_count
assert env.reward_space_spec.warmup_count == warmup_count
assert env.reward_space_spec.estimator == estimator
_, reward_a, done, info = env.step(env.action_space.sample())
assert not done, info
_, reward_b, done, info = env.step(env.action_space.sample())
assert not done, info
_, reward_c, done, info = env.step(env.action_space.sample())
assert not done, info
assert env.episode_reward == reward_a + reward_b + reward_c
assert reward_a or reward_b or reward_c
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/llvm_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import Counter
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_Counter_reset(env: LlvmEnv):
with Counter(env) as env:
env.reset()
assert env.counters == {
"close": 0,
"fork": 0,
"reset": 1,
"step": 0,
}
env.reset()
assert env.counters == {
"close": 0,
"fork": 0,
"reset": 2,
"step": 0,
}
def test_Counter_step(env: LlvmEnv):
with Counter(env) as env:
env.reset()
env.step(0)
assert env.counters == {
"close": 0,
"fork": 0,
"reset": 1,
"step": 1,
}
def test_Counter_double_close(env: LlvmEnv):
with Counter(env) as env:
env.close()
env.close()
assert env.counters == {
"close": 2,
"fork": 0,
"reset": 0,
"step": 0,
}
# Implicit close in `with` statement.
assert env.counters == {
"close": 3,
"fork": 0,
"reset": 0,
"step": 0,
}
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/counter_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import CycleOverBenchmarks, TimeLimit
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_wrapped_close(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=5)
env.close()
assert env.service is None
def test_wrapped_fork_type(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=5)
fkd = env.fork()
try:
assert isinstance(fkd, TimeLimit)
finally:
fkd.close()
def test_wrapped_step_multi_step(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=5)
env.reset(benchmark="benchmark://cbench-v1/dijkstra")
env.multistep([0, 0, 0])
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
assert env.actions == [0, 0, 0]
def test_wrapped_custom_step_args(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=5)
env.reset(benchmark="benchmark://cbench-v1/dijkstra")
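    # step() accepts a per-call observation_spaces list; the observation
    # return value is then a tuple with one entry per requested space.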
(ic,), _, _, _ = env.step(0, observation_spaces=["IrInstructionCount"])
assert isinstance(ic, int)
def test_time_limit_reached(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=3)
env.reset()
_, _, done, info = env.step(0)
assert not done, info
_, _, done, info = env.step(0)
assert not done, info
_, _, done, info = env.step(0)
assert done, info
assert info["TimeLimit.truncated"], info
_, _, done, info = env.step(0)
assert done, info
assert info["TimeLimit.truncated"], info
def test_time_limit_fork(env: LlvmEnv):
"""Check that the time limit state is copied on fork()."""
env = TimeLimit(env, max_episode_steps=3)
env.reset()
_, _, done, info = env.step(0) # 1st step
assert not done, info
fkd = env.fork()
try:
_, _, done, info = env.step(0) # 2nd step
assert not done, info
_, _, done, info = fkd.step(0) # 2nd step
assert not done, info
_, _, done, info = env.step(0) # 3rd step
assert done, info
_, _, done, info = fkd.step(0) # 3rd step
assert done, info
finally:
fkd.close()
def test_time_limit(env: LlvmEnv):
"""Check CycleOverBenchmarks does not break TimeLimit"""
env = TimeLimit(env, max_episode_steps=3)
env = CycleOverBenchmarks(
env,
benchmarks=[
"benchmark://cbench-v1/crc32",
],
)
env.reset()
_, _, done, info = env.step(0)
assert not done, info
_, _, done, info = env.step(0)
assert not done, info
_, _, done, info = env.step(0)
    assert done, info
assert info["TimeLimit.truncated"], info
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/time_limit_wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import (
CycleOverBenchmarks,
CycleOverBenchmarksIterator,
IterateOverBenchmarks,
RandomOrderBenchmarks,
)
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_iterate_over_benchmarks(env: LlvmEnv):
env = IterateOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/dijkstra",
],
)
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
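    # The benchmark sequence is now exhausted, so a further reset() raises.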
with pytest.raises(StopIteration):
env.reset()
def test_iterate_over_benchmarks_fork(env: LlvmEnv):
"""Test that fork() copies over benchmark iterator state."""
env = IterateOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/dijkstra",
],
)
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
fkd = env.fork()
try:
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/dijkstra"
finally:
fkd.close()
def test_iterate_over_benchmarks_fork_shared_iterator(env: LlvmEnv):
"""Test fork() using a single benchmark iterator shared between forks."""
env = IterateOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/dijkstra",
],
fork_shares_iterator=True,
)
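    # With fork_shares_iterator=True the fork advances the same underlying
    # iterator as its parent rather than receiving an independent copy.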
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
fkd = env.fork()
try:
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/dijkstra"
finally:
fkd.close()
def test_cycle_over_benchmarks(env: LlvmEnv):
env = CycleOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
],
)
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
def test_cycle_over_benchmarks_fork(env: LlvmEnv):
env = CycleOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
],
)
fkd = env.fork()
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
try:
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/crc32"
finally:
fkd.close()
def test_cycle_over_benchmarks_fork_shared_iterator(env: LlvmEnv):
env = CycleOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/dijkstra",
],
fork_shares_iterator=True,
)
fkd = env.fork()
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
try:
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/crc32"
finally:
fkd.close()
def test_cycle_over_benchmarks_iterator(env: LlvmEnv):
env = CycleOverBenchmarksIterator(
env,
make_benchmark_iterator=lambda: (
"benchmark://cbench-v1/dijkstra",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/adpcm",
),
)
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/adpcm"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
with env.fork() as fkd:
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/dijkstra"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
assert env.benchmark == "benchmark://cbench-v1/adpcm"
def test_random_order_benchmarks(env: LlvmEnv):
env = RandomOrderBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
],
)
env.reset()
assert env.benchmark in {
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
}
env.reset()
assert env.benchmark in {
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
}
env.reset()
assert env.benchmark in {
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
}
def test_random_order_benchmarks_fork(env: LlvmEnv):
env = RandomOrderBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
],
)
env.reset()
assert env.benchmark in {
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
}
fkd = env.fork()
try:
fkd.reset()
env.reset()
finally:
fkd.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/datasets_wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/wrappers/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
import pytest
from pytest import warns
from compiler_gym.datasets import Datasets
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import ActionWrapper, CompilerEnvWrapper
from compiler_gym.wrappers import ObservationWrapper as CoreObservationWrapper
from compiler_gym.wrappers import RewardWrapper as CoreRewardWrapper
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
class ObservationWrapper(CoreObservationWrapper):
def __init__(self, env):
super().__init__(env)
def convert_observation(self, observation):
return observation
class RewardWrapper(CoreRewardWrapper):
def __init__(self, env):
super().__init__(env)
def convert_reward(self, reward):
return reward
@pytest.fixture(
scope="module",
params=[
ActionWrapper,
CompilerEnvWrapper,
ObservationWrapper,
RewardWrapper,
],
)
def wrapper_type(request):
"""A test fixture that yields one of the CompilerGym wrapper types."""
return request.param
def test_wrapped_close(env: LlvmEnv, wrapper_type):
env = wrapper_type(env)
env.close()
assert env.service is None
def test_wrapped_properties(env: LlvmEnv, wrapper_type):
"""Test accessing the non-standard properties."""
with wrapper_type(env) as env:
assert env.actions == []
assert env.benchmark
assert isinstance(env.datasets, Datasets)
def test_wrapped_fork_type(env: LlvmEnv, wrapper_type):
"""Test forking a wrapper."""
env = wrapper_type(env)
fkd = env.fork()
try:
assert isinstance(fkd, wrapper_type)
finally:
fkd.close()
def test_wrapped_fork_subtype(env: LlvmEnv, wrapper_type):
"""Test forking a wrapper subtype."""
class MyWrapper(wrapper_type):
def __init__(self, env):
super().__init__(env)
env = MyWrapper(env)
fkd = env.fork()
try:
assert isinstance(fkd, MyWrapper)
finally:
fkd.close()
def test_wrapped_fork_subtype_custom_constructor(env: LlvmEnv, wrapper_type):
"""Test forking a wrapper with a custom constructor. This requires a custom
fork() implementation."""
class MyWrapper(wrapper_type):
def __init__(self, env, foo):
super().__init__(env)
self.foo = foo
def fork(self):
return MyWrapper(self.env.fork(), foo=self.foo)
env = MyWrapper(env, foo=1)
fkd = env.fork()
try:
assert isinstance(fkd, MyWrapper)
assert fkd.foo == 1
finally:
fkd.close()
def test_wrapped_step_multi_step(env: LlvmEnv):
"""Test passing a list of actions to step()."""
env = CompilerEnvWrapper(env)
env.reset()
env.multistep([0, 0, 0])
assert env.actions == [0, 0, 0]
def test_wrapped_step_custom_args(env: LlvmEnv, wrapper_type):
"""Test passing the custom CompilerGym step() keyword arguments."""
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation # pass thru
def action(self, action):
return action # pass thru
def convert_reward(self, reward):
return reward
env = MyWrapper(env)
env.reset()
(ir, ic), (icr, icroz), _, _ = env.multistep(
actions=[0, 0, 0],
observation_spaces=["Ir", "IrInstructionCount"],
reward_spaces=["IrInstructionCount", "IrInstructionCountOz"],
)
assert isinstance(ir, str)
assert isinstance(ic, int)
assert isinstance(icr, float)
assert isinstance(icroz, float)
assert env.unwrapped.observation.spaces["Ir"].space.contains(ir)
assert env.unwrapped.observation.spaces["IrInstructionCount"].space.contains(ic)
def test_wrapped_benchmark(env: LlvmEnv, wrapper_type):
"""Test that benchmark property has expected values."""
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation # pass thru
env.observation_space = "Ir"
env = MyWrapper(env)
ir_a = env.reset(benchmark="benchmark://cbench-v1/dijkstra")
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
ir_b = env.reset(benchmark="benchmark://cbench-v1/qsort")
assert env.benchmark == "benchmark://cbench-v1/qsort"
# Check that the observations for different benchmarks are different.
assert ir_a != ir_b
def test_wrapped_set_benchmark(env: LlvmEnv, wrapper_type):
"""Test that the benchmark attribute can be set on wrapped classes."""
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation # pass thru
env = MyWrapper(env)
# Set the benchmark attribute and check that it propagates.
env.benchmark = "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
# Repeat again for a different benchmark.
with warns(
UserWarning,
match=r"Changing the benchmark has no effect until reset\(\) is called",
):
env.benchmark = "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
def test_wrapped_env_in_episode(env: LlvmEnv, wrapper_type):
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation
env = MyWrapper(env)
assert not env.in_episode
env.reset()
assert env.in_episode
def test_wrapped_env_changes_default_spaces(env: LlvmEnv, wrapper_type):
"""Test when an environment wrapper changes the default observation and reward spaces."""
class MyWrapper(wrapper_type):
def __init__(self, env: LlvmEnv):
super().__init__(env)
self.env.observation_space = "Autophase"
self.env.reward_space = "IrInstructionCount"
def convert_observation(self, observation):
return observation # pass thru
env = MyWrapper(env)
assert env.observation_space.shape == (56,)
assert env.observation_space_spec.id == "Autophase"
assert env.reward_space.name == "IrInstructionCount"
observation = env.reset()
assert env.observation_space.contains(observation)
def test_wrapped_env_change_spaces(env: LlvmEnv, wrapper_type):
"""Test changing the observation and reward spaces on a wrapped environment."""
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation # pass thru
def convert_reward(self, reward):
return reward # pass thru
env = MyWrapper(env)
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
assert env.observation_space.shape == (56,)
assert env.observation_space_spec.id == "Autophase"
assert env.reward_space.name == "IrInstructionCount"
def test_wrapped_action(mocker, env: LlvmEnv):
class MyWrapper(ActionWrapper):
def action(self, action):
return action - 1
def reverse_action(self, action):
return action + 1
env = MyWrapper(env)
mocker.spy(env, "action")
env.reset()
env.step(1)
env.step(2)
assert env.action.call_count == 2 # pylint: disable=no-member
assert env.actions == [0, 1]
def test_wrapped_observation(mocker, env: LlvmEnv):
"""Test using an ObservationWrapper that returns the length of the Ir string."""
class MyWrapper(ObservationWrapper):
def __init__(self, env):
super().__init__(env)
self.observation_space = "Ir"
def convert_observation(self, observation):
return len(observation)
env = MyWrapper(env)
assert env.reset() > 0
observation, _, _, _ = env.step(0)
assert observation > 0
def test_wrapped_observation_missing_definition(env: LlvmEnv):
with pytest.raises(TypeError):
env = CoreObservationWrapper(env)
def test_wrapped_reward(env: LlvmEnv):
class MyWrapper(RewardWrapper):
def convert_reward(self, reward):
return -5
env.reward_space = "IrInstructionCount"
env = MyWrapper(env)
env.reset()
_, reward, _, _ = env.step(0)
assert reward == -5
assert env.episode_reward == -5
_, reward, _, _ = env.step(0)
assert reward == -5
assert env.episode_reward == -10
def test_wrapped_env_close(env: LlvmEnv):
wrapped = CompilerEnvWrapper(env)
wrapped.reset()
assert wrapped.service is not None
wrapped.close()
assert wrapped.service is None
def test_wrapped_env_custom_close(env: LlvmEnv):
"""Test that a custom close() method is called on wrapped environments."""
class MyWrapper(CompilerEnvWrapper):
def __init__(self, env: LlvmEnv):
super().__init__(env)
self.custom_close = False
def close(self):
self.custom_close = True
self.env.close()
env = MyWrapper(env)
assert not env.custom_close
env.close()
assert env.custom_close
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/core_wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import CompilerEnvWrapper, SynchronousSqliteLogger
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_SynchronousSqliteLogger_creates_file(env: LlvmEnv, tmp_path):
db_path = tmp_path / "example.db"
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
env = SynchronousSqliteLogger(env, db_path)
env.reset()
env.step(0)
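    # flush() writes any buffered log records through to the database.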
env.flush()
assert db_path.is_file()
def test_SynchronousSqliteLogger_requires_llvm_env(tmp_path):
with pytest.raises(TypeError, match="Requires LlvmEnv base environment"):
SynchronousSqliteLogger(1, tmp_path / "example.db")
def test_SynchronousSqliteLogger_wrapped_env(env: LlvmEnv, tmp_path):
env = CompilerEnvWrapper(env)
env = SynchronousSqliteLogger(env, tmp_path / "example.db")
env.reset()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/sqlite_logger_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.wrappers.llvm."""
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import ValidateBenchmarkAfterEveryStep
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_ValidateBenchmarkAfterEveryStep_valid(env: LlvmEnv):
env.reset()
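    # Stub the benchmark's validation callback so that it yields no errors.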
type(env.benchmark).ivalidate = lambda *_: iter(())
env = ValidateBenchmarkAfterEveryStep(env, reward_penalty=-5)
_, reward, done, info = env.step(0)
assert reward != -5
assert not done
assert "error_details" not in info
@pytest.mark.parametrize("reward_penalty", [-5, 10])
def test_ValidateBenchmarkAfterEveryStep_invalid(env: LlvmEnv, reward_penalty):
env.reset()
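    # Stub the benchmark's validation callback so that it yields one error.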
type(env.benchmark).ivalidate = lambda *_: iter(["Oh no!"])
env = ValidateBenchmarkAfterEveryStep(env, reward_penalty=reward_penalty)
_, reward, done, info = env.step(0)
assert reward == reward_penalty
assert done
assert info["error_details"] == "Oh no!"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/validation_test.py
|