file_path
stringlengths 20
202
| content
stringlengths 9
3.85M
| size
int64 9
3.85M
| lang
stringclasses 9
values | avg_line_length
float64 3.33
100
| max_line_length
int64 8
993
| alphanum_fraction
float64 0.26
0.93
|
---|---|---|---|---|---|---|
RoboticExplorationLab/Deep-ILC/dflex/dflex/vec3.h
|
#pragma once
// Minimal 3-component float vector used by all dflex generated kernels.
// CUDA_CALLABLE (project macro) marks the constructors callable from both
// host and device code.
struct float3
{
    float x;
    float y;
    float z;

    // Component-wise constructor; with no arguments produces the zero vector.
    inline CUDA_CALLABLE float3(float x=0.0f, float y=0.0f, float z=0.0f) : x(x), y(y), z(z) {}
    // Load from a raw float array; caller must provide at least 3 elements.
    explicit inline CUDA_CALLABLE float3(const float* p) : x(p[0]), y(p[1]), z(p[2]) {}
};
//--------------
// float3 methods
// Unary negation: component-wise negative of a.
inline CUDA_CALLABLE float3 operator - (float3 a)
{
    return float3(-a.x, -a.y, -a.z);
}
// Scale vector a by scalar s.
inline CUDA_CALLABLE float3 mul(float3 a, float s)
{
    return float3(a.x * s, a.y * s, a.z * s);
}
// Divide vector a component-wise by scalar s (no zero check; caller's duty).
inline CUDA_CALLABLE float3 div(float3 a, float s)
{
    return float3(a.x / s, a.y / s, a.z / s);
}
// Component-wise vector sum a + b.
inline CUDA_CALLABLE float3 add(float3 a, float3 b)
{
    return float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
// Add scalar s to every component of a (broadcast add).
inline CUDA_CALLABLE float3 add(float3 a, float s)
{
    return float3(a.x + s, a.y + s, a.z + s);
}
// Component-wise vector difference a - b.
inline CUDA_CALLABLE float3 sub(float3 a, float3 b)
{
    return float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
// Euclidean inner product of a and b.
inline CUDA_CALLABLE float dot(float3 a, float3 b)
{
    float s = a.x * b.x;
    s += a.y * b.y;
    s += a.z * b.z;
    return s;
}
// Right-handed cross product a x b.
inline CUDA_CALLABLE float3 cross(float3 a, float3 b)
{
    return float3(a.y * b.z - a.z * b.y,
                  a.z * b.x - a.x * b.z,
                  a.x * b.y - a.y * b.x);
}
// Read component idx (0=x, 1=y, 2=z) of a.
// Relies on x, y, z being laid out contiguously with no padding, which
// holds for the plain struct above. With FP_CHECK enabled, out-of-range
// indices abort the program with a diagnostic.
inline CUDA_CALLABLE float index(const float3 & a, int idx)
{
#if FP_CHECK
    if (idx < 0 || idx > 2)
    {
        printf("float3 index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
        exit(1);
    }
#endif

    return (&a.x)[idx];
}
// Adjoint (reverse-mode) of index(): accumulates the incoming gradient
// adj_ret into the idx-th component of adj_a. The integer index receives
// no gradient (adj_idx is unused by design).
inline CUDA_CALLABLE void adj_index(const float3 & a, int idx, float3 & adj_a, int & adj_idx, float & adj_ret)
{
#if FP_CHECK
    if (idx < 0 || idx > 2)
    {
        printf("float3 index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
        exit(1);
    }
#endif

    (&adj_a.x)[idx] += adj_ret;
}
// Euclidean norm |a|.
inline CUDA_CALLABLE float length(float3 a)
{
    float sq = dot(a, a);
    return sqrtf(sq);
}
// Return a scaled to unit length. Vectors with |a| <= kEps (project-wide
// epsilon, declared elsewhere) map to the zero vector instead of dividing
// by a tiny norm.
inline CUDA_CALLABLE float3 normalize(float3 a)
{
    float l = length(a);
    if (l <= kEps)
        return float3();

    return div(a, l);
}
// True iff all three components are finite (no NaN / +-inf).
// NOTE(review): std::isfinite is used here even though this function is
// CUDA_CALLABLE; presumably the device build resolves it via CUDA's math
// overloads — confirm against the project's compilation flags.
inline bool CUDA_CALLABLE isfinite(float3 x)
{
    return std::isfinite(x.x) && std::isfinite(x.y) && std::isfinite(x.z);
}
// Adjoint of the float3(x, y, z) constructor: the gradient of each output
// component flows straight through to the matching scalar argument.
inline CUDA_CALLABLE void adj_float3(float x, float y, float z, float& adj_x, float& adj_y, float& adj_z, const float3& adj_ret)
{
    adj_x = adj_x + adj_ret.x;
    adj_y = adj_y + adj_ret.y;
    adj_z = adj_z + adj_ret.z;
}
// Adjoint of mul(a, s): d(a*s)/da = s (scales the incoming gradient),
// d(a*s)/ds = a (projected onto the incoming gradient via dot).
inline CUDA_CALLABLE void adj_mul(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret)
{
    adj_a.x += s*adj_ret.x;
    adj_a.y += s*adj_ret.y;
    adj_a.z += s*adj_ret.z;
    adj_s += dot(a, adj_ret);

#if FP_CHECK
    // Diagnostic only: report non-finite inputs or accumulated gradients.
    // (Fixed: format string was missing its closing parenthesis.)
    if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
        printf("adj_mul((%f %f %f), %f, (%f %f %f), %f, (%f %f %f))\n", a.x, a.y, a.z, s, adj_a.x, adj_a.y, adj_a.z, adj_s, adj_ret.x, adj_ret.y, adj_ret.z);
#endif
}
// Adjoint of div(a, s): d(a/s)/ds = -a/s^2, d(a/s)/da = 1/s.
// Uses the float3 operator-/operator/ overloads declared elsewhere.
inline CUDA_CALLABLE void adj_div(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret)
{
    adj_s += dot(- a / (s * s), adj_ret); // - a / s^2
    adj_a.x += adj_ret.x / s;
    adj_a.y += adj_ret.y / s;
    adj_a.z += adj_ret.z / s;

#if FP_CHECK
    // Diagnostic only: report non-finite inputs or accumulated gradients.
    // (Fixed: format string was missing its closing parenthesis.)
    if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
        printf("adj_div((%f %f %f), %f, (%f %f %f), %f, (%f %f %f))\n", a.x, a.y, a.z, s, adj_a.x, adj_a.y, adj_a.z, adj_s, adj_ret.x, adj_ret.y, adj_ret.z);
#endif
}
// Adjoint of add(a, b): gradient passes through unchanged to both operands.
// Uses the float3 operator+= overload declared elsewhere in the project.
inline CUDA_CALLABLE void adj_add(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret)
{
    adj_a += adj_ret;
    adj_b += adj_ret;
}
// Adjoint of add(a, s): the scalar receives the sum of the component
// gradients (reverse of broadcasting s across three components).
inline CUDA_CALLABLE void adj_add(float3 a, float s, float3& adj_a, float& adj_s, const float3& adj_ret)
{
    adj_a += adj_ret;
    adj_s += adj_ret.x + adj_ret.y + adj_ret.z;
}
// Adjoint of sub(a, b): gradient passes through to a and is negated into b.
inline CUDA_CALLABLE void adj_sub(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret)
{
    adj_a += adj_ret;
    adj_b -= adj_ret;
}
// Adjoint of dot(a, b): d(a.b)/da = b, d(a.b)/db = a, each scaled by the
// incoming scalar gradient adj_ret.
inline CUDA_CALLABLE void adj_dot(float3 a, float3 b, float3& adj_a, float3& adj_b, const float adj_ret)
{
    adj_a += b*adj_ret;
    adj_b += a*adj_ret;

#if FP_CHECK
    // Diagnostic only: report non-finite inputs or accumulated gradients.
    if (!isfinite(a) || !isfinite(b) || !isfinite(adj_a) || !isfinite(adj_b) || !isfinite(adj_ret))
        printf("adj_dot((%f %f %f), (%f %f %f), (%f %f %f), (%f %f %f), %f)\n", a.x, a.y, a.z, b.x, b.y, b.z, adj_a.x, adj_a.y, adj_a.z, adj_b.x, adj_b.y, adj_b.z, adj_ret);
#endif
}
// Adjoint of cross(a, b): d(a x b)/da applied to g is b x g, and
// d(a x b)/db applied to g is -(a x g), hence the subtraction.
inline CUDA_CALLABLE void adj_cross(float3 a, float3 b, float3& adj_a, float3& adj_b, const float3& adj_ret)
{
    // todo: sign check
    adj_a += cross(b, adj_ret);
    adj_b -= cross(a, adj_ret);
}
#ifdef CUDA
// Atomically accumulate value into *addr, component by component (device
// only). Note this is three independent atomicAdds: each component update
// is atomic, but the float3 update as a whole is not.
inline __device__ void atomic_add(float3 * addr, float3 value) {
    // *addr += value;
    atomicAdd(&(addr -> x), value.x);
    atomicAdd(&(addr -> y), value.y);
    atomicAdd(&(addr -> z), value.z);
}
#endif
// Adjoint of length(a): d|a|/da = a/|a| = normalize(a). For |a| <= kEps
// normalize() returns zero, so the gradient vanishes there instead of
// blowing up at the origin.
inline CUDA_CALLABLE void adj_length(float3 a, float3& adj_a, const float adj_ret)
{
    adj_a += normalize(a)*adj_ret;

#if FP_CHECK
    // Diagnostic only: report non-finite accumulated gradients.
    if (!isfinite(adj_a))
        printf("%s:%d - adj_length((%f %f %f), (%f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret);
#endif
}
// Adjoint of normalize(a): projects the incoming gradient onto the plane
// orthogonal to a-hat and scales by 1/|a| (Jacobian of x/|x|). Below the
// kEps threshold the primal returns a constant zero vector, so no gradient
// is accumulated — matching normalize().
inline CUDA_CALLABLE void adj_normalize(float3 a, float3& adj_a, const float3& adj_ret)
{
    float d = length(a);

    if (d > kEps)
    {
        float invd = 1.0f/d;

        float3 ahat = normalize(a);

        adj_a += (adj_ret*invd - ahat*(dot(ahat, adj_ret))*invd);

#if FP_CHECK
        // Diagnostic only: report non-finite accumulated gradients.
        if (!isfinite(adj_a))
            printf("%s:%d - adj_normalize((%f %f %f), (%f %f %f), (%f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, adj_a.x, adj_a.y, adj_a.z, adj_ret.x, adj_ret.y, adj_ret.z);
#endif
    }
}
| 5,542 |
C
| 23.745536 | 179 | 0.560628 |
RoboticExplorationLab/Deep-ILC/dflex/dflex/sim.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This module contains time-integration objects for simulating
models + state forward in time.
"""
import math
import torch
import numpy as np
import dflex.util
import dflex.adjoint as df
import dflex.config
from dflex.model import *
import time
import ipdb
# Todo
#-----
#
# [x] Spring model
# [x] 2D FEM model
# [x] 3D FEM model
# [x] Cloth
# [x] Wind/Drag model
# [x] Bending model
# [x] Triangle collision
# [x] Rigid body model
# [x] Rigid shape contact
# [x] Sphere
# [x] Capsule
# [x] Box
# [ ] Convex
# [ ] SDF
# [ ] Implicit solver
# [x] USD import
# [x] USD export
# -----
# externally compiled kernels module (C++/CUDA code with PyBind entry points)
kernels = None
# Smoke-test function for the df DSL: exercises float/int casts, device
# printing and branching. Not referenced by any simulation kernel.
@df.func
def test(c: float):
    x = 1.0
    y = float(2)
    z = int(3.0)

    print(y)
    print(z)

    if (c < 3.0):
        x = 2.0

    return x*6.0
def kernel_init():
    # Compile the generated C++/CUDA kernel module via dflex.adjoint and
    # stash the handle in the module-level `kernels` global declared above.
    global kernels
    kernels = df.compile()
# Semi-implicit Euler step for free particles: one thread per particle.
# Particles with inv_mass == 0 are kinematic — df.step(0 - inv_mass)
# masks gravity off for them (step(x) is 1 for x <= 0 here, 0 otherwise —
# TODO confirm step() convention against the df runtime).
@df.kernel
def integrate_particles(x: df.tensor(df.float3),
                        v: df.tensor(df.float3),
                        f: df.tensor(df.float3),
                        w: df.tensor(float),
                        gravity: df.tensor(df.float3),
                        dt: float,
                        x_new: df.tensor(df.float3),
                        v_new: df.tensor(df.float3)):

    tid = df.tid()

    x0 = df.load(x, tid)
    v0 = df.load(v, tid)
    f0 = df.load(f, tid)
    inv_mass = df.load(w, tid)

    g = df.load(gravity, 0)

    # simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt
    v1 = v0 + (f0 * inv_mass + g * df.step(0.0 - inv_mass)) * dt
    x1 = x0 + v1 * dt

    df.store(x_new, tid, x1)
    df.store(v_new, tid, v1)
# semi-implicit Euler integration
# Semi-implicit Euler integration for rigid bodies: one thread per body.
# Linear part mirrors integrate_particles; the angular part works in the
# body frame (where inv_I is expressed) and renormalizes the quaternion
# after the incremental rotation.
@df.kernel
def integrate_rigids(rigid_x: df.tensor(df.float3),
                     rigid_r: df.tensor(df.quat),
                     rigid_v: df.tensor(df.float3),
                     rigid_w: df.tensor(df.float3),
                     rigid_f: df.tensor(df.float3),
                     rigid_t: df.tensor(df.float3),
                     inv_m: df.tensor(float),
                     inv_I: df.tensor(df.mat33),
                     gravity: df.tensor(df.float3),
                     dt: float,
                     rigid_x_new: df.tensor(df.float3),
                     rigid_r_new: df.tensor(df.quat),
                     rigid_v_new: df.tensor(df.float3),
                     rigid_w_new: df.tensor(df.float3)):

    tid = df.tid()

    # positions
    x0 = df.load(rigid_x, tid)
    r0 = df.load(rigid_r, tid)

    # velocities
    v0 = df.load(rigid_v, tid)
    w0 = df.load(rigid_w, tid)         # angular velocity

    # forces
    f0 = df.load(rigid_f, tid)
    t0 = df.load(rigid_t, tid)

    # masses
    inv_mass = df.load(inv_m, tid)     # 1 / mass
    inv_inertia = df.load(inv_I, tid)  # inverse of 3x3 inertia matrix

    g = df.load(gravity, 0)

    # linear part
    v1 = v0 + (f0 * inv_mass + g * df.nonzero(inv_mass)) * dt           # linear integral (linear position/velocity)
    x1 = x0 + v1 * dt

    # angular part
    # so reverse multiplication by r0 takes you from global coordinates into local coordinates
    # because it's covector and thus gets pulled back rather than pushed forward
    wb = df.rotate_inv(r0, w0)         # angular integral (angular velocity and rotation), rotate into object reference frame
    tb = df.rotate_inv(r0, t0)         # also rotate torques into local coordinates

    # I^{-1} torque = angular acceleration and inv_inertia is always going to be in the object frame.
    # So we need to rotate into that frame, and then back into global.
    w1 = df.rotate(r0, wb + inv_inertia * tb * dt)                      # I^-1 * torque * dt., then go back into global coordinates
    r1 = df.normalize(r0 + df.quat(w1, 0.0) * r0 * 0.5 * dt)            # rotate around w1 by dt

    df.store(rigid_x_new, tid, x1)
    df.store(rigid_r_new, tid, r1)
    df.store(rigid_v_new, tid, v1)
    df.store(rigid_w_new, tid, w1)
# Damped linear spring forces: one thread per spring. Equal and opposite
# forces are accumulated on the two endpoint particles.
# NOTE(review): no guard against coincident endpoints — l == 0 divides by
# zero below; presumably rest lengths keep springs non-degenerate.
@df.kernel
def eval_springs(x: df.tensor(df.float3),
                 v: df.tensor(df.float3),
                 spring_indices: df.tensor(int),
                 spring_rest_lengths: df.tensor(float),
                 spring_stiffness: df.tensor(float),
                 spring_damping: df.tensor(float),
                 f: df.tensor(df.float3)):

    tid = df.tid()

    # endpoint particle indices are packed pairwise
    i = df.load(spring_indices, tid * 2 + 0)
    j = df.load(spring_indices, tid * 2 + 1)

    ke = df.load(spring_stiffness, tid)
    kd = df.load(spring_damping, tid)
    rest = df.load(spring_rest_lengths, tid)

    xi = df.load(x, i)
    xj = df.load(x, j)

    vi = df.load(v, i)
    vj = df.load(v, j)

    xij = xi - xj
    vij = vi - vj

    l = length(xij)
    l_inv = 1.0 / l

    # normalized spring direction
    dir = xij * l_inv

    c = l - rest            # extension (positive when stretched)
    dcdt = dot(dir, vij)    # rate of extension

    # damping based on relative velocity.
    fs = dir * (ke * c + kd * dcdt)

    df.atomic_sub(f, i, fs)
    df.atomic_add(f, j, fs)
# FEM membrane element forces: one thread per triangle. Computes in-plane
# Neo-Hookean elasticity, area preservation/damping, plus aerodynamic
# lift/drag, and accumulates forces onto the three particles.
# Sign convention: fq/fr are applied with atomic_sub and fp with atomic_add
# (see the "reversed sign" comment near the bottom).
@df.kernel
def eval_triangles(x: df.tensor(df.float3),
                   v: df.tensor(df.float3),
                   indices: df.tensor(int),
                   pose: df.tensor(df.mat22),        # inverse rest-pose matrix Dm per triangle
                   activation: df.tensor(float),
                   k_mu: float,
                   k_lambda: float,
                   k_damp: float,
                   k_drag: float,
                   k_lift: float,
                   f: df.tensor(df.float3)):

    tid = df.tid()

    i = df.load(indices, tid * 3 + 0)
    j = df.load(indices, tid * 3 + 1)
    k = df.load(indices, tid * 3 + 2)

    p = df.load(x, i)        # point zero
    q = df.load(x, j)        # point one
    r = df.load(x, k)        # point two

    vp = df.load(v, i)       # vel zero
    vq = df.load(v, j)       # vel one
    vr = df.load(v, k)       # vel two

    qp = q - p               # barycentric coordinates (centered at p)
    rp = r - p

    Dm = df.load(pose, tid)

    inv_rest_area = df.determinant(Dm) * 2.0     # 1 / det(A) = det(A^-1)
    rest_area = 1.0 / inv_rest_area

    # scale stiffness coefficients to account for area
    k_mu = k_mu * rest_area
    k_lambda = k_lambda * rest_area
    k_damp = k_damp * rest_area

    # F = Xs*Xm^-1 (deformation gradient columns)
    f1 = qp * Dm[0, 0] + rp * Dm[1, 0]
    f2 = qp * Dm[0, 1] + rp * Dm[1, 1]

    #-----------------------------
    # St. Venant-Kirchoff

    # # Green strain, F'*F-I
    # e00 = dot(f1, f1) - 1.0
    # e10 = dot(f2, f1)
    # e01 = dot(f1, f2)
    # e11 = dot(f2, f2) - 1.0

    # E = df.mat22(e00, e01,
    #              e10, e11)

    # # local forces (deviatoric part)
    # T = df.mul(E, df.transpose(Dm))

    # # spatial forces, F*T
    # fq = (f1*T[0,0] + f2*T[1,0])*k_mu*2.0
    # fr = (f1*T[0,1] + f2*T[1,1])*k_mu*2.0
    # alpha = 1.0

    #-----------------------------
    # Baraff & Witkin, note this model is not isotropic

    # c1 = length(f1) - 1.0
    # c2 = length(f2) - 1.0
    # f1 = normalize(f1)*c1*k1
    # f2 = normalize(f2)*c2*k1

    # fq = f1*Dm[0,0] + f2*Dm[0,1]
    # fr = f1*Dm[1,0] + f2*Dm[1,1]

    #-----------------------------
    # Neo-Hookean (with rest stability)

    # force = mu*F*Dm'
    fq = (f1 * Dm[0, 0] + f2 * Dm[0, 1]) * k_mu
    fr = (f1 * Dm[1, 0] + f2 * Dm[1, 1]) * k_mu
    alpha = 1.0 + k_mu / k_lambda

    #-----------------------------
    # Area Preservation

    n = df.cross(qp, rp)
    area = df.length(n) * 0.5

    # actuation
    act = df.load(activation, tid)

    # J-alpha
    c = area * inv_rest_area - alpha + act

    # dJdx
    n = df.normalize(n)
    dcdq = df.cross(rp, n) * inv_rest_area * 0.5
    dcdr = df.cross(n, qp) * inv_rest_area * 0.5

    f_area = k_lambda * c

    #-----------------------------
    # Area Damping

    dcdt = dot(dcdq, vq) + dot(dcdr, vr) - dot(dcdq + dcdr, vp)
    f_damp = k_damp * dcdt

    fq = fq + dcdq * (f_area + f_damp)
    fr = fr + dcdr * (f_area + f_damp)
    fp = fq + fr

    #-----------------------------
    # Lift + Drag

    vmid = (vp + vr + vq) * 0.3333      # approximate centroid velocity
    vdir = df.normalize(vmid)

    f_drag = vmid * (k_drag * area * df.abs(df.dot(n, vmid)))
    f_lift = n * (k_lift * area * (1.57079 - df.acos(df.dot(n, vdir)))) * dot(vmid, vmid)

    # note reversed sign due to atomic_add below.. need to write the unary op -
    fp = fp - f_drag - f_lift
    fq = fq + f_drag + f_lift
    fr = fr + f_drag + f_lift

    # apply forces
    df.atomic_add(f, i, fp)
    df.atomic_sub(f, j, fq)
    df.atomic_sub(f, k, fr)
# Barycentric coordinates (u, v, w) of the point on triangle (a, b, c)
# closest to p. Standard Voronoi-region case analysis (Ericson,
# "Real-Time Collision Detection", 5.1.5): vertex regions, then edge
# regions, then the face interior.
@df.func
def triangle_closest_point_barycentric(a: df.float3, b: df.float3, c: df.float3, p: df.float3):
    ab = b - a
    ac = c - a
    ap = p - a

    d1 = df.dot(ab, ap)
    d2 = df.dot(ac, ap)

    # vertex region a
    if (d1 <= 0.0 and d2 <= 0.0):
        return float3(1.0, 0.0, 0.0)

    bp = p - b
    d3 = df.dot(ab, bp)
    d4 = df.dot(ac, bp)

    # vertex region b
    if (d3 >= 0.0 and d4 <= d3):
        return float3(0.0, 1.0, 0.0)

    # edge region ab
    vc = d1 * d4 - d3 * d2
    v = d1 / (d1 - d3)
    if (vc <= 0.0 and d1 >= 0.0 and d3 <= 0.0):
        return float3(1.0 - v, v, 0.0)

    cp = p - c
    d5 = dot(ab, cp)
    d6 = dot(ac, cp)

    # vertex region c
    if (d6 >= 0.0 and d5 <= d6):
        return float3(0.0, 0.0, 1.0)

    # edge region ac
    vb = d5 * d2 - d1 * d6
    w = d2 / (d2 - d6)
    if (vb <= 0.0 and d2 >= 0.0 and d6 <= 0.0):
        return float3(1.0 - w, 0.0, w)

    # edge region bc
    va = d3 * d6 - d5 * d4
    w = (d4 - d3) / ((d4 - d3) + (d5 - d6))
    if (va <= 0.0 and (d4 - d3) >= 0.0 and (d5 - d6) >= 0.0):
        return float3(0.0, w, 1.0 - w)

    # face interior
    denom = 1.0 / (va + vb + vc)
    v = vb * denom
    w = vc * denom

    return float3(1.0 - v - w, v, w)
# Particle-vs-triangle penalty contact: launched over the full cross
# product of faces x particles (tid decodes to one pair). Pushes a particle
# away from the closest point on a face once it is within 0.01 of it.
@df.kernel
def eval_triangles_contact(
                                               # idx : df.tensor(int), # list of indices for colliding particles
    num_particles: int,                        # size of particles
    x: df.tensor(df.float3),
    v: df.tensor(df.float3),
    indices: df.tensor(int),
    pose: df.tensor(df.mat22),
    activation: df.tensor(float),
    k_mu: float,
    k_lambda: float,
    k_damp: float,
    k_drag: float,
    k_lift: float,
    f: df.tensor(df.float3)):

    tid = df.tid()
    face_no = tid // num_particles             # which face
    particle_no = tid % num_particles          # which particle

    # index = df.load(idx, tid)
    pos = df.load(x, particle_no)              # at the moment, just one particle
    # vel0 = df.load(v, 0)

    i = df.load(indices, face_no * 3 + 0)
    j = df.load(indices, face_no * 3 + 1)
    k = df.load(indices, face_no * 3 + 2)

    # skip the particle's own face
    if (i == particle_no or j == particle_no or k == particle_no):
        return

    p = df.load(x, i)        # point zero
    q = df.load(x, j)        # point one
    r = df.load(x, k)        # point two

    # vp = df.load(v, i) # vel zero
    # vq = df.load(v, j) # vel one
    # vr = df.load(v, k) # vel two

    # qp = q-p # barycentric coordinates (centered at p)
    # rp = r-p

    bary = triangle_closest_point_barycentric(p, q, r, pos)
    closest = p * bary[0] + q * bary[1] + r * bary[2]

    diff = pos - closest
    dist = df.dot(diff, diff)     # NOTE(review): squared distance compared against 0.01 below
    n = df.normalize(diff)
    c = df.min(dist - 0.01, 0.0)  # 0 unless within 0.01 of surface
    #c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)

    fn = n * c * 1e5              # stiff penalty force along the separation direction

    df.atomic_sub(f, particle_no, fn)

    # # apply forces (could do - f / 3 here)
    df.atomic_add(f, i, fn * bary[0])
    df.atomic_add(f, j, fn * bary[1])
    df.atomic_add(f, k, fn * bary[2])
# Rigid-body contact-point vs cloth-triangle forces: launched over the
# cross product of faces x contact points. Applies a penalty normal force,
# contact damping and boxed Coulomb friction to the triangle vertices only
# (the rigid-side reaction is commented out).
@df.kernel
def eval_triangles_rigid_contacts(
    num_particles: int,                          # number of particles (size of contact_point)
    x: df.tensor(df.float3),                     # position of particles
    v: df.tensor(df.float3),
    indices: df.tensor(int),                     # triangle indices
    rigid_x: df.tensor(df.float3),               # rigid body positions
    rigid_r: df.tensor(df.quat),
    rigid_v: df.tensor(df.float3),
    rigid_w: df.tensor(df.float3),
    contact_body: df.tensor(int),
    contact_point: df.tensor(df.float3),         # position of contact points relative to body
    contact_dist: df.tensor(float),
    contact_mat: df.tensor(int),
    materials: df.tensor(float),
    #   rigid_f : df.tensor(df.float3),
    #   rigid_t : df.tensor(df.float3),
    tri_f: df.tensor(df.float3)):

    tid = df.tid()

    face_no = tid // num_particles               # which face
    particle_no = tid % num_particles            # which particle

    # -----------------------
    # load rigid body point
    c_body = df.load(contact_body, particle_no)
    c_point = df.load(contact_point, particle_no)
    c_dist = df.load(contact_dist, particle_no)
    c_mat = df.load(contact_mat, particle_no)

    # hard coded surface parameter tensor layout (ke, kd, kf, mu)
    ke = df.load(materials, c_mat * 4 + 0)       # restitution coefficient
    kd = df.load(materials, c_mat * 4 + 1)       # damping coefficient
    kf = df.load(materials, c_mat * 4 + 2)       # friction coefficient
    mu = df.load(materials, c_mat * 4 + 3)       # coulomb friction

    x0 = df.load(rigid_x, c_body)                # position of colliding body
    r0 = df.load(rigid_r, c_body)                # orientation of colliding body
    v0 = df.load(rigid_v, c_body)
    w0 = df.load(rigid_w, c_body)

    # transform point to world space
    pos = x0 + df.rotate(r0, c_point)

    # use x0 as center, everything is offset from center of mass

    # moment arm
    r = pos - x0                                 # basically just c_point in the new coordinates
    rhat = df.normalize(r)
    pos = pos + rhat * c_dist                    # add on 'thickness' of shape, e.g.: radius of sphere/capsule

    # contact point velocity
    dpdt = v0 + df.cross(w0, r)                  # this is rigid velocity cross offset, so it's the velocity of the contact point.

    # -----------------------
    # load triangle
    i = df.load(indices, face_no * 3 + 0)
    j = df.load(indices, face_no * 3 + 1)
    k = df.load(indices, face_no * 3 + 2)

    p = df.load(x, i)        # point zero
    q = df.load(x, j)        # point one
    r = df.load(x, k)        # point two (note: reuses/overwrites the moment-arm variable r, which is no longer needed)

    vp = df.load(v, i)       # vel zero
    vq = df.load(v, j)       # vel one
    vr = df.load(v, k)       # vel two

    bary = triangle_closest_point_barycentric(p, q, r, pos)
    closest = p * bary[0] + q * bary[1] + r * bary[2]

    diff = pos - closest     # vector from tri to point
    dist = df.dot(diff, diff)          # squared distance
    n = df.normalize(diff)             # points into the object
    c = df.min(dist - 0.05, 0.0)       # 0 unless within 0.05 of surface
    #c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)
    # fn = n * c * 1e6    # points towards cloth (both n and c are negative)

    # df.atomic_sub(tri_f, particle_no, fn)

    fn = c * ke              # normal force (restitution coefficient * how far inside for ground) (negative)

    vtri = vp * bary[0] + vq * bary[1] + vr * bary[2]     # bad approximation for centroid velocity
    vrel = vtri - dpdt

    vn = dot(n, vrel)        # velocity component of rigid in negative normal direction
    vt = vrel - n * vn       # velocity component not in normal direction

    # contact damping
    fd = 0.0 - df.max(vn, 0.0) * kd * df.step(c)          # again, negative, into the ground

    # # viscous friction
    # ft = vt*kf

    # Coulomb friction (box)
    lower = mu * (fn + fd)
    upper = 0.0 - lower      # workaround because no unary ops yet

    nx = cross(n, float3(0.0, 0.0, 1.0))                  # basis vectors for tangent
    nz = cross(n, float3(1.0, 0.0, 0.0))

    vx = df.clamp(dot(nx * kf, vt), lower, upper)
    vz = df.clamp(dot(nz * kf, vt), lower, upper)

    ft = (nx * vx + nz * vz) * (0.0 - df.step(c))         # df.float3(vx, 0.0, vz)*df.step(c)

    # # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
    # #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)

    f_total = n * (fn + fd) + ft

    df.atomic_add(tri_f, i, f_total * bary[0])
    df.atomic_add(tri_f, j, f_total * bary[1])
    df.atomic_add(tri_f, k, f_total * bary[2])
# Dihedral-angle bending forces for a cloth edge shared by two triangles
# (Bridson-style): one thread per bending element of 4 particles
# (x1, x2 opposite the shared edge x3-x4).
# NOTE(review): degenerate faces (zero-area normals) divide by zero below;
# presumably the mesh is assumed non-degenerate.
@df.kernel
def eval_bending(
    x: df.tensor(df.float3), v: df.tensor(df.float3), indices: df.tensor(int), rest: df.tensor(float), ke: float, kd: float, f: df.tensor(df.float3)):

    tid = df.tid()

    i = df.load(indices, tid * 4 + 0)
    j = df.load(indices, tid * 4 + 1)
    k = df.load(indices, tid * 4 + 2)
    l = df.load(indices, tid * 4 + 3)

    rest_angle = df.load(rest, tid)

    x1 = df.load(x, i)
    x2 = df.load(x, j)
    x3 = df.load(x, k)
    x4 = df.load(x, l)

    v1 = df.load(v, i)
    v2 = df.load(v, j)
    v3 = df.load(v, k)
    v4 = df.load(v, l)

    n1 = df.cross(x3 - x1, x4 - x1)    # normal to face 1
    n2 = df.cross(x4 - x2, x3 - x2)    # normal to face 2

    n1_length = df.length(n1)
    n2_length = df.length(n2)

    rcp_n1 = 1.0 / n1_length
    rcp_n2 = 1.0 / n2_length

    cos_theta = df.dot(n1, n2) * rcp_n1 * rcp_n2

    # normals scaled by 1/|n|^2, used for the angle gradients below
    n1 = n1 * rcp_n1 * rcp_n1
    n2 = n2 * rcp_n2 * rcp_n2

    e = x4 - x3
    e_hat = df.normalize(e)
    e_length = df.length(e)

    # signed dihedral angle
    s = df.sign(df.dot(df.cross(n2, n1), e_hat))
    angle = df.acos(cos_theta) * s

    # angle gradients w.r.t. the four particles
    d1 = n1 * e_length
    d2 = n2 * e_length
    d3 = n1 * df.dot(x1 - x4, e_hat) + n2 * df.dot(x2 - x4, e_hat)
    d4 = n1 * df.dot(x3 - x1, e_hat) + n2 * df.dot(x3 - x2, e_hat)

    # elastic
    f_elastic = ke * (angle - rest_angle)

    # damping
    f_damp = kd * (df.dot(d1, v1) + df.dot(d2, v2) + df.dot(d3, v3) + df.dot(d4, v4))

    # total force, proportional to edge length
    f_total = 0.0 - e_length * (f_elastic + f_damp)

    df.atomic_add(f, i, d1 * f_total)
    df.atomic_add(f, j, d2 * f_total)
    df.atomic_add(f, k, d3 * f_total)
    df.atomic_add(f, l, d4 * f_total)
# FEM tetrahedral element forces: one thread per tet. Stable Neo-Hookean
# model (Smith et al. 2018) — deviatoric term from the deformation
# gradient, volumetric term from det(F) — plus damping, accumulated onto
# the four particles. Materials are packed per-tet as (k_mu, k_lambda, k_damp).
@df.kernel
def eval_tetrahedra(x: df.tensor(df.float3),
                    v: df.tensor(df.float3),
                    indices: df.tensor(int),
                    pose: df.tensor(df.mat33),        # inverse rest-pose matrix Dm per tet
                    activation: df.tensor(float),
                    materials: df.tensor(float),
                    f: df.tensor(df.float3)):

    tid = df.tid()

    i = df.load(indices, tid * 4 + 0)
    j = df.load(indices, tid * 4 + 1)
    k = df.load(indices, tid * 4 + 2)
    l = df.load(indices, tid * 4 + 3)

    act = df.load(activation, tid)

    k_mu = df.load(materials, tid * 3 + 0)
    k_lambda = df.load(materials, tid * 3 + 1)
    k_damp = df.load(materials, tid * 3 + 2)

    x0 = df.load(x, i)
    x1 = df.load(x, j)
    x2 = df.load(x, k)
    x3 = df.load(x, l)

    v0 = df.load(v, i)
    v1 = df.load(v, j)
    v2 = df.load(v, k)
    v3 = df.load(v, l)

    # edge vectors from vertex 0
    x10 = x1 - x0
    x20 = x2 - x0
    x30 = x3 - x0

    v10 = v1 - v0
    v20 = v2 - v0
    v30 = v3 - v0

    Ds = df.mat33(x10, x20, x30)
    Dm = df.load(pose, tid)

    inv_rest_volume = df.determinant(Dm) * 6.0
    rest_volume = 1.0 / inv_rest_volume

    alpha = 1.0 + k_mu / k_lambda - k_mu / (4.0 * k_lambda)

    # scale stiffness coefficients to account for area
    k_mu = k_mu * rest_volume
    k_lambda = k_lambda * rest_volume
    k_damp = k_damp * rest_volume

    # F = Xs*Xm^-1 (deformation gradient)
    F = Ds * Dm
    dFdt = df.mat33(v10, v20, v30) * Dm

    col1 = df.float3(F[0, 0], F[1, 0], F[2, 0])
    col2 = df.float3(F[0, 1], F[1, 1], F[2, 1])
    col3 = df.float3(F[0, 2], F[1, 2], F[2, 2])

    #-----------------------------
    # Neo-Hookean (with rest stability [Smith et al 2018])

    Ic = dot(col1, col1) + dot(col2, col2) + dot(col3, col3)

    # deviatoric part
    P = F * k_mu * (1.0 - 1.0 / (Ic + 1.0)) + dFdt * k_damp
    H = P * df.transpose(Dm)

    f1 = df.float3(H[0, 0], H[1, 0], H[2, 0])
    f2 = df.float3(H[0, 1], H[1, 1], H[2, 1])
    f3 = df.float3(H[0, 2], H[1, 2], H[2, 2])

    #-----------------------------
    # C_spherical

    # r_s = df.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3))
    # r_s_inv = 1.0/r_s
    # C = r_s - df.sqrt(3.0)
    # dCdx = F*df.transpose(Dm)*r_s_inv

    # grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
    # grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
    # grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])

    # f1 = grad1*C*k_mu
    # f2 = grad2*C*k_mu
    # f3 = grad3*C*k_mu

    #----------------------------
    # C_D

    # r_s = df.sqrt(dot(col1, col1) + dot(col2, col2) + dot(col3, col3))
    # C = r_s*r_s - 3.0
    # dCdx = F*df.transpose(Dm)*2.0

    # grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
    # grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
    # grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])

    # f1 = grad1*C*k_mu
    # f2 = grad2*C*k_mu
    # f3 = grad3*C*k_mu

    # hydrostatic part
    J = df.determinant(F)

    #print(J)
    s = inv_rest_volume / 6.0
    dJdx1 = df.cross(x20, x30) * s
    dJdx2 = df.cross(x30, x10) * s
    dJdx3 = df.cross(x10, x20) * s

    f_volume = (J - alpha + act) * k_lambda
    f_damp = (df.dot(dJdx1, v1) + df.dot(dJdx2, v2) + df.dot(dJdx3, v3)) * k_damp

    f_total = f_volume + f_damp

    f1 = f1 + dJdx1 * f_total
    f2 = f2 + dJdx2 * f_total
    f3 = f3 + dJdx3 * f_total
    f0 = (f1 + f2 + f3) * (0.0 - 1.0)    # vertex 0 balances the other three

    # apply forces
    df.atomic_sub(f, i, f0)
    df.atomic_sub(f, j, f1)
    df.atomic_sub(f, k, f2)
    df.atomic_sub(f, l, f3)
# Particle-vs-ground-plane contact: one thread per particle. The ground is
# the fixed plane y = 0 with normal (0, 1, 0); applies penalty normal
# force, damping and boxed Coulomb friction.
@df.kernel
def eval_contacts(x: df.tensor(df.float3), v: df.tensor(df.float3), ke: float, kd: float, kf: float, mu: float, f: df.tensor(df.float3)):

    tid = df.tid()           # this just handles contact of particles with the ground plane, nothing else.

    x0 = df.load(x, tid)
    v0 = df.load(v, tid)

    n = float3(0.0, 1.0, 0.0)          # why is the normal always y? Ground is always (0, 1, 0) normal
    c = df.min(dot(n, x0) - 0.01, 0.0) # 0 unless within 0.01 of surface
    #c = df.leaky_min(dot(n, x0)-0.01, 0.0, 0.0)

    vn = dot(n, v0)
    vt = v0 - n * vn

    fn = n * c * ke          # normal penalty force (c is negative inside)

    # contact damping
    fd = n * df.min(vn, 0.0) * kd

    # viscous friction
    #ft = vt*kf

    # Coulomb friction (box)
    lower = mu * c * ke      # negative friction bound
    upper = 0.0 - lower

    vx = clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
    vz = clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)

    ft = df.float3(vx, 0.0, vz)

    # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
    #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)

    ftotal = fn + (fd + ft) * df.step(c)     # damping/friction only while in contact

    df.atomic_sub(f, tid, ftotal)
# Signed distance to a sphere: negative inside, positive outside.
@df.func
def sphere_sdf(center: df.float3, radius: float, p: df.float3):
    return df.length(p-center) - radius
# Gradient of sphere_sdf: unit vector from the center toward p
# (zero vector at the exact center, by normalize()'s convention).
@df.func
def sphere_sdf_grad(center: df.float3, radius: float, p: df.float3):
    return df.normalize(p-center)
# Signed distance to an axis-aligned box [-upper, upper] centered at the
# origin: negative inside, positive outside.
@df.func
def box_sdf(upper: df.float3, p: df.float3):
    # adapted from https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm
    qx = abs(p[0])-upper[0]
    qy = abs(p[1])-upper[1]
    qz = abs(p[2])-upper[2]

    e = df.float3(df.max(qx, 0.0), df.max(qy, 0.0), df.max(qz, 0.0))

    return df.length(e) + df.min(df.max(qx, df.max(qy, qz)), 0.0)
# Gradient (outward direction) of box_sdf at p for the box [-upper, upper]
# centered at the origin. Exterior points get the direction from the
# closest surface point; interior points project onto the nearest face.
@df.func
def box_sdf_grad(upper: df.float3, p: df.float3):
    qx = abs(p[0])-upper[0]
    qy = abs(p[1])-upper[1]
    qz = abs(p[2])-upper[2]

    # exterior case: gradient points from the closest point on the box to p
    if (qx > 0.0 or qy > 0.0 or qz > 0.0):
        x = df.clamp(p[0], 0.0-upper[0], upper[0])
        y = df.clamp(p[1], 0.0-upper[1], upper[1])
        z = df.clamp(p[2], 0.0-upper[2], upper[2])

        return df.normalize(p - df.float3(x, y, z))

    sx = df.sign(p[0])
    sy = df.sign(p[1])
    sz = df.sign(p[2])

    # interior case: use the face with the largest (least negative) q
    # x projection
    if (qx > qy and qx > qz):
        return df.float3(sx, 0.0, 0.0)

    # y projection
    if (qy > qx and qy > qz):
        return df.float3(0.0, sy, 0.0)

    # z projection — unconditional fallback so every path returns a value
    # (the original fell through with no return when two q components tied,
    # e.g. qx == qy > qz).
    return df.float3(0.0, 0.0, sz)
# Signed distance to a capsule aligned with the x axis: a segment of
# half-length half_width with hemispherical caps of the given radius.
@df.func
def capsule_sdf(radius: float, half_width: float, p: df.float3):
    # beyond the +x cap
    if (p[0] > half_width):
        return length(df.float3(p[0] - half_width, p[1], p[2])) - radius

    # beyond the -x cap
    if (p[0] < 0.0 - half_width):
        return length(df.float3(p[0] + half_width, p[1], p[2])) - radius

    # cylindrical middle section: radial distance in the y-z plane
    return df.length(df.float3(0.0, p[1], p[2])) - radius
# Gradient of capsule_sdf: direction away from the capsule's medial segment.
@df.func
def capsule_sdf_grad(radius: float, half_width: float, p: df.float3):
    # beyond the +x cap
    if (p[0] > half_width):
        return normalize(df.float3(p[0] - half_width, p[1], p[2]))

    # beyond the -x cap
    if (p[0] < 0.0 - half_width):
        return normalize(df.float3(p[0] + half_width, p[1], p[2]))

    # cylindrical middle section: purely radial
    return normalize(df.float3(0.0, p[1], p[2]))
# Soft (particle-vs-rigid-shape) contacts: launched over the cross product
# of shapes x particles. Evaluates the shape's SDF in shape-local space,
# then applies penalty normal force, damping and boxed Coulomb friction to
# the particle, with the equal-and-opposite spatial wrench accumulated on
# the owning body (if any; rigid_index < 0 means a static shape).
@df.kernel
def eval_soft_contacts(
    num_particles: int,
    particle_x: df.tensor(df.float3),
    particle_v: df.tensor(df.float3),
    body_X_sc: df.tensor(df.spatial_transform),
    body_v_sc: df.tensor(df.spatial_vector),
    shape_X_co: df.tensor(df.spatial_transform),
    shape_body: df.tensor(int),
    shape_geo_type: df.tensor(int),
    shape_geo_src: df.tensor(int),
    shape_geo_scale: df.tensor(df.float3),
    shape_materials: df.tensor(float),
    ke: float,
    kd: float,
    kf: float,
    mu: float,
    # outputs
    particle_f: df.tensor(df.float3),
    body_f: df.tensor(df.spatial_vector)):

    tid = df.tid()

    shape_index = tid // num_particles           # which shape
    particle_index = tid % num_particles         # which particle

    rigid_index = df.load(shape_body, shape_index)

    px = df.load(particle_x, particle_index)
    pv = df.load(particle_v, particle_index)

    #center = float3(0.0, 0.5, 0.0)
    #radius = 0.25
    #margin = 0.01

    # sphere collider
    # c = df.min(sphere_sdf(center, radius, x0)-margin, 0.0)
    # n = sphere_sdf_grad(center, radius, x0)

    # box collider
    #c = df.min(box_sdf(df.float3(radius, radius, radius), x0-center)-margin, 0.0)
    #n = box_sdf_grad(df.float3(radius, radius, radius), x0-center)

    # world transform of the owning body (identity for static shapes)
    X_sc = df.spatial_transform_identity()
    if (rigid_index >= 0):
        X_sc = df.load(body_X_sc, rigid_index)

    X_co = df.load(shape_X_co, shape_index)      # shape offset from body

    X_so = df.spatial_transform_multiply(X_sc, X_co)
    X_os = df.spatial_transform_inverse(X_so)

    # transform particle position to shape local space
    x_local = df.spatial_transform_point(X_os, px)

    # geo description
    geo_type = df.load(shape_geo_type, shape_index)
    geo_scale = df.load(shape_geo_scale, shape_index)

    margin = 0.01

    # evaluate shape sdf
    c = 0.0
    n = df.float3(0.0, 0.0, 0.0)

    # GEO_SPHERE (0)
    if (geo_type == 0):
        c = df.min(sphere_sdf(df.float3(0.0, 0.0, 0.0), geo_scale[0], x_local)-margin, 0.0)
        n = df.spatial_transform_vector(X_so, sphere_sdf_grad(df.float3(0.0, 0.0, 0.0), geo_scale[0], x_local))

    # GEO_BOX (1)
    if (geo_type == 1):
        c = df.min(box_sdf(geo_scale, x_local)-margin, 0.0)
        n = df.spatial_transform_vector(X_so, box_sdf_grad(geo_scale, x_local))

    # GEO_CAPSULE (2)
    if (geo_type == 2):
        c = df.min(capsule_sdf(geo_scale[0], geo_scale[1], x_local)-margin, 0.0)
        n = df.spatial_transform_vector(X_so, capsule_sdf_grad(geo_scale[0], geo_scale[1], x_local))

    # rigid velocity (zero spatial vector for static shapes)
    rigid_v_s = df.spatial_vector()
    if (rigid_index >= 0):
        rigid_v_s = df.load(body_v_sc, rigid_index)

    rigid_w = df.spatial_top(rigid_v_s)
    rigid_v = df.spatial_bottom(rigid_v_s)

    # compute the body velocity at the particle position
    bv = rigid_v + df.cross(rigid_w, px)

    # relative velocity
    v = pv - bv

    # decompose relative velocity
    vn = dot(n, v)
    vt = v - n * vn

    # contact elastic
    fn = n * c * ke

    # contact damping
    fd = n * df.min(vn, 0.0) * kd

    # viscous friction
    #ft = vt*kf

    # Coulomb friction (box)
    lower = mu * c * ke
    upper = 0.0 - lower

    vx = clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
    vz = clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)

    ft = df.float3(vx, 0.0, vz)

    # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
    #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)

    f_total = fn + (fd + ft) * df.step(c)
    t_total = df.cross(px, f_total)

    df.atomic_sub(particle_f, particle_index, f_total)

    if (rigid_index >= 0):
        df.atomic_sub(body_f, rigid_index, df.spatial_vector(t_total, f_total))
# Rigid-body-vs-ground contacts: one thread per precomputed contact point.
# Transforms the body-local contact point to world space, tests penetration
# against the y = 0 plane, and accumulates force/torque on the body using
# per-contact material parameters.
@df.kernel
def eval_rigid_contacts(rigid_x: df.tensor(df.float3),
                        rigid_r: df.tensor(df.quat),
                        rigid_v: df.tensor(df.float3),
                        rigid_w: df.tensor(df.float3),
                        contact_body: df.tensor(int),
                        contact_point: df.tensor(df.float3),
                        contact_dist: df.tensor(float),
                        contact_mat: df.tensor(int),
                        materials: df.tensor(float),
                        rigid_f: df.tensor(df.float3),
                        rigid_t: df.tensor(df.float3)):

    tid = df.tid()

    c_body = df.load(contact_body, tid)
    c_point = df.load(contact_point, tid)
    c_dist = df.load(contact_dist, tid)
    c_mat = df.load(contact_mat, tid)

    # hard coded surface parameter tensor layout (ke, kd, kf, mu)
    ke = df.load(materials, c_mat * 4 + 0)       # restitution coefficient
    kd = df.load(materials, c_mat * 4 + 1)       # damping coefficient
    kf = df.load(materials, c_mat * 4 + 2)       # friction coefficient
    mu = df.load(materials, c_mat * 4 + 3)       # coulomb friction

    x0 = df.load(rigid_x, c_body)                # position of colliding body
    r0 = df.load(rigid_r, c_body)                # orientation of colliding body
    v0 = df.load(rigid_v, c_body)
    w0 = df.load(rigid_w, c_body)

    n = float3(0.0, 1.0, 0.0)                    # ground plane normal

    # transform point to world space
    p = x0 + df.rotate(r0, c_point) - n * c_dist # add on 'thickness' of shape, e.g.: radius of sphere/capsule

    # use x0 as center, everything is offset from center of mass

    # moment arm
    r = p - x0                                   # basically just c_point in the new coordinates

    # contact point velocity
    dpdt = v0 + df.cross(w0, r)                  # this is rigid velocity cross offset, so it's the velocity of the contact point.

    # check ground contact
    c = df.min(dot(n, p), 0.0)                   # check if we're inside the ground

    vn = dot(n, dpdt)        # velocity component out of the ground
    vt = dpdt - n * vn       # velocity component not into the ground

    fn = c * ke              # normal force (restitution coefficient * how far inside for ground)

    # contact damping
    fd = df.min(vn, 0.0) * kd * df.step(c)       # again, velocity into the ground, negative

    # viscous friction
    #ft = vt*kf

    # Coulomb friction (box)
    lower = mu * (fn + fd)   # negative
    upper = 0.0 - lower      # positive, workaround for no unary ops

    vx = df.clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
    vz = df.clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)

    ft = df.float3(vx, 0.0, vz) * df.step(c)

    # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
    #ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke)

    f_total = n * (fn + fd) + ft
    t_total = df.cross(r, f_total)

    df.atomic_sub(rigid_f, c_body, f_total)
    df.atomic_sub(rigid_t, c_body, t_total)
# # Frank & Park definition 3.20, pg 100
@df.func
def spatial_transform_twist(t: df.spatial_transform, x: df.spatial_vector):
    # Transform a spatial twist x = (w, v) by the spatial transform t.
    # Rotates both components, then adds the p x w term to the linear part
    # (the adjoint action on twists).
    q = spatial_transform_get_rotation(t)
    p = spatial_transform_get_translation(t)

    w = spatial_top(x)
    v = spatial_bottom(x)

    w = rotate(q, w)
    v = rotate(q, v) + cross(p, w)

    return spatial_vector(w, v)
@df.func
def spatial_transform_wrench(t: df.spatial_transform, x: df.spatial_vector):
    # Transform a spatial wrench x = (w, v) by the spatial transform t.
    # Dual of spatial_transform_twist: here the cross term p x v is added to
    # the angular (top) component instead of the linear one.
    q = spatial_transform_get_rotation(t)
    p = spatial_transform_get_translation(t)

    w = spatial_top(x)
    v = spatial_bottom(x)

    v = rotate(q, v)
    w = rotate(q, w) + cross(p, v)

    return spatial_vector(w, v)
@df.func
def spatial_transform_inverse(t: df.spatial_transform):
    # Invert a rigid spatial transform (p, q): the inverse rotation is q^-1
    # and the inverse translation is -(q^-1 * p).
    p = spatial_transform_get_translation(t)
    q = spatial_transform_get_rotation(t)

    q_inv = inverse(q)
    # (0.0 - 1.0) is a workaround for the DSL's lack of unary negation
    return spatial_transform(rotate(q_inv, p)*(0.0 - 1.0), q_inv);
# computes adj_t^-T*I*adj_t^-1 (tensor change of coordinates), Frank & Park, section 8.2.3, pg 290
@df.func
def spatial_transform_inertia(t: df.spatial_transform, I: df.spatial_matrix):
    # Change of coordinates for a 6x6 spatial inertia tensor: returns
    # T^T * I * T where T is the spatial adjoint of the inverse transform.
    t_inv = spatial_transform_inverse(t)

    q = spatial_transform_get_rotation(t_inv)
    p = spatial_transform_get_translation(t_inv)

    # build the 3x3 rotation matrix column-by-column from the quaternion
    r1 = rotate(q, float3(1.0, 0.0, 0.0))
    r2 = rotate(q, float3(0.0, 1.0, 0.0))
    r3 = rotate(q, float3(0.0, 0.0, 1.0))

    R = mat33(r1, r2, r3)
    # S = [p]x * R couples translation into the adjoint
    S = mul(skew(p), R)

    T = spatial_adjoint(R, S)

    return mul(mul(transpose(T), I), T)
@df.kernel
def eval_rigid_contacts_art(
    body_X_s: df.tensor(df.spatial_transform),
    body_v_s: df.tensor(df.spatial_vector),
    contact_body: df.tensor(int),
    contact_point: df.tensor(df.float3),
    contact_dist: df.tensor(float),
    contact_mat: df.tensor(int),
    materials: df.tensor(float),
    body_f_s: df.tensor(df.spatial_vector)):

    # One thread per contact point: computes a penalty-based ground-plane
    # (y = 0, normal (0, 1, 0)) contact wrench for an articulated body and
    # accumulates it into body_f_s in spatial (world) coordinates.
    tid = df.tid()

    c_body = df.load(contact_body, tid)
    c_point = df.load(contact_point, tid)
    c_dist = df.load(contact_dist, tid)
    c_mat = df.load(contact_mat, tid)

    # hard coded surface parameter tensor layout (ke, kd, kf, mu)
    ke = df.load(materials, c_mat * 4 + 0)       # contact normal (elastic) stiffness
    kd = df.load(materials, c_mat * 4 + 1)       # damping coefficient
    kf = df.load(materials, c_mat * 4 + 2)       # friction coefficient
    mu = df.load(materials, c_mat * 4 + 3)       # coulomb friction

    X_s = df.load(body_X_s, c_body)              # position of colliding body
    v_s = df.load(body_v_s, c_body)              # orientation of colliding body

    n = float3(0.0, 1.0, 0.0)

    # transform point to world space
    p = df.spatial_transform_point(X_s, c_point) - n * c_dist          # add on 'thickness' of shape, e.g.: radius of sphere/capsule

    w = df.spatial_top(v_s)
    v = df.spatial_bottom(v_s)

    # contact point velocity (spatial twist evaluated at p)
    dpdt = v + df.cross(w, p)

    # check ground contact
    c = df.dot(n, p)         # signed distance to the plane; negative means penetrating

    # early out for non-penetrating contacts
    if (c >= 0.0):
        return

    vn = dot(n, dpdt)        # velocity component out of the ground
    vt = dpdt - n * vn       # velocity component not into the ground

    fn = c * ke              # normal penalty force (negative; proportional to penetration depth)

    # contact damping: only damps approaching velocity, scaled by penetration depth
    fd = df.min(vn, 0.0) * kd * df.step(c) * (0.0 - c)

    # viscous friction
    #ft = vt*kf

    # Coulomb friction (box)
    lower = mu * (fn + fd)   # negative
    upper = 0.0 - lower      # positive, workaround for no unary ops

    vx = df.clamp(dot(float3(kf, 0.0, 0.0), vt), lower, upper)
    vz = df.clamp(dot(float3(0.0, 0.0, kf), vt), lower, upper)

    # Coulomb friction (smooth, but gradients are numerically unstable around |vt| = 0)
    ft = df.normalize(vt)*df.min(kf*df.length(vt), 0.0 - mu*c*ke) * df.step(c)

    f_total = n * (fn + fd) + ft
    t_total = df.cross(p, f_total)

    # accumulate wrench; atomic since multiple contacts may touch the same body
    df.atomic_add(body_f_s, c_body, df.spatial_vector(t_total, f_total))
@df.func
def compute_muscle_force(
    i: int,
    body_X_s: df.tensor(df.spatial_transform),
    body_v_s: df.tensor(df.spatial_vector),
    muscle_links: df.tensor(int),
    muscle_points: df.tensor(df.float3),
    muscle_activation: float,
    body_f_s: df.tensor(df.spatial_vector)):

    # Applies an equal-and-opposite muscle force along the segment between
    # attachment points i and i+1, accumulating spatial wrenches on the two
    # attached links. Returns 0 (DSL functions require a return value).
    link_0 = df.load(muscle_links, i)
    link_1 = df.load(muscle_links, i+1)

    # both endpoints on the same link produce no net force — skip
    if (link_0 == link_1):
        return 0

    r_0 = df.load(muscle_points, i)
    r_1 = df.load(muscle_points, i+1)

    xform_0 = df.load(body_X_s, link_0)
    xform_1 = df.load(body_X_s, link_1)

    pos_0 = df.spatial_transform_point(xform_0, r_0)
    pos_1 = df.spatial_transform_point(xform_1, r_1)

    # line of action of the muscle segment
    n = df.normalize(pos_1 - pos_0)

    # todo: add passive elastic and viscosity terms
    f = n * muscle_activation

    # pull link_0 toward link_1 and vice versa (wrench = (torque, force))
    df.atomic_sub(body_f_s, link_0, df.spatial_vector(df.cross(pos_0, f), f))
    df.atomic_add(body_f_s, link_1, df.spatial_vector(df.cross(pos_1, f), f))

    return 0
@df.kernel
def eval_muscles(
    body_X_s: df.tensor(df.spatial_transform),
    body_v_s: df.tensor(df.spatial_vector),
    muscle_start: df.tensor(int),
    muscle_params: df.tensor(float),
    muscle_links: df.tensor(int),
    muscle_points: df.tensor(df.float3),
    muscle_activation: df.tensor(float),
    # output
    body_f_s: df.tensor(df.spatial_vector)):

    # One thread per muscle: iterates the muscle's attachment points and
    # applies a segment force between each consecutive pair.
    tid = df.tid()

    m_start = df.load(muscle_start, tid)
    # -1 because each iteration consumes a (point i, point i+1) pair
    m_end = df.load(muscle_start, tid+1) - 1

    activation = df.load(muscle_activation, tid)

    for i in range(m_start, m_end):
        compute_muscle_force(i, body_X_s, body_v_s, muscle_links, muscle_points, activation, body_f_s)
# compute transform across a joint
@df.func
def jcalc_transform(type: int, axis: df.float3, joint_q: df.tensor(float), start: int):
    # Returns the child-relative-to-joint-frame transform for a joint of the
    # given type, reading its generalized coordinates from joint_q at 'start'.
    # Joint types: 0=prismatic, 1=revolute, 2=ball, 3=fixed, 4=free.

    # prismatic: single translation coordinate along 'axis'
    if (type == 0):
        q = df.load(joint_q, start)
        X_jc = spatial_transform(axis * q, quat_identity())
        return X_jc

    # revolute: single angle coordinate about 'axis'
    if (type == 1):
        q = df.load(joint_q, start)
        X_jc = spatial_transform(float3(0.0, 0.0, 0.0), quat_from_axis_angle(axis, q))
        return X_jc

    # ball: 4 quaternion coordinates (x, y, z, w)
    if (type == 2):
        qx = df.load(joint_q, start + 0)
        qy = df.load(joint_q, start + 1)
        qz = df.load(joint_q, start + 2)
        qw = df.load(joint_q, start + 3)

        X_jc = spatial_transform(float3(0.0, 0.0, 0.0), quat(qx, qy, qz, qw))
        return X_jc

    # fixed: no coordinates
    if (type == 3):
        X_jc = spatial_transform_identity()
        return X_jc

    # free: 3 translation + 4 quaternion coordinates
    if (type == 4):
        px = df.load(joint_q, start + 0)
        py = df.load(joint_q, start + 1)
        pz = df.load(joint_q, start + 2)

        qx = df.load(joint_q, start + 3)
        qy = df.load(joint_q, start + 4)
        qz = df.load(joint_q, start + 5)
        qw = df.load(joint_q, start + 6)

        X_jc = spatial_transform(float3(px, py, pz), quat(qx, qy, qz, qw))
        return X_jc

    # default case (unknown joint type)
    return spatial_transform_identity()
# compute motion subspace and velocity for a joint
@df.func
def jcalc_motion(type: int, axis: df.float3, X_sc: df.spatial_transform, joint_S_s: df.tensor(df.spatial_vector), joint_qd: df.tensor(float), joint_start: int):
    # Writes the joint's motion subspace columns (in spatial coordinates) to
    # joint_S_s starting at joint_start, and returns the joint's spatial
    # velocity v_j_s = S * qd.
    # Joint types: 0=prismatic, 1=revolute, 2=ball, 3=fixed, 4=free.

    # prismatic: subspace is a pure linear direction along 'axis'
    if (type == 0):
        S_s = df.spatial_transform_twist(X_sc, spatial_vector(float3(0.0, 0.0, 0.0), axis))
        v_j_s = S_s * df.load(joint_qd, joint_start)

        df.store(joint_S_s, joint_start, S_s)
        return v_j_s

    # revolute: subspace is a pure angular direction about 'axis'
    if (type == 1):
        S_s = df.spatial_transform_twist(X_sc, spatial_vector(axis, float3(0.0, 0.0, 0.0)))
        v_j_s = S_s * df.load(joint_qd, joint_start)

        df.store(joint_S_s, joint_start, S_s)
        return v_j_s

    # ball: three angular subspace columns
    if (type == 2):
        w = float3(df.load(joint_qd, joint_start + 0),
                   df.load(joint_qd, joint_start + 1),
                   df.load(joint_qd, joint_start + 2))

        S_0 = df.spatial_transform_twist(X_sc, spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
        S_1 = df.spatial_transform_twist(X_sc, spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0))
        S_2 = df.spatial_transform_twist(X_sc, spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0))

        # write motion subspace
        df.store(joint_S_s, joint_start + 0, S_0)
        df.store(joint_S_s, joint_start + 1, S_1)
        df.store(joint_S_s, joint_start + 2, S_2)

        return S_0*w[0] + S_1*w[1] + S_2*w[2]

    # fixed: empty subspace, zero velocity
    if (type == 3):
        return spatial_vector()

    # free: identity 6-dof subspace; dofs already expressed in spatial coords
    if (type == 4):
        v_j_s = spatial_vector(df.load(joint_qd, joint_start + 0),
                               df.load(joint_qd, joint_start + 1),
                               df.load(joint_qd, joint_start + 2),
                               df.load(joint_qd, joint_start + 3),
                               df.load(joint_qd, joint_start + 4),
                               df.load(joint_qd, joint_start + 5))

        # write motion subspace
        df.store(joint_S_s, joint_start + 0, spatial_vector(1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
        df.store(joint_S_s, joint_start + 1, spatial_vector(0.0, 1.0, 0.0, 0.0, 0.0, 0.0))
        df.store(joint_S_s, joint_start + 2, spatial_vector(0.0, 0.0, 1.0, 0.0, 0.0, 0.0))
        df.store(joint_S_s, joint_start + 3, spatial_vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
        df.store(joint_S_s, joint_start + 4, spatial_vector(0.0, 0.0, 0.0, 0.0, 1.0, 0.0))
        df.store(joint_S_s, joint_start + 5, spatial_vector(0.0, 0.0, 0.0, 0.0, 0.0, 1.0))

        return v_j_s

    # default case (unknown joint type)
    return spatial_vector()
# # compute the velocity across a joint
# #@df.func
# def jcalc_velocity(self, type, S_s, joint_qd, start):
# # prismatic
# if (type == 0):
# v_j_s = df.load(S_s, start)*df.load(joint_qd, start)
# return v_j_s
# # revolute
# if (type == 1):
# v_j_s = df.load(S_s, start)*df.load(joint_qd, start)
# return v_j_s
# # fixed
# if (type == 2):
# v_j_s = spatial_vector()
# return v_j_s
# # free
# if (type == 3):
# v_j_s = S_s[start+0]*joint_qd[start+0]
# v_j_s += S_s[start+1]*joint_qd[start+1]
# v_j_s += S_s[start+2]*joint_qd[start+2]
# v_j_s += S_s[start+3]*joint_qd[start+3]
# v_j_s += S_s[start+4]*joint_qd[start+4]
# v_j_s += S_s[start+5]*joint_qd[start+5]
# return v_j_s
# computes joint space forces/torques in tau
@df.func
def jcalc_tau(
    type: int,
    target_k_e: float,
    target_k_d: float,
    limit_k_e: float,
    limit_k_d: float,
    joint_S_s: df.tensor(spatial_vector),
    joint_q: df.tensor(float),
    joint_qd: df.tensor(float),
    joint_act: df.tensor(float),
    joint_target: df.tensor(float),
    joint_limit_lower: df.tensor(float),
    joint_limit_upper: df.tensor(float),
    coord_start: int,
    dof_start: int,
    body_f_s: spatial_vector,
    tau: df.tensor(float)):

    # Projects the body wrench onto each joint dof (tau_i = -S_i . f) and adds
    # PD target tracking, actuation, joint limit and damping terms. Writes the
    # result into tau at dof_start. Returns 0 (DSL requires a return value).

    # prismatic / revolute
    if (type == 0 or type == 1):
        S_s = df.load(joint_S_s, dof_start)

        q = df.load(joint_q, coord_start)
        qd = df.load(joint_qd, dof_start)
        act = df.load(joint_act, dof_start)

        target = df.load(joint_target, coord_start)
        lower = df.load(joint_limit_lower, coord_start)
        upper = df.load(joint_limit_upper, coord_start)

        limit_f = 0.0

        # compute elastic limit force when the coordinate violates a limit
        if (q < lower):
            limit_f = limit_k_e*(lower-q)

        if (q > upper):
            limit_f = limit_k_e*(upper-q)

        # NOTE: limit damping is applied unconditionally (acts as general
        # joint damping), not only while a limit is violated
        damping_f = (0.0 - limit_k_d) * qd

        # total torque / force on the joint; 0.0 - x is the DSL's unary minus
        t = 0.0 - spatial_dot(S_s, body_f_s) - target_k_e*(q - target) - target_k_d*qd + act + limit_f + damping_f

        df.store(tau, dof_start, t)

    # ball
    if (type == 2):

        # elastic term.. this is proportional to the
        # imaginary part of the relative quaternion
        r_j = float3(df.load(joint_q, coord_start + 0),
                     df.load(joint_q, coord_start + 1),
                     df.load(joint_q, coord_start + 2))

        # angular velocity for damping
        w_j = float3(df.load(joint_qd, dof_start + 0),
                     df.load(joint_qd, dof_start + 1),
                     df.load(joint_qd, dof_start + 2))

        for i in range(0, 3):
            S_s = df.load(joint_S_s, dof_start+i)

            w = w_j[i]
            r = r_j[i]

            df.store(tau, dof_start+i, 0.0 - spatial_dot(S_s, body_f_s) - w*target_k_d - r*target_k_e)

    # fixed
    # if (type == 3)
    #    pass

    # free: pure wrench projection, no targets/limits
    if (type == 4):

        for i in range(0, 6):
            S_s = df.load(joint_S_s, dof_start+i)
            df.store(tau, dof_start+i, 0.0 - spatial_dot(S_s, body_f_s))

    return 0
@df.func
def jcalc_integrate(
    type: int,
    joint_q: df.tensor(float),
    joint_qd: df.tensor(float),
    joint_qdd: df.tensor(float),
    coord_start: int,
    dof_start: int,
    dt: float,
    joint_q_new: df.tensor(float),
    joint_qd_new: df.tensor(float)):

    # Semi-implicit (symplectic) Euler step for one joint: first updates the
    # joint velocity from the acceleration, then the coordinates from the NEW
    # velocity. Quaternion coordinates are re-normalized after integration.
    # Returns 0 (DSL requires a return value).

    # prismatic / revolute: scalar coordinate and dof
    if (type == 0 or type == 1):

        qdd = df.load(joint_qdd, dof_start)
        qd = df.load(joint_qd, dof_start)
        q = df.load(joint_q, coord_start)

        qd_new = qd + qdd*dt
        q_new = q + qd_new*dt

        df.store(joint_qd_new, dof_start, qd_new)
        df.store(joint_q_new, coord_start, q_new)

    # ball: 3 angular dofs, 4 quaternion coordinates
    if (type == 2):

        # angular acceleration
        m_j = float3(df.load(joint_qdd, dof_start + 0),
                     df.load(joint_qdd, dof_start + 1),
                     df.load(joint_qdd, dof_start + 2))

        # angular velocity
        w_j = float3(df.load(joint_qd, dof_start + 0),
                     df.load(joint_qd, dof_start + 1),
                     df.load(joint_qd, dof_start + 2))

        # current orientation
        r_j = quat(df.load(joint_q, coord_start + 0),
                   df.load(joint_q, coord_start + 1),
                   df.load(joint_q, coord_start + 2),
                   df.load(joint_q, coord_start + 3))

        # symplectic Euler
        w_j_new = w_j + m_j*dt
        # quaternion kinematics: dr/dt = 0.5 * (w, 0) * r
        drdt_j = mul(quat(w_j_new, 0.0), r_j) * 0.5

        # new orientation (normalized)
        r_j_new = normalize(r_j + drdt_j * dt)

        # update joint coords
        df.store(joint_q_new, coord_start + 0, r_j_new[0])
        df.store(joint_q_new, coord_start + 1, r_j_new[1])
        df.store(joint_q_new, coord_start + 2, r_j_new[2])
        df.store(joint_q_new, coord_start + 3, r_j_new[3])

        # update joint vel
        df.store(joint_qd_new, dof_start + 0, w_j_new[0])
        df.store(joint_qd_new, dof_start + 1, w_j_new[1])
        df.store(joint_qd_new, dof_start + 2, w_j_new[2])

    # fixed joint
    #if (type == 3)
    #    pass

    # free joint
    if (type == 4):

        # dofs: qd = (omega_x, omega_y, omega_z, vel_x, vel_y, vel_z)
        # coords: q = (trans_x, trans_y, trans_z, quat_x, quat_y, quat_z, quat_w)

        # angular and linear acceleration
        m_s = float3(df.load(joint_qdd, dof_start + 0),
                     df.load(joint_qdd, dof_start + 1),
                     df.load(joint_qdd, dof_start + 2))

        a_s = float3(df.load(joint_qdd, dof_start + 3),
                     df.load(joint_qdd, dof_start + 4),
                     df.load(joint_qdd, dof_start + 5))

        # angular and linear velocity
        w_s = float3(df.load(joint_qd, dof_start + 0),
                     df.load(joint_qd, dof_start + 1),
                     df.load(joint_qd, dof_start + 2))

        v_s = float3(df.load(joint_qd, dof_start + 3),
                     df.load(joint_qd, dof_start + 4),
                     df.load(joint_qd, dof_start + 5))

        # symplectic Euler
        w_s = w_s + m_s*dt
        v_s = v_s + a_s*dt

        # translation of origin
        p_s = float3(df.load(joint_q, coord_start + 0),
                     df.load(joint_q, coord_start + 1),
                     df.load(joint_q, coord_start + 2))

        # linear vel of origin (note q/qd switch order of linear angular elements)
        # note we are converting the body twist in the space frame (w_s, v_s) to compute center of mass velcity
        dpdt_s = v_s + cross(w_s, p_s)

        # quat and quat derivative
        r_s = quat(df.load(joint_q, coord_start + 3),
                   df.load(joint_q, coord_start + 4),
                   df.load(joint_q, coord_start + 5),
                   df.load(joint_q, coord_start + 6))

        drdt_s = mul(quat(w_s, 0.0), r_s) * 0.5

        # new orientation (normalized)
        p_s_new = p_s + dpdt_s * dt
        r_s_new = normalize(r_s + drdt_s * dt)

        # update transform
        df.store(joint_q_new, coord_start + 0, p_s_new[0])
        df.store(joint_q_new, coord_start + 1, p_s_new[1])
        df.store(joint_q_new, coord_start + 2, p_s_new[2])

        df.store(joint_q_new, coord_start + 3, r_s_new[0])
        df.store(joint_q_new, coord_start + 4, r_s_new[1])
        df.store(joint_q_new, coord_start + 5, r_s_new[2])
        df.store(joint_q_new, coord_start + 6, r_s_new[3])

        # update joint_twist
        df.store(joint_qd_new, dof_start + 0, w_s[0])
        df.store(joint_qd_new, dof_start + 1, w_s[1])
        df.store(joint_qd_new, dof_start + 2, w_s[2])
        df.store(joint_qd_new, dof_start + 3, v_s[0])
        df.store(joint_qd_new, dof_start + 4, v_s[1])
        df.store(joint_qd_new, dof_start + 5, v_s[2])

    return 0
@df.func
def compute_link_transform(i: int,
                           joint_type: df.tensor(int),
                           joint_parent: df.tensor(int),
                           joint_q_start: df.tensor(int),
                           joint_qd_start: df.tensor(int),
                           joint_q: df.tensor(float),
                           joint_X_pj: df.tensor(df.spatial_transform),
                           joint_X_cm: df.tensor(df.spatial_transform),
                           joint_axis: df.tensor(df.float3),
                           body_X_sc: df.tensor(df.spatial_transform),
                           body_X_sm: df.tensor(df.spatial_transform)):

    # Forward-kinematics step for link i: chains the parent's spatial
    # transform through the joint to produce the link frame (body_X_sc) and
    # center-of-mass frame (body_X_sm). Assumes parents are processed before
    # children (links visited in topological order).

    # parent transform
    parent = load(joint_parent, i)

    # parent transform in spatial coordinates (-1 parent == articulation root)
    X_sp = spatial_transform_identity()
    if (parent >= 0):
        X_sp = load(body_X_sc, parent)

    type = load(joint_type, i)
    axis = load(joint_axis, i)
    coord_start = load(joint_q_start, i)
    dof_start = load(joint_qd_start, i)

    # compute transform across joint
    X_jc = jcalc_transform(type, axis, joint_q, coord_start)

    X_pj = load(joint_X_pj, i)
    X_sc = spatial_transform_multiply(X_sp, spatial_transform_multiply(X_pj, X_jc))

    # compute transform of center of mass
    X_cm = load(joint_X_cm, i)
    X_sm = spatial_transform_multiply(X_sc, X_cm)

    # store geometry transforms
    store(body_X_sc, i, X_sc)
    store(body_X_sm, i, X_sm)

    return 0
@df.kernel
def eval_rigid_fk(articulation_start: df.tensor(int),
                  joint_type: df.tensor(int),
                  joint_parent: df.tensor(int),
                  joint_q_start: df.tensor(int),
                  joint_qd_start: df.tensor(int),
                  joint_q: df.tensor(float),
                  joint_X_pj: df.tensor(df.spatial_transform),
                  joint_X_cm: df.tensor(df.spatial_transform),
                  joint_axis: df.tensor(df.float3),
                  body_X_sc: df.tensor(df.spatial_transform),
                  body_X_sm: df.tensor(df.spatial_transform)):

    # Forward kinematics: one thread per articulation walks its links in
    # order (parents before children) and fills body_X_sc / body_X_sm.
    index = tid()

    # articulation_start holds prefix offsets; [start, end) are this
    # articulation's link indices
    start = df.load(articulation_start, index)
    end = df.load(articulation_start, index+1)

    for i in range(start, end):
        compute_link_transform(i,
                               joint_type,
                               joint_parent,
                               joint_q_start,
                               joint_qd_start,
                               joint_q,
                               joint_X_pj,
                               joint_X_cm,
                               joint_axis,
                               body_X_sc,
                               body_X_sm)
@df.func
def compute_link_velocity(i: int,
                          joint_type: df.tensor(int),
                          joint_parent: df.tensor(int),
                          joint_qd_start: df.tensor(int),
                          joint_qd: df.tensor(float),
                          joint_axis: df.tensor(df.float3),
                          body_I_m: df.tensor(df.spatial_matrix),
                          body_X_sc: df.tensor(df.spatial_transform),
                          body_X_sm: df.tensor(df.spatial_transform),
                          joint_X_pj: df.tensor(df.spatial_transform),
                          gravity: df.tensor(df.float3),
                          # outputs
                          joint_S_s: df.tensor(df.spatial_vector),
                          body_I_s: df.tensor(df.spatial_matrix),
                          body_v_s: df.tensor(df.spatial_vector),
                          body_f_s: df.tensor(df.spatial_vector),
                          body_a_s: df.tensor(df.spatial_vector)):

    # Inverse-dynamics forward pass for link i: propagates velocity and bias
    # acceleration from the parent, transforms the body inertia into spatial
    # coordinates, and computes the bias (Coriolis + gyroscopic - gravity)
    # force. Assumes parents are processed before children.

    type = df.load(joint_type, i)
    axis = df.load(joint_axis, i)
    parent = df.load(joint_parent, i)
    dof_start = df.load(joint_qd_start, i)

    X_sc = df.load(body_X_sc, i)

    # parent transform in spatial coordinates (-1 parent == articulation root)
    X_sp = spatial_transform_identity()
    if (parent >= 0):
        X_sp = load(body_X_sc, parent)

    X_pj = load(joint_X_pj, i)
    X_sj = spatial_transform_multiply(X_sp, X_pj)

    # compute motion subspace and velocity across the joint (also stores S_s to global memory)
    v_j_s = jcalc_motion(type, axis, X_sj, joint_S_s, joint_qd, dof_start)

    # parent velocity
    v_parent_s = spatial_vector()
    a_parent_s = spatial_vector()

    if (parent >= 0):
        v_parent_s = df.load(body_v_s, parent)
        a_parent_s = df.load(body_a_s, parent)

    # body velocity, acceleration (velocity-product / bias term only)
    v_s = v_parent_s + v_j_s
    a_s = a_parent_s + spatial_cross(v_s, v_j_s) # + self.joint_S_s[i]*self.joint_qdd[i]

    # compute body forces
    X_sm = df.load(body_X_sm, i)
    I_m = df.load(body_I_m, i)

    # gravity and external forces (expressed in frame aligned with s but centered at body mass)
    g = df.load(gravity, 0)
    # mass read from the linear-diagonal entry of the 6x6 spatial inertia
    m = I_m[3, 3]

    f_g_m = spatial_vector(float3(), g) * m
    f_g_s = spatial_transform_wrench(spatial_transform(spatial_transform_get_translation(X_sm), quat_identity()), f_g_m)

    #f_ext_s = df.load(body_f_s, i) + f_g_s

    # body forces: inertia in spatial frame, then I*a + v x* (I*v)
    I_s = spatial_transform_inertia(X_sm, I_m)

    f_b_s = df.mul(I_s, a_s) + spatial_cross_dual(v_s, df.mul(I_s, v_s))

    df.store(body_v_s, i, v_s)
    df.store(body_a_s, i, a_s)
    df.store(body_f_s, i, f_b_s - f_g_s)
    df.store(body_I_s, i, I_s)

    return 0
@df.func
def compute_link_tau(offset: int,
                     joint_end: int,
                     joint_type: df.tensor(int),
                     joint_parent: df.tensor(int),
                     joint_q_start: df.tensor(int),
                     joint_qd_start: df.tensor(int),
                     joint_q: df.tensor(float),
                     joint_qd: df.tensor(float),
                     joint_act: df.tensor(float),
                     joint_target: df.tensor(float),
                     joint_target_ke: df.tensor(float),
                     joint_target_kd: df.tensor(float),
                     joint_limit_lower: df.tensor(float),
                     joint_limit_upper: df.tensor(float),
                     joint_limit_ke: df.tensor(float),
                     joint_limit_kd: df.tensor(float),
                     joint_S_s: df.tensor(df.spatial_vector),
                     body_fb_s: df.tensor(df.spatial_vector),
                     # outputs
                     body_ft_s: df.tensor(df.spatial_vector),
                     tau: df.tensor(float)):

    # Inverse-dynamics backward pass for one link: projects the accumulated
    # body wrench onto the joint dofs (via jcalc_tau) and propagates the
    # wrench up to the parent. Called with offset = 0..count-1 so links are
    # visited children-first.

    # for backwards traversal
    i = joint_end-offset-1

    type = df.load(joint_type, i)
    parent = df.load(joint_parent, i)
    dof_start = df.load(joint_qd_start, i)
    coord_start = df.load(joint_q_start, i)

    target_k_e = df.load(joint_target_ke, i)
    target_k_d = df.load(joint_target_kd, i)

    limit_k_e = df.load(joint_limit_ke, i)
    limit_k_d = df.load(joint_limit_kd, i)

    # total forces on body: bias/body force plus wrench passed up from children
    f_b_s = df.load(body_fb_s, i)
    f_t_s = df.load(body_ft_s, i)

    f_s = f_b_s + f_t_s

    # compute joint-space forces, writes out tau
    jcalc_tau(type, target_k_e, target_k_d, limit_k_e, limit_k_d, joint_S_s, joint_q, joint_qd, joint_act, joint_target, joint_limit_lower, joint_limit_upper, coord_start, dof_start, f_s, tau)

    # update parent forces, todo: check that this is valid for the backwards pass
    if (parent >= 0):
        df.atomic_add(body_ft_s, parent, f_s)

    return 0
@df.kernel
def eval_rigid_id(articulation_start: df.tensor(int),
                  joint_type: df.tensor(int),
                  joint_parent: df.tensor(int),
                  joint_q_start: df.tensor(int),
                  joint_qd_start: df.tensor(int),
                  joint_q: df.tensor(float),
                  joint_qd: df.tensor(float),
                  joint_axis: df.tensor(df.float3),
                  joint_target_ke: df.tensor(float),
                  joint_target_kd: df.tensor(float),
                  body_I_m: df.tensor(df.spatial_matrix),
                  body_X_sc: df.tensor(df.spatial_transform),
                  body_X_sm: df.tensor(df.spatial_transform),
                  joint_X_pj: df.tensor(df.spatial_transform),
                  gravity: df.tensor(df.float3),
                  # outputs
                  joint_S_s: df.tensor(df.spatial_vector),
                  body_I_s: df.tensor(df.spatial_matrix),
                  body_v_s: df.tensor(df.spatial_vector),
                  body_f_s: df.tensor(df.spatial_vector),
                  body_a_s: df.tensor(df.spatial_vector)):

    # Inverse-dynamics forward sweep: one thread per articulation computes
    # link velocities, spatial inertias, and bias forces root-to-leaf.
    index = tid()

    start = df.load(articulation_start, index)
    end = df.load(articulation_start, index+1)
    count = end-start

    # compute link velocities and coriolis forces
    for i in range(start, end):
        compute_link_velocity(
            i,
            joint_type,
            joint_parent,
            joint_qd_start,
            joint_qd,
            joint_axis,
            body_I_m,
            body_X_sc,
            body_X_sm,
            joint_X_pj,
            gravity,
            joint_S_s,
            body_I_s,
            body_v_s,
            body_f_s,
            body_a_s)
@df.kernel
def eval_rigid_tau(articulation_start: df.tensor(int),
                   joint_type: df.tensor(int),
                   joint_parent: df.tensor(int),
                   joint_q_start: df.tensor(int),
                   joint_qd_start: df.tensor(int),
                   joint_q: df.tensor(float),
                   joint_qd: df.tensor(float),
                   joint_act: df.tensor(float),
                   joint_target: df.tensor(float),
                   joint_target_ke: df.tensor(float),
                   joint_target_kd: df.tensor(float),
                   joint_limit_lower: df.tensor(float),
                   joint_limit_upper: df.tensor(float),
                   joint_limit_ke: df.tensor(float),
                   joint_limit_kd: df.tensor(float),
                   joint_axis: df.tensor(df.float3),
                   joint_S_s: df.tensor(df.spatial_vector),
                   body_fb_s: df.tensor(df.spatial_vector),
                   # outputs
                   body_ft_s: df.tensor(df.spatial_vector),
                   tau: df.tensor(float)):

    # Inverse-dynamics backward sweep: one thread per articulation visits
    # links leaf-to-root (offset handled inside compute_link_tau) to produce
    # joint-space torques tau and propagated body wrenches body_ft_s.
    index = tid()

    start = df.load(articulation_start, index)
    end = df.load(articulation_start, index+1)
    count = end-start

    # compute joint forces
    for i in range(0, count):
        compute_link_tau(
            i,
            end,
            joint_type,
            joint_parent,
            joint_q_start,
            joint_qd_start,
            joint_q,
            joint_qd,
            joint_act,
            joint_target,
            joint_target_ke,
            joint_target_kd,
            joint_limit_lower,
            joint_limit_upper,
            joint_limit_ke,
            joint_limit_kd,
            joint_S_s,
            body_fb_s,
            body_ft_s,
            tau)
@df.kernel
def eval_rigid_jacobian(
    articulation_start: df.tensor(int),
    articulation_J_start: df.tensor(int),
    joint_parent: df.tensor(int),
    joint_qd_start: df.tensor(int),
    joint_S_s: df.tensor(spatial_vector),
    # outputs
    J: df.tensor(float)):

    # One thread per articulation: assembles the system Jacobian from the
    # per-joint motion subspaces. articulation_J_start gives each
    # articulation's offset into the flattened output J.
    index = tid()

    joint_start = df.load(articulation_start, index)
    joint_end = df.load(articulation_start, index+1)
    joint_count = joint_end-joint_start

    J_offset = df.load(articulation_J_start, index)

    # in spatial.h
    spatial_jacobian(joint_S_s, joint_parent, joint_qd_start, joint_start, joint_count, J_offset, J)
# @df.kernel
# def eval_rigid_jacobian(
# articulation_start: df.tensor(int),
# articulation_J_start: df.tensor(int),
# joint_parent: df.tensor(int),
# joint_qd_start: df.tensor(int),
# joint_S_s: df.tensor(spatial_vector),
# # outputs
# J: df.tensor(float)):
# # one thread per-articulation
# index = tid()
# joint_start = df.load(articulation_start, index)
# joint_end = df.load(articulation_start, index+1)
# joint_count = joint_end-joint_start
# dof_start = df.load(joint_qd_start, joint_start)
# dof_end = df.load(joint_qd_start, joint_end)
# dof_count = dof_end-dof_start
# #(const spatial_vector* S, const int* joint_parents, const int* joint_qd_start, int num_links, int num_dofs, float* J)
# spatial_jacobian(joint_S_s, joint_parent, joint_qd_start, joint_count, dof_count, J)
@df.kernel
def eval_rigid_mass(
    articulation_start: df.tensor(int),
    articulation_M_start: df.tensor(int),
    body_I_s: df.tensor(spatial_matrix),
    # outputs
    M: df.tensor(float)):

    # One thread per articulation: packs the per-link spatial inertias into
    # the flattened (block-diagonal) mass matrix M at this articulation's
    # offset.
    index = tid()

    joint_start = df.load(articulation_start, index)
    joint_end = df.load(articulation_start, index+1)
    joint_count = joint_end-joint_start

    M_offset = df.load(articulation_M_start, index)

    # in spatial.h
    spatial_mass(body_I_s, joint_start, joint_count, M_offset, M)
@df.kernel
def eval_dense_gemm(m: int, n: int, p: int, t1: int, t2: int, A: df.tensor(float), B: df.tensor(float), C: df.tensor(float)):
    # Dense matrix multiply C = op(A) * op(B); t1/t2 select transposition.
    dense_gemm(m, n, p, t1, t2, A, B, C)
@df.kernel
def eval_dense_gemm_batched(m: df.tensor(int), n: df.tensor(int), p: df.tensor(int), t1: int, t2: int, A_start: df.tensor(int), B_start: df.tensor(int), C_start: df.tensor(int), A: df.tensor(float), B: df.tensor(float), C: df.tensor(float)):
    # Batched dense GEMM; per-batch sizes and offsets into the flat A/B/C buffers.
    dense_gemm_batched(m, n, p, t1, t2, A_start, B_start, C_start, A, B, C)
@df.kernel
def eval_dense_cholesky(n: int, A: df.tensor(float), regularization: df.tensor(float), L: df.tensor(float)):
    # Cholesky factorization of an n x n matrix with diagonal regularization.
    dense_chol(n, A, regularization, L)
@df.kernel
def eval_dense_cholesky_batched(A_start: df.tensor(int), A_dim: df.tensor(int), A: df.tensor(float), regularization: df.tensor(float), L: df.tensor(float)):
    # Batched Cholesky; A_start/A_dim give each matrix's offset and size.
    dense_chol_batched(A_start, A_dim, A, regularization, L)
@df.kernel
def eval_dense_subs(n: int, L: df.tensor(float), b: df.tensor(float), x: df.tensor(float)):
    # Forward/back substitution solving L L^T x = b from a Cholesky factor L.
    dense_subs(n, L, b, x)
# helper that propagates gradients back to A, treating L as a constant / temporary variable
# allows us to reuse the Cholesky decomposition from the forward pass
@df.kernel
def eval_dense_solve(n: int, A: df.tensor(float), L: df.tensor(float), b: df.tensor(float), tmp: df.tensor(float), x: df.tensor(float)):
    # Solve A x = b using the precomputed factor L; tmp is scratch space.
    dense_solve(n, A, L, b, tmp, x)
# helper that propagates gradients back to A, treating L as a constant / temporary variable
# allows us to reuse the Cholesky decomposition from the forward pass
@df.kernel
def eval_dense_solve_batched(b_start: df.tensor(int), A_start: df.tensor(int), A_dim: df.tensor(int), A: df.tensor(float), L: df.tensor(float), b: df.tensor(float), tmp: df.tensor(float), x: df.tensor(float)):
    # Batched variant of eval_dense_solve with per-batch offsets/sizes.
    dense_solve_batched(b_start, A_start, A_dim, A, L, b, tmp, x)
@df.kernel
def eval_rigid_integrate(
    joint_type: df.tensor(int),
    joint_q_start: df.tensor(int),
    joint_qd_start: df.tensor(int),
    joint_q: df.tensor(float),
    joint_qd: df.tensor(float),
    joint_qdd: df.tensor(float),
    dt: float,
    # outputs
    joint_q_new: df.tensor(float),
    joint_qd_new: df.tensor(float)):

    # One thread per joint: symplectic-Euler integrate its coordinates and
    # velocities (dispatch on joint type happens inside jcalc_integrate).
    index = tid()

    type = df.load(joint_type, index)
    coord_start = df.load(joint_q_start, index)
    dof_start = df.load(joint_qd_start, index)

    jcalc_integrate(
        type,
        joint_q,
        joint_qd,
        joint_qdd,
        coord_start,
        dof_start,
        dt,
        joint_q_new,
        joint_qd_new)
g_state_out = None
# define PyTorch autograd op to wrap simulate func
class SimulateFunc(torch.autograd.Function):
    """PyTorch autograd function representing a simulation step

    Note:
        This node will be inserted into the computation graph whenever
        `forward()` is called on an integrator object. It should not be called
        directly by the user.
    """
    @staticmethod
    def forward(ctx, integrator, model, state_in, dt, substeps, mass_matrix_freq, *tensors):
        # Runs `substeps` sub-iterations of the integrator, recording every
        # kernel launch on a tape so backward() can replay them in reverse.
        # Returns the flattened output state tensors; the State object itself
        # is handed back to the caller via the module-global g_state_out.

        # record launches
        ctx.tape = df.Tape()
        ctx.inputs = tensors
        #ctx.outputs = df.to_weak_list(state_out.flatten())

        actuation = state_in.joint_act

        # simulate
        for i in range(substeps):

            # ensure actuation is set on all substeps
            state_in.joint_act = actuation
            state_out = model.state()

            # mass matrix is only refactorized every mass_matrix_freq substeps
            integrator._simulate(ctx.tape, model, state_in, state_out, dt/float(substeps), update_mass_matrix=((i%mass_matrix_freq)==0))

            # swap states
            state_in = state_out

        # use global to pass state object back to caller
        global g_state_out
        g_state_out = state_out

        # weak refs so the graph does not keep output tensors alive
        ctx.outputs = df.to_weak_list(state_out.flatten())
        return tuple(state_out.flatten())

    @staticmethod
    def backward(ctx, *grads):
        # Replays the recorded tape backwards to compute adjoints of the
        # input tensors from the incoming output gradients.

        # ensure grads are contiguous in memory
        adj_outputs = df.make_contiguous(grads)

        # register outputs with tape
        outputs = df.to_strong_list(ctx.outputs)
        for o in range(len(outputs)):
            ctx.tape.adjoints[outputs[o]] = adj_outputs[o]

        # replay launches backwards
        ctx.tape.replay()

        # find adjoint of inputs
        adj_inputs = []
        for i in ctx.inputs:

            if i in ctx.tape.adjoints:
                adj_inputs.append(ctx.tape.adjoints[i])
            else:
                adj_inputs.append(None)

        # free the tape
        ctx.tape.reset()

        # filter grads to replace empty tensors / no grad / constant params with None
        # leading Nones correspond to the six non-tensor forward() arguments
        return (None, None, None, None, None, None, *df.filter_grads(adj_inputs))
class SemiImplicitIntegrator:
"""A semi-implicit integrator using symplectic Euler
After constructing `Model` and `State` objects this time-integrator
may be used to advance the simulation state forward in time.
    Semi-implicit time integration is a variational integrator that
    preserves energy, however it is not unconditionally stable, and requires a time-step
    small enough to support the required stiffness and damping forces.
See: https://en.wikipedia.org/wiki/Semi-implicit_Euler_method
Example:
>>> integrator = df.SemiImplicitIntegrator()
>>>
>>> # simulation loop
>>> for i in range(100):
>>> state = integrator.forward(model, state, dt)
"""
    def __init__(self):
        # stateless integrator; no configuration required
        pass
    def forward(self, model: Model, state_in: State, dt: float, substeps: int, mass_matrix_freq: int) -> State:
        """Performs a single integration step forward in time

        This method inserts a node into the PyTorch computational graph with
        references to all model and state tensors such that gradients
        can be propagated back through the simulation step.

        Args:
            model: Simulation model
            state_in: Simulation state at the start the time-step
            dt: The simulation time-step (usually in seconds)
            substeps: Number of sub-iterations to split dt into
            mass_matrix_freq: Refactorize the mass matrix every this many substeps

        Returns:
            The state of the system at the end of the time-step
        """

        if dflex.config.no_grad:

            # if no gradient required then do inplace update
            for i in range(substeps):
                self._simulate(df.Tape(), model, state_in, state_in, dt/float(substeps), update_mass_matrix=(i%mass_matrix_freq)==0)

            return state_in

        else:

            # get list of inputs and outputs for PyTorch tensor tracking
            inputs = [*state_in.flatten(), *model.flatten()]

            # run sim as a PyTorch op
            tensors = SimulateFunc.apply(self, model, state_in, dt, substeps, mass_matrix_freq, *inputs)

            # forward() handed the State object back through this module global
            global g_state_out
            state_out = g_state_out
            g_state_out = None # null reference

            return state_out
def _simulate(self, tape, model, state_in, state_out, dt, update_mass_matrix=True):
with dflex.util.ScopedTimer("simulate", False):
# alloc particle force buffer
if (model.particle_count):
state_out.particle_f.zero_()
if (model.link_count):
state_out.body_ft_s = torch.zeros((model.link_count, 6), dtype=torch.float32, device=model.adapter, requires_grad=True)
state_out.body_f_ext_s = torch.zeros((model.link_count, 6), dtype=torch.float32, device=model.adapter, requires_grad=True)
# damped springs
if (model.spring_count):
tape.launch(func=eval_springs,
dim=model.spring_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.spring_indices, model.spring_rest_length, model.spring_stiffness, model.spring_damping],
outputs=[state_out.particle_f],
adapter=model.adapter)
# triangle elastic and lift/drag forces
if (model.tri_count and model.tri_ke > 0.0):
tape.launch(func=eval_triangles,
dim=model.tri_count,
inputs=[
state_in.particle_q,
state_in.particle_qd,
model.tri_indices,
model.tri_poses,
model.tri_activations,
model.tri_ke,
model.tri_ka,
model.tri_kd,
model.tri_drag,
model.tri_lift
],
outputs=[state_out.particle_f],
adapter=model.adapter)
# triangle/triangle contacts
if (model.enable_tri_collisions and model.tri_count and model.tri_ke > 0.0):
tape.launch(func=eval_triangles_contact,
dim=model.tri_count * model.particle_count,
inputs=[
model.particle_count,
state_in.particle_q,
state_in.particle_qd,
model.tri_indices,
model.tri_poses,
model.tri_activations,
model.tri_ke,
model.tri_ka,
model.tri_kd,
model.tri_drag,
model.tri_lift
],
outputs=[state_out.particle_f],
adapter=model.adapter)
# triangle bending
if (model.edge_count):
tape.launch(func=eval_bending,
dim=model.edge_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.edge_indices, model.edge_rest_angle, model.edge_ke, model.edge_kd],
outputs=[state_out.particle_f],
adapter=model.adapter)
# particle ground contact
if (model.ground and model.particle_count):
tape.launch(func=eval_contacts,
dim=model.particle_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.contact_ke, model.contact_kd, model.contact_kf, model.contact_mu],
outputs=[state_out.particle_f],
adapter=model.adapter)
# tetrahedral FEM
if (model.tet_count):
tape.launch(func=eval_tetrahedra,
dim=model.tet_count,
inputs=[state_in.particle_q, state_in.particle_qd, model.tet_indices, model.tet_poses, model.tet_activations, model.tet_materials],
outputs=[state_out.particle_f],
adapter=model.adapter)
#----------------------------
# articulations
if (model.link_count):
# evaluate body transforms
tape.launch(
func=eval_rigid_fk,
dim=model.articulation_count,
inputs=[
model.articulation_joint_start,
model.joint_type,
model.joint_parent,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
model.joint_X_pj,
model.joint_X_cm,
model.joint_axis
],
outputs=[
state_out.body_X_sc,
state_out.body_X_sm
],
adapter=model.adapter,
preserve_output=True)
# evaluate joint inertias, motion vectors, and forces
tape.launch(
func=eval_rigid_id,
dim=model.articulation_count,
inputs=[
model.articulation_joint_start,
model.joint_type,
model.joint_parent,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
model.joint_axis,
model.joint_target_ke,
model.joint_target_kd,
model.body_I_m,
state_out.body_X_sc,
state_out.body_X_sm,
model.joint_X_pj,
model.gravity
],
outputs=[
state_out.joint_S_s,
state_out.body_I_s,
state_out.body_v_s,
state_out.body_f_s,
state_out.body_a_s,
],
adapter=model.adapter,
preserve_output=True)
if (model.ground and model.contact_count > 0):
# evaluate contact forces
tape.launch(
func=eval_rigid_contacts_art,
dim=model.contact_count,
inputs=[
state_out.body_X_sc,
state_out.body_v_s,
model.contact_body0,
model.contact_point0,
model.contact_dist,
model.contact_material,
model.shape_materials
],
outputs=[
state_out.body_f_s
],
adapter=model.adapter,
preserve_output=True)
# particle shape contact
if (model.particle_count):
# tape.launch(func=eval_soft_contacts,
# dim=model.particle_count*model.shape_count,
# inputs=[state_in.particle_q, state_in.particle_qd, model.contact_ke, model.contact_kd, model.contact_kf, model.contact_mu],
# outputs=[state_out.particle_f],
# adapter=model.adapter)
tape.launch(func=eval_soft_contacts,
dim=model.particle_count*model.shape_count,
inputs=[
model.particle_count,
state_in.particle_q,
state_in.particle_qd,
state_in.body_X_sc,
state_in.body_v_s,
model.shape_transform,
model.shape_body,
model.shape_geo_type,
torch.Tensor(),
model.shape_geo_scale,
model.shape_materials,
model.contact_ke,
model.contact_kd,
model.contact_kf,
model.contact_mu],
# outputs
outputs=[
state_out.particle_f,
state_out.body_f_s],
adapter=model.adapter)
# evaluate muscle actuation
tape.launch(
func=eval_muscles,
dim=model.muscle_count,
inputs=[
state_out.body_X_sc,
state_out.body_v_s,
model.muscle_start,
model.muscle_params,
model.muscle_links,
model.muscle_points,
model.muscle_activation
],
outputs=[
state_out.body_f_s
],
adapter=model.adapter,
preserve_output=True)
# evaluate joint torques
tape.launch(
func=eval_rigid_tau,
dim=model.articulation_count,
inputs=[
model.articulation_joint_start,
model.joint_type,
model.joint_parent,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
state_in.joint_act,
model.joint_target,
model.joint_target_ke,
model.joint_target_kd,
model.joint_limit_lower,
model.joint_limit_upper,
model.joint_limit_ke,
model.joint_limit_kd,
model.joint_axis,
state_out.joint_S_s,
state_out.body_f_s
],
outputs=[
state_out.body_ft_s,
state_out.joint_tau
],
adapter=model.adapter,
preserve_output=True)
if (update_mass_matrix):
model.alloc_mass_matrix()
# build J
tape.launch(
func=eval_rigid_jacobian,
dim=model.articulation_count,
inputs=[
# inputs
model.articulation_joint_start,
model.articulation_J_start,
model.joint_parent,
model.joint_qd_start,
state_out.joint_S_s
],
outputs=[
model.J
],
adapter=model.adapter,
preserve_output=True)
# build M
tape.launch(
func=eval_rigid_mass,
dim=model.articulation_count,
inputs=[
# inputs
model.articulation_joint_start,
model.articulation_M_start,
state_out.body_I_s
],
outputs=[
model.M
],
adapter=model.adapter,
preserve_output=True)
# form P = M*J
df.matmul_batched(
tape,
model.articulation_count,
model.articulation_M_rows,
model.articulation_J_cols,
model.articulation_J_rows,
0,
0,
model.articulation_M_start,
model.articulation_J_start,
model.articulation_J_start, # P start is the same as J start since it has the same dims as J
model.M,
model.J,
model.P,
adapter=model.adapter)
# form H = J^T*P
df.matmul_batched(
tape,
model.articulation_count,
model.articulation_J_cols,
model.articulation_J_cols,
model.articulation_J_rows, # P rows is the same as J rows
1,
0,
model.articulation_J_start,
model.articulation_J_start, # P start is the same as J start since it has the same dims as J
model.articulation_H_start,
model.J,
model.P,
model.H,
adapter=model.adapter)
# compute decomposition
tape.launch(
func=eval_dense_cholesky_batched,
dim=model.articulation_count,
inputs=[
model.articulation_H_start,
model.articulation_H_rows,
model.H,
model.joint_armature
],
outputs=[
model.L
],
adapter=model.adapter,
skip_check_grad=True)
tmp = torch.zeros_like(state_out.joint_tau)
# solve for qdd
tape.launch(
func=eval_dense_solve_batched,
dim=model.articulation_count,
inputs=[
model.articulation_dof_start,
model.articulation_H_start,
model.articulation_H_rows,
model.H,
model.L,
state_out.joint_tau,
tmp
],
outputs=[
state_out.joint_qdd
],
adapter=model.adapter,
skip_check_grad=True)
# integrate joint dofs -> joint coords
tape.launch(
func=eval_rigid_integrate,
dim=model.link_count,
inputs=[
model.joint_type,
model.joint_q_start,
model.joint_qd_start,
state_in.joint_q,
state_in.joint_qd,
state_out.joint_qdd,
dt
],
outputs=[
state_out.joint_q,
state_out.joint_qd
],
adapter=model.adapter)
#----------------------------
# integrate particles
if (model.particle_count):
tape.launch(func=integrate_particles,
dim=model.particle_count,
inputs=[state_in.particle_q, state_in.particle_qd, state_out.particle_f, model.particle_inv_mass, model.gravity, dt],
outputs=[state_out.particle_q, state_out.particle_qd],
adapter=model.adapter)
return state_out
@df.kernel
def solve_springs(x: df.tensor(df.float3),
                  v: df.tensor(df.float3),
                  invmass: df.tensor(float),
                  spring_indices: df.tensor(int),
                  spring_rest_lengths: df.tensor(float),
                  spring_stiffness: df.tensor(float),
                  spring_damping: df.tensor(float),
                  dt: float,
                  delta: df.tensor(df.float3)):
    # One thread per spring: project the distance constraint between the two
    # endpoints and accumulate mass-weighted position corrections into `delta`.

    tid = df.tid()

    i = df.load(spring_indices, tid * 2 + 0)
    j = df.load(spring_indices, tid * 2 + 1)

    ke = df.load(spring_stiffness, tid)
    kd = df.load(spring_damping, tid)
    rest = df.load(spring_rest_lengths, tid)

    xi = df.load(x, i)
    xj = df.load(x, j)

    vi = df.load(v, i)
    vj = df.load(v, j)

    xij = xi - xj
    vij = vi - vj

    l = length(xij)
    l_inv = 1.0 / l

    # normalized spring direction
    dir = xij * l_inv

    # constraint value (current length - rest length) and its time derivative
    c = l - rest
    dcdt = dot(dir, vij)

    # damping based on relative velocity.
    #fs = dir * (ke * c + kd * dcdt)
    # NOTE(review): kd and dcdt are currently unused — the damping force above
    # is commented out, so this kernel applies a pure positional projection.

    wi = df.load(invmass, i)
    wj = df.load(invmass, j)

    denom = wi + wj

    # XPBD compliance term; currently excluded from the denominator (see the
    # commented-out "+ alpha)" below), so the constraint is treated as rigid
    alpha = 1.0/(ke*dt*dt)

    multiplier = c / (denom)# + alpha)

    xd = dir*multiplier

    # opposite corrections on the two endpoints, weighted by inverse mass
    df.atomic_sub(delta, i, xd*wi)
    df.atomic_add(delta, j, xd*wj)
@df.kernel
def solve_tetrahedra(x: df.tensor(df.float3),
                     v: df.tensor(df.float3),
                     inv_mass: df.tensor(float),
                     indices: df.tensor(int),
                     pose: df.tensor(df.mat33),
                     activation: df.tensor(float),
                     materials: df.tensor(float),
                     dt: float,
                     relaxation: float,
                     delta: df.tensor(df.float3)):
    # One thread per tetrahedron: XPBD projection of a deviatoric (shear)
    # constraint followed by a volumetric constraint, accumulating
    # mass-weighted, relaxation-scaled corrections into `delta`.
    #
    # NOTE(review): `act`, the velocities v0..v3, and k_damp are loaded but
    # never used below — confirm whether damping/activation was intended here.

    tid = df.tid()

    i = df.load(indices, tid * 4 + 0)
    j = df.load(indices, tid * 4 + 1)
    k = df.load(indices, tid * 4 + 2)
    l = df.load(indices, tid * 4 + 3)

    act = df.load(activation, tid)

    # materials are packed as (k_mu, k_lambda, k_damp) triples per element
    k_mu = df.load(materials, tid * 3 + 0)
    k_lambda = df.load(materials, tid * 3 + 1)
    k_damp = df.load(materials, tid * 3 + 2)

    x0 = df.load(x, i)
    x1 = df.load(x, j)
    x2 = df.load(x, k)
    x3 = df.load(x, l)

    v0 = df.load(v, i)
    v1 = df.load(v, j)
    v2 = df.load(v, k)
    v3 = df.load(v, l)

    w0 = df.load(inv_mass, i)
    w1 = df.load(inv_mass, j)
    w2 = df.load(inv_mass, k)
    w3 = df.load(inv_mass, l)

    x10 = x1 - x0
    x20 = x2 - x0
    x30 = x3 - x0

    v10 = v1 - v0
    v20 = v2 - v0
    v30 = v3 - v0

    Ds = df.mat33(x10, x20, x30)
    Dm = df.load(pose, tid)

    # `pose` holds the inverse rest matrix, so det(Dm)*6 = 1/rest_volume
    inv_rest_volume = df.determinant(Dm) * 6.0
    rest_volume = 1.0 / inv_rest_volume

    # F = Xs*Xm^-1
    F = Ds * Dm

    f1 = df.float3(F[0, 0], F[1, 0], F[2, 0])
    f2 = df.float3(F[0, 1], F[1, 1], F[2, 1])
    f3 = df.float3(F[0, 2], F[1, 2], F[2, 2])

    # C_sqrt: deviatoric constraint C = sqrt(|tr(F^T F) - 3|)
    tr = dot(f1, f1) + dot(f2, f2) + dot(f3, f3)
    r_s = df.sqrt(abs(tr - 3.0))
    C = r_s

    if (r_s == 0.0):
        return

    if (tr < 3.0):
        r_s = 0.0 - r_s

    dCdx = F*df.transpose(Dm)*(1.0/r_s)
    alpha = 1.0 + k_mu / k_lambda

    # C_Neo
    # r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3))
    # r_s_inv = 1.0/r_s
    # C = r_s
    # dCdx = F*df.transpose(Dm)*r_s_inv
    # alpha = 1.0 + k_mu / k_lambda

    # C_Spherical
    # r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3))
    # r_s_inv = 1.0/r_s
    # C = r_s - df.sqrt(3.0)
    # dCdx = F*df.transpose(Dm)*r_s_inv
    # alpha = 1.0

    # C_D
    #r_s = df.sqrt(dot(f1, f1) + dot(f2, f2) + dot(f3, f3))
    #C = r_s*r_s - 3.0
    #dCdx = F*df.transpose(Dm)*2.0
    #alpha = 1.0

    # NOTE(review): bare `float3` here (vs df.float3 elsewhere in this kernel)
    # — presumably resolved by the dflex DSL, but verify it is intentional.
    grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
    grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
    grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
    grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)

    # XPBD update: lambda = C / (sum w_i |grad_i|^2 + compliance/dt^2)
    denom = dot(grad0,grad0)*w0 + dot(grad1,grad1)*w1 + dot(grad2,grad2)*w2 + dot(grad3,grad3)*w3
    multiplier = C/(denom + 1.0/(k_mu*dt*dt*rest_volume))

    delta0 = grad0*multiplier
    delta1 = grad1*multiplier
    delta2 = grad2*multiplier
    delta3 = grad3*multiplier

    # hydrostatic part: volume constraint C_vol = det(F) - alpha, where the
    # rest-volume offset alpha = 1 + k_mu/k_lambda compensates the deviatoric term
    J = df.determinant(F)

    C_vol = J - alpha

    # dCdx = df.mat33(cross(f2, f3), cross(f3, f1), cross(f1, f2))*df.transpose(Dm)

    # grad1 = float3(dCdx[0,0], dCdx[1,0], dCdx[2,0])
    # grad2 = float3(dCdx[0,1], dCdx[1,1], dCdx[2,1])
    # grad3 = float3(dCdx[0,2], dCdx[1,2], dCdx[2,2])
    # grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)

    # volume gradients computed directly from opposing-face cross products
    s = inv_rest_volume / 6.0
    grad1 = df.cross(x20, x30) * s
    grad2 = df.cross(x30, x10) * s
    grad3 = df.cross(x10, x20) * s
    grad0 = (grad1 + grad2 + grad3)*(0.0 - 1.0)

    denom = dot(grad0, grad0)*w0 + dot(grad1, grad1)*w1 + dot(grad2, grad2)*w2 + dot(grad3, grad3)*w3
    multiplier = C_vol/(denom + 1.0/(k_lambda*dt*dt*rest_volume))

    delta0 = delta0 + grad0 * multiplier
    delta1 = delta1 + grad1 * multiplier
    delta2 = delta2 + grad2 * multiplier
    delta3 = delta3 + grad3 * multiplier

    # apply forces
    df.atomic_sub(delta, i, delta0*w0*relaxation)
    df.atomic_sub(delta, j, delta1*w1*relaxation)
    df.atomic_sub(delta, k, delta2*w2*relaxation)
    df.atomic_sub(delta, l, delta3*w3*relaxation)
@df.kernel
def solve_contacts(
    x: df.tensor(df.float3),
    v: df.tensor(df.float3),
    inv_mass: df.tensor(float),
    mu: float,
    dt: float,
    delta: df.tensor(df.float3)):
    # One thread per particle: project penetration against the ground plane
    # (y = 0.01 contact surface) and apply a velocity-based friction
    # correction, accumulating the result into `delta`.

    tid = df.tid()

    x0 = df.load(x, tid)
    v0 = df.load(v, tid)

    # NOTE(review): w0 is loaded but never used — the correction below is not
    # mass-weighted; confirm whether that is intentional.
    w0 = df.load(inv_mass, tid)

    # ground plane normal (world up) and signed distance with 0.01 offset
    n = df.float3(0.0, 1.0, 0.0)
    c = df.dot(n, x0) - 0.01

    # no penetration -> no correction
    if (c > 0.0):
        return

    # normal
    lambda_n = c
    delta_n = n*lambda_n

    # friction: tangential velocity, with the friction impulse clamped by
    # mu times the (negative) normal impulse
    vn = df.dot(n, v0)
    vt = v0 - n * vn

    lambda_f = df.max(mu*lambda_n, 0.0 - df.length(vt)*dt)
    delta_f = df.normalize(vt)*lambda_f

    df.atomic_add(delta, tid, delta_f - delta_n)
@df.kernel
def apply_deltas(x_orig: df.tensor(df.float3),
                 v_orig: df.tensor(df.float3),
                 x_pred: df.tensor(df.float3),
                 delta: df.tensor(df.float3),
                 dt: float,
                 x_out: df.tensor(df.float3),
                 v_out: df.tensor(df.float3)):
    # Finalize the position-based solve: add the accumulated constraint
    # corrections to the predicted positions, then recover velocities by
    # finite-differencing against the pre-step positions.

    tid = df.tid()

    x_start = df.load(x_orig, tid)
    x_predicted = df.load(x_pred, tid)

    # accumulated constraint correction for this particle
    correction = df.load(delta, tid)

    x_final = x_predicted + correction
    v_final = (x_final - x_start)/dt

    df.store(x_out, tid, x_final)
    df.store(v_out, tid, v_final)
class XPBDIntegrator:
    """An implicit integrator using XPBD (extended position-based dynamics)

    After constructing `Model` and `State` objects this time-integrator
    may be used to advance the simulation state forward in time.

    Each step predicts particle positions with an unconstrained integration,
    projects constraints (contacts, springs, tetrahedra) as position
    corrections, then recovers velocities from the corrected positions.

    Example:
        >>> integrator = df.XPBDIntegrator()
        >>>
        >>> # simulation loop
        >>> for i in range(100):
        >>>     state = integrator.forward(model, state, dt)
    """

    def __init__(self):
        pass

    def forward(self, model: Model, state_in: State, dt: float) -> State:
        """Performs a single integration step forward in time

        This method inserts a node into the PyTorch computational graph with
        references to all model and state tensors such that gradients
        can be propagated back through the simulation step.

        Args:
            model: Simulation model
            state_in: Simulation state at the start of the time-step
            dt: The simulation time-step (usually in seconds)

        Returns:
            The state of the system at the end of the time-step
        """

        if dflex.config.no_grad:

            # if no gradient required then do inplace update
            self._simulate(df.Tape(), model, state_in, state_in, dt)
            return state_in

        else:

            # get list of inputs and outputs for PyTorch tensor tracking
            inputs = [*state_in.flatten(), *model.flatten()]

            # allocate new output
            state_out = model.state()

            # run sim as a PyTorch op
            # NOTE(review): this argument list differs from the one used by
            # SemiImplicitIntegrator.forward (which passes substeps and
            # mass_matrix_freq instead of state_out) — confirm SimulateFunc
            # supports both calling conventions.
            tensors = SimulateFunc.apply(self, model, state_in, state_out, dt, *inputs)

            return state_out

    def _simulate(self, tape, model, state_in, state_out, dt):
        # One XPBD step: predict, project constraints into the shared delta
        # buffer (state_out.particle_f), then apply the corrections.

        with dflex.util.ScopedTimer("simulate", False):

            # alloc particle force buffer (reused here as the constraint
            # delta accumulator)
            if (model.particle_count):
                state_out.particle_f.zero_()

            q_pred = torch.zeros_like(state_in.particle_q)
            qd_pred = torch.zeros_like(state_in.particle_qd)

            #----------------------------
            # integrate particles (unconstrained prediction)

            if (model.particle_count):
                tape.launch(func=integrate_particles,
                            dim=model.particle_count,
                            inputs=[state_in.particle_q, state_in.particle_qd, state_out.particle_f, model.particle_inv_mass, model.gravity, dt],
                            outputs=[q_pred, qd_pred],
                            adapter=model.adapter)

            # contacts
            if (model.particle_count and model.ground):
                tape.launch(func=solve_contacts,
                            dim=model.particle_count,
                            inputs=[q_pred, qd_pred, model.particle_inv_mass, model.contact_mu, dt],
                            outputs=[state_out.particle_f],
                            adapter=model.adapter)

            # damped springs
            if (model.spring_count):
                tape.launch(func=solve_springs,
                            dim=model.spring_count,
                            inputs=[q_pred, qd_pred, model.particle_inv_mass, model.spring_indices, model.spring_rest_length, model.spring_stiffness, model.spring_damping, dt],
                            outputs=[state_out.particle_f],
                            adapter=model.adapter)

            # tetrahedral FEM
            if (model.tet_count):
                tape.launch(func=solve_tetrahedra,
                            dim=model.tet_count,
                            inputs=[q_pred, qd_pred, model.particle_inv_mass, model.tet_indices, model.tet_poses, model.tet_activations, model.tet_materials, dt, model.relaxation],
                            outputs=[state_out.particle_f],
                            adapter=model.adapter)

            # apply updates
            tape.launch(func=apply_deltas,
                        dim=model.particle_count,
                        inputs=[state_in.particle_q,
                                state_in.particle_qd,
                                q_pred,
                                state_out.particle_f,
                                dt],
                        outputs=[state_out.particle_q,
                                 state_out.particle_qd],
                        adapter=model.adapter)

        return state_out
| 97,150 |
Python
| 31.329784 | 241 | 0.512527 |
RoboticExplorationLab/Deep-ILC/dflex/dflex/matnn.h
|
#pragma once
// Row-major linear offset of element (i, j) for a matrix whose rows are
// `stride` floats apart.
CUDA_CALLABLE inline int dense_index(int stride, int i, int j)
{
    return j + stride * i;
}
// Linear offset of logical element (i, j) of a rows x cols matrix; when
// `transpose` is set the underlying storage is indexed as its transpose.
template <bool transpose>
CUDA_CALLABLE inline int dense_index(int rows, int cols, int i, int j)
{
    return transpose ? (j * rows + i) : (i * cols + j);
}
#ifdef CPU

// On the CPU each logical thread runs alone, so a "block" is one thread and
// that thread computes the whole per-batch product.
const int kNumThreadsPerBlock = 1;

// C = A*B (or C += A*B when add=true), with op(A) of size m x p and op(B) of
// size p x n, row-major; t1/t2 select transposed indexing into A/B storage.
template <bool t1, bool t2, bool add>
CUDA_CALLABLE inline void dense_gemm_impl(int m, int n, int p, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
    for (int i=0; i < m; i++)
    {
        for (int j=0; j < n; ++j)
        {
            float sum = 0.0f;

            for (int k=0; k < p; ++k)
            {
                sum += A[dense_index<t1>(m, p, i, k)]*B[dense_index<t2>(p, n, k, j)];
            }

            if (add)
                C[i*n + j] += sum;
            else
                C[i*n + j] = sum;
        }
    }
}

#else

// On the GPU each batch entry is handled by one thread block of this size.
const int kNumThreadsPerBlock = 256;

// Same contract as the CPU variant, but the m*n output elements are strided
// across the threads of the block.
template <bool t1, bool t2, bool add>
CUDA_CALLABLE inline void dense_gemm_impl(int m, int n, int p, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
    // each thread in the block calculates an output (or more if output dim > block dim)
    for (int e=threadIdx.x; e < m*n; e += blockDim.x)
    {
        const int i=e/n;
        const int j=e%n;

        float sum = 0.0f;

        for (int k=0; k < p; ++k)
        {
            sum += A[dense_index<t1>(m, p, i, k)]*B[dense_index<t2>(p, n, k, j)];
        }

        if (add)
            C[i*n + j] += sum;
        else
            C[i*n + j] = sum;
    }
}

#endif
// Runtime-dispatch wrapper: maps the t1/t2 transpose flags (0 or 1) onto the
// statically specialized dense_gemm_impl instantiations.
template <bool add=false>
CUDA_CALLABLE inline void dense_gemm(int m, int n, int p, int t1, int t2, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
    if (t1 == 0 && t2 == 0)
        dense_gemm_impl<false, false, add>(m, n, p, A, B, C);
    else if (t1 == 0 && t2 == 1)
        dense_gemm_impl<false, true, add>(m, n, p, A, B, C);
    else if (t1 == 1 && t2 == 0)
        dense_gemm_impl<true, false, add>(m, n, p, A, B, C);
    else if (t1 == 1 && t2 == 1)
        dense_gemm_impl<true, true, add>(m, n, p, A, B, C);
}
// Batched gemm: entry `batch` uses per-batch dims m/n/p and offsets
// A_start/B_start/C_start into the packed A/B/C buffers.
template <bool add=false>
CUDA_CALLABLE inline void dense_gemm_batched(
    const int* __restrict__ m, const int* __restrict__ n, const int* __restrict__ p, int t1, int t2,
    const int* __restrict__ A_start, const int* __restrict__ B_start, const int* __restrict__ C_start,
    const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C)
{
    // on the CPU each thread computes the whole matrix multiply
    // on the GPU each block computes the multiply with one output per-thread
    const int batch = tid()/kNumThreadsPerBlock;

    dense_gemm<add>(m[batch], n[batch], p[batch], t1, t2, A+A_start[batch], B+B_start[batch], C+C_start[batch]);
}
// computes c = b^T*a*b, with a and b being stored in row-major layout
// NOTE(review): unimplemented stub — no callers visible in this file.
CUDA_CALLABLE inline void dense_quadratic()
{
}
// CUDA_CALLABLE inline void dense_chol(int n, const float* A, float* L)
// {
// // for each column
// for (int j=0; j < n; ++j)
// {
// for (int i=j; i < n; ++i)
// {
// L[dense_index(n, i, j)] = A[dense_index(n, i, j)];
// }
// for (int k = 0; k < j; ++k)
// {
// const float p = L[dense_index(n, j, k)];
// for (int i=j; i < n; ++i)
// {
// L[dense_index(n, i, j)] -= p*L[dense_index(n, i, k)];
// }
// }
// // scale
// const float d = L[dense_index(n, j, j)];
// const float s = 1.0f/sqrtf(d);
// for (int i=j; i < n; ++i)
// {
// L[dense_index(n, i, j)] *=s;
// }
// }
// }
// Cholesky factorization A + diag(regularization) = L*L^T for an n x n
// row-major matrix. Only the lower triangle of L is written; the strict upper
// triangle of L is left untouched. `regularization` supplies one additive
// diagonal term per row (e.g. joint armature).
void CUDA_CALLABLE inline dense_chol(int n, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L)
{
    // column-by-column (Cholesky–Banachiewicz) factorization
    for (int j=0; j < n; ++j)
    {
        // diagonal entry: L[j][j] = sqrt(A[j][j] + reg[j] - sum_k L[j][k]^2)
        float s = A[dense_index(n, j, j)] + regularization[j];

        for (int k=0; k < j; ++k)
        {
            float r = L[dense_index(n, j, k)];
            s -= r*r;
        }

        s = sqrtf(s);
        const float invS = 1.0f/s;

        L[dense_index(n, j, j)] = s;

        // sub-diagonal entries of column j
        for (int i=j+1; i < n; ++i)
        {
            s = A[dense_index(n, i, j)];

            for (int k=0; k < j; ++k)
            {
                s -= L[dense_index(n, i, k)]*L[dense_index(n, j, k)];
            }

            L[dense_index(n, i, j)] = s*invS;
        }
    }
}
// Batched Cholesky: thread b factors the b-th matrix of the packed batch.
// NOTE(review): the regularization offset is dim*b, which assumes every
// matrix in the batch has the same dimension.
void CUDA_CALLABLE inline dense_chol_batched(const int* __restrict__ A_start, const int* __restrict__ A_dim, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L)
{
    const int b = tid();

    const int dim = A_dim[b];
    const int base = A_start[b];

    dense_chol(dim, A + base, regularization + dim * b, L + base);
}
// Solves (L*L^T)x = b given the lower-triangular Cholesky factor L.
CUDA_CALLABLE inline void dense_subs(int n, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ x)
{
    // forward solve L*y = b, storing y in x
    for (int row = 0; row < n; ++row)
    {
        float acc = b[row];

        for (int col = 0; col < row; ++col)
            acc -= L[dense_index(n, row, col)] * x[col];

        x[row] = acc / L[dense_index(n, row, row)];
    }

    // backward solve L^T*x = y, in place
    for (int row = n - 1; row >= 0; --row)
    {
        float acc = x[row];

        for (int col = row + 1; col < n; ++col)
            acc -= L[dense_index(n, col, row)] * x[col];

        x[row] = acc / L[dense_index(n, row, row)];
    }
}
// Solves A x = b using the precomputed factor L. A and tmp are unused here;
// they are kept in the signature so the adjoint (adj_dense_solve) can mirror
// the argument list and accumulate into adj_A / reuse tmp.
CUDA_CALLABLE inline void dense_solve(int n, const float* __restrict__ A, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x)
{
    dense_subs(n, L, b, x);
}
// Batched solve: thread `batch` solves its system using the packed factors
// and right-hand sides (L shares A's offsets; b and x share b_start offsets).
CUDA_CALLABLE inline void dense_solve_batched(
    const int* __restrict__ b_start, const int* A_start, const int* A_dim,
    const float* __restrict__ A, const float* __restrict__ L,
    const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x)
{
    const int batch = tid();

    dense_solve(A_dim[batch], A + A_start[batch], L + A_start[batch], b + b_start[batch], NULL, x + b_start[batch]);
}
// Debug helper: prints the m x n row-major matrix in a MATLAB-style
// "name = [row1; row2; ...]" form.
CUDA_CALLABLE inline void print_matrix(const char* name, int m, int n, const float* data)
{
    printf("%s = [", name);

    for (int row = 0; row < m; ++row)
    {
        for (int col = 0; col < n; ++col)
            printf("%f ", data[dense_index(n, row, col)]);

        printf(";\n");
    }

    printf("]\n");
}
// adjoint methods

// Backward pass of dense_gemm for C = op(A)*op(B) with op(A) m x p and
// op(B) p x n: given adj_C = dL/dC, accumulates (add=true) dL/dA into adj_A
// and dL/dB into adj_B. The t1 branch handles A stored transposed.
// adj_m..adj_t2 and C are unused; they mirror the primal signature for the
// code generator.
CUDA_CALLABLE inline void adj_dense_gemm(
    int m, int n, int p, int t1, int t2, const float* A, const float* B, float* C,
    int adj_m, int adj_n, int adj_p, int adj_t1, int adj_t2, float* adj_A, float* adj_B, const float* adj_C)
{
    // print_matrix("A", m, p, A);
    // print_matrix("B", p, n, B);
    // printf("t1: %d t2: %d\n", t1, t2);

    if (t1)
    {
        // adj_A += op(B)*adj_C^T ; adj_B += op(A)^T*adj_C
        dense_gemm<true>(p, m, n, 0, 1, B, adj_C, adj_A);
        dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B);
    }
    else
    {
        // adj_A += adj_C*op(B)^T ; adj_B += op(A)^T*adj_C
        dense_gemm<true>(m, p, n, 0, int(!t2), adj_C, B, adj_A);
        dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B);
    }
}
// Batched adjoint gemm: each block (tid()/kNumThreadsPerBlock) back-propagates
// through its batch entry. The adj_* shape/offset arguments are unused and
// exist only to mirror the primal signature.
CUDA_CALLABLE inline void adj_dense_gemm_batched(
    const int* __restrict__ m, const int* __restrict__ n, const int* __restrict__ p, int t1, int t2,
    const int* __restrict__ A_start, const int* __restrict__ B_start, const int* __restrict__ C_start,
    const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C,
    // adj
    int* __restrict__ adj_m, int* __restrict__ adj_n, int* __restrict__ adj_p, int adj_t1, int adj_t2,
    int* __restrict__ adj_A_start, int* __restrict__ adj_B_start, int* __restrict__ adj_C_start,
    float* __restrict__ adj_A, float* __restrict__ adj_B, const float* __restrict__ adj_C)
{
    const int batch = tid()/kNumThreadsPerBlock;

    adj_dense_gemm(m[batch], n[batch], p[batch], t1, t2, A+A_start[batch], B+B_start[batch], C+C_start[batch],
                   0, 0, 0, 0, 0, adj_A+A_start[batch], adj_B+B_start[batch], adj_C+C_start[batch]);
}
// Adjoint of dense_chol: intentionally empty — gradients are propagated
// through the linear solve instead (see adj_dense_solve).
CUDA_CALLABLE inline void adj_dense_chol(
    int n, const float* A, const float* __restrict__ regularization, float* L,
    int adj_n, const float* adj_A, const float* __restrict__ adj_regularization, float* adj_L)
{
    // nop, use dense_solve to differentiate through (A^-1)b = x
}
// Adjoint of dense_chol_batched: intentionally empty — gradients are
// propagated through the linear solve instead (see adj_dense_solve).
CUDA_CALLABLE inline void adj_dense_chol_batched(
    const int* __restrict__ A_start, const int* __restrict__ A_dim, const float* __restrict__ A, const float* __restrict__ regularization, float* __restrict__ L,
    const int* __restrict__ adj_A_start, const int* __restrict__ adj_A_dim, const float* __restrict__ adj_A, const float* __restrict__ adj_regularization, float* __restrict__ adj_L)
{
    // nop, use dense_solve to differentiate through (A^-1)b = x
}
// Adjoint of dense_subs: intentionally empty — gradients are propagated
// through adj_dense_solve instead.
CUDA_CALLABLE inline void adj_dense_subs(
    int n, const float* L, const float* b, float* x,
    int adj_n, const float* adj_L, const float* adj_b, float* adj_x)
{
    // nop, use dense_solve to differentiate through (A^-1)b = x
}
// Backward pass of dense_solve for x = (L*L^T)^-1 b:
//   tmp   = (L*L^T)^-1 adj_x      (solved via dense_subs, symmetric system)
//   adj_b += tmp
//   adj_A += -tmp * x^T           (from d(A^-1 b)/dA = -A^-1 dA A^-1 b)
// adj_L is not written; gradients w.r.t. the matrix flow through adj_A.
CUDA_CALLABLE inline void adj_dense_solve(
    int n, const float* __restrict__ A, const float* __restrict__ L, const float* __restrict__ b, float* __restrict__ tmp, const float* __restrict__ x,
    int adj_n, float* __restrict__ adj_A, float* __restrict__ adj_L, float* __restrict__ adj_b, float* __restrict__ adj_tmp, const float* __restrict__ adj_x)
{
    // tmp must be cleared: dense_subs reads partially-written entries of its
    // output during substitution
    for (int i=0; i < n; ++i)
    {
        tmp[i] = 0.0f;
    }

    dense_subs(n, L, adj_x, tmp);

    for (int i=0; i < n; ++i)
    {
        adj_b[i] += tmp[i];
    }

    //dense_subs(n, L, adj_x, adj_b);

    // A* = -adj_b*x^T
    for (int i=0; i < n; ++i)
    {
        for (int j=0; j < n; ++j)
        {
            adj_A[dense_index(n, i, j)] += -tmp[i]*x[j];
        }
    }
}
// Batched adjoint solve: thread `batch` back-propagates through its system.
// Note the same tmp slice is passed for both the primal tmp and adj_tmp slots
// (adj_dense_solve overwrites it anyway); adj_tmp input is otherwise unused.
CUDA_CALLABLE inline void adj_dense_solve_batched(
    const int* __restrict__ b_start, const int* A_start, const int* A_dim,
    const float* __restrict__ A, const float* __restrict__ L,
    const float* __restrict__ b, float* __restrict__ tmp, float* __restrict__ x,
    // adj
    int* __restrict__ adj_b_start, int* __restrict__ adj_A_start, int* __restrict__ adj_A_dim,
    float* __restrict__ adj_A, float* __restrict__ adj_L,
    float* __restrict__ adj_b, float* __restrict__ adj_tmp, const float* __restrict__ adj_x)
{
    const int batch = tid();

    adj_dense_solve(A_dim[batch], A + A_start[batch], L + A_start[batch], b + b_start[batch], tmp + b_start[batch], x + b_start[batch],
                    0, adj_A + A_start[batch], adj_L + A_start[batch], adj_b + b_start[batch], tmp + b_start[batch], adj_x + b_start[batch]);
}
| 10,723 |
C
| 29.379603 | 202 | 0.531847 |
RoboticExplorationLab/Deep-ILC/dflex/dflex/render.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This optional module contains a built-in renderer for the USD data
format that can be used to visualize time-sampled simulation data.
Users should create a simulation model and integrator and periodically
call :func:`UsdRenderer.update()` to write time-sampled simulation data to the USD stage.
Example:
>>> # construct a new USD stage
>>> stage = Usd.Stage.CreateNew("my_stage.usda")
>>> renderer = df.render.UsdRenderer(model, stage)
>>>
>>> time = 0.0
>>>
>>> for i in range(100):
>>>
>>> # update simulation here
>>> # ....
>>>
>>> # update renderer
>>> stage.update(state, time)
>>> time += dt
>>>
>>> # write stage to file
>>> stage.Save()
Note:
You must have the Pixar USD bindings installed to use this module
please see https://developer.nvidia.com/usd to obtain precompiled
USD binaries and installation instructions.
"""
try:
from pxr import Usd, UsdGeom, Gf, Sdf
except ModuleNotFoundError:
print("No pxr package")
import dflex.sim
import dflex.util
import math
def _usd_add_xform(prim):
    """Reset a prim's xform op order and add translate/orient/scale ops.

    The ops are created in translate, orient, scale order so that
    `_usd_set_xform` can address them positionally via GetOrderedXformOps().

    Args:
        prim: A UsdGeom xformable prim (e.g. UsdGeom.Xform, UsdGeom.Sphere).
    """
    prim.ClearXformOpOrder()

    # The returned op handles are re-fetched later via GetOrderedXformOps(),
    # so there is no need to keep references here (previously they were bound
    # to unused locals).
    prim.AddTranslateOp()
    prim.AddOrientOp()
    prim.AddScaleOp()
def _usd_set_xform(xform, transform, scale, time):
    # Write a (position, quaternion) transform plus scale to the prim's
    # translate/orient/scale ops (created by _usd_add_xform) at `time`.
    xform_ops = xform.GetOrderedXformOps()

    pos = tuple(transform[0])
    rot = tuple(transform[1])

    # transform[1] is assumed to be (x, y, z, w); Gf.Quatf takes the real
    # part first — TODO(review): confirm against dflex's quaternion layout.
    xform_ops[0].Set(Gf.Vec3d(pos), time)
    xform_ops[1].Set(Gf.Quatf(rot[3], rot[0], rot[1], rot[2]), time)
    xform_ops[2].Set(Gf.Vec3d(scale), time)
# transforms a cylinder such that it connects the two points pos0, pos1
def _compute_segment_xform(pos0, pos1):
    # Returns (mid-point, orientation, scale) mapping a unit +Z-aligned
    # cylinder onto the segment pos0 -> pos1.
    # NOTE(review): divides by the segment length — coincident endpoints
    # would divide by zero.
    mid = (pos0 + pos1) * 0.5
    height = (pos1 - pos0).GetLength()
    dir = (pos1 - pos0) / height

    rot = Gf.Rotation()
    rot.SetRotateInto((0.0, 0.0, 1.0), Gf.Vec3d(dir))

    scale = Gf.Vec3f(1.0, 1.0, height)

    return (mid, Gf.Quath(rot.GetQuat()), scale)
class UsdRenderer:
"""A USD renderer
"""
def __init__(self, model: dflex.model.Model, stage):
    """Construct a UsdRenderer object

    Builds the static USD scene graph for the model: a point instancer for
    particles, an optional capsule instancer for springs, an optional cloth
    mesh, a ground plane, and one Xform + geometry prim per rigid body/shape.
    Time-varying data is written later by `update()`.

    Args:
        model: A simulation model
        stage (Usd.Stage): A USD stage (either in memory or on disk)
    """

    self.stage = stage
    self.model = model

    # drawing toggles
    self.draw_points = True
    self.draw_springs = False
    self.draw_triangles = False

    # rebuild /root from scratch if it already exists on the stage
    if (stage.GetPrimAtPath("/root")):
        stage.RemovePrim("/root")

    self.root = UsdGeom.Xform.Define(stage, '/root')

    # add sphere instancer for particles
    self.particle_instancer = UsdGeom.PointInstancer.Define(stage, self.root.GetPath().AppendChild("particle_instancer"))
    self.particle_instancer_sphere = UsdGeom.Sphere.Define(stage, self.particle_instancer.GetPath().AppendChild("sphere"))
    self.particle_instancer_sphere.GetRadiusAttr().Set(model.particle_radius)

    # all particle instances share prototype 0 (the sphere)
    self.particle_instancer.CreatePrototypesRel().SetTargets([self.particle_instancer_sphere.GetPath()])
    self.particle_instancer.CreateProtoIndicesAttr().Set([0] * model.particle_count)

    # add line instancer
    if (self.model.spring_count > 0):
        self.spring_instancer = UsdGeom.PointInstancer.Define(stage, self.root.GetPath().AppendChild("spring_instancer"))
        self.spring_instancer_cylinder = UsdGeom.Capsule.Define(stage, self.spring_instancer.GetPath().AppendChild("cylinder"))
        self.spring_instancer_cylinder.GetRadiusAttr().Set(0.01)

        self.spring_instancer.CreatePrototypesRel().SetTargets([self.spring_instancer_cylinder.GetPath()])
        self.spring_instancer.CreateProtoIndicesAttr().Set([0] * model.spring_count)

    self.stage.SetDefaultPrim(self.root.GetPrim())

    # time codes
    # NOTE(review): bare except silently ignores any failure setting time
    # codes (e.g. read-only layers) — presumably intentional best-effort.
    try:
        self.stage.SetStartTimeCode(0.0)
        self.stage.SetEndTimeCode(0.0)
        self.stage.SetTimeCodesPerSecond(1.0)
    except:
        pass

    # add dynamic cloth mesh
    if (model.tri_count > 0):

        self.cloth_mesh = UsdGeom.Mesh.Define(stage, self.root.GetPath().AppendChild("cloth"))

        self.cloth_remap = {}
        self.cloth_verts = []
        self.cloth_indices = []

        # USD needs a contiguous vertex buffer, use a dict to map from simulation indices->render indices
        indices = self.model.tri_indices.flatten().tolist()

        for i in indices:

            if i not in self.cloth_remap:

                # copy vertex
                new_index = len(self.cloth_verts)

                self.cloth_verts.append(self.model.particle_q[i].tolist())
                self.cloth_indices.append(new_index)

                self.cloth_remap[i] = new_index

            else:
                self.cloth_indices.append(self.cloth_remap[i])

        self.cloth_mesh.GetPointsAttr().Set(self.cloth_verts)
        self.cloth_mesh.GetFaceVertexIndicesAttr().Set(self.cloth_indices)
        self.cloth_mesh.GetFaceVertexCountsAttr().Set([3] * model.tri_count)

    else:
        self.cloth_mesh = None

    # built-in ground plane
    if (model.ground):

        size = 10.0

        mesh = UsdGeom.Mesh.Define(stage, self.root.GetPath().AppendChild("plane_0"))
        mesh.CreateDoubleSidedAttr().Set(True)

        points = ((-size, 0.0, -size), (size, 0.0, -size), (size, 0.0, size), (-size, 0.0, size))
        normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0))
        counts = (4, )
        indices = [0, 1, 2, 3]

        mesh.GetPointsAttr().Set(points)
        mesh.GetNormalsAttr().Set(normals)
        mesh.GetFaceVertexCountsAttr().Set(counts)
        mesh.GetFaceVertexIndicesAttr().Set(indices)

    # add rigid bodies xform root
    for b in range(model.link_count):

        xform = UsdGeom.Xform.Define(stage, self.root.GetPath().AppendChild("body_" + str(b)))
        _usd_add_xform(xform)

    # add rigid body shapes
    for s in range(model.shape_count):

        # parent the shape under its body's xform, or under /root for static shapes
        parent_path = self.root.GetPath()
        if model.shape_body[s] >= 0:
            parent_path = parent_path.AppendChild("body_" + str(model.shape_body[s].item()))

        geo_type = model.shape_geo_type[s].item()
        geo_scale = model.shape_geo_scale[s].tolist()
        geo_src = model.shape_geo_src[s]

        # shape transform in body frame
        X_bs = dflex.util.transform_expand(model.shape_transform[s].tolist())

        if (geo_type == dflex.sim.GEO_PLANE):

            # plane mesh
            size = 1000.0

            mesh = UsdGeom.Mesh.Define(stage, parent_path.AppendChild("plane_" + str(s)))
            mesh.CreateDoubleSidedAttr().Set(True)

            points = ((-size, 0.0, -size), (size, 0.0, -size), (size, 0.0, size), (-size, 0.0, size))
            normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0))
            counts = (4, )
            indices = [0, 1, 2, 3]

            mesh.GetPointsAttr().Set(points)
            mesh.GetNormalsAttr().Set(normals)
            mesh.GetFaceVertexCountsAttr().Set(counts)
            mesh.GetFaceVertexIndicesAttr().Set(indices)

        elif (geo_type == dflex.sim.GEO_SPHERE):

            mesh = UsdGeom.Sphere.Define(stage, parent_path.AppendChild("sphere_" + str(s)))
            mesh.GetRadiusAttr().Set(geo_scale[0])

            _usd_add_xform(mesh)
            _usd_set_xform(mesh, X_bs, (1.0, 1.0, 1.0), 0.0)

        elif (geo_type == dflex.sim.GEO_CAPSULE):
            mesh = UsdGeom.Capsule.Define(stage, parent_path.AppendChild("capsule_" + str(s)))
            mesh.GetRadiusAttr().Set(geo_scale[0])
            mesh.GetHeightAttr().Set(geo_scale[1] * 2.0)

            # geometry transform w.r.t shape, convert USD geometry to physics engine convention
            X_sg = dflex.util.transform((0.0, 0.0, 0.0), dflex.util.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi * 0.5))
            X_bg = dflex.util.transform_multiply(X_bs, X_sg)

            _usd_add_xform(mesh)
            _usd_set_xform(mesh, X_bg, (1.0, 1.0, 1.0), 0.0)

        elif (geo_type == dflex.sim.GEO_BOX):
            mesh = UsdGeom.Cube.Define(stage, parent_path.AppendChild("box_" + str(s)))
            #mesh.GetSizeAttr().Set((geo_scale[0], geo_scale[1], geo_scale[2]))

            _usd_add_xform(mesh)
            _usd_set_xform(mesh, X_bs, (geo_scale[0], geo_scale[1], geo_scale[2]), 0.0)

        elif (geo_type == dflex.sim.GEO_MESH):

            mesh = UsdGeom.Mesh.Define(stage, parent_path.AppendChild("mesh_" + str(s)))
            mesh.GetPointsAttr().Set(geo_src.vertices)
            mesh.GetFaceVertexIndicesAttr().Set(geo_src.indices)
            mesh.GetFaceVertexCountsAttr().Set([3] * int(len(geo_src.indices) / 3))

            _usd_add_xform(mesh)
            _usd_set_xform(mesh, X_bs, (geo_scale[0], geo_scale[1], geo_scale[2]), 0.0)

        elif (geo_type == dflex.sim.GEO_SDF):
            # signed-distance-field shapes are not rendered
            pass
def update(self, state: dflex.model.State, time: float):
    """Update the USD stage with latest simulation data

    Args:
        state: Current state of the simulation
        time: The current time to update at in seconds
    """

    # extend the stage's animated range to cover this sample; best-effort,
    # narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate
    try:
        self.stage.SetEndTimeCode(time)
    except Exception:
        pass

    # particles: convert positions to a plain Python list once — the cloth
    # and spring sections below reuse `particle_q`
    if self.model.particle_count:
        particle_q = state.particle_q.tolist()
        particle_orientations = [Gf.Quath(1.0, 0.0, 0.0, 0.0)] * self.model.particle_count

        self.particle_instancer.GetPositionsAttr().Set(particle_q, time)
        self.particle_instancer.GetOrientationsAttr().Set(particle_orientations, time)

    # update cloth (maps simulation particle indices to mesh vertex indices)
    if (self.cloth_mesh):
        for k, v in self.cloth_remap.items():
            self.cloth_verts[v] = particle_q[k]

        self.cloth_mesh.GetPointsAttr().Set(self.cloth_verts, time)

    # update springs
    # NOTE(review): relies on `particle_q` computed above, i.e. a model with
    # springs is assumed to also have particles — confirm this invariant
    if (self.model.spring_count > 0):
        line_positions = []
        line_rotations = []
        line_scales = []

        for i in range(self.model.spring_count):
            index0 = self.model.spring_indices[i * 2 + 0]
            index1 = self.model.spring_indices[i * 2 + 1]

            pos0 = particle_q[index0]
            pos1 = particle_q[index1]

            # capsule transform that spans the two endpoints
            (pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))

            line_positions.append(pos)
            line_rotations.append(rot)
            line_scales.append(scale)

        self.spring_instancer.GetPositionsAttr().Set(line_positions, time)
        self.spring_instancer.GetOrientationsAttr().Set(line_rotations, time)
        self.spring_instancer.GetScalesAttr().Set(line_scales, time)

    # rigid bodies: write each body_<i> xform from the state's spatial transforms
    for b in range(self.model.link_count):
        node = UsdGeom.Xform(self.stage.GetPrimAtPath(self.root.GetPath().AppendChild("body_" + str(b))))

        # unpack rigid spatial_transform
        X_sb = dflex.util.transform_expand(state.body_X_sc[b].tolist())

        _usd_set_xform(node, X_sb, (1.0, 1.0, 1.0), time)
def add_sphere(self, pos: tuple, radius: float, name: str, time: float=0.0):
    """Debug helper to add a sphere for visualization

    Args:
        pos: The position of the sphere
        radius: The radius of the sphere
        name: A name for the USD prim on the stage
        time: The time code to author the attributes at (default 0.0)
    """

    prim_path = self.root.GetPath().AppendChild(name)

    # reuse an existing prim with this name if present, otherwise define it
    prim = UsdGeom.Sphere.Get(self.stage, prim_path)
    if not prim:
        prim = UsdGeom.Sphere.Define(self.stage, prim_path)

    prim.GetRadiusAttr().Set(radius, time)

    # author the translation through a single matrix xform op
    xform = Gf.Matrix4d()
    xform.SetIdentity()
    xform.SetTranslateOnly(Gf.Vec3d(pos))

    prim.MakeMatrixXform().Set(xform, time)
def add_box(self, pos: tuple, extents: float, name: str, time: float=0.0):
    """Debug helper to add a box for visualization

    Args:
        pos: The position of the box
        extents: Scale applied to the unit cube via Gf.Matrix4d.SetScale
            (presumably the half-extents — TODO confirm expected type,
            scalar vs. 3-component)
        name: A name for the USD prim on the stage
        time: The time code to author the attributes at (default 0.0)
    """

    sphere_path = self.root.GetPath().AppendChild(name)

    # reuse an existing prim with this name if present, otherwise define it
    sphere = UsdGeom.Cube.Get(self.stage, sphere_path)
    if not sphere:
        sphere = UsdGeom.Cube.Define(self.stage, sphere_path)

    #sphere.GetSizeAttr().Set((extents[0]*2.0, extents[1]*2.0, extents[2]*2.0), time)

    # build a scale + translation matrix and author it as a single matrix op
    mat = Gf.Matrix4d()
    mat.SetIdentity()
    mat.SetScale(extents)
    mat.SetTranslateOnly(Gf.Vec3d(pos))

    op = sphere.MakeMatrixXform()
    op.Set(mat, time)
def add_mesh(self, name: str, path: str, transform, scale, time: float):
    """Add (or update) a referenced mesh prim under /root and author its transform.

    Args:
        name: Name of the xform prim created under /root
        path: Asset path the prim will reference
        transform: Transform to author on the prim
        scale: Scale to author on the prim
        time: The time code to author the transform at
    """

    prim_path = "/root/" + name

    prim = UsdGeom.Xform.Get(self.stage, prim_path)
    if not prim:
        # first use of this name: create the xform and attach the reference
        prim = UsdGeom.Xform.Define(self.stage, prim_path)
        prim.GetPrim().GetReferences().AddReference(path)
        _usd_add_xform(prim)

    # author the latest transform
    _usd_set_xform(prim, transform, scale, time)
def add_line_list(self, vertices, color, time, name, radius):
    """Debug helper to add a line list as a set of capsules

    Args:
        vertices: Flat list of segment endpoints, two per line
        color: The color of the line (currently unused; see commented-out code)
        time: The time to update at
        name: A name for the point instancer prim created under the root
        radius: Radius of the capsule prototype (only used at creation)
    """

    num_lines = int(len(vertices) / 2)
    if num_lines < 1:
        return

    # look up (or lazily create) the point instancer for this line list
    instancer_path = self.root.GetPath().AppendChild(name)
    instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path)

    if not instancer:
        instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path)

        instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule"))
        instancer_capsule.GetRadiusAttr().Set(radius)

        instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()])
        instancer.CreatePrimvar("displayColor", Sdf.ValueTypeNames.Float3Array, "constant", 1)

    # one (position, orientation, scale) triple per segment
    segment_xforms = [
        _compute_segment_xform(Gf.Vec3f(vertices[i * 2 + 0]), Gf.Vec3f(vertices[i * 2 + 1]))
        for i in range(num_lines)
    ]

    line_positions = [p for (p, r, s) in segment_xforms]
    line_rotations = [r for (p, r, s) in segment_xforms]
    line_scales = [s for (p, r, s) in segment_xforms]
    # per-segment colors were disabled in the original:
    # line_colors = [Gf.Vec3f((float(i)/num_lines, 0.5, 0.5)) for i in range(num_lines)]

    instancer.GetPositionsAttr().Set(line_positions, time)
    instancer.GetOrientationsAttr().Set(line_rotations, time)
    instancer.GetScalesAttr().Set(line_scales, time)
    instancer.GetProtoIndicesAttr().Set([0] * num_lines, time)

    # instancer.GetPrimvar("displayColor").Set(line_colors, time)
def add_line_strip(self, vertices: dflex.sim.List[dflex.sim.Vec3], color: tuple, time: float, name: str, radius: float=0.01):
    """Debug helper to add a line strip as a connected list of capsules

    Args:
        vertices: The vertices of the line-strip (annotation presumes
            dflex.sim re-exports List/Vec3 — TODO confirm)
        color: The color of the line, applied to the prototype's displayColor
        time: The time to update at
        name: A name for the point instancer prim created under the root
        radius: Radius of the capsule prototype (only used at creation)
    """

    # one capsule per segment between consecutive vertices
    num_lines = int(len(vertices)-1)

    if (num_lines < 1):
        return

    # look up rope point instancer
    instancer_path = self.root.GetPath().AppendChild(name)
    instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path)

    if not instancer:
        # lazily create the instancer with a single capsule prototype
        instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path)

        instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule"))
        instancer_capsule.GetRadiusAttr().Set(radius)

        instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()])

    line_positions = []
    line_rotations = []
    line_scales = []

    for i in range(num_lines):
        pos0 = vertices[i]
        pos1 = vertices[i+1]

        # transform mapping the unit capsule onto this segment
        (pos, rot, scale) = _compute_segment_xform(Gf.Vec3f(pos0), Gf.Vec3f(pos1))

        line_positions.append(pos)
        line_rotations.append(rot)
        line_scales.append(scale)

    instancer.GetPositionsAttr().Set(line_positions, time)
    instancer.GetOrientationsAttr().Set(line_rotations, time)
    instancer.GetScalesAttr().Set(line_scales, time)
    instancer.GetProtoIndicesAttr().Set([0] * num_lines, time)

    # author the strip's color on the capsule prototype
    instancer_capsule = UsdGeom.Capsule.Get(self.stage, instancer.GetPath().AppendChild("capsule"))
    instancer_capsule.GetDisplayColorAttr().Set([Gf.Vec3f(color)], time)
| 17,760 |
Python
| 34.808468 | 131 | 0.586768 |
RoboticExplorationLab/Deep-ILC/dflex/dflex/model.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""A module for building simulation models and state.
"""
import math
import torch
import numpy as np
from typing import Tuple
from typing import List
# type aliases used throughout this module (plain Python lists of floats)
Vec3 = List[float]
Vec4 = List[float]
Quat = List[float]
Mat33 = List[float]

# a spatial transform as a (position, quaternion) pair
Transform = Tuple[Vec3, Quat]

from dflex.util import *

# shape geometry types
GEO_SPHERE = 0
GEO_BOX = 1
GEO_CAPSULE = 2
GEO_MESH = 3
GEO_SDF = 4
GEO_PLANE = 5
GEO_NONE = 6

# body joint types
JOINT_PRISMATIC = 0
JOINT_REVOLUTE = 1
JOINT_BALL = 2
JOINT_FIXED = 3
JOINT_FREE = 4
class Mesh:
    """Describes a triangle collision mesh for simulation

    Attributes:
        vertices (List[Vec3]): Mesh vertices
        indices (List[int]): Mesh indices
        I (Mat33): Inertia tensor of the mesh assuming density of 1.0 (around the center of mass)
        mass (float): The total mass of the body assuming density of 1.0
        com (Vec3): The center of mass of the body
    """

    def __init__(self, vertices: List[List[float]], indices: List[int]):
        """Construct a Mesh object from a triangle mesh

        The mesh center of mass and inertia tensor will automatically be
        calculated using a density of 1.0. This computation is only valid
        if the mesh is closed (two-manifold) with consistently outward-wound
        triangles.

        Args:
            vertices: List of vertices in the mesh, 3 floats per-vertex
            indices: List of triangle indices, 3 per-element
        """

        self.vertices = vertices
        self.indices = indices

        # compute com and inertia (using density=1.0)
        com = np.mean(vertices, 0)

        num_tris = int(len(indices) / 3)

        # compute signed inertia for each tetrahedron
        # formed with the interior point, using an order-2
        # quadrature: https://www.sciencedirect.com/science/article/pii/S0377042712001604#br000040
        weight = 0.25
        alpha = math.sqrt(5.0) / 5.0

        I = np.zeros((3, 3))
        mass = 0.0

        for i in range(num_tris):
            p = np.array(vertices[indices[i * 3 + 0]])
            q = np.array(vertices[indices[i * 3 + 1]])
            r = np.array(vertices[indices[i * 3 + 2]])

            mid = (com + p + q + r) / 4.0

            pcom = p - com
            qcom = q - com
            rcom = r - com

            # edge matrix of the tetrahedron (com, p, q, r); columns are the
            # edges from com (np.matrix is deprecated, use a plain ndarray)
            Dm = np.array((pcom, qcom, rcom)).T
            volume = np.linalg.det(Dm) / 6.0

            # quadrature points lie on the line between the
            # centroid and each vertex of the tetrahedron
            quads = (mid + (p - mid) * alpha, mid + (q - mid) * alpha, mid + (r - mid) * alpha, mid + (com - mid) * alpha)

            for j in range(4):
                # displacement of quadrature point from COM
                d = quads[j] - com

                # point-mass inertia contribution: m * (|d|^2 E - d d^T)
                I += weight * volume * (np.dot(d, d) * np.eye(3, 3) - np.outer(d, d))
                mass += weight * volume

        self.I = I
        self.mass = mass
        self.com = com
class State:
    """The State object holds all *time-varying* data for a model.

    Time-varying data includes particle positions, velocities, rigid body states, and
    anything that is output from the integrator as derived data, e.g.: forces.

    The exact attributes depend on the contents of the model. State objects should
    generally be created using the :func:`Model.state()` function.

    Attributes:
        particle_q (torch.Tensor): Tensor of particle positions
        particle_qd (torch.Tensor): Tensor of particle velocities
        joint_q (torch.Tensor): Tensor of joint coordinates
        joint_qd (torch.Tensor): Tensor of joint velocities
        joint_act (torch.Tensor): Tensor of joint actuation values
    """

    def __init__(self):
        # counts are filled in by Model.state(); tensor attributes are
        # attached dynamically depending on the model's contents
        self.particle_count = 0
        self.link_count = 0

    def flatten(self):
        """Returns a list of Tensors stored by the state

        This function is intended to be used internal-only but can be used to obtain
        a set of all tensors owned by the state.
        """

        # every tensor-valued attribute, in attribute insertion order
        return [value for value in self.__dict__.values() if torch.is_tensor(value)]
class Model:
    """Holds the definition of the simulation model

    This class holds the non-time varying description of the system, i.e.:
    all geometry, constraints, and parameters used to describe the simulation.

    Attributes:
        particle_q (torch.Tensor): Particle positions, shape [particle_count, 3], float
        particle_qd (torch.Tensor): Particle velocities, shape [particle_count, 3], float
        particle_mass (torch.Tensor): Particle mass, shape [particle_count], float
        particle_inv_mass (torch.Tensor): Particle inverse mass, shape [particle_count], float
        shape_transform (torch.Tensor): Rigid shape transforms, shape [shape_count, 7], float
        shape_body (torch.Tensor): Rigid shape body index, shape [shape_count], int
        shape_geo_type (torch.Tensor): Rigid shape geometry type, [shape_count], int
        shape_geo_src (torch.Tensor): Rigid shape geometry source, shape [shape_count], int
        shape_geo_scale (torch.Tensor): Rigid shape geometry scale, shape [shape_count, 3], float
        shape_materials (torch.Tensor): Rigid shape contact materials, shape [shape_count, 4], float
        spring_indices (torch.Tensor): Particle spring indices, shape [spring_count*2], int
        spring_rest_length (torch.Tensor): Particle spring rest length, shape [spring_count], float
        spring_stiffness (torch.Tensor): Particle spring stiffness, shape [spring_count], float
        spring_damping (torch.Tensor): Particle spring damping, shape [spring_count], float
        spring_control (torch.Tensor): Particle spring activation, shape [spring_count], float
        tri_indices (torch.Tensor): Triangle element indices, shape [tri_count*3], int
        tri_poses (torch.Tensor): Triangle element rest pose, shape [tri_count, 2, 2], float
        tri_activations (torch.Tensor): Triangle element activations, shape [tri_count], float
        edge_indices (torch.Tensor): Bending edge indices, shape [edge_count*2], int
        edge_rest_angle (torch.Tensor): Bending edge rest angle, shape [edge_count], float
        tet_indices (torch.Tensor): Tetrahedral element indices, shape [tet_count*4], int
        tet_poses (torch.Tensor): Tetrahedral rest poses, shape [tet_count, 3, 3], float
        tet_activations (torch.Tensor): Tetrahedral volumetric activations, shape [tet_count], float
        tet_materials (torch.Tensor): Tetrahedral elastic parameters in form :math:`k_{mu}, k_{lambda}, k_{damp}`, shape [tet_count, 3]
        body_X_cm (torch.Tensor): Rigid body center of mass (in local frame), shape [link_count, 7], float
        body_I_m (torch.Tensor): Rigid body inertia tensor (relative to COM), shape [link_count, 3, 3], float
        articulation_start (torch.Tensor): Articulation start offset, shape [num_articulations], int
        joint_q (torch.Tensor): Joint coordinate, shape [joint_coord_count], float
        joint_qd (torch.Tensor): Joint velocity, shape [joint_dof_count], float
        joint_type (torch.Tensor): Joint type, shape [joint_count], int
        joint_parent (torch.Tensor): Joint parent, shape [joint_count], int
        joint_X_pj (torch.Tensor): Joint transform in parent frame, shape [joint_count, 7], float
        joint_X_cm (torch.Tensor): Joint mass frame in child frame, shape [joint_count, 7], float
        joint_axis (torch.Tensor): Joint axis in child frame, shape [joint_count, 3], float
        joint_q_start (torch.Tensor): Joint coordinate offset, shape [joint_count], int
        joint_qd_start (torch.Tensor): Joint velocity offset, shape [joint_count], int
        joint_armature (torch.Tensor): Armature for each joint, shape [joint_count], float
        joint_target_ke (torch.Tensor): Joint stiffness, shape [joint_count], float
        joint_target_kd (torch.Tensor): Joint damping, shape [joint_count], float
        joint_target (torch.Tensor): Joint target, shape [joint_count], float
        particle_count (int): Total number of particles in the system
        joint_coord_count (int): Total number of joint coordinates in the system
        joint_dof_count (int): Total number of joint dofs in the system
        link_count (int): Total number of links in the system
        shape_count (int): Total number of shapes in the system
        tri_count (int): Total number of triangles in the system
        tet_count (int): Total number of tetrahedra in the system
        edge_count (int): Total number of edges in the system
        spring_count (int): Total number of springs in the system
        contact_count (int): Total number of contacts in the system

    Note:
        It is strongly recommended to use the ModelBuilder to construct a simulation rather
        than creating your own Model object directly, however it is possible to do so if
        desired.
    """

    def __init__(self, adapter):
        # adapter is the torch device all model tensors live on

        # particles
        self.particle_q = None
        self.particle_qd = None
        self.particle_mass = None
        self.particle_inv_mass = None

        # rigid shapes
        self.shape_transform = None
        self.shape_body = None
        self.shape_geo_type = None
        self.shape_geo_src = None
        self.shape_geo_scale = None
        self.shape_materials = None

        # springs
        self.spring_indices = None
        self.spring_rest_length = None
        self.spring_stiffness = None
        self.spring_damping = None
        self.spring_control = None

        # triangle (cloth) elements
        self.tri_indices = None
        self.tri_poses = None
        self.tri_activations = None

        # bending edges
        self.edge_indices = None
        self.edge_rest_angle = None

        # tetrahedral (FEM) elements
        self.tet_indices = None
        self.tet_poses = None
        self.tet_activations = None
        self.tet_materials = None

        # rigid bodies / articulations
        self.body_X_cm = None
        self.body_I_m = None

        self.articulation_start = None

        self.joint_q = None
        self.joint_qd = None
        self.joint_type = None
        self.joint_parent = None
        self.joint_X_pj = None
        self.joint_X_cm = None
        self.joint_axis = None
        self.joint_q_start = None
        self.joint_qd_start = None
        self.joint_armature = None
        self.joint_target_ke = None
        self.joint_target_kd = None
        self.joint_target = None

        # element counts
        self.particle_count = 0
        self.joint_coord_count = 0
        self.joint_dof_count = 0
        self.link_count = 0
        self.shape_count = 0
        self.tri_count = 0
        self.tet_count = 0
        self.edge_count = 0
        self.spring_count = 0
        self.contact_count = 0

        self.gravity = torch.tensor((0.0, -9.8, 0.0), dtype=torch.float32, device=adapter)

        # default contact material parameters
        self.contact_distance = 0.1
        self.contact_ke = 1.e+3
        self.contact_kd = 0.0
        self.contact_kf = 1.e+3
        self.contact_mu = 0.5

        # default triangle element parameters
        self.tri_ke = 100.0
        self.tri_ka = 100.0
        self.tri_kd = 10.0
        self.tri_kb = 100.0
        self.tri_drag = 0.0
        self.tri_lift = 0.0

        # default bending edge parameters
        self.edge_ke = 100.0
        self.edge_kd = 0.0

        self.particle_radius = 0.1

        self.adapter = adapter

    def state(self) -> State:
        """Returns a state object for the model

        The returned state will be initialized with the initial configuration given in
        the model description.
        """

        s = State()
        s.particle_count = self.particle_count
        s.link_count = self.link_count

        #--------------------------------
        # dynamic state (input, output)

        # particles
        if (self.particle_count):
            s.particle_q = torch.clone(self.particle_q)
            s.particle_qd = torch.clone(self.particle_qd)

        # articulations
        if (self.link_count):
            s.joint_q = torch.clone(self.joint_q)
            s.joint_qd = torch.clone(self.joint_qd)
            s.joint_act = torch.zeros_like(self.joint_qd)

            s.joint_q.requires_grad = True
            s.joint_qd.requires_grad = True

        #--------------------------------
        # derived state (output only)

        if (self.particle_count):
            s.particle_f = torch.empty_like(self.particle_qd, requires_grad=True)

        if (self.link_count):
            # joints
            s.joint_qdd = torch.empty_like(self.joint_qd, requires_grad=True)
            s.joint_tau = torch.empty_like(self.joint_qd, requires_grad=True)
            s.joint_S_s = torch.empty((self.joint_dof_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)

            # derived rigid body data (maximal coordinates)
            s.body_X_sc = torch.empty((self.link_count, 7), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_X_sm = torch.empty((self.link_count, 7), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_I_s = torch.empty((self.link_count, 6, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_v_s = torch.empty((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_a_s = torch.empty((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
            s.body_f_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)

            #s.body_ft_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)
            #s.body_f_ext_s = torch.zeros((self.link_count, 6), dtype=torch.float32, device=self.adapter, requires_grad=True)

        return s

    def alloc_mass_matrix(self):
        # NOTE(review): M_size/J_size/H_size are not set in __init__ — they are
        # presumably assigned externally (e.g. by the builder/integrator) before
        # this is called; verify against callers
        if (self.link_count):
            # system matrices
            self.M = torch.zeros(self.M_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
            self.J = torch.zeros(self.J_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
            self.P = torch.empty(self.J_size, dtype=torch.float32, device=self.adapter, requires_grad=True)
            self.H = torch.empty(self.H_size, dtype=torch.float32, device=self.adapter, requires_grad=True)

            # zero since only upper triangle is set which can trigger NaN detection
            self.L = torch.zeros(self.H_size, dtype=torch.float32, device=self.adapter, requires_grad=True)

    def flatten(self):
        """Returns a list of Tensors stored by the model

        This function is intended to be used internal-only but can be used to obtain
        a set of all tensors owned by the model.
        """

        tensors = []

        # build a list of all tensor attributes
        for attr, value in self.__dict__.items():
            if (torch.is_tensor(value)):
                tensors.append(value)

        return tensors

    # builds contacts
    def collide(self, state: State):
        """Constructs a set of contacts between rigid bodies and ground

        This method performs collision detection between rigid body vertices in the scene and updates
        the model's set of contacts stored as the following attributes:

        * **contact_body0**: Tensor of ints with first rigid body index
        * **contact_body1**: Tensor of ints with second rigid body index (currently always -1 to indicate ground)
        * **contact_point0**: Tensor of Vec3 representing contact point in local frame of body0
        * **contact_dist**: Tensor of float values representing the distance to maintain
        * **contact_material**: Tensor contact material indices

        Args:
            state: The state of the simulation at which to perform collision detection

        Note:
            Currently this method uses an 'all pairs' approach to contact generation that is
            state independent. In the future this will change and will create a node in
            the computational graph to propagate gradients as a function of state.

        Todo:
            Only ground-plane collision is currently implemented. Since the ground is static
            it is acceptable to call this method once at initialization time.
        """

        body0 = []
        body1 = []
        point = []
        dist = []
        mat = []

        # append one candidate contact: body pair, contact point transformed
        # into the body frame, contact distance, and material (shape) index
        def add_contact(b0, b1, t, p0, d, m):
            body0.append(b0)
            body1.append(b1)
            point.append(transform_point(t, np.array(p0)))
            dist.append(d)
            mat.append(m)

        for i in range(self.shape_count):
            # transform from shape to body
            X_bs = transform_expand(self.shape_transform[i].tolist())

            geo_type = self.shape_geo_type[i].item()

            if (geo_type == GEO_SPHERE):
                # single contact at the sphere center with distance = radius
                radius = self.shape_geo_scale[i][0].item()

                add_contact(self.shape_body[i], -1, X_bs, (0.0, 0.0, 0.0), radius, i)

            elif (geo_type == GEO_CAPSULE):
                # one contact at each end of the capsule axis
                radius = self.shape_geo_scale[i][0].item()
                half_width = self.shape_geo_scale[i][1].item()

                add_contact(self.shape_body[i], -1, X_bs, (-half_width, 0.0, 0.0), radius, i)
                add_contact(self.shape_body[i], -1, X_bs, (half_width, 0.0, 0.0), radius, i)

            elif (geo_type == GEO_BOX):
                # one contact at each of the eight box corners
                edges = self.shape_geo_scale[i].tolist()

                add_contact(self.shape_body[i], -1, X_bs, (-edges[0], -edges[1], -edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, ( edges[0], -edges[1], -edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, (-edges[0], edges[1], -edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, (edges[0], edges[1], -edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, (-edges[0], -edges[1], edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, (edges[0], -edges[1], edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, (-edges[0], edges[1], edges[2]), 0.0, i)
                add_contact(self.shape_body[i], -1, X_bs, (edges[0], edges[1], edges[2]), 0.0, i)

            elif (geo_type == GEO_MESH):
                # one contact per scaled mesh vertex
                mesh = self.shape_geo_src[i]
                scale = self.shape_geo_scale[i]

                for v in mesh.vertices:
                    p = (v[0] * scale[0], v[1] * scale[1], v[2] * scale[2])

                    add_contact(self.shape_body[i], -1, X_bs, p, 0.0, i)

        # send to torch
        self.contact_body0 = torch.tensor(body0, dtype=torch.int32, device=self.adapter)
        self.contact_body1 = torch.tensor(body1, dtype=torch.int32, device=self.adapter)
        self.contact_point0 = torch.tensor(point, dtype=torch.float32, device=self.adapter)
        self.contact_dist = torch.tensor(dist, dtype=torch.float32, device=self.adapter)
        self.contact_material = torch.tensor(mat, dtype=torch.int32, device=self.adapter)

        self.contact_count = len(body0)
class ModelBuilder:
"""A helper class for building simulation models at runtime.
Use the ModelBuilder to construct a simulation scene. The ModelBuilder
is independent of PyTorch and builds the scene representation using
standard Python data structures, this means it is not differentiable. Once :func:`finalize()`
has been called the ModelBuilder transfers all data to Torch tensors and returns
an object that may be used for simulation.
Example:
>>> import dflex as df
>>>
>>> builder = df.ModelBuilder()
>>>
>>> # anchor point (zero mass)
>>> builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
>>>
>>> # build chain
>>> for i in range(1,10):
>>> builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
>>> builder.add_spring(i-1, i, 1.e+3, 0.0, 0)
>>>
>>> # create model
>>> model = builder.finalize()
Note:
It is strongly recommended to use the ModelBuilder to construct a simulation rather
than creating your own Model object directly, however it is possible to do so if
desired.
"""
def __init__(self):
    """Initializes an empty builder; all description arrays start empty."""

    # particles
    self.particle_q = []
    self.particle_qd = []
    self.particle_mass = []

    # shapes
    self.shape_transform = []
    self.shape_body = []
    self.shape_geo_type = []
    self.shape_geo_scale = []
    self.shape_geo_src = []
    self.shape_materials = []

    # geometry
    self.geo_meshes = []
    self.geo_sdfs = []

    # springs
    self.spring_indices = []
    self.spring_rest_length = []
    self.spring_stiffness = []
    self.spring_damping = []
    self.spring_control = []

    # triangles
    self.tri_indices = []
    self.tri_poses = []
    self.tri_activations = []

    # edges (bending)
    self.edge_indices = []
    self.edge_rest_angle = []

    # tetrahedra
    self.tet_indices = []
    self.tet_poses = []
    self.tet_activations = []
    self.tet_materials = []

    # muscles
    self.muscle_start = []
    self.muscle_params = []
    self.muscle_activation = []
    self.muscle_links = []
    self.muscle_points = []

    # rigid bodies
    self.joint_parent = []          # index of the parent body (constant)
    self.joint_child = []           # index of the child body (constant)
    self.joint_axis = []            # joint axis in child joint frame (constant)
    self.joint_X_pj = []            # frame of joint in parent (constant)
    self.joint_X_cm = []            # frame of child com (in child coordinates) (constant)

    self.joint_q_start = []         # joint offset in the q array
    self.joint_qd_start = []        # joint offset in the qd array
    self.joint_type = []
    self.joint_armature = []
    self.joint_target_ke = []
    self.joint_target_kd = []
    self.joint_target = []
    self.joint_limit_lower = []
    self.joint_limit_upper = []
    self.joint_limit_ke = []
    self.joint_limit_kd = []

    self.joint_q = []               # generalized coordinates (input)
    self.joint_qd = []              # generalized velocities (input)
    self.joint_qdd = []             # generalized accelerations (id,fd)
    self.joint_tau = []             # generalized actuation (input)
    self.joint_u = []               # generalized total torque (fd)

    self.body_mass = []
    self.body_inertia = []
    self.body_com = []

    # articulation start offsets into the joint arrays
    self.articulation_start = []
def add_articulation(self) -> int:
    """Add an articulation object, all subsequently added links (see: :func:`add_link`) will belong to this articulation object.

    Calling this method multiple times 'closes' any previous articulations and begins a new one.

    Returns:
        The index of the articulation
    """

    # a new articulation begins at the next joint to be added
    start = len(self.joint_type)
    self.articulation_start.append(start)

    # index of the articulation just created
    return len(self.articulation_start) - 1
# rigids, register a rigid body and return its index.
def add_link(
    self,
    parent : int,
    X_pj : Transform,
    axis : Vec3,
    type : int,
    armature: float=0.01,
    stiffness: float=0.0,
    damping: float=0.0,
    limit_lower: float=-1.e+3,
    limit_upper: float=1.e+3,
    limit_ke: float=100.0,
    limit_kd: float=10.0,
    com: Vec3=np.zeros(3),
    I_m: Mat33=np.zeros((3, 3)),
    m: float=0.0) -> int:
    """Adds a rigid body to the model.

    Args:
        parent: The index of the parent body
        X_pj: The location of the joint in the parent's local frame connecting this body
        axis: The joint axis
        type: The type of joint, should be one of: JOINT_PRISMATIC, JOINT_REVOLUTE, JOINT_BALL, JOINT_FIXED, or JOINT_FREE
        armature: Additional inertia around the joint axis
        stiffness: Spring stiffness that attempts to return joint to zero position
        damping: Spring damping that attempts to remove joint velocity
        limit_lower: Lower position limit for the joint dof(s)
        limit_upper: Upper position limit for the joint dof(s)
        limit_ke: Stiffness of the joint limit
        limit_kd: Damping of the joint limit
        com: The center of mass of the body w.r.t its origin
        I_m: The 3x3 inertia tensor of the body (specified relative to the center of mass)
        m: The mass of the body

    Returns:
        The index of the body in the model

    Note:
        If the mass (m) is zero then the body is treated as kinematic with no dynamics
    """

    # joint data common to all joint types
    self.joint_type.append(type)
    self.joint_axis.append(np.array(axis))
    self.joint_parent.append(parent)
    self.joint_X_pj.append(X_pj)

    self.joint_target_ke.append(stiffness)
    self.joint_target_kd.append(damping)

    self.joint_limit_ke.append(limit_ke)
    self.joint_limit_kd.append(limit_kd)

    # offsets of this joint's coords/dofs in the flattened q / qd arrays
    self.joint_q_start.append(len(self.joint_q))
    self.joint_qd_start.append(len(self.joint_qd))

    if type in (JOINT_PRISMATIC, JOINT_REVOLUTE):
        # single linear / angular dof
        self.joint_q.append(0.0)
        self.joint_qd.append(0.0)
        self.joint_target.append(0.0)
        self.joint_armature.append(armature)
        self.joint_limit_lower.append(limit_lower)
        self.joint_limit_upper.append(limit_upper)

    elif type == JOINT_BALL:
        # coords: identity quaternion (x, y, z, w); dofs: angular velocity
        self.joint_q.extend([0.0, 0.0, 0.0, 1.0])
        self.joint_qd.extend([0.0] * 3)

        # pd targets, one per quaternion component
        self.joint_target.extend([0.0] * 4)

        self.joint_armature.extend([armature] * 3)

        # limits per quaternion component; w component is unconstrained
        self.joint_limit_lower.extend([limit_lower] * 3 + [0.0])
        self.joint_limit_upper.extend([limit_upper] * 3 + [0.0])

    elif type == JOINT_FIXED:
        # no degrees of freedom
        pass

    elif type == JOINT_FREE:
        # coords: translation (x, y, z) + identity quaternion (x, y, z, w)
        self.joint_q.extend([0.0, 0.0, 0.0])
        self.joint_q.extend([0.0, 0.0, 0.0, 1.0])

        # note armature for free joints should always be zero, better to modify the body inertia directly
        self.joint_armature.extend([0.0] * 6)

        self.joint_target.extend([0.0] * 7)

        # free joints are unconstrained
        self.joint_limit_lower.extend([0.0] * 7)
        self.joint_limit_upper.extend([0.0] * 7)

        # joint velocities (6-dof spatial twist)
        self.joint_qd.extend([0.0] * 6)

    # body (dynamics) data
    # NOTE(review): the com / I_m / m arguments are not stored here — zeros are
    # appended instead; presumably mass properties are accumulated later from
    # attached shapes, verify against _add_shape
    self.body_inertia.append(np.zeros((3, 3)))
    self.body_mass.append(0.0)
    self.body_com.append(np.zeros(3))

    # return index of body
    return len(self.joint_type) - 1
# muscles
def add_muscle(self, links: List[int], positions: List[Vec3], f0: float, lm: float, lt: float, lmax: float, pen: float) -> float:
    """Adds a muscle-tendon activation unit

    Args:
        links: A list of link indices for each waypoint
        positions: A list of positions of each waypoint in the link's local frame
        f0: Force scaling
        lm: Muscle length
        lt: Tendon length
        lmax: Maximally efficient muscle length
        pen: Pennation angle

    Returns:
        The index of the muscle in the model
    """

    # record where this muscle's waypoints begin in the flattened arrays
    self.muscle_start.append(len(self.muscle_links))
    self.muscle_params.append((f0, lm, lt, lmax, pen))
    self.muscle_activation.append(0.0)

    # append the per-waypoint link/position pairs
    for i, link in enumerate(links):
        self.muscle_links.append(link)
        self.muscle_points.append(positions[i])

    # return the index of the muscle
    return len(self.muscle_start) - 1
# shapes
def add_shape_plane(self, plane: Vec4=(0.0, 1.0, 0.0, 0.0), ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
    """Adds a plane collision shape

    Args:
        plane: The plane equation in form a*x + b*y + c*z + d = 0
        ke: The contact elastic stiffness
        kd: The contact damping stiffness
        kf: The contact friction stiffness
        mu: The coefficient of friction
    """

    # planes are static: body index -1, zero density, and the plane
    # coefficients are stored in the geometry-scale slot
    self._add_shape(-1, (0.0, 0.0, 0.0), (0.0, 0.0, 0.0), GEO_PLANE, plane, None, 0.0, ke, kd, kf, mu)
def add_shape_sphere(self, body, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), radius: float=1.0, density: float=1000.0, ke: float=1.e+5, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
"""Adds a sphere collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the sphere
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_SPHERE, (radius, 0.0, 0.0, 0.0), None, density, ke, kd, kf, mu)
def add_shape_box(self,
body : int,
pos: Vec3=(0.0, 0.0, 0.0),
rot: Quat=(0.0, 0.0, 0.0, 1.0),
hx: float=0.5,
hy: float=0.5,
hz: float=0.5,
density: float=1000.0,
ke: float=1.e+5,
kd: float=1000.0,
kf: float=1000.0,
mu: float=0.5):
"""Adds a box collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
hx: The half-extents along the x-axis
hy: The half-extents along the y-axis
hz: The half-extents along the z-axis
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_BOX, (hx, hy, hz, 0.0), None, density, ke, kd, kf, mu)
def add_shape_capsule(self,
body: int,
pos: Vec3=(0.0, 0.0, 0.0),
rot: Quat=(0.0, 0.0, 0.0, 1.0),
radius: float=1.0,
half_width: float=0.5,
density: float=1000.0,
ke: float=1.e+5,
kd: float=1000.0,
kf: float=1000.0,
mu: float=0.5):
"""Adds a capsule collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
radius: The radius of the capsule
half_width: The half length of the center cylinder along the x-axis
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_CAPSULE, (radius, half_width, 0.0, 0.0), None, density, ke, kd, kf, mu)
def add_shape_mesh(self,
body: int,
pos: Vec3=(0.0, 0.0, 0.0),
rot: Quat=(0.0, 0.0, 0.0, 1.0),
mesh: Mesh=None,
scale: Vec3=(1.0, 1.0, 1.0),
density: float=1000.0,
ke: float=1.e+5,
kd: float=1000.0,
kf: float=1000.0,
mu: float=0.5):
"""Adds a triangle mesh collision shape to a link.
Args:
body: The index of the parent link this shape belongs to
pos: The location of the shape with respect to the parent frame
rot: The rotation of the shape with respect to the parent frame
mesh: The mesh object
scale: Scale to use for the collider
density: The density of the shape
ke: The contact elastic stiffness
kd: The contact damping stiffness
kf: The contact friction stiffness
mu: The coefficient of friction
"""
self._add_shape(body, pos, rot, GEO_MESH, (scale[0], scale[1], scale[2], 0.0), mesh, density, ke, kd, kf, mu)
    def _add_shape(self, body , pos, rot, type, scale, src, density, ke, kd, kf, mu):
        # Internal helper shared by the add_shape_*() methods.
        # Records the shape's parent body, local transform, geometry type,
        # packed geometry parameters (`scale`), source object (mesh or None)
        # and contact material, then folds the shape's mass and inertia into
        # the parent body.
        self.shape_body.append(body)
        self.shape_transform.append(transform(pos, rot))
        self.shape_geo_type.append(type)
        self.shape_geo_scale.append((scale[0], scale[1], scale[2]))
        self.shape_geo_src.append(src)
        self.shape_materials.append((ke, kd, kf, mu))

        # accumulate this shape's mass/inertia onto the parent body
        # (no-op when body == -1, i.e. a world-fixed shape)
        (m, I) = self._compute_shape_mass(type, scale, src, density)
        self._update_body_mass(body, m, I, np.array(pos), np.array(rot))
# particles
def add_particle(self, pos : Vec3, vel : Vec3, mass : float) -> int:
"""Adds a single particle to the model
Args:
pos: The initial position of the particle
vel: The initial velocity of the particle
mass: The mass of the particle
Note:
Set the mass equal to zero to create a 'kinematic' particle that does is not subject to dynamics.
Returns:
The index of the particle in the system
"""
self.particle_q.append(pos)
self.particle_qd.append(vel)
self.particle_mass.append(mass)
return len(self.particle_q) - 1
def add_spring(self, i : int, j, ke : float, kd : float, control: float):
"""Adds a spring between two particles in the system
Args:
i: The index of the first particle
j: The index of the second particle
ke: The elastic stiffness of the spring
kd: The damping stiffness of the spring
control: The actuation level of the spring
Note:
The spring is created with a rest-length based on the distance
between the particles in their initial configuration.
"""
self.spring_indices.append(i)
self.spring_indices.append(j)
self.spring_stiffness.append(ke)
self.spring_damping.append(kd)
self.spring_control.append(control)
# compute rest length
p = self.particle_q[i]
q = self.particle_q[j]
delta = np.subtract(p, q)
l = np.sqrt(np.dot(delta, delta))
self.spring_rest_length.append(l)
def add_triangle(self, i : int, j : int, k : int) -> float:
"""Adds a trianglular FEM element between three particles in the system.
Triangles are modeled as viscoelastic elements with elastic stiffness and damping
Parameters specfied on the model. See model.tri_ke, model.tri_kd.
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
Return:
The area of the triangle
Note:
The triangle is created with a rest-length based on the distance
between the particles in their initial configuration.
Todo:
* Expose elastic paramters on a per-element basis
"""
# compute basis for 2D rest pose
p = np.array(self.particle_q[i])
q = np.array(self.particle_q[j])
r = np.array(self.particle_q[k])
qp = q - p
rp = r - p
# construct basis aligned with the triangle
n = normalize(np.cross(qp, rp))
e1 = normalize(qp)
e2 = normalize(np.cross(n, e1))
R = np.matrix((e1, e2))
M = np.matrix((qp, rp))
D = R * M.T
inv_D = np.linalg.inv(D)
area = np.linalg.det(D) / 2.0
if (area < 0.0):
print("inverted triangle element")
self.tri_indices.append((i, j, k))
self.tri_poses.append(inv_D.tolist())
self.tri_activations.append(0.0)
return area
def add_tetrahedron(self, i: int, j: int, k: int, l: int, k_mu: float=1.e+3, k_lambda: float=1.e+3, k_damp: float=0.0) -> float:
"""Adds a tetrahedral FEM element between four particles in the system.
Tetrahdera are modeled as viscoelastic elements with a NeoHookean energy
density based on [Smith et al. 2018].
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
l: The index of the fourth particle
k_mu: The first elastic Lame parameter
k_lambda: The second elastic Lame parameter
k_damp: The element's damping stiffness
Return:
The volume of the tetrahedron
Note:
The tetrahedron is created with a rest-pose based on the particle's initial configruation
"""
# compute basis for 2D rest pose
p = np.array(self.particle_q[i])
q = np.array(self.particle_q[j])
r = np.array(self.particle_q[k])
s = np.array(self.particle_q[l])
qp = q - p
rp = r - p
sp = s - p
Dm = np.matrix((qp, rp, sp)).T
volume = np.linalg.det(Dm) / 6.0
if (volume <= 0.0):
print("inverted tetrahedral element")
else:
inv_Dm = np.linalg.inv(Dm)
self.tet_indices.append((i, j, k, l))
self.tet_poses.append(inv_Dm.tolist())
self.tet_activations.append(0.0)
self.tet_materials.append((k_mu, k_lambda, k_damp))
return volume
def add_edge(self, i: int, j: int, k: int, l: int, rest: float=None):
"""Adds a bending edge element between four particles in the system.
Bending elements are designed to be between two connected triangles. Then
bending energy is based of [Bridson et al. 2002]. Bending stiffness is controlled
by the `model.tri_kb` parameter.
Args:
i: The index of the first particle
j: The index of the second particle
k: The index of the third particle
l: The index of the fourth particle
rest: The rest angle across the edge in radians, if not specified it will be computed
Note:
The edge lies between the particles indexed by 'k' and 'l' parameters with the opposing
vertices indexed by 'i' and 'j'. This defines two connected triangles with counter clockwise
winding: (i, k, l), (j, l, k).
"""
# compute rest angle
if (rest == None):
x1 = np.array(self.particle_q[i])
x2 = np.array(self.particle_q[j])
x3 = np.array(self.particle_q[k])
x4 = np.array(self.particle_q[l])
n1 = normalize(np.cross(x3 - x1, x4 - x1))
n2 = normalize(np.cross(x4 - x2, x3 - x2))
e = normalize(x4 - x3)
d = np.clip(np.dot(n2, n1), -1.0, 1.0)
angle = math.acos(d)
sign = np.sign(np.dot(np.cross(n2, n1), e))
rest = angle * sign
self.edge_indices.append((i, j, k, l))
self.edge_rest_angle.append(rest)
    def add_cloth_grid(self,
                       pos: Vec3,
                       rot: Quat,
                       vel: Vec3,
                       dim_x: int,
                       dim_y: int,
                       cell_x: float,
                       cell_y: float,
                       mass: float,
                       reverse_winding: bool=False,
                       fix_left: bool=False,
                       fix_right: bool=False,
                       fix_top: bool=False,
                       fix_bottom: bool=False):
        """Helper to create a regular planar cloth grid

        Creates a rectangular grid of particles with FEM triangles and bending
        elements automatically.

        Args:
            pos: The position of the cloth in world space
            rot: The orientation of the cloth in world space
            vel: The velocity of the cloth in world space
            dim_x: The number of rectangular cells along the x-axis
            dim_y: The number of rectangular cells along the y-axis
            cell_x: The width of each cell in the x-direction
            cell_y: The width of each cell in the y-direction
            mass: The mass of each particle
            reverse_winding: Flip the winding of the mesh
            fix_left: Make the left-most edge of particles kinematic (fixed in place)
            fix_right: Make the right-most edge of particles kinematic
            fix_top: Make the top-most edge of particles kinematic
            fix_bottom: Make the bottom-most edge of particles kinematic
        """
        # map a 2D grid coordinate onto a flat particle index
        def grid_index(x, y, dim_x):
            return y * dim_x + x

        # record offsets so indices can be made relative to existing geometry
        start_vertex = len(self.particle_q)
        start_tri = len(self.tri_indices)

        for y in range(0, dim_y + 1):
            for x in range(0, dim_x + 1):

                g = np.array((x * cell_x, y * cell_y, 0.0))
                p = quat_rotate(rot, g) + pos
                m = mass

                # zero mass pins a particle in place (kinematic)
                if (x == 0 and fix_left):
                    m = 0.0
                elif (x == dim_x and fix_right):
                    m = 0.0
                elif (y == 0 and fix_bottom):
                    m = 0.0
                elif (y == dim_y and fix_top):
                    m = 0.0

                self.add_particle(p, vel, m)

                # every interior grid corner closes a quad -> two triangles
                if (x > 0 and y > 0):

                    if (reverse_winding):
                        tri1 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
                                start_vertex + grid_index(x, y - 1, dim_x + 1),
                                start_vertex + grid_index(x, y, dim_x + 1))

                        tri2 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
                                start_vertex + grid_index(x, y, dim_x + 1),
                                start_vertex + grid_index(x - 1, y, dim_x + 1))

                        self.add_triangle(*tri1)
                        self.add_triangle(*tri2)

                    else:
                        tri1 = (start_vertex + grid_index(x - 1, y - 1, dim_x + 1),
                                start_vertex + grid_index(x, y - 1, dim_x + 1),
                                start_vertex + grid_index(x - 1, y, dim_x + 1))

                        tri2 = (start_vertex + grid_index(x, y - 1, dim_x + 1),
                                start_vertex + grid_index(x, y, dim_x + 1),
                                start_vertex + grid_index(x - 1, y, dim_x + 1))

                        self.add_triangle(*tri1)
                        self.add_triangle(*tri2)

        end_vertex = len(self.particle_q)
        end_tri = len(self.tri_indices)

        # bending constraints, could create these explicitly for a grid but this
        # is a good test of the adjacency structure
        adj = MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri)

        for k, e in adj.edges.items():

            # skip open edges
            if (e.f0 == -1 or e.f1 == -1):
                continue

            self.add_edge(e.o0, e.o1, e.v0, e.v1)        # opposite 0, opposite 1, vertex 0, vertex 1
    def add_cloth_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, edge_callback=None, face_callback=None):
        """Helper to create a cloth model from a regular triangle mesh

        Creates one FEM triangle element and one bending element for every face
        and edge in the input triangle mesh

        Args:
            pos: The position of the cloth in world space
            rot: The orientation of the cloth in world space
            scale: Uniform scale applied to vertices before transforming to world space
            vel: The velocity of the cloth in world space
            vertices: A list of vertex positions
            indices: A list of triangle indices, 3 entries per-face
            density: The density per-area of the mesh
            edge_callback: A user callback when an edge is created
            face_callback: A user callback when a face is created

        Note:
            The mesh should be two manifold.
        """
        num_tris = int(len(indices) / 3)

        start_vertex = len(self.particle_q)
        start_tri = len(self.tri_indices)

        # particles are created massless; mass is accumulated below from the
        # rest area of the adjacent triangles
        for i, v in enumerate(vertices):

            p = quat_rotate(rot, v * scale) + pos

            self.add_particle(p, vel, 0.0)

        # triangles
        for t in range(num_tris):

            i = start_vertex + indices[t * 3 + 0]
            j = start_vertex + indices[t * 3 + 1]
            k = start_vertex + indices[t * 3 + 2]

            if (face_callback):
                face_callback(i, j, k)

            area = self.add_triangle(i, j, k)

            # add area fraction to particles
            if (area > 0.0):

                self.particle_mass[i] += density * area / 3.0
                self.particle_mass[j] += density * area / 3.0
                self.particle_mass[k] += density * area / 3.0

        end_vertex = len(self.particle_q)
        end_tri = len(self.tri_indices)

        adj = MeshAdjacency(self.tri_indices[start_tri:end_tri], end_tri - start_tri)

        # bend constraints: one per interior edge shared by two faces
        for k, e in adj.edges.items():

            # skip open edges
            if (e.f0 == -1 or e.f1 == -1):
                continue

            if (edge_callback):
                edge_callback(e.f0, e.f1)

            self.add_edge(e.o0, e.o1, e.v0, e.v1)
    def add_soft_grid(self,
                      pos: Vec3,
                      rot: Quat,
                      vel: Vec3,
                      dim_x: int,
                      dim_y: int,
                      dim_z: int,
                      cell_x: float,
                      cell_y: float,
                      cell_z: float,
                      density: float,
                      k_mu: float,
                      k_lambda: float,
                      k_damp: float,
                      fix_left: bool=False,
                      fix_right: bool=False,
                      fix_top: bool=False,
                      fix_bottom: bool=False):
        """Helper to create a rectangular tetrahedral FEM grid

        Creates a regular grid of FEM tetrahedra and surface triangles. Useful for example
        to create beams and sheets. Each hexahedral cell is decomposed into 5
        tetrahedral elements.

        Args:
            pos: The position of the solid in world space
            rot: The orientation of the solid in world space
            vel: The velocity of the solid in world space
            dim_x: The number of rectangular cells along the x-axis
            dim_y: The number of rectangular cells along the y-axis
            dim_z: The number of rectangular cells along the z-axis
            cell_x: The width of each cell in the x-direction
            cell_y: The width of each cell in the y-direction
            cell_z: The width of each cell in the z-direction
            density: The density of each particle
            k_mu: The first elastic Lame parameter
            k_lambda: The second elastic Lame parameter
            k_damp: The damping stiffness
            fix_left: Make the left-most edge of particles kinematic (fixed in place)
            fix_right: Make the right-most edge of particles kinematic
            fix_top: Make the top-most edge of particles kinematic
            fix_bottom: Make the bottom-most edge of particles kinematic
        """
        start_vertex = len(self.particle_q)

        # per-particle mass from one cell's volume
        # (NOTE(review): not divided among the 8 particles of a cell)
        mass = cell_x * cell_y * cell_z * density

        for z in range(dim_z + 1):
            for y in range(dim_y + 1):
                for x in range(dim_x + 1):

                    v = np.array((x * cell_x, y * cell_y, z * cell_z))
                    m = mass

                    # zero mass pins a particle in place (kinematic)
                    if (fix_left and x == 0):
                        m = 0.0

                    if (fix_right and x == dim_x):
                        m = 0.0

                    if (fix_top and y == dim_y):
                        m = 0.0

                    if (fix_bottom and y == 0):
                        m = 0.0

                    p = quat_rotate(rot, v) + pos

                    self.add_particle(p, vel, m)

        # dict of open faces: interior faces are inserted twice and cancel out,
        # leaving only the boundary surface
        faces = {}

        def add_face(i: int, j: int, k: int):
            key = tuple(sorted((i, j, k)))

            if key not in faces:
                faces[key] = (i, j, k)
            else:
                del faces[key]

        def add_tet(i: int, j: int, k: int, l: int):
            self.add_tetrahedron(i, j, k, l, k_mu, k_lambda, k_damp)

            add_face(i, k, j)
            add_face(j, k, l)
            add_face(i, j, l)
            add_face(i, l, k)

        def grid_index(x, y, z):
            return (dim_x + 1) * (dim_y + 1) * z + (dim_x + 1) * y + x

        for z in range(dim_z):
            for y in range(dim_y):
                for x in range(dim_x):

                    # the eight corners of this hexahedral cell
                    v0 = grid_index(x, y, z) + start_vertex
                    v1 = grid_index(x + 1, y, z) + start_vertex
                    v2 = grid_index(x + 1, y, z + 1) + start_vertex
                    v3 = grid_index(x, y, z + 1) + start_vertex
                    v4 = grid_index(x, y + 1, z) + start_vertex
                    v5 = grid_index(x + 1, y + 1, z) + start_vertex
                    v6 = grid_index(x + 1, y + 1, z + 1) + start_vertex
                    v7 = grid_index(x, y + 1, z + 1) + start_vertex

                    # alternate the 5-tet decomposition on a checkerboard so
                    # neighbouring cells' diagonals line up
                    if (((x & 1) ^ (y & 1) ^ (z & 1))):

                        add_tet(v0, v1, v4, v3)
                        add_tet(v2, v3, v6, v1)
                        add_tet(v5, v4, v1, v6)
                        add_tet(v7, v6, v3, v4)
                        add_tet(v4, v1, v6, v3)

                    else:

                        add_tet(v1, v2, v5, v0)
                        add_tet(v3, v0, v7, v2)
                        add_tet(v4, v7, v0, v5)
                        add_tet(v6, v5, v2, v7)
                        add_tet(v5, v2, v7, v0)

        # add surface triangles for the remaining (boundary) faces
        for k, v in faces.items():
            self.add_triangle(v[0], v[1], v[2])
    def add_soft_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, k_mu: float, k_lambda: float, k_damp: float):
        """Helper to create a tetrahedral model from an input tetrahedral mesh

        Args:
            pos: The position of the solid in world space
            rot: The orientation of the solid in world space
            scale: Uniform scale applied to vertices before transforming to world space
            vel: The velocity of the solid in world space
            vertices: A list of vertex positions
            indices: A list of tetrahedron indices, 4 entries per-element
            density: The density per-area of the mesh
            k_mu: The first elastic Lame parameter
            k_lambda: The second elastic Lame parameter
            k_damp: The damping stiffness
        """
        num_tets = int(len(indices) / 4)

        start_vertex = len(self.particle_q)
        start_tri = len(self.tri_indices)

        # dict of open faces: interior faces are inserted twice and cancel out,
        # leaving only the boundary surface
        faces = {}

        def add_face(i, j, k):
            key = tuple(sorted((i, j, k)))

            if key not in faces:
                faces[key] = (i, j, k)
            else:
                del faces[key]

        # add particles (massless; mass is accumulated from tet volumes below)
        for v in vertices:

            p = quat_rotate(rot, v * scale) + pos

            self.add_particle(p, vel, 0.0)

        # add tetrahedra
        for t in range(num_tets):

            v0 = start_vertex + indices[t * 4 + 0]
            v1 = start_vertex + indices[t * 4 + 1]
            v2 = start_vertex + indices[t * 4 + 2]
            v3 = start_vertex + indices[t * 4 + 3]

            volume = self.add_tetrahedron(v0, v1, v2, v3, k_mu, k_lambda, k_damp)

            # distribute volume fraction to particles
            if (volume > 0.0):

                self.particle_mass[v0] += density * volume / 4.0
                self.particle_mass[v1] += density * volume / 4.0
                self.particle_mass[v2] += density * volume / 4.0
                self.particle_mass[v3] += density * volume / 4.0

            # build open faces
            add_face(v0, v2, v1)
            add_face(v1, v2, v3)
            add_face(v0, v1, v3)
            add_face(v0, v3, v2)

        # add surface triangles; degenerate boundary faces can make the
        # rest-pose matrix singular, so those are skipped
        for k, v in faces.items():
            try:
                self.add_triangle(v[0], v[1], v[2])
            except np.linalg.LinAlgError:
                continue
def compute_sphere_inertia(self, density: float, r: float) -> tuple:
"""Helper to compute mass and inertia of a sphere
Args:
density: The sphere density
r: The sphere radius
Returns:
A tuple of (mass, inertia) with inertia specified around the origin
"""
v = 4.0 / 3.0 * math.pi * r * r * r
m = density * v
Ia = 2.0 / 5.0 * m * r * r
I = np.array([[Ia, 0.0, 0.0], [0.0, Ia, 0.0], [0.0, 0.0, Ia]])
return (m, I)
def compute_capsule_inertia(self, density: float, r: float, l: float) -> tuple:
"""Helper to compute mass and inertia of a capsule
Args:
density: The capsule density
r: The capsule radius
l: The capsule length (full width of the interior cylinder)
Returns:
A tuple of (mass, inertia) with inertia specified around the origin
"""
ms = density * (4.0 / 3.0) * math.pi * r * r * r
mc = density * math.pi * r * r * l
# total mass
m = ms + mc
# adapted from ODE
Ia = mc * (0.25 * r * r + (1.0 / 12.0) * l * l) + ms * (0.4 * r * r + 0.375 * r * l + 0.25 * l * l)
Ib = (mc * 0.5 + ms * 0.4) * r * r
I = np.array([[Ib, 0.0, 0.0], [0.0, Ia, 0.0], [0.0, 0.0, Ia]])
return (m, I)
def compute_box_inertia(self, density: float, w: float, h: float, d: float) -> tuple:
"""Helper to compute mass and inertia of a box
Args:
density: The box density
w: The box width along the x-axis
h: The box height along the y-axis
d: The box depth along the z-axis
Returns:
A tuple of (mass, inertia) with inertia specified around the origin
"""
v = w * h * d
m = density * v
Ia = 1.0 / 12.0 * m * (h * h + d * d)
Ib = 1.0 / 12.0 * m * (w * w + d * d)
Ic = 1.0 / 12.0 * m * (w * w + h * h)
I = np.array([[Ia, 0.0, 0.0], [0.0, Ib, 0.0], [0.0, 0.0, Ic]])
return (m, I)
def _compute_shape_mass(self, type, scale, src, density):
if density == 0: # zero density means fixed
return 0, np.zeros((3, 3))
if (type == GEO_SPHERE):
return self.compute_sphere_inertia(density, scale[0])
elif (type == GEO_BOX):
return self.compute_box_inertia(density, scale[0] * 2.0, scale[1] * 2.0, scale[2] * 2.0)
elif (type == GEO_CAPSULE):
return self.compute_capsule_inertia(density, scale[0], scale[1] * 2.0)
elif (type == GEO_MESH):
#todo: non-uniform scale of inertia tensor
s = scale[0] # eventually want to compute moment of inertia for mesh.
return (density * src.mass * s * s * s, density * src.I * s * s * s * s * s)
# incrementally updates rigid body mass with additional mass and inertia expressed at a local to the body
    def _update_body_mass(self, i, m, I, p, q):
        # Incrementally folds a new shape's mass `m` and inertia `I`
        # (expressed at local position `p`, rotation `q`) into body i's
        # aggregate mass, inertia and centre of mass.
        # A body index of -1 denotes a world-fixed shape: nothing to update.
        if (i == -1):
            return

        # find new COM
        new_mass = self.body_mass[i] + m

        if new_mass == 0.0:    # no mass
            return

        # mass-weighted average of the existing COM and the shape position
        new_com = (self.body_com[i] * self.body_mass[i] + p * m) / new_mass

        # shift inertia to new COM
        com_offset = new_com - self.body_com[i]
        shape_offset = new_com - p

        new_inertia = transform_inertia(self.body_mass[i], self.body_inertia[i], com_offset, quat_identity()) + transform_inertia(
            m, I, shape_offset, q)

        self.body_mass[i] = new_mass
        self.body_inertia[i] = new_inertia
        self.body_com[i] = new_com
# returns a (model, state) pair given the description
    def finalize(self, adapter: str) -> Model:
        """Convert this builder object to a concrete model for simulation.

        After building simulation elements this method should be called to transfer
        all data to PyTorch tensors ready for simulation.

        Args:
            adapter: The simulation adapter to use, e.g.: 'cpu', 'cuda'

        Returns:
            A model object.

        Note:
            Finalization mutates the builder: sentinel entries are appended to
            ``muscle_start``, ``joint_q_start``, ``joint_qd_start`` and
            ``articulation_start`` to 'close' those offset arrays, so calling
            finalize() twice on the same builder produces inconsistent data.
        """
        # construct particle inv masses
        # (zero mass maps to zero inverse mass, i.e. a kinematic particle)
        particle_inv_mass = []

        for m in self.particle_mass:
            if (m > 0.0):
                particle_inv_mass.append(1.0 / m)
            else:
                particle_inv_mass.append(0.0)

        #-------------------------------------
        # construct Model (non-time varying) data

        m = Model(adapter)

        #---------------------
        # particles

        # state (initial)
        m.particle_q = torch.tensor(self.particle_q, dtype=torch.float32, device=adapter)
        m.particle_qd = torch.tensor(self.particle_qd, dtype=torch.float32, device=adapter)

        # model
        m.particle_mass = torch.tensor(self.particle_mass, dtype=torch.float32, device=adapter)
        m.particle_inv_mass = torch.tensor(particle_inv_mass, dtype=torch.float32, device=adapter)

        #---------------------
        # collision geometry

        m.shape_transform = torch.tensor(np.array(transform_flatten_list(self.shape_transform)), dtype=torch.float32, device=adapter)
        m.shape_body = torch.tensor(self.shape_body, dtype=torch.int32, device=adapter)
        m.shape_geo_type = torch.tensor(self.shape_geo_type, dtype=torch.int32, device=adapter)
        m.shape_geo_src = self.shape_geo_src
        m.shape_geo_scale = torch.tensor(self.shape_geo_scale, dtype=torch.float32, device=adapter)
        m.shape_materials = torch.tensor(self.shape_materials, dtype=torch.float32, device=adapter)

        #---------------------
        # springs

        m.spring_indices = torch.tensor(self.spring_indices, dtype=torch.int32, device=adapter)
        m.spring_rest_length = torch.tensor(self.spring_rest_length, dtype=torch.float32, device=adapter)
        m.spring_stiffness = torch.tensor(self.spring_stiffness, dtype=torch.float32, device=adapter)
        m.spring_damping = torch.tensor(self.spring_damping, dtype=torch.float32, device=adapter)
        m.spring_control = torch.tensor(self.spring_control, dtype=torch.float32, device=adapter)

        #---------------------
        # triangles

        m.tri_indices = torch.tensor(self.tri_indices, dtype=torch.int32, device=adapter)
        m.tri_poses = torch.tensor(self.tri_poses, dtype=torch.float32, device=adapter)
        m.tri_activations = torch.tensor(self.tri_activations, dtype=torch.float32, device=adapter)

        #---------------------
        # edges

        m.edge_indices = torch.tensor(self.edge_indices, dtype=torch.int32, device=adapter)
        m.edge_rest_angle = torch.tensor(self.edge_rest_angle, dtype=torch.float32, device=adapter)

        #---------------------
        # tetrahedra

        m.tet_indices = torch.tensor(self.tet_indices, dtype=torch.int32, device=adapter)
        m.tet_poses = torch.tensor(self.tet_poses, dtype=torch.float32, device=adapter)
        m.tet_activations = torch.tensor(self.tet_activations, dtype=torch.float32, device=adapter)
        m.tet_materials = torch.tensor(self.tet_materials, dtype=torch.float32, device=adapter)

        #-----------------------
        # muscles

        muscle_count = len(self.muscle_start)

        # close the muscle waypoint indices
        self.muscle_start.append(len(self.muscle_links))

        m.muscle_start = torch.tensor(self.muscle_start, dtype=torch.int32, device=adapter)
        m.muscle_params = torch.tensor(self.muscle_params, dtype=torch.float32, device=adapter)
        m.muscle_links = torch.tensor(self.muscle_links, dtype=torch.int32, device=adapter)
        m.muscle_points = torch.tensor(np.array(self.muscle_points), dtype=torch.float32, device=adapter)
        m.muscle_activation = torch.tensor(self.muscle_activation, dtype=torch.float32, device=adapter)

        #--------------------------------------
        # articulations

        # build 6x6 spatial inertia and COM transform
        body_X_cm = []
        body_I_m = []

        for i in range(len(self.body_inertia)):
            body_I_m.append(spatial_matrix_from_inertia(self.body_inertia[i], self.body_mass[i]))
            body_X_cm.append(transform(self.body_com[i], quat_identity()))

        m.body_I_m = torch.tensor(body_I_m, dtype=torch.float32, device=adapter)

        articulation_count = len(self.articulation_start)
        joint_coord_count = len(self.joint_q)
        joint_dof_count = len(self.joint_qd)

        # 'close' the start index arrays with a sentinel value
        self.joint_q_start.append(len(self.joint_q))
        self.joint_qd_start.append(len(self.joint_qd))
        self.articulation_start.append(len(self.joint_type))

        # calculate total size and offsets of Jacobian and mass matrices for entire system
        m.J_size = 0
        m.M_size = 0
        m.H_size = 0

        articulation_J_start = []
        articulation_M_start = []
        articulation_H_start = []

        articulation_M_rows = []
        articulation_H_rows = []
        articulation_J_rows = []
        articulation_J_cols = []

        articulation_dof_start = []
        articulation_coord_start = []

        for i in range(articulation_count):
            # joint / coordinate / dof ranges owned by this articulation
            first_joint = self.articulation_start[i]
            last_joint = self.articulation_start[i+1]

            first_coord = self.joint_q_start[first_joint]
            last_coord = self.joint_q_start[last_joint]

            first_dof = self.joint_qd_start[first_joint]
            last_dof = self.joint_qd_start[last_joint]

            joint_count = last_joint-first_joint
            dof_count = last_dof-first_dof
            coord_count = last_coord-first_coord

            articulation_J_start.append(m.J_size)
            articulation_M_start.append(m.M_size)
            articulation_H_start.append(m.H_size)
            articulation_dof_start.append(first_dof)
            articulation_coord_start.append(first_coord)

            # bit of data duplication here, but will leave it as such for clarity
            articulation_M_rows.append(joint_count*6)
            articulation_H_rows.append(dof_count)
            articulation_J_rows.append(joint_count*6)
            articulation_J_cols.append(dof_count)

            # J is (6*joints x dofs), M is (6*joints)^2, H is dofs^2
            m.J_size += 6*joint_count*dof_count
            m.M_size += 6*joint_count*6*joint_count
            m.H_size += dof_count*dof_count

        m.articulation_joint_start = torch.tensor(self.articulation_start, dtype=torch.int32, device=adapter)

        # matrix offsets for batched gemm
        m.articulation_J_start = torch.tensor(articulation_J_start, dtype=torch.int32, device=adapter)
        m.articulation_M_start = torch.tensor(articulation_M_start, dtype=torch.int32, device=adapter)
        m.articulation_H_start = torch.tensor(articulation_H_start, dtype=torch.int32, device=adapter)

        m.articulation_M_rows = torch.tensor(articulation_M_rows, dtype=torch.int32, device=adapter)
        m.articulation_H_rows = torch.tensor(articulation_H_rows, dtype=torch.int32, device=adapter)
        m.articulation_J_rows = torch.tensor(articulation_J_rows, dtype=torch.int32, device=adapter)
        m.articulation_J_cols = torch.tensor(articulation_J_cols, dtype=torch.int32, device=adapter)

        m.articulation_dof_start = torch.tensor(articulation_dof_start, dtype=torch.int32, device=adapter)
        m.articulation_coord_start = torch.tensor(articulation_coord_start, dtype=torch.int32, device=adapter)

        # state (initial)
        m.joint_q = torch.tensor(self.joint_q, dtype=torch.float32, device=adapter)
        m.joint_qd = torch.tensor(self.joint_qd, dtype=torch.float32, device=adapter)

        # model
        m.joint_type = torch.tensor(self.joint_type, dtype=torch.int32, device=adapter)
        m.joint_parent = torch.tensor(self.joint_parent, dtype=torch.int32, device=adapter)
        m.joint_X_pj = torch.tensor(transform_flatten_list(self.joint_X_pj), dtype=torch.float32, device=adapter)
        m.joint_X_cm = torch.tensor(transform_flatten_list(body_X_cm), dtype=torch.float32, device=adapter)
        m.joint_axis = torch.tensor(self.joint_axis, dtype=torch.float32, device=adapter)
        m.joint_q_start = torch.tensor(self.joint_q_start, dtype=torch.int32, device=adapter)
        m.joint_qd_start = torch.tensor(self.joint_qd_start, dtype=torch.int32, device=adapter)

        # dynamics properties
        m.joint_armature = torch.tensor(self.joint_armature, dtype=torch.float32, device=adapter)
        m.joint_target = torch.tensor(self.joint_target, dtype=torch.float32, device=adapter)
        m.joint_target_ke = torch.tensor(self.joint_target_ke, dtype=torch.float32, device=adapter)
        m.joint_target_kd = torch.tensor(self.joint_target_kd, dtype=torch.float32, device=adapter)
        m.joint_limit_lower = torch.tensor(self.joint_limit_lower, dtype=torch.float32, device=adapter)
        m.joint_limit_upper = torch.tensor(self.joint_limit_upper, dtype=torch.float32, device=adapter)
        m.joint_limit_ke = torch.tensor(self.joint_limit_ke, dtype=torch.float32, device=adapter)
        m.joint_limit_kd = torch.tensor(self.joint_limit_kd, dtype=torch.float32, device=adapter)

        # counts
        m.particle_count = len(self.particle_q)
        m.articulation_count = articulation_count
        m.joint_coord_count = joint_coord_count
        m.joint_dof_count = joint_dof_count
        m.muscle_count = muscle_count

        m.link_count = len(self.joint_type)
        m.shape_count = len(self.shape_geo_type)
        m.tri_count = len(self.tri_poses)
        m.tet_count = len(self.tet_poses)
        m.edge_count = len(self.edge_rest_angle)
        m.spring_count = len(self.spring_rest_length)
        m.contact_count = 0

        # store refs to geometry
        m.geo_meshes = self.geo_meshes
        m.geo_sdfs = self.geo_sdfs

        # enable ground plane
        m.ground = True
        m.enable_tri_collisions = False
        m.gravity = torch.tensor((0.0, -9.8, 0.0), dtype=torch.float32, device=adapter)

        # allocate space for mass / jacobian matrices
        m.alloc_mass_matrix()

        return m
| 71,080 |
Python
| 36.809043 | 206 | 0.562085 |
RoboticExplorationLab/Deep-ILC/dflex/dflex/adjoint.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import imp
import ast
import math
import inspect
import typing
import weakref
import numpy as np
import torch
import torch.utils.cpp_extension
import dflex.config
import copy
# Todo
#-----
#
# [ ] Unary ops (e.g.: -)
# [ ] Inplace ops (e.g.: +=, -=)
# [ ] Conditionals
# [ ] Loops (unrolled)
# [ ] Auto-gen PyTorch operator
# [ ] CUDA kernel code gen + dynamic compilation
# -----
# global registries populated at import time by the decorators / code below
operators = {}       # maps Python AST operator node classes -> builtin keys / symbols
functions = {}       # maps builtin key -> registered function class (see builtin())
cuda_functions = {}  # presumably holds compiled CUDA variants; populated elsewhere
kernels = {}         # kernel registry; populated elsewhere
#----------------------
# built-in types
class float3:
    """3-component vector type marker used by the kernel type system."""
    def __init__(self):
        # bug fix: the original assigned locals (x = 0.0) which discarded the
        # values; attributes are now set on the instance
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0


class float4:
    """4-component vector type marker."""
    def __init__(self):
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
        self.w = 0.0


class quat:
    """Quaternion type marker (identity by default, w = 1)."""
    def __init__(self):
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
        self.w = 1.0


class mat22:
    """2x2 matrix type marker."""
    def __init__(self):
        pass


class mat33:
    """3x3 matrix type marker."""
    def __init__(self):
        pass


class spatial_vector:
    """6D spatial vector type marker."""
    def __init__(self):
        pass


class spatial_matrix:
    """6x6 spatial matrix type marker."""
    def __init__(self):
        pass


class spatial_transform:
    """Spatial transform type marker."""
    def __init__(self):
        pass


class void:
    """Marker for functions with no return value."""
    def __init__(self):
        pass


class tensor:
    """Typed tensor marker wrapping an element type for kernel arguments."""
    def __init__(self, type):
        self.type = type
        self.requires_grad = True
        self.__name__ = "tensor<" + type.__name__ + ">"
#----------------------
# register built-in function
def builtin(key):
    """Decorator factory that registers a built-in function class.

    The decorated class is tagged with its lookup ``key`` and the ``df::``
    C++ namespace prefix, then stored in the module-level ``functions``
    registry under ``key``.
    """
    def wrapper(cls):
        cls.key = key
        cls.prefix = "df::"
        functions[key] = cls
        return cls

    return wrapper
#---------------------------------
# built-in operators +,-,*,/
@builtin("add")
class AddFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("sub")
class SubFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("mod")
class ModFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("mul")
class MulFunc:
@staticmethod
def value_type(args):
# todo: encode type operator type globally
if (args[0].type == mat33 and args[1].type == float3):
return float3
if (args[0].type == spatial_matrix and args[1].type == spatial_vector):
return spatial_vector
else:
return args[0].type
@builtin("div")
class DivFunc:
@staticmethod
def value_type(args):
return args[0].type
#----------------------
# map operator nodes to builtin
# arithmetic AST nodes lower to the builtin function of the same name
operators[ast.Add] = "add"
operators[ast.Sub] = "sub"
operators[ast.Mult] = "mul"
operators[ast.Div] = "div"
operators[ast.FloorDiv] = "div"  # NOTE(review): '//' lowers to plain division in generated code -- confirm floor semantics are intended for floats
operators[ast.Mod] = "mod"
# comparison AST nodes lower directly to C operator strings
operators[ast.Gt] = ">"
operators[ast.Lt] = "<"
operators[ast.GtE] = ">="
operators[ast.LtE] = "<="
operators[ast.Eq] = "=="
operators[ast.NotEq] = "!="
#----------------------
# built-in functions
@builtin("min")
class MinFunc:
@staticmethod
def value_type(args):
return float
@builtin("max")
class MaxFunc:
@staticmethod
def value_type(args):
return float
@builtin("leaky_max")
class LeakyMaxFunc:
@staticmethod
def value_type(args):
return float
@builtin("leaky_min")
class LeakyMinFunc:
@staticmethod
def value_type(args):
return float
@builtin("clamp")
class ClampFunc:
@staticmethod
def value_type(args):
return float
@builtin("step")
class StepFunc:
@staticmethod
def value_type(args):
return float
@builtin("nonzero")
class NonZeroFunc:
@staticmethod
def value_type(args):
return float
@builtin("sign")
class SignFunc:
@staticmethod
def value_type(args):
return float
@builtin("abs")
class AbsFunc:
@staticmethod
def value_type(args):
return float
@builtin("sin")
class SinFunc:
@staticmethod
def value_type(args):
return float
@builtin("cos")
class CosFunc:
@staticmethod
def value_type(args):
return float
@builtin("acos")
class ACosFunc:
@staticmethod
def value_type(args):
return float
# Note: duplicate registrations of the "sin" and "cos" builtins removed --
# identical SinFunc / CosFunc classes are already registered above; the
# duplicates simply re-inserted the same entries into the function table.
@builtin("sqrt")
class SqrtFunc:
@staticmethod
def value_type(args):
return float
@builtin("dot")
class DotFunc:
@staticmethod
def value_type(args):
return float
@builtin("cross")
class CrossFunc:
@staticmethod
def value_type(args):
return float3
@builtin("skew")
class SkewFunc:
@staticmethod
def value_type(args):
return mat33
@builtin("length")
class LengthFunc:
@staticmethod
def value_type(args):
return float
@builtin("normalize")
class NormalizeFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("select")
class SelectFunc:
@staticmethod
def value_type(args):
return args[1].type
@builtin("rotate")
class RotateFunc:
@staticmethod
def value_type(args):
return float3
@builtin("rotate_inv")
class RotateInvFunc:
@staticmethod
def value_type(args):
return float3
@builtin("determinant")
class DeterminantFunc:
@staticmethod
def value_type(args):
return float
@builtin("transpose")
class TransposeFunc:
@staticmethod
def value_type(args):
return args[0].type
@builtin("load")
class LoadFunc:
@staticmethod
def value_type(args):
if (type(args[0].type) != tensor):
raise Exception("Load input 0 must be a tensor")
if (args[1].type != int):
raise Exception("Load input 1 must be a int")
return args[0].type.type
@builtin("store")
class StoreFunc:
@staticmethod
def value_type(args):
if (type(args[0].type) != tensor):
raise Exception("Store input 0 must be a tensor")
if (args[1].type != int):
raise Exception("Store input 1 must be a int")
if (args[2].type != args[0].type.type):
raise Exception("Store input 2 must be of the same type as the tensor")
return None
@builtin("atomic_add")
class AtomicAddFunc:
@staticmethod
def value_type(args):
return None
@builtin("atomic_sub")
class AtomicSubFunc:
@staticmethod
def value_type(args):
return None
@builtin("tid")
class ThreadIdFunc:
@staticmethod
def value_type(args):
return int
# type construtors
@builtin("float")
class floatFunc:
@staticmethod
def value_type(args):
return float
@builtin("int")
class IntFunc:
@staticmethod
def value_type(args):
return int
@builtin("float3")
class Float3Func:
@staticmethod
def value_type(args):
return float3
@builtin("quat")
class QuatFunc:
@staticmethod
def value_type(args):
return quat
@builtin("quat_identity")
class QuatIdentityFunc:
@staticmethod
def value_type(args):
return quat
@builtin("quat_from_axis_angle")
class QuatAxisAngleFunc:
@staticmethod
def value_type(args):
return quat
@builtin("mat22")
class Mat22Func:
@staticmethod
def value_type(args):
return mat22
@builtin("mat33")
class Mat33Func:
@staticmethod
def value_type(args):
return mat33
@builtin("spatial_vector")
class SpatialVectorFunc:
@staticmethod
def value_type(args):
return spatial_vector
# built-in spatial operators
@builtin("spatial_transform")
class TransformFunc:
@staticmethod
def value_type(args):
return spatial_transform
@builtin("spatial_transform_identity")
class TransformIdentity:
@staticmethod
def value_type(args):
return spatial_transform
@builtin("inverse")
class Inverse:
@staticmethod
def value_type(args):
return quat
# @builtin("spatial_transform_inverse")
# class TransformInverse:
# @staticmethod
# def value_type(args):
# return spatial_transform
@builtin("spatial_transform_get_translation")
class TransformGetTranslation:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_transform_get_rotation")
class TransformGetRotation:
@staticmethod
def value_type(args):
return quat
@builtin("spatial_transform_multiply")
class TransformMulFunc:
@staticmethod
def value_type(args):
return spatial_transform
# @builtin("spatial_transform_inertia")
# class TransformInertiaFunc:
# @staticmethod
# def value_type(args):
# return spatial_matrix
@builtin("spatial_adjoint")
class SpatialAdjoint:
@staticmethod
def value_type(args):
return spatial_matrix
@builtin("spatial_dot")
class SpatialDotFunc:
@staticmethod
def value_type(args):
return float
@builtin("spatial_cross")
class SpatialCrossFunc:
    # renamed from SpatialDotFunc: the class name collided with the
    # "spatial_dot" builtin above (registration key is unaffected)
    @staticmethod
    def value_type(args):
        return spatial_vector
@builtin("spatial_cross_dual")
class SpatialCrossDualFunc:
    # renamed from SpatialDotFunc: the class name collided with the
    # "spatial_dot" builtin above (registration key is unaffected)
    @staticmethod
    def value_type(args):
        return spatial_vector
@builtin("spatial_transform_point")
class SpatialTransformPointFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_transform_vector")
class SpatialTransformVectorFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_top")
class SpatialTopFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_bottom")
class SpatialBottomFunc:
@staticmethod
def value_type(args):
return float3
@builtin("spatial_jacobian")
class SpatialJacobian:
@staticmethod
def value_type(args):
return None
@builtin("spatial_mass")
class SpatialMass:
@staticmethod
def value_type(args):
return None
@builtin("dense_gemm")
class DenseGemm:
@staticmethod
def value_type(args):
return None
@builtin("dense_gemm_batched")
class DenseGemmBatched:
@staticmethod
def value_type(args):
return None
@builtin("dense_chol")
class DenseChol:
@staticmethod
def value_type(args):
return None
@builtin("dense_chol_batched")
class DenseCholBatched:
@staticmethod
def value_type(args):
return None
@builtin("dense_subs")
class DenseSubs:
@staticmethod
def value_type(args):
return None
@builtin("dense_solve")
class DenseSolve:
@staticmethod
def value_type(args):
return None
@builtin("dense_solve_batched")
class DenseSolveBatched:
    # renamed from DenseSolve: the class name shadowed the "dense_solve"
    # builtin class defined above (registration key is unaffected)
    @staticmethod
    def value_type(args):
        return None
# helpers
@builtin("index")
class IndexFunc:
@staticmethod
def value_type(args):
return float
@builtin("print")
class PrintFunc:
@staticmethod
def value_type(args):
return None
class Var:
    """A local variable / argument tracked by the adjoint transformer.

    Carries the symbol label, its Python marker type, whether a gradient is
    required, and an optional compile-time constant value.
    """
    def __init__(adj, label, type, requires_grad=False, constant=None):
        adj.label = label
        adj.type = type
        adj.requires_grad = requires_grad
        adj.constant = constant

    def __str__(adj):
        return adj.label

    def ctype(self):
        """Returns the C type string emitted for this variable."""
        if isinstance(self.type, tensor):
            # tensor args lower to raw element pointers; float3 elements need
            # the df:: namespace qualifier
            elem = self.type.type.__name__
            if self.type.type == float3:
                return "df::" + elem + "*"
            return elem + "*"
        if self.type == float3:
            return "df::" + self.type.__name__
        return str(self.type.__name__)
#--------------------
# Storage class for partial AST up to a return statement.
class Stmt:
    """Stores one partial AST segment: all statements accumulated up to
    (and including) one return statement, plus the matching adjoint code."""
    def __init__(self, cond, forward, forward_replay, reverse, ret_forward, ret_line):
        self.cond = cond               # condition, can be None
        self.forward = forward         # all forward code outside of conditional branch *since last return*
        self.forward_replay = forward_replay  # forward code replayed in the reverse kernel (may differ, e.g. returns become gotos)
        self.reverse = reverse         # all reverse code including the reverse of any code in ret_forward
        self.ret_forward = ret_forward # all forward commands in the return statement except the actual return statement
        self.ret_line = ret_line       # actual return statement
#------------------------------------------------------------------------
# Source code transformer, this class takes a Python function and
# computes its adjoint using single-pass translation of the function's AST
class Adjoint:
def __init__(adj, func, device='cpu'):
adj.func = func
adj.device = device
adj.symbols = {} # map from symbols to adjoint variables
adj.variables = [] # list of local variables (in order)
adj.args = [] # list of function arguments (in order)
adj.cond = None # condition variable if in branch
adj.return_var = None # return type for function or kernel
# build AST from function object
adj.source = inspect.getsource(func)
adj.tree = ast.parse(adj.source)
# parse argument types
arg_types = typing.get_type_hints(func)
# add variables and symbol map for each argument
for name, t in arg_types.items():
adj.symbols[name] = Var(name, t, False)
# build ordered list of args
for a in adj.tree.body[0].args.args:
adj.args.append(adj.symbols[a.arg])
# primal statements (allows different statements in replay)
adj.body_forward = []
adj.body_forward_replay = []
adj.body_reverse = []
adj.output = []
adj.indent_count = 0
adj.label_count = 0
# recursively evaluate function body
adj.eval(adj.tree.body[0])
# code generation methods
def format_template(adj, template, input_vars, output_var):
# output var is always the 0th index
args = [output_var] + input_vars
s = template.format(*args)
return s
# generates a comma separated list of args
def format_args(adj, prefix, indices):
args = ""
sep = ""
for i in indices:
args += sep + prefix + str(i)
sep = ", "
return args
def add_var(adj, type=None, constant=None):
index = len(adj.variables)
v = Var(str(index), type=type, constant=constant)
adj.variables.append(v)
return v
def add_constant(adj, n):
output = adj.add_var(type=type(n), constant=n)
#adj.add_forward("var_{} = {};".format(output, n))
return output
def add_load(adj, input):
output = adj.add_var(input.type)
adj.add_forward("var_{} = {};".format(output, input))
adj.add_reverse("adj_{} += adj_{};".format(input, output))
return output
def add_operator(adj, op, inputs):
# todo: just using first input as the output type, would need some
# type inference here to support things like float3 = float*float3
output = adj.add_var(inputs[0].type)
transformer = operators[op.__class__]
for t in transformer.forward():
adj.add_forward(adj.format_template(t, inputs, output))
for t in transformer.reverse():
adj.add_reverse(adj.format_template(t, inputs, output))
return output
def add_comp(adj, op_strings, left, comps):
output = adj.add_var(bool)
s = "var_" + str(output) + " = " + ("(" * len(comps)) + "var_" + str(left) + " "
for op, comp in zip(op_strings, comps):
s += op + " var_" + str(comp) + ") "
s = s.rstrip() + ";"
adj.add_forward(s)
return output
def add_bool_op(adj, op_string, exprs):
output = adj.add_var(bool)
command = "var_" + str(output) + " = " + (" " + op_string + " ").join(["var_" + str(expr) for expr in exprs]) + ";"
adj.add_forward(command)
return output
def add_call(adj, func, inputs, prefix='df::'):
# expression (zero output), e.g.: tid()
if (func.value_type(inputs) == None):
forward_call = prefix + "{}({});".format(func.key, adj.format_args("var_", inputs))
adj.add_forward(forward_call)
if (len(inputs)):
reverse_call = prefix + "{}({}, {});".format("adj_" + func.key, adj.format_args("var_", inputs), adj.format_args("adj_", inputs))
adj.add_reverse(reverse_call)
return None
# function (one output)
else:
output = adj.add_var(func.value_type(inputs))
forward_call = "var_{} = ".format(output) + prefix + "{}({});".format(func.key, adj.format_args("var_", inputs))
adj.add_forward(forward_call)
if (len(inputs)):
reverse_call = prefix + "{}({}, {}, {});".format(
"adj_" + func.key, adj.format_args("var_", inputs), adj.format_args("adj_", inputs), adj.format_args("adj_", [output]))
adj.add_reverse(reverse_call)
return output
def add_return(adj, var):
if (var == None):
adj.add_forward("return;".format(var), "goto label{};".format(adj.label_count))
else:
adj.add_forward("return var_{};".format(var), "goto label{};".format(adj.label_count))
adj.add_reverse("adj_" + str(var) + " += adj_ret;")
adj.add_reverse("label{}:;".format(adj.label_count))
adj.label_count += 1
# define an if statement
def begin_if(adj, cond):
adj.add_forward("if (var_{}) {{".format(cond))
adj.add_reverse("}")
adj.indent_count += 1
def end_if(adj, cond):
adj.indent_count -= 1
adj.add_forward("}")
adj.add_reverse("if (var_{}) {{".format(cond))
# define a for-loop
def begin_for(adj, iter, start, end):
# note that dynamic for-loops must not mutate any previous state, so we don't need to re-run them in the reverse pass
adj.add_forward("for (var_{0}=var_{1}; var_{0} < var_{2}; ++var_{0}) {{".format(iter, start, end), "if (false) {")
adj.add_reverse("}")
adj.indent_count += 1
def end_for(adj, iter, start, end):
adj.indent_count -= 1
adj.add_forward("}")
adj.add_reverse("for (var_{0}=var_{2}-1; var_{0} >= var_{1}; --var_{0}) {{".format(iter, start, end))
# append a statement to the forward pass
def add_forward(adj, statement, statement_replay=None):
prefix = ""
for i in range(adj.indent_count):
prefix += "\t"
adj.body_forward.append(prefix + statement)
# allow for different statement in reverse kernel replay
if (statement_replay):
adj.body_forward_replay.append(prefix + statement_replay)
else:
adj.body_forward_replay.append(prefix + statement)
# append a statement to the reverse pass
def add_reverse(adj, statement):
prefix = ""
for i in range(adj.indent_count):
prefix += "\t"
adj.body_reverse.append(prefix + statement)
def eval(adj, node):
try:
if (isinstance(node, ast.FunctionDef)):
out = None
for f in node.body:
out = adj.eval(f)
if 'return' in adj.symbols and adj.symbols['return'] is not None:
out = adj.symbols['return']
stmt = Stmt(None, adj.body_forward, adj.body_forward_replay, reversed(adj.body_reverse), [], "")
adj.output.append(stmt)
else:
stmt = Stmt(None, adj.body_forward, adj.body_forward_replay, reversed(adj.body_reverse), [], "")
adj.output.append(stmt)
return out
elif (isinstance(node, ast.If)): # if statement
if len(node.orelse) != 0:
raise SyntaxError("Else statements not currently supported")
if len(node.body) == 0:
return None
# save symbol map
symbols_prev = adj.symbols.copy()
# eval condition
cond = adj.eval(node.test)
# eval body
adj.begin_if(cond)
for stmt in node.body:
adj.eval(stmt)
adj.end_if(cond)
# detect symbols with conflicting definitions (assigned inside the branch)
for items in symbols_prev.items():
sym = items[0]
var1 = items[1]
var2 = adj.symbols[sym]
if var1 != var2:
# insert a phi function that
# selects var1, var2 based on cond
out = adj.add_call(functions["select"], [cond, var1, var2])
adj.symbols[sym] = out
return None
elif (isinstance(node, ast.Compare)):
# node.left, node.ops (list of ops), node.comparators (things to compare to)
# e.g. (left ops[0] node.comparators[0]) ops[1] node.comparators[1]
left = adj.eval(node.left)
comps = [adj.eval(comp) for comp in node.comparators]
op_strings = [operators[type(op)] for op in node.ops]
out = adj.add_comp(op_strings, left, comps)
return out
elif (isinstance(node, ast.BoolOp)):
# op, expr list values (e.g. and and a list of things anded together)
op = node.op
if isinstance(op, ast.And):
func = "&&"
elif isinstance(op, ast.Or):
func = "||"
else:
raise KeyError("Op {} is not supported".format(op))
out = adj.add_bool_op(func, [adj.eval(expr) for expr in node.values])
# import pdb
# pdb.set_trace()
return out
elif (isinstance(node, ast.Name)):
# lookup symbol, if it has already been assigned to a variable then return the existing mapping
if (node.id in adj.symbols):
return adj.symbols[node.id]
else:
raise KeyError("Referencing undefined symbol: " + str(node.id))
elif (isinstance(node, ast.Num)):
# lookup constant, if it has already been assigned then return existing var
# currently disabled, since assigning constant in a branch means it
key = (node.n, type(node.n))
if (key in adj.symbols):
return adj.symbols[key]
else:
out = adj.add_constant(node.n)
adj.symbols[key] = out
return out
#out = adj.add_constant(node.n)
#return out
elif (isinstance(node, ast.BinOp)):
# evaluate binary operator arguments
left = adj.eval(node.left)
right = adj.eval(node.right)
name = operators[type(node.op)]
func = functions[name]
out = adj.add_call(func, [left, right])
return out
elif (isinstance(node, ast.UnaryOp)):
# evaluate unary op arguments
arg = adj.eval(node.operand)
out = adj.add_operator(node.op, [arg])
return out
elif (isinstance(node, ast.For)):
if (len(node.iter.args) != 2):
raise Exception("For loop ranges must be of form range(start, end) with both start and end specified and no skip specifier.")
# check if loop range is compile time constant
unroll = True
for a in node.iter.args:
if (isinstance(a, ast.Num) == False):
unroll = False
break
if (unroll):
# constant loop, unroll
start = node.iter.args[0].n
end = node.iter.args[1].n
for i in range(start, end):
var_iter = adj.add_constant(i)
adj.symbols[node.target.id] = var_iter
# eval body
for s in node.body:
adj.eval(s)
else:
# dynamic loop, body must be side-effect free, i.e.: not
# overwrite memory locations used by previous operations
start = adj.eval(node.iter.args[0])
end = adj.eval(node.iter.args[1])
# add iterator variable
iter = adj.add_var(int)
adj.symbols[node.target.id] = iter
adj.begin_for(iter, start, end)
# eval body
for s in node.body:
adj.eval(s)
adj.end_for(iter, start, end)
elif (isinstance(node, ast.Expr)):
return adj.eval(node.value)
elif (isinstance(node, ast.Call)):
name = None
# determine if call is to a builtin (attribute), or to a user-func (name)
if (isinstance(node.func, ast.Attribute)):
name = node.func.attr
elif (isinstance(node.func, ast.Name)):
name = node.func.id
# check it exists
if name not in functions:
raise KeyError("Could not find function {}".format(name))
if adj.device == 'cuda' and name in cuda_functions:
func = cuda_functions[name]
else:
func = functions[name]
args = []
# eval all arguments
for arg in node.args:
var = adj.eval(arg)
args.append(var)
# add var with value type from the function
out = adj.add_call(func, args, prefix=func.prefix)
return out
elif (isinstance(node, ast.Subscript)):
target = adj.eval(node.value)
indices = []
if isinstance(node.slice.value, ast.Tuple):
# handles the M[i, j] case
for arg in node.slice.value.elts:
var = adj.eval(arg)
indices.append(var)
else:
# simple expression
var = adj.eval(node.slice.value)
indices.append(var)
out = adj.add_call(functions["index"], [target, *indices])
return out
elif (isinstance(node, ast.Assign)):
# if adj.cond is not None:
# raise SyntaxError("error, cannot assign variables in a conditional branch")
# evaluate rhs
out = adj.eval(node.value)
# update symbol map (assumes lhs is a Name node)
adj.symbols[node.targets[0].id] = out
return out
elif (isinstance(node, ast.Return)):
cond = adj.cond # None if not in branch, else branch boolean
out = adj.eval(node.value)
adj.symbols['return'] = out
if out is not None: # set return type of function
return_var = out
if adj.return_var is not None and adj.return_var.ctype() != return_var.ctype():
raise TypeError("error, function returned different types")
adj.return_var = return_var
adj.add_return(out)
return out
elif node is None:
return None
else:
print("[WARNING] ast node of type {} not supported".format(type(node)))
except Exception as e:
# print error / line number
lines = adj.source.splitlines()
print("Error: {} while transforming node {} in func: {} at line: {} col: {}: \n {}".format(e, type(node), adj.func.__name__, node.lineno, node.col_offset, lines[max(node.lineno-1, 0)]))
raise
#----------------
# code generation
cpu_module_header = '''
#define CPU
#include "adjoint.h"
using namespace df;
template <typename T>
T cast(torch::Tensor t)
{{
return (T)(t.data_ptr());
}}
'''
cuda_module_header = '''
#define CUDA
#include "adjoint.h"
using namespace df;
template <typename T>
T cast(torch::Tensor t)
{{
return (T)(t.data_ptr());
}}
'''
cpu_function_template = '''
{return_type} {name}_cpu_func({forward_args})
{{
{forward_body}
}}
void adj_{name}_cpu_func({forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cuda_function_template = '''
CUDA_CALLABLE {return_type} {name}_cuda_func({forward_args})
{{
{forward_body}
}}
CUDA_CALLABLE void adj_{name}_cuda_func({forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cuda_kernel_template = '''
__global__ void {name}_cuda_kernel_forward(int dim, {forward_args})
{{
{forward_body}
}}
__global__ void {name}_cuda_kernel_backward(int dim, {forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cpu_kernel_template = '''
void {name}_cpu_kernel_forward({forward_args})
{{
{forward_body}
}}
void {name}_cpu_kernel_backward({forward_args}, {reverse_args})
{{
{reverse_body}
}}
'''
cuda_module_template = '''
// Python entry points
void {name}_cuda_forward(int dim, {forward_args})
{{
{name}_cuda_kernel_forward<<<(dim + 256 - 1) / 256, 256>>>(dim, {forward_params});
//check_cuda(cudaPeekAtLastError());
//check_cuda(cudaDeviceSynchronize());
}}
void {name}_cuda_backward(int dim, {forward_args}, {reverse_args})
{{
{name}_cuda_kernel_backward<<<(dim + 256 - 1) / 256, 256>>>(dim, {forward_params}, {reverse_params});
//check_cuda(cudaPeekAtLastError());
//check_cuda(cudaDeviceSynchronize());
}}
'''
cpu_module_template = '''
// Python entry points
void {name}_cpu_forward(int dim, {forward_args})
{{
for (int i=0; i < dim; ++i)
{{
s_threadIdx = i;
{name}_cpu_kernel_forward({forward_params});
}}
}}
void {name}_cpu_backward(int dim, {forward_args}, {reverse_args})
{{
for (int i=0; i < dim; ++i)
{{
s_threadIdx = i;
{name}_cpu_kernel_backward({forward_params}, {reverse_params});
}}
}}
'''
cuda_module_header_template = '''
// Python entry points
void {name}_cuda_forward(int dim, {forward_args});
void {name}_cuda_backward(int dim, {forward_args}, {reverse_args});
'''
cpu_module_header_template = '''
// Python entry points
void {name}_cpu_forward(int dim, {forward_args});
void {name}_cpu_backward(int dim, {forward_args}, {reverse_args});
'''
def indent(args, stops=1):
    """Reflows a comma separated argument string to one argument per line,
    each line indented by ``stops`` tab characters."""
    sep = "\n" + "\t" * stops
    return sep + args.replace(", ", "," + sep)
def codegen_func_forward_body(adj, device='cpu', indent=4):
body = []
indent_block = " " * indent
for stmt in adj.output:
for f in stmt.forward:
body += [f + "\n"]
if stmt.cond is not None:
body += ["if (" + str(stmt.cond) + ") {\n"]
for l in stmt.ret_forward:
body += [indent_block + l + "\n"]
body += [indent_block + stmt.ret_line + "\n"]
body += ["}\n"]
else:
for l in stmt.ret_forward:
body += [l + "\n"]
body += [stmt.ret_line + "\n"]
break # break once unconditional return is encountered
return "".join([indent_block + l for l in body])
def codegen_func_forward(adj, func_type='kernel', device='cpu'):
s = ""
# primal vars
s += " //---------\n"
s += " // primal vars\n"
for var in adj.variables:
if var.constant == None:
s += " " + var.ctype() + " var_" + str(var.label) + ";\n"
else:
s += " const " + var.ctype() + " var_" + str(var.label) + " = " + str(var.constant) + ";\n"
# forward pass
s += " //---------\n"
s += " // forward\n"
if device == 'cpu':
s += codegen_func_forward_body(adj, device=device, indent=4)
elif device == 'cuda':
if func_type == 'kernel':
s += " int var_idx = blockDim.x * blockIdx.x + threadIdx.x;\n"
s += " if (var_idx < dim) {\n"
s += codegen_func_forward_body(adj, device=device, indent=8)
s += " }\n"
else:
s += codegen_func_forward_body(adj, device=device, indent=4)
return s
def codegen_func_reverse_body(adj, device='cpu', indent=4):
body = []
indent_block = " " * indent
for stmt in adj.output:
# forward pass
body += ["//---------\n"]
body += ["// forward\n"]
for f in stmt.forward_replay:
body += [f + "\n"]
if stmt.cond is not None:
body += ["if (" + str(stmt.cond) + ") {\n"]
for l in stmt.ret_forward:
body += [indent_block + l + "\n"]
# reverse pass
body += [indent_block + "//---------\n"]
body += [indent_block + "// reverse\n"]
for l in stmt.reverse:
body += [indent_block + l + "\n"]
body += [indent_block + "return;\n"]
body += ["}\n"]
else:
for l in stmt.ret_forward:
body += [l + "\n"]
# reverse pass
body += ["//---------\n"]
body += ["// reverse\n"]
for l in stmt.reverse:
body += [l + "\n"]
body += ["return;\n"]
break # break once unconditional return is encountered
return "".join([indent_block + l for l in body])
def codegen_func_reverse(adj, func_type='kernel', device='cpu'):
s = ""
# primal vars
s += " //---------\n"
s += " // primal vars\n"
for var in adj.variables:
if var.constant == None:
s += " " + var.ctype() + " var_" + str(var.label) + ";\n"
else:
s += " const " + var.ctype() + " var_" + str(var.label) + " = " + str(var.constant) + ";\n"
# dual vars
s += " //---------\n"
s += " // dual vars\n"
for var in adj.variables:
s += " " + var.ctype() + " adj_" + str(var.label) + " = 0;\n"
if device == 'cpu':
s += codegen_func_reverse_body(adj, device=device, indent=4)
elif device == 'cuda':
if func_type == 'kernel':
s += " int var_idx = blockDim.x * blockIdx.x + threadIdx.x;\n"
s += " if (var_idx < dim) {\n"
s += codegen_func_reverse_body(adj, device=device, indent=8)
s += " }\n"
else:
s += codegen_func_reverse_body(adj, device=device, indent=4)
else:
raise ValueError("Device {} not supported for codegen".format(device))
return s
def codegen_func(adj, device='cpu'):
# forward header
# return_type = "void"
return_type = 'void' if adj.return_var is None else adj.return_var.ctype()
# s = "{} {}_forward(".format(return_type, adj.func.__name__)
# sep = ""
# for arg in adj.args:
# if (arg.label != 'return'):
# s += sep + str(arg.type.__name__) + " var_" + arg.label
# sep = ", "
# reverse header
# s = "void {}_reverse(".format(adj.func.__name__)
# return s
forward_args = ""
reverse_args = ""
# s = ""
# forward args
sep = ""
for arg in adj.args:
forward_args += sep + arg.ctype() + " var_" + arg.label
sep = ", "
# reverse args
sep = ""
for arg in adj.args:
if "*" in arg.ctype():
reverse_args += sep + arg.ctype() + " adj_" + arg.label
else:
reverse_args += sep + arg.ctype() + " & adj_" + arg.label
sep = ", "
reverse_args += sep + return_type + " & adj_ret"
# reverse args
# add primal version of parameters
# sep = ""
# for var in adj.args:
# if (var.label != 'return'):
# s += sep + var.ctype() + " var_" + var.label
# sep = ", "
# # add adjoint version of parameters
# for var in adj.args:
# if (var.label != 'return'):
# s += sep + var.ctype() + "& adj_" + var.label
# sep = ", "
# # add adjoint of output
# if ('return' in adj.symbols and adj.symbols['return'] != None):
# s += sep + str(adj.symbols['return'].type.__name__) + " adj_" + str(adj.symbols['return'])
# codegen body
forward_body = codegen_func_forward(adj, func_type='function', device=device)
reverse_body = codegen_func_reverse(adj, func_type='function', device=device)
if device == 'cpu':
template = cpu_function_template
elif device == 'cuda':
template = cuda_function_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__,
return_type=return_type,
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_body=forward_body,
reverse_body=reverse_body)
return s
def codegen_kernel(adj, device='cpu'):
forward_args = ""
reverse_args = ""
# forward args
sep = ""
for arg in adj.args:
forward_args += sep + arg.ctype() + " var_" + arg.label
sep = ", "
# reverse args
sep = ""
for arg in adj.args:
reverse_args += sep + arg.ctype() + " adj_" + arg.label
sep = ", "
# codegen body
forward_body = codegen_func_forward(adj, func_type='kernel', device=device)
reverse_body = codegen_func_reverse(adj, func_type='kernel', device=device)
# import pdb
# pdb.set_trace()
if device == 'cpu':
template = cpu_kernel_template
elif device == 'cuda':
template = cuda_kernel_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__,
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_body=forward_body,
reverse_body=reverse_body)
return s
def codegen_module(adj, device='cpu'):
forward_args = ""
reverse_args = ""
forward_params = ""
reverse_params = ""
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
forward_args += sep + "torch::Tensor var_" + arg.label
forward_params += sep + "cast<" + arg.ctype() + ">(var_" + arg.label + ")"
else:
forward_args += sep + arg.ctype() + " var_" + arg.label
forward_params += sep + "var_" + arg.label
sep = ", "
sep = ""
for arg in adj.args:
if (isinstance(arg.type, tensor)):
reverse_args += sep + "torch::Tensor adj_" + arg.label
reverse_params += sep + "cast<" + arg.ctype() + ">(adj_" + arg.label + ")"
else:
reverse_args += sep + arg.ctype() + " adj_" + arg.label
reverse_params += sep + "adj_" + arg.label
sep = ", "
if device == 'cpu':
template = cpu_module_template
elif device == 'cuda':
template = cuda_module_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(name=adj.func.__name__,
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_params=indent(forward_params, 3),
reverse_params=indent(reverse_params, 3))
return s
def codegen_module_decl(adj, device='cpu'):
    """Generates the C declaration (header) for a kernel's Python entry
    points on the given device ('cpu' or 'cuda').

    Fix: removed dead computation of forward_params / reverse_params --
    the header template only consumes the argument lists, so the
    parameter-forwarding strings built by codegen_module() were never used.
    """
    forward_args = ""
    reverse_args = ""

    # forward args: tensors are passed as torch::Tensor, scalars by value
    sep = ""
    for arg in adj.args:
        if (isinstance(arg.type, tensor)):
            forward_args += sep + "torch::Tensor var_" + arg.label
        else:
            forward_args += sep + arg.ctype() + " var_" + arg.label
        sep = ", "

    # reverse (adjoint) args mirror the forward list
    sep = ""
    for arg in adj.args:
        if (isinstance(arg.type, tensor)):
            reverse_args += sep + "torch::Tensor adj_" + arg.label
        else:
            reverse_args += sep + arg.ctype() + " adj_" + arg.label
        sep = ", "

    if device == 'cpu':
        template = cpu_module_header_template
    elif device == 'cuda':
        template = cuda_module_header_template
    else:
        raise ValueError("Device {} is not supported".format(device))

    s = template.format(name=adj.func.__name__, forward_args=indent(forward_args), reverse_args=indent(reverse_args))
    return s
# runs vcvars and copies back the build environment, PyTorch should really be doing this
def set_build_env():
    """Prepares the process environment for building C++/CUDA extensions.

    On Windows this runs the VS2019 vcvars64.bat script and copies the
    resulting environment variables back into os.environ so that PyTorch's
    cpp_extension build can find the MSVC toolchain.  No-op elsewhere.
    """
    if os.name == 'nt':

        # VS2019 (required for PyTorch headers)
        vcvars_path = "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\Build\\vcvars64.bat"

        # run vcvars followed by `set` and capture the resulting environment
        s = '"{}" && set'.format(vcvars_path)
        output = os.popen(s).read()

        # each output line is KEY=VALUE; mirror it into our own environment
        for line in output.splitlines():
            pair = line.split("=", 1)
            if (len(pair) >= 2):
                os.environ[pair[0]] = pair[1]

    else: # nothing needed for Linux or Mac
        pass
def import_module(module_name, path):
    """Imports a compiled extension module from an explicit directory.

    NOTE(review): uses the deprecated `imp` module (removed in Python 3.12);
    consider migrating to importlib -- confirm the minimum supported Python
    version before changing.
    """
    # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
    file, path, description = imp.find_module(module_name, [path])

    # Close the .so file after load.
    with file:
        return imp.load_module(module_name, file, path, description)
def rename(name, return_type):
    """Class-decorator factory: stamps the decorated class with a new
    ``__name__``/``key`` of `name`, an empty ``prefix``, and the given
    ``return_type``. Returns the class itself."""
    def decorator(cls):
        for attr, value in (("__name__", name),
                            ("key", name),
                            ("prefix", ""),
                            ("return_type", return_type)):
            setattr(cls, attr, value)
        return cls
    return decorator
# Global registries populated by the @func / @kernel decorators below,
# consumed by compile(): name -> Python function / Kernel wrapper.
user_funcs = {}
user_kernels = {}
def func(f):
    """Decorator: register `f` as a user-defined device function so that
    compile() generates CPU and CUDA code for it.

    Returns `f` unchanged. (The previous version returned None implicitly,
    which clobbered the decorated name at module level; registration in
    `user_funcs` is unaffected by this fix.)
    """
    user_funcs[f.__name__] = f
    return f
def kernel(f):
    """Decorator: wrap `f` in a Kernel record and register it globally so
    compile() will generate, build, and bind its entry points.

    Returns the Kernel wrapper (the decorated name refers to the Kernel,
    not the original function — callers use it via tape.launch).
    """
    # stores source and compiled entry points for a kernel (will be populated after module loads)
    class Kernel:
        def __init__(self, f):
            self.func = f

        def register(self, module):
            # look up entry points based on name; getattr replaces the
            # previous eval("module." + name) — equivalent for attribute
            # access but without evaluating arbitrary code
            name = self.func.__name__
            self.forward_cpu = getattr(module, name + "_cpu_forward")
            self.backward_cpu = getattr(module, name + "_cpu_backward")

            if torch.cuda.is_available():
                self.forward_cuda = getattr(module, name + "_cuda_forward")
                self.backward_cuda = getattr(module, name + "_cuda_backward")

    k = Kernel(f)

    # register globally
    user_kernels[f.__name__] = k

    return k
def compile():
    """Generate C++/CUDA source for all registered user functions and kernels,
    build (or reuse a cached build of) the Torch extension module, bind the
    compiled entry points onto each Kernel, and return the module.

    Side effects: writes generated source to <pkg>/kernels/adjoint.gen and
    populates the global `functions` / `cuda_functions` registries.
    """
    use_cuda = torch.cuda.is_available()
    if not use_cuda:
        print("[INFO] CUDA support not found. Disabling CUDA kernel compilation.")

    cpp_source = ""
    cuda_source = ""

    cpp_source += cpu_module_header
    cuda_source += cuda_module_header

    # kernels
    entry_points = []

    # functions: generate device code and register a typed stub for each
    # user function so kernel codegen can resolve calls and return types
    for name, func in user_funcs.items():

        adj = Adjoint(func, device='cpu')
        cpp_source += codegen_func(adj, device='cpu')

        adj = Adjoint(func, device='cuda')
        cuda_source += codegen_func(adj, device='cuda')

        # import pdb
        # pdb.set_trace()

        import copy  # NOTE(review): unused here — candidate for removal

        @rename(func.__name__ + "_cpu_func", adj.return_var.type)
        class Func:
            @classmethod
            def value_type(cls, *args):
                return cls.return_type

        functions[func.__name__] = Func

        @rename(func.__name__ + "_cuda_func", adj.return_var.type)
        class CUDAFunc:
            @classmethod
            def value_type(cls, *args):
                return cls.return_type

        cuda_functions[func.__name__] = CUDAFunc

    for name, kernel in user_kernels.items():

        if use_cuda:
            # each kernel gets an entry point in the module
            entry_points.append(name + "_cuda_forward")
            entry_points.append(name + "_cuda_backward")

        # each kernel gets an entry point in the module
        entry_points.append(name + "_cpu_forward")
        entry_points.append(name + "_cpu_backward")

        if use_cuda:
            adj = Adjoint(kernel.func, device='cuda')
            cuda_source += codegen_kernel(adj, device='cuda')
            cuda_source += codegen_module(adj, device='cuda')
            cpp_source += codegen_module_decl(adj, device='cuda')

        adj = Adjoint(kernel.func, device='cpu')
        cpp_source += codegen_kernel(adj, device='cpu')
        cpp_source += codegen_module(adj, device='cpu')
        cpp_source += codegen_module_decl(adj, device='cpu')

    include_path = os.path.dirname(os.path.realpath(__file__))
    build_path = os.path.dirname(os.path.realpath(__file__)) + "/kernels"
    cache_file = build_path + "/adjoint.gen"

    if (os.path.exists(build_path) == False):
        os.mkdir(build_path)

    # test cache: skip the (slow) rebuild when the generated C++ source is
    # byte-identical to the last build. NOTE(review): not safe against
    # concurrent builds in the same tree — TODO confirm single-process use.
    if (os.path.exists(cache_file)):

        f = open(cache_file, 'r')
        cache_string = f.read()
        f.close()

        if (cache_string == cpp_source):
            print("Using cached kernels")
            module = import_module("kernels", build_path)

            # register kernel methods
            for k in user_kernels.values():
                k.register(module)

            return module

    # print("ignoring rebuild, using stale kernels")
    # module = import_module("kernels", build_path)
    # return module

    # cache stale, rebuild
    print("Rebuilding kernels")

    set_build_env()

    # debug config
    #module = torch.utils.cpp_extension.load_inline('kernels', [cpp_source], None, entry_points, extra_cflags=["/Zi", "/Od"], extra_ldflags=["/DEBUG"], build_directory=build_path, extra_include_paths=[include_path], verbose=True)

    if os.name == 'nt':
        cpp_flags = ["/Ox", "-DNDEBUG", "/fp:fast"]
        ld_flags = ["-DNDEBUG"]
        # cpp_flags = ["/Zi", "/Od", "/DEBUG"]
        # ld_flags = ["/DEBUG"]
    else:
        # NOTE(review): "-Z" is not a standard gcc/clang flag — verify
        # whether "-g" (or nothing) was intended here.
        cpp_flags = ["-Z", "-O2", "-DNDEBUG"]
        ld_flags = ["-DNDEBUG"]

    # just use minimum to ensure compatability
    # NOTE(review): compute_35 is deprecated/removed in recent CUDA
    # toolkits — may need bumping for CUDA 12+.
    cuda_flags = ['-gencode=arch=compute_35,code=compute_35']

    # release config
    if use_cuda:
        module = torch.utils.cpp_extension.load_inline('kernels',
                                                       cpp_sources=[cpp_source],
                                                       cuda_sources=[cuda_source],
                                                       functions=entry_points,
                                                       extra_cflags=cpp_flags,
                                                       extra_ldflags=ld_flags,
                                                       extra_cuda_cflags=cuda_flags,
                                                       build_directory=build_path,
                                                       extra_include_paths=[include_path],
                                                       verbose=True,
                                                       with_pytorch_error_handling=False)
    else:
        module = torch.utils.cpp_extension.load_inline('kernels',
                                                       cpp_sources=[cpp_source],
                                                       cuda_sources=[],
                                                       functions=entry_points,
                                                       extra_cflags=cpp_flags,
                                                       extra_ldflags=ld_flags,
                                                       extra_cuda_cflags=cuda_flags,
                                                       build_directory=build_path,
                                                       extra_include_paths=[include_path],
                                                       verbose=True,
                                                       with_pytorch_error_handling=False)

    # update cache (written only after a successful build)
    f = open(cache_file, 'w')
    f.write(cpp_source)
    f.close()

    # register kernel methods
    for k in user_kernels.values():
        k.register(module)

    return module
#---------------------------------------------
# Helper functions for launching kernels as Torch ops
def check_adapter(l, a):
    """Assert that every tensor in iterable `l` has device type equal to `a`
    (e.g. 'cpu' or 'cuda'); non-tensor entries are ignored."""
    tensors_only = (item for item in l if torch.is_tensor(item))
    for item in tensors_only:
        assert item.device.type == a
def check_finite(l):
    """Assert that no entry of `l` contains NaN. Tensor entries must also be
    contiguous; non-tensor entries are checked with math.isnan. (Despite the
    name, infinities are not rejected.)"""
    for value in l:
        if not torch.is_tensor(value):
            assert not math.isnan(value)
            continue
        assert value.is_contiguous()
        has_nan = bool(torch.isnan(value).any())
        if has_nan:
            # dump the offending tensor before failing the assertion
            print(value)
        assert not has_nan
def filter_grads(grads):
    """helper that takes a list of gradient tensors and makes non-outputs None
    as required by PyTorch when returning from a custom op

    Entries that are not tensors, or that are empty tensors, become None;
    non-empty tensors are passed through unchanged.
    """
    outputs = []
    for g in grads:
        # numel() instead of len(): len() raises TypeError on 0-dim tensors
        # and only counts the first dimension of multi-dim tensors
        if torch.is_tensor(g) and g.numel() > 0:
            outputs.append(g)
        else:
            outputs.append(None)
    return tuple(outputs)
def make_empty(outputs, device):
    """Return a list containing one empty float tensor on `device` for every
    entry of `outputs` (the entries themselves are not inspected)."""
    return [torch.FloatTensor().to(device) for _ in outputs]
def make_contiguous(grads):
    """Return a list with .contiguous() applied to every tensor in `grads`."""
    return [g.contiguous() for g in grads]
def copy_params(params):
    """Return copies of `params`: tensors are cloned (float32 clones get
    requires_grad enabled so gradcheck can differentiate through them);
    non-tensor values are passed through unchanged."""
    out = []
    for p in params:
        if not torch.is_tensor(p):
            out.append(p)
            continue
        clone = p.clone()
        if clone.dtype == torch.float32:
            clone.requires_grad_()
        out.append(clone)
    return out
def assert_device(device, inputs):
    """helper that asserts that all Tensors in inputs reside on the specified
    device (device should be cpu or cuda). Also checks that dtypes are correct.

    Raises TypeError for float64/float16 tensors or tensors on the wrong
    device, and ValueError for an unrecognized device string.
    """
    for arg in inputs:
        if not isinstance(arg, torch.Tensor):
            continue
        if arg.dtype in (torch.float64, torch.float16):
            raise TypeError("Tensor {arg} has invalid dtype {dtype}".format(arg=arg, dtype=arg.dtype))
        if device == 'cpu':
            # make sure all tensors are on the right device. Can fail silently in the CUDA kernel.
            if arg.is_cuda:
                raise TypeError("Tensor {arg} is using CUDA but was expected to be on the CPU.".format(arg=arg))
        elif torch.device(device).type == 'cuda':
            if not arg.is_cuda:
                raise TypeError("Tensor {arg} is not on a CUDA device but was expected to be using CUDA.".format(arg=arg))
        else:
            raise ValueError("Device {} is not supported".format(device))
def to_weak_list(s):
    """Return a list of weakref.ref objects, one per entry of `s`."""
    return [weakref.ref(obj) for obj in s]
def to_strong_list(w):
    """Dereference each weakref in `w`, returning the referent objects
    (None for any reference whose target has been collected)."""
    return [ref() for ref in w]
# standalone method to launch a kernel using PyTorch graph (skip custom tape)
def launch_torch(func, dim, inputs, outputs, adapter, preserve_output=False, check_grad=False, no_grad=False):
    """Run kernel `func` through a throwaway torch.autograd.Function so
    PyTorch's graph (and optionally torch.autograd.gradcheck) drives the
    forward/backward entry points directly, bypassing the Tape.

    Args:
        func: Kernel wrapper with forward_cpu/forward_cuda and
            backward_cpu/backward_cuda entry points bound.
        dim (int): launch dimension passed as the first kernel argument.
        inputs/outputs: kernel arguments; `outputs` is used only as a shape
            template — fresh zeroed tensors are allocated per forward call.
        adapter (str): torch device string ('cpu' or a cuda device).
        preserve_output: accepted but unused here — TODO confirm intent.
        check_grad / no_grad: when check_grad and not no_grad, runs
            gradcheck (failures are printed, not raised to the caller).

    Returns:
        tuple of output tensors from the final forward call.
    """
    num_inputs = len(inputs)
    num_outputs = len(outputs)

    # define autograd type
    class TorchFunc(torch.autograd.Function):

        @staticmethod
        def forward(ctx, *args):

            #local_inputs = args[0:num_inputs]
            #local_outputs = args[num_inputs:len(args)]

            # save for backward
            #ctx.inputs = list(local_inputs)
            ctx.inputs = args

            # allocate fresh zeroed outputs for every call (shapes copied
            # from the closed-over `outputs` template)
            local_outputs = []
            for o in outputs:
                local_outputs.append(torch.zeros_like(o, requires_grad=True))

            ctx.outputs = local_outputs

            # ensure inputs match adapter
            assert_device(adapter, args)

            # launch
            if adapter == 'cpu':
                func.forward_cpu(*[dim, *args, *ctx.outputs])
            elif torch.device(adapter).type == 'cuda':  #elif adapter.startswith('cuda'):
                func.forward_cuda(*[dim, *args, *ctx.outputs])

            ret = tuple(ctx.outputs)
            return ret

        @staticmethod
        def backward(ctx, *grads):

            # ensure grads are contiguous in memory
            adj_outputs = make_contiguous(grads)

            # alloc grads
            adj_inputs = alloc_grads(ctx.inputs, adapter)

            # if we don't need outputs then make empty tensors to skip the write
            local_outputs = ctx.outputs
            # if preserve_output == True:
            #     local_outputs = ctx.outputs
            # else:
            #     local_outputs = []
            #     for o in range(num_outputs):
            #         local_outputs.append(torch.FloatTensor().to(adapter))

            # print("backward")
            # print("--------")
            # print ("   inputs")
            # for i in ctx.inputs:
            #     print(i)
            # print ("   outputs")
            # for o in ctx.outputs:
            #     print(o)
            # print ("   adj_inputs")
            # for adj_i in adj_inputs:
            #     print(adj_i)
            # print ("   adj_outputs")
            # for adj_o in adj_outputs:
            #     print(adj_o)

            # launch
            if adapter == 'cpu':
                func.backward_cpu(*[dim, *ctx.inputs, *local_outputs, *adj_inputs, *adj_outputs])
            elif torch.device(adapter).type == 'cuda':  #elif adapter.startswith('cuda'):
                func.backward_cuda(*[dim, *ctx.inputs, *local_outputs, *adj_inputs, *adj_outputs])

            # filter grads replaces empty tensors / constant params with None
            ret = list(filter_grads(adj_inputs))

            # PyTorch expects one grad slot per forward argument; the extra
            # None entries cover... NOTE(review): forward only receives the
            # inputs, so why num_outputs extra slots — confirm against callers.
            for i in range(num_outputs):
                ret.append(None)

            return tuple(ret)

    # run
    params = [*inputs]

    torch.set_printoptions(edgeitems=3)

    if (check_grad == True and no_grad == False):
        try:
            torch.autograd.gradcheck(TorchFunc.apply, params, eps=1e-2, atol=1e-3, rtol=1.e-3, raise_exception=True)
        except Exception as e:
            # gradcheck failures are reported but do not abort the launch
            print(str(func.func.__name__) + " failed: " + str(e))

    output = TorchFunc.apply(*params)
    return output
class Tape:
    """Records kernel launches during the forward pass so that replay() can
    run their adjoint (backward) entry points in reverse order, accumulating
    gradients into per-tensor adjoint buffers.

    Attributes:
        launches: recorded launches as [func, dim, inputs, outputs, adapter,
            preserve_output] lists, in forward order.
        adjoints: dict mapping tensor objects to their adjoint tensors.
            NOTE(review): tensors are used as dict keys, so lookup is by
            object identity — the same tensor objects must flow through
            forward and backward.
    """

    def __init__(self):
        self.launches = []

        # dictionary mapping Tensor inputs to their adjoint
        self.adjoints = {}

    def launch(self, func, dim, inputs, outputs, adapter, preserve_output=False, skip_check_grad=False):
        """Run `func`'s forward entry point and record the launch for later
        replay. A non-positive `dim` is a no-op."""
        if (dim > 0):

            # run kernel
            if adapter == 'cpu':
                func.forward_cpu(*[dim, *inputs, *outputs])
            elif torch.device(adapter).type == 'cuda':  #adapter.startswith('cuda'):
                func.forward_cuda(*[dim, *inputs, *outputs])

            if dflex.config.verify_fp:
                check_adapter(inputs, adapter)
                check_adapter(outputs, adapter)
                check_finite(inputs)
                check_finite(outputs)

            # record launch
            if dflex.config.no_grad == False:
                self.launches.append([func, dim, inputs, outputs, adapter, preserve_output])

            # optionally run grad check
            if dflex.config.check_grad == True and skip_check_grad == False:

                # copy inputs and outputs to avoid disturbing the computational graph
                inputs_copy = copy_params(inputs)
                outputs_copy = copy_params(outputs)

                launch_torch(func, dim, inputs_copy, outputs_copy, adapter, preserve_output, check_grad=True)

    def replay(self):
        """Replay recorded launches in reverse, calling each kernel's
        backward entry point. Input adjoints are created on demand; missing
        output adjoints are zero-filled (the kernels still read them)."""
        for kernel in reversed(self.launches):

            func = kernel[0]
            dim = kernel[1]
            inputs = kernel[2]
            #outputs = to_strong_list(kernel[3])
            outputs = kernel[3]
            adapter = kernel[4]

            # lookup adj_inputs
            adj_inputs = []
            adj_outputs = []

            # build input adjoints
            for i in inputs:
                if i in self.adjoints:
                    adj_inputs.append(self.adjoints[i])
                else:
                    if torch.is_tensor(i):
                        adj_inputs.append(self.alloc_grad(i))
                    else:
                        # non-tensor constants get a default-constructed
                        # stand-in of the same type (e.g. 0 for int)
                        adj_inputs.append(type(i)())

            # build output adjoints
            for o in outputs:
                if o in self.adjoints:
                    adj_outputs.append(self.adjoints[o])
                else:
                    # no output adjoint means the output wasn't used in the loss function so
                    # allocate a zero tensor (they will still be read by the kernels)
                    adj_outputs.append(self.alloc_grad(o))

            # launch reverse
            if adapter == 'cpu':
                func.backward_cpu(*[dim, *inputs, *outputs, *adj_inputs, *adj_outputs])
            elif torch.device(adapter).type == 'cuda':  #elif adapter.startswith('cuda'):
                func.backward_cuda(*[dim, *inputs, *outputs, *adj_inputs, *adj_outputs])

            if dflex.config.verify_fp:
                check_finite(inputs)
                check_finite(outputs)
                check_finite(adj_inputs)
                check_finite(adj_outputs)

    def reset(self):
        """Discard all recorded launches and adjoint buffers."""
        self.adjoints = {}
        self.launches = []

    def alloc_grad(self, t):
        """Return (and, for differentiable float32 tensors, cache in
        self.adjoints) a zeroed adjoint buffer for tensor `t`; other tensors
        get an uncached empty tensor on the same device."""
        if t.dtype == torch.float32 and t.requires_grad:
            # zero tensor
            self.adjoints[t] = torch.zeros_like(t)
            return self.adjoints[t]
        else:
            # null tensor
            return torch.FloatTensor().to(t.device)
# helper that given a set of inputs, will generate a set of output grad buffers
def alloc_grads(inputs, adapter):
    """helper that generates output grad buffers for a set of inputs
    on the specified device.

    Args:
        inputs (iterable of Tensors, other literals): list of Tensors
            to generate gradient buffers for. Non-tensors are ignored.
        adapter (str, optional): name of torch device for storage location
            of allocated gradient buffers. Defaults to 'cpu'.
    """
    buffers = []
    for value in inputs:
        if not torch.is_tensor(value):
            # non-tensor constants get a default-constructed stand-in
            buffers.append(type(value)())
        elif value.requires_grad and value.dtype == torch.float:
            # differentiable float tensor: zeroed buffer of matching shape
            buffers.append(torch.zeros_like(value, device=adapter))
            #buffers.append(lookup_grad(value))
        else:
            # non-differentiable tensor: empty placeholder on the device
            buffers.append(torch.FloatTensor().to(adapter))
    return buffers
def matmul(tape, m, n, k, t1, t2, A, B, C, adapter):
    """Record a dense GEMM (C = op(A) * op(B), with t1/t2 selecting
    transposes) on `tape` via the dflex.eval_dense_gemm kernel."""
    # single thread on the CPU backend; one full threadblock on CUDA
    # (should match the threadblock size)
    threads = 1 if adapter == 'cpu' else 256

    tape.launch(func=dflex.eval_dense_gemm,
                dim=threads,
                inputs=[m, n, k, t1, t2, A, B],
                outputs=[C],
                adapter=adapter,
                preserve_output=False)
def matmul_batched(tape, batch_count, m, n, k, t1, t2, A_start, B_start, C_start, A, B, C, adapter):
    """Record a batched dense GEMM on `tape` via the
    dflex.eval_dense_gemm_batched kernel; A_start/B_start/C_start give the
    per-batch offsets into the flat A/B/C buffers."""
    # one thread per batch on CPU; one threadblock per batch on CUDA
    # (must match the threadblock size used in adjoint.py)
    threads = batch_count if adapter == 'cpu' else 256 * batch_count

    tape.launch(func=dflex.eval_dense_gemm_batched,
                dim=threads,
                inputs=[m, n, k, t1, t2, A_start, B_start, C_start, A, B],
                outputs=[C],
                adapter=adapter,
                preserve_output=False)
| 61,332 |
Python
| 25.68973 | 229 | 0.535283 |
RoboticExplorationLab/Deep-ILC/dflex/dflex/kernels/main.cpp
|
#include <torch/extension.h>
#define CPU
#include "adjoint.h"
using namespace df;
// Reinterpret a torch::Tensor's raw data pointer as device type T.
// NOTE(review): the doubled braces are an artifact of Python str.format
// escaping ({{ -> {) in the template this file was generated from; they
// are still valid C++ (a nested compound statement).
template <typename T>
T cast(torch::Tensor t)
{{
    return (T)(t.data_ptr());
}}
// Machine-generated forward (primal) evaluation of the user function `test`
// for the CPU backend. var_N are SSA-style temporaries emitted by the dflex
// code generator; do not edit by hand (regenerated by compile()).
// NOTE(review): `df::float(...)` / `df::int(...)` use reserved keywords as
// function names and look like a codegen artifact — confirm the df headers
// actually declare these casts.
float test_cpu_func(
    float var_c)
{
    //---------
    // primal vars
    const float var_0 = 1.0;
    const int var_1 = 2;
    float var_2;
    const float var_3 = 3.0;
    int var_4;
    bool var_5;
    const float var_6 = 2.0;
    float var_7;
    const float var_8 = 6.0;
    float var_9;
    //---------
    // forward
    var_2 = df::float(var_1);
    var_4 = df::int(var_3);
    df::print(var_2);
    df::print(var_4);
    var_5 = (var_c < var_3);
    if (var_5) {
    }
    var_7 = df::select(var_5, var_0, var_6);
    var_9 = df::mul(var_7, var_8);
    return var_9;
}
// Machine-generated reverse-mode adjoint of test_cpu_func. Replays the
// primal computation, then runs the adjoint statements in reverse order,
// accumulating the derivative of the return value (adj_ret) into adj_c.
void adj_test_cpu_func(
    float var_c,
    float & adj_c,
    float & adj_ret)
{
    //---------
    // primal vars
    const float var_0 = 1.0;
    const int var_1 = 2;
    float var_2;
    const float var_3 = 3.0;
    int var_4;
    bool var_5;
    const float var_6 = 2.0;
    float var_7;
    const float var_8 = 6.0;
    float var_9;
    //---------
    // dual vars
    float adj_0 = 0;
    int adj_1 = 0;
    float adj_2 = 0;
    float adj_3 = 0;
    int adj_4 = 0;
    bool adj_5 = 0;
    float adj_6 = 0;
    float adj_7 = 0;
    float adj_8 = 0;
    float adj_9 = 0;
    //---------
    // forward (replay of the primal, ending in a jump to the reverse sweep)
    var_2 = df::float(var_1);
    var_4 = df::int(var_3);
    df::print(var_2);
    df::print(var_4);
    var_5 = (var_c < var_3);
    if (var_5) {
    }
    var_7 = df::select(var_5, var_0, var_6);
    var_9 = df::mul(var_7, var_8);
    goto label0;
    //---------
    // reverse
    label0:;
    adj_9 += adj_ret;
    df::adj_mul(var_7, var_8, adj_7, adj_8, adj_9);
    df::adj_select(var_5, var_0, var_6, adj_5, adj_0, adj_6, adj_7);
    if (var_5) {
    }
    df::adj_print(var_4, adj_4);
    df::adj_print(var_2, adj_2);
    df::adj_int(var_3, adj_3, adj_4);
    df::adj_float(var_1, adj_1, adj_2);
    return;
}
// Machine-generated forward evaluation: barycentric coordinates of the point
// on triangle (a, b, c) closest to p. The branch structure matches the
// standard Voronoi-region closest-point-on-triangle algorithm (vertex, edge
// and face regions tested in turn) — presumably derived from Ericson's
// "Real-Time Collision Detection" formulation; verify against the Python
// source kernel. Do not edit by hand (regenerated by compile()).
df::float3 triangle_closest_point_barycentric_cpu_func(
    df::float3 var_a,
    df::float3 var_b,
    df::float3 var_c,
    df::float3 var_p)
{
    //---------
    // primal vars
    df::float3 var_0;
    df::float3 var_1;
    df::float3 var_2;
    float var_3;
    float var_4;
    const float var_5 = 0.0;
    bool var_6;
    bool var_7;
    bool var_8;
    const float var_9 = 1.0;
    df::float3 var_10;
    df::float3 var_11;
    float var_12;
    float var_13;
    bool var_14;
    bool var_15;
    bool var_16;
    df::float3 var_17;
    df::float3 var_18;
    float var_19;
    float var_20;
    float var_21;
    float var_22;
    float var_23;
    bool var_24;
    bool var_25;
    bool var_26;
    bool var_27;
    float var_28;
    df::float3 var_29;
    df::float3 var_30;
    df::float3 var_31;
    float var_32;
    float var_33;
    bool var_34;
    bool var_35;
    bool var_36;
    df::float3 var_37;
    df::float3 var_38;
    float var_39;
    float var_40;
    float var_41;
    float var_42;
    float var_43;
    bool var_44;
    bool var_45;
    bool var_46;
    bool var_47;
    float var_48;
    df::float3 var_49;
    df::float3 var_50;
    float var_51;
    float var_52;
    float var_53;
    float var_54;
    float var_55;
    float var_56;
    float var_57;
    float var_58;
    bool var_59;
    float var_60;
    bool var_61;
    float var_62;
    bool var_63;
    bool var_64;
    float var_65;
    df::float3 var_66;
    df::float3 var_67;
    float var_68;
    float var_69;
    float var_70;
    float var_71;
    float var_72;
    float var_73;
    float var_74;
    df::float3 var_75;
    //---------
    // forward
    var_0 = df::sub(var_b, var_a);
    var_1 = df::sub(var_c, var_a);
    var_2 = df::sub(var_p, var_a);
    var_3 = df::dot(var_0, var_2);
    var_4 = df::dot(var_1, var_2);
    var_6 = (var_3 <= var_5);
    var_7 = (var_4 <= var_5);
    var_8 = var_6 && var_7;
    // vertex region A
    if (var_8) {
        var_10 = df::float3(var_9, var_5, var_5);
        return var_10;
    }
    var_11 = df::sub(var_p, var_b);
    var_12 = df::dot(var_0, var_11);
    var_13 = df::dot(var_1, var_11);
    var_14 = (var_12 >= var_5);
    var_15 = (var_13 <= var_12);
    var_16 = var_14 && var_15;
    // vertex region B
    if (var_16) {
        var_17 = df::float3(var_5, var_9, var_5);
        return var_17;
    }
    var_18 = df::select(var_16, var_10, var_17);
    var_19 = df::mul(var_3, var_13);
    var_20 = df::mul(var_12, var_4);
    var_21 = df::sub(var_19, var_20);
    var_22 = df::sub(var_3, var_12);
    var_23 = df::div(var_3, var_22);
    var_24 = (var_21 <= var_5);
    var_25 = (var_3 >= var_5);
    var_26 = (var_12 <= var_5);
    var_27 = var_24 && var_25 && var_26;
    // edge region AB
    if (var_27) {
        var_28 = df::sub(var_9, var_23);
        var_29 = df::float3(var_28, var_23, var_5);
        return var_29;
    }
    var_30 = df::select(var_27, var_18, var_29);
    var_31 = df::sub(var_p, var_c);
    var_32 = df::dot(var_0, var_31);
    var_33 = df::dot(var_1, var_31);
    var_34 = (var_33 >= var_5);
    var_35 = (var_32 <= var_33);
    var_36 = var_34 && var_35;
    // vertex region C
    if (var_36) {
        var_37 = df::float3(var_5, var_5, var_9);
        return var_37;
    }
    var_38 = df::select(var_36, var_30, var_37);
    var_39 = df::mul(var_32, var_4);
    var_40 = df::mul(var_3, var_33);
    var_41 = df::sub(var_39, var_40);
    var_42 = df::sub(var_4, var_33);
    var_43 = df::div(var_4, var_42);
    var_44 = (var_41 <= var_5);
    var_45 = (var_4 >= var_5);
    var_46 = (var_33 <= var_5);
    var_47 = var_44 && var_45 && var_46;
    // edge region AC
    if (var_47) {
        var_48 = df::sub(var_9, var_43);
        var_49 = df::float3(var_48, var_5, var_43);
        return var_49;
    }
    var_50 = df::select(var_47, var_38, var_49);
    var_51 = df::mul(var_12, var_33);
    var_52 = df::mul(var_32, var_13);
    var_53 = df::sub(var_51, var_52);
    var_54 = df::sub(var_13, var_12);
    var_55 = df::sub(var_13, var_12);
    var_56 = df::sub(var_32, var_33);
    var_57 = df::add(var_55, var_56);
    var_58 = df::div(var_54, var_57);
    var_59 = (var_53 <= var_5);
    var_60 = df::sub(var_13, var_12);
    var_61 = (var_60 >= var_5);
    var_62 = df::sub(var_32, var_33);
    var_63 = (var_62 >= var_5);
    var_64 = var_59 && var_61 && var_63;
    // edge region BC
    if (var_64) {
        var_65 = df::sub(var_9, var_58);
        var_66 = df::float3(var_5, var_58, var_65);
        return var_66;
    }
    var_67 = df::select(var_64, var_50, var_66);
    // interior (face) region: normalized barycentric weights
    var_68 = df::add(var_53, var_41);
    var_69 = df::add(var_68, var_21);
    var_70 = df::div(var_9, var_69);
    var_71 = df::mul(var_41, var_70);
    var_72 = df::mul(var_21, var_70);
    var_73 = df::sub(var_9, var_71);
    var_74 = df::sub(var_73, var_72);
    var_75 = df::float3(var_74, var_71, var_72);
    return var_75;
}
// Machine-generated reverse-mode adjoint of
// triangle_closest_point_barycentric_cpu_func. The forward section replays
// the primal computation; each early `return` becomes a `goto labelN`, and
// the reverse sweep re-enters at the matching label inside the corresponding
// branch, so only the adjoints along the taken path accumulate. Gradients of
// the returned barycentric coordinates (adj_ret) flow back into adj_a..adj_p.
// Do not edit by hand (regenerated by compile()).
void adj_triangle_closest_point_barycentric_cpu_func(
    df::float3 var_a,
    df::float3 var_b,
    df::float3 var_c,
    df::float3 var_p,
    df::float3 & adj_a,
    df::float3 & adj_b,
    df::float3 & adj_c,
    df::float3 & adj_p,
    df::float3 & adj_ret)
{
    //---------
    // primal vars
    df::float3 var_0;
    df::float3 var_1;
    df::float3 var_2;
    float var_3;
    float var_4;
    const float var_5 = 0.0;
    bool var_6;
    bool var_7;
    bool var_8;
    const float var_9 = 1.0;
    df::float3 var_10;
    df::float3 var_11;
    float var_12;
    float var_13;
    bool var_14;
    bool var_15;
    bool var_16;
    df::float3 var_17;
    df::float3 var_18;
    float var_19;
    float var_20;
    float var_21;
    float var_22;
    float var_23;
    bool var_24;
    bool var_25;
    bool var_26;
    bool var_27;
    float var_28;
    df::float3 var_29;
    df::float3 var_30;
    df::float3 var_31;
    float var_32;
    float var_33;
    bool var_34;
    bool var_35;
    bool var_36;
    df::float3 var_37;
    df::float3 var_38;
    float var_39;
    float var_40;
    float var_41;
    float var_42;
    float var_43;
    bool var_44;
    bool var_45;
    bool var_46;
    bool var_47;
    float var_48;
    df::float3 var_49;
    df::float3 var_50;
    float var_51;
    float var_52;
    float var_53;
    float var_54;
    float var_55;
    float var_56;
    float var_57;
    float var_58;
    bool var_59;
    float var_60;
    bool var_61;
    float var_62;
    bool var_63;
    bool var_64;
    float var_65;
    df::float3 var_66;
    df::float3 var_67;
    float var_68;
    float var_69;
    float var_70;
    float var_71;
    float var_72;
    float var_73;
    float var_74;
    df::float3 var_75;
    //---------
    // dual vars (one adjoint accumulator per primal temporary)
    df::float3 adj_0 = 0;
    df::float3 adj_1 = 0;
    df::float3 adj_2 = 0;
    float adj_3 = 0;
    float adj_4 = 0;
    float adj_5 = 0;
    bool adj_6 = 0;
    bool adj_7 = 0;
    bool adj_8 = 0;
    float adj_9 = 0;
    df::float3 adj_10 = 0;
    df::float3 adj_11 = 0;
    float adj_12 = 0;
    float adj_13 = 0;
    bool adj_14 = 0;
    bool adj_15 = 0;
    bool adj_16 = 0;
    df::float3 adj_17 = 0;
    df::float3 adj_18 = 0;
    float adj_19 = 0;
    float adj_20 = 0;
    float adj_21 = 0;
    float adj_22 = 0;
    float adj_23 = 0;
    bool adj_24 = 0;
    bool adj_25 = 0;
    bool adj_26 = 0;
    bool adj_27 = 0;
    float adj_28 = 0;
    df::float3 adj_29 = 0;
    df::float3 adj_30 = 0;
    df::float3 adj_31 = 0;
    float adj_32 = 0;
    float adj_33 = 0;
    bool adj_34 = 0;
    bool adj_35 = 0;
    bool adj_36 = 0;
    df::float3 adj_37 = 0;
    df::float3 adj_38 = 0;
    float adj_39 = 0;
    float adj_40 = 0;
    float adj_41 = 0;
    float adj_42 = 0;
    float adj_43 = 0;
    bool adj_44 = 0;
    bool adj_45 = 0;
    bool adj_46 = 0;
    bool adj_47 = 0;
    float adj_48 = 0;
    df::float3 adj_49 = 0;
    df::float3 adj_50 = 0;
    float adj_51 = 0;
    float adj_52 = 0;
    float adj_53 = 0;
    float adj_54 = 0;
    float adj_55 = 0;
    float adj_56 = 0;
    float adj_57 = 0;
    float adj_58 = 0;
    bool adj_59 = 0;
    float adj_60 = 0;
    bool adj_61 = 0;
    float adj_62 = 0;
    bool adj_63 = 0;
    bool adj_64 = 0;
    float adj_65 = 0;
    df::float3 adj_66 = 0;
    df::float3 adj_67 = 0;
    float adj_68 = 0;
    float adj_69 = 0;
    float adj_70 = 0;
    float adj_71 = 0;
    float adj_72 = 0;
    float adj_73 = 0;
    float adj_74 = 0;
    df::float3 adj_75 = 0;
    //---------
    // forward (primal replay; early returns become jumps to labels)
    var_0 = df::sub(var_b, var_a);
    var_1 = df::sub(var_c, var_a);
    var_2 = df::sub(var_p, var_a);
    var_3 = df::dot(var_0, var_2);
    var_4 = df::dot(var_1, var_2);
    var_6 = (var_3 <= var_5);
    var_7 = (var_4 <= var_5);
    var_8 = var_6 && var_7;
    if (var_8) {
        var_10 = df::float3(var_9, var_5, var_5);
        goto label0;
    }
    var_11 = df::sub(var_p, var_b);
    var_12 = df::dot(var_0, var_11);
    var_13 = df::dot(var_1, var_11);
    var_14 = (var_12 >= var_5);
    var_15 = (var_13 <= var_12);
    var_16 = var_14 && var_15;
    if (var_16) {
        var_17 = df::float3(var_5, var_9, var_5);
        goto label1;
    }
    var_18 = df::select(var_16, var_10, var_17);
    var_19 = df::mul(var_3, var_13);
    var_20 = df::mul(var_12, var_4);
    var_21 = df::sub(var_19, var_20);
    var_22 = df::sub(var_3, var_12);
    var_23 = df::div(var_3, var_22);
    var_24 = (var_21 <= var_5);
    var_25 = (var_3 >= var_5);
    var_26 = (var_12 <= var_5);
    var_27 = var_24 && var_25 && var_26;
    if (var_27) {
        var_28 = df::sub(var_9, var_23);
        var_29 = df::float3(var_28, var_23, var_5);
        goto label2;
    }
    var_30 = df::select(var_27, var_18, var_29);
    var_31 = df::sub(var_p, var_c);
    var_32 = df::dot(var_0, var_31);
    var_33 = df::dot(var_1, var_31);
    var_34 = (var_33 >= var_5);
    var_35 = (var_32 <= var_33);
    var_36 = var_34 && var_35;
    if (var_36) {
        var_37 = df::float3(var_5, var_5, var_9);
        goto label3;
    }
    var_38 = df::select(var_36, var_30, var_37);
    var_39 = df::mul(var_32, var_4);
    var_40 = df::mul(var_3, var_33);
    var_41 = df::sub(var_39, var_40);
    var_42 = df::sub(var_4, var_33);
    var_43 = df::div(var_4, var_42);
    var_44 = (var_41 <= var_5);
    var_45 = (var_4 >= var_5);
    var_46 = (var_33 <= var_5);
    var_47 = var_44 && var_45 && var_46;
    if (var_47) {
        var_48 = df::sub(var_9, var_43);
        var_49 = df::float3(var_48, var_5, var_43);
        goto label4;
    }
    var_50 = df::select(var_47, var_38, var_49);
    var_51 = df::mul(var_12, var_33);
    var_52 = df::mul(var_32, var_13);
    var_53 = df::sub(var_51, var_52);
    var_54 = df::sub(var_13, var_12);
    var_55 = df::sub(var_13, var_12);
    var_56 = df::sub(var_32, var_33);
    var_57 = df::add(var_55, var_56);
    var_58 = df::div(var_54, var_57);
    var_59 = (var_53 <= var_5);
    var_60 = df::sub(var_13, var_12);
    var_61 = (var_60 >= var_5);
    var_62 = df::sub(var_32, var_33);
    var_63 = (var_62 >= var_5);
    var_64 = var_59 && var_61 && var_63;
    if (var_64) {
        var_65 = df::sub(var_9, var_58);
        var_66 = df::float3(var_5, var_58, var_65);
        goto label5;
    }
    var_67 = df::select(var_64, var_50, var_66);
    var_68 = df::add(var_53, var_41);
    var_69 = df::add(var_68, var_21);
    var_70 = df::div(var_9, var_69);
    var_71 = df::mul(var_41, var_70);
    var_72 = df::mul(var_21, var_70);
    var_73 = df::sub(var_9, var_71);
    var_74 = df::sub(var_73, var_72);
    var_75 = df::float3(var_74, var_71, var_72);
    goto label6;
    //---------
    // reverse (adjoint statements in reverse primal order; each labelN is the
    // re-entry point for the corresponding early-return path)
    label6:;
    adj_75 += adj_ret;
    df::adj_float3(var_74, var_71, var_72, adj_74, adj_71, adj_72, adj_75);
    df::adj_sub(var_73, var_72, adj_73, adj_72, adj_74);
    df::adj_sub(var_9, var_71, adj_9, adj_71, adj_73);
    df::adj_mul(var_21, var_70, adj_21, adj_70, adj_72);
    df::adj_mul(var_41, var_70, adj_41, adj_70, adj_71);
    df::adj_div(var_9, var_69, adj_9, adj_69, adj_70);
    df::adj_add(var_68, var_21, adj_68, adj_21, adj_69);
    df::adj_add(var_53, var_41, adj_53, adj_41, adj_68);
    df::adj_select(var_64, var_50, var_66, adj_64, adj_50, adj_66, adj_67);
    if (var_64) {
        label5:;
        adj_66 += adj_ret;
        df::adj_float3(var_5, var_58, var_65, adj_5, adj_58, adj_65, adj_66);
        df::adj_sub(var_9, var_58, adj_9, adj_58, adj_65);
    }
    df::adj_sub(var_32, var_33, adj_32, adj_33, adj_62);
    df::adj_sub(var_13, var_12, adj_13, adj_12, adj_60);
    df::adj_div(var_54, var_57, adj_54, adj_57, adj_58);
    df::adj_add(var_55, var_56, adj_55, adj_56, adj_57);
    df::adj_sub(var_32, var_33, adj_32, adj_33, adj_56);
    df::adj_sub(var_13, var_12, adj_13, adj_12, adj_55);
    df::adj_sub(var_13, var_12, adj_13, adj_12, adj_54);
    df::adj_sub(var_51, var_52, adj_51, adj_52, adj_53);
    df::adj_mul(var_32, var_13, adj_32, adj_13, adj_52);
    df::adj_mul(var_12, var_33, adj_12, adj_33, adj_51);
    df::adj_select(var_47, var_38, var_49, adj_47, adj_38, adj_49, adj_50);
    if (var_47) {
        label4:;
        adj_49 += adj_ret;
        df::adj_float3(var_48, var_5, var_43, adj_48, adj_5, adj_43, adj_49);
        df::adj_sub(var_9, var_43, adj_9, adj_43, adj_48);
    }
    df::adj_div(var_4, var_42, adj_4, adj_42, adj_43);
    df::adj_sub(var_4, var_33, adj_4, adj_33, adj_42);
    df::adj_sub(var_39, var_40, adj_39, adj_40, adj_41);
    df::adj_mul(var_3, var_33, adj_3, adj_33, adj_40);
    df::adj_mul(var_32, var_4, adj_32, adj_4, adj_39);
    df::adj_select(var_36, var_30, var_37, adj_36, adj_30, adj_37, adj_38);
    if (var_36) {
        label3:;
        adj_37 += adj_ret;
        df::adj_float3(var_5, var_5, var_9, adj_5, adj_5, adj_9, adj_37);
    }
    df::adj_dot(var_1, var_31, adj_1, adj_31, adj_33);
    df::adj_dot(var_0, var_31, adj_0, adj_31, adj_32);
    df::adj_sub(var_p, var_c, adj_p, adj_c, adj_31);
    df::adj_select(var_27, var_18, var_29, adj_27, adj_18, adj_29, adj_30);
    if (var_27) {
        label2:;
        adj_29 += adj_ret;
        df::adj_float3(var_28, var_23, var_5, adj_28, adj_23, adj_5, adj_29);
        df::adj_sub(var_9, var_23, adj_9, adj_23, adj_28);
    }
    df::adj_div(var_3, var_22, adj_3, adj_22, adj_23);
    df::adj_sub(var_3, var_12, adj_3, adj_12, adj_22);
    df::adj_sub(var_19, var_20, adj_19, adj_20, adj_21);
    df::adj_mul(var_12, var_4, adj_12, adj_4, adj_20);
    df::adj_mul(var_3, var_13, adj_3, adj_13, adj_19);
    df::adj_select(var_16, var_10, var_17, adj_16, adj_10, adj_17, adj_18);
    if (var_16) {
        label1:;
        adj_17 += adj_ret;
        df::adj_float3(var_5, var_9, var_5, adj_5, adj_9, adj_5, adj_17);
    }
    df::adj_dot(var_1, var_11, adj_1, adj_11, adj_13);
    df::adj_dot(var_0, var_11, adj_0, adj_11, adj_12);
    df::adj_sub(var_p, var_b, adj_p, adj_b, adj_11);
    if (var_8) {
        label0:;
        adj_10 += adj_ret;
        df::adj_float3(var_9, var_5, var_5, adj_9, adj_5, adj_5, adj_10);
    }
    df::adj_dot(var_1, var_2, adj_1, adj_2, adj_4);
    df::adj_dot(var_0, var_2, adj_0, adj_2, adj_3);
    df::adj_sub(var_p, var_a, adj_p, adj_a, adj_2);
    df::adj_sub(var_c, var_a, adj_c, adj_a, adj_1);
    df::adj_sub(var_b, var_a, adj_b, adj_a, adj_0);
    return;
}
// Machine-generated forward evaluation: signed distance from point p to the
// surface of the sphere at `center` with radius `radius`
// (length(p - center) - radius). Do not edit by hand.
float sphere_sdf_cpu_func(
    df::float3 var_center,
    float var_radius,
    df::float3 var_p)
{
    //---------
    // primal vars
    df::float3 var_0;
    float var_1;
    float var_2;
    //---------
    // forward
    var_0 = df::sub(var_p, var_center);
    var_1 = df::length(var_0);
    var_2 = df::sub(var_1, var_radius);
    return var_2;
}
// Machine-generated reverse-mode adjoint of sphere_sdf_cpu_func: propagates
// adj_ret (gradient of the SDF value) back into adj_center, adj_radius and
// adj_p. Do not edit by hand.
void adj_sphere_sdf_cpu_func(
    df::float3 var_center,
    float var_radius,
    df::float3 var_p,
    df::float3 & adj_center,
    float & adj_radius,
    df::float3 & adj_p,
    float & adj_ret)
{
    //---------
    // primal vars
    df::float3 var_0;
    float var_1;
    float var_2;
    //---------
    // dual vars
    df::float3 adj_0 = 0;
    float adj_1 = 0;
    float adj_2 = 0;
    //---------
    // forward (primal replay)
    var_0 = df::sub(var_p, var_center);
    var_1 = df::length(var_0);
    var_2 = df::sub(var_1, var_radius);
    goto label0;
    //---------
    // reverse
    label0:;
    adj_2 += adj_ret;
    df::adj_sub(var_1, var_radius, adj_1, adj_radius, adj_2);
    df::adj_length(var_0, adj_0, adj_1);
    df::adj_sub(var_p, var_center, adj_p, adj_center, adj_0);
    return;
}
// Machine-generated forward evaluation: spatial gradient of the sphere SDF,
// i.e. the unit vector from `center` toward p (normalize(p - center)).
// `radius` is accepted but unused by the computation. Do not edit by hand.
df::float3 sphere_sdf_grad_cpu_func(
    df::float3 var_center,
    float var_radius,
    df::float3 var_p)
{
    //---------
    // primal vars
    df::float3 var_0;
    df::float3 var_1;
    //---------
    // forward
    var_0 = df::sub(var_p, var_center);
    var_1 = df::normalize(var_0);
    return var_1;
}
// Machine-generated reverse-mode adjoint of sphere_sdf_grad_cpu_func.
// adj_radius is never written because the primal never reads `radius`.
// Do not edit by hand.
void adj_sphere_sdf_grad_cpu_func(
    df::float3 var_center,
    float var_radius,
    df::float3 var_p,
    df::float3 & adj_center,
    float & adj_radius,
    df::float3 & adj_p,
    df::float3 & adj_ret)
{
    //---------
    // primal vars
    df::float3 var_0;
    df::float3 var_1;
    //---------
    // dual vars
    df::float3 adj_0 = 0;
    df::float3 adj_1 = 0;
    //---------
    // forward (primal replay)
    var_0 = df::sub(var_p, var_center);
    var_1 = df::normalize(var_0);
    goto label0;
    //---------
    // reverse
    label0:;
    adj_1 += adj_ret;
    df::adj_normalize(var_0, adj_0, adj_1);
    df::adj_sub(var_p, var_center, adj_p, adj_center, adj_0);
    return;
}
// Machine-generated forward evaluation: signed distance from p to an
// axis-aligned box centered at the origin with per-axis extents `upper`
// (per component: q = |p| - upper; result = length(max(q, 0)) +
// min(max(q.x, q.y, q.z), 0) — the standard box-SDF formulation).
// Do not edit by hand.
float box_sdf_cpu_func(
    df::float3 var_upper,
    df::float3 var_p)
{
    //---------
    // primal vars
    const int var_0 = 0;
    float var_1;
    float var_2;
    float var_3;
    float var_4;
    const int var_5 = 1;
    float var_6;
    float var_7;
    float var_8;
    float var_9;
    const int var_10 = 2;
    float var_11;
    float var_12;
    float var_13;
    float var_14;
    const float var_15 = 0.0;
    float var_16;
    float var_17;
    float var_18;
    df::float3 var_19;
    float var_20;
    float var_21;
    float var_22;
    float var_23;
    float var_24;
    //---------
    // forward
    // var_4/var_9/var_14 = |p[i]| - upper[i] for axes x/y/z
    var_1 = df::index(var_p, var_0);
    var_2 = df::abs(var_1);
    var_3 = df::index(var_upper, var_0);
    var_4 = df::sub(var_2, var_3);
    var_6 = df::index(var_p, var_5);
    var_7 = df::abs(var_6);
    var_8 = df::index(var_upper, var_5);
    var_9 = df::sub(var_7, var_8);
    var_11 = df::index(var_p, var_10);
    var_12 = df::abs(var_11);
    var_13 = df::index(var_upper, var_10);
    var_14 = df::sub(var_12, var_13);
    var_16 = df::max(var_4, var_15);
    var_17 = df::max(var_9, var_15);
    var_18 = df::max(var_14, var_15);
    var_19 = df::float3(var_16, var_17, var_18);
    var_20 = df::length(var_19);
    var_21 = df::max(var_9, var_14);
    var_22 = df::max(var_4, var_21);
    var_23 = df::min(var_22, var_15);
    var_24 = df::add(var_20, var_23);
    return var_24;
}
// Machine-generated reverse-mode adjoint of box_sdf_cpu_func: propagates
// adj_ret (gradient of the SDF value) back into adj_upper and adj_p by
// running the adjoint of each primal statement in reverse order.
// Do not edit by hand.
void adj_box_sdf_cpu_func(
    df::float3 var_upper,
    df::float3 var_p,
    df::float3 & adj_upper,
    df::float3 & adj_p,
    float & adj_ret)
{
    //---------
    // primal vars
    const int var_0 = 0;
    float var_1;
    float var_2;
    float var_3;
    float var_4;
    const int var_5 = 1;
    float var_6;
    float var_7;
    float var_8;
    float var_9;
    const int var_10 = 2;
    float var_11;
    float var_12;
    float var_13;
    float var_14;
    const float var_15 = 0.0;
    float var_16;
    float var_17;
    float var_18;
    df::float3 var_19;
    float var_20;
    float var_21;
    float var_22;
    float var_23;
    float var_24;
    //---------
    // dual vars
    int adj_0 = 0;
    float adj_1 = 0;
    float adj_2 = 0;
    float adj_3 = 0;
    float adj_4 = 0;
    int adj_5 = 0;
    float adj_6 = 0;
    float adj_7 = 0;
    float adj_8 = 0;
    float adj_9 = 0;
    int adj_10 = 0;
    float adj_11 = 0;
    float adj_12 = 0;
    float adj_13 = 0;
    float adj_14 = 0;
    float adj_15 = 0;
    float adj_16 = 0;
    float adj_17 = 0;
    float adj_18 = 0;
    df::float3 adj_19 = 0;
    float adj_20 = 0;
    float adj_21 = 0;
    float adj_22 = 0;
    float adj_23 = 0;
    float adj_24 = 0;
    //---------
    // forward (primal replay)
    var_1 = df::index(var_p, var_0);
    var_2 = df::abs(var_1);
    var_3 = df::index(var_upper, var_0);
    var_4 = df::sub(var_2, var_3);
    var_6 = df::index(var_p, var_5);
    var_7 = df::abs(var_6);
    var_8 = df::index(var_upper, var_5);
    var_9 = df::sub(var_7, var_8);
    var_11 = df::index(var_p, var_10);
    var_12 = df::abs(var_11);
    var_13 = df::index(var_upper, var_10);
    var_14 = df::sub(var_12, var_13);
    var_16 = df::max(var_4, var_15);
    var_17 = df::max(var_9, var_15);
    var_18 = df::max(var_14, var_15);
    var_19 = df::float3(var_16, var_17, var_18);
    var_20 = df::length(var_19);
    var_21 = df::max(var_9, var_14);
    var_22 = df::max(var_4, var_21);
    var_23 = df::min(var_22, var_15);
    var_24 = df::add(var_20, var_23);
    goto label0;
    //---------
    // reverse
    label0:;
    adj_24 += adj_ret;
    df::adj_add(var_20, var_23, adj_20, adj_23, adj_24);
    df::adj_min(var_22, var_15, adj_22, adj_15, adj_23);
    df::adj_max(var_4, var_21, adj_4, adj_21, adj_22);
    df::adj_max(var_9, var_14, adj_9, adj_14, adj_21);
    df::adj_length(var_19, adj_19, adj_20);
    df::adj_float3(var_16, var_17, var_18, adj_16, adj_17, adj_18, adj_19);
    df::adj_max(var_14, var_15, adj_14, adj_15, adj_18);
    df::adj_max(var_9, var_15, adj_9, adj_15, adj_17);
    df::adj_max(var_4, var_15, adj_4, adj_15, adj_16);
    df::adj_sub(var_12, var_13, adj_12, adj_13, adj_14);
    df::adj_index(var_upper, var_10, adj_upper, adj_10, adj_13);
    df::adj_abs(var_11, adj_11, adj_12);
    df::adj_index(var_p, var_10, adj_p, adj_10, adj_11);
    df::adj_sub(var_7, var_8, adj_7, adj_8, adj_9);
    df::adj_index(var_upper, var_5, adj_upper, adj_5, adj_8);
    df::adj_abs(var_6, adj_6, adj_7);
    df::adj_index(var_p, var_5, adj_p, adj_5, adj_6);
    df::adj_sub(var_2, var_3, adj_2, adj_3, adj_4);
    df::adj_index(var_upper, var_0, adj_upper, adj_0, adj_3);
    df::adj_abs(var_1, adj_1, adj_2);
    df::adj_index(var_p, var_0, adj_p, adj_0, adj_1);
    return;
}
// Gradient of the box signed-distance function w.r.t. the query point p,
// for a box with extents [-upper, upper] (machine-generated body, patched).
//
// Outside the box (any per-axis excess q_i = |p_i| - upper_i positive) the
// gradient is normalize(p - clamp(p, -upper, upper)), i.e. the direction
// from the closest surface point to p. Inside the box the gradient is the
// sign of p along the axis whose excess strictly dominates the other two.
//
// Fix over the generated original: the final `return var_58;` was missing,
// so control could fall off the end of a value-returning function
// (undefined behavior) whenever no axis strictly dominated (tie cases,
// e.g. qx == qy). On that path var_58 resolves through the select chain
// to a default-constructed zero vector, i.e. a zero gradient.
df::float3 box_sdf_grad_cpu_func(
    df::float3 var_upper,
    df::float3 var_p)
{
    const int var_0 = 0;
    const int var_5 = 1;
    const int var_10 = 2;
    const float var_15 = 0.0;

    // Per-axis excess q_i = |p_i| - upper_i (positive => outside on axis i).
    float var_1 = df::index(var_p, var_0);
    float var_2 = df::abs(var_1);
    float var_3 = df::index(var_upper, var_0);
    float var_4 = df::sub(var_2, var_3);

    float var_6 = df::index(var_p, var_5);
    float var_7 = df::abs(var_6);
    float var_8 = df::index(var_upper, var_5);
    float var_9 = df::sub(var_7, var_8);

    float var_11 = df::index(var_p, var_10);
    float var_12 = df::abs(var_11);
    float var_13 = df::index(var_upper, var_10);
    float var_14 = df::sub(var_12, var_13);

    bool var_16 = (var_4 > var_15);
    bool var_17 = (var_9 > var_15);
    bool var_18 = (var_14 > var_15);
    bool var_19 = var_16 || var_17 || var_18;

    // Declared before the branches so the select chain below is valid on
    // untaken paths; df::float3 default-constructs to (0, 0, 0).
    df::float3 var_37;
    df::float3 var_47;
    df::float3 var_52;
    df::float3 var_57;

    // Outside: direction from the clamped (closest) surface point to p.
    if (var_19) {
        float var_20 = df::index(var_p, var_0);
        float var_21 = df::index(var_upper, var_0);
        float var_22 = df::sub(var_15, var_21);
        float var_23 = df::index(var_upper, var_0);
        float var_24 = df::clamp(var_20, var_22, var_23);
        float var_25 = df::index(var_p, var_5);
        float var_26 = df::index(var_upper, var_5);
        float var_27 = df::sub(var_15, var_26);
        float var_28 = df::index(var_upper, var_5);
        float var_29 = df::clamp(var_25, var_27, var_28);
        float var_30 = df::index(var_p, var_10);
        float var_31 = df::index(var_upper, var_10);
        float var_32 = df::sub(var_15, var_31);
        float var_33 = df::index(var_upper, var_10);
        float var_34 = df::clamp(var_30, var_32, var_33);
        df::float3 var_35 = df::float3(var_24, var_29, var_34);
        df::float3 var_36 = df::sub(var_p, var_35);
        var_37 = df::normalize(var_36);
        return var_37;
    }

    // Inside: sign of p along the axis with the strictly largest excess.
    float var_38 = df::index(var_p, var_0);
    float var_39 = df::sign(var_38);
    float var_40 = df::index(var_p, var_5);
    float var_41 = df::sign(var_40);
    float var_42 = df::index(var_p, var_10);
    float var_43 = df::sign(var_42);

    bool var_46 = (var_4 > var_9) && (var_4 > var_14);
    if (var_46) {
        var_47 = df::float3(var_39, var_15, var_15);
        return var_47;
    }
    df::float3 var_48 = df::select(var_46, var_37, var_47);

    bool var_51 = (var_9 > var_4) && (var_9 > var_14);
    if (var_51) {
        var_52 = df::float3(var_15, var_41, var_15);
        return var_52;
    }
    df::float3 var_53 = df::select(var_51, var_48, var_52);

    bool var_56 = (var_14 > var_4) && (var_14 > var_9);
    if (var_56) {
        var_57 = df::float3(var_15, var_15, var_43);
        return var_57;
    }
    df::float3 var_58 = df::select(var_56, var_53, var_57);

    // FIX: the generated original fell off the end here. Return the merged
    // select result, which is the zero vector on this (tie-case) path.
    return var_58;
}
// Reverse-mode adjoint of box_sdf_grad_cpu_func (machine-generated).
// Replays the forward pass (returns replaced by goto labelN), then runs
// the reverse sweep; each `if (var_X) { labelN:; ... }` re-enters the
// branch taken by the forward pass so its adjoint is seeded with adj_ret.
void adj_box_sdf_grad_cpu_func(
df::float3 var_upper,
df::float3 var_p,
df::float3 & adj_upper,
df::float3 & adj_p,
df::float3 & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
float var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
float var_7;
float var_8;
float var_9;
const int var_10 = 2;
float var_11;
float var_12;
float var_13;
float var_14;
const float var_15 = 0.0;
bool var_16;
bool var_17;
bool var_18;
bool var_19;
float var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
float var_32;
float var_33;
float var_34;
df::float3 var_35;
df::float3 var_36;
df::float3 var_37;
float var_38;
float var_39;
float var_40;
float var_41;
float var_42;
float var_43;
bool var_44;
bool var_45;
bool var_46;
df::float3 var_47;
df::float3 var_48;
bool var_49;
bool var_50;
bool var_51;
df::float3 var_52;
df::float3 var_53;
bool var_54;
bool var_55;
bool var_56;
df::float3 var_57;
df::float3 var_58;
//---------
// dual vars (one adjoint accumulator per primal intermediate)
int adj_0 = 0;
float adj_1 = 0;
float adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
int adj_5 = 0;
float adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
int adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
float adj_13 = 0;
float adj_14 = 0;
float adj_15 = 0;
bool adj_16 = 0;
bool adj_17 = 0;
bool adj_18 = 0;
bool adj_19 = 0;
float adj_20 = 0;
float adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
float adj_28 = 0;
float adj_29 = 0;
float adj_30 = 0;
float adj_31 = 0;
float adj_32 = 0;
float adj_33 = 0;
float adj_34 = 0;
df::float3 adj_35 = 0;
df::float3 adj_36 = 0;
df::float3 adj_37 = 0;
float adj_38 = 0;
float adj_39 = 0;
float adj_40 = 0;
float adj_41 = 0;
float adj_42 = 0;
float adj_43 = 0;
bool adj_44 = 0;
bool adj_45 = 0;
bool adj_46 = 0;
df::float3 adj_47 = 0;
df::float3 adj_48 = 0;
bool adj_49 = 0;
bool adj_50 = 0;
bool adj_51 = 0;
df::float3 adj_52 = 0;
df::float3 adj_53 = 0;
bool adj_54 = 0;
bool adj_55 = 0;
bool adj_56 = 0;
df::float3 adj_57 = 0;
df::float3 adj_58 = 0;
//---------
// forward
// Replay of box_sdf_grad_cpu_func with each `return` replaced by a goto
// into the matching point of the reverse sweep below.
var_1 = df::index(var_p, var_0);
var_2 = df::abs(var_1);
var_3 = df::index(var_upper, var_0);
var_4 = df::sub(var_2, var_3);
var_6 = df::index(var_p, var_5);
var_7 = df::abs(var_6);
var_8 = df::index(var_upper, var_5);
var_9 = df::sub(var_7, var_8);
var_11 = df::index(var_p, var_10);
var_12 = df::abs(var_11);
var_13 = df::index(var_upper, var_10);
var_14 = df::sub(var_12, var_13);
var_16 = (var_4 > var_15);
var_17 = (var_9 > var_15);
var_18 = (var_14 > var_15);
var_19 = var_16 || var_17 || var_18;
if (var_19) {
var_20 = df::index(var_p, var_0);
var_21 = df::index(var_upper, var_0);
var_22 = df::sub(var_15, var_21);
var_23 = df::index(var_upper, var_0);
var_24 = df::clamp(var_20, var_22, var_23);
var_25 = df::index(var_p, var_5);
var_26 = df::index(var_upper, var_5);
var_27 = df::sub(var_15, var_26);
var_28 = df::index(var_upper, var_5);
var_29 = df::clamp(var_25, var_27, var_28);
var_30 = df::index(var_p, var_10);
var_31 = df::index(var_upper, var_10);
var_32 = df::sub(var_15, var_31);
var_33 = df::index(var_upper, var_10);
var_34 = df::clamp(var_30, var_32, var_33);
var_35 = df::float3(var_24, var_29, var_34);
var_36 = df::sub(var_p, var_35);
var_37 = df::normalize(var_36);
goto label0;
}
var_38 = df::index(var_p, var_0);
var_39 = df::sign(var_38);
var_40 = df::index(var_p, var_5);
var_41 = df::sign(var_40);
var_42 = df::index(var_p, var_10);
var_43 = df::sign(var_42);
var_44 = (var_4 > var_9);
var_45 = (var_4 > var_14);
var_46 = var_44 && var_45;
if (var_46) {
var_47 = df::float3(var_39, var_15, var_15);
goto label1;
}
var_48 = df::select(var_46, var_37, var_47);
var_49 = (var_9 > var_4);
var_50 = (var_9 > var_14);
var_51 = var_49 && var_50;
if (var_51) {
var_52 = df::float3(var_15, var_41, var_15);
goto label2;
}
var_53 = df::select(var_51, var_48, var_52);
var_54 = (var_14 > var_4);
var_55 = (var_14 > var_9);
var_56 = var_54 && var_55;
if (var_56) {
var_57 = df::float3(var_15, var_15, var_43);
goto label3;
}
var_58 = df::select(var_56, var_53, var_57);
// NOTE(review): the primal falls through after this select without a
// return, so no label/adj_58 += adj_ret seed exists for that path; the
// tie case (no strictly dominant axis) contributes no gradient here.
//---------
// reverse
df::adj_select(var_56, var_53, var_57, adj_56, adj_53, adj_57, adj_58);
if (var_56) {
label3:;
adj_57 += adj_ret;
df::adj_float3(var_15, var_15, var_43, adj_15, adj_15, adj_43, adj_57);
}
df::adj_select(var_51, var_48, var_52, adj_51, adj_48, adj_52, adj_53);
if (var_51) {
label2:;
adj_52 += adj_ret;
df::adj_float3(var_15, var_41, var_15, adj_15, adj_41, adj_15, adj_52);
}
df::adj_select(var_46, var_37, var_47, adj_46, adj_37, adj_47, adj_48);
if (var_46) {
label1:;
adj_47 += adj_ret;
df::adj_float3(var_39, var_15, var_15, adj_39, adj_15, adj_15, adj_47);
}
df::adj_sign(var_42, adj_42, adj_43);
df::adj_index(var_p, var_10, adj_p, adj_10, adj_42);
df::adj_sign(var_40, adj_40, adj_41);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_40);
df::adj_sign(var_38, adj_38, adj_39);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_38);
if (var_19) {
label0:;
adj_37 += adj_ret;
df::adj_normalize(var_36, adj_36, adj_37);
df::adj_sub(var_p, var_35, adj_p, adj_35, adj_36);
df::adj_float3(var_24, var_29, var_34, adj_24, adj_29, adj_34, adj_35);
df::adj_clamp(var_30, var_32, var_33, adj_30, adj_32, adj_33, adj_34);
df::adj_index(var_upper, var_10, adj_upper, adj_10, adj_33);
df::adj_sub(var_15, var_31, adj_15, adj_31, adj_32);
df::adj_index(var_upper, var_10, adj_upper, adj_10, adj_31);
df::adj_index(var_p, var_10, adj_p, adj_10, adj_30);
df::adj_clamp(var_25, var_27, var_28, adj_25, adj_27, adj_28, adj_29);
df::adj_index(var_upper, var_5, adj_upper, adj_5, adj_28);
df::adj_sub(var_15, var_26, adj_15, adj_26, adj_27);
df::adj_index(var_upper, var_5, adj_upper, adj_5, adj_26);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_25);
df::adj_clamp(var_20, var_22, var_23, adj_20, adj_22, adj_23, adj_24);
df::adj_index(var_upper, var_0, adj_upper, adj_0, adj_23);
df::adj_sub(var_15, var_21, adj_15, adj_21, adj_22);
df::adj_index(var_upper, var_0, adj_upper, adj_0, adj_21);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_20);
}
df::adj_sub(var_12, var_13, adj_12, adj_13, adj_14);
df::adj_index(var_upper, var_10, adj_upper, adj_10, adj_13);
df::adj_abs(var_11, adj_11, adj_12);
df::adj_index(var_p, var_10, adj_p, adj_10, adj_11);
df::adj_sub(var_7, var_8, adj_7, adj_8, adj_9);
df::adj_index(var_upper, var_5, adj_upper, adj_5, adj_8);
df::adj_abs(var_6, adj_6, adj_7);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_6);
df::adj_sub(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_index(var_upper, var_0, adj_upper, adj_0, adj_3);
df::adj_abs(var_1, adj_1, adj_2);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_1);
return;
}
float capsule_sdf_cpu_func(
float var_radius,
float var_half_width,
df::float3 var_p)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
bool var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
const int var_7 = 2;
float var_8;
df::float3 var_9;
float var_10;
float var_11;
float var_12;
const float var_13 = 0.0;
float var_14;
bool var_15;
float var_16;
float var_17;
float var_18;
float var_19;
df::float3 var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
df::float3 var_26;
float var_27;
float var_28;
//---------
// forward
var_1 = df::index(var_p, var_0);
var_2 = (var_1 > var_half_width);
if (var_2) {
var_3 = df::index(var_p, var_0);
var_4 = df::sub(var_3, var_half_width);
var_6 = df::index(var_p, var_5);
var_8 = df::index(var_p, var_7);
var_9 = df::float3(var_4, var_6, var_8);
var_10 = df::length(var_9);
var_11 = df::sub(var_10, var_radius);
return var_11;
}
var_12 = df::index(var_p, var_0);
var_14 = df::sub(var_13, var_half_width);
var_15 = (var_12 < var_14);
if (var_15) {
var_16 = df::index(var_p, var_0);
var_17 = df::add(var_16, var_half_width);
var_18 = df::index(var_p, var_5);
var_19 = df::index(var_p, var_7);
var_20 = df::float3(var_17, var_18, var_19);
var_21 = df::length(var_20);
var_22 = df::sub(var_21, var_radius);
return var_22;
}
var_23 = df::select(var_15, var_11, var_22);
var_24 = df::index(var_p, var_5);
var_25 = df::index(var_p, var_7);
var_26 = df::float3(var_13, var_24, var_25);
var_27 = df::length(var_26);
var_28 = df::sub(var_27, var_radius);
return var_28;
}
// Reverse-mode adjoint of capsule_sdf_cpu_func (machine-generated).
// Replays the forward pass (returns replaced by goto labelN), then runs
// the reverse sweep; each `if (var_X) { labelN:; ... }` re-enters the
// branch the forward pass took so its adjoint is seeded with adj_ret.
void adj_capsule_sdf_cpu_func(
float var_radius,
float var_half_width,
df::float3 var_p,
float & adj_radius,
float & adj_half_width,
df::float3 & adj_p,
float & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
bool var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
const int var_7 = 2;
float var_8;
df::float3 var_9;
float var_10;
float var_11;
float var_12;
const float var_13 = 0.0;
float var_14;
bool var_15;
float var_16;
float var_17;
float var_18;
float var_19;
df::float3 var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
df::float3 var_26;
float var_27;
float var_28;
//---------
// dual vars (one adjoint accumulator per primal intermediate)
int adj_0 = 0;
float adj_1 = 0;
bool adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
int adj_5 = 0;
float adj_6 = 0;
int adj_7 = 0;
float adj_8 = 0;
df::float3 adj_9 = 0;
float adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
float adj_13 = 0;
float adj_14 = 0;
bool adj_15 = 0;
float adj_16 = 0;
float adj_17 = 0;
float adj_18 = 0;
float adj_19 = 0;
df::float3 adj_20 = 0;
float adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
df::float3 adj_26 = 0;
float adj_27 = 0;
float adj_28 = 0;
//---------
// forward
// Replay: three cases on p.x vs +/- half_width (end caps vs cylinder body).
var_1 = df::index(var_p, var_0);
var_2 = (var_1 > var_half_width);
if (var_2) {
var_3 = df::index(var_p, var_0);
var_4 = df::sub(var_3, var_half_width);
var_6 = df::index(var_p, var_5);
var_8 = df::index(var_p, var_7);
var_9 = df::float3(var_4, var_6, var_8);
var_10 = df::length(var_9);
var_11 = df::sub(var_10, var_radius);
goto label0;
}
var_12 = df::index(var_p, var_0);
var_14 = df::sub(var_13, var_half_width);
var_15 = (var_12 < var_14);
if (var_15) {
var_16 = df::index(var_p, var_0);
var_17 = df::add(var_16, var_half_width);
var_18 = df::index(var_p, var_5);
var_19 = df::index(var_p, var_7);
var_20 = df::float3(var_17, var_18, var_19);
var_21 = df::length(var_20);
var_22 = df::sub(var_21, var_radius);
goto label1;
}
var_23 = df::select(var_15, var_11, var_22);
var_24 = df::index(var_p, var_5);
var_25 = df::index(var_p, var_7);
var_26 = df::float3(var_13, var_24, var_25);
var_27 = df::length(var_26);
var_28 = df::sub(var_27, var_radius);
goto label2;
//---------
// reverse
// Adjoint of each forward op, in reverse order; only the branch actually
// taken by the forward pass receives the adj_ret seed.
label2:;
adj_28 += adj_ret;
df::adj_sub(var_27, var_radius, adj_27, adj_radius, adj_28);
df::adj_length(var_26, adj_26, adj_27);
df::adj_float3(var_13, var_24, var_25, adj_13, adj_24, adj_25, adj_26);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_25);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_24);
df::adj_select(var_15, var_11, var_22, adj_15, adj_11, adj_22, adj_23);
if (var_15) {
label1:;
adj_22 += adj_ret;
df::adj_sub(var_21, var_radius, adj_21, adj_radius, adj_22);
df::adj_length(var_20, adj_20, adj_21);
df::adj_float3(var_17, var_18, var_19, adj_17, adj_18, adj_19, adj_20);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_19);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_18);
df::adj_add(var_16, var_half_width, adj_16, adj_half_width, adj_17);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_16);
}
df::adj_sub(var_13, var_half_width, adj_13, adj_half_width, adj_14);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_12);
if (var_2) {
label0:;
adj_11 += adj_ret;
df::adj_sub(var_10, var_radius, adj_10, adj_radius, adj_11);
df::adj_length(var_9, adj_9, adj_10);
df::adj_float3(var_4, var_6, var_8, adj_4, adj_6, adj_8, adj_9);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_8);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_6);
df::adj_sub(var_3, var_half_width, adj_3, adj_half_width, adj_4);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_3);
}
df::adj_index(var_p, var_0, adj_p, adj_0, adj_1);
return;
}
// Gradient of the capsule signed-distance function w.r.t. the query point:
// the unit vector from the closest axis/cap point to var_p.
// var_radius is accepted for signature symmetry with capsule_sdf_cpu_func
// but does not affect the gradient direction.
df::float3 capsule_sdf_grad_cpu_func(
    float var_radius,
    float var_half_width,
    df::float3 var_p)
{
    float px = df::index(var_p, 0);
    float py = df::index(var_p, 1);
    float pz = df::index(var_p, 2);

    // Past the +x cap: direction from the end sphere centre at (+half_width, 0, 0).
    if (px > var_half_width)
    {
        return df::normalize(df::float3(px - var_half_width, py, pz));
    }

    // Past the -x cap: direction from the end sphere centre at (-half_width, 0, 0).
    if (px < -var_half_width)
    {
        return df::normalize(df::float3(px + var_half_width, py, pz));
    }

    // Between the caps: radial direction away from the axis.
    return df::normalize(df::float3(0.0f, py, pz));
}
// Reverse-mode adjoint of capsule_sdf_grad_cpu_func (machine-generated).
// Replays the forward pass (returns replaced by goto labelN), then runs
// the reverse sweep; each `if (var_X) { labelN:; ... }` re-enters the
// branch the forward pass took so its adjoint is seeded with adj_ret.
void adj_capsule_sdf_grad_cpu_func(
float var_radius,
float var_half_width,
df::float3 var_p,
float & adj_radius,
float & adj_half_width,
df::float3 & adj_p,
df::float3 & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
float var_1;
bool var_2;
float var_3;
float var_4;
const int var_5 = 1;
float var_6;
const int var_7 = 2;
float var_8;
df::float3 var_9;
df::float3 var_10;
float var_11;
const float var_12 = 0.0;
float var_13;
bool var_14;
float var_15;
float var_16;
float var_17;
float var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
float var_22;
float var_23;
df::float3 var_24;
df::float3 var_25;
//---------
// dual vars (one adjoint accumulator per primal intermediate)
int adj_0 = 0;
float adj_1 = 0;
bool adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
int adj_5 = 0;
float adj_6 = 0;
int adj_7 = 0;
float adj_8 = 0;
df::float3 adj_9 = 0;
df::float3 adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
float adj_13 = 0;
bool adj_14 = 0;
float adj_15 = 0;
float adj_16 = 0;
float adj_17 = 0;
float adj_18 = 0;
df::float3 adj_19 = 0;
df::float3 adj_20 = 0;
df::float3 adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
df::float3 adj_24 = 0;
df::float3 adj_25 = 0;
//---------
// forward
// Replay: three cases on p.x vs +/- half_width (end caps vs cylinder body).
var_1 = df::index(var_p, var_0);
var_2 = (var_1 > var_half_width);
if (var_2) {
var_3 = df::index(var_p, var_0);
var_4 = df::sub(var_3, var_half_width);
var_6 = df::index(var_p, var_5);
var_8 = df::index(var_p, var_7);
var_9 = df::float3(var_4, var_6, var_8);
var_10 = df::normalize(var_9);
goto label0;
}
var_11 = df::index(var_p, var_0);
var_13 = df::sub(var_12, var_half_width);
var_14 = (var_11 < var_13);
if (var_14) {
var_15 = df::index(var_p, var_0);
var_16 = df::add(var_15, var_half_width);
var_17 = df::index(var_p, var_5);
var_18 = df::index(var_p, var_7);
var_19 = df::float3(var_16, var_17, var_18);
var_20 = df::normalize(var_19);
goto label1;
}
var_21 = df::select(var_14, var_10, var_20);
var_22 = df::index(var_p, var_5);
var_23 = df::index(var_p, var_7);
var_24 = df::float3(var_12, var_22, var_23);
var_25 = df::normalize(var_24);
goto label2;
//---------
// reverse
// Adjoint of each forward op, in reverse order; only the branch actually
// taken by the forward pass receives the adj_ret seed.
label2:;
adj_25 += adj_ret;
df::adj_normalize(var_24, adj_24, adj_25);
df::adj_float3(var_12, var_22, var_23, adj_12, adj_22, adj_23, adj_24);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_23);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_22);
df::adj_select(var_14, var_10, var_20, adj_14, adj_10, adj_20, adj_21);
if (var_14) {
label1:;
adj_20 += adj_ret;
df::adj_normalize(var_19, adj_19, adj_20);
df::adj_float3(var_16, var_17, var_18, adj_16, adj_17, adj_18, adj_19);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_18);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_17);
df::adj_add(var_15, var_half_width, adj_15, adj_half_width, adj_16);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_15);
}
df::adj_sub(var_12, var_half_width, adj_12, adj_half_width, adj_13);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_11);
if (var_2) {
label0:;
adj_10 += adj_ret;
df::adj_normalize(var_9, adj_9, adj_10);
df::adj_float3(var_4, var_6, var_8, adj_4, adj_6, adj_8, adj_9);
df::adj_index(var_p, var_7, adj_p, adj_7, adj_8);
df::adj_index(var_p, var_5, adj_p, adj_5, adj_6);
df::adj_sub(var_3, var_half_width, adj_3, adj_half_width, adj_4);
df::adj_index(var_p, var_0, adj_p, adj_0, adj_3);
}
df::adj_index(var_p, var_0, adj_p, adj_0, adj_1);
return;
}
// Transform a twist (angular part on top, linear part on bottom) by the
// spatial transform var_t: both halves are rotated into the new frame,
// and the linear half additionally picks up cross(translation, rotated
// angular half).
spatial_vector spatial_transform_twist_cpu_func(
    spatial_transform var_t,
    spatial_vector var_x)
{
    quat q = df::spatial_transform_get_rotation(var_t);
    df::float3 p = df::spatial_transform_get_translation(var_t);

    df::float3 w = df::rotate(q, df::spatial_top(var_x));
    df::float3 v = df::rotate(q, df::spatial_bottom(var_x));

    df::float3 v_out = df::add(v, df::cross(p, w));
    return df::spatial_vector(w, v_out);
}
// Reverse-mode adjoint of spatial_transform_twist_cpu_func
// (machine-generated). Replays the forward pass, then accumulates
// gradients into adj_t and adj_x, seeded by adj_ret, visiting the
// forward operations in reverse order.
void adj_spatial_transform_twist_cpu_func(
spatial_transform var_t,
spatial_vector var_x,
spatial_transform & adj_t,
spatial_vector & adj_x,
spatial_vector & adj_ret)
{
//---------
// primal vars
quat var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
df::float3 var_7;
spatial_vector var_8;
//---------
// dual vars (one adjoint accumulator per primal intermediate)
quat adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
df::float3 adj_3 = 0;
df::float3 adj_4 = 0;
df::float3 adj_5 = 0;
df::float3 adj_6 = 0;
df::float3 adj_7 = 0;
spatial_vector adj_8 = 0;
//---------
// forward
var_0 = df::spatial_transform_get_rotation(var_t);
var_1 = df::spatial_transform_get_translation(var_t);
var_2 = df::spatial_top(var_x);
var_3 = df::spatial_bottom(var_x);
var_4 = df::rotate(var_0, var_2);
var_5 = df::rotate(var_0, var_3);
var_6 = df::cross(var_1, var_4);
var_7 = df::add(var_5, var_6);
var_8 = df::spatial_vector(var_4, var_7);
goto label0;
//---------
// reverse
label0:;
adj_8 += adj_ret;
df::adj_spatial_vector(var_4, var_7, adj_4, adj_7, adj_8);
df::adj_add(var_5, var_6, adj_5, adj_6, adj_7);
df::adj_cross(var_1, var_4, adj_1, adj_4, adj_6);
df::adj_rotate(var_0, var_3, adj_0, adj_3, adj_5);
df::adj_rotate(var_0, var_2, adj_0, adj_2, adj_4);
df::adj_spatial_bottom(var_x, adj_x, adj_3);
df::adj_spatial_top(var_x, adj_x, adj_2);
df::adj_spatial_transform_get_translation(var_t, adj_t, adj_1);
df::adj_spatial_transform_get_rotation(var_t, adj_t, adj_0);
return;
}
// Transform a wrench (top/bottom halves of var_x) by the spatial
// transform var_t: the bottom half is rotated, and the top half is
// rotated plus cross(translation, rotated bottom half). Note the roles
// of top and bottom are swapped relative to the twist transform.
spatial_vector spatial_transform_wrench_cpu_func(
    spatial_transform var_t,
    spatial_vector var_x)
{
    quat q = df::spatial_transform_get_rotation(var_t);
    df::float3 p = df::spatial_transform_get_translation(var_t);

    df::float3 bottom = df::rotate(q, df::spatial_bottom(var_x));
    df::float3 top = df::add(df::rotate(q, df::spatial_top(var_x)),
                             df::cross(p, bottom));

    return df::spatial_vector(top, bottom);
}
// Reverse-mode adjoint of spatial_transform_wrench_cpu_func
// (machine-generated). Replays the forward pass, then accumulates
// gradients into adj_t and adj_x, seeded by adj_ret, visiting the
// forward operations in reverse order.
void adj_spatial_transform_wrench_cpu_func(
spatial_transform var_t,
spatial_vector var_x,
spatial_transform & adj_t,
spatial_vector & adj_x,
spatial_vector & adj_ret)
{
//---------
// primal vars
quat var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
df::float3 var_7;
spatial_vector var_8;
//---------
// dual vars (one adjoint accumulator per primal intermediate)
quat adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
df::float3 adj_3 = 0;
df::float3 adj_4 = 0;
df::float3 adj_5 = 0;
df::float3 adj_6 = 0;
df::float3 adj_7 = 0;
spatial_vector adj_8 = 0;
//---------
// forward
var_0 = df::spatial_transform_get_rotation(var_t);
var_1 = df::spatial_transform_get_translation(var_t);
var_2 = df::spatial_top(var_x);
var_3 = df::spatial_bottom(var_x);
var_4 = df::rotate(var_0, var_3);
var_5 = df::rotate(var_0, var_2);
var_6 = df::cross(var_1, var_4);
var_7 = df::add(var_5, var_6);
var_8 = df::spatial_vector(var_7, var_4);
goto label0;
//---------
// reverse
label0:;
adj_8 += adj_ret;
df::adj_spatial_vector(var_7, var_4, adj_7, adj_4, adj_8);
df::adj_add(var_5, var_6, adj_5, adj_6, adj_7);
df::adj_cross(var_1, var_4, adj_1, adj_4, adj_6);
df::adj_rotate(var_0, var_2, adj_0, adj_2, adj_5);
df::adj_rotate(var_0, var_3, adj_0, adj_3, adj_4);
df::adj_spatial_bottom(var_x, adj_x, adj_3);
df::adj_spatial_top(var_x, adj_x, adj_2);
df::adj_spatial_transform_get_translation(var_t, adj_t, adj_1);
df::adj_spatial_transform_get_rotation(var_t, adj_t, adj_0);
return;
}
// Inverse of a rigid spatial transform:
// rotation becomes q^-1, translation becomes -(q^-1 * p).
spatial_transform spatial_transform_inverse_cpu_func(
    spatial_transform var_t)
{
    df::float3 p = df::spatial_transform_get_translation(var_t);
    quat q_inv = df::inverse(df::spatial_transform_get_rotation(var_t));

    // Negate the rotated translation by scaling with -1.
    df::float3 p_inv = df::mul(df::rotate(q_inv, p), -1.0f);
    return df::spatial_transform(p_inv, q_inv);
}
// Reverse-mode adjoint of spatial_transform_inverse_cpu_func
// (machine-generated). Replays the forward pass, then accumulates the
// gradient into adj_t, seeded by adj_ret, visiting the forward
// operations in reverse order.
void adj_spatial_transform_inverse_cpu_func(
spatial_transform var_t,
spatial_transform & adj_t,
spatial_transform & adj_ret)
{
//---------
// primal vars
df::float3 var_0;
quat var_1;
quat var_2;
df::float3 var_3;
const float var_4 = 0.0;
const float var_5 = 1.0;
float var_6;
df::float3 var_7;
spatial_transform var_8;
//---------
// dual vars (one adjoint accumulator per primal intermediate)
df::float3 adj_0 = 0;
quat adj_1 = 0;
quat adj_2 = 0;
df::float3 adj_3 = 0;
float adj_4 = 0;
float adj_5 = 0;
float adj_6 = 0;
df::float3 adj_7 = 0;
spatial_transform adj_8 = 0;
//---------
// forward
var_0 = df::spatial_transform_get_translation(var_t);
var_1 = df::spatial_transform_get_rotation(var_t);
var_2 = df::inverse(var_1);
var_3 = df::rotate(var_2, var_0);
var_6 = df::sub(var_4, var_5);
var_7 = df::mul(var_3, var_6);
var_8 = df::spatial_transform(var_7, var_2);
goto label0;
//---------
// reverse
label0:;
adj_8 += adj_ret;
df::adj_spatial_transform(var_7, var_2, adj_7, adj_2, adj_8);
df::adj_mul(var_3, var_6, adj_3, adj_6, adj_7);
df::adj_sub(var_4, var_5, adj_4, adj_5, adj_6);
df::adj_rotate(var_2, var_0, adj_2, adj_0, adj_3);
df::adj_inverse(var_1, adj_1, adj_2);
df::adj_spatial_transform_get_rotation(var_t, adj_t, adj_1);
df::adj_spatial_transform_get_translation(var_t, adj_t, adj_0);
return;
}
// Transform a spatial inertia matrix var_I by var_t: builds the spatial
// adjoint X of the inverse transform and returns X^T * I * X.
spatial_matrix spatial_transform_inertia_cpu_func(
    spatial_transform var_t,
    spatial_matrix var_I)
{
    spatial_transform t_inv = spatial_transform_inverse_cpu_func(var_t);
    quat q = df::spatial_transform_get_rotation(t_inv);
    df::float3 p = df::spatial_transform_get_translation(t_inv);

    // Rotation matrix columns: the basis vectors rotated by q.
    df::float3 r1 = df::rotate(q, df::float3(1.0f, 0.0f, 0.0f));
    df::float3 r2 = df::rotate(q, df::float3(0.0f, 1.0f, 0.0f));
    df::float3 r3 = df::rotate(q, df::float3(0.0f, 0.0f, 1.0f));

    mat33 R = df::mat33(r1, r2, r3);
    mat33 S = df::mul(df::skew(p), R);

    // Congruence transform of the inertia by the spatial adjoint.
    spatial_matrix X = df::spatial_adjoint(R, S);
    return df::mul(df::mul(df::transpose(X), var_I), X);
}
// Reverse-mode adjoint of spatial_transform_inertia_cpu_func
// (machine-generated). Replays the forward pass, then accumulates
// gradients into adj_t and adj_I, seeded by adj_ret, visiting the
// forward operations in reverse order. The final call chains into the
// hand-off adjoint of the nested spatial_transform_inverse call.
void adj_spatial_transform_inertia_cpu_func(
spatial_transform var_t,
spatial_matrix var_I,
spatial_transform & adj_t,
spatial_matrix & adj_I,
spatial_matrix & adj_ret)
{
//---------
// primal vars
spatial_transform var_0;
quat var_1;
df::float3 var_2;
const float var_3 = 1.0;
const float var_4 = 0.0;
df::float3 var_5;
df::float3 var_6;
df::float3 var_7;
df::float3 var_8;
df::float3 var_9;
df::float3 var_10;
mat33 var_11;
mat33 var_12;
mat33 var_13;
spatial_matrix var_14;
spatial_matrix var_15;
spatial_matrix var_16;
spatial_matrix var_17;
//---------
// dual vars (one adjoint accumulator per primal intermediate)
spatial_transform adj_0 = 0;
quat adj_1 = 0;
df::float3 adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
df::float3 adj_5 = 0;
df::float3 adj_6 = 0;
df::float3 adj_7 = 0;
df::float3 adj_8 = 0;
df::float3 adj_9 = 0;
df::float3 adj_10 = 0;
mat33 adj_11 = 0;
mat33 adj_12 = 0;
mat33 adj_13 = 0;
spatial_matrix adj_14 = 0;
spatial_matrix adj_15 = 0;
spatial_matrix adj_16 = 0;
spatial_matrix adj_17 = 0;
//---------
// forward
var_0 = spatial_transform_inverse_cpu_func(var_t);
var_1 = df::spatial_transform_get_rotation(var_0);
var_2 = df::spatial_transform_get_translation(var_0);
var_5 = df::float3(var_3, var_4, var_4);
var_6 = df::rotate(var_1, var_5);
var_7 = df::float3(var_4, var_3, var_4);
var_8 = df::rotate(var_1, var_7);
var_9 = df::float3(var_4, var_4, var_3);
var_10 = df::rotate(var_1, var_9);
var_11 = df::mat33(var_6, var_8, var_10);
var_12 = df::skew(var_2);
var_13 = df::mul(var_12, var_11);
var_14 = df::spatial_adjoint(var_11, var_13);
var_15 = df::transpose(var_14);
var_16 = df::mul(var_15, var_I);
var_17 = df::mul(var_16, var_14);
goto label0;
//---------
// reverse
label0:;
adj_17 += adj_ret;
df::adj_mul(var_16, var_14, adj_16, adj_14, adj_17);
df::adj_mul(var_15, var_I, adj_15, adj_I, adj_16);
df::adj_transpose(var_14, adj_14, adj_15);
df::adj_spatial_adjoint(var_11, var_13, adj_11, adj_13, adj_14);
df::adj_mul(var_12, var_11, adj_12, adj_11, adj_13);
df::adj_skew(var_2, adj_2, adj_12);
df::adj_mat33(var_6, var_8, var_10, adj_6, adj_8, adj_10, adj_11);
df::adj_rotate(var_1, var_9, adj_1, adj_9, adj_10);
df::adj_float3(var_4, var_4, var_3, adj_4, adj_4, adj_3, adj_9);
df::adj_rotate(var_1, var_7, adj_1, adj_7, adj_8);
df::adj_float3(var_4, var_3, var_4, adj_4, adj_3, adj_4, adj_7);
df::adj_rotate(var_1, var_5, adj_1, adj_5, adj_6);
df::adj_float3(var_3, var_4, var_4, adj_3, adj_4, adj_4, adj_5);
df::adj_spatial_transform_get_translation(var_0, adj_0, adj_2);
df::adj_spatial_transform_get_rotation(var_0, adj_0, adj_1);
adj_spatial_transform_inverse_cpu_func(var_t, adj_t, adj_0);
return;
}
// Applies the force of one muscle segment (between attachment points i and
// i+1) to the two bodies it connects, accumulating spatial wrenches into
// var_body_f_s. Auto-generated (SSA-style) kernel — do not edit by hand.
//
// var_i:                 index of the segment's first attachment point
// var_body_X_s:          per-body spatial transforms used to map body-local
//                        attachment points into the spatial frame
// var_body_v_s:          per-body spatial velocities (unused in this function)
// var_muscle_links:      body index for each attachment point
// var_muscle_points:     body-local attachment point positions
// var_muscle_activation: scalar magnitude applied along the segment direction
// var_body_f_s:          output; per-body spatial forces, updated atomically
//
// Returns 0 (codegen convention; the value itself carries no information).
int compute_muscle_force_cpu_func(
int var_i,
spatial_transform* var_body_X_s,
spatial_vector* var_body_v_s,
int* var_muscle_links,
df::float3* var_muscle_points,
float var_muscle_activation,
spatial_vector* var_body_f_s)
{
//---------
// primal vars
int var_0;
const int var_1 = 1;
int var_2;
int var_3;
bool var_4;
const int var_5 = 0;
df::float3 var_6;
int var_7;
df::float3 var_8;
spatial_transform var_9;
spatial_transform var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
spatial_vector var_17;
df::float3 var_18;
spatial_vector var_19;
//---------
// forward
// bodies attached at point i and at point i+1
var_0 = df::load(var_muscle_links, var_i);
var_2 = df::add(var_i, var_1);
var_3 = df::load(var_muscle_links, var_2);
var_4 = (var_0 == var_3);
// both endpoints on the same body -> the segment exerts no net wrench; skip
if (var_4) {
return var_5;
}
var_6 = df::load(var_muscle_points, var_i);
var_7 = df::add(var_i, var_1);
var_8 = df::load(var_muscle_points, var_7);
var_9 = df::load(var_body_X_s, var_0);
var_10 = df::load(var_body_X_s, var_3);
// transform the two body-local attachment points into the spatial frame
var_11 = df::spatial_transform_point(var_9, var_6);
var_12 = df::spatial_transform_point(var_10, var_8);
// unit direction from point 0 towards point 1, scaled by the activation
var_13 = df::sub(var_12, var_11);
var_14 = df::normalize(var_13);
var_15 = df::mul(var_14, var_muscle_activation);
// wrench pulling body 0 towards body 1: cross(point, force) pairs the force
// with its moment (presumably about the spatial origin — TODO confirm the
// spatial_vector(angular, linear) convention used by df::spatial_vector)
var_16 = df::cross(var_11, var_15);
var_17 = df::spatial_vector(var_16, var_15);
df::atomic_sub(var_body_f_s, var_0, var_17);
// equal-and-opposite wrench applied to body 1 at its attachment point
var_18 = df::cross(var_12, var_15);
var_19 = df::spatial_vector(var_18, var_15);
df::atomic_add(var_body_f_s, var_3, var_19);
return var_5;
}
// Reverse-mode (adjoint) counterpart of compute_muscle_force_cpu_func.
// Replays the forward pass to rebuild every intermediate value (with each
// early `return` replaced by a `goto` into the reverse sweep), then walks
// the operations backwards, calling df::adj_* to accumulate vector-Jacobian
// products. Auto-generated code — the statement order is load-bearing; do
// not edit by hand.
//
// The adj_* parameters accumulate gradients w.r.t. the matching var_*
// inputs; adj_ret is the incoming adjoint of the integer return value and
// carries no useful gradient.
void adj_compute_muscle_force_cpu_func(
int var_i,
spatial_transform* var_body_X_s,
spatial_vector* var_body_v_s,
int* var_muscle_links,
df::float3* var_muscle_points,
float var_muscle_activation,
spatial_vector* var_body_f_s,
int & adj_i,
spatial_transform* adj_body_X_s,
spatial_vector* adj_body_v_s,
int* adj_muscle_links,
df::float3* adj_muscle_points,
float & adj_muscle_activation,
spatial_vector* adj_body_f_s,
int & adj_ret)
{
//---------
// primal vars
int var_0;
const int var_1 = 1;
int var_2;
int var_3;
bool var_4;
const int var_5 = 0;
df::float3 var_6;
int var_7;
df::float3 var_8;
spatial_transform var_9;
spatial_transform var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
spatial_vector var_17;
df::float3 var_18;
spatial_vector var_19;
//---------
// dual vars
// one adjoint accumulator per primal SSA value, zero-initialized
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
bool adj_4 = 0;
int adj_5 = 0;
df::float3 adj_6 = 0;
int adj_7 = 0;
df::float3 adj_8 = 0;
spatial_transform adj_9 = 0;
spatial_transform adj_10 = 0;
df::float3 adj_11 = 0;
df::float3 adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
df::float3 adj_15 = 0;
df::float3 adj_16 = 0;
spatial_vector adj_17 = 0;
df::float3 adj_18 = 0;
spatial_vector adj_19 = 0;
//---------
// forward
// identical replay of the primal kernel; early exits jump to the matching
// label inside the reverse sweep below
var_0 = df::load(var_muscle_links, var_i);
var_2 = df::add(var_i, var_1);
var_3 = df::load(var_muscle_links, var_2);
var_4 = (var_0 == var_3);
if (var_4) {
goto label0;
}
var_6 = df::load(var_muscle_points, var_i);
var_7 = df::add(var_i, var_1);
var_8 = df::load(var_muscle_points, var_7);
var_9 = df::load(var_body_X_s, var_0);
var_10 = df::load(var_body_X_s, var_3);
var_11 = df::spatial_transform_point(var_9, var_6);
var_12 = df::spatial_transform_point(var_10, var_8);
var_13 = df::sub(var_12, var_11);
var_14 = df::normalize(var_13);
var_15 = df::mul(var_14, var_muscle_activation);
var_16 = df::cross(var_11, var_15);
var_17 = df::spatial_vector(var_16, var_15);
df::atomic_sub(var_body_f_s, var_0, var_17);
var_18 = df::cross(var_12, var_15);
var_19 = df::spatial_vector(var_18, var_15);
df::atomic_add(var_body_f_s, var_3, var_19);
goto label1;
//---------
// reverse
// each df::adj_* call below is the vector-Jacobian product of the forward
// operation it mirrors, applied in reverse program order
label1:;
adj_5 += adj_ret;
df::adj_atomic_add(var_body_f_s, var_3, var_19, adj_body_f_s, adj_3, adj_19);
df::adj_spatial_vector(var_18, var_15, adj_18, adj_15, adj_19);
df::adj_cross(var_12, var_15, adj_12, adj_15, adj_18);
df::adj_atomic_sub(var_body_f_s, var_0, var_17, adj_body_f_s, adj_0, adj_17);
df::adj_spatial_vector(var_16, var_15, adj_16, adj_15, adj_17);
df::adj_cross(var_11, var_15, adj_11, adj_15, adj_16);
df::adj_mul(var_14, var_muscle_activation, adj_14, adj_muscle_activation, adj_15);
df::adj_normalize(var_13, adj_13, adj_14);
df::adj_sub(var_12, var_11, adj_12, adj_11, adj_13);
df::adj_spatial_transform_point(var_10, var_8, adj_10, adj_8, adj_12);
df::adj_spatial_transform_point(var_9, var_6, adj_9, adj_6, adj_11);
df::adj_load(var_body_X_s, var_3, adj_body_X_s, adj_3, adj_10);
df::adj_load(var_body_X_s, var_0, adj_body_X_s, adj_0, adj_9);
df::adj_load(var_muscle_points, var_7, adj_muscle_points, adj_7, adj_8);
df::adj_add(var_i, var_1, adj_i, adj_1, adj_7);
df::adj_load(var_muscle_points, var_i, adj_muscle_points, adj_i, adj_6);
// re-entry point for the early-exit (same-body) branch of the forward pass
if (var_4) {
label0:;
adj_5 += adj_ret;
}
df::adj_load(var_muscle_links, var_2, adj_muscle_links, adj_2, adj_3);
df::adj_add(var_i, var_1, adj_i, adj_1, adj_2);
df::adj_load(var_muscle_links, var_i, adj_muscle_links, adj_i, adj_0);
return;
}
// Computes a joint's local transform from the joint coordinates in
// var_joint_q starting at offset var_start, dispatching on the joint type.
// Auto-generated (SSA-style) kernel — do not edit by hand.
//
// Coordinate layout per type (NOTE(review): joint-type names inferred from
// the dof layout; confirm against the joint-type constants defined in the
// model description):
//   0: 1 coord  -> translation along var_axis, identity rotation (prismatic)
//   1: 1 coord  -> rotation about var_axis, zero translation      (revolute)
//   2: 4 coords -> quaternion, zero translation                   (ball)
//   3: 0 coords -> identity transform                             (fixed)
//   4: 7 coords -> 3 translation + 4 quaternion                   (free)
// Any other type falls through to the identity transform.
//
// NOTE(review): the df::select chains after each branch are codegen
// artifacts mirroring the reverse pass; on the fall-through path they read
// variables that were never assigned, but their results (var_57..var_62)
// are not used by the final return value.
spatial_transform jcalc_transform_cpu_func(
int var_type,
df::float3 var_axis,
float* var_joint_q,
int var_start)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
float var_2;
df::float3 var_3;
quat var_4;
spatial_transform var_5;
const int var_6 = 1;
bool var_7;
float var_8;
const float var_9 = 0.0;
df::float3 var_10;
quat var_11;
spatial_transform var_12;
float var_13;
spatial_transform var_14;
spatial_transform var_15;
const int var_16 = 2;
bool var_17;
int var_18;
float var_19;
int var_20;
float var_21;
int var_22;
float var_23;
const int var_24 = 3;
int var_25;
float var_26;
df::float3 var_27;
quat var_28;
spatial_transform var_29;
spatial_transform var_30;
spatial_transform var_31;
bool var_32;
spatial_transform var_33;
spatial_transform var_34;
spatial_transform var_35;
const int var_36 = 4;
bool var_37;
int var_38;
float var_39;
int var_40;
float var_41;
int var_42;
float var_43;
int var_44;
float var_45;
int var_46;
float var_47;
const int var_48 = 5;
int var_49;
float var_50;
const int var_51 = 6;
int var_52;
float var_53;
df::float3 var_54;
quat var_55;
spatial_transform var_56;
spatial_transform var_57;
spatial_transform var_58;
float var_59;
float var_60;
float var_61;
float var_62;
spatial_transform var_63;
//---------
// forward
// type 0: translate along the joint axis by q[start]
var_1 = (var_type == var_0);
if (var_1) {
var_2 = df::load(var_joint_q, var_start);
var_3 = df::mul(var_axis, var_2);
var_4 = df::quat_identity();
var_5 = df::spatial_transform(var_3, var_4);
return var_5;
}
// type 1: rotate about the joint axis by q[start]
var_7 = (var_type == var_6);
if (var_7) {
var_8 = df::load(var_joint_q, var_start);
var_10 = df::float3(var_9, var_9, var_9);
var_11 = df::quat_from_axis_angle(var_axis, var_8);
var_12 = df::spatial_transform(var_10, var_11);
return var_12;
}
var_13 = df::select(var_7, var_2, var_8);
var_14 = df::select(var_7, var_5, var_12);
var_15 = df::select(var_7, var_5, var_12);
// type 2: quaternion joint, q[start..start+3] are the quat components
var_17 = (var_type == var_16);
if (var_17) {
var_18 = df::add(var_start, var_0);
var_19 = df::load(var_joint_q, var_18);
var_20 = df::add(var_start, var_6);
var_21 = df::load(var_joint_q, var_20);
var_22 = df::add(var_start, var_16);
var_23 = df::load(var_joint_q, var_22);
var_25 = df::add(var_start, var_24);
var_26 = df::load(var_joint_q, var_25);
var_27 = df::float3(var_9, var_9, var_9);
var_28 = df::quat(var_19, var_21, var_23, var_26);
var_29 = df::spatial_transform(var_27, var_28);
return var_29;
}
var_30 = df::select(var_17, var_14, var_29);
var_31 = df::select(var_17, var_15, var_29);
// type 3: no dofs, identity transform
var_32 = (var_type == var_24);
if (var_32) {
var_33 = df::spatial_transform_identity();
return var_33;
}
var_34 = df::select(var_32, var_30, var_33);
var_35 = df::select(var_32, var_31, var_33);
// type 4: q[start..start+2] translation, q[start+3..start+6] quaternion
var_37 = (var_type == var_36);
if (var_37) {
var_38 = df::add(var_start, var_0);
var_39 = df::load(var_joint_q, var_38);
var_40 = df::add(var_start, var_6);
var_41 = df::load(var_joint_q, var_40);
var_42 = df::add(var_start, var_16);
var_43 = df::load(var_joint_q, var_42);
var_44 = df::add(var_start, var_24);
var_45 = df::load(var_joint_q, var_44);
var_46 = df::add(var_start, var_36);
var_47 = df::load(var_joint_q, var_46);
var_49 = df::add(var_start, var_48);
var_50 = df::load(var_joint_q, var_49);
var_52 = df::add(var_start, var_51);
var_53 = df::load(var_joint_q, var_52);
var_54 = df::float3(var_39, var_41, var_43);
var_55 = df::quat(var_45, var_47, var_50, var_53);
var_56 = df::spatial_transform(var_54, var_55);
return var_56;
}
var_57 = df::select(var_37, var_34, var_56);
var_58 = df::select(var_37, var_35, var_56);
var_59 = df::select(var_37, var_19, var_45);
var_60 = df::select(var_37, var_21, var_47);
var_61 = df::select(var_37, var_23, var_50);
var_62 = df::select(var_37, var_26, var_53);
// unrecognized joint type: fall back to the identity transform
var_63 = df::spatial_transform_identity();
return var_63;
}
// Reverse-mode (adjoint) counterpart of jcalc_transform_cpu_func.
// Replays the forward pass (each early `return` replaced by a `goto` into
// the matching label of the reverse sweep), then propagates adj_ret — the
// adjoint of the returned spatial_transform — backwards through whichever
// joint-type branch was taken. Auto-generated code; the forward/reverse
// label pairing and statement order are load-bearing — do not edit by hand.
//
// adj_type / adj_axis / adj_joint_q / adj_start accumulate gradients with
// respect to the matching primal inputs.
void adj_jcalc_transform_cpu_func(
int var_type,
df::float3 var_axis,
float* var_joint_q,
int var_start,
int & adj_type,
df::float3 & adj_axis,
float* adj_joint_q,
int & adj_start,
spatial_transform & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
float var_2;
df::float3 var_3;
quat var_4;
spatial_transform var_5;
const int var_6 = 1;
bool var_7;
float var_8;
const float var_9 = 0.0;
df::float3 var_10;
quat var_11;
spatial_transform var_12;
float var_13;
spatial_transform var_14;
spatial_transform var_15;
const int var_16 = 2;
bool var_17;
int var_18;
float var_19;
int var_20;
float var_21;
int var_22;
float var_23;
const int var_24 = 3;
int var_25;
float var_26;
df::float3 var_27;
quat var_28;
spatial_transform var_29;
spatial_transform var_30;
spatial_transform var_31;
bool var_32;
spatial_transform var_33;
spatial_transform var_34;
spatial_transform var_35;
const int var_36 = 4;
bool var_37;
int var_38;
float var_39;
int var_40;
float var_41;
int var_42;
float var_43;
int var_44;
float var_45;
int var_46;
float var_47;
const int var_48 = 5;
int var_49;
float var_50;
const int var_51 = 6;
int var_52;
float var_53;
df::float3 var_54;
quat var_55;
spatial_transform var_56;
spatial_transform var_57;
spatial_transform var_58;
float var_59;
float var_60;
float var_61;
float var_62;
spatial_transform var_63;
//---------
// dual vars
// one adjoint accumulator per primal SSA value, zero-initialized
int adj_0 = 0;
bool adj_1 = 0;
float adj_2 = 0;
df::float3 adj_3 = 0;
quat adj_4 = 0;
spatial_transform adj_5 = 0;
int adj_6 = 0;
bool adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
df::float3 adj_10 = 0;
quat adj_11 = 0;
spatial_transform adj_12 = 0;
float adj_13 = 0;
spatial_transform adj_14 = 0;
spatial_transform adj_15 = 0;
int adj_16 = 0;
bool adj_17 = 0;
int adj_18 = 0;
float adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
int adj_22 = 0;
float adj_23 = 0;
int adj_24 = 0;
int adj_25 = 0;
float adj_26 = 0;
df::float3 adj_27 = 0;
quat adj_28 = 0;
spatial_transform adj_29 = 0;
spatial_transform adj_30 = 0;
spatial_transform adj_31 = 0;
bool adj_32 = 0;
spatial_transform adj_33 = 0;
spatial_transform adj_34 = 0;
spatial_transform adj_35 = 0;
int adj_36 = 0;
bool adj_37 = 0;
int adj_38 = 0;
float adj_39 = 0;
int adj_40 = 0;
float adj_41 = 0;
int adj_42 = 0;
float adj_43 = 0;
int adj_44 = 0;
float adj_45 = 0;
int adj_46 = 0;
float adj_47 = 0;
int adj_48 = 0;
int adj_49 = 0;
float adj_50 = 0;
int adj_51 = 0;
int adj_52 = 0;
float adj_53 = 0;
df::float3 adj_54 = 0;
quat adj_55 = 0;
spatial_transform adj_56 = 0;
spatial_transform adj_57 = 0;
spatial_transform adj_58 = 0;
float adj_59 = 0;
float adj_60 = 0;
float adj_61 = 0;
float adj_62 = 0;
spatial_transform adj_63 = 0;
//---------
// forward
// identical replay of the primal kernel; each branch exits via a goto to
// its paired label in the reverse sweep
var_1 = (var_type == var_0);
if (var_1) {
var_2 = df::load(var_joint_q, var_start);
var_3 = df::mul(var_axis, var_2);
var_4 = df::quat_identity();
var_5 = df::spatial_transform(var_3, var_4);
goto label0;
}
var_7 = (var_type == var_6);
if (var_7) {
var_8 = df::load(var_joint_q, var_start);
var_10 = df::float3(var_9, var_9, var_9);
var_11 = df::quat_from_axis_angle(var_axis, var_8);
var_12 = df::spatial_transform(var_10, var_11);
goto label1;
}
var_13 = df::select(var_7, var_2, var_8);
var_14 = df::select(var_7, var_5, var_12);
var_15 = df::select(var_7, var_5, var_12);
var_17 = (var_type == var_16);
if (var_17) {
var_18 = df::add(var_start, var_0);
var_19 = df::load(var_joint_q, var_18);
var_20 = df::add(var_start, var_6);
var_21 = df::load(var_joint_q, var_20);
var_22 = df::add(var_start, var_16);
var_23 = df::load(var_joint_q, var_22);
var_25 = df::add(var_start, var_24);
var_26 = df::load(var_joint_q, var_25);
var_27 = df::float3(var_9, var_9, var_9);
var_28 = df::quat(var_19, var_21, var_23, var_26);
var_29 = df::spatial_transform(var_27, var_28);
goto label2;
}
var_30 = df::select(var_17, var_14, var_29);
var_31 = df::select(var_17, var_15, var_29);
var_32 = (var_type == var_24);
if (var_32) {
var_33 = df::spatial_transform_identity();
goto label3;
}
var_34 = df::select(var_32, var_30, var_33);
var_35 = df::select(var_32, var_31, var_33);
var_37 = (var_type == var_36);
if (var_37) {
var_38 = df::add(var_start, var_0);
var_39 = df::load(var_joint_q, var_38);
var_40 = df::add(var_start, var_6);
var_41 = df::load(var_joint_q, var_40);
var_42 = df::add(var_start, var_16);
var_43 = df::load(var_joint_q, var_42);
var_44 = df::add(var_start, var_24);
var_45 = df::load(var_joint_q, var_44);
var_46 = df::add(var_start, var_36);
var_47 = df::load(var_joint_q, var_46);
var_49 = df::add(var_start, var_48);
var_50 = df::load(var_joint_q, var_49);
var_52 = df::add(var_start, var_51);
var_53 = df::load(var_joint_q, var_52);
var_54 = df::float3(var_39, var_41, var_43);
var_55 = df::quat(var_45, var_47, var_50, var_53);
var_56 = df::spatial_transform(var_54, var_55);
goto label4;
}
var_57 = df::select(var_37, var_34, var_56);
var_58 = df::select(var_37, var_35, var_56);
var_59 = df::select(var_37, var_19, var_45);
var_60 = df::select(var_37, var_21, var_47);
var_61 = df::select(var_37, var_23, var_50);
var_62 = df::select(var_37, var_26, var_53);
var_63 = df::spatial_transform_identity();
goto label5;
//---------
// reverse
// each df::adj_* call is the vector-Jacobian product of the forward op it
// mirrors; branch bodies are re-entered via the labels recorded above
label5:;
adj_63 += adj_ret;
df::adj_select(var_37, var_26, var_53, adj_37, adj_26, adj_53, adj_62);
df::adj_select(var_37, var_23, var_50, adj_37, adj_23, adj_50, adj_61);
df::adj_select(var_37, var_21, var_47, adj_37, adj_21, adj_47, adj_60);
df::adj_select(var_37, var_19, var_45, adj_37, adj_19, adj_45, adj_59);
df::adj_select(var_37, var_35, var_56, adj_37, adj_35, adj_56, adj_58);
df::adj_select(var_37, var_34, var_56, adj_37, adj_34, adj_56, adj_57);
// reverse of the 7-dof (type 4) branch
if (var_37) {
label4:;
adj_56 += adj_ret;
df::adj_spatial_transform(var_54, var_55, adj_54, adj_55, adj_56);
df::adj_quat(var_45, var_47, var_50, var_53, adj_45, adj_47, adj_50, adj_53, adj_55);
df::adj_float3(var_39, var_41, var_43, adj_39, adj_41, adj_43, adj_54);
df::adj_load(var_joint_q, var_52, adj_joint_q, adj_52, adj_53);
df::adj_add(var_start, var_51, adj_start, adj_51, adj_52);
df::adj_load(var_joint_q, var_49, adj_joint_q, adj_49, adj_50);
df::adj_add(var_start, var_48, adj_start, adj_48, adj_49);
df::adj_load(var_joint_q, var_46, adj_joint_q, adj_46, adj_47);
df::adj_add(var_start, var_36, adj_start, adj_36, adj_46);
df::adj_load(var_joint_q, var_44, adj_joint_q, adj_44, adj_45);
df::adj_add(var_start, var_24, adj_start, adj_24, adj_44);
df::adj_load(var_joint_q, var_42, adj_joint_q, adj_42, adj_43);
df::adj_add(var_start, var_16, adj_start, adj_16, adj_42);
df::adj_load(var_joint_q, var_40, adj_joint_q, adj_40, adj_41);
df::adj_add(var_start, var_6, adj_start, adj_6, adj_40);
df::adj_load(var_joint_q, var_38, adj_joint_q, adj_38, adj_39);
df::adj_add(var_start, var_0, adj_start, adj_0, adj_38);
}
df::adj_select(var_32, var_31, var_33, adj_32, adj_31, adj_33, adj_35);
df::adj_select(var_32, var_30, var_33, adj_32, adj_30, adj_33, adj_34);
// reverse of the identity (type 3) branch — no input gradients
if (var_32) {
label3:;
adj_33 += adj_ret;
}
df::adj_select(var_17, var_15, var_29, adj_17, adj_15, adj_29, adj_31);
df::adj_select(var_17, var_14, var_29, adj_17, adj_14, adj_29, adj_30);
// reverse of the quaternion (type 2) branch
if (var_17) {
label2:;
adj_29 += adj_ret;
df::adj_spatial_transform(var_27, var_28, adj_27, adj_28, adj_29);
df::adj_quat(var_19, var_21, var_23, var_26, adj_19, adj_21, adj_23, adj_26, adj_28);
df::adj_float3(var_9, var_9, var_9, adj_9, adj_9, adj_9, adj_27);
df::adj_load(var_joint_q, var_25, adj_joint_q, adj_25, adj_26);
df::adj_add(var_start, var_24, adj_start, adj_24, adj_25);
df::adj_load(var_joint_q, var_22, adj_joint_q, adj_22, adj_23);
df::adj_add(var_start, var_16, adj_start, adj_16, adj_22);
df::adj_load(var_joint_q, var_20, adj_joint_q, adj_20, adj_21);
df::adj_add(var_start, var_6, adj_start, adj_6, adj_20);
df::adj_load(var_joint_q, var_18, adj_joint_q, adj_18, adj_19);
df::adj_add(var_start, var_0, adj_start, adj_0, adj_18);
}
df::adj_select(var_7, var_5, var_12, adj_7, adj_5, adj_12, adj_15);
df::adj_select(var_7, var_5, var_12, adj_7, adj_5, adj_12, adj_14);
df::adj_select(var_7, var_2, var_8, adj_7, adj_2, adj_8, adj_13);
// reverse of the axis-angle rotation (type 1) branch
if (var_7) {
label1:;
adj_12 += adj_ret;
df::adj_spatial_transform(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_quat_from_axis_angle(var_axis, var_8, adj_axis, adj_8, adj_11);
df::adj_float3(var_9, var_9, var_9, adj_9, adj_9, adj_9, adj_10);
df::adj_load(var_joint_q, var_start, adj_joint_q, adj_start, adj_8);
}
// reverse of the axis-translation (type 0) branch
if (var_1) {
label0:;
adj_5 += adj_ret;
df::adj_spatial_transform(var_3, var_4, adj_3, adj_4, adj_5);
df::adj_mul(var_axis, var_2, adj_axis, adj_2, adj_3);
df::adj_load(var_joint_q, var_start, adj_joint_q, adj_start, adj_2);
}
return;
}
// Computes a joint's spatial velocity from its generalized velocities
// var_joint_qd and writes the joint's motion-subspace basis vectors into
// var_joint_S_s (one spatial_vector per dof, starting at var_joint_start).
// Auto-generated (SSA-style) kernel — do not edit by hand.
//
// Velocity layout per type (NOTE(review): joint-type names inferred from
// the dof layout, consistent with jcalc_transform_cpu_func — confirm
// against the model's joint-type constants):
//   0: 1 dof, subspace (0, axis) transformed by X_sc       (prismatic)
//   1: 1 dof, subspace (axis, 0) transformed by X_sc       (revolute)
//   2: 3 dofs, angular unit twists transformed by X_sc     (ball)
//   3: 0 dofs, zero velocity                               (fixed)
//   4: 6 dofs, canonical basis stored WITHOUT applying the
//      X_sc twist transform (unlike the other types)       (free)
// Any other type returns a default spatial_vector.
spatial_vector jcalc_motion_cpu_func(
int var_type,
df::float3 var_axis,
spatial_transform var_X_sc,
spatial_vector* var_joint_S_s,
float* var_joint_qd,
int var_joint_start)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const float var_2 = 0.0;
df::float3 var_3;
spatial_vector var_4;
spatial_vector var_5;
float var_6;
spatial_vector var_7;
const int var_8 = 1;
bool var_9;
df::float3 var_10;
spatial_vector var_11;
spatial_vector var_12;
float var_13;
spatial_vector var_14;
spatial_vector var_15;
spatial_vector var_16;
spatial_vector var_17;
const int var_18 = 2;
bool var_19;
int var_20;
float var_21;
int var_22;
float var_23;
int var_24;
float var_25;
df::float3 var_26;
const float var_27 = 1.0;
spatial_vector var_28;
spatial_vector var_29;
spatial_vector var_30;
spatial_vector var_31;
spatial_vector var_32;
spatial_vector var_33;
int var_34;
int var_35;
int var_36;
float var_37;
spatial_vector var_38;
float var_39;
spatial_vector var_40;
spatial_vector var_41;
float var_42;
spatial_vector var_43;
spatial_vector var_44;
spatial_vector var_45;
const int var_46 = 3;
bool var_47;
spatial_vector var_48;
spatial_vector var_49;
const int var_50 = 4;
bool var_51;
int var_52;
float var_53;
int var_54;
float var_55;
int var_56;
float var_57;
int var_58;
float var_59;
int var_60;
float var_61;
const int var_62 = 5;
int var_63;
float var_64;
spatial_vector var_65;
int var_66;
spatial_vector var_67;
int var_68;
spatial_vector var_69;
int var_70;
spatial_vector var_71;
int var_72;
spatial_vector var_73;
int var_74;
spatial_vector var_75;
int var_76;
spatial_vector var_77;
spatial_vector var_78;
spatial_vector var_79;
spatial_vector var_80;
//---------
// forward
// type 0: single dof along the joint axis (zero angular part)
var_1 = (var_type == var_0);
if (var_1) {
var_3 = df::float3(var_2, var_2, var_2);
var_4 = df::spatial_vector(var_3, var_axis);
var_5 = spatial_transform_twist_cpu_func(var_X_sc, var_4);
var_6 = df::load(var_joint_qd, var_joint_start);
var_7 = df::mul(var_5, var_6);
// store the (single) motion-subspace column, then return S * qd
df::store(var_joint_S_s, var_joint_start, var_5);
return var_7;
}
// type 1: single dof about the joint axis (zero linear part)
var_9 = (var_type == var_8);
if (var_9) {
var_10 = df::float3(var_2, var_2, var_2);
var_11 = df::spatial_vector(var_axis, var_10);
var_12 = spatial_transform_twist_cpu_func(var_X_sc, var_11);
var_13 = df::load(var_joint_qd, var_joint_start);
var_14 = df::mul(var_12, var_13);
df::store(var_joint_S_s, var_joint_start, var_12);
return var_14;
}
var_15 = df::select(var_9, var_5, var_12);
var_16 = df::select(var_9, var_7, var_14);
var_17 = df::select(var_9, var_7, var_14);
// type 2: three dofs; unit twists in the first three components, each
// transformed by X_sc, combined as S0*qd0 + S1*qd1 + S2*qd2
var_19 = (var_type == var_18);
if (var_19) {
var_20 = df::add(var_joint_start, var_0);
var_21 = df::load(var_joint_qd, var_20);
var_22 = df::add(var_joint_start, var_8);
var_23 = df::load(var_joint_qd, var_22);
var_24 = df::add(var_joint_start, var_18);
var_25 = df::load(var_joint_qd, var_24);
var_26 = df::float3(var_21, var_23, var_25);
var_28 = df::spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2);
var_29 = spatial_transform_twist_cpu_func(var_X_sc, var_28);
var_30 = df::spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2);
var_31 = spatial_transform_twist_cpu_func(var_X_sc, var_30);
var_32 = df::spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2);
var_33 = spatial_transform_twist_cpu_func(var_X_sc, var_32);
var_34 = df::add(var_joint_start, var_0);
df::store(var_joint_S_s, var_34, var_29);
var_35 = df::add(var_joint_start, var_8);
df::store(var_joint_S_s, var_35, var_31);
var_36 = df::add(var_joint_start, var_18);
df::store(var_joint_S_s, var_36, var_33);
var_37 = df::index(var_26, var_0);
var_38 = df::mul(var_29, var_37);
var_39 = df::index(var_26, var_8);
var_40 = df::mul(var_31, var_39);
var_41 = df::add(var_38, var_40);
var_42 = df::index(var_26, var_18);
var_43 = df::mul(var_33, var_42);
var_44 = df::add(var_41, var_43);
return var_44;
}
var_45 = df::select(var_19, var_17, var_44);
// type 3: no dofs, zero joint velocity; nothing stored in joint_S_s
var_47 = (var_type == var_46);
if (var_47) {
var_48 = df::spatial_vector();
return var_48;
}
var_49 = df::select(var_47, var_45, var_48);
// type 4: six dofs; qd[start..start+5] form the joint velocity directly,
// and the subspace is the canonical 6x6 identity basis (NOT transformed
// by X_sc, unlike the other joint types)
var_51 = (var_type == var_50);
if (var_51) {
var_52 = df::add(var_joint_start, var_0);
var_53 = df::load(var_joint_qd, var_52);
var_54 = df::add(var_joint_start, var_8);
var_55 = df::load(var_joint_qd, var_54);
var_56 = df::add(var_joint_start, var_18);
var_57 = df::load(var_joint_qd, var_56);
var_58 = df::add(var_joint_start, var_46);
var_59 = df::load(var_joint_qd, var_58);
var_60 = df::add(var_joint_start, var_50);
var_61 = df::load(var_joint_qd, var_60);
var_63 = df::add(var_joint_start, var_62);
var_64 = df::load(var_joint_qd, var_63);
var_65 = df::spatial_vector(var_53, var_55, var_57, var_59, var_61, var_64);
var_66 = df::add(var_joint_start, var_0);
var_67 = df::spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2);
df::store(var_joint_S_s, var_66, var_67);
var_68 = df::add(var_joint_start, var_8);
var_69 = df::spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2);
df::store(var_joint_S_s, var_68, var_69);
var_70 = df::add(var_joint_start, var_18);
var_71 = df::spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2);
df::store(var_joint_S_s, var_70, var_71);
var_72 = df::add(var_joint_start, var_46);
var_73 = df::spatial_vector(var_2, var_2, var_2, var_27, var_2, var_2);
df::store(var_joint_S_s, var_72, var_73);
var_74 = df::add(var_joint_start, var_50);
var_75 = df::spatial_vector(var_2, var_2, var_2, var_2, var_27, var_2);
df::store(var_joint_S_s, var_74, var_75);
var_76 = df::add(var_joint_start, var_62);
var_77 = df::spatial_vector(var_2, var_2, var_2, var_2, var_2, var_27);
df::store(var_joint_S_s, var_76, var_77);
return var_65;
}
var_78 = df::select(var_51, var_16, var_65);
var_79 = df::select(var_51, var_49, var_65);
// unrecognized joint type: default-constructed spatial_vector
var_80 = df::spatial_vector();
return var_80;
}
void adj_jcalc_motion_cpu_func(
int var_type,
df::float3 var_axis,
spatial_transform var_X_sc,
spatial_vector* var_joint_S_s,
float* var_joint_qd,
int var_joint_start,
int & adj_type,
df::float3 & adj_axis,
spatial_transform & adj_X_sc,
spatial_vector* adj_joint_S_s,
float* adj_joint_qd,
int & adj_joint_start,
spatial_vector & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const float var_2 = 0.0;
df::float3 var_3;
spatial_vector var_4;
spatial_vector var_5;
float var_6;
spatial_vector var_7;
const int var_8 = 1;
bool var_9;
df::float3 var_10;
spatial_vector var_11;
spatial_vector var_12;
float var_13;
spatial_vector var_14;
spatial_vector var_15;
spatial_vector var_16;
spatial_vector var_17;
const int var_18 = 2;
bool var_19;
int var_20;
float var_21;
int var_22;
float var_23;
int var_24;
float var_25;
df::float3 var_26;
const float var_27 = 1.0;
spatial_vector var_28;
spatial_vector var_29;
spatial_vector var_30;
spatial_vector var_31;
spatial_vector var_32;
spatial_vector var_33;
int var_34;
int var_35;
int var_36;
float var_37;
spatial_vector var_38;
float var_39;
spatial_vector var_40;
spatial_vector var_41;
float var_42;
spatial_vector var_43;
spatial_vector var_44;
spatial_vector var_45;
const int var_46 = 3;
bool var_47;
spatial_vector var_48;
spatial_vector var_49;
const int var_50 = 4;
bool var_51;
int var_52;
float var_53;
int var_54;
float var_55;
int var_56;
float var_57;
int var_58;
float var_59;
int var_60;
float var_61;
const int var_62 = 5;
int var_63;
float var_64;
spatial_vector var_65;
int var_66;
spatial_vector var_67;
int var_68;
spatial_vector var_69;
int var_70;
spatial_vector var_71;
int var_72;
spatial_vector var_73;
int var_74;
spatial_vector var_75;
int var_76;
spatial_vector var_77;
spatial_vector var_78;
spatial_vector var_79;
spatial_vector var_80;
//---------
// dual vars
int adj_0 = 0;
bool adj_1 = 0;
float adj_2 = 0;
df::float3 adj_3 = 0;
spatial_vector adj_4 = 0;
spatial_vector adj_5 = 0;
float adj_6 = 0;
spatial_vector adj_7 = 0;
int adj_8 = 0;
bool adj_9 = 0;
df::float3 adj_10 = 0;
spatial_vector adj_11 = 0;
spatial_vector adj_12 = 0;
float adj_13 = 0;
spatial_vector adj_14 = 0;
spatial_vector adj_15 = 0;
spatial_vector adj_16 = 0;
spatial_vector adj_17 = 0;
int adj_18 = 0;
bool adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
int adj_22 = 0;
float adj_23 = 0;
int adj_24 = 0;
float adj_25 = 0;
df::float3 adj_26 = 0;
float adj_27 = 0;
spatial_vector adj_28 = 0;
spatial_vector adj_29 = 0;
spatial_vector adj_30 = 0;
spatial_vector adj_31 = 0;
spatial_vector adj_32 = 0;
spatial_vector adj_33 = 0;
int adj_34 = 0;
int adj_35 = 0;
int adj_36 = 0;
float adj_37 = 0;
spatial_vector adj_38 = 0;
float adj_39 = 0;
spatial_vector adj_40 = 0;
spatial_vector adj_41 = 0;
float adj_42 = 0;
spatial_vector adj_43 = 0;
spatial_vector adj_44 = 0;
spatial_vector adj_45 = 0;
int adj_46 = 0;
bool adj_47 = 0;
spatial_vector adj_48 = 0;
spatial_vector adj_49 = 0;
int adj_50 = 0;
bool adj_51 = 0;
int adj_52 = 0;
float adj_53 = 0;
int adj_54 = 0;
float adj_55 = 0;
int adj_56 = 0;
float adj_57 = 0;
int adj_58 = 0;
float adj_59 = 0;
int adj_60 = 0;
float adj_61 = 0;
int adj_62 = 0;
int adj_63 = 0;
float adj_64 = 0;
spatial_vector adj_65 = 0;
int adj_66 = 0;
spatial_vector adj_67 = 0;
int adj_68 = 0;
spatial_vector adj_69 = 0;
int adj_70 = 0;
spatial_vector adj_71 = 0;
int adj_72 = 0;
spatial_vector adj_73 = 0;
int adj_74 = 0;
spatial_vector adj_75 = 0;
int adj_76 = 0;
spatial_vector adj_77 = 0;
spatial_vector adj_78 = 0;
spatial_vector adj_79 = 0;
spatial_vector adj_80 = 0;
//---------
// forward
var_1 = (var_type == var_0);
if (var_1) {
var_3 = df::float3(var_2, var_2, var_2);
var_4 = df::spatial_vector(var_3, var_axis);
var_5 = spatial_transform_twist_cpu_func(var_X_sc, var_4);
var_6 = df::load(var_joint_qd, var_joint_start);
var_7 = df::mul(var_5, var_6);
df::store(var_joint_S_s, var_joint_start, var_5);
goto label0;
}
var_9 = (var_type == var_8);
if (var_9) {
var_10 = df::float3(var_2, var_2, var_2);
var_11 = df::spatial_vector(var_axis, var_10);
var_12 = spatial_transform_twist_cpu_func(var_X_sc, var_11);
var_13 = df::load(var_joint_qd, var_joint_start);
var_14 = df::mul(var_12, var_13);
df::store(var_joint_S_s, var_joint_start, var_12);
goto label1;
}
var_15 = df::select(var_9, var_5, var_12);
var_16 = df::select(var_9, var_7, var_14);
var_17 = df::select(var_9, var_7, var_14);
var_19 = (var_type == var_18);
if (var_19) {
var_20 = df::add(var_joint_start, var_0);
var_21 = df::load(var_joint_qd, var_20);
var_22 = df::add(var_joint_start, var_8);
var_23 = df::load(var_joint_qd, var_22);
var_24 = df::add(var_joint_start, var_18);
var_25 = df::load(var_joint_qd, var_24);
var_26 = df::float3(var_21, var_23, var_25);
var_28 = df::spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2);
var_29 = spatial_transform_twist_cpu_func(var_X_sc, var_28);
var_30 = df::spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2);
var_31 = spatial_transform_twist_cpu_func(var_X_sc, var_30);
var_32 = df::spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2);
var_33 = spatial_transform_twist_cpu_func(var_X_sc, var_32);
var_34 = df::add(var_joint_start, var_0);
df::store(var_joint_S_s, var_34, var_29);
var_35 = df::add(var_joint_start, var_8);
df::store(var_joint_S_s, var_35, var_31);
var_36 = df::add(var_joint_start, var_18);
df::store(var_joint_S_s, var_36, var_33);
var_37 = df::index(var_26, var_0);
var_38 = df::mul(var_29, var_37);
var_39 = df::index(var_26, var_8);
var_40 = df::mul(var_31, var_39);
var_41 = df::add(var_38, var_40);
var_42 = df::index(var_26, var_18);
var_43 = df::mul(var_33, var_42);
var_44 = df::add(var_41, var_43);
goto label2;
}
var_45 = df::select(var_19, var_17, var_44);
var_47 = (var_type == var_46);
if (var_47) {
var_48 = df::spatial_vector();
goto label3;
}
var_49 = df::select(var_47, var_45, var_48);
var_51 = (var_type == var_50);
if (var_51) {
var_52 = df::add(var_joint_start, var_0);
var_53 = df::load(var_joint_qd, var_52);
var_54 = df::add(var_joint_start, var_8);
var_55 = df::load(var_joint_qd, var_54);
var_56 = df::add(var_joint_start, var_18);
var_57 = df::load(var_joint_qd, var_56);
var_58 = df::add(var_joint_start, var_46);
var_59 = df::load(var_joint_qd, var_58);
var_60 = df::add(var_joint_start, var_50);
var_61 = df::load(var_joint_qd, var_60);
var_63 = df::add(var_joint_start, var_62);
var_64 = df::load(var_joint_qd, var_63);
var_65 = df::spatial_vector(var_53, var_55, var_57, var_59, var_61, var_64);
var_66 = df::add(var_joint_start, var_0);
var_67 = df::spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2);
df::store(var_joint_S_s, var_66, var_67);
var_68 = df::add(var_joint_start, var_8);
var_69 = df::spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2);
df::store(var_joint_S_s, var_68, var_69);
var_70 = df::add(var_joint_start, var_18);
var_71 = df::spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2);
df::store(var_joint_S_s, var_70, var_71);
var_72 = df::add(var_joint_start, var_46);
var_73 = df::spatial_vector(var_2, var_2, var_2, var_27, var_2, var_2);
df::store(var_joint_S_s, var_72, var_73);
var_74 = df::add(var_joint_start, var_50);
var_75 = df::spatial_vector(var_2, var_2, var_2, var_2, var_27, var_2);
df::store(var_joint_S_s, var_74, var_75);
var_76 = df::add(var_joint_start, var_62);
var_77 = df::spatial_vector(var_2, var_2, var_2, var_2, var_2, var_27);
df::store(var_joint_S_s, var_76, var_77);
goto label4;
}
var_78 = df::select(var_51, var_16, var_65);
var_79 = df::select(var_51, var_49, var_65);
var_80 = df::spatial_vector();
goto label5;
//---------
// reverse
label5:;
adj_80 += adj_ret;
df::adj_select(var_51, var_49, var_65, adj_51, adj_49, adj_65, adj_79);
df::adj_select(var_51, var_16, var_65, adj_51, adj_16, adj_65, adj_78);
if (var_51) {
label4:;
adj_65 += adj_ret;
df::adj_store(var_joint_S_s, var_76, var_77, adj_joint_S_s, adj_76, adj_77);
df::adj_spatial_vector(var_2, var_2, var_2, var_2, var_2, var_27, adj_2, adj_2, adj_2, adj_2, adj_2, adj_27, adj_77);
df::adj_add(var_joint_start, var_62, adj_joint_start, adj_62, adj_76);
df::adj_store(var_joint_S_s, var_74, var_75, adj_joint_S_s, adj_74, adj_75);
df::adj_spatial_vector(var_2, var_2, var_2, var_2, var_27, var_2, adj_2, adj_2, adj_2, adj_2, adj_27, adj_2, adj_75);
df::adj_add(var_joint_start, var_50, adj_joint_start, adj_50, adj_74);
df::adj_store(var_joint_S_s, var_72, var_73, adj_joint_S_s, adj_72, adj_73);
df::adj_spatial_vector(var_2, var_2, var_2, var_27, var_2, var_2, adj_2, adj_2, adj_2, adj_27, adj_2, adj_2, adj_73);
df::adj_add(var_joint_start, var_46, adj_joint_start, adj_46, adj_72);
df::adj_store(var_joint_S_s, var_70, var_71, adj_joint_S_s, adj_70, adj_71);
df::adj_spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2, adj_2, adj_2, adj_27, adj_2, adj_2, adj_2, adj_71);
df::adj_add(var_joint_start, var_18, adj_joint_start, adj_18, adj_70);
df::adj_store(var_joint_S_s, var_68, var_69, adj_joint_S_s, adj_68, adj_69);
df::adj_spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2, adj_2, adj_27, adj_2, adj_2, adj_2, adj_2, adj_69);
df::adj_add(var_joint_start, var_8, adj_joint_start, adj_8, adj_68);
df::adj_store(var_joint_S_s, var_66, var_67, adj_joint_S_s, adj_66, adj_67);
df::adj_spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2, adj_27, adj_2, adj_2, adj_2, adj_2, adj_2, adj_67);
df::adj_add(var_joint_start, var_0, adj_joint_start, adj_0, adj_66);
df::adj_spatial_vector(var_53, var_55, var_57, var_59, var_61, var_64, adj_53, adj_55, adj_57, adj_59, adj_61, adj_64, adj_65);
df::adj_load(var_joint_qd, var_63, adj_joint_qd, adj_63, adj_64);
df::adj_add(var_joint_start, var_62, adj_joint_start, adj_62, adj_63);
df::adj_load(var_joint_qd, var_60, adj_joint_qd, adj_60, adj_61);
df::adj_add(var_joint_start, var_50, adj_joint_start, adj_50, adj_60);
df::adj_load(var_joint_qd, var_58, adj_joint_qd, adj_58, adj_59);
df::adj_add(var_joint_start, var_46, adj_joint_start, adj_46, adj_58);
df::adj_load(var_joint_qd, var_56, adj_joint_qd, adj_56, adj_57);
df::adj_add(var_joint_start, var_18, adj_joint_start, adj_18, adj_56);
df::adj_load(var_joint_qd, var_54, adj_joint_qd, adj_54, adj_55);
df::adj_add(var_joint_start, var_8, adj_joint_start, adj_8, adj_54);
df::adj_load(var_joint_qd, var_52, adj_joint_qd, adj_52, adj_53);
df::adj_add(var_joint_start, var_0, adj_joint_start, adj_0, adj_52);
}
df::adj_select(var_47, var_45, var_48, adj_47, adj_45, adj_48, adj_49);
if (var_47) {
label3:;
adj_48 += adj_ret;
}
df::adj_select(var_19, var_17, var_44, adj_19, adj_17, adj_44, adj_45);
if (var_19) {
label2:;
adj_44 += adj_ret;
df::adj_add(var_41, var_43, adj_41, adj_43, adj_44);
df::adj_mul(var_33, var_42, adj_33, adj_42, adj_43);
df::adj_index(var_26, var_18, adj_26, adj_18, adj_42);
df::adj_add(var_38, var_40, adj_38, adj_40, adj_41);
df::adj_mul(var_31, var_39, adj_31, adj_39, adj_40);
df::adj_index(var_26, var_8, adj_26, adj_8, adj_39);
df::adj_mul(var_29, var_37, adj_29, adj_37, adj_38);
df::adj_index(var_26, var_0, adj_26, adj_0, adj_37);
df::adj_store(var_joint_S_s, var_36, var_33, adj_joint_S_s, adj_36, adj_33);
df::adj_add(var_joint_start, var_18, adj_joint_start, adj_18, adj_36);
df::adj_store(var_joint_S_s, var_35, var_31, adj_joint_S_s, adj_35, adj_31);
df::adj_add(var_joint_start, var_8, adj_joint_start, adj_8, adj_35);
df::adj_store(var_joint_S_s, var_34, var_29, adj_joint_S_s, adj_34, adj_29);
df::adj_add(var_joint_start, var_0, adj_joint_start, adj_0, adj_34);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_32, adj_X_sc, adj_32, adj_33);
df::adj_spatial_vector(var_2, var_2, var_27, var_2, var_2, var_2, adj_2, adj_2, adj_27, adj_2, adj_2, adj_2, adj_32);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_30, adj_X_sc, adj_30, adj_31);
df::adj_spatial_vector(var_2, var_27, var_2, var_2, var_2, var_2, adj_2, adj_27, adj_2, adj_2, adj_2, adj_2, adj_30);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_28, adj_X_sc, adj_28, adj_29);
df::adj_spatial_vector(var_27, var_2, var_2, var_2, var_2, var_2, adj_27, adj_2, adj_2, adj_2, adj_2, adj_2, adj_28);
df::adj_float3(var_21, var_23, var_25, adj_21, adj_23, adj_25, adj_26);
df::adj_load(var_joint_qd, var_24, adj_joint_qd, adj_24, adj_25);
df::adj_add(var_joint_start, var_18, adj_joint_start, adj_18, adj_24);
df::adj_load(var_joint_qd, var_22, adj_joint_qd, adj_22, adj_23);
df::adj_add(var_joint_start, var_8, adj_joint_start, adj_8, adj_22);
df::adj_load(var_joint_qd, var_20, adj_joint_qd, adj_20, adj_21);
df::adj_add(var_joint_start, var_0, adj_joint_start, adj_0, adj_20);
}
df::adj_select(var_9, var_7, var_14, adj_9, adj_7, adj_14, adj_17);
df::adj_select(var_9, var_7, var_14, adj_9, adj_7, adj_14, adj_16);
df::adj_select(var_9, var_5, var_12, adj_9, adj_5, adj_12, adj_15);
if (var_9) {
label1:;
adj_14 += adj_ret;
df::adj_store(var_joint_S_s, var_joint_start, var_12, adj_joint_S_s, adj_joint_start, adj_12);
df::adj_mul(var_12, var_13, adj_12, adj_13, adj_14);
df::adj_load(var_joint_qd, var_joint_start, adj_joint_qd, adj_joint_start, adj_13);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_11, adj_X_sc, adj_11, adj_12);
df::adj_spatial_vector(var_axis, var_10, adj_axis, adj_10, adj_11);
df::adj_float3(var_2, var_2, var_2, adj_2, adj_2, adj_2, adj_10);
}
if (var_1) {
label0:;
adj_7 += adj_ret;
df::adj_store(var_joint_S_s, var_joint_start, var_5, adj_joint_S_s, adj_joint_start, adj_5);
df::adj_mul(var_5, var_6, adj_5, adj_6, adj_7);
df::adj_load(var_joint_qd, var_joint_start, adj_joint_qd, adj_joint_start, adj_6);
adj_spatial_transform_twist_cpu_func(var_X_sc, var_4, adj_X_sc, adj_4, adj_5);
df::adj_spatial_vector(var_3, var_axis, adj_3, adj_axis, adj_4);
df::adj_float3(var_2, var_2, var_2, adj_2, adj_2, adj_2, adj_3);
}
return;
}
// Auto-generated (dflex code generator) forward kernel: computes joint-space
// torques tau from the spatial body force body_f_s, branching on joint type:
//   type 0 or 1 -> single-DOF path: tau = -S.f - target PD terms + act + limit terms
//   type 2      -> three-DOF path (3 coords / 3 dofs), per-dof stiffness/damping
//   type 4      -> six-DOF path: tau[i] = -dot(S_s[dof_start+i], body_f_s)
// NOTE(review): generated code — the var_N sequence mirrors the tape replay in
// adj_jcalc_tau_cpu_func below; do not hand-edit or reorder statements.
// Always returns 0 (the returned int carries no information).
int jcalc_tau_cpu_func(
int var_type,
float var_target_k_e,
float var_target_k_d,
float var_limit_k_e,
float var_limit_k_d,
spatial_vector* var_joint_S_s,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_act,
float* var_joint_target,
float* var_joint_limit_lower,
float* var_joint_limit_upper,
int var_coord_start,
int var_dof_start,
spatial_vector var_body_f_s,
float* var_tau)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const int var_2 = 1;
bool var_3;
bool var_4;
spatial_vector var_5;
float var_6;
float var_7;
float var_8;
float var_9;
float var_10;
float var_11;
const float var_12 = 0.0;
bool var_13;
float var_14;
float var_15;
float var_16;
bool var_17;
float var_18;
float var_19;
float var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
float var_32;
const int var_33 = 2;
bool var_34;
int var_35;
float var_36;
int var_37;
float var_38;
int var_39;
float var_40;
df::float3 var_41;
int var_42;
float var_43;
int var_44;
float var_45;
int var_46;
float var_47;
df::float3 var_48;
const int var_49 = 0;
int var_50;
spatial_vector var_51;
float var_52;
float var_53;
int var_54;
float var_55;
float var_56;
float var_57;
float var_58;
float var_59;
float var_60;
const int var_61 = 1;
int var_62;
spatial_vector var_63;
float var_64;
float var_65;
int var_66;
float var_67;
float var_68;
float var_69;
float var_70;
float var_71;
float var_72;
const int var_73 = 2;
int var_74;
spatial_vector var_75;
float var_76;
float var_77;
int var_78;
float var_79;
float var_80;
float var_81;
float var_82;
float var_83;
float var_84;
spatial_vector var_85;
const int var_86 = 4;
bool var_87;
const int var_88 = 0;
int var_89;
spatial_vector var_90;
int var_91;
float var_92;
float var_93;
const int var_94 = 1;
int var_95;
spatial_vector var_96;
int var_97;
float var_98;
float var_99;
const int var_100 = 2;
int var_101;
spatial_vector var_102;
int var_103;
float var_104;
float var_105;
const int var_106 = 3;
int var_107;
spatial_vector var_108;
int var_109;
float var_110;
float var_111;
const int var_112 = 4;
int var_113;
spatial_vector var_114;
int var_115;
float var_116;
float var_117;
const int var_118 = 5;
int var_119;
spatial_vector var_120;
int var_121;
float var_122;
float var_123;
spatial_vector var_124;
int var_125;
//---------
// forward
// joint types 0 and 1 share the single-DOF code path below
var_1 = (var_type == var_0);
var_3 = (var_type == var_2);
var_4 = var_1 || var_3;
if (var_4) {
// load motion subspace, coordinate, velocity, actuation, target and limits
var_5 = df::load(var_joint_S_s, var_dof_start);
var_6 = df::load(var_joint_q, var_coord_start);
var_7 = df::load(var_joint_qd, var_dof_start);
var_8 = df::load(var_joint_act, var_dof_start);
var_9 = df::load(var_joint_target, var_coord_start);
var_10 = df::load(var_joint_limit_lower, var_coord_start);
var_11 = df::load(var_joint_limit_upper, var_coord_start);
// limit spring force when q is below lower / above upper bound
// NOTE(review): df::select argument order follows the generator's
// convention — confirm against the df::select definition before editing
var_13 = (var_6 < var_10);
if (var_13) {
var_14 = df::sub(var_10, var_6);
var_15 = df::mul(var_limit_k_e, var_14);
}
var_16 = df::select(var_13, var_12, var_15);
var_17 = (var_6 > var_11);
if (var_17) {
var_18 = df::sub(var_11, var_6);
var_19 = df::mul(var_limit_k_e, var_18);
}
var_20 = df::select(var_17, var_16, var_19);
// limit damping term: (0 - limit_k_d) * qd
var_21 = df::sub(var_12, var_limit_k_d);
var_22 = df::mul(var_21, var_7);
// generalized force from the spatial body force: -(S . f)
var_23 = df::spatial_dot(var_5, var_body_f_s);
var_24 = df::sub(var_12, var_23);
// target PD: subtract k_e*(q - target) and k_d*qd
var_25 = df::sub(var_6, var_9);
var_26 = df::mul(var_target_k_e, var_25);
var_27 = df::sub(var_24, var_26);
var_28 = df::mul(var_target_k_d, var_7);
var_29 = df::sub(var_27, var_28);
// add actuation, limit spring, and limit damping contributions
var_30 = df::add(var_29, var_8);
var_31 = df::add(var_30, var_20);
var_32 = df::add(var_31, var_22);
df::store(var_tau, var_dof_start, var_32);
}
// joint type 2: three coordinates / three dofs
var_34 = (var_type == var_33);
if (var_34) {
var_35 = df::add(var_coord_start, var_0);
var_36 = df::load(var_joint_q, var_35);
var_37 = df::add(var_coord_start, var_2);
var_38 = df::load(var_joint_q, var_37);
var_39 = df::add(var_coord_start, var_33);
var_40 = df::load(var_joint_q, var_39);
var_41 = df::float3(var_36, var_38, var_40);
var_42 = df::add(var_dof_start, var_0);
var_43 = df::load(var_joint_qd, var_42);
var_44 = df::add(var_dof_start, var_2);
var_45 = df::load(var_joint_qd, var_44);
var_46 = df::add(var_dof_start, var_33);
var_47 = df::load(var_joint_qd, var_46);
var_48 = df::float3(var_43, var_45, var_47);
// dof 0: tau = -(S.f) - k_d*qd[0] - k_e*q[0]
var_50 = df::add(var_dof_start, var_49);
var_51 = df::load(var_joint_S_s, var_50);
var_52 = df::index(var_48, var_49);
var_53 = df::index(var_41, var_49);
var_54 = df::add(var_dof_start, var_49);
var_55 = df::spatial_dot(var_51, var_body_f_s);
var_56 = df::sub(var_12, var_55);
var_57 = df::mul(var_52, var_target_k_d);
var_58 = df::sub(var_56, var_57);
var_59 = df::mul(var_53, var_target_k_e);
var_60 = df::sub(var_58, var_59);
df::store(var_tau, var_54, var_60);
// dof 1
var_62 = df::add(var_dof_start, var_61);
var_63 = df::load(var_joint_S_s, var_62);
var_64 = df::index(var_48, var_61);
var_65 = df::index(var_41, var_61);
var_66 = df::add(var_dof_start, var_61);
var_67 = df::spatial_dot(var_63, var_body_f_s);
var_68 = df::sub(var_12, var_67);
var_69 = df::mul(var_64, var_target_k_d);
var_70 = df::sub(var_68, var_69);
var_71 = df::mul(var_65, var_target_k_e);
var_72 = df::sub(var_70, var_71);
df::store(var_tau, var_66, var_72);
// dof 2
var_74 = df::add(var_dof_start, var_73);
var_75 = df::load(var_joint_S_s, var_74);
var_76 = df::index(var_48, var_73);
var_77 = df::index(var_41, var_73);
var_78 = df::add(var_dof_start, var_73);
var_79 = df::spatial_dot(var_75, var_body_f_s);
var_80 = df::sub(var_12, var_79);
var_81 = df::mul(var_76, var_target_k_d);
var_82 = df::sub(var_80, var_81);
var_83 = df::mul(var_77, var_target_k_e);
var_84 = df::sub(var_82, var_83);
df::store(var_tau, var_78, var_84);
}
// codegen select artifact; var_85 is not stored anywhere below
var_85 = df::select(var_34, var_5, var_75);
// joint type 4: six dofs, tau[i] = -(S_s[dof_start+i] . body_f_s)
var_87 = (var_type == var_86);
if (var_87) {
var_89 = df::add(var_dof_start, var_88);
var_90 = df::load(var_joint_S_s, var_89);
var_91 = df::add(var_dof_start, var_88);
var_92 = df::spatial_dot(var_90, var_body_f_s);
var_93 = df::sub(var_12, var_92);
df::store(var_tau, var_91, var_93);
var_95 = df::add(var_dof_start, var_94);
var_96 = df::load(var_joint_S_s, var_95);
var_97 = df::add(var_dof_start, var_94);
var_98 = df::spatial_dot(var_96, var_body_f_s);
var_99 = df::sub(var_12, var_98);
df::store(var_tau, var_97, var_99);
var_101 = df::add(var_dof_start, var_100);
var_102 = df::load(var_joint_S_s, var_101);
var_103 = df::add(var_dof_start, var_100);
var_104 = df::spatial_dot(var_102, var_body_f_s);
var_105 = df::sub(var_12, var_104);
df::store(var_tau, var_103, var_105);
var_107 = df::add(var_dof_start, var_106);
var_108 = df::load(var_joint_S_s, var_107);
var_109 = df::add(var_dof_start, var_106);
var_110 = df::spatial_dot(var_108, var_body_f_s);
var_111 = df::sub(var_12, var_110);
df::store(var_tau, var_109, var_111);
var_113 = df::add(var_dof_start, var_112);
var_114 = df::load(var_joint_S_s, var_113);
var_115 = df::add(var_dof_start, var_112);
var_116 = df::spatial_dot(var_114, var_body_f_s);
var_117 = df::sub(var_12, var_116);
df::store(var_tau, var_115, var_117);
var_119 = df::add(var_dof_start, var_118);
var_120 = df::load(var_joint_S_s, var_119);
var_121 = df::add(var_dof_start, var_118);
var_122 = df::spatial_dot(var_120, var_body_f_s);
var_123 = df::sub(var_12, var_122);
df::store(var_tau, var_121, var_123);
}
// codegen select artifacts; var_124/var_125 are unused in this forward pass
var_124 = df::select(var_87, var_85, var_120);
var_125 = df::select(var_87, var_73, var_118);
return var_0;
}
// Auto-generated (dflex code generator) reverse-mode adjoint of
// jcalc_tau_cpu_func. First replays the forward pass to rebuild every
// intermediate var_N, then walks the tape backwards, accumulating gradients
// into the adj_* reference/array outputs (gradients flow from adj_tau /
// adj_ret back to the joint-state arrays, gains, and body_f_s).
// NOTE(review): statement order in the reverse section is significant — each
// df::adj_* call consumes adjoints produced by the calls before it; do not
// reorder or hand-edit this generated code.
void adj_jcalc_tau_cpu_func(
int var_type,
float var_target_k_e,
float var_target_k_d,
float var_limit_k_e,
float var_limit_k_d,
spatial_vector* var_joint_S_s,
float* var_joint_q,
float* var_joint_qd,
float* var_joint_act,
float* var_joint_target,
float* var_joint_limit_lower,
float* var_joint_limit_upper,
int var_coord_start,
int var_dof_start,
spatial_vector var_body_f_s,
float* var_tau,
int & adj_type,
float & adj_target_k_e,
float & adj_target_k_d,
float & adj_limit_k_e,
float & adj_limit_k_d,
spatial_vector* adj_joint_S_s,
float* adj_joint_q,
float* adj_joint_qd,
float* adj_joint_act,
float* adj_joint_target,
float* adj_joint_limit_lower,
float* adj_joint_limit_upper,
int & adj_coord_start,
int & adj_dof_start,
spatial_vector & adj_body_f_s,
float* adj_tau,
int & adj_ret)
{
//---------
// primal vars
const int var_0 = 0;
bool var_1;
const int var_2 = 1;
bool var_3;
bool var_4;
spatial_vector var_5;
float var_6;
float var_7;
float var_8;
float var_9;
float var_10;
float var_11;
const float var_12 = 0.0;
bool var_13;
float var_14;
float var_15;
float var_16;
bool var_17;
float var_18;
float var_19;
float var_20;
float var_21;
float var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
float var_32;
const int var_33 = 2;
bool var_34;
int var_35;
float var_36;
int var_37;
float var_38;
int var_39;
float var_40;
df::float3 var_41;
int var_42;
float var_43;
int var_44;
float var_45;
int var_46;
float var_47;
df::float3 var_48;
const int var_49 = 0;
int var_50;
spatial_vector var_51;
float var_52;
float var_53;
int var_54;
float var_55;
float var_56;
float var_57;
float var_58;
float var_59;
float var_60;
const int var_61 = 1;
int var_62;
spatial_vector var_63;
float var_64;
float var_65;
int var_66;
float var_67;
float var_68;
float var_69;
float var_70;
float var_71;
float var_72;
const int var_73 = 2;
int var_74;
spatial_vector var_75;
float var_76;
float var_77;
int var_78;
float var_79;
float var_80;
float var_81;
float var_82;
float var_83;
float var_84;
spatial_vector var_85;
const int var_86 = 4;
bool var_87;
const int var_88 = 0;
int var_89;
spatial_vector var_90;
int var_91;
float var_92;
float var_93;
const int var_94 = 1;
int var_95;
spatial_vector var_96;
int var_97;
float var_98;
float var_99;
const int var_100 = 2;
int var_101;
spatial_vector var_102;
int var_103;
float var_104;
float var_105;
const int var_106 = 3;
int var_107;
spatial_vector var_108;
int var_109;
float var_110;
float var_111;
const int var_112 = 4;
int var_113;
spatial_vector var_114;
int var_115;
float var_116;
float var_117;
const int var_118 = 5;
int var_119;
spatial_vector var_120;
int var_121;
float var_122;
float var_123;
spatial_vector var_124;
int var_125;
//---------
// dual vars (one adjoint accumulator per primal var, zero-initialized)
int adj_0 = 0;
bool adj_1 = 0;
int adj_2 = 0;
bool adj_3 = 0;
bool adj_4 = 0;
spatial_vector adj_5 = 0;
float adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
float adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
bool adj_13 = 0;
float adj_14 = 0;
float adj_15 = 0;
float adj_16 = 0;
bool adj_17 = 0;
float adj_18 = 0;
float adj_19 = 0;
float adj_20 = 0;
float adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
float adj_28 = 0;
float adj_29 = 0;
float adj_30 = 0;
float adj_31 = 0;
float adj_32 = 0;
int adj_33 = 0;
bool adj_34 = 0;
int adj_35 = 0;
float adj_36 = 0;
int adj_37 = 0;
float adj_38 = 0;
int adj_39 = 0;
float adj_40 = 0;
df::float3 adj_41 = 0;
int adj_42 = 0;
float adj_43 = 0;
int adj_44 = 0;
float adj_45 = 0;
int adj_46 = 0;
float adj_47 = 0;
df::float3 adj_48 = 0;
int adj_49 = 0;
int adj_50 = 0;
spatial_vector adj_51 = 0;
float adj_52 = 0;
float adj_53 = 0;
int adj_54 = 0;
float adj_55 = 0;
float adj_56 = 0;
float adj_57 = 0;
float adj_58 = 0;
float adj_59 = 0;
float adj_60 = 0;
int adj_61 = 0;
int adj_62 = 0;
spatial_vector adj_63 = 0;
float adj_64 = 0;
float adj_65 = 0;
int adj_66 = 0;
float adj_67 = 0;
float adj_68 = 0;
float adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
int adj_73 = 0;
int adj_74 = 0;
spatial_vector adj_75 = 0;
float adj_76 = 0;
float adj_77 = 0;
int adj_78 = 0;
float adj_79 = 0;
float adj_80 = 0;
float adj_81 = 0;
float adj_82 = 0;
float adj_83 = 0;
float adj_84 = 0;
spatial_vector adj_85 = 0;
int adj_86 = 0;
bool adj_87 = 0;
int adj_88 = 0;
int adj_89 = 0;
spatial_vector adj_90 = 0;
int adj_91 = 0;
float adj_92 = 0;
float adj_93 = 0;
int adj_94 = 0;
int adj_95 = 0;
spatial_vector adj_96 = 0;
int adj_97 = 0;
float adj_98 = 0;
float adj_99 = 0;
int adj_100 = 0;
int adj_101 = 0;
spatial_vector adj_102 = 0;
int adj_103 = 0;
float adj_104 = 0;
float adj_105 = 0;
int adj_106 = 0;
int adj_107 = 0;
spatial_vector adj_108 = 0;
int adj_109 = 0;
float adj_110 = 0;
float adj_111 = 0;
int adj_112 = 0;
int adj_113 = 0;
spatial_vector adj_114 = 0;
int adj_115 = 0;
float adj_116 = 0;
float adj_117 = 0;
int adj_118 = 0;
int adj_119 = 0;
spatial_vector adj_120 = 0;
int adj_121 = 0;
float adj_122 = 0;
float adj_123 = 0;
spatial_vector adj_124 = 0;
int adj_125 = 0;
//---------
// forward (replay of jcalc_tau_cpu_func to rebuild the tape)
var_1 = (var_type == var_0);
var_3 = (var_type == var_2);
var_4 = var_1 || var_3;
if (var_4) {
var_5 = df::load(var_joint_S_s, var_dof_start);
var_6 = df::load(var_joint_q, var_coord_start);
var_7 = df::load(var_joint_qd, var_dof_start);
var_8 = df::load(var_joint_act, var_dof_start);
var_9 = df::load(var_joint_target, var_coord_start);
var_10 = df::load(var_joint_limit_lower, var_coord_start);
var_11 = df::load(var_joint_limit_upper, var_coord_start);
var_13 = (var_6 < var_10);
if (var_13) {
var_14 = df::sub(var_10, var_6);
var_15 = df::mul(var_limit_k_e, var_14);
}
var_16 = df::select(var_13, var_12, var_15);
var_17 = (var_6 > var_11);
if (var_17) {
var_18 = df::sub(var_11, var_6);
var_19 = df::mul(var_limit_k_e, var_18);
}
var_20 = df::select(var_17, var_16, var_19);
var_21 = df::sub(var_12, var_limit_k_d);
var_22 = df::mul(var_21, var_7);
var_23 = df::spatial_dot(var_5, var_body_f_s);
var_24 = df::sub(var_12, var_23);
var_25 = df::sub(var_6, var_9);
var_26 = df::mul(var_target_k_e, var_25);
var_27 = df::sub(var_24, var_26);
var_28 = df::mul(var_target_k_d, var_7);
var_29 = df::sub(var_27, var_28);
var_30 = df::add(var_29, var_8);
var_31 = df::add(var_30, var_20);
var_32 = df::add(var_31, var_22);
df::store(var_tau, var_dof_start, var_32);
}
var_34 = (var_type == var_33);
if (var_34) {
var_35 = df::add(var_coord_start, var_0);
var_36 = df::load(var_joint_q, var_35);
var_37 = df::add(var_coord_start, var_2);
var_38 = df::load(var_joint_q, var_37);
var_39 = df::add(var_coord_start, var_33);
var_40 = df::load(var_joint_q, var_39);
var_41 = df::float3(var_36, var_38, var_40);
var_42 = df::add(var_dof_start, var_0);
var_43 = df::load(var_joint_qd, var_42);
var_44 = df::add(var_dof_start, var_2);
var_45 = df::load(var_joint_qd, var_44);
var_46 = df::add(var_dof_start, var_33);
var_47 = df::load(var_joint_qd, var_46);
var_48 = df::float3(var_43, var_45, var_47);
var_50 = df::add(var_dof_start, var_49);
var_51 = df::load(var_joint_S_s, var_50);
var_52 = df::index(var_48, var_49);
var_53 = df::index(var_41, var_49);
var_54 = df::add(var_dof_start, var_49);
var_55 = df::spatial_dot(var_51, var_body_f_s);
var_56 = df::sub(var_12, var_55);
var_57 = df::mul(var_52, var_target_k_d);
var_58 = df::sub(var_56, var_57);
var_59 = df::mul(var_53, var_target_k_e);
var_60 = df::sub(var_58, var_59);
df::store(var_tau, var_54, var_60);
var_62 = df::add(var_dof_start, var_61);
var_63 = df::load(var_joint_S_s, var_62);
var_64 = df::index(var_48, var_61);
var_65 = df::index(var_41, var_61);
var_66 = df::add(var_dof_start, var_61);
var_67 = df::spatial_dot(var_63, var_body_f_s);
var_68 = df::sub(var_12, var_67);
var_69 = df::mul(var_64, var_target_k_d);
var_70 = df::sub(var_68, var_69);
var_71 = df::mul(var_65, var_target_k_e);
var_72 = df::sub(var_70, var_71);
df::store(var_tau, var_66, var_72);
var_74 = df::add(var_dof_start, var_73);
var_75 = df::load(var_joint_S_s, var_74);
var_76 = df::index(var_48, var_73);
var_77 = df::index(var_41, var_73);
var_78 = df::add(var_dof_start, var_73);
var_79 = df::spatial_dot(var_75, var_body_f_s);
var_80 = df::sub(var_12, var_79);
var_81 = df::mul(var_76, var_target_k_d);
var_82 = df::sub(var_80, var_81);
var_83 = df::mul(var_77, var_target_k_e);
var_84 = df::sub(var_82, var_83);
df::store(var_tau, var_78, var_84);
}
var_85 = df::select(var_34, var_5, var_75);
var_87 = (var_type == var_86);
if (var_87) {
var_89 = df::add(var_dof_start, var_88);
var_90 = df::load(var_joint_S_s, var_89);
var_91 = df::add(var_dof_start, var_88);
var_92 = df::spatial_dot(var_90, var_body_f_s);
var_93 = df::sub(var_12, var_92);
df::store(var_tau, var_91, var_93);
var_95 = df::add(var_dof_start, var_94);
var_96 = df::load(var_joint_S_s, var_95);
var_97 = df::add(var_dof_start, var_94);
var_98 = df::spatial_dot(var_96, var_body_f_s);
var_99 = df::sub(var_12, var_98);
df::store(var_tau, var_97, var_99);
var_101 = df::add(var_dof_start, var_100);
var_102 = df::load(var_joint_S_s, var_101);
var_103 = df::add(var_dof_start, var_100);
var_104 = df::spatial_dot(var_102, var_body_f_s);
var_105 = df::sub(var_12, var_104);
df::store(var_tau, var_103, var_105);
var_107 = df::add(var_dof_start, var_106);
var_108 = df::load(var_joint_S_s, var_107);
var_109 = df::add(var_dof_start, var_106);
var_110 = df::spatial_dot(var_108, var_body_f_s);
var_111 = df::sub(var_12, var_110);
df::store(var_tau, var_109, var_111);
var_113 = df::add(var_dof_start, var_112);
var_114 = df::load(var_joint_S_s, var_113);
var_115 = df::add(var_dof_start, var_112);
var_116 = df::spatial_dot(var_114, var_body_f_s);
var_117 = df::sub(var_12, var_116);
df::store(var_tau, var_115, var_117);
var_119 = df::add(var_dof_start, var_118);
var_120 = df::load(var_joint_S_s, var_119);
var_121 = df::add(var_dof_start, var_118);
var_122 = df::spatial_dot(var_120, var_body_f_s);
var_123 = df::sub(var_12, var_122);
df::store(var_tau, var_121, var_123);
}
var_124 = df::select(var_87, var_85, var_120);
var_125 = df::select(var_87, var_73, var_118);
goto label0;
//---------
// reverse (tape walked back-to-front; each adj_* call accumulates gradients)
label0:;
adj_0 += adj_ret;
df::adj_select(var_87, var_73, var_118, adj_87, adj_73, adj_118, adj_125);
df::adj_select(var_87, var_85, var_120, adj_87, adj_85, adj_120, adj_124);
// reverse of the type-4 (six-dof) branch
if (var_87) {
df::adj_store(var_tau, var_121, var_123, adj_tau, adj_121, adj_123);
df::adj_sub(var_12, var_122, adj_12, adj_122, adj_123);
df::adj_spatial_dot(var_120, var_body_f_s, adj_120, adj_body_f_s, adj_122);
df::adj_add(var_dof_start, var_118, adj_dof_start, adj_118, adj_121);
df::adj_load(var_joint_S_s, var_119, adj_joint_S_s, adj_119, adj_120);
df::adj_add(var_dof_start, var_118, adj_dof_start, adj_118, adj_119);
df::adj_store(var_tau, var_115, var_117, adj_tau, adj_115, adj_117);
df::adj_sub(var_12, var_116, adj_12, adj_116, adj_117);
df::adj_spatial_dot(var_114, var_body_f_s, adj_114, adj_body_f_s, adj_116);
df::adj_add(var_dof_start, var_112, adj_dof_start, adj_112, adj_115);
df::adj_load(var_joint_S_s, var_113, adj_joint_S_s, adj_113, adj_114);
df::adj_add(var_dof_start, var_112, adj_dof_start, adj_112, adj_113);
df::adj_store(var_tau, var_109, var_111, adj_tau, adj_109, adj_111);
df::adj_sub(var_12, var_110, adj_12, adj_110, adj_111);
df::adj_spatial_dot(var_108, var_body_f_s, adj_108, adj_body_f_s, adj_110);
df::adj_add(var_dof_start, var_106, adj_dof_start, adj_106, adj_109);
df::adj_load(var_joint_S_s, var_107, adj_joint_S_s, adj_107, adj_108);
df::adj_add(var_dof_start, var_106, adj_dof_start, adj_106, adj_107);
df::adj_store(var_tau, var_103, var_105, adj_tau, adj_103, adj_105);
df::adj_sub(var_12, var_104, adj_12, adj_104, adj_105);
df::adj_spatial_dot(var_102, var_body_f_s, adj_102, adj_body_f_s, adj_104);
df::adj_add(var_dof_start, var_100, adj_dof_start, adj_100, adj_103);
df::adj_load(var_joint_S_s, var_101, adj_joint_S_s, adj_101, adj_102);
df::adj_add(var_dof_start, var_100, adj_dof_start, adj_100, adj_101);
df::adj_store(var_tau, var_97, var_99, adj_tau, adj_97, adj_99);
df::adj_sub(var_12, var_98, adj_12, adj_98, adj_99);
df::adj_spatial_dot(var_96, var_body_f_s, adj_96, adj_body_f_s, adj_98);
df::adj_add(var_dof_start, var_94, adj_dof_start, adj_94, adj_97);
df::adj_load(var_joint_S_s, var_95, adj_joint_S_s, adj_95, adj_96);
df::adj_add(var_dof_start, var_94, adj_dof_start, adj_94, adj_95);
df::adj_store(var_tau, var_91, var_93, adj_tau, adj_91, adj_93);
df::adj_sub(var_12, var_92, adj_12, adj_92, adj_93);
df::adj_spatial_dot(var_90, var_body_f_s, adj_90, adj_body_f_s, adj_92);
df::adj_add(var_dof_start, var_88, adj_dof_start, adj_88, adj_91);
df::adj_load(var_joint_S_s, var_89, adj_joint_S_s, adj_89, adj_90);
df::adj_add(var_dof_start, var_88, adj_dof_start, adj_88, adj_89);
}
df::adj_select(var_34, var_5, var_75, adj_34, adj_5, adj_75, adj_85);
// reverse of the type-2 (three-dof) branch
if (var_34) {
df::adj_store(var_tau, var_78, var_84, adj_tau, adj_78, adj_84);
df::adj_sub(var_82, var_83, adj_82, adj_83, adj_84);
df::adj_mul(var_77, var_target_k_e, adj_77, adj_target_k_e, adj_83);
df::adj_sub(var_80, var_81, adj_80, adj_81, adj_82);
df::adj_mul(var_76, var_target_k_d, adj_76, adj_target_k_d, adj_81);
df::adj_sub(var_12, var_79, adj_12, adj_79, adj_80);
df::adj_spatial_dot(var_75, var_body_f_s, adj_75, adj_body_f_s, adj_79);
df::adj_add(var_dof_start, var_73, adj_dof_start, adj_73, adj_78);
df::adj_index(var_41, var_73, adj_41, adj_73, adj_77);
df::adj_index(var_48, var_73, adj_48, adj_73, adj_76);
df::adj_load(var_joint_S_s, var_74, adj_joint_S_s, adj_74, adj_75);
df::adj_add(var_dof_start, var_73, adj_dof_start, adj_73, adj_74);
df::adj_store(var_tau, var_66, var_72, adj_tau, adj_66, adj_72);
df::adj_sub(var_70, var_71, adj_70, adj_71, adj_72);
df::adj_mul(var_65, var_target_k_e, adj_65, adj_target_k_e, adj_71);
df::adj_sub(var_68, var_69, adj_68, adj_69, adj_70);
df::adj_mul(var_64, var_target_k_d, adj_64, adj_target_k_d, adj_69);
df::adj_sub(var_12, var_67, adj_12, adj_67, adj_68);
df::adj_spatial_dot(var_63, var_body_f_s, adj_63, adj_body_f_s, adj_67);
df::adj_add(var_dof_start, var_61, adj_dof_start, adj_61, adj_66);
df::adj_index(var_41, var_61, adj_41, adj_61, adj_65);
df::adj_index(var_48, var_61, adj_48, adj_61, adj_64);
df::adj_load(var_joint_S_s, var_62, adj_joint_S_s, adj_62, adj_63);
df::adj_add(var_dof_start, var_61, adj_dof_start, adj_61, adj_62);
df::adj_store(var_tau, var_54, var_60, adj_tau, adj_54, adj_60);
df::adj_sub(var_58, var_59, adj_58, adj_59, adj_60);
df::adj_mul(var_53, var_target_k_e, adj_53, adj_target_k_e, adj_59);
df::adj_sub(var_56, var_57, adj_56, adj_57, adj_58);
df::adj_mul(var_52, var_target_k_d, adj_52, adj_target_k_d, adj_57);
df::adj_sub(var_12, var_55, adj_12, adj_55, adj_56);
df::adj_spatial_dot(var_51, var_body_f_s, adj_51, adj_body_f_s, adj_55);
df::adj_add(var_dof_start, var_49, adj_dof_start, adj_49, adj_54);
df::adj_index(var_41, var_49, adj_41, adj_49, adj_53);
df::adj_index(var_48, var_49, adj_48, adj_49, adj_52);
df::adj_load(var_joint_S_s, var_50, adj_joint_S_s, adj_50, adj_51);
df::adj_add(var_dof_start, var_49, adj_dof_start, adj_49, adj_50);
df::adj_float3(var_43, var_45, var_47, adj_43, adj_45, adj_47, adj_48);
df::adj_load(var_joint_qd, var_46, adj_joint_qd, adj_46, adj_47);
df::adj_add(var_dof_start, var_33, adj_dof_start, adj_33, adj_46);
df::adj_load(var_joint_qd, var_44, adj_joint_qd, adj_44, adj_45);
df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_44);
df::adj_load(var_joint_qd, var_42, adj_joint_qd, adj_42, adj_43);
df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_42);
df::adj_float3(var_36, var_38, var_40, adj_36, adj_38, adj_40, adj_41);
df::adj_load(var_joint_q, var_39, adj_joint_q, adj_39, adj_40);
df::adj_add(var_coord_start, var_33, adj_coord_start, adj_33, adj_39);
df::adj_load(var_joint_q, var_37, adj_joint_q, adj_37, adj_38);
df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_37);
df::adj_load(var_joint_q, var_35, adj_joint_q, adj_35, adj_36);
df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_35);
}
// reverse of the single-DOF (type 0/1) branch
if (var_4) {
df::adj_store(var_tau, var_dof_start, var_32, adj_tau, adj_dof_start, adj_32);
df::adj_add(var_31, var_22, adj_31, adj_22, adj_32);
df::adj_add(var_30, var_20, adj_30, adj_20, adj_31);
df::adj_add(var_29, var_8, adj_29, adj_8, adj_30);
df::adj_sub(var_27, var_28, adj_27, adj_28, adj_29);
df::adj_mul(var_target_k_d, var_7, adj_target_k_d, adj_7, adj_28);
df::adj_sub(var_24, var_26, adj_24, adj_26, adj_27);
df::adj_mul(var_target_k_e, var_25, adj_target_k_e, adj_25, adj_26);
df::adj_sub(var_6, var_9, adj_6, adj_9, adj_25);
df::adj_sub(var_12, var_23, adj_12, adj_23, adj_24);
df::adj_spatial_dot(var_5, var_body_f_s, adj_5, adj_body_f_s, adj_23);
df::adj_mul(var_21, var_7, adj_21, adj_7, adj_22);
df::adj_sub(var_12, var_limit_k_d, adj_12, adj_limit_k_d, adj_21);
df::adj_select(var_17, var_16, var_19, adj_17, adj_16, adj_19, adj_20);
if (var_17) {
df::adj_mul(var_limit_k_e, var_18, adj_limit_k_e, adj_18, adj_19);
df::adj_sub(var_11, var_6, adj_11, adj_6, adj_18);
}
df::adj_select(var_13, var_12, var_15, adj_13, adj_12, adj_15, adj_16);
if (var_13) {
df::adj_mul(var_limit_k_e, var_14, adj_limit_k_e, adj_14, adj_15);
df::adj_sub(var_10, var_6, adj_10, adj_6, adj_14);
}
df::adj_load(var_joint_limit_upper, var_coord_start, adj_joint_limit_upper, adj_coord_start, adj_11);
df::adj_load(var_joint_limit_lower, var_coord_start, adj_joint_limit_lower, adj_coord_start, adj_10);
df::adj_load(var_joint_target, var_coord_start, adj_joint_target, adj_coord_start, adj_9);
df::adj_load(var_joint_act, var_dof_start, adj_joint_act, adj_dof_start, adj_8);
df::adj_load(var_joint_qd, var_dof_start, adj_joint_qd, adj_dof_start, adj_7);
df::adj_load(var_joint_q, var_coord_start, adj_joint_q, adj_coord_start, adj_6);
df::adj_load(var_joint_S_s, var_dof_start, adj_joint_S_s, adj_dof_start, adj_5);
}
return;
}
// Auto-generated (dflex codegen) — forward pass: semi-implicit Euler step for
// one joint.  Velocities are updated first (qd_new = qd + qdd*dt) and the new
// velocity is then used to advance the coordinates (q_new = q + qd_new*dt).
// The joint type selects the coordinate layout:
//   type 0 / 1 : single-dof joint, one coordinate, one dof
//                (presumably prismatic / revolute — TODO confirm vs. model)
//   type 2     : 3 dofs, 4 coordinates stored as a quaternion
//                (presumably a ball joint)
//   type 4     : 6 dofs, 7 coordinates (position float3 + quaternion)
//                (presumably a free joint)
// Any other type is a no-op.  var_N locals are generator-emitted SSA
// temporaries; their names/order must match adj_jcalc_integrate_cpu_func —
// do not hand-edit.
//
// Parameters:
//   var_type                       joint type code (see above)
//   var_joint_q/qd/qdd             packed coordinate/velocity/acceleration arrays
//   var_coord_start/var_dof_start  this joint's offsets into the packed arrays
//   var_dt                         time step
//   var_joint_q_new/qd_new         output arrays (written, not read)
// Returns 0 (generated kernels return a dummy int).
int jcalc_integrate_cpu_func(
    int var_type,
    float* var_joint_q,
    float* var_joint_qd,
    float* var_joint_qdd,
    int var_coord_start,
    int var_dof_start,
    float var_dt,
    float* var_joint_q_new,
    float* var_joint_qd_new)
{
    //---------
    // primal vars
    const int var_0 = 0;
    bool var_1;
    const int var_2 = 1;
    bool var_3;
    bool var_4;
    float var_5;
    float var_6;
    float var_7;
    float var_8;
    float var_9;
    float var_10;
    float var_11;
    const int var_12 = 2;
    bool var_13;
    int var_14;
    float var_15;
    int var_16;
    float var_17;
    int var_18;
    float var_19;
    df::float3 var_20;
    int var_21;
    float var_22;
    int var_23;
    float var_24;
    int var_25;
    float var_26;
    df::float3 var_27;
    int var_28;
    float var_29;
    int var_30;
    float var_31;
    int var_32;
    float var_33;
    const int var_34 = 3;
    int var_35;
    float var_36;
    quat var_37;
    df::float3 var_38;
    df::float3 var_39;
    const float var_40 = 0.0;
    quat var_41;
    quat var_42;
    const float var_43 = 0.5;
    quat var_44;
    quat var_45;
    quat var_46;
    quat var_47;
    int var_48;
    float var_49;
    int var_50;
    float var_51;
    int var_52;
    float var_53;
    int var_54;
    float var_55;
    int var_56;
    float var_57;
    int var_58;
    float var_59;
    int var_60;
    float var_61;
    const int var_62 = 4;
    bool var_63;
    int var_64;
    float var_65;
    int var_66;
    float var_67;
    int var_68;
    float var_69;
    df::float3 var_70;
    int var_71;
    float var_72;
    int var_73;
    float var_74;
    const int var_75 = 5;
    int var_76;
    float var_77;
    df::float3 var_78;
    int var_79;
    float var_80;
    int var_81;
    float var_82;
    int var_83;
    float var_84;
    df::float3 var_85;
    int var_86;
    float var_87;
    int var_88;
    float var_89;
    int var_90;
    float var_91;
    df::float3 var_92;
    df::float3 var_93;
    df::float3 var_94;
    df::float3 var_95;
    df::float3 var_96;
    int var_97;
    float var_98;
    int var_99;
    float var_100;
    int var_101;
    float var_102;
    df::float3 var_103;
    df::float3 var_104;
    df::float3 var_105;
    int var_106;
    float var_107;
    int var_108;
    float var_109;
    int var_110;
    float var_111;
    const int var_112 = 6;
    int var_113;
    float var_114;
    quat var_115;
    quat var_116;
    quat var_117;
    quat var_118;
    df::float3 var_119;
    df::float3 var_120;
    quat var_121;
    quat var_122;
    quat var_123;
    int var_124;
    float var_125;
    int var_126;
    float var_127;
    int var_128;
    float var_129;
    int var_130;
    float var_131;
    int var_132;
    float var_133;
    int var_134;
    float var_135;
    int var_136;
    float var_137;
    int var_138;
    float var_139;
    int var_140;
    float var_141;
    int var_142;
    float var_143;
    int var_144;
    float var_145;
    int var_146;
    float var_147;
    int var_148;
    float var_149;
    //---------
    // forward
    var_1 = (var_type == var_0);
    var_3 = (var_type == var_2);
    var_4 = var_1 || var_3;
    // type 0 or 1: scalar joint — qd_new = qd + qdd*dt, q_new = q + qd_new*dt
    if (var_4) {
        var_5 = df::load(var_joint_qdd, var_dof_start);
        var_6 = df::load(var_joint_qd, var_dof_start);
        var_7 = df::load(var_joint_q, var_coord_start);
        var_8 = df::mul(var_5, var_dt);
        var_9 = df::add(var_6, var_8);
        var_10 = df::mul(var_9, var_dt);
        var_11 = df::add(var_7, var_10);
        df::store(var_joint_qd_new, var_dof_start, var_9);
        df::store(var_joint_q_new, var_coord_start, var_11);
    }
    var_13 = (var_type == var_12);
    // type 2: 3 angular dofs + quaternion coordinate
    if (var_13) {
        // gather angular acceleration (var_20), velocity (var_27), quat (var_37)
        var_14 = df::add(var_dof_start, var_0);
        var_15 = df::load(var_joint_qdd, var_14);
        var_16 = df::add(var_dof_start, var_2);
        var_17 = df::load(var_joint_qdd, var_16);
        var_18 = df::add(var_dof_start, var_12);
        var_19 = df::load(var_joint_qdd, var_18);
        var_20 = df::float3(var_15, var_17, var_19);
        var_21 = df::add(var_dof_start, var_0);
        var_22 = df::load(var_joint_qd, var_21);
        var_23 = df::add(var_dof_start, var_2);
        var_24 = df::load(var_joint_qd, var_23);
        var_25 = df::add(var_dof_start, var_12);
        var_26 = df::load(var_joint_qd, var_25);
        var_27 = df::float3(var_22, var_24, var_26);
        var_28 = df::add(var_coord_start, var_0);
        var_29 = df::load(var_joint_q, var_28);
        var_30 = df::add(var_coord_start, var_2);
        var_31 = df::load(var_joint_q, var_30);
        var_32 = df::add(var_coord_start, var_12);
        var_33 = df::load(var_joint_q, var_32);
        var_35 = df::add(var_coord_start, var_34);
        var_36 = df::load(var_joint_q, var_35);
        var_37 = df::quat(var_29, var_31, var_33, var_36);
        // w_new = w + a*dt; quaternion kinematics: qdot = 0.5 * quat(w_new, 0) * q,
        // then q_new = normalize(q + qdot*dt)
        var_38 = df::mul(var_20, var_dt);
        var_39 = df::add(var_27, var_38);
        var_41 = df::quat(var_39, var_40);
        var_42 = df::mul(var_41, var_37);
        var_44 = df::mul(var_42, var_43);
        var_45 = df::mul(var_44, var_dt);
        var_46 = df::add(var_37, var_45);
        var_47 = df::normalize(var_46);
        // scatter results back to the packed output arrays
        var_48 = df::add(var_coord_start, var_0);
        var_49 = df::index(var_47, var_0);
        df::store(var_joint_q_new, var_48, var_49);
        var_50 = df::add(var_coord_start, var_2);
        var_51 = df::index(var_47, var_2);
        df::store(var_joint_q_new, var_50, var_51);
        var_52 = df::add(var_coord_start, var_12);
        var_53 = df::index(var_47, var_12);
        df::store(var_joint_q_new, var_52, var_53);
        var_54 = df::add(var_coord_start, var_34);
        var_55 = df::index(var_47, var_34);
        df::store(var_joint_q_new, var_54, var_55);
        var_56 = df::add(var_dof_start, var_0);
        var_57 = df::index(var_39, var_0);
        df::store(var_joint_qd_new, var_56, var_57);
        var_58 = df::add(var_dof_start, var_2);
        var_59 = df::index(var_39, var_2);
        df::store(var_joint_qd_new, var_58, var_59);
        var_60 = df::add(var_dof_start, var_12);
        var_61 = df::index(var_39, var_12);
        df::store(var_joint_qd_new, var_60, var_61);
    }
    var_63 = (var_type == var_62);
    // type 4: 6 dofs (angular dofs 0-2, linear dofs 3-5) + 7 coordinates
    // (position 0-2, quaternion 3-6)
    if (var_63) {
        // gather angular accel (var_70), linear accel (var_78),
        // angular vel (var_85), linear vel (var_92)
        var_64 = df::add(var_dof_start, var_0);
        var_65 = df::load(var_joint_qdd, var_64);
        var_66 = df::add(var_dof_start, var_2);
        var_67 = df::load(var_joint_qdd, var_66);
        var_68 = df::add(var_dof_start, var_12);
        var_69 = df::load(var_joint_qdd, var_68);
        var_70 = df::float3(var_65, var_67, var_69);
        var_71 = df::add(var_dof_start, var_34);
        var_72 = df::load(var_joint_qdd, var_71);
        var_73 = df::add(var_dof_start, var_62);
        var_74 = df::load(var_joint_qdd, var_73);
        var_76 = df::add(var_dof_start, var_75);
        var_77 = df::load(var_joint_qdd, var_76);
        var_78 = df::float3(var_72, var_74, var_77);
        var_79 = df::add(var_dof_start, var_0);
        var_80 = df::load(var_joint_qd, var_79);
        var_81 = df::add(var_dof_start, var_2);
        var_82 = df::load(var_joint_qd, var_81);
        var_83 = df::add(var_dof_start, var_12);
        var_84 = df::load(var_joint_qd, var_83);
        var_85 = df::float3(var_80, var_82, var_84);
        var_86 = df::add(var_dof_start, var_34);
        var_87 = df::load(var_joint_qd, var_86);
        var_88 = df::add(var_dof_start, var_62);
        var_89 = df::load(var_joint_qd, var_88);
        var_90 = df::add(var_dof_start, var_75);
        var_91 = df::load(var_joint_qd, var_90);
        var_92 = df::float3(var_87, var_89, var_91);
        // integrate twist: w_new = w + a_ang*dt, v_new = v + a_lin*dt
        var_93 = df::mul(var_70, var_dt);
        var_94 = df::add(var_85, var_93);
        var_95 = df::mul(var_78, var_dt);
        var_96 = df::add(var_92, var_95);
        var_97 = df::add(var_coord_start, var_0);
        var_98 = df::load(var_joint_q, var_97);
        var_99 = df::add(var_coord_start, var_2);
        var_100 = df::load(var_joint_q, var_99);
        var_101 = df::add(var_coord_start, var_12);
        var_102 = df::load(var_joint_q, var_101);
        var_103 = df::float3(var_98, var_100, var_102);
        // point velocity of the body origin: v + w x p
        // (presumably the twist is expressed in a world/spatial frame — TODO confirm)
        var_104 = df::cross(var_94, var_103);
        var_105 = df::add(var_96, var_104);
        var_106 = df::add(var_coord_start, var_34);
        var_107 = df::load(var_joint_q, var_106);
        var_108 = df::add(var_coord_start, var_62);
        var_109 = df::load(var_joint_q, var_108);
        var_110 = df::add(var_coord_start, var_75);
        var_111 = df::load(var_joint_q, var_110);
        var_113 = df::add(var_coord_start, var_112);
        var_114 = df::load(var_joint_q, var_113);
        var_115 = df::quat(var_107, var_109, var_111, var_114);
        // quaternion kinematics as in the type-2 branch
        var_116 = df::quat(var_94, var_40);
        var_117 = df::mul(var_116, var_115);
        var_118 = df::mul(var_117, var_43);
        var_119 = df::mul(var_105, var_dt);
        var_120 = df::add(var_103, var_119);
        var_121 = df::mul(var_118, var_dt);
        var_122 = df::add(var_115, var_121);
        var_123 = df::normalize(var_122);
        // scatter: position (coords 0-2), quaternion (coords 3-6)
        var_124 = df::add(var_coord_start, var_0);
        var_125 = df::index(var_120, var_0);
        df::store(var_joint_q_new, var_124, var_125);
        var_126 = df::add(var_coord_start, var_2);
        var_127 = df::index(var_120, var_2);
        df::store(var_joint_q_new, var_126, var_127);
        var_128 = df::add(var_coord_start, var_12);
        var_129 = df::index(var_120, var_12);
        df::store(var_joint_q_new, var_128, var_129);
        var_130 = df::add(var_coord_start, var_34);
        var_131 = df::index(var_123, var_0);
        df::store(var_joint_q_new, var_130, var_131);
        var_132 = df::add(var_coord_start, var_62);
        var_133 = df::index(var_123, var_2);
        df::store(var_joint_q_new, var_132, var_133);
        var_134 = df::add(var_coord_start, var_75);
        var_135 = df::index(var_123, var_12);
        df::store(var_joint_q_new, var_134, var_135);
        var_136 = df::add(var_coord_start, var_112);
        var_137 = df::index(var_123, var_34);
        df::store(var_joint_q_new, var_136, var_137);
        // scatter: angular dofs 0-2, linear dofs 3-5
        var_138 = df::add(var_dof_start, var_0);
        var_139 = df::index(var_94, var_0);
        df::store(var_joint_qd_new, var_138, var_139);
        var_140 = df::add(var_dof_start, var_2);
        var_141 = df::index(var_94, var_2);
        df::store(var_joint_qd_new, var_140, var_141);
        var_142 = df::add(var_dof_start, var_12);
        var_143 = df::index(var_94, var_12);
        df::store(var_joint_qd_new, var_142, var_143);
        var_144 = df::add(var_dof_start, var_34);
        var_145 = df::index(var_96, var_0);
        df::store(var_joint_qd_new, var_144, var_145);
        var_146 = df::add(var_dof_start, var_62);
        var_147 = df::index(var_96, var_2);
        df::store(var_joint_qd_new, var_146, var_147);
        var_148 = df::add(var_dof_start, var_75);
        var_149 = df::index(var_96, var_12);
        df::store(var_joint_qd_new, var_148, var_149);
    }
    return var_0;
}
// Auto-generated (dflex codegen) — reverse-mode adjoint of
// jcalc_integrate_cpu_func.  The primal forward pass is replayed verbatim to
// recompute every intermediate var_N, then the reverse sweep walks the program
// backwards, accumulating gradients into the adj_N duals and into the adj_*
// output parameters via the df::adj_* counterparts of each primal op.
// Statement order in the reverse section is the exact reverse of the forward
// section — do not reorder or hand-edit; names must match the primal twin.
//
// Parameters mirror the primal function; each var_* input has a matching
// adj_* accumulator (scalars by reference, arrays as pointers).  adj_ret is
// the incoming gradient of the (dummy) return value.  Gradients are
// accumulated (+=) — callers are expected to zero the adj buffers beforehand.
void adj_jcalc_integrate_cpu_func(
    int var_type,
    float* var_joint_q,
    float* var_joint_qd,
    float* var_joint_qdd,
    int var_coord_start,
    int var_dof_start,
    float var_dt,
    float* var_joint_q_new,
    float* var_joint_qd_new,
    int & adj_type,
    float* adj_joint_q,
    float* adj_joint_qd,
    float* adj_joint_qdd,
    int & adj_coord_start,
    int & adj_dof_start,
    float & adj_dt,
    float* adj_joint_q_new,
    float* adj_joint_qd_new,
    int & adj_ret)
{
    //---------
    // primal vars
    const int var_0 = 0;
    bool var_1;
    const int var_2 = 1;
    bool var_3;
    bool var_4;
    float var_5;
    float var_6;
    float var_7;
    float var_8;
    float var_9;
    float var_10;
    float var_11;
    const int var_12 = 2;
    bool var_13;
    int var_14;
    float var_15;
    int var_16;
    float var_17;
    int var_18;
    float var_19;
    df::float3 var_20;
    int var_21;
    float var_22;
    int var_23;
    float var_24;
    int var_25;
    float var_26;
    df::float3 var_27;
    int var_28;
    float var_29;
    int var_30;
    float var_31;
    int var_32;
    float var_33;
    const int var_34 = 3;
    int var_35;
    float var_36;
    quat var_37;
    df::float3 var_38;
    df::float3 var_39;
    const float var_40 = 0.0;
    quat var_41;
    quat var_42;
    const float var_43 = 0.5;
    quat var_44;
    quat var_45;
    quat var_46;
    quat var_47;
    int var_48;
    float var_49;
    int var_50;
    float var_51;
    int var_52;
    float var_53;
    int var_54;
    float var_55;
    int var_56;
    float var_57;
    int var_58;
    float var_59;
    int var_60;
    float var_61;
    const int var_62 = 4;
    bool var_63;
    int var_64;
    float var_65;
    int var_66;
    float var_67;
    int var_68;
    float var_69;
    df::float3 var_70;
    int var_71;
    float var_72;
    int var_73;
    float var_74;
    const int var_75 = 5;
    int var_76;
    float var_77;
    df::float3 var_78;
    int var_79;
    float var_80;
    int var_81;
    float var_82;
    int var_83;
    float var_84;
    df::float3 var_85;
    int var_86;
    float var_87;
    int var_88;
    float var_89;
    int var_90;
    float var_91;
    df::float3 var_92;
    df::float3 var_93;
    df::float3 var_94;
    df::float3 var_95;
    df::float3 var_96;
    int var_97;
    float var_98;
    int var_99;
    float var_100;
    int var_101;
    float var_102;
    df::float3 var_103;
    df::float3 var_104;
    df::float3 var_105;
    int var_106;
    float var_107;
    int var_108;
    float var_109;
    int var_110;
    float var_111;
    const int var_112 = 6;
    int var_113;
    float var_114;
    quat var_115;
    quat var_116;
    quat var_117;
    quat var_118;
    df::float3 var_119;
    df::float3 var_120;
    quat var_121;
    quat var_122;
    quat var_123;
    int var_124;
    float var_125;
    int var_126;
    float var_127;
    int var_128;
    float var_129;
    int var_130;
    float var_131;
    int var_132;
    float var_133;
    int var_134;
    float var_135;
    int var_136;
    float var_137;
    int var_138;
    float var_139;
    int var_140;
    float var_141;
    int var_142;
    float var_143;
    int var_144;
    float var_145;
    int var_146;
    float var_147;
    int var_148;
    float var_149;
    //---------
    // dual vars (adjoint accumulators, zero-initialized; one per primal var)
    int adj_0 = 0;
    bool adj_1 = 0;
    int adj_2 = 0;
    bool adj_3 = 0;
    bool adj_4 = 0;
    float adj_5 = 0;
    float adj_6 = 0;
    float adj_7 = 0;
    float adj_8 = 0;
    float adj_9 = 0;
    float adj_10 = 0;
    float adj_11 = 0;
    int adj_12 = 0;
    bool adj_13 = 0;
    int adj_14 = 0;
    float adj_15 = 0;
    int adj_16 = 0;
    float adj_17 = 0;
    int adj_18 = 0;
    float adj_19 = 0;
    df::float3 adj_20 = 0;
    int adj_21 = 0;
    float adj_22 = 0;
    int adj_23 = 0;
    float adj_24 = 0;
    int adj_25 = 0;
    float adj_26 = 0;
    df::float3 adj_27 = 0;
    int adj_28 = 0;
    float adj_29 = 0;
    int adj_30 = 0;
    float adj_31 = 0;
    int adj_32 = 0;
    float adj_33 = 0;
    int adj_34 = 0;
    int adj_35 = 0;
    float adj_36 = 0;
    quat adj_37 = 0;
    df::float3 adj_38 = 0;
    df::float3 adj_39 = 0;
    float adj_40 = 0;
    quat adj_41 = 0;
    quat adj_42 = 0;
    float adj_43 = 0;
    quat adj_44 = 0;
    quat adj_45 = 0;
    quat adj_46 = 0;
    quat adj_47 = 0;
    int adj_48 = 0;
    float adj_49 = 0;
    int adj_50 = 0;
    float adj_51 = 0;
    int adj_52 = 0;
    float adj_53 = 0;
    int adj_54 = 0;
    float adj_55 = 0;
    int adj_56 = 0;
    float adj_57 = 0;
    int adj_58 = 0;
    float adj_59 = 0;
    int adj_60 = 0;
    float adj_61 = 0;
    int adj_62 = 0;
    bool adj_63 = 0;
    int adj_64 = 0;
    float adj_65 = 0;
    int adj_66 = 0;
    float adj_67 = 0;
    int adj_68 = 0;
    float adj_69 = 0;
    df::float3 adj_70 = 0;
    int adj_71 = 0;
    float adj_72 = 0;
    int adj_73 = 0;
    float adj_74 = 0;
    int adj_75 = 0;
    int adj_76 = 0;
    float adj_77 = 0;
    df::float3 adj_78 = 0;
    int adj_79 = 0;
    float adj_80 = 0;
    int adj_81 = 0;
    float adj_82 = 0;
    int adj_83 = 0;
    float adj_84 = 0;
    df::float3 adj_85 = 0;
    int adj_86 = 0;
    float adj_87 = 0;
    int adj_88 = 0;
    float adj_89 = 0;
    int adj_90 = 0;
    float adj_91 = 0;
    df::float3 adj_92 = 0;
    df::float3 adj_93 = 0;
    df::float3 adj_94 = 0;
    df::float3 adj_95 = 0;
    df::float3 adj_96 = 0;
    int adj_97 = 0;
    float adj_98 = 0;
    int adj_99 = 0;
    float adj_100 = 0;
    int adj_101 = 0;
    float adj_102 = 0;
    df::float3 adj_103 = 0;
    df::float3 adj_104 = 0;
    df::float3 adj_105 = 0;
    int adj_106 = 0;
    float adj_107 = 0;
    int adj_108 = 0;
    float adj_109 = 0;
    int adj_110 = 0;
    float adj_111 = 0;
    int adj_112 = 0;
    int adj_113 = 0;
    float adj_114 = 0;
    quat adj_115 = 0;
    quat adj_116 = 0;
    quat adj_117 = 0;
    quat adj_118 = 0;
    df::float3 adj_119 = 0;
    df::float3 adj_120 = 0;
    quat adj_121 = 0;
    quat adj_122 = 0;
    quat adj_123 = 0;
    int adj_124 = 0;
    float adj_125 = 0;
    int adj_126 = 0;
    float adj_127 = 0;
    int adj_128 = 0;
    float adj_129 = 0;
    int adj_130 = 0;
    float adj_131 = 0;
    int adj_132 = 0;
    float adj_133 = 0;
    int adj_134 = 0;
    float adj_135 = 0;
    int adj_136 = 0;
    float adj_137 = 0;
    int adj_138 = 0;
    float adj_139 = 0;
    int adj_140 = 0;
    float adj_141 = 0;
    int adj_142 = 0;
    float adj_143 = 0;
    int adj_144 = 0;
    float adj_145 = 0;
    int adj_146 = 0;
    float adj_147 = 0;
    int adj_148 = 0;
    float adj_149 = 0;
    //---------
    // forward (replay of the primal pass to repopulate all var_N intermediates;
    // must stay byte-for-byte identical to jcalc_integrate_cpu_func's body)
    var_1 = (var_type == var_0);
    var_3 = (var_type == var_2);
    var_4 = var_1 || var_3;
    if (var_4) {
        var_5 = df::load(var_joint_qdd, var_dof_start);
        var_6 = df::load(var_joint_qd, var_dof_start);
        var_7 = df::load(var_joint_q, var_coord_start);
        var_8 = df::mul(var_5, var_dt);
        var_9 = df::add(var_6, var_8);
        var_10 = df::mul(var_9, var_dt);
        var_11 = df::add(var_7, var_10);
        df::store(var_joint_qd_new, var_dof_start, var_9);
        df::store(var_joint_q_new, var_coord_start, var_11);
    }
    var_13 = (var_type == var_12);
    if (var_13) {
        var_14 = df::add(var_dof_start, var_0);
        var_15 = df::load(var_joint_qdd, var_14);
        var_16 = df::add(var_dof_start, var_2);
        var_17 = df::load(var_joint_qdd, var_16);
        var_18 = df::add(var_dof_start, var_12);
        var_19 = df::load(var_joint_qdd, var_18);
        var_20 = df::float3(var_15, var_17, var_19);
        var_21 = df::add(var_dof_start, var_0);
        var_22 = df::load(var_joint_qd, var_21);
        var_23 = df::add(var_dof_start, var_2);
        var_24 = df::load(var_joint_qd, var_23);
        var_25 = df::add(var_dof_start, var_12);
        var_26 = df::load(var_joint_qd, var_25);
        var_27 = df::float3(var_22, var_24, var_26);
        var_28 = df::add(var_coord_start, var_0);
        var_29 = df::load(var_joint_q, var_28);
        var_30 = df::add(var_coord_start, var_2);
        var_31 = df::load(var_joint_q, var_30);
        var_32 = df::add(var_coord_start, var_12);
        var_33 = df::load(var_joint_q, var_32);
        var_35 = df::add(var_coord_start, var_34);
        var_36 = df::load(var_joint_q, var_35);
        var_37 = df::quat(var_29, var_31, var_33, var_36);
        var_38 = df::mul(var_20, var_dt);
        var_39 = df::add(var_27, var_38);
        var_41 = df::quat(var_39, var_40);
        var_42 = df::mul(var_41, var_37);
        var_44 = df::mul(var_42, var_43);
        var_45 = df::mul(var_44, var_dt);
        var_46 = df::add(var_37, var_45);
        var_47 = df::normalize(var_46);
        var_48 = df::add(var_coord_start, var_0);
        var_49 = df::index(var_47, var_0);
        df::store(var_joint_q_new, var_48, var_49);
        var_50 = df::add(var_coord_start, var_2);
        var_51 = df::index(var_47, var_2);
        df::store(var_joint_q_new, var_50, var_51);
        var_52 = df::add(var_coord_start, var_12);
        var_53 = df::index(var_47, var_12);
        df::store(var_joint_q_new, var_52, var_53);
        var_54 = df::add(var_coord_start, var_34);
        var_55 = df::index(var_47, var_34);
        df::store(var_joint_q_new, var_54, var_55);
        var_56 = df::add(var_dof_start, var_0);
        var_57 = df::index(var_39, var_0);
        df::store(var_joint_qd_new, var_56, var_57);
        var_58 = df::add(var_dof_start, var_2);
        var_59 = df::index(var_39, var_2);
        df::store(var_joint_qd_new, var_58, var_59);
        var_60 = df::add(var_dof_start, var_12);
        var_61 = df::index(var_39, var_12);
        df::store(var_joint_qd_new, var_60, var_61);
    }
    var_63 = (var_type == var_62);
    if (var_63) {
        var_64 = df::add(var_dof_start, var_0);
        var_65 = df::load(var_joint_qdd, var_64);
        var_66 = df::add(var_dof_start, var_2);
        var_67 = df::load(var_joint_qdd, var_66);
        var_68 = df::add(var_dof_start, var_12);
        var_69 = df::load(var_joint_qdd, var_68);
        var_70 = df::float3(var_65, var_67, var_69);
        var_71 = df::add(var_dof_start, var_34);
        var_72 = df::load(var_joint_qdd, var_71);
        var_73 = df::add(var_dof_start, var_62);
        var_74 = df::load(var_joint_qdd, var_73);
        var_76 = df::add(var_dof_start, var_75);
        var_77 = df::load(var_joint_qdd, var_76);
        var_78 = df::float3(var_72, var_74, var_77);
        var_79 = df::add(var_dof_start, var_0);
        var_80 = df::load(var_joint_qd, var_79);
        var_81 = df::add(var_dof_start, var_2);
        var_82 = df::load(var_joint_qd, var_81);
        var_83 = df::add(var_dof_start, var_12);
        var_84 = df::load(var_joint_qd, var_83);
        var_85 = df::float3(var_80, var_82, var_84);
        var_86 = df::add(var_dof_start, var_34);
        var_87 = df::load(var_joint_qd, var_86);
        var_88 = df::add(var_dof_start, var_62);
        var_89 = df::load(var_joint_qd, var_88);
        var_90 = df::add(var_dof_start, var_75);
        var_91 = df::load(var_joint_qd, var_90);
        var_92 = df::float3(var_87, var_89, var_91);
        var_93 = df::mul(var_70, var_dt);
        var_94 = df::add(var_85, var_93);
        var_95 = df::mul(var_78, var_dt);
        var_96 = df::add(var_92, var_95);
        var_97 = df::add(var_coord_start, var_0);
        var_98 = df::load(var_joint_q, var_97);
        var_99 = df::add(var_coord_start, var_2);
        var_100 = df::load(var_joint_q, var_99);
        var_101 = df::add(var_coord_start, var_12);
        var_102 = df::load(var_joint_q, var_101);
        var_103 = df::float3(var_98, var_100, var_102);
        var_104 = df::cross(var_94, var_103);
        var_105 = df::add(var_96, var_104);
        var_106 = df::add(var_coord_start, var_34);
        var_107 = df::load(var_joint_q, var_106);
        var_108 = df::add(var_coord_start, var_62);
        var_109 = df::load(var_joint_q, var_108);
        var_110 = df::add(var_coord_start, var_75);
        var_111 = df::load(var_joint_q, var_110);
        var_113 = df::add(var_coord_start, var_112);
        var_114 = df::load(var_joint_q, var_113);
        var_115 = df::quat(var_107, var_109, var_111, var_114);
        var_116 = df::quat(var_94, var_40);
        var_117 = df::mul(var_116, var_115);
        var_118 = df::mul(var_117, var_43);
        var_119 = df::mul(var_105, var_dt);
        var_120 = df::add(var_103, var_119);
        var_121 = df::mul(var_118, var_dt);
        var_122 = df::add(var_115, var_121);
        var_123 = df::normalize(var_122);
        var_124 = df::add(var_coord_start, var_0);
        var_125 = df::index(var_120, var_0);
        df::store(var_joint_q_new, var_124, var_125);
        var_126 = df::add(var_coord_start, var_2);
        var_127 = df::index(var_120, var_2);
        df::store(var_joint_q_new, var_126, var_127);
        var_128 = df::add(var_coord_start, var_12);
        var_129 = df::index(var_120, var_12);
        df::store(var_joint_q_new, var_128, var_129);
        var_130 = df::add(var_coord_start, var_34);
        var_131 = df::index(var_123, var_0);
        df::store(var_joint_q_new, var_130, var_131);
        var_132 = df::add(var_coord_start, var_62);
        var_133 = df::index(var_123, var_2);
        df::store(var_joint_q_new, var_132, var_133);
        var_134 = df::add(var_coord_start, var_75);
        var_135 = df::index(var_123, var_12);
        df::store(var_joint_q_new, var_134, var_135);
        var_136 = df::add(var_coord_start, var_112);
        var_137 = df::index(var_123, var_34);
        df::store(var_joint_q_new, var_136, var_137);
        var_138 = df::add(var_dof_start, var_0);
        var_139 = df::index(var_94, var_0);
        df::store(var_joint_qd_new, var_138, var_139);
        var_140 = df::add(var_dof_start, var_2);
        var_141 = df::index(var_94, var_2);
        df::store(var_joint_qd_new, var_140, var_141);
        var_142 = df::add(var_dof_start, var_12);
        var_143 = df::index(var_94, var_12);
        df::store(var_joint_qd_new, var_142, var_143);
        var_144 = df::add(var_dof_start, var_34);
        var_145 = df::index(var_96, var_0);
        df::store(var_joint_qd_new, var_144, var_145);
        var_146 = df::add(var_dof_start, var_62);
        var_147 = df::index(var_96, var_2);
        df::store(var_joint_qd_new, var_146, var_147);
        var_148 = df::add(var_dof_start, var_75);
        var_149 = df::index(var_96, var_12);
        df::store(var_joint_qd_new, var_148, var_149);
    }
    // generated jump corresponding to the primal 'return var_0'
    goto label0;
    //---------
    // reverse (exact reverse of the forward statement order; each df::adj_*
    // propagates the output adjoint back into its inputs' accumulators)
    label0:;
    adj_0 += adj_ret;
    if (var_63) {
        df::adj_store(var_joint_qd_new, var_148, var_149, adj_joint_qd_new, adj_148, adj_149);
        df::adj_index(var_96, var_12, adj_96, adj_12, adj_149);
        df::adj_add(var_dof_start, var_75, adj_dof_start, adj_75, adj_148);
        df::adj_store(var_joint_qd_new, var_146, var_147, adj_joint_qd_new, adj_146, adj_147);
        df::adj_index(var_96, var_2, adj_96, adj_2, adj_147);
        df::adj_add(var_dof_start, var_62, adj_dof_start, adj_62, adj_146);
        df::adj_store(var_joint_qd_new, var_144, var_145, adj_joint_qd_new, adj_144, adj_145);
        df::adj_index(var_96, var_0, adj_96, adj_0, adj_145);
        df::adj_add(var_dof_start, var_34, adj_dof_start, adj_34, adj_144);
        df::adj_store(var_joint_qd_new, var_142, var_143, adj_joint_qd_new, adj_142, adj_143);
        df::adj_index(var_94, var_12, adj_94, adj_12, adj_143);
        df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_142);
        df::adj_store(var_joint_qd_new, var_140, var_141, adj_joint_qd_new, adj_140, adj_141);
        df::adj_index(var_94, var_2, adj_94, adj_2, adj_141);
        df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_140);
        df::adj_store(var_joint_qd_new, var_138, var_139, adj_joint_qd_new, adj_138, adj_139);
        df::adj_index(var_94, var_0, adj_94, adj_0, adj_139);
        df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_138);
        df::adj_store(var_joint_q_new, var_136, var_137, adj_joint_q_new, adj_136, adj_137);
        df::adj_index(var_123, var_34, adj_123, adj_34, adj_137);
        df::adj_add(var_coord_start, var_112, adj_coord_start, adj_112, adj_136);
        df::adj_store(var_joint_q_new, var_134, var_135, adj_joint_q_new, adj_134, adj_135);
        df::adj_index(var_123, var_12, adj_123, adj_12, adj_135);
        df::adj_add(var_coord_start, var_75, adj_coord_start, adj_75, adj_134);
        df::adj_store(var_joint_q_new, var_132, var_133, adj_joint_q_new, adj_132, adj_133);
        df::adj_index(var_123, var_2, adj_123, adj_2, adj_133);
        df::adj_add(var_coord_start, var_62, adj_coord_start, adj_62, adj_132);
        df::adj_store(var_joint_q_new, var_130, var_131, adj_joint_q_new, adj_130, adj_131);
        df::adj_index(var_123, var_0, adj_123, adj_0, adj_131);
        df::adj_add(var_coord_start, var_34, adj_coord_start, adj_34, adj_130);
        df::adj_store(var_joint_q_new, var_128, var_129, adj_joint_q_new, adj_128, adj_129);
        df::adj_index(var_120, var_12, adj_120, adj_12, adj_129);
        df::adj_add(var_coord_start, var_12, adj_coord_start, adj_12, adj_128);
        df::adj_store(var_joint_q_new, var_126, var_127, adj_joint_q_new, adj_126, adj_127);
        df::adj_index(var_120, var_2, adj_120, adj_2, adj_127);
        df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_126);
        df::adj_store(var_joint_q_new, var_124, var_125, adj_joint_q_new, adj_124, adj_125);
        df::adj_index(var_120, var_0, adj_120, adj_0, adj_125);
        df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_124);
        df::adj_normalize(var_122, adj_122, adj_123);
        df::adj_add(var_115, var_121, adj_115, adj_121, adj_122);
        df::adj_mul(var_118, var_dt, adj_118, adj_dt, adj_121);
        df::adj_add(var_103, var_119, adj_103, adj_119, adj_120);
        df::adj_mul(var_105, var_dt, adj_105, adj_dt, adj_119);
        df::adj_mul(var_117, var_43, adj_117, adj_43, adj_118);
        df::adj_mul(var_116, var_115, adj_116, adj_115, adj_117);
        df::adj_quat(var_94, var_40, adj_94, adj_40, adj_116);
        df::adj_quat(var_107, var_109, var_111, var_114, adj_107, adj_109, adj_111, adj_114, adj_115);
        df::adj_load(var_joint_q, var_113, adj_joint_q, adj_113, adj_114);
        df::adj_add(var_coord_start, var_112, adj_coord_start, adj_112, adj_113);
        df::adj_load(var_joint_q, var_110, adj_joint_q, adj_110, adj_111);
        df::adj_add(var_coord_start, var_75, adj_coord_start, adj_75, adj_110);
        df::adj_load(var_joint_q, var_108, adj_joint_q, adj_108, adj_109);
        df::adj_add(var_coord_start, var_62, adj_coord_start, adj_62, adj_108);
        df::adj_load(var_joint_q, var_106, adj_joint_q, adj_106, adj_107);
        df::adj_add(var_coord_start, var_34, adj_coord_start, adj_34, adj_106);
        df::adj_add(var_96, var_104, adj_96, adj_104, adj_105);
        df::adj_cross(var_94, var_103, adj_94, adj_103, adj_104);
        df::adj_float3(var_98, var_100, var_102, adj_98, adj_100, adj_102, adj_103);
        df::adj_load(var_joint_q, var_101, adj_joint_q, adj_101, adj_102);
        df::adj_add(var_coord_start, var_12, adj_coord_start, adj_12, adj_101);
        df::adj_load(var_joint_q, var_99, adj_joint_q, adj_99, adj_100);
        df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_99);
        df::adj_load(var_joint_q, var_97, adj_joint_q, adj_97, adj_98);
        df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_97);
        df::adj_add(var_92, var_95, adj_92, adj_95, adj_96);
        df::adj_mul(var_78, var_dt, adj_78, adj_dt, adj_95);
        df::adj_add(var_85, var_93, adj_85, adj_93, adj_94);
        df::adj_mul(var_70, var_dt, adj_70, adj_dt, adj_93);
        df::adj_float3(var_87, var_89, var_91, adj_87, adj_89, adj_91, adj_92);
        df::adj_load(var_joint_qd, var_90, adj_joint_qd, adj_90, adj_91);
        df::adj_add(var_dof_start, var_75, adj_dof_start, adj_75, adj_90);
        df::adj_load(var_joint_qd, var_88, adj_joint_qd, adj_88, adj_89);
        df::adj_add(var_dof_start, var_62, adj_dof_start, adj_62, adj_88);
        df::adj_load(var_joint_qd, var_86, adj_joint_qd, adj_86, adj_87);
        df::adj_add(var_dof_start, var_34, adj_dof_start, adj_34, adj_86);
        df::adj_float3(var_80, var_82, var_84, adj_80, adj_82, adj_84, adj_85);
        df::adj_load(var_joint_qd, var_83, adj_joint_qd, adj_83, adj_84);
        df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_83);
        df::adj_load(var_joint_qd, var_81, adj_joint_qd, adj_81, adj_82);
        df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_81);
        df::adj_load(var_joint_qd, var_79, adj_joint_qd, adj_79, adj_80);
        df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_79);
        df::adj_float3(var_72, var_74, var_77, adj_72, adj_74, adj_77, adj_78);
        df::adj_load(var_joint_qdd, var_76, adj_joint_qdd, adj_76, adj_77);
        df::adj_add(var_dof_start, var_75, adj_dof_start, adj_75, adj_76);
        df::adj_load(var_joint_qdd, var_73, adj_joint_qdd, adj_73, adj_74);
        df::adj_add(var_dof_start, var_62, adj_dof_start, adj_62, adj_73);
        df::adj_load(var_joint_qdd, var_71, adj_joint_qdd, adj_71, adj_72);
        df::adj_add(var_dof_start, var_34, adj_dof_start, adj_34, adj_71);
        df::adj_float3(var_65, var_67, var_69, adj_65, adj_67, adj_69, adj_70);
        df::adj_load(var_joint_qdd, var_68, adj_joint_qdd, adj_68, adj_69);
        df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_68);
        df::adj_load(var_joint_qdd, var_66, adj_joint_qdd, adj_66, adj_67);
        df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_66);
        df::adj_load(var_joint_qdd, var_64, adj_joint_qdd, adj_64, adj_65);
        df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_64);
    }
    if (var_13) {
        df::adj_store(var_joint_qd_new, var_60, var_61, adj_joint_qd_new, adj_60, adj_61);
        df::adj_index(var_39, var_12, adj_39, adj_12, adj_61);
        df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_60);
        df::adj_store(var_joint_qd_new, var_58, var_59, adj_joint_qd_new, adj_58, adj_59);
        df::adj_index(var_39, var_2, adj_39, adj_2, adj_59);
        df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_58);
        df::adj_store(var_joint_qd_new, var_56, var_57, adj_joint_qd_new, adj_56, adj_57);
        df::adj_index(var_39, var_0, adj_39, adj_0, adj_57);
        df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_56);
        df::adj_store(var_joint_q_new, var_54, var_55, adj_joint_q_new, adj_54, adj_55);
        df::adj_index(var_47, var_34, adj_47, adj_34, adj_55);
        df::adj_add(var_coord_start, var_34, adj_coord_start, adj_34, adj_54);
        df::adj_store(var_joint_q_new, var_52, var_53, adj_joint_q_new, adj_52, adj_53);
        df::adj_index(var_47, var_12, adj_47, adj_12, adj_53);
        df::adj_add(var_coord_start, var_12, adj_coord_start, adj_12, adj_52);
        df::adj_store(var_joint_q_new, var_50, var_51, adj_joint_q_new, adj_50, adj_51);
        df::adj_index(var_47, var_2, adj_47, adj_2, adj_51);
        df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_50);
        df::adj_store(var_joint_q_new, var_48, var_49, adj_joint_q_new, adj_48, adj_49);
        df::adj_index(var_47, var_0, adj_47, adj_0, adj_49);
        df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_48);
        df::adj_normalize(var_46, adj_46, adj_47);
        df::adj_add(var_37, var_45, adj_37, adj_45, adj_46);
        df::adj_mul(var_44, var_dt, adj_44, adj_dt, adj_45);
        df::adj_mul(var_42, var_43, adj_42, adj_43, adj_44);
        df::adj_mul(var_41, var_37, adj_41, adj_37, adj_42);
        df::adj_quat(var_39, var_40, adj_39, adj_40, adj_41);
        df::adj_add(var_27, var_38, adj_27, adj_38, adj_39);
        df::adj_mul(var_20, var_dt, adj_20, adj_dt, adj_38);
        df::adj_quat(var_29, var_31, var_33, var_36, adj_29, adj_31, adj_33, adj_36, adj_37);
        df::adj_load(var_joint_q, var_35, adj_joint_q, adj_35, adj_36);
        df::adj_add(var_coord_start, var_34, adj_coord_start, adj_34, adj_35);
        df::adj_load(var_joint_q, var_32, adj_joint_q, adj_32, adj_33);
        df::adj_add(var_coord_start, var_12, adj_coord_start, adj_12, adj_32);
        df::adj_load(var_joint_q, var_30, adj_joint_q, adj_30, adj_31);
        df::adj_add(var_coord_start, var_2, adj_coord_start, adj_2, adj_30);
        df::adj_load(var_joint_q, var_28, adj_joint_q, adj_28, adj_29);
        df::adj_add(var_coord_start, var_0, adj_coord_start, adj_0, adj_28);
        df::adj_float3(var_22, var_24, var_26, adj_22, adj_24, adj_26, adj_27);
        df::adj_load(var_joint_qd, var_25, adj_joint_qd, adj_25, adj_26);
        df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_25);
        df::adj_load(var_joint_qd, var_23, adj_joint_qd, adj_23, adj_24);
        df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_23);
        df::adj_load(var_joint_qd, var_21, adj_joint_qd, adj_21, adj_22);
        df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_21);
        df::adj_float3(var_15, var_17, var_19, adj_15, adj_17, adj_19, adj_20);
        df::adj_load(var_joint_qdd, var_18, adj_joint_qdd, adj_18, adj_19);
        df::adj_add(var_dof_start, var_12, adj_dof_start, adj_12, adj_18);
        df::adj_load(var_joint_qdd, var_16, adj_joint_qdd, adj_16, adj_17);
        df::adj_add(var_dof_start, var_2, adj_dof_start, adj_2, adj_16);
        df::adj_load(var_joint_qdd, var_14, adj_joint_qdd, adj_14, adj_15);
        df::adj_add(var_dof_start, var_0, adj_dof_start, adj_0, adj_14);
    }
    if (var_4) {
        df::adj_store(var_joint_q_new, var_coord_start, var_11, adj_joint_q_new, adj_coord_start, adj_11);
        df::adj_store(var_joint_qd_new, var_dof_start, var_9, adj_joint_qd_new, adj_dof_start, adj_9);
        df::adj_add(var_7, var_10, adj_7, adj_10, adj_11);
        df::adj_mul(var_9, var_dt, adj_9, adj_dt, adj_10);
        df::adj_add(var_6, var_8, adj_6, adj_8, adj_9);
        df::adj_mul(var_5, var_dt, adj_5, adj_dt, adj_8);
        df::adj_load(var_joint_q, var_coord_start, adj_joint_q, adj_coord_start, adj_7);
        df::adj_load(var_joint_qd, var_dof_start, adj_joint_qd, adj_dof_start, adj_6);
        df::adj_load(var_joint_qdd, var_dof_start, adj_joint_qdd, adj_dof_start, adj_5);
    }
    return;
}
// Auto-generated (dflex codegen) — forward pass: compute link i's transforms
// by chaining up the kinematic tree.  body_X_sc[i] = parent_X * X_pj[i] *
// jcalc_transform(type, axis, q), and body_X_sm[i] = body_X_sc[i] * X_cm[i]
// (presumably the body-to-center-of-mass offset — TODO confirm).  Roots
// (parent index < 0) use the identity as the parent transform.  Assumes
// links are processed in topological order so the parent's body_X_sc entry
// is already written.  Returns 0 (generated kernels return a dummy int).
int compute_link_transform_cpu_func(
    int var_i,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    spatial_transform* var_joint_X_pj,
    spatial_transform* var_joint_X_cm,
    df::float3* var_joint_axis,
    spatial_transform* var_body_X_sc,
    spatial_transform* var_body_X_sm)
{
    //---------
    // primal vars
    int var_0;
    spatial_transform var_1;
    const int var_2 = 0;
    bool var_3;
    spatial_transform var_4;
    spatial_transform var_5;
    int var_6;
    df::float3 var_7;
    int var_8;
    int var_9;
    spatial_transform var_10;
    spatial_transform var_11;
    spatial_transform var_12;
    spatial_transform var_13;
    spatial_transform var_14;
    spatial_transform var_15;
    //---------
    // forward
    var_0 = df::load(var_joint_parent, var_i);      // parent link index (< 0 for a root)
    var_1 = df::spatial_transform_identity();
    var_3 = (var_0 >= var_2);
    if (var_3) {
        var_4 = df::load(var_body_X_sc, var_0);     // parent's already-computed transform
    }
    // NOTE(review): var_4 is only assigned when var_3 is true; df::select must
    // return var_4 only in that case (presumably select(c, a, b) == c ? b : a)
    // — confirm against the df select definition.
    var_5 = df::select(var_3, var_1, var_4);
    var_6 = df::load(var_joint_type, var_i);
    var_7 = df::load(var_joint_axis, var_i);
    var_8 = df::load(var_joint_q_start, var_i);
    var_9 = df::load(var_joint_qd_start, var_i);    // loaded but unused (generated dead code)
    var_10 = jcalc_transform_cpu_func(var_6, var_7, var_joint_q, var_8);
    var_11 = df::load(var_joint_X_pj, var_i);
    var_12 = df::spatial_transform_multiply(var_11, var_10);
    var_13 = df::spatial_transform_multiply(var_5, var_12);
    var_14 = df::load(var_joint_X_cm, var_i);
    var_15 = df::spatial_transform_multiply(var_13, var_14);
    df::store(var_body_X_sc, var_i, var_13);
    df::store(var_body_X_sm, var_i, var_15);
    return var_2;
}
// Adjoint (reverse-mode derivative) of compute_link_transform_cpu_func,
// generated code. It first replays the forward pass verbatim to recover every
// intermediate primal value, then sweeps the computation in reverse, each
// df::adj_* call accumulating gradients (adj_N / adj_* buffers) for the
// corresponding forward statement. The reverse section mirrors the forward
// pass line-for-line in opposite order and must not be reordered.
void adj_compute_link_transform_cpu_func(
    int var_i,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    spatial_transform* var_joint_X_pj,
    spatial_transform* var_joint_X_cm,
    df::float3* var_joint_axis,
    spatial_transform* var_body_X_sc,
    spatial_transform* var_body_X_sm,
    int & adj_i,
    int* adj_joint_type,
    int* adj_joint_parent,
    int* adj_joint_q_start,
    int* adj_joint_qd_start,
    float* adj_joint_q,
    spatial_transform* adj_joint_X_pj,
    spatial_transform* adj_joint_X_cm,
    df::float3* adj_joint_axis,
    spatial_transform* adj_body_X_sc,
    spatial_transform* adj_body_X_sm,
    int & adj_ret)
{
    //---------
    // primal vars
    int var_0;
    spatial_transform var_1;
    const int var_2 = 0;
    bool var_3;
    spatial_transform var_4;
    spatial_transform var_5;
    int var_6;
    df::float3 var_7;
    int var_8;
    int var_9;
    spatial_transform var_10;
    spatial_transform var_11;
    spatial_transform var_12;
    spatial_transform var_13;
    spatial_transform var_14;
    spatial_transform var_15;
    //---------
    // dual vars (one zero-initialized adjoint accumulator per primal temporary)
    int adj_0 = 0;
    spatial_transform adj_1 = 0;
    int adj_2 = 0;
    bool adj_3 = 0;
    spatial_transform adj_4 = 0;
    spatial_transform adj_5 = 0;
    int adj_6 = 0;
    df::float3 adj_7 = 0;
    int adj_8 = 0;
    int adj_9 = 0;
    spatial_transform adj_10 = 0;
    spatial_transform adj_11 = 0;
    spatial_transform adj_12 = 0;
    spatial_transform adj_13 = 0;
    spatial_transform adj_14 = 0;
    spatial_transform adj_15 = 0;
    //---------
    // forward (identical replay of the primal kernel)
    var_0 = df::load(var_joint_parent, var_i);
    var_1 = df::spatial_transform_identity();
    var_3 = (var_0 >= var_2);
    if (var_3) {
        var_4 = df::load(var_body_X_sc, var_0);
    }
    var_5 = df::select(var_3, var_1, var_4);
    var_6 = df::load(var_joint_type, var_i);
    var_7 = df::load(var_joint_axis, var_i);
    var_8 = df::load(var_joint_q_start, var_i);
    var_9 = df::load(var_joint_qd_start, var_i);
    var_10 = jcalc_transform_cpu_func(var_6, var_7, var_joint_q, var_8);
    var_11 = df::load(var_joint_X_pj, var_i);
    var_12 = df::spatial_transform_multiply(var_11, var_10);
    var_13 = df::spatial_transform_multiply(var_5, var_12);
    var_14 = df::load(var_joint_X_cm, var_i);
    var_15 = df::spatial_transform_multiply(var_13, var_14);
    df::store(var_body_X_sc, var_i, var_13);
    df::store(var_body_X_sm, var_i, var_15);
    goto label0;   // generated control flow: jump from end of replay into the reverse sweep
    //---------
    // reverse (forward statements revisited in opposite order)
    label0:;
    adj_2 += adj_ret;   // gradient of the returned sentinel constant
    df::adj_store(var_body_X_sm, var_i, var_15, adj_body_X_sm, adj_i, adj_15);
    df::adj_store(var_body_X_sc, var_i, var_13, adj_body_X_sc, adj_i, adj_13);
    df::adj_spatial_transform_multiply(var_13, var_14, adj_13, adj_14, adj_15);
    df::adj_load(var_joint_X_cm, var_i, adj_joint_X_cm, adj_i, adj_14);
    df::adj_spatial_transform_multiply(var_5, var_12, adj_5, adj_12, adj_13);
    df::adj_spatial_transform_multiply(var_11, var_10, adj_11, adj_10, adj_12);
    df::adj_load(var_joint_X_pj, var_i, adj_joint_X_pj, adj_i, adj_11);
    adj_jcalc_transform_cpu_func(var_6, var_7, var_joint_q, var_8, adj_6, adj_7, adj_joint_q, adj_8, adj_10);
    df::adj_load(var_joint_qd_start, var_i, adj_joint_qd_start, adj_i, adj_9);
    df::adj_load(var_joint_q_start, var_i, adj_joint_q_start, adj_i, adj_8);
    df::adj_load(var_joint_axis, var_i, adj_joint_axis, adj_i, adj_7);
    df::adj_load(var_joint_type, var_i, adj_joint_type, adj_i, adj_6);
    df::adj_select(var_3, var_1, var_4, adj_3, adj_1, adj_4, adj_5);
    if (var_3) {
        df::adj_load(var_body_X_sc, var_0, adj_body_X_sc, adj_0, adj_4);
    }
    df::adj_load(var_joint_parent, var_i, adj_joint_parent, adj_i, adj_0);
    return;
}
// Forward kernel (generated code; SSA-style var_N temporaries). Computes the
// spatial kinematics and force state of link var_i:
//   v_s[i] = v_s[parent] + S_s*qd              (roots use zero parent state)
//   a_s[i] = a_s[parent] + v_s[i] x (S_s*qd)
//   I_s[i] = spatial inertia of body i expressed at the origin (via
//            spatial_transform_inertia with body_X_sm)
//   f_s[i] = I_s*a_s + v_s x* (I_s*v_s) - gravity wrench
// jcalc_motion also writes the joint motion subspace into joint_S_s as a side
// effect. Parent links must be processed before children. Returns 0.
int compute_link_velocity_cpu_func(
    int var_i,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_qd_start,
    float* var_joint_qd,
    df::float3* var_joint_axis,
    spatial_matrix* var_body_I_m,
    spatial_transform* var_body_X_sc,
    spatial_transform* var_body_X_sm,
    spatial_transform* var_joint_X_pj,
    df::float3* var_gravity,
    spatial_vector* var_joint_S_s,
    spatial_matrix* var_body_I_s,
    spatial_vector* var_body_v_s,
    spatial_vector* var_body_f_s,
    spatial_vector* var_body_a_s)
{
    //---------
    // primal vars
    int var_0;
    df::float3 var_1;
    int var_2;
    int var_3;
    spatial_transform var_4;
    spatial_transform var_5;
    const int var_6 = 0;
    bool var_7;
    spatial_transform var_8;
    spatial_transform var_9;
    spatial_transform var_10;
    spatial_transform var_11;
    spatial_vector var_12;
    spatial_vector var_13;
    spatial_vector var_14;
    bool var_15;
    spatial_vector var_16;
    spatial_vector var_17;
    spatial_vector var_18;
    spatial_vector var_19;
    spatial_vector var_20;
    spatial_vector var_21;
    spatial_vector var_22;
    spatial_transform var_23;
    spatial_matrix var_24;
    df::float3 var_25;
    const int var_26 = 3;
    float var_27;
    df::float3 var_28;
    spatial_vector var_29;
    spatial_vector var_30;
    df::float3 var_31;
    quat var_32;
    spatial_transform var_33;
    spatial_vector var_34;
    spatial_matrix var_35;
    spatial_vector var_36;
    spatial_vector var_37;
    spatial_vector var_38;
    spatial_vector var_39;
    spatial_vector var_40;
    //---------
    // forward
    var_0 = df::load(var_joint_type, var_i);
    var_1 = df::load(var_joint_axis, var_i);
    var_2 = df::load(var_joint_parent, var_i);      // negative parent index => root link
    var_3 = df::load(var_joint_qd_start, var_i);
    var_4 = df::load(var_body_X_sc, var_i);         // loaded but unused below (kept by the code generator)
    var_5 = df::spatial_transform_identity();
    var_7 = (var_2 >= var_6);
    if (var_7) {
        var_8 = df::load(var_body_X_sc, var_2);     // parent transform (discarded by select for roots)
    }
    var_9 = df::select(var_7, var_5, var_8);
    var_10 = df::load(var_joint_X_pj, var_i);
    var_11 = df::spatial_transform_multiply(var_9, var_10);
    // joint-space velocity contribution S*qd; also stores S into joint_S_s
    var_12 = jcalc_motion_cpu_func(var_0, var_1, var_11, var_joint_S_s, var_joint_qd, var_3);
    var_13 = df::spatial_vector();                  // zero parent velocity for roots
    var_14 = df::spatial_vector();                  // zero parent acceleration for roots
    var_15 = (var_2 >= var_6);
    if (var_15) {
        var_16 = df::load(var_body_v_s, var_2);
        var_17 = df::load(var_body_a_s, var_2);
    }
    var_18 = df::select(var_15, var_13, var_16);
    var_19 = df::select(var_15, var_14, var_17);
    var_20 = df::add(var_18, var_12);               // v_s[i]
    var_21 = df::spatial_cross(var_20, var_12);     // velocity-product (bias) acceleration
    var_22 = df::add(var_19, var_21);               // a_s[i]
    var_23 = df::load(var_body_X_sm, var_i);
    var_24 = df::load(var_body_I_m, var_i);
    var_25 = df::load(var_gravity, var_6);
    var_27 = df::index(var_24, var_26, var_26);     // I_m[3][3] — presumably the body mass entry of the 6x6 inertia; TODO confirm
    var_28 = df::float3();
    var_29 = df::spatial_vector(var_28, var_25);    // pure-force spatial vector (0, g)
    var_30 = df::mul(var_29, var_27);               // gravity force m*g
    var_31 = df::spatial_transform_get_translation(var_23);  // world position of the mass frame (COM)
    var_32 = df::quat_identity();
    var_33 = df::spatial_transform(var_31, var_32);
    var_34 = spatial_transform_wrench_cpu_func(var_33, var_30);  // gravity wrench expressed at the origin
    var_35 = spatial_transform_inertia_cpu_func(var_23, var_24); // I_s[i]
    var_36 = df::mul(var_35, var_22);               // I*a
    var_37 = df::mul(var_35, var_20);               // I*v (spatial momentum)
    var_38 = df::spatial_cross_dual(var_20, var_37);// v x* (I*v)
    var_39 = df::add(var_36, var_38);
    df::store(var_body_v_s, var_i, var_20);
    df::store(var_body_a_s, var_i, var_22);
    var_40 = df::sub(var_39, var_34);               // net bias force minus gravity wrench
    df::store(var_body_f_s, var_i, var_40);
    df::store(var_body_I_s, var_i, var_35);
    return var_6;
}
// Adjoint (reverse-mode derivative) of compute_link_velocity_cpu_func,
// generated code. Replays the forward pass verbatim to recover the primal
// intermediates, then sweeps the statements in reverse order, each df::adj_*
// call accumulating gradients into the adj_N locals and the adj_* output
// buffers. The reverse section mirrors the forward pass exactly and the
// statement order must not be changed.
void adj_compute_link_velocity_cpu_func(
    int var_i,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_qd_start,
    float* var_joint_qd,
    df::float3* var_joint_axis,
    spatial_matrix* var_body_I_m,
    spatial_transform* var_body_X_sc,
    spatial_transform* var_body_X_sm,
    spatial_transform* var_joint_X_pj,
    df::float3* var_gravity,
    spatial_vector* var_joint_S_s,
    spatial_matrix* var_body_I_s,
    spatial_vector* var_body_v_s,
    spatial_vector* var_body_f_s,
    spatial_vector* var_body_a_s,
    int & adj_i,
    int* adj_joint_type,
    int* adj_joint_parent,
    int* adj_joint_qd_start,
    float* adj_joint_qd,
    df::float3* adj_joint_axis,
    spatial_matrix* adj_body_I_m,
    spatial_transform* adj_body_X_sc,
    spatial_transform* adj_body_X_sm,
    spatial_transform* adj_joint_X_pj,
    df::float3* adj_gravity,
    spatial_vector* adj_joint_S_s,
    spatial_matrix* adj_body_I_s,
    spatial_vector* adj_body_v_s,
    spatial_vector* adj_body_f_s,
    spatial_vector* adj_body_a_s,
    int & adj_ret)
{
    //---------
    // primal vars
    int var_0;
    df::float3 var_1;
    int var_2;
    int var_3;
    spatial_transform var_4;
    spatial_transform var_5;
    const int var_6 = 0;
    bool var_7;
    spatial_transform var_8;
    spatial_transform var_9;
    spatial_transform var_10;
    spatial_transform var_11;
    spatial_vector var_12;
    spatial_vector var_13;
    spatial_vector var_14;
    bool var_15;
    spatial_vector var_16;
    spatial_vector var_17;
    spatial_vector var_18;
    spatial_vector var_19;
    spatial_vector var_20;
    spatial_vector var_21;
    spatial_vector var_22;
    spatial_transform var_23;
    spatial_matrix var_24;
    df::float3 var_25;
    const int var_26 = 3;
    float var_27;
    df::float3 var_28;
    spatial_vector var_29;
    spatial_vector var_30;
    df::float3 var_31;
    quat var_32;
    spatial_transform var_33;
    spatial_vector var_34;
    spatial_matrix var_35;
    spatial_vector var_36;
    spatial_vector var_37;
    spatial_vector var_38;
    spatial_vector var_39;
    spatial_vector var_40;
    //---------
    // dual vars (one zero-initialized adjoint accumulator per primal temporary)
    int adj_0 = 0;
    df::float3 adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    spatial_transform adj_4 = 0;
    spatial_transform adj_5 = 0;
    int adj_6 = 0;
    bool adj_7 = 0;
    spatial_transform adj_8 = 0;
    spatial_transform adj_9 = 0;
    spatial_transform adj_10 = 0;
    spatial_transform adj_11 = 0;
    spatial_vector adj_12 = 0;
    spatial_vector adj_13 = 0;
    spatial_vector adj_14 = 0;
    bool adj_15 = 0;
    spatial_vector adj_16 = 0;
    spatial_vector adj_17 = 0;
    spatial_vector adj_18 = 0;
    spatial_vector adj_19 = 0;
    spatial_vector adj_20 = 0;
    spatial_vector adj_21 = 0;
    spatial_vector adj_22 = 0;
    spatial_transform adj_23 = 0;
    spatial_matrix adj_24 = 0;
    df::float3 adj_25 = 0;
    int adj_26 = 0;
    float adj_27 = 0;
    df::float3 adj_28 = 0;
    spatial_vector adj_29 = 0;
    spatial_vector adj_30 = 0;
    df::float3 adj_31 = 0;
    quat adj_32 = 0;
    spatial_transform adj_33 = 0;
    spatial_vector adj_34 = 0;
    spatial_matrix adj_35 = 0;
    spatial_vector adj_36 = 0;
    spatial_vector adj_37 = 0;
    spatial_vector adj_38 = 0;
    spatial_vector adj_39 = 0;
    spatial_vector adj_40 = 0;
    //---------
    // forward (identical replay of the primal kernel)
    var_0 = df::load(var_joint_type, var_i);
    var_1 = df::load(var_joint_axis, var_i);
    var_2 = df::load(var_joint_parent, var_i);
    var_3 = df::load(var_joint_qd_start, var_i);
    var_4 = df::load(var_body_X_sc, var_i);
    var_5 = df::spatial_transform_identity();
    var_7 = (var_2 >= var_6);
    if (var_7) {
        var_8 = df::load(var_body_X_sc, var_2);
    }
    var_9 = df::select(var_7, var_5, var_8);
    var_10 = df::load(var_joint_X_pj, var_i);
    var_11 = df::spatial_transform_multiply(var_9, var_10);
    var_12 = jcalc_motion_cpu_func(var_0, var_1, var_11, var_joint_S_s, var_joint_qd, var_3);
    var_13 = df::spatial_vector();
    var_14 = df::spatial_vector();
    var_15 = (var_2 >= var_6);
    if (var_15) {
        var_16 = df::load(var_body_v_s, var_2);
        var_17 = df::load(var_body_a_s, var_2);
    }
    var_18 = df::select(var_15, var_13, var_16);
    var_19 = df::select(var_15, var_14, var_17);
    var_20 = df::add(var_18, var_12);
    var_21 = df::spatial_cross(var_20, var_12);
    var_22 = df::add(var_19, var_21);
    var_23 = df::load(var_body_X_sm, var_i);
    var_24 = df::load(var_body_I_m, var_i);
    var_25 = df::load(var_gravity, var_6);
    var_27 = df::index(var_24, var_26, var_26);
    var_28 = df::float3();
    var_29 = df::spatial_vector(var_28, var_25);
    var_30 = df::mul(var_29, var_27);
    var_31 = df::spatial_transform_get_translation(var_23);
    var_32 = df::quat_identity();
    var_33 = df::spatial_transform(var_31, var_32);
    var_34 = spatial_transform_wrench_cpu_func(var_33, var_30);
    var_35 = spatial_transform_inertia_cpu_func(var_23, var_24);
    var_36 = df::mul(var_35, var_22);
    var_37 = df::mul(var_35, var_20);
    var_38 = df::spatial_cross_dual(var_20, var_37);
    var_39 = df::add(var_36, var_38);
    df::store(var_body_v_s, var_i, var_20);
    df::store(var_body_a_s, var_i, var_22);
    var_40 = df::sub(var_39, var_34);
    df::store(var_body_f_s, var_i, var_40);
    df::store(var_body_I_s, var_i, var_35);
    goto label0;   // generated control flow: jump from end of replay into the reverse sweep
    //---------
    // reverse (forward statements revisited in opposite order)
    label0:;
    adj_6 += adj_ret;   // gradient of the returned sentinel constant
    df::adj_store(var_body_I_s, var_i, var_35, adj_body_I_s, adj_i, adj_35);
    df::adj_store(var_body_f_s, var_i, var_40, adj_body_f_s, adj_i, adj_40);
    df::adj_sub(var_39, var_34, adj_39, adj_34, adj_40);
    df::adj_store(var_body_a_s, var_i, var_22, adj_body_a_s, adj_i, adj_22);
    df::adj_store(var_body_v_s, var_i, var_20, adj_body_v_s, adj_i, adj_20);
    df::adj_add(var_36, var_38, adj_36, adj_38, adj_39);
    df::adj_spatial_cross_dual(var_20, var_37, adj_20, adj_37, adj_38);
    df::adj_mul(var_35, var_20, adj_35, adj_20, adj_37);
    df::adj_mul(var_35, var_22, adj_35, adj_22, adj_36);
    adj_spatial_transform_inertia_cpu_func(var_23, var_24, adj_23, adj_24, adj_35);
    adj_spatial_transform_wrench_cpu_func(var_33, var_30, adj_33, adj_30, adj_34);
    df::adj_spatial_transform(var_31, var_32, adj_31, adj_32, adj_33);
    df::adj_spatial_transform_get_translation(var_23, adj_23, adj_31);
    df::adj_mul(var_29, var_27, adj_29, adj_27, adj_30);
    df::adj_spatial_vector(var_28, var_25, adj_28, adj_25, adj_29);
    df::adj_index(var_24, var_26, var_26, adj_24, adj_26, adj_26, adj_27);
    df::adj_load(var_gravity, var_6, adj_gravity, adj_6, adj_25);
    df::adj_load(var_body_I_m, var_i, adj_body_I_m, adj_i, adj_24);
    df::adj_load(var_body_X_sm, var_i, adj_body_X_sm, adj_i, adj_23);
    df::adj_add(var_19, var_21, adj_19, adj_21, adj_22);
    df::adj_spatial_cross(var_20, var_12, adj_20, adj_12, adj_21);
    df::adj_add(var_18, var_12, adj_18, adj_12, adj_20);
    df::adj_select(var_15, var_14, var_17, adj_15, adj_14, adj_17, adj_19);
    df::adj_select(var_15, var_13, var_16, adj_15, adj_13, adj_16, adj_18);
    if (var_15) {
        df::adj_load(var_body_a_s, var_2, adj_body_a_s, adj_2, adj_17);
        df::adj_load(var_body_v_s, var_2, adj_body_v_s, adj_2, adj_16);
    }
    adj_jcalc_motion_cpu_func(var_0, var_1, var_11, var_joint_S_s, var_joint_qd, var_3, adj_0, adj_1, adj_11, adj_joint_S_s, adj_joint_qd, adj_3, adj_12);
    df::adj_spatial_transform_multiply(var_9, var_10, adj_9, adj_10, adj_11);
    df::adj_load(var_joint_X_pj, var_i, adj_joint_X_pj, adj_i, adj_10);
    df::adj_select(var_7, var_5, var_8, adj_7, adj_5, adj_8, adj_9);
    if (var_7) {
        df::adj_load(var_body_X_sc, var_2, adj_body_X_sc, adj_2, adj_8);
    }
    df::adj_load(var_body_X_sc, var_i, adj_body_X_sc, adj_i, adj_4);
    df::adj_load(var_joint_qd_start, var_i, adj_joint_qd_start, adj_i, adj_3);
    df::adj_load(var_joint_parent, var_i, adj_joint_parent, adj_i, adj_2);
    df::adj_load(var_joint_axis, var_i, adj_joint_axis, adj_i, adj_1);
    df::adj_load(var_joint_type, var_i, adj_joint_type, adj_i, adj_0);
    return;
}
// Forward kernel (generated code; SSA-style var_N temporaries). Evaluates the
// generalized joint force (tau) for the link at index joint_end - offset - 1 —
// links are processed child-before-parent, hence the reversed indexing.
// The link's total spatial force (body force fb + accumulated child forces ft)
// is fed to jcalc_tau, which writes tau through the var_tau pointer, and is
// then pushed onto the parent's accumulator body_ft_s via atomic_add.
// Returns the constant 0.
int compute_link_tau_cpu_func(
    int var_offset,
    int var_joint_end,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    float* var_joint_qd,
    float* var_joint_act,
    float* var_joint_target,
    float* var_joint_target_ke,
    float* var_joint_target_kd,
    float* var_joint_limit_lower,
    float* var_joint_limit_upper,
    float* var_joint_limit_ke,
    float* var_joint_limit_kd,
    spatial_vector* var_joint_S_s,
    spatial_vector* var_body_fb_s,
    spatial_vector* var_body_ft_s,
    float* var_tau)
{
    //---------
    // primal vars
    int var_0;
    const int var_1 = 1;
    int var_2;
    int var_3;
    int var_4;
    int var_5;
    int var_6;
    float var_7;
    float var_8;
    float var_9;
    float var_10;
    spatial_vector var_11;
    spatial_vector var_12;
    spatial_vector var_13;
    int var_14;
    const int var_15 = 0;
    bool var_16;
    //---------
    // forward
    var_0 = df::sub(var_joint_end, var_offset);
    var_2 = df::sub(var_0, var_1);                  // link index, counted backwards from joint_end
    var_3 = df::load(var_joint_type, var_2);
    var_4 = df::load(var_joint_parent, var_2);      // negative parent index => root link
    var_5 = df::load(var_joint_qd_start, var_2);
    var_6 = df::load(var_joint_q_start, var_2);
    var_7 = df::load(var_joint_target_ke, var_2);   // PD target stiffness
    var_8 = df::load(var_joint_target_kd, var_2);   // PD target damping
    var_9 = df::load(var_joint_limit_ke, var_2);    // joint-limit stiffness
    var_10 = df::load(var_joint_limit_kd, var_2);   // joint-limit damping
    var_11 = df::load(var_body_fb_s, var_2);
    var_12 = df::load(var_body_ft_s, var_2);
    var_13 = df::add(var_11, var_12);               // total spatial force transmitted through this joint
    // writes tau through var_tau; the int return value is unused (generator artifact)
    var_14 = jcalc_tau_cpu_func(var_3, var_7, var_8, var_9, var_10, var_joint_S_s, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_limit_lower, var_joint_limit_upper, var_6, var_5, var_13, var_tau);
    var_16 = (var_4 >= var_15);
    if (var_16) {
        // propagate this link's force to its parent's accumulator
        df::atomic_add(var_body_ft_s, var_4, var_13);
    }
    return var_15;
}
// Adjoint (reverse-mode derivative) of compute_link_tau_cpu_func, generated
// code. Replays the forward pass verbatim — including the side-effecting
// jcalc_tau and atomic_add — to recover primal intermediates, then sweeps the
// statements in reverse order, accumulating gradients into adj_N locals and
// the adj_* output buffers. Statement order must not be changed.
void adj_compute_link_tau_cpu_func(
    int var_offset,
    int var_joint_end,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    float* var_joint_qd,
    float* var_joint_act,
    float* var_joint_target,
    float* var_joint_target_ke,
    float* var_joint_target_kd,
    float* var_joint_limit_lower,
    float* var_joint_limit_upper,
    float* var_joint_limit_ke,
    float* var_joint_limit_kd,
    spatial_vector* var_joint_S_s,
    spatial_vector* var_body_fb_s,
    spatial_vector* var_body_ft_s,
    float* var_tau,
    int & adj_offset,
    int & adj_joint_end,
    int* adj_joint_type,
    int* adj_joint_parent,
    int* adj_joint_q_start,
    int* adj_joint_qd_start,
    float* adj_joint_q,
    float* adj_joint_qd,
    float* adj_joint_act,
    float* adj_joint_target,
    float* adj_joint_target_ke,
    float* adj_joint_target_kd,
    float* adj_joint_limit_lower,
    float* adj_joint_limit_upper,
    float* adj_joint_limit_ke,
    float* adj_joint_limit_kd,
    spatial_vector* adj_joint_S_s,
    spatial_vector* adj_body_fb_s,
    spatial_vector* adj_body_ft_s,
    float* adj_tau,
    int & adj_ret)
{
    //---------
    // primal vars
    int var_0;
    const int var_1 = 1;
    int var_2;
    int var_3;
    int var_4;
    int var_5;
    int var_6;
    float var_7;
    float var_8;
    float var_9;
    float var_10;
    spatial_vector var_11;
    spatial_vector var_12;
    spatial_vector var_13;
    int var_14;
    const int var_15 = 0;
    bool var_16;
    //---------
    // dual vars (one zero-initialized adjoint accumulator per primal temporary)
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    float adj_7 = 0;
    float adj_8 = 0;
    float adj_9 = 0;
    float adj_10 = 0;
    spatial_vector adj_11 = 0;
    spatial_vector adj_12 = 0;
    spatial_vector adj_13 = 0;
    int adj_14 = 0;
    int adj_15 = 0;
    bool adj_16 = 0;
    //---------
    // forward (identical replay of the primal kernel)
    var_0 = df::sub(var_joint_end, var_offset);
    var_2 = df::sub(var_0, var_1);
    var_3 = df::load(var_joint_type, var_2);
    var_4 = df::load(var_joint_parent, var_2);
    var_5 = df::load(var_joint_qd_start, var_2);
    var_6 = df::load(var_joint_q_start, var_2);
    var_7 = df::load(var_joint_target_ke, var_2);
    var_8 = df::load(var_joint_target_kd, var_2);
    var_9 = df::load(var_joint_limit_ke, var_2);
    var_10 = df::load(var_joint_limit_kd, var_2);
    var_11 = df::load(var_body_fb_s, var_2);
    var_12 = df::load(var_body_ft_s, var_2);
    var_13 = df::add(var_11, var_12);
    var_14 = jcalc_tau_cpu_func(var_3, var_7, var_8, var_9, var_10, var_joint_S_s, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_limit_lower, var_joint_limit_upper, var_6, var_5, var_13, var_tau);
    var_16 = (var_4 >= var_15);
    if (var_16) {
        df::atomic_add(var_body_ft_s, var_4, var_13);
    }
    goto label0;   // generated control flow: jump from end of replay into the reverse sweep
    //---------
    // reverse (forward statements revisited in opposite order)
    label0:;
    adj_15 += adj_ret;   // gradient of the returned sentinel constant
    if (var_16) {
        df::adj_atomic_add(var_body_ft_s, var_4, var_13, adj_body_ft_s, adj_4, adj_13);
    }
    adj_jcalc_tau_cpu_func(var_3, var_7, var_8, var_9, var_10, var_joint_S_s, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_limit_lower, var_joint_limit_upper, var_6, var_5, var_13, var_tau, adj_3, adj_7, adj_8, adj_9, adj_10, adj_joint_S_s, adj_joint_q, adj_joint_qd, adj_joint_act, adj_joint_target, adj_joint_limit_lower, adj_joint_limit_upper, adj_6, adj_5, adj_13, adj_tau, adj_14);
    df::adj_add(var_11, var_12, adj_11, adj_12, adj_13);
    df::adj_load(var_body_ft_s, var_2, adj_body_ft_s, adj_2, adj_12);
    df::adj_load(var_body_fb_s, var_2, adj_body_fb_s, adj_2, adj_11);
    df::adj_load(var_joint_limit_kd, var_2, adj_joint_limit_kd, adj_2, adj_10);
    df::adj_load(var_joint_limit_ke, var_2, adj_joint_limit_ke, adj_2, adj_9);
    df::adj_load(var_joint_target_kd, var_2, adj_joint_target_kd, adj_2, adj_8);
    df::adj_load(var_joint_target_ke, var_2, adj_joint_target_ke, adj_2, adj_7);
    df::adj_load(var_joint_q_start, var_2, adj_joint_q_start, adj_2, adj_6);
    df::adj_load(var_joint_qd_start, var_2, adj_joint_qd_start, adj_2, adj_5);
    df::adj_load(var_joint_parent, var_2, adj_joint_parent, adj_2, adj_4);
    df::adj_load(var_joint_type, var_2, adj_joint_type, adj_2, adj_3);
    df::adj_sub(var_0, var_1, adj_0, adj_1, adj_2);
    df::adj_sub(var_joint_end, var_offset, adj_joint_end, adj_offset, adj_0);
    return;
}
// Semi-implicit (symplectic) Euler integration step for one particle.
// Reads position/velocity/force/inverse-mass for the particle selected by
// df::tid(), advances velocity first, then position, and writes the results
// into the var_x_new / var_v_new output arrays.
void integrate_particles_cpu_kernel_forward(
    df::float3* var_x,
    df::float3* var_v,
    df::float3* var_f,
    float* var_w,
    df::float3* var_gravity,
    float var_dt,
    df::float3* var_x_new,
    df::float3* var_v_new)
{
    const int tid = df::tid();

    df::float3 x0 = df::load(var_x, tid);
    df::float3 v0 = df::load(var_v, tid);
    df::float3 f = df::load(var_f, tid);
    float w = df::load(var_w, tid);              // inverse mass
    df::float3 g = df::load(var_gravity, 0);

    // Gravity is gated by step(-w): particles with zero inverse mass
    // (kinematic) get no gravity — presumably df::step(x) is 1 for x < 0 and
    // 0 otherwise; TODO confirm against df::step.
    float gravity_gate = df::step(df::sub(0.0, w));

    // a = f*w + g*gate;  v1 = v0 + a*dt;  x1 = x0 + v1*dt
    df::float3 accel = df::add(df::mul(f, w), df::mul(g, gravity_gate));
    df::float3 v1 = df::add(v0, df::mul(accel, var_dt));
    df::float3 x1 = df::add(x0, df::mul(v1, var_dt));

    df::store(var_x_new, tid, x1);
    df::store(var_v_new, tid, v1);
}
// Adjoint (reverse-mode derivative) of the particle semi-implicit Euler step.
// Generated code: replays the forward pass to recover primal intermediates,
// then walks the statements in reverse, each df::adj_* call accumulating
// gradients into the adj_* arrays. Statement order must not be changed.
void integrate_particles_cpu_kernel_backward(
    df::float3* var_x,
    df::float3* var_v,
    df::float3* var_f,
    float* var_w,
    df::float3* var_gravity,
    float var_dt,
    df::float3* var_x_new,
    df::float3* var_v_new,
    df::float3* adj_x,
    df::float3* adj_v,
    df::float3* adj_f,
    float* adj_w,
    df::float3* adj_gravity,
    float adj_dt,
    df::float3* adj_x_new,
    df::float3* adj_v_new)
{
    //---------
    // primal vars
    int var_0;
    df::float3 var_1;
    df::float3 var_2;
    df::float3 var_3;
    float var_4;
    const int var_5 = 0;
    df::float3 var_6;
    df::float3 var_7;
    const float var_8 = 0.0;
    float var_9;
    float var_10;
    df::float3 var_11;
    df::float3 var_12;
    df::float3 var_13;
    df::float3 var_14;
    df::float3 var_15;
    df::float3 var_16;
    //---------
    // dual vars (one zero-initialized adjoint accumulator per primal temporary)
    int adj_0 = 0;
    df::float3 adj_1 = 0;
    df::float3 adj_2 = 0;
    df::float3 adj_3 = 0;
    float adj_4 = 0;
    int adj_5 = 0;
    df::float3 adj_6 = 0;
    df::float3 adj_7 = 0;
    float adj_8 = 0;
    float adj_9 = 0;
    float adj_10 = 0;
    df::float3 adj_11 = 0;
    df::float3 adj_12 = 0;
    df::float3 adj_13 = 0;
    df::float3 adj_14 = 0;
    df::float3 adj_15 = 0;
    df::float3 adj_16 = 0;
    //---------
    // forward (identical replay of integrate_particles_cpu_kernel_forward)
    var_0 = df::tid();
    var_1 = df::load(var_x, var_0);
    var_2 = df::load(var_v, var_0);
    var_3 = df::load(var_f, var_0);
    var_4 = df::load(var_w, var_0);            // inverse mass
    var_6 = df::load(var_gravity, var_5);
    var_7 = df::mul(var_3, var_4);             // f * w
    var_9 = df::sub(var_8, var_4);
    var_10 = df::step(var_9);                  // gravity gate: zero for kinematic particles (w == 0)
    var_11 = df::mul(var_6, var_10);
    var_12 = df::add(var_7, var_11);           // acceleration
    var_13 = df::mul(var_12, var_dt);
    var_14 = df::add(var_2, var_13);           // new velocity
    var_15 = df::mul(var_14, var_dt);
    var_16 = df::add(var_1, var_15);           // new position
    df::store(var_x_new, var_0, var_16);
    df::store(var_v_new, var_0, var_14);
    //---------
    // reverse (forward statements revisited in opposite order)
    df::adj_store(var_v_new, var_0, var_14, adj_v_new, adj_0, adj_14);
    df::adj_store(var_x_new, var_0, var_16, adj_x_new, adj_0, adj_16);
    df::adj_add(var_1, var_15, adj_1, adj_15, adj_16);
    df::adj_mul(var_14, var_dt, adj_14, adj_dt, adj_15);
    df::adj_add(var_2, var_13, adj_2, adj_13, adj_14);
    df::adj_mul(var_12, var_dt, adj_12, adj_dt, adj_13);
    df::adj_add(var_7, var_11, adj_7, adj_11, adj_12);
    df::adj_mul(var_6, var_10, adj_6, adj_10, adj_11);
    df::adj_step(var_9, adj_9, adj_10);
    df::adj_sub(var_8, var_4, adj_8, adj_4, adj_9);
    df::adj_mul(var_3, var_4, adj_3, adj_4, adj_7);
    df::adj_load(var_gravity, var_5, adj_gravity, adj_5, adj_6);
    df::adj_load(var_w, var_0, adj_w, adj_0, adj_4);
    df::adj_load(var_f, var_0, adj_f, adj_0, adj_3);
    df::adj_load(var_v, var_0, adj_v, adj_0, adj_2);
    df::adj_load(var_x, var_0, adj_x, adj_0, adj_1);
    return;
}
// Python entry points
void integrate_particles_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_f,
torch::Tensor var_w,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_x_new,
torch::Tensor var_v_new)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
integrate_particles_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<df::float3*>(var_f),
cast<float*>(var_w),
cast<df::float3*>(var_gravity),
var_dt,
cast<df::float3*>(var_x_new),
cast<df::float3*>(var_v_new));
}
}
void integrate_particles_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_f,
torch::Tensor var_w,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_x_new,
torch::Tensor var_v_new,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_f,
torch::Tensor adj_w,
torch::Tensor adj_gravity,
float adj_dt,
torch::Tensor adj_x_new,
torch::Tensor adj_v_new)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
integrate_particles_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<df::float3*>(var_f),
cast<float*>(var_w),
cast<df::float3*>(var_gravity),
var_dt,
cast<df::float3*>(var_x_new),
cast<df::float3*>(var_v_new),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<df::float3*>(adj_f),
cast<float*>(adj_w),
cast<df::float3*>(adj_gravity),
adj_dt,
cast<df::float3*>(adj_x_new),
cast<df::float3*>(adj_v_new));
}
}
// Python entry points
void integrate_particles_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_f,
torch::Tensor var_w,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_x_new,
torch::Tensor var_v_new);
void integrate_particles_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_f,
torch::Tensor var_w,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_x_new,
torch::Tensor var_v_new,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_f,
torch::Tensor adj_w,
torch::Tensor adj_gravity,
float adj_dt,
torch::Tensor adj_x_new,
torch::Tensor adj_v_new);
// Semi-implicit Euler step for one rigid body (generated code; SSA-style
// var_N temporaries). Linear part mirrors the particle integrator; the
// angular part integrates angular velocity in the body frame and updates the
// orientation via the quaternion derivative:
//   w_b   = R^-1 w;  w_b' = w_b + (I^-1 (R^-1 t)) dt;  w' = R w_b'
//   r'    = normalize(r + 0.5 * quat(w', 0) * r * dt)
void integrate_rigids_cpu_kernel_forward(
    df::float3* var_rigid_x,
    quat* var_rigid_r,
    df::float3* var_rigid_v,
    df::float3* var_rigid_w,
    df::float3* var_rigid_f,
    df::float3* var_rigid_t,
    float* var_inv_m,
    mat33* var_inv_I,
    df::float3* var_gravity,
    float var_dt,
    df::float3* var_rigid_x_new,
    quat* var_rigid_r_new,
    df::float3* var_rigid_v_new,
    df::float3* var_rigid_w_new)
{
    //---------
    // primal vars
    int var_0;
    df::float3 var_1;
    quat var_2;
    df::float3 var_3;
    df::float3 var_4;
    df::float3 var_5;
    df::float3 var_6;
    float var_7;
    mat33 var_8;
    const int var_9 = 0;
    df::float3 var_10;
    df::float3 var_11;
    float var_12;
    df::float3 var_13;
    df::float3 var_14;
    df::float3 var_15;
    df::float3 var_16;
    df::float3 var_17;
    df::float3 var_18;
    df::float3 var_19;
    df::float3 var_20;
    df::float3 var_21;
    df::float3 var_22;
    df::float3 var_23;
    df::float3 var_24;
    const float var_25 = 0.0;
    quat var_26;
    quat var_27;
    const float var_28 = 0.5;
    quat var_29;
    quat var_30;
    quat var_31;
    quat var_32;
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::load(var_rigid_x, var_0);      // position
    var_2 = df::load(var_rigid_r, var_0);      // orientation quaternion
    var_3 = df::load(var_rigid_v, var_0);      // linear velocity
    var_4 = df::load(var_rigid_w, var_0);      // angular velocity (world frame, presumably — see rotate_inv below)
    var_5 = df::load(var_rigid_f, var_0);      // force
    var_6 = df::load(var_rigid_t, var_0);      // torque
    var_7 = df::load(var_inv_m, var_0);        // inverse mass
    var_8 = df::load(var_inv_I, var_0);        // inverse inertia (body frame, presumably)
    var_10 = df::load(var_gravity, var_9);
    var_11 = df::mul(var_5, var_7);            // f * inv_m
    var_12 = df::nonzero(var_7);               // gravity gate: 0 for static bodies (inv_m == 0)
    var_13 = df::mul(var_10, var_12);
    var_14 = df::add(var_11, var_13);          // linear acceleration
    var_15 = df::mul(var_14, var_dt);
    var_16 = df::add(var_3, var_15);           // new linear velocity
    var_17 = df::mul(var_16, var_dt);
    var_18 = df::add(var_1, var_17);           // new position
    var_19 = df::rotate_inv(var_2, var_4);     // angular velocity in body frame
    var_20 = df::rotate_inv(var_2, var_6);     // torque in body frame
    var_21 = df::mul(var_8, var_20);           // angular acceleration = I^-1 * t
    var_22 = df::mul(var_21, var_dt);
    var_23 = df::add(var_19, var_22);
    var_24 = df::rotate(var_2, var_23);        // new angular velocity back in world frame
    var_26 = df::quat(var_24, var_25);         // pure quaternion (w', 0)
    var_27 = df::mul(var_26, var_2);
    var_29 = df::mul(var_27, var_28);          // 0.5 * quat(w',0) * r  — quaternion derivative
    var_30 = df::mul(var_29, var_dt);
    var_31 = df::add(var_2, var_30);
    var_32 = df::normalize(var_31);            // renormalize to keep a unit quaternion
    df::store(var_rigid_x_new, var_0, var_18);
    df::store(var_rigid_r_new, var_0, var_32);
    df::store(var_rigid_v_new, var_0, var_16);
    df::store(var_rigid_w_new, var_0, var_24);
}
// Adjoint (reverse-mode derivative) of the rigid-body semi-implicit Euler
// step. Generated code: replays the forward pass to recover primal
// intermediates, then walks the statements in reverse, each df::adj_* call
// accumulating gradients into the adj_* arrays. Statement order must not be
// changed.
void integrate_rigids_cpu_kernel_backward(
    df::float3* var_rigid_x,
    quat* var_rigid_r,
    df::float3* var_rigid_v,
    df::float3* var_rigid_w,
    df::float3* var_rigid_f,
    df::float3* var_rigid_t,
    float* var_inv_m,
    mat33* var_inv_I,
    df::float3* var_gravity,
    float var_dt,
    df::float3* var_rigid_x_new,
    quat* var_rigid_r_new,
    df::float3* var_rigid_v_new,
    df::float3* var_rigid_w_new,
    df::float3* adj_rigid_x,
    quat* adj_rigid_r,
    df::float3* adj_rigid_v,
    df::float3* adj_rigid_w,
    df::float3* adj_rigid_f,
    df::float3* adj_rigid_t,
    float* adj_inv_m,
    mat33* adj_inv_I,
    df::float3* adj_gravity,
    float adj_dt,
    df::float3* adj_rigid_x_new,
    quat* adj_rigid_r_new,
    df::float3* adj_rigid_v_new,
    df::float3* adj_rigid_w_new)
{
    //---------
    // primal vars
    int var_0;
    df::float3 var_1;
    quat var_2;
    df::float3 var_3;
    df::float3 var_4;
    df::float3 var_5;
    df::float3 var_6;
    float var_7;
    mat33 var_8;
    const int var_9 = 0;
    df::float3 var_10;
    df::float3 var_11;
    float var_12;
    df::float3 var_13;
    df::float3 var_14;
    df::float3 var_15;
    df::float3 var_16;
    df::float3 var_17;
    df::float3 var_18;
    df::float3 var_19;
    df::float3 var_20;
    df::float3 var_21;
    df::float3 var_22;
    df::float3 var_23;
    df::float3 var_24;
    const float var_25 = 0.0;
    quat var_26;
    quat var_27;
    const float var_28 = 0.5;
    quat var_29;
    quat var_30;
    quat var_31;
    quat var_32;
    //---------
    // dual vars (one zero-initialized adjoint accumulator per primal temporary)
    int adj_0 = 0;
    df::float3 adj_1 = 0;
    quat adj_2 = 0;
    df::float3 adj_3 = 0;
    df::float3 adj_4 = 0;
    df::float3 adj_5 = 0;
    df::float3 adj_6 = 0;
    float adj_7 = 0;
    mat33 adj_8 = 0;
    int adj_9 = 0;
    df::float3 adj_10 = 0;
    df::float3 adj_11 = 0;
    float adj_12 = 0;
    df::float3 adj_13 = 0;
    df::float3 adj_14 = 0;
    df::float3 adj_15 = 0;
    df::float3 adj_16 = 0;
    df::float3 adj_17 = 0;
    df::float3 adj_18 = 0;
    df::float3 adj_19 = 0;
    df::float3 adj_20 = 0;
    df::float3 adj_21 = 0;
    df::float3 adj_22 = 0;
    df::float3 adj_23 = 0;
    df::float3 adj_24 = 0;
    float adj_25 = 0;
    quat adj_26 = 0;
    quat adj_27 = 0;
    float adj_28 = 0;
    quat adj_29 = 0;
    quat adj_30 = 0;
    quat adj_31 = 0;
    quat adj_32 = 0;
    //---------
    // forward (identical replay of integrate_rigids_cpu_kernel_forward)
    var_0 = df::tid();
    var_1 = df::load(var_rigid_x, var_0);
    var_2 = df::load(var_rigid_r, var_0);
    var_3 = df::load(var_rigid_v, var_0);
    var_4 = df::load(var_rigid_w, var_0);
    var_5 = df::load(var_rigid_f, var_0);
    var_6 = df::load(var_rigid_t, var_0);
    var_7 = df::load(var_inv_m, var_0);
    var_8 = df::load(var_inv_I, var_0);
    var_10 = df::load(var_gravity, var_9);
    var_11 = df::mul(var_5, var_7);
    var_12 = df::nonzero(var_7);
    var_13 = df::mul(var_10, var_12);
    var_14 = df::add(var_11, var_13);
    var_15 = df::mul(var_14, var_dt);
    var_16 = df::add(var_3, var_15);
    var_17 = df::mul(var_16, var_dt);
    var_18 = df::add(var_1, var_17);
    var_19 = df::rotate_inv(var_2, var_4);
    var_20 = df::rotate_inv(var_2, var_6);
    var_21 = df::mul(var_8, var_20);
    var_22 = df::mul(var_21, var_dt);
    var_23 = df::add(var_19, var_22);
    var_24 = df::rotate(var_2, var_23);
    var_26 = df::quat(var_24, var_25);
    var_27 = df::mul(var_26, var_2);
    var_29 = df::mul(var_27, var_28);
    var_30 = df::mul(var_29, var_dt);
    var_31 = df::add(var_2, var_30);
    var_32 = df::normalize(var_31);
    df::store(var_rigid_x_new, var_0, var_18);
    df::store(var_rigid_r_new, var_0, var_32);
    df::store(var_rigid_v_new, var_0, var_16);
    df::store(var_rigid_w_new, var_0, var_24);
    //---------
    // reverse (forward statements revisited in opposite order)
    df::adj_store(var_rigid_w_new, var_0, var_24, adj_rigid_w_new, adj_0, adj_24);
    df::adj_store(var_rigid_v_new, var_0, var_16, adj_rigid_v_new, adj_0, adj_16);
    df::adj_store(var_rigid_r_new, var_0, var_32, adj_rigid_r_new, adj_0, adj_32);
    df::adj_store(var_rigid_x_new, var_0, var_18, adj_rigid_x_new, adj_0, adj_18);
    df::adj_normalize(var_31, adj_31, adj_32);
    df::adj_add(var_2, var_30, adj_2, adj_30, adj_31);
    df::adj_mul(var_29, var_dt, adj_29, adj_dt, adj_30);
    df::adj_mul(var_27, var_28, adj_27, adj_28, adj_29);
    df::adj_mul(var_26, var_2, adj_26, adj_2, adj_27);
    df::adj_quat(var_24, var_25, adj_24, adj_25, adj_26);
    df::adj_rotate(var_2, var_23, adj_2, adj_23, adj_24);
    df::adj_add(var_19, var_22, adj_19, adj_22, adj_23);
    df::adj_mul(var_21, var_dt, adj_21, adj_dt, adj_22);
    df::adj_mul(var_8, var_20, adj_8, adj_20, adj_21);
    df::adj_rotate_inv(var_2, var_6, adj_2, adj_6, adj_20);
    df::adj_rotate_inv(var_2, var_4, adj_2, adj_4, adj_19);
    df::adj_add(var_1, var_17, adj_1, adj_17, adj_18);
    df::adj_mul(var_16, var_dt, adj_16, adj_dt, adj_17);
    df::adj_add(var_3, var_15, adj_3, adj_15, adj_16);
    df::adj_mul(var_14, var_dt, adj_14, adj_dt, adj_15);
    df::adj_add(var_11, var_13, adj_11, adj_13, adj_14);
    df::adj_mul(var_10, var_12, adj_10, adj_12, adj_13);
    df::adj_nonzero(var_7, adj_7, adj_12);
    df::adj_mul(var_5, var_7, adj_5, adj_7, adj_11);
    df::adj_load(var_gravity, var_9, adj_gravity, adj_9, adj_10);
    df::adj_load(var_inv_I, var_0, adj_inv_I, adj_0, adj_8);
    df::adj_load(var_inv_m, var_0, adj_inv_m, adj_0, adj_7);
    df::adj_load(var_rigid_t, var_0, adj_rigid_t, adj_0, adj_6);
    df::adj_load(var_rigid_f, var_0, adj_rigid_f, adj_0, adj_5);
    df::adj_load(var_rigid_w, var_0, adj_rigid_w, adj_0, adj_4);
    df::adj_load(var_rigid_v, var_0, adj_rigid_v, adj_0, adj_3);
    df::adj_load(var_rigid_r, var_0, adj_rigid_r, adj_0, adj_2);
    df::adj_load(var_rigid_x, var_0, adj_rigid_x, adj_0, adj_1);
    return;
}
// Python entry points
// Host-side driver for the forward rigid-body integration kernel.
// Serially emulates a GPU launch of `dim` threads: each iteration publishes
// its index through the global s_threadIdx (presumably read back by
// df::tid() inside the kernel — confirm in the df runtime) and invokes the
// generated kernel on raw device-style pointers extracted from the tensors.
void integrate_rigids_cpu_forward(int dim,
    torch::Tensor var_rigid_x,
    torch::Tensor var_rigid_r,
    torch::Tensor var_rigid_v,
    torch::Tensor var_rigid_w,
    torch::Tensor var_rigid_f,
    torch::Tensor var_rigid_t,
    torch::Tensor var_inv_m,
    torch::Tensor var_inv_I,
    torch::Tensor var_gravity,
    float var_dt,
    torch::Tensor var_rigid_x_new,
    torch::Tensor var_rigid_r_new,
    torch::Tensor var_rigid_v_new,
    torch::Tensor var_rigid_w_new)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid;
        integrate_rigids_cpu_kernel_forward(
            cast<df::float3*>(var_rigid_x),
            cast<quat*>(var_rigid_r),
            cast<df::float3*>(var_rigid_v),
            cast<df::float3*>(var_rigid_w),
            cast<df::float3*>(var_rigid_f),
            cast<df::float3*>(var_rigid_t),
            cast<float*>(var_inv_m),
            cast<mat33*>(var_inv_I),
            cast<df::float3*>(var_gravity),
            var_dt,
            cast<df::float3*>(var_rigid_x_new),
            cast<quat*>(var_rigid_r_new),
            cast<df::float3*>(var_rigid_v_new),
            cast<df::float3*>(var_rigid_w_new));
        ++tid;
    }
}
// Host-side driver for the backward (adjoint) rigid-body integration kernel.
// Mirrors integrate_rigids_cpu_forward: one serial iteration per body, with
// the iteration index exposed via s_threadIdx before each kernel call.
// Primal inputs/outputs are passed first, followed by their adjoint buffers.
void integrate_rigids_cpu_backward(int dim,
    torch::Tensor var_rigid_x,
    torch::Tensor var_rigid_r,
    torch::Tensor var_rigid_v,
    torch::Tensor var_rigid_w,
    torch::Tensor var_rigid_f,
    torch::Tensor var_rigid_t,
    torch::Tensor var_inv_m,
    torch::Tensor var_inv_I,
    torch::Tensor var_gravity,
    float var_dt,
    torch::Tensor var_rigid_x_new,
    torch::Tensor var_rigid_r_new,
    torch::Tensor var_rigid_v_new,
    torch::Tensor var_rigid_w_new,
    torch::Tensor adj_rigid_x,
    torch::Tensor adj_rigid_r,
    torch::Tensor adj_rigid_v,
    torch::Tensor adj_rigid_w,
    torch::Tensor adj_rigid_f,
    torch::Tensor adj_rigid_t,
    torch::Tensor adj_inv_m,
    torch::Tensor adj_inv_I,
    torch::Tensor adj_gravity,
    float adj_dt,
    torch::Tensor adj_rigid_x_new,
    torch::Tensor adj_rigid_r_new,
    torch::Tensor adj_rigid_v_new,
    torch::Tensor adj_rigid_w_new)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid;
        integrate_rigids_cpu_kernel_backward(
            cast<df::float3*>(var_rigid_x),
            cast<quat*>(var_rigid_r),
            cast<df::float3*>(var_rigid_v),
            cast<df::float3*>(var_rigid_w),
            cast<df::float3*>(var_rigid_f),
            cast<df::float3*>(var_rigid_t),
            cast<float*>(var_inv_m),
            cast<mat33*>(var_inv_I),
            cast<df::float3*>(var_gravity),
            var_dt,
            cast<df::float3*>(var_rigid_x_new),
            cast<quat*>(var_rigid_r_new),
            cast<df::float3*>(var_rigid_v_new),
            cast<df::float3*>(var_rigid_w_new),
            cast<df::float3*>(adj_rigid_x),
            cast<quat*>(adj_rigid_r),
            cast<df::float3*>(adj_rigid_v),
            cast<df::float3*>(adj_rigid_w),
            cast<df::float3*>(adj_rigid_f),
            cast<df::float3*>(adj_rigid_t),
            cast<float*>(adj_inv_m),
            cast<mat33*>(adj_inv_I),
            cast<df::float3*>(adj_gravity),
            adj_dt,
            cast<df::float3*>(adj_rigid_x_new),
            cast<quat*>(adj_rigid_r_new),
            cast<df::float3*>(adj_rigid_v_new),
            cast<df::float3*>(adj_rigid_w_new));
        ++tid;
    }
}
// Python entry points
// NOTE(review): the two declarations below restate the definitions emitted
// immediately above — the code generator appends a prototype after each
// definition (likely for collection into a binding header). Redundant in
// this translation unit but harmless.
void integrate_rigids_cpu_forward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t,
torch::Tensor var_inv_m,
torch::Tensor var_inv_I,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_rigid_x_new,
torch::Tensor var_rigid_r_new,
torch::Tensor var_rigid_v_new,
torch::Tensor var_rigid_w_new);
void integrate_rigids_cpu_backward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t,
torch::Tensor var_inv_m,
torch::Tensor var_inv_I,
torch::Tensor var_gravity,
float var_dt,
torch::Tensor var_rigid_x_new,
torch::Tensor var_rigid_r_new,
torch::Tensor var_rigid_v_new,
torch::Tensor var_rigid_w_new,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_rigid_f,
torch::Tensor adj_rigid_t,
torch::Tensor adj_inv_m,
torch::Tensor adj_inv_I,
torch::Tensor adj_gravity,
float adj_dt,
torch::Tensor adj_rigid_x_new,
torch::Tensor adj_rigid_r_new,
torch::Tensor adj_rigid_v_new,
torch::Tensor adj_rigid_w_new);
// Autogenerated CPU kernel (forward pass): damped linear spring forces.
// One invocation handles the spring indexed by df::tid(); SSA-style var_N
// temporaries mirror the generating expression graph. Do not hand-edit or
// reorder statements — eval_springs_cpu_kernel_backward replays this exact
// sequence to compute adjoints.
void eval_springs_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
int* var_spring_indices,
float* var_spring_rest_lengths,
float* var_spring_stiffness,
float* var_spring_damping,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 2;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
float var_10;
float var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
float var_19;
const float var_20 = 1.0;
float var_21;
df::float3 var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
df::float3 var_28;
//---------
// forward
var_0 = df::tid();
// var_5 / var_9: particle indices of the two spring endpoints
// (spring_indices is packed as [i0, j0, i1, j1, ...], stride 2)
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_spring_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_spring_indices, var_8);
// per-spring material parameters: stiffness, damping, rest length
var_10 = df::load(var_spring_stiffness, var_0);
var_11 = df::load(var_spring_damping, var_0);
var_12 = df::load(var_spring_rest_lengths, var_0);
var_13 = df::load(var_x, var_5);
var_14 = df::load(var_x, var_9);
var_15 = df::load(var_v, var_5);
var_16 = df::load(var_v, var_9);
// var_17: relative position, var_18: relative velocity of the endpoints
var_17 = df::sub(var_13, var_14);
var_18 = df::sub(var_15, var_16);
var_19 = df::length(var_17);
// NOTE(review): var_21 = 1/length with no zero-length guard — coincident
// endpoints would divide by zero; presumably excluded upstream, verify.
var_21 = df::div(var_20, var_19);
var_22 = df::mul(var_17, var_21);
// scalar force magnitude: ke*(l - rest) + kd*(dir . dv)
var_23 = df::sub(var_19, var_12);
var_24 = df::dot(var_22, var_18);
var_25 = df::mul(var_10, var_23);
var_26 = df::mul(var_11, var_24);
var_27 = df::add(var_25, var_26);
var_28 = df::mul(var_22, var_27);
// equal-and-opposite accumulation into the force buffer
df::atomic_sub(var_f, var_5, var_28);
df::atomic_add(var_f, var_9, var_28);
}
// Autogenerated CPU kernel (backward/adjoint pass) for the spring forces.
// First replays the forward pass verbatim to rebuild every primal temporary,
// then sweeps the tape in exact reverse order, accumulating adjoints of the
// inputs (adj_x, adj_v, per-spring parameter adjoints) from adj_f.
// Statement order is load-bearing — do not hand-edit.
void eval_springs_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
int* var_spring_indices,
float* var_spring_rest_lengths,
float* var_spring_stiffness,
float* var_spring_damping,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
int* adj_spring_indices,
float* adj_spring_rest_lengths,
float* adj_spring_stiffness,
float* adj_spring_damping,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 2;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
float var_10;
float var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
float var_19;
const float var_20 = 1.0;
float var_21;
df::float3 var_22;
float var_23;
float var_24;
float var_25;
float var_26;
float var_27;
df::float3 var_28;
//---------
// dual vars
// adj_N accumulates d(loss)/d(var_N); all zero-initialized
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
float adj_10 = 0;
float adj_11 = 0;
float adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
df::float3 adj_15 = 0;
df::float3 adj_16 = 0;
df::float3 adj_17 = 0;
df::float3 adj_18 = 0;
float adj_19 = 0;
float adj_20 = 0;
float adj_21 = 0;
df::float3 adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
//---------
// forward
// replay of eval_springs_cpu_kernel_forward to rebuild the primal tape
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_spring_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_spring_indices, var_8);
var_10 = df::load(var_spring_stiffness, var_0);
var_11 = df::load(var_spring_damping, var_0);
var_12 = df::load(var_spring_rest_lengths, var_0);
var_13 = df::load(var_x, var_5);
var_14 = df::load(var_x, var_9);
var_15 = df::load(var_v, var_5);
var_16 = df::load(var_v, var_9);
var_17 = df::sub(var_13, var_14);
var_18 = df::sub(var_15, var_16);
var_19 = df::length(var_17);
var_21 = df::div(var_20, var_19);
var_22 = df::mul(var_17, var_21);
var_23 = df::sub(var_19, var_12);
var_24 = df::dot(var_22, var_18);
var_25 = df::mul(var_10, var_23);
var_26 = df::mul(var_11, var_24);
var_27 = df::add(var_25, var_26);
var_28 = df::mul(var_22, var_27);
df::atomic_sub(var_f, var_5, var_28);
df::atomic_add(var_f, var_9, var_28);
//---------
// reverse
// adjoint sweep: one adj_* call per forward statement, in reverse order
df::adj_atomic_add(var_f, var_9, var_28, adj_f, adj_9, adj_28);
df::adj_atomic_sub(var_f, var_5, var_28, adj_f, adj_5, adj_28);
df::adj_mul(var_22, var_27, adj_22, adj_27, adj_28);
df::adj_add(var_25, var_26, adj_25, adj_26, adj_27);
df::adj_mul(var_11, var_24, adj_11, adj_24, adj_26);
df::adj_mul(var_10, var_23, adj_10, adj_23, adj_25);
df::adj_dot(var_22, var_18, adj_22, adj_18, adj_24);
df::adj_sub(var_19, var_12, adj_19, adj_12, adj_23);
df::adj_mul(var_17, var_21, adj_17, adj_21, adj_22);
df::adj_div(var_20, var_19, adj_20, adj_19, adj_21);
df::adj_length(var_17, adj_17, adj_19);
df::adj_sub(var_15, var_16, adj_15, adj_16, adj_18);
df::adj_sub(var_13, var_14, adj_13, adj_14, adj_17);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_16);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_15);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_14);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_13);
df::adj_load(var_spring_rest_lengths, var_0, adj_spring_rest_lengths, adj_0, adj_12);
df::adj_load(var_spring_damping, var_0, adj_spring_damping, adj_0, adj_11);
df::adj_load(var_spring_stiffness, var_0, adj_spring_stiffness, adj_0, adj_10);
df::adj_load(var_spring_indices, var_8, adj_spring_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_spring_indices, var_4, adj_spring_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
// Host-side driver for the forward spring-force kernel: serially emulates a
// launch of `dim` threads, publishing each iteration index via s_threadIdx
// before invoking the generated kernel on raw pointers from the tensors.
void eval_springs_cpu_forward(int dim,
    torch::Tensor var_x,
    torch::Tensor var_v,
    torch::Tensor var_spring_indices,
    torch::Tensor var_spring_rest_lengths,
    torch::Tensor var_spring_stiffness,
    torch::Tensor var_spring_damping,
    torch::Tensor var_f)
{
    for (int tid = 0; tid < dim; ++tid)
    {
        s_threadIdx = tid;
        eval_springs_cpu_kernel_forward(
            cast<df::float3*>(var_x),
            cast<df::float3*>(var_v),
            cast<int*>(var_spring_indices),
            cast<float*>(var_spring_rest_lengths),
            cast<float*>(var_spring_stiffness),
            cast<float*>(var_spring_damping),
            cast<df::float3*>(var_f));
    }
}
// Host-side driver for the backward (adjoint) spring-force kernel.
// Same serial launch emulation as eval_springs_cpu_forward; primal buffers
// are passed first, followed by the matching adjoint buffers.
void eval_springs_cpu_backward(int dim,
    torch::Tensor var_x,
    torch::Tensor var_v,
    torch::Tensor var_spring_indices,
    torch::Tensor var_spring_rest_lengths,
    torch::Tensor var_spring_stiffness,
    torch::Tensor var_spring_damping,
    torch::Tensor var_f,
    torch::Tensor adj_x,
    torch::Tensor adj_v,
    torch::Tensor adj_spring_indices,
    torch::Tensor adj_spring_rest_lengths,
    torch::Tensor adj_spring_stiffness,
    torch::Tensor adj_spring_damping,
    torch::Tensor adj_f)
{
    for (int tid = 0; tid < dim; ++tid)
    {
        s_threadIdx = tid;
        eval_springs_cpu_kernel_backward(
            cast<df::float3*>(var_x),
            cast<df::float3*>(var_v),
            cast<int*>(var_spring_indices),
            cast<float*>(var_spring_rest_lengths),
            cast<float*>(var_spring_stiffness),
            cast<float*>(var_spring_damping),
            cast<df::float3*>(var_f),
            cast<df::float3*>(adj_x),
            cast<df::float3*>(adj_v),
            cast<int*>(adj_spring_indices),
            cast<float*>(adj_spring_rest_lengths),
            cast<float*>(adj_spring_stiffness),
            cast<float*>(adj_spring_damping),
            cast<df::float3*>(adj_f));
    }
}
// Python entry points
// NOTE(review): redundant re-declarations of the two definitions above,
// emitted by the code generator (likely for a binding header). Harmless.
void eval_springs_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
torch::Tensor var_f);
void eval_springs_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_spring_indices,
torch::Tensor var_spring_rest_lengths,
torch::Tensor var_spring_stiffness,
torch::Tensor var_spring_damping,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_spring_indices,
torch::Tensor adj_spring_rest_lengths,
torch::Tensor adj_spring_stiffness,
torch::Tensor adj_spring_damping,
torch::Tensor adj_f);
// Autogenerated CPU kernel (forward pass) for per-triangle forces.
// One invocation handles the triangle indexed by df::tid(). Based on the
// parameter names this appears to combine an in-plane elastic membrane term
// (k_mu / k_lambda, damped by k_damp) with simple aerodynamic drag (k_drag)
// and lift (k_lift) terms — semantics inferred; verify against the
// generating Python kernel. SSA-style var_N temporaries mirror the
// expression graph; eval_triangles_cpu_kernel_backward replays this exact
// statement sequence, so do not reorder or hand-edit.
void eval_triangles_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat22* var_pose,
float* var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 3;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
mat22 var_22;
float var_23;
const float var_24 = 2.0;
float var_25;
const float var_26 = 1.0;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
df::float3 var_32;
float var_33;
df::float3 var_34;
df::float3 var_35;
float var_36;
df::float3 var_37;
float var_38;
df::float3 var_39;
df::float3 var_40;
float var_41;
df::float3 var_42;
float var_43;
df::float3 var_44;
df::float3 var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
float var_49;
df::float3 var_50;
df::float3 var_51;
df::float3 var_52;
float var_53;
float var_54;
df::float3 var_55;
float var_56;
const float var_57 = 0.5;
float var_58;
float var_59;
float var_60;
float var_61;
float var_62;
df::float3 var_63;
df::float3 var_64;
df::float3 var_65;
df::float3 var_66;
df::float3 var_67;
df::float3 var_68;
df::float3 var_69;
float var_70;
float var_71;
float var_72;
float var_73;
df::float3 var_74;
float var_75;
float var_76;
float var_77;
float var_78;
df::float3 var_79;
df::float3 var_80;
float var_81;
df::float3 var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
df::float3 var_86;
const float var_87 = 0.3333;
df::float3 var_88;
df::float3 var_89;
float var_90;
float var_91;
float var_92;
float var_93;
df::float3 var_94;
float var_95;
const float var_96 = 1.57079;
float var_97;
float var_98;
float var_99;
float var_100;
df::float3 var_101;
float var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
df::float3 var_106;
df::float3 var_107;
df::float3 var_108;
df::float3 var_109;
//---------
// forward
var_0 = df::tid();
// var_5, var_9, var_13: the triangle's three vertex indices (stride-3 array)
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::load(var_x, var_5);
var_15 = df::load(var_x, var_9);
var_16 = df::load(var_x, var_13);
var_17 = df::load(var_v, var_5);
var_18 = df::load(var_v, var_9);
var_19 = df::load(var_v, var_13);
// var_20 / var_21: edge vectors relative to vertex 0
var_20 = df::sub(var_15, var_14);
var_21 = df::sub(var_16, var_14);
var_22 = df::load(var_pose, var_0);
// var_27 = 1 / (2 * det(pose)): per-triangle rest-configuration factor
// used to rescale the material constants below
var_23 = df::determinant(var_22);
var_25 = df::mul(var_23, var_24);
var_27 = df::div(var_26, var_25);
var_28 = df::mul(var_k_mu, var_27);
var_29 = df::mul(var_k_lambda, var_27);
var_30 = df::mul(var_k_damp, var_27);
// deformation measure: edge vectors mapped through the 2x2 pose matrix,
// then contracted against the pose entries again to form force directions
var_31 = df::index(var_22, var_3, var_3);
var_32 = df::mul(var_20, var_31);
var_33 = df::index(var_22, var_7, var_3);
var_34 = df::mul(var_21, var_33);
var_35 = df::add(var_32, var_34);
var_36 = df::index(var_22, var_3, var_7);
var_37 = df::mul(var_20, var_36);
var_38 = df::index(var_22, var_7, var_7);
var_39 = df::mul(var_21, var_38);
var_40 = df::add(var_37, var_39);
var_41 = df::index(var_22, var_3, var_3);
var_42 = df::mul(var_35, var_41);
var_43 = df::index(var_22, var_3, var_7);
var_44 = df::mul(var_40, var_43);
var_45 = df::add(var_42, var_44);
var_46 = df::mul(var_45, var_28);
var_47 = df::index(var_22, var_7, var_3);
var_48 = df::mul(var_35, var_47);
var_49 = df::index(var_22, var_7, var_7);
var_50 = df::mul(var_40, var_49);
var_51 = df::add(var_48, var_50);
var_52 = df::mul(var_51, var_28);
var_53 = df::div(var_28, var_29);
var_54 = df::add(var_26, var_53);
// var_55: face normal direction (unnormalized); var_58: current area
var_55 = df::cross(var_20, var_21);
var_56 = df::length(var_55);
var_58 = df::mul(var_56, var_57);
var_59 = df::load(var_activation, var_0);
// var_62: area strain term, offset by per-triangle activation
var_60 = df::mul(var_58, var_25);
var_61 = df::sub(var_60, var_54);
var_62 = df::add(var_61, var_59);
var_63 = df::normalize(var_55);
// var_66 / var_69: area gradients w.r.t. vertices 1 and 2
var_64 = df::cross(var_21, var_63);
var_65 = df::mul(var_64, var_25);
var_66 = df::mul(var_65, var_57);
var_67 = df::cross(var_63, var_20);
var_68 = df::mul(var_67, var_25);
var_69 = df::mul(var_68, var_57);
// var_78 = elastic (k_lambda) + damping (k_damp) scalar response
var_70 = df::mul(var_29, var_62);
var_71 = df::dot(var_66, var_18);
var_72 = df::dot(var_69, var_19);
var_73 = df::add(var_71, var_72);
var_74 = df::add(var_66, var_69);
var_75 = df::dot(var_74, var_17);
var_76 = df::sub(var_73, var_75);
var_77 = df::mul(var_30, var_76);
var_78 = df::add(var_70, var_77);
var_79 = df::mul(var_66, var_78);
var_80 = df::add(var_46, var_79);
var_81 = df::add(var_70, var_77);
var_82 = df::mul(var_69, var_81);
var_83 = df::add(var_52, var_82);
var_84 = df::add(var_80, var_83);
// var_88: average vertex velocity (factor 0.3333 ~ 1/3)
var_85 = df::add(var_17, var_19);
var_86 = df::add(var_85, var_18);
var_88 = df::mul(var_86, var_87);
var_89 = df::normalize(var_88);
// drag term along the average velocity, scaled by |n . v_avg| and area
var_90 = df::mul(var_k_drag, var_58);
var_91 = df::dot(var_63, var_88);
var_92 = df::abs(var_91);
var_93 = df::mul(var_90, var_92);
var_94 = df::mul(var_88, var_93);
// lift term along the normal; 1.57079 ~ pi/2 offset on the angle of attack
var_95 = df::mul(var_k_lift, var_58);
var_97 = df::dot(var_63, var_89);
var_98 = df::acos(var_97);
var_99 = df::sub(var_96, var_98);
var_100 = df::mul(var_95, var_99);
var_101 = df::mul(var_63, var_100);
var_102 = df::dot(var_88, var_88);
var_103 = df::mul(var_101, var_102);
// combine elastic + aero contributions and scatter to the three vertices
var_104 = df::sub(var_84, var_94);
var_105 = df::sub(var_104, var_103);
var_106 = df::add(var_80, var_94);
var_107 = df::add(var_106, var_103);
var_108 = df::add(var_83, var_94);
var_109 = df::add(var_108, var_103);
df::atomic_add(var_f, var_5, var_105);
df::atomic_sub(var_f, var_9, var_107);
df::atomic_sub(var_f, var_13, var_109);
}
// Autogenerated CPU kernel (backward/adjoint pass) for the triangle forces.
// Structure: (1) declare primal temporaries, (2) declare zero-initialized
// adjoint accumulators adj_N (one per primal var_N), (3) replay the forward
// kernel verbatim to rebuild the primal tape, (4) sweep the tape in exact
// reverse order, propagating adjoints from adj_f back to adj_x, adj_v,
// adj_pose, adj_activation and the scalar material-parameter adjoints.
// Statement order is load-bearing — do not hand-edit.
void eval_triangles_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat22* var_pose,
float* var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
int* adj_indices,
mat22* adj_pose,
float* adj_activation,
float adj_k_mu,
float adj_k_lambda,
float adj_k_damp,
float adj_k_drag,
float adj_k_lift,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 3;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
df::float3 var_14;
df::float3 var_15;
df::float3 var_16;
df::float3 var_17;
df::float3 var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
mat22 var_22;
float var_23;
const float var_24 = 2.0;
float var_25;
const float var_26 = 1.0;
float var_27;
float var_28;
float var_29;
float var_30;
float var_31;
df::float3 var_32;
float var_33;
df::float3 var_34;
df::float3 var_35;
float var_36;
df::float3 var_37;
float var_38;
df::float3 var_39;
df::float3 var_40;
float var_41;
df::float3 var_42;
float var_43;
df::float3 var_44;
df::float3 var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
float var_49;
df::float3 var_50;
df::float3 var_51;
df::float3 var_52;
float var_53;
float var_54;
df::float3 var_55;
float var_56;
const float var_57 = 0.5;
float var_58;
float var_59;
float var_60;
float var_61;
float var_62;
df::float3 var_63;
df::float3 var_64;
df::float3 var_65;
df::float3 var_66;
df::float3 var_67;
df::float3 var_68;
df::float3 var_69;
float var_70;
float var_71;
float var_72;
float var_73;
df::float3 var_74;
float var_75;
float var_76;
float var_77;
float var_78;
df::float3 var_79;
df::float3 var_80;
float var_81;
df::float3 var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
df::float3 var_86;
const float var_87 = 0.3333;
df::float3 var_88;
df::float3 var_89;
float var_90;
float var_91;
float var_92;
float var_93;
df::float3 var_94;
float var_95;
const float var_96 = 1.57079;
float var_97;
float var_98;
float var_99;
float var_100;
df::float3 var_101;
float var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
df::float3 var_106;
df::float3 var_107;
df::float3 var_108;
df::float3 var_109;
//---------
// dual vars
// adj_N accumulates d(loss)/d(var_N); all zero-initialized
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
df::float3 adj_14 = 0;
df::float3 adj_15 = 0;
df::float3 adj_16 = 0;
df::float3 adj_17 = 0;
df::float3 adj_18 = 0;
df::float3 adj_19 = 0;
df::float3 adj_20 = 0;
df::float3 adj_21 = 0;
mat22 adj_22 = 0;
float adj_23 = 0;
float adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
float adj_27 = 0;
float adj_28 = 0;
float adj_29 = 0;
float adj_30 = 0;
float adj_31 = 0;
df::float3 adj_32 = 0;
float adj_33 = 0;
df::float3 adj_34 = 0;
df::float3 adj_35 = 0;
float adj_36 = 0;
df::float3 adj_37 = 0;
float adj_38 = 0;
df::float3 adj_39 = 0;
df::float3 adj_40 = 0;
float adj_41 = 0;
df::float3 adj_42 = 0;
float adj_43 = 0;
df::float3 adj_44 = 0;
df::float3 adj_45 = 0;
df::float3 adj_46 = 0;
float adj_47 = 0;
df::float3 adj_48 = 0;
float adj_49 = 0;
df::float3 adj_50 = 0;
df::float3 adj_51 = 0;
df::float3 adj_52 = 0;
float adj_53 = 0;
float adj_54 = 0;
df::float3 adj_55 = 0;
float adj_56 = 0;
float adj_57 = 0;
float adj_58 = 0;
float adj_59 = 0;
float adj_60 = 0;
float adj_61 = 0;
float adj_62 = 0;
df::float3 adj_63 = 0;
df::float3 adj_64 = 0;
df::float3 adj_65 = 0;
df::float3 adj_66 = 0;
df::float3 adj_67 = 0;
df::float3 adj_68 = 0;
df::float3 adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
float adj_73 = 0;
df::float3 adj_74 = 0;
float adj_75 = 0;
float adj_76 = 0;
float adj_77 = 0;
float adj_78 = 0;
df::float3 adj_79 = 0;
df::float3 adj_80 = 0;
float adj_81 = 0;
df::float3 adj_82 = 0;
df::float3 adj_83 = 0;
df::float3 adj_84 = 0;
df::float3 adj_85 = 0;
df::float3 adj_86 = 0;
float adj_87 = 0;
df::float3 adj_88 = 0;
df::float3 adj_89 = 0;
float adj_90 = 0;
float adj_91 = 0;
float adj_92 = 0;
float adj_93 = 0;
df::float3 adj_94 = 0;
float adj_95 = 0;
float adj_96 = 0;
float adj_97 = 0;
float adj_98 = 0;
float adj_99 = 0;
float adj_100 = 0;
df::float3 adj_101 = 0;
float adj_102 = 0;
df::float3 adj_103 = 0;
df::float3 adj_104 = 0;
df::float3 adj_105 = 0;
df::float3 adj_106 = 0;
df::float3 adj_107 = 0;
df::float3 adj_108 = 0;
df::float3 adj_109 = 0;
//---------
// forward
// replay of eval_triangles_cpu_kernel_forward to rebuild the primal tape;
// see that kernel for the per-statement commentary
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::load(var_x, var_5);
var_15 = df::load(var_x, var_9);
var_16 = df::load(var_x, var_13);
var_17 = df::load(var_v, var_5);
var_18 = df::load(var_v, var_9);
var_19 = df::load(var_v, var_13);
var_20 = df::sub(var_15, var_14);
var_21 = df::sub(var_16, var_14);
var_22 = df::load(var_pose, var_0);
var_23 = df::determinant(var_22);
var_25 = df::mul(var_23, var_24);
var_27 = df::div(var_26, var_25);
var_28 = df::mul(var_k_mu, var_27);
var_29 = df::mul(var_k_lambda, var_27);
var_30 = df::mul(var_k_damp, var_27);
var_31 = df::index(var_22, var_3, var_3);
var_32 = df::mul(var_20, var_31);
var_33 = df::index(var_22, var_7, var_3);
var_34 = df::mul(var_21, var_33);
var_35 = df::add(var_32, var_34);
var_36 = df::index(var_22, var_3, var_7);
var_37 = df::mul(var_20, var_36);
var_38 = df::index(var_22, var_7, var_7);
var_39 = df::mul(var_21, var_38);
var_40 = df::add(var_37, var_39);
var_41 = df::index(var_22, var_3, var_3);
var_42 = df::mul(var_35, var_41);
var_43 = df::index(var_22, var_3, var_7);
var_44 = df::mul(var_40, var_43);
var_45 = df::add(var_42, var_44);
var_46 = df::mul(var_45, var_28);
var_47 = df::index(var_22, var_7, var_3);
var_48 = df::mul(var_35, var_47);
var_49 = df::index(var_22, var_7, var_7);
var_50 = df::mul(var_40, var_49);
var_51 = df::add(var_48, var_50);
var_52 = df::mul(var_51, var_28);
var_53 = df::div(var_28, var_29);
var_54 = df::add(var_26, var_53);
var_55 = df::cross(var_20, var_21);
var_56 = df::length(var_55);
var_58 = df::mul(var_56, var_57);
var_59 = df::load(var_activation, var_0);
var_60 = df::mul(var_58, var_25);
var_61 = df::sub(var_60, var_54);
var_62 = df::add(var_61, var_59);
var_63 = df::normalize(var_55);
var_64 = df::cross(var_21, var_63);
var_65 = df::mul(var_64, var_25);
var_66 = df::mul(var_65, var_57);
var_67 = df::cross(var_63, var_20);
var_68 = df::mul(var_67, var_25);
var_69 = df::mul(var_68, var_57);
var_70 = df::mul(var_29, var_62);
var_71 = df::dot(var_66, var_18);
var_72 = df::dot(var_69, var_19);
var_73 = df::add(var_71, var_72);
var_74 = df::add(var_66, var_69);
var_75 = df::dot(var_74, var_17);
var_76 = df::sub(var_73, var_75);
var_77 = df::mul(var_30, var_76);
var_78 = df::add(var_70, var_77);
var_79 = df::mul(var_66, var_78);
var_80 = df::add(var_46, var_79);
var_81 = df::add(var_70, var_77);
var_82 = df::mul(var_69, var_81);
var_83 = df::add(var_52, var_82);
var_84 = df::add(var_80, var_83);
var_85 = df::add(var_17, var_19);
var_86 = df::add(var_85, var_18);
var_88 = df::mul(var_86, var_87);
var_89 = df::normalize(var_88);
var_90 = df::mul(var_k_drag, var_58);
var_91 = df::dot(var_63, var_88);
var_92 = df::abs(var_91);
var_93 = df::mul(var_90, var_92);
var_94 = df::mul(var_88, var_93);
var_95 = df::mul(var_k_lift, var_58);
var_97 = df::dot(var_63, var_89);
var_98 = df::acos(var_97);
var_99 = df::sub(var_96, var_98);
var_100 = df::mul(var_95, var_99);
var_101 = df::mul(var_63, var_100);
var_102 = df::dot(var_88, var_88);
var_103 = df::mul(var_101, var_102);
var_104 = df::sub(var_84, var_94);
var_105 = df::sub(var_104, var_103);
var_106 = df::add(var_80, var_94);
var_107 = df::add(var_106, var_103);
var_108 = df::add(var_83, var_94);
var_109 = df::add(var_108, var_103);
df::atomic_add(var_f, var_5, var_105);
df::atomic_sub(var_f, var_9, var_107);
df::atomic_sub(var_f, var_13, var_109);
//---------
// reverse
// adjoint sweep: one adj_* call per forward statement, in reverse order
df::adj_atomic_sub(var_f, var_13, var_109, adj_f, adj_13, adj_109);
df::adj_atomic_sub(var_f, var_9, var_107, adj_f, adj_9, adj_107);
df::adj_atomic_add(var_f, var_5, var_105, adj_f, adj_5, adj_105);
df::adj_add(var_108, var_103, adj_108, adj_103, adj_109);
df::adj_add(var_83, var_94, adj_83, adj_94, adj_108);
df::adj_add(var_106, var_103, adj_106, adj_103, adj_107);
df::adj_add(var_80, var_94, adj_80, adj_94, adj_106);
df::adj_sub(var_104, var_103, adj_104, adj_103, adj_105);
df::adj_sub(var_84, var_94, adj_84, adj_94, adj_104);
df::adj_mul(var_101, var_102, adj_101, adj_102, adj_103);
df::adj_dot(var_88, var_88, adj_88, adj_88, adj_102);
df::adj_mul(var_63, var_100, adj_63, adj_100, adj_101);
df::adj_mul(var_95, var_99, adj_95, adj_99, adj_100);
df::adj_sub(var_96, var_98, adj_96, adj_98, adj_99);
df::adj_acos(var_97, adj_97, adj_98);
df::adj_dot(var_63, var_89, adj_63, adj_89, adj_97);
df::adj_mul(var_k_lift, var_58, adj_k_lift, adj_58, adj_95);
df::adj_mul(var_88, var_93, adj_88, adj_93, adj_94);
df::adj_mul(var_90, var_92, adj_90, adj_92, adj_93);
df::adj_abs(var_91, adj_91, adj_92);
df::adj_dot(var_63, var_88, adj_63, adj_88, adj_91);
df::adj_mul(var_k_drag, var_58, adj_k_drag, adj_58, adj_90);
df::adj_normalize(var_88, adj_88, adj_89);
df::adj_mul(var_86, var_87, adj_86, adj_87, adj_88);
df::adj_add(var_85, var_18, adj_85, adj_18, adj_86);
df::adj_add(var_17, var_19, adj_17, adj_19, adj_85);
df::adj_add(var_80, var_83, adj_80, adj_83, adj_84);
df::adj_add(var_52, var_82, adj_52, adj_82, adj_83);
df::adj_mul(var_69, var_81, adj_69, adj_81, adj_82);
df::adj_add(var_70, var_77, adj_70, adj_77, adj_81);
df::adj_add(var_46, var_79, adj_46, adj_79, adj_80);
df::adj_mul(var_66, var_78, adj_66, adj_78, adj_79);
df::adj_add(var_70, var_77, adj_70, adj_77, adj_78);
df::adj_mul(var_30, var_76, adj_30, adj_76, adj_77);
df::adj_sub(var_73, var_75, adj_73, adj_75, adj_76);
df::adj_dot(var_74, var_17, adj_74, adj_17, adj_75);
df::adj_add(var_66, var_69, adj_66, adj_69, adj_74);
df::adj_add(var_71, var_72, adj_71, adj_72, adj_73);
df::adj_dot(var_69, var_19, adj_69, adj_19, adj_72);
df::adj_dot(var_66, var_18, adj_66, adj_18, adj_71);
df::adj_mul(var_29, var_62, adj_29, adj_62, adj_70);
df::adj_mul(var_68, var_57, adj_68, adj_57, adj_69);
df::adj_mul(var_67, var_25, adj_67, adj_25, adj_68);
df::adj_cross(var_63, var_20, adj_63, adj_20, adj_67);
df::adj_mul(var_65, var_57, adj_65, adj_57, adj_66);
df::adj_mul(var_64, var_25, adj_64, adj_25, adj_65);
df::adj_cross(var_21, var_63, adj_21, adj_63, adj_64);
df::adj_normalize(var_55, adj_55, adj_63);
df::adj_add(var_61, var_59, adj_61, adj_59, adj_62);
df::adj_sub(var_60, var_54, adj_60, adj_54, adj_61);
df::adj_mul(var_58, var_25, adj_58, adj_25, adj_60);
df::adj_load(var_activation, var_0, adj_activation, adj_0, adj_59);
df::adj_mul(var_56, var_57, adj_56, adj_57, adj_58);
df::adj_length(var_55, adj_55, adj_56);
df::adj_cross(var_20, var_21, adj_20, adj_21, adj_55);
df::adj_add(var_26, var_53, adj_26, adj_53, adj_54);
df::adj_div(var_28, var_29, adj_28, adj_29, adj_53);
df::adj_mul(var_51, var_28, adj_51, adj_28, adj_52);
df::adj_add(var_48, var_50, adj_48, adj_50, adj_51);
df::adj_mul(var_40, var_49, adj_40, adj_49, adj_50);
df::adj_index(var_22, var_7, var_7, adj_22, adj_7, adj_7, adj_49);
df::adj_mul(var_35, var_47, adj_35, adj_47, adj_48);
df::adj_index(var_22, var_7, var_3, adj_22, adj_7, adj_3, adj_47);
df::adj_mul(var_45, var_28, adj_45, adj_28, adj_46);
df::adj_add(var_42, var_44, adj_42, adj_44, adj_45);
df::adj_mul(var_40, var_43, adj_40, adj_43, adj_44);
df::adj_index(var_22, var_3, var_7, adj_22, adj_3, adj_7, adj_43);
df::adj_mul(var_35, var_41, adj_35, adj_41, adj_42);
df::adj_index(var_22, var_3, var_3, adj_22, adj_3, adj_3, adj_41);
df::adj_add(var_37, var_39, adj_37, adj_39, adj_40);
df::adj_mul(var_21, var_38, adj_21, adj_38, adj_39);
df::adj_index(var_22, var_7, var_7, adj_22, adj_7, adj_7, adj_38);
df::adj_mul(var_20, var_36, adj_20, adj_36, adj_37);
df::adj_index(var_22, var_3, var_7, adj_22, adj_3, adj_7, adj_36);
df::adj_add(var_32, var_34, adj_32, adj_34, adj_35);
df::adj_mul(var_21, var_33, adj_21, adj_33, adj_34);
df::adj_index(var_22, var_7, var_3, adj_22, adj_7, adj_3, adj_33);
df::adj_mul(var_20, var_31, adj_20, adj_31, adj_32);
df::adj_index(var_22, var_3, var_3, adj_22, adj_3, adj_3, adj_31);
df::adj_mul(var_k_damp, var_27, adj_k_damp, adj_27, adj_30);
df::adj_mul(var_k_lambda, var_27, adj_k_lambda, adj_27, adj_29);
df::adj_mul(var_k_mu, var_27, adj_k_mu, adj_27, adj_28);
df::adj_div(var_26, var_25, adj_26, adj_25, adj_27);
df::adj_mul(var_23, var_24, adj_23, adj_24, adj_25);
df::adj_determinant(var_22, adj_22, adj_23);
df::adj_load(var_pose, var_0, adj_pose, adj_0, adj_22);
df::adj_sub(var_16, var_14, adj_16, adj_14, adj_21);
df::adj_sub(var_15, var_14, adj_15, adj_14, adj_20);
df::adj_load(var_v, var_13, adj_v, adj_13, adj_19);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_18);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_17);
df::adj_load(var_x, var_13, adj_x, adj_13, adj_16);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_15);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_14);
df::adj_load(var_indices, var_12, adj_indices, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_10);
df::adj_load(var_indices, var_8, adj_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_indices, var_4, adj_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
void eval_triangles_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat22*>(var_pose),
cast<float*>(var_activation),
var_k_mu,
var_k_lambda,
var_k_damp,
var_k_drag,
var_k_lift,
cast<df::float3*>(var_f));
}
}
void eval_triangles_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
float adj_k_mu,
float adj_k_lambda,
float adj_k_damp,
float adj_k_drag,
float adj_k_lift,
torch::Tensor adj_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat22*>(var_pose),
cast<float*>(var_activation),
var_k_mu,
var_k_lambda,
var_k_damp,
var_k_drag,
var_k_lift,
cast<df::float3*>(var_f),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<int*>(adj_indices),
cast<mat22*>(adj_pose),
cast<float*>(adj_activation),
adj_k_mu,
adj_k_lambda,
adj_k_damp,
adj_k_drag,
adj_k_lift,
cast<df::float3*>(adj_f));
}
}
// Python entry points
// NOTE(review): these are redundant re-declarations of the two entry points
// defined immediately above; the code generator emits a prototype block per
// kernel regardless. Harmless (a declaration after a definition is legal),
// kept byte-identical because this file is machine-generated.
void eval_triangles_cpu_forward(int dim,
    torch::Tensor var_x,
    torch::Tensor var_v,
    torch::Tensor var_indices,
    torch::Tensor var_pose,
    torch::Tensor var_activation,
    float var_k_mu,
    float var_k_lambda,
    float var_k_damp,
    float var_k_drag,
    float var_k_lift,
    torch::Tensor var_f);
void eval_triangles_cpu_backward(int dim,
    torch::Tensor var_x,
    torch::Tensor var_v,
    torch::Tensor var_indices,
    torch::Tensor var_pose,
    torch::Tensor var_activation,
    float var_k_mu,
    float var_k_lambda,
    float var_k_damp,
    float var_k_drag,
    float var_k_lift,
    torch::Tensor var_f,
    torch::Tensor adj_x,
    torch::Tensor adj_v,
    torch::Tensor adj_indices,
    torch::Tensor adj_pose,
    torch::Tensor adj_activation,
    float adj_k_mu,
    float adj_k_lambda,
    float adj_k_damp,
    float adj_k_drag,
    float adj_k_lift,
    torch::Tensor adj_f);
// Soft particle-vs-triangle contact, forward pass.
// Machine-generated SSA-style code (one var_N per intermediate) -- do not
// edit by hand; regenerate from the Python kernel instead.
// Thread decomposition: tid = triangle_id * num_particles + particle_id,
// so var_1 is the triangle index and var_2 the particle index.
void eval_triangles_contact_cpu_kernel_forward(
    int var_num_particles,
    df::float3* var_x,
    df::float3* var_v,
    int* var_indices,
    mat22* var_pose,
    float* var_activation,
    float var_k_mu,
    float var_k_lambda,
    float var_k_damp,
    float var_k_drag,
    float var_k_lift,
    df::float3* var_f)
{
    //---------
    // primal vars
    int var_0;
    int var_1;
    int var_2;
    df::float3 var_3;
    const int var_4 = 3;
    int var_5;
    const int var_6 = 0;
    int var_7;
    int var_8;
    int var_9;
    const int var_10 = 1;
    int var_11;
    int var_12;
    int var_13;
    const int var_14 = 2;
    int var_15;
    int var_16;
    bool var_17;
    bool var_18;
    bool var_19;
    bool var_20;
    df::float3 var_21;
    df::float3 var_22;
    df::float3 var_23;
    df::float3 var_24;
    float var_25;
    df::float3 var_26;
    float var_27;
    df::float3 var_28;
    df::float3 var_29;
    float var_30;
    df::float3 var_31;
    df::float3 var_32;
    df::float3 var_33;
    float var_34;
    df::float3 var_35;
    const float var_36 = 0.01;   // contact margin (compared against a squared distance below)
    float var_37;
    const float var_38 = 0.0;
    float var_39;
    df::float3 var_40;
    const float var_41 = 100000.0;   // contact penalty stiffness
    df::float3 var_42;
    float var_43;
    df::float3 var_44;
    float var_45;
    df::float3 var_46;
    float var_47;
    df::float3 var_48;
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::div(var_0, var_num_particles);   // triangle index
    var_2 = df::mod(var_0, var_num_particles);   // particle index
    var_3 = df::load(var_x, var_2);              // particle position
    // gather the three vertex indices of triangle var_1
    var_5 = df::mul(var_1, var_4);
    var_7 = df::add(var_5, var_6);
    var_8 = df::load(var_indices, var_7);
    var_9 = df::mul(var_1, var_4);
    var_11 = df::add(var_9, var_10);
    var_12 = df::load(var_indices, var_11);
    var_13 = df::mul(var_1, var_4);
    var_15 = df::add(var_13, var_14);
    var_16 = df::load(var_indices, var_15);
    // skip self-contact: the particle is one of this triangle's vertices
    var_17 = (var_8 == var_2);
    var_18 = (var_12 == var_2);
    var_19 = (var_16 == var_2);
    var_20 = var_17 || var_18 || var_19;
    if (var_20) {
    	return;
    }
    var_21 = df::load(var_x, var_8);
    var_22 = df::load(var_x, var_12);
    var_23 = df::load(var_x, var_16);
    // barycentric coords of the closest point on the triangle to var_3
    var_24 = triangle_closest_point_barycentric_cpu_func(var_21, var_22, var_23, var_3);
    var_25 = df::index(var_24, var_6);
    var_26 = df::mul(var_21, var_25);
    var_27 = df::index(var_24, var_10);
    var_28 = df::mul(var_22, var_27);
    var_29 = df::add(var_26, var_28);
    var_30 = df::index(var_24, var_14);
    var_31 = df::mul(var_23, var_30);
    var_32 = df::add(var_29, var_31);          // closest point in world space
    var_33 = df::sub(var_3, var_32);           // offset particle -> closest point
    // NOTE(review): var_34 is the SQUARED length of the offset but is compared
    // against the 0.01 margin below -- this mirrors the upstream Python kernel;
    // confirm whether a length() was intended before changing anything here.
    var_34 = df::dot(var_33, var_33);
    var_35 = df::normalize(var_33);            // contact normal
    var_37 = df::sub(var_34, var_36);
    var_39 = df::min(var_37, var_38);          // negative only inside the margin
    var_40 = df::mul(var_35, var_39);
    var_42 = df::mul(var_40, var_41);          // penalty force (points into the triangle)
    // equal-and-opposite forces: full force on the particle, distributed over
    // the triangle vertices by the barycentric weights
    df::atomic_sub(var_f, var_2, var_42);
    var_43 = df::index(var_24, var_6);
    var_44 = df::mul(var_42, var_43);
    df::atomic_add(var_f, var_8, var_44);
    var_45 = df::index(var_24, var_10);
    var_46 = df::mul(var_42, var_45);
    df::atomic_add(var_f, var_12, var_46);
    var_47 = df::index(var_24, var_14);
    var_48 = df::mul(var_42, var_47);
    df::atomic_add(var_f, var_16, var_48);
}
// Soft particle-vs-triangle contact, reverse (adjoint) pass.
// Machine-generated: first replays the forward computation verbatim, then
// walks the statements in reverse order, calling df::adj_* for each one to
// accumulate gradients into the adj_* buffers. Do not edit by hand.
void eval_triangles_contact_cpu_kernel_backward(
    int var_num_particles,
    df::float3* var_x,
    df::float3* var_v,
    int* var_indices,
    mat22* var_pose,
    float* var_activation,
    float var_k_mu,
    float var_k_lambda,
    float var_k_damp,
    float var_k_drag,
    float var_k_lift,
    df::float3* var_f,
    int adj_num_particles,
    df::float3* adj_x,
    df::float3* adj_v,
    int* adj_indices,
    mat22* adj_pose,
    float* adj_activation,
    float adj_k_mu,
    float adj_k_lambda,
    float adj_k_damp,
    float adj_k_drag,
    float adj_k_lift,
    df::float3* adj_f)
{
    //---------
    // primal vars
    int var_0;
    int var_1;
    int var_2;
    df::float3 var_3;
    const int var_4 = 3;
    int var_5;
    const int var_6 = 0;
    int var_7;
    int var_8;
    int var_9;
    const int var_10 = 1;
    int var_11;
    int var_12;
    int var_13;
    const int var_14 = 2;
    int var_15;
    int var_16;
    bool var_17;
    bool var_18;
    bool var_19;
    bool var_20;
    df::float3 var_21;
    df::float3 var_22;
    df::float3 var_23;
    df::float3 var_24;
    float var_25;
    df::float3 var_26;
    float var_27;
    df::float3 var_28;
    df::float3 var_29;
    float var_30;
    df::float3 var_31;
    df::float3 var_32;
    df::float3 var_33;
    float var_34;
    df::float3 var_35;
    const float var_36 = 0.01;
    float var_37;
    const float var_38 = 0.0;
    float var_39;
    df::float3 var_40;
    const float var_41 = 100000.0;
    df::float3 var_42;
    float var_43;
    df::float3 var_44;
    float var_45;
    df::float3 var_46;
    float var_47;
    df::float3 var_48;
    //---------
    // dual vars (gradient accumulators, zero-initialized)
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    df::float3 adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    int adj_7 = 0;
    int adj_8 = 0;
    int adj_9 = 0;
    int adj_10 = 0;
    int adj_11 = 0;
    int adj_12 = 0;
    int adj_13 = 0;
    int adj_14 = 0;
    int adj_15 = 0;
    int adj_16 = 0;
    bool adj_17 = 0;
    bool adj_18 = 0;
    bool adj_19 = 0;
    bool adj_20 = 0;
    df::float3 adj_21 = 0;
    df::float3 adj_22 = 0;
    df::float3 adj_23 = 0;
    df::float3 adj_24 = 0;
    float adj_25 = 0;
    df::float3 adj_26 = 0;
    float adj_27 = 0;
    df::float3 adj_28 = 0;
    df::float3 adj_29 = 0;
    float adj_30 = 0;
    df::float3 adj_31 = 0;
    df::float3 adj_32 = 0;
    df::float3 adj_33 = 0;
    float adj_34 = 0;
    df::float3 adj_35 = 0;
    float adj_36 = 0;
    float adj_37 = 0;
    float adj_38 = 0;
    float adj_39 = 0;
    df::float3 adj_40 = 0;
    float adj_41 = 0;
    df::float3 adj_42 = 0;
    float adj_43 = 0;
    df::float3 adj_44 = 0;
    float adj_45 = 0;
    df::float3 adj_46 = 0;
    float adj_47 = 0;
    df::float3 adj_48 = 0;
    //---------
    // forward (replay of the primal kernel; see the forward kernel above)
    var_0 = df::tid();
    var_1 = df::div(var_0, var_num_particles);
    var_2 = df::mod(var_0, var_num_particles);
    var_3 = df::load(var_x, var_2);
    var_5 = df::mul(var_1, var_4);
    var_7 = df::add(var_5, var_6);
    var_8 = df::load(var_indices, var_7);
    var_9 = df::mul(var_1, var_4);
    var_11 = df::add(var_9, var_10);
    var_12 = df::load(var_indices, var_11);
    var_13 = df::mul(var_1, var_4);
    var_15 = df::add(var_13, var_14);
    var_16 = df::load(var_indices, var_15);
    var_17 = (var_8 == var_2);
    var_18 = (var_12 == var_2);
    var_19 = (var_16 == var_2);
    var_20 = var_17 || var_18 || var_19;
    // the forward early-return becomes a goto so the reverse pass below can
    // resume at the matching point (label0) and still unwind the index math
    if (var_20) {
    	goto label0;
    }
    var_21 = df::load(var_x, var_8);
    var_22 = df::load(var_x, var_12);
    var_23 = df::load(var_x, var_16);
    var_24 = triangle_closest_point_barycentric_cpu_func(var_21, var_22, var_23, var_3);
    var_25 = df::index(var_24, var_6);
    var_26 = df::mul(var_21, var_25);
    var_27 = df::index(var_24, var_10);
    var_28 = df::mul(var_22, var_27);
    var_29 = df::add(var_26, var_28);
    var_30 = df::index(var_24, var_14);
    var_31 = df::mul(var_23, var_30);
    var_32 = df::add(var_29, var_31);
    var_33 = df::sub(var_3, var_32);
    var_34 = df::dot(var_33, var_33);
    var_35 = df::normalize(var_33);
    var_37 = df::sub(var_34, var_36);
    var_39 = df::min(var_37, var_38);
    var_40 = df::mul(var_35, var_39);
    var_42 = df::mul(var_40, var_41);
    df::atomic_sub(var_f, var_2, var_42);
    var_43 = df::index(var_24, var_6);
    var_44 = df::mul(var_42, var_43);
    df::atomic_add(var_f, var_8, var_44);
    var_45 = df::index(var_24, var_10);
    var_46 = df::mul(var_42, var_45);
    df::atomic_add(var_f, var_12, var_46);
    var_47 = df::index(var_24, var_14);
    var_48 = df::mul(var_42, var_47);
    df::atomic_add(var_f, var_16, var_48);
    //---------
    // reverse (adjoint of each forward statement, in reverse order)
    df::adj_atomic_add(var_f, var_16, var_48, adj_f, adj_16, adj_48);
    df::adj_mul(var_42, var_47, adj_42, adj_47, adj_48);
    df::adj_index(var_24, var_14, adj_24, adj_14, adj_47);
    df::adj_atomic_add(var_f, var_12, var_46, adj_f, adj_12, adj_46);
    df::adj_mul(var_42, var_45, adj_42, adj_45, adj_46);
    df::adj_index(var_24, var_10, adj_24, adj_10, adj_45);
    df::adj_atomic_add(var_f, var_8, var_44, adj_f, adj_8, adj_44);
    df::adj_mul(var_42, var_43, adj_42, adj_43, adj_44);
    df::adj_index(var_24, var_6, adj_24, adj_6, adj_43);
    df::adj_atomic_sub(var_f, var_2, var_42, adj_f, adj_2, adj_42);
    df::adj_mul(var_40, var_41, adj_40, adj_41, adj_42);
    df::adj_mul(var_35, var_39, adj_35, adj_39, adj_40);
    df::adj_min(var_37, var_38, adj_37, adj_38, adj_39);
    df::adj_sub(var_34, var_36, adj_34, adj_36, adj_37);
    df::adj_normalize(var_33, adj_33, adj_35);
    df::adj_dot(var_33, var_33, adj_33, adj_33, adj_34);
    df::adj_sub(var_3, var_32, adj_3, adj_32, adj_33);
    df::adj_add(var_29, var_31, adj_29, adj_31, adj_32);
    df::adj_mul(var_23, var_30, adj_23, adj_30, adj_31);
    df::adj_index(var_24, var_14, adj_24, adj_14, adj_30);
    df::adj_add(var_26, var_28, adj_26, adj_28, adj_29);
    df::adj_mul(var_22, var_27, adj_22, adj_27, adj_28);
    df::adj_index(var_24, var_10, adj_24, adj_10, adj_27);
    df::adj_mul(var_21, var_25, adj_21, adj_25, adj_26);
    df::adj_index(var_24, var_6, adj_24, adj_6, adj_25);
    adj_triangle_closest_point_barycentric_cpu_func(var_21, var_22, var_23, var_3, adj_21, adj_22, adj_23, adj_3, adj_24);
    df::adj_load(var_x, var_16, adj_x, adj_16, adj_23);
    df::adj_load(var_x, var_12, adj_x, adj_12, adj_22);
    df::adj_load(var_x, var_8, adj_x, adj_8, adj_21);
    // reverse-pass landing point for the forward early-exit (self-contact)
    if (var_20) {
    	label0:;
    }
    df::adj_load(var_indices, var_15, adj_indices, adj_15, adj_16);
    df::adj_add(var_13, var_14, adj_13, adj_14, adj_15);
    df::adj_mul(var_1, var_4, adj_1, adj_4, adj_13);
    df::adj_load(var_indices, var_11, adj_indices, adj_11, adj_12);
    df::adj_add(var_9, var_10, adj_9, adj_10, adj_11);
    df::adj_mul(var_1, var_4, adj_1, adj_4, adj_9);
    df::adj_load(var_indices, var_7, adj_indices, adj_7, adj_8);
    df::adj_add(var_5, var_6, adj_5, adj_6, adj_7);
    df::adj_mul(var_1, var_4, adj_1, adj_4, adj_5);
    df::adj_load(var_x, var_2, adj_x, adj_2, adj_3);
    df::adj_mod(var_0, var_num_particles, adj_0, adj_num_particles, adj_2);
    df::adj_div(var_0, var_num_particles, adj_0, adj_num_particles, adj_1);
    return;
}
// Python entry points
void eval_triangles_contact_cpu_forward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_contact_cpu_kernel_forward(
var_num_particles,
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat22*>(var_pose),
cast<float*>(var_activation),
var_k_mu,
var_k_lambda,
var_k_damp,
var_k_drag,
var_k_lift,
cast<df::float3*>(var_f));
}
}
void eval_triangles_contact_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
float var_k_mu,
float var_k_lambda,
float var_k_damp,
float var_k_drag,
float var_k_lift,
torch::Tensor var_f,
int adj_num_particles,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
float adj_k_mu,
float adj_k_lambda,
float adj_k_damp,
float adj_k_drag,
float adj_k_lift,
torch::Tensor adj_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_triangles_contact_cpu_kernel_backward(
var_num_particles,
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<int*>(var_indices),
cast<mat22*>(var_pose),
cast<float*>(var_activation),
var_k_mu,
var_k_lambda,
var_k_damp,
var_k_drag,
var_k_lift,
cast<df::float3*>(var_f),
adj_num_particles,
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<int*>(adj_indices),
cast<mat22*>(adj_pose),
cast<float*>(adj_activation),
adj_k_mu,
adj_k_lambda,
adj_k_damp,
adj_k_drag,
adj_k_lift,
cast<df::float3*>(adj_f));
}
}
// Python entry points
// NOTE(review): redundant re-declarations of the two contact entry points
// defined immediately above (the generator emits a prototype block per
// kernel). Legal and harmless; kept byte-identical -- this file is
// machine-generated.
void eval_triangles_contact_cpu_forward(int dim,
    int var_num_particles,
    torch::Tensor var_x,
    torch::Tensor var_v,
    torch::Tensor var_indices,
    torch::Tensor var_pose,
    torch::Tensor var_activation,
    float var_k_mu,
    float var_k_lambda,
    float var_k_damp,
    float var_k_drag,
    float var_k_lift,
    torch::Tensor var_f);
void eval_triangles_contact_cpu_backward(int dim,
    int var_num_particles,
    torch::Tensor var_x,
    torch::Tensor var_v,
    torch::Tensor var_indices,
    torch::Tensor var_pose,
    torch::Tensor var_activation,
    float var_k_mu,
    float var_k_lambda,
    float var_k_damp,
    float var_k_drag,
    float var_k_lift,
    torch::Tensor var_f,
    int adj_num_particles,
    torch::Tensor adj_x,
    torch::Tensor adj_v,
    torch::Tensor adj_indices,
    torch::Tensor adj_pose,
    torch::Tensor adj_activation,
    float adj_k_mu,
    float adj_k_lambda,
    float adj_k_damp,
    float adj_k_drag,
    float adj_k_lift,
    torch::Tensor adj_f);
// Rigid-body contact-point vs. triangle contact, forward pass.
// Machine-generated SSA-style code -- do not edit by hand.
// Thread decomposition: tid = triangle_id * num_particles + contact_id
// (var_1 = triangle index, var_2 = contact index). For each pair:
//   1. look up 4 material params per contact (presumably ke/kd/kf/mu in that
//      order -- confirm against the generating Python kernel),
//   2. compute the contact point in world space from the rigid body pose,
//      plus its velocity v + w x r,
//   3. find the closest point on the triangle (barycentric),
//   4. apply a penalty normal force and a clamped (friction-cone-like)
//      tangential force, scattered to the triangle vertices by barycentric
//      weights. Only the triangle side of the force is written (var_tri_f);
//      no reaction is applied to the rigid body here.
void eval_triangles_rigid_contacts_cpu_kernel_forward(
    int var_num_particles,
    df::float3* var_x,
    df::float3* var_v,
    int* var_indices,
    df::float3* var_rigid_x,
    quat* var_rigid_r,
    df::float3* var_rigid_v,
    df::float3* var_rigid_w,
    int* var_contact_body,
    df::float3* var_contact_point,
    float* var_contact_dist,
    int* var_contact_mat,
    float* var_materials,
    df::float3* var_tri_f)
{
    //---------
    // primal vars
    int var_0;
    int var_1;
    int var_2;
    int var_3;
    df::float3 var_4;
    float var_5;
    int var_6;
    const int var_7 = 4;
    int var_8;
    const int var_9 = 0;
    int var_10;
    float var_11;
    int var_12;
    const int var_13 = 1;
    int var_14;
    float var_15;
    int var_16;
    const int var_17 = 2;
    int var_18;
    float var_19;
    int var_20;
    const int var_21 = 3;
    int var_22;
    float var_23;
    df::float3 var_24;
    quat var_25;
    df::float3 var_26;
    df::float3 var_27;
    df::float3 var_28;
    df::float3 var_29;
    df::float3 var_30;
    df::float3 var_31;
    df::float3 var_32;
    df::float3 var_33;
    df::float3 var_34;
    df::float3 var_35;
    int var_36;
    int var_37;
    int var_38;
    int var_39;
    int var_40;
    int var_41;
    int var_42;
    int var_43;
    int var_44;
    df::float3 var_45;
    df::float3 var_46;
    df::float3 var_47;
    df::float3 var_48;
    df::float3 var_49;
    df::float3 var_50;
    df::float3 var_51;
    float var_52;
    df::float3 var_53;
    float var_54;
    df::float3 var_55;
    df::float3 var_56;
    float var_57;
    df::float3 var_58;
    df::float3 var_59;
    df::float3 var_60;
    float var_61;
    df::float3 var_62;
    const float var_63 = 0.05;   // contact margin (compared against a squared distance below)
    float var_64;
    const float var_65 = 0.0;
    float var_66;
    float var_67;
    float var_68;
    df::float3 var_69;
    float var_70;
    df::float3 var_71;
    df::float3 var_72;
    float var_73;
    df::float3 var_74;
    df::float3 var_75;
    df::float3 var_76;
    float var_77;
    df::float3 var_78;
    df::float3 var_79;
    float var_80;
    float var_81;
    float var_82;
    float var_83;
    float var_84;
    float var_85;
    float var_86;
    float var_87;
    const float var_88 = 1.0;
    df::float3 var_89;
    df::float3 var_90;
    df::float3 var_91;
    df::float3 var_92;
    df::float3 var_93;
    float var_94;
    float var_95;
    df::float3 var_96;
    float var_97;
    float var_98;
    df::float3 var_99;
    df::float3 var_100;
    df::float3 var_101;
    float var_102;
    float var_103;
    df::float3 var_104;
    float var_105;
    df::float3 var_106;
    df::float3 var_107;
    float var_108;
    df::float3 var_109;
    float var_110;
    df::float3 var_111;
    float var_112;
    df::float3 var_113;
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::div(var_0, var_num_particles);   // triangle index
    var_2 = df::mod(var_0, var_num_particles);   // contact index
    var_3 = df::load(var_contact_body, var_2);   // rigid body owning this contact
    var_4 = df::load(var_contact_point, var_2);  // contact point in body frame
    var_5 = df::load(var_contact_dist, var_2);
    var_6 = df::load(var_contact_mat, var_2);
    // 4 material parameters per contact material
    var_8 = df::mul(var_6, var_7);
    var_10 = df::add(var_8, var_9);
    var_11 = df::load(var_materials, var_10);    // normal stiffness (ke?)
    var_12 = df::mul(var_6, var_7);
    var_14 = df::add(var_12, var_13);
    var_15 = df::load(var_materials, var_14);    // damping (kd?)
    var_16 = df::mul(var_6, var_7);
    var_18 = df::add(var_16, var_17);
    var_19 = df::load(var_materials, var_18);    // friction stiffness (kf?)
    var_20 = df::mul(var_6, var_7);
    var_22 = df::add(var_20, var_21);
    var_23 = df::load(var_materials, var_22);    // friction coefficient (mu?)
    // rigid body state
    var_24 = df::load(var_rigid_x, var_3);
    var_25 = df::load(var_rigid_r, var_3);
    var_26 = df::load(var_rigid_v, var_3);
    var_27 = df::load(var_rigid_w, var_3);
    // world-space contact point and its velocity (v + w x r)
    var_28 = df::rotate(var_25, var_4);
    var_29 = df::add(var_24, var_28);
    var_30 = df::sub(var_29, var_24);            // moment arm (== var_28)
    var_31 = df::normalize(var_30);
    var_32 = df::mul(var_31, var_5);
    var_33 = df::add(var_29, var_32);            // contact point pushed out by contact_dist
    var_34 = df::cross(var_27, var_30);
    var_35 = df::add(var_26, var_34);            // velocity of the contact point
    // triangle vertex indices and states
    var_36 = df::mul(var_1, var_21);
    var_37 = df::add(var_36, var_9);
    var_38 = df::load(var_indices, var_37);
    var_39 = df::mul(var_1, var_21);
    var_40 = df::add(var_39, var_13);
    var_41 = df::load(var_indices, var_40);
    var_42 = df::mul(var_1, var_21);
    var_43 = df::add(var_42, var_17);
    var_44 = df::load(var_indices, var_43);
    var_45 = df::load(var_x, var_38);
    var_46 = df::load(var_x, var_41);
    var_47 = df::load(var_x, var_44);
    var_48 = df::load(var_v, var_38);
    var_49 = df::load(var_v, var_41);
    var_50 = df::load(var_v, var_44);
    // closest point on the triangle to the contact point (barycentric coords)
    var_51 = triangle_closest_point_barycentric_cpu_func(var_45, var_46, var_47, var_33);
    var_52 = df::index(var_51, var_9);
    var_53 = df::mul(var_45, var_52);
    var_54 = df::index(var_51, var_13);
    var_55 = df::mul(var_46, var_54);
    var_56 = df::add(var_53, var_55);
    var_57 = df::index(var_51, var_17);
    var_58 = df::mul(var_47, var_57);
    var_59 = df::add(var_56, var_58);
    var_60 = df::sub(var_33, var_59);
    // NOTE(review): var_61 is the SQUARED separation but is compared against
    // the 0.05 margin below -- mirrors the upstream Python kernel; confirm
    // intent there before changing.
    var_61 = df::dot(var_60, var_60);
    var_62 = df::normalize(var_60);              // contact normal
    var_64 = df::sub(var_61, var_63);
    var_66 = df::min(var_64, var_65);            // penetration depth (<= 0 when in contact)
    var_67 = df::mul(var_66, var_11);            // normal (elastic) force magnitude
    // relative velocity at the closest point (triangle minus rigid body)
    var_68 = df::index(var_51, var_9);
    var_69 = df::mul(var_48, var_68);
    var_70 = df::index(var_51, var_13);
    var_71 = df::mul(var_49, var_70);
    var_72 = df::add(var_69, var_71);
    var_73 = df::index(var_51, var_17);
    var_74 = df::mul(var_50, var_73);
    var_75 = df::add(var_72, var_74);
    var_76 = df::sub(var_75, var_35);
    var_77 = df::dot(var_62, var_76);            // normal component of relative velocity
    var_78 = df::mul(var_62, var_77);
    var_79 = df::sub(var_76, var_78);            // tangential relative velocity
    // damping: only oppose approaching velocity, gated on actual contact
    var_80 = df::max(var_77, var_65);
    var_81 = df::mul(var_80, var_15);
    var_82 = df::step(var_66);
    var_83 = df::mul(var_81, var_82);
    var_84 = df::sub(var_65, var_83);
    var_85 = df::add(var_67, var_84);            // total normal force magnitude
    var_86 = df::mul(var_23, var_85);            // friction limit (mu * fn); fn <= 0, so var_86 is the lower clamp bound
    var_87 = df::sub(var_65, var_86);
    // two fixed tangent directions from the contact normal
    var_89 = df::float3(var_65, var_65, var_88);
    var_90 = df::cross(var_62, var_89);
    var_91 = df::float3(var_88, var_65, var_65);
    var_92 = df::cross(var_62, var_91);
    // tangential (friction) force, clamped component-wise to the friction cone box
    var_93 = df::mul(var_90, var_19);
    var_94 = df::dot(var_93, var_79);
    var_95 = df::clamp(var_94, var_86, var_87);
    var_96 = df::mul(var_92, var_19);
    var_97 = df::dot(var_96, var_79);
    var_98 = df::clamp(var_97, var_86, var_87);
    var_99 = df::mul(var_90, var_95);
    var_100 = df::mul(var_92, var_98);
    var_101 = df::add(var_99, var_100);
    var_102 = df::step(var_66);
    var_103 = df::sub(var_65, var_102);          // friction active only when in contact
    var_104 = df::mul(var_101, var_103);
    var_105 = df::add(var_67, var_84);
    var_106 = df::mul(var_62, var_105);          // normal force vector
    var_107 = df::add(var_106, var_104);         // total contact force on the triangle
    // scatter the force to the three vertices by barycentric weights
    var_108 = df::index(var_51, var_9);
    var_109 = df::mul(var_107, var_108);
    df::atomic_add(var_tri_f, var_38, var_109);
    var_110 = df::index(var_51, var_13);
    var_111 = df::mul(var_107, var_110);
    df::atomic_add(var_tri_f, var_41, var_111);
    var_112 = df::index(var_51, var_17);
    var_113 = df::mul(var_107, var_112);
    df::atomic_add(var_tri_f, var_44, var_113);
}
void eval_triangles_rigid_contacts_cpu_kernel_backward(
int var_num_particles,
df::float3* var_x,
df::float3* var_v,
int* var_indices,
df::float3* var_rigid_x,
quat* var_rigid_r,
df::float3* var_rigid_v,
df::float3* var_rigid_w,
int* var_contact_body,
df::float3* var_contact_point,
float* var_contact_dist,
int* var_contact_mat,
float* var_materials,
df::float3* var_tri_f,
int adj_num_particles,
df::float3* adj_x,
df::float3* adj_v,
int* adj_indices,
df::float3* adj_rigid_x,
quat* adj_rigid_r,
df::float3* adj_rigid_v,
df::float3* adj_rigid_w,
int* adj_contact_body,
df::float3* adj_contact_point,
float* adj_contact_dist,
int* adj_contact_mat,
float* adj_materials,
df::float3* adj_tri_f)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
int var_3;
df::float3 var_4;
float var_5;
int var_6;
const int var_7 = 4;
int var_8;
const int var_9 = 0;
int var_10;
float var_11;
int var_12;
const int var_13 = 1;
int var_14;
float var_15;
int var_16;
const int var_17 = 2;
int var_18;
float var_19;
int var_20;
const int var_21 = 3;
int var_22;
float var_23;
df::float3 var_24;
quat var_25;
df::float3 var_26;
df::float3 var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
int var_36;
int var_37;
int var_38;
int var_39;
int var_40;
int var_41;
int var_42;
int var_43;
int var_44;
df::float3 var_45;
df::float3 var_46;
df::float3 var_47;
df::float3 var_48;
df::float3 var_49;
df::float3 var_50;
df::float3 var_51;
float var_52;
df::float3 var_53;
float var_54;
df::float3 var_55;
df::float3 var_56;
float var_57;
df::float3 var_58;
df::float3 var_59;
df::float3 var_60;
float var_61;
df::float3 var_62;
const float var_63 = 0.05;
float var_64;
const float var_65 = 0.0;
float var_66;
float var_67;
float var_68;
df::float3 var_69;
float var_70;
df::float3 var_71;
df::float3 var_72;
float var_73;
df::float3 var_74;
df::float3 var_75;
df::float3 var_76;
float var_77;
df::float3 var_78;
df::float3 var_79;
float var_80;
float var_81;
float var_82;
float var_83;
float var_84;
float var_85;
float var_86;
float var_87;
const float var_88 = 1.0;
df::float3 var_89;
df::float3 var_90;
df::float3 var_91;
df::float3 var_92;
df::float3 var_93;
float var_94;
float var_95;
df::float3 var_96;
float var_97;
float var_98;
df::float3 var_99;
df::float3 var_100;
df::float3 var_101;
float var_102;
float var_103;
df::float3 var_104;
float var_105;
df::float3 var_106;
df::float3 var_107;
float var_108;
df::float3 var_109;
float var_110;
df::float3 var_111;
float var_112;
df::float3 var_113;
//---------
// dual vars
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
df::float3 adj_4 = 0;
float adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
float adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
int adj_14 = 0;
float adj_15 = 0;
int adj_16 = 0;
int adj_17 = 0;
int adj_18 = 0;
float adj_19 = 0;
int adj_20 = 0;
int adj_21 = 0;
int adj_22 = 0;
float adj_23 = 0;
df::float3 adj_24 = 0;
quat adj_25 = 0;
df::float3 adj_26 = 0;
df::float3 adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
df::float3 adj_34 = 0;
df::float3 adj_35 = 0;
int adj_36 = 0;
int adj_37 = 0;
int adj_38 = 0;
int adj_39 = 0;
int adj_40 = 0;
int adj_41 = 0;
int adj_42 = 0;
int adj_43 = 0;
int adj_44 = 0;
df::float3 adj_45 = 0;
df::float3 adj_46 = 0;
df::float3 adj_47 = 0;
df::float3 adj_48 = 0;
df::float3 adj_49 = 0;
df::float3 adj_50 = 0;
df::float3 adj_51 = 0;
float adj_52 = 0;
df::float3 adj_53 = 0;
float adj_54 = 0;
df::float3 adj_55 = 0;
df::float3 adj_56 = 0;
float adj_57 = 0;
df::float3 adj_58 = 0;
df::float3 adj_59 = 0;
df::float3 adj_60 = 0;
float adj_61 = 0;
df::float3 adj_62 = 0;
float adj_63 = 0;
float adj_64 = 0;
float adj_65 = 0;
float adj_66 = 0;
float adj_67 = 0;
float adj_68 = 0;
df::float3 adj_69 = 0;
float adj_70 = 0;
df::float3 adj_71 = 0;
df::float3 adj_72 = 0;
float adj_73 = 0;
df::float3 adj_74 = 0;
df::float3 adj_75 = 0;
df::float3 adj_76 = 0;
float adj_77 = 0;
df::float3 adj_78 = 0;
df::float3 adj_79 = 0;
float adj_80 = 0;
float adj_81 = 0;
float adj_82 = 0;
float adj_83 = 0;
float adj_84 = 0;
float adj_85 = 0;
float adj_86 = 0;
float adj_87 = 0;
float adj_88 = 0;
df::float3 adj_89 = 0;
df::float3 adj_90 = 0;
df::float3 adj_91 = 0;
df::float3 adj_92 = 0;
df::float3 adj_93 = 0;
float adj_94 = 0;
float adj_95 = 0;
df::float3 adj_96 = 0;
float adj_97 = 0;
float adj_98 = 0;
df::float3 adj_99 = 0;
df::float3 adj_100 = 0;
df::float3 adj_101 = 0;
float adj_102 = 0;
float adj_103 = 0;
df::float3 adj_104 = 0;
float adj_105 = 0;
df::float3 adj_106 = 0;
df::float3 adj_107 = 0;
float adj_108 = 0;
df::float3 adj_109 = 0;
float adj_110 = 0;
df::float3 adj_111 = 0;
float adj_112 = 0;
df::float3 adj_113 = 0;
//---------
// forward
var_0 = df::tid();
var_1 = df::div(var_0, var_num_particles);
var_2 = df::mod(var_0, var_num_particles);
var_3 = df::load(var_contact_body, var_2);
var_4 = df::load(var_contact_point, var_2);
var_5 = df::load(var_contact_dist, var_2);
var_6 = df::load(var_contact_mat, var_2);
var_8 = df::mul(var_6, var_7);
var_10 = df::add(var_8, var_9);
var_11 = df::load(var_materials, var_10);
var_12 = df::mul(var_6, var_7);
var_14 = df::add(var_12, var_13);
var_15 = df::load(var_materials, var_14);
var_16 = df::mul(var_6, var_7);
var_18 = df::add(var_16, var_17);
var_19 = df::load(var_materials, var_18);
var_20 = df::mul(var_6, var_7);
var_22 = df::add(var_20, var_21);
var_23 = df::load(var_materials, var_22);
var_24 = df::load(var_rigid_x, var_3);
var_25 = df::load(var_rigid_r, var_3);
var_26 = df::load(var_rigid_v, var_3);
var_27 = df::load(var_rigid_w, var_3);
var_28 = df::rotate(var_25, var_4);
var_29 = df::add(var_24, var_28);
var_30 = df::sub(var_29, var_24);
var_31 = df::normalize(var_30);
var_32 = df::mul(var_31, var_5);
var_33 = df::add(var_29, var_32);
var_34 = df::cross(var_27, var_30);
var_35 = df::add(var_26, var_34);
var_36 = df::mul(var_1, var_21);
var_37 = df::add(var_36, var_9);
var_38 = df::load(var_indices, var_37);
var_39 = df::mul(var_1, var_21);
var_40 = df::add(var_39, var_13);
var_41 = df::load(var_indices, var_40);
var_42 = df::mul(var_1, var_21);
var_43 = df::add(var_42, var_17);
var_44 = df::load(var_indices, var_43);
var_45 = df::load(var_x, var_38);
var_46 = df::load(var_x, var_41);
var_47 = df::load(var_x, var_44);
var_48 = df::load(var_v, var_38);
var_49 = df::load(var_v, var_41);
var_50 = df::load(var_v, var_44);
var_51 = triangle_closest_point_barycentric_cpu_func(var_45, var_46, var_47, var_33);
var_52 = df::index(var_51, var_9);
var_53 = df::mul(var_45, var_52);
var_54 = df::index(var_51, var_13);
var_55 = df::mul(var_46, var_54);
var_56 = df::add(var_53, var_55);
var_57 = df::index(var_51, var_17);
var_58 = df::mul(var_47, var_57);
var_59 = df::add(var_56, var_58);
var_60 = df::sub(var_33, var_59);
var_61 = df::dot(var_60, var_60);
var_62 = df::normalize(var_60);
var_64 = df::sub(var_61, var_63);
var_66 = df::min(var_64, var_65);
var_67 = df::mul(var_66, var_11);
var_68 = df::index(var_51, var_9);
var_69 = df::mul(var_48, var_68);
var_70 = df::index(var_51, var_13);
var_71 = df::mul(var_49, var_70);
var_72 = df::add(var_69, var_71);
var_73 = df::index(var_51, var_17);
var_74 = df::mul(var_50, var_73);
var_75 = df::add(var_72, var_74);
var_76 = df::sub(var_75, var_35);
var_77 = df::dot(var_62, var_76);
var_78 = df::mul(var_62, var_77);
var_79 = df::sub(var_76, var_78);
var_80 = df::max(var_77, var_65);
var_81 = df::mul(var_80, var_15);
var_82 = df::step(var_66);
var_83 = df::mul(var_81, var_82);
var_84 = df::sub(var_65, var_83);
var_85 = df::add(var_67, var_84);
var_86 = df::mul(var_23, var_85);
var_87 = df::sub(var_65, var_86);
var_89 = df::float3(var_65, var_65, var_88);
var_90 = df::cross(var_62, var_89);
var_91 = df::float3(var_88, var_65, var_65);
var_92 = df::cross(var_62, var_91);
var_93 = df::mul(var_90, var_19);
var_94 = df::dot(var_93, var_79);
var_95 = df::clamp(var_94, var_86, var_87);
var_96 = df::mul(var_92, var_19);
var_97 = df::dot(var_96, var_79);
var_98 = df::clamp(var_97, var_86, var_87);
var_99 = df::mul(var_90, var_95);
var_100 = df::mul(var_92, var_98);
var_101 = df::add(var_99, var_100);
var_102 = df::step(var_66);
var_103 = df::sub(var_65, var_102);
var_104 = df::mul(var_101, var_103);
var_105 = df::add(var_67, var_84);
var_106 = df::mul(var_62, var_105);
var_107 = df::add(var_106, var_104);
var_108 = df::index(var_51, var_9);
var_109 = df::mul(var_107, var_108);
df::atomic_add(var_tri_f, var_38, var_109);
var_110 = df::index(var_51, var_13);
var_111 = df::mul(var_107, var_110);
df::atomic_add(var_tri_f, var_41, var_111);
var_112 = df::index(var_51, var_17);
var_113 = df::mul(var_107, var_112);
df::atomic_add(var_tri_f, var_44, var_113);
//---------
// reverse
df::adj_atomic_add(var_tri_f, var_44, var_113, adj_tri_f, adj_44, adj_113);
df::adj_mul(var_107, var_112, adj_107, adj_112, adj_113);
df::adj_index(var_51, var_17, adj_51, adj_17, adj_112);
df::adj_atomic_add(var_tri_f, var_41, var_111, adj_tri_f, adj_41, adj_111);
df::adj_mul(var_107, var_110, adj_107, adj_110, adj_111);
df::adj_index(var_51, var_13, adj_51, adj_13, adj_110);
df::adj_atomic_add(var_tri_f, var_38, var_109, adj_tri_f, adj_38, adj_109);
df::adj_mul(var_107, var_108, adj_107, adj_108, adj_109);
df::adj_index(var_51, var_9, adj_51, adj_9, adj_108);
df::adj_add(var_106, var_104, adj_106, adj_104, adj_107);
df::adj_mul(var_62, var_105, adj_62, adj_105, adj_106);
df::adj_add(var_67, var_84, adj_67, adj_84, adj_105);
df::adj_mul(var_101, var_103, adj_101, adj_103, adj_104);
df::adj_sub(var_65, var_102, adj_65, adj_102, adj_103);
df::adj_step(var_66, adj_66, adj_102);
df::adj_add(var_99, var_100, adj_99, adj_100, adj_101);
df::adj_mul(var_92, var_98, adj_92, adj_98, adj_100);
df::adj_mul(var_90, var_95, adj_90, adj_95, adj_99);
df::adj_clamp(var_97, var_86, var_87, adj_97, adj_86, adj_87, adj_98);
df::adj_dot(var_96, var_79, adj_96, adj_79, adj_97);
df::adj_mul(var_92, var_19, adj_92, adj_19, adj_96);
df::adj_clamp(var_94, var_86, var_87, adj_94, adj_86, adj_87, adj_95);
df::adj_dot(var_93, var_79, adj_93, adj_79, adj_94);
df::adj_mul(var_90, var_19, adj_90, adj_19, adj_93);
df::adj_cross(var_62, var_91, adj_62, adj_91, adj_92);
df::adj_float3(var_88, var_65, var_65, adj_88, adj_65, adj_65, adj_91);
df::adj_cross(var_62, var_89, adj_62, adj_89, adj_90);
df::adj_float3(var_65, var_65, var_88, adj_65, adj_65, adj_88, adj_89);
df::adj_sub(var_65, var_86, adj_65, adj_86, adj_87);
df::adj_mul(var_23, var_85, adj_23, adj_85, adj_86);
df::adj_add(var_67, var_84, adj_67, adj_84, adj_85);
df::adj_sub(var_65, var_83, adj_65, adj_83, adj_84);
df::adj_mul(var_81, var_82, adj_81, adj_82, adj_83);
df::adj_step(var_66, adj_66, adj_82);
df::adj_mul(var_80, var_15, adj_80, adj_15, adj_81);
df::adj_max(var_77, var_65, adj_77, adj_65, adj_80);
df::adj_sub(var_76, var_78, adj_76, adj_78, adj_79);
df::adj_mul(var_62, var_77, adj_62, adj_77, adj_78);
df::adj_dot(var_62, var_76, adj_62, adj_76, adj_77);
df::adj_sub(var_75, var_35, adj_75, adj_35, adj_76);
df::adj_add(var_72, var_74, adj_72, adj_74, adj_75);
df::adj_mul(var_50, var_73, adj_50, adj_73, adj_74);
df::adj_index(var_51, var_17, adj_51, adj_17, adj_73);
df::adj_add(var_69, var_71, adj_69, adj_71, adj_72);
df::adj_mul(var_49, var_70, adj_49, adj_70, adj_71);
df::adj_index(var_51, var_13, adj_51, adj_13, adj_70);
df::adj_mul(var_48, var_68, adj_48, adj_68, adj_69);
df::adj_index(var_51, var_9, adj_51, adj_9, adj_68);
df::adj_mul(var_66, var_11, adj_66, adj_11, adj_67);
df::adj_min(var_64, var_65, adj_64, adj_65, adj_66);
df::adj_sub(var_61, var_63, adj_61, adj_63, adj_64);
df::adj_normalize(var_60, adj_60, adj_62);
df::adj_dot(var_60, var_60, adj_60, adj_60, adj_61);
df::adj_sub(var_33, var_59, adj_33, adj_59, adj_60);
df::adj_add(var_56, var_58, adj_56, adj_58, adj_59);
df::adj_mul(var_47, var_57, adj_47, adj_57, adj_58);
df::adj_index(var_51, var_17, adj_51, adj_17, adj_57);
df::adj_add(var_53, var_55, adj_53, adj_55, adj_56);
df::adj_mul(var_46, var_54, adj_46, adj_54, adj_55);
df::adj_index(var_51, var_13, adj_51, adj_13, adj_54);
df::adj_mul(var_45, var_52, adj_45, adj_52, adj_53);
df::adj_index(var_51, var_9, adj_51, adj_9, adj_52);
adj_triangle_closest_point_barycentric_cpu_func(var_45, var_46, var_47, var_33, adj_45, adj_46, adj_47, adj_33, adj_51);
df::adj_load(var_v, var_44, adj_v, adj_44, adj_50);
df::adj_load(var_v, var_41, adj_v, adj_41, adj_49);
df::adj_load(var_v, var_38, adj_v, adj_38, adj_48);
df::adj_load(var_x, var_44, adj_x, adj_44, adj_47);
df::adj_load(var_x, var_41, adj_x, adj_41, adj_46);
df::adj_load(var_x, var_38, adj_x, adj_38, adj_45);
df::adj_load(var_indices, var_43, adj_indices, adj_43, adj_44);
df::adj_add(var_42, var_17, adj_42, adj_17, adj_43);
df::adj_mul(var_1, var_21, adj_1, adj_21, adj_42);
df::adj_load(var_indices, var_40, adj_indices, adj_40, adj_41);
df::adj_add(var_39, var_13, adj_39, adj_13, adj_40);
df::adj_mul(var_1, var_21, adj_1, adj_21, adj_39);
df::adj_load(var_indices, var_37, adj_indices, adj_37, adj_38);
df::adj_add(var_36, var_9, adj_36, adj_9, adj_37);
df::adj_mul(var_1, var_21, adj_1, adj_21, adj_36);
df::adj_add(var_26, var_34, adj_26, adj_34, adj_35);
df::adj_cross(var_27, var_30, adj_27, adj_30, adj_34);
df::adj_add(var_29, var_32, adj_29, adj_32, adj_33);
df::adj_mul(var_31, var_5, adj_31, adj_5, adj_32);
df::adj_normalize(var_30, adj_30, adj_31);
df::adj_sub(var_29, var_24, adj_29, adj_24, adj_30);
df::adj_add(var_24, var_28, adj_24, adj_28, adj_29);
df::adj_rotate(var_25, var_4, adj_25, adj_4, adj_28);
df::adj_load(var_rigid_w, var_3, adj_rigid_w, adj_3, adj_27);
df::adj_load(var_rigid_v, var_3, adj_rigid_v, adj_3, adj_26);
df::adj_load(var_rigid_r, var_3, adj_rigid_r, adj_3, adj_25);
df::adj_load(var_rigid_x, var_3, adj_rigid_x, adj_3, adj_24);
df::adj_load(var_materials, var_22, adj_materials, adj_22, adj_23);
df::adj_add(var_20, var_21, adj_20, adj_21, adj_22);
df::adj_mul(var_6, var_7, adj_6, adj_7, adj_20);
df::adj_load(var_materials, var_18, adj_materials, adj_18, adj_19);
df::adj_add(var_16, var_17, adj_16, adj_17, adj_18);
df::adj_mul(var_6, var_7, adj_6, adj_7, adj_16);
df::adj_load(var_materials, var_14, adj_materials, adj_14, adj_15);
df::adj_add(var_12, var_13, adj_12, adj_13, adj_14);
df::adj_mul(var_6, var_7, adj_6, adj_7, adj_12);
df::adj_load(var_materials, var_10, adj_materials, adj_10, adj_11);
df::adj_add(var_8, var_9, adj_8, adj_9, adj_10);
df::adj_mul(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_load(var_contact_mat, var_2, adj_contact_mat, adj_2, adj_6);
df::adj_load(var_contact_dist, var_2, adj_contact_dist, adj_2, adj_5);
df::adj_load(var_contact_point, var_2, adj_contact_point, adj_2, adj_4);
df::adj_load(var_contact_body, var_2, adj_contact_body, adj_2, adj_3);
df::adj_mod(var_0, var_num_particles, adj_0, adj_num_particles, adj_2);
df::adj_div(var_0, var_num_particles, adj_0, adj_num_particles, adj_1);
return;
}
// Python entry point: serially emulates a parallel launch of the
// triangle/rigid-body contact forward kernel, invoking it once for every
// thread index in [0, dim).  The kernel reads its "thread id" from the
// global s_threadIdx, mirroring the CUDA build's threadIdx.
void eval_triangles_rigid_contacts_cpu_forward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_tri_f)
{
    for (int tid = 0; tid < dim; ++tid)
    {
        // publish the emulated thread index for this serial "launch"
        s_threadIdx = tid;

        eval_triangles_rigid_contacts_cpu_kernel_forward(
            var_num_particles,
            cast<df::float3*>(var_x), cast<df::float3*>(var_v), cast<int*>(var_indices),
            cast<df::float3*>(var_rigid_x), cast<quat*>(var_rigid_r),
            cast<df::float3*>(var_rigid_v), cast<df::float3*>(var_rigid_w),
            cast<int*>(var_contact_body), cast<df::float3*>(var_contact_point),
            cast<float*>(var_contact_dist), cast<int*>(var_contact_mat),
            cast<float*>(var_materials),
            cast<df::float3*>(var_tri_f));
    }
}
// Python entry point: serially emulates a parallel launch of the
// triangle/rigid-body contact adjoint kernel.  Primal buffers (var_*) are
// read; gradient buffers (adj_*) are accumulated into by the kernel.
void eval_triangles_rigid_contacts_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_tri_f,
int adj_num_particles,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_tri_f)
{
    for (int tid = 0; tid < dim; ++tid)
    {
        // publish the emulated thread index for this serial "launch"
        s_threadIdx = tid;

        eval_triangles_rigid_contacts_cpu_kernel_backward(
            var_num_particles,
            cast<df::float3*>(var_x), cast<df::float3*>(var_v), cast<int*>(var_indices),
            cast<df::float3*>(var_rigid_x), cast<quat*>(var_rigid_r),
            cast<df::float3*>(var_rigid_v), cast<df::float3*>(var_rigid_w),
            cast<int*>(var_contact_body), cast<df::float3*>(var_contact_point),
            cast<float*>(var_contact_dist), cast<int*>(var_contact_mat),
            cast<float*>(var_materials),
            cast<df::float3*>(var_tri_f),
            adj_num_particles,
            cast<df::float3*>(adj_x), cast<df::float3*>(adj_v), cast<int*>(adj_indices),
            cast<df::float3*>(adj_rigid_x), cast<quat*>(adj_rigid_r),
            cast<df::float3*>(adj_rigid_v), cast<df::float3*>(adj_rigid_w),
            cast<int*>(adj_contact_body), cast<df::float3*>(adj_contact_point),
            cast<float*>(adj_contact_dist), cast<int*>(adj_contact_mat),
            cast<float*>(adj_materials),
            cast<df::float3*>(adj_tri_f));
    }
}
// Python entry points
// NOTE(review): redundant forward declarations of the two entry points
// defined immediately above -- emitted by the code generator; harmless.
void eval_triangles_rigid_contacts_cpu_forward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_tri_f);
void eval_triangles_rigid_contacts_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_tri_f,
int adj_num_particles,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_tri_f);
// Generated kernel: bending force for one four-vertex dihedral element per
// call.  The element's vertex indices are read from var_indices[tid*4 ..
// tid*4+3] and its rest angle from var_rest[tid].  The force combines an
// elastic term var_ke*(theta - rest) with a damping term var_kd*dtheta/dt and
// is scattered to the element's four entries of var_f via df::atomic_add.
// The tid comes from df::tid() (serial CPU emulation via s_threadIdx).
void eval_bending_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
float* var_rest,
float var_ke,
float var_kd,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
df::float3 var_22;
df::float3 var_23;
df::float3 var_24;
df::float3 var_25;
df::float3 var_26;
df::float3 var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
float var_33;
float var_34;
const float var_35 = 1.0;
float var_36;
float var_37;
float var_38;
float var_39;
float var_40;
df::float3 var_41;
df::float3 var_42;
df::float3 var_43;
df::float3 var_44;
df::float3 var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
float var_49;
float var_50;
float var_51;
float var_52;
df::float3 var_53;
df::float3 var_54;
df::float3 var_55;
float var_56;
df::float3 var_57;
df::float3 var_58;
float var_59;
df::float3 var_60;
df::float3 var_61;
df::float3 var_62;
float var_63;
df::float3 var_64;
df::float3 var_65;
float var_66;
df::float3 var_67;
df::float3 var_68;
float var_69;
float var_70;
float var_71;
float var_72;
float var_73;
float var_74;
float var_75;
float var_76;
float var_77;
float var_78;
const float var_79 = 0.0;
float var_80;
float var_81;
float var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
df::float3 var_86;
//---------
// forward
// gather the four vertex indices (stride 4) plus rest angle, positions and
// velocities of the element
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_rest, var_0);
var_19 = df::load(var_x, var_5);
var_20 = df::load(var_x, var_9);
var_21 = df::load(var_x, var_13);
var_22 = df::load(var_x, var_17);
var_23 = df::load(var_v, var_5);
var_24 = df::load(var_v, var_9);
var_25 = df::load(var_v, var_13);
var_26 = df::load(var_v, var_17);
// face normals of the two triangles that share the hinge:
// var_29 = cross(x2-x0, x3-x0), var_32 = cross(x3-x1, x2-x1)
var_27 = df::sub(var_21, var_19);
var_28 = df::sub(var_22, var_19);
var_29 = df::cross(var_27, var_28);
var_30 = df::sub(var_22, var_20);
var_31 = df::sub(var_21, var_20);
var_32 = df::cross(var_30, var_31);
// cosine of dihedral angle from the normalized normals (var_40);
// var_42/var_44 are the normals scaled by 1/|n|^2
var_33 = df::length(var_29);
var_34 = df::length(var_32);
var_36 = df::div(var_35, var_33);
var_37 = df::div(var_35, var_34);
var_38 = df::dot(var_29, var_32);
var_39 = df::mul(var_38, var_36);
var_40 = df::mul(var_39, var_37);
var_41 = df::mul(var_29, var_36);
var_42 = df::mul(var_41, var_36);
var_43 = df::mul(var_32, var_37);
var_44 = df::mul(var_43, var_37);
// hinge edge x3-x2: unit direction var_46, length var_47
var_45 = df::sub(var_22, var_21);
var_46 = df::normalize(var_45);
var_47 = df::length(var_45);
// signed dihedral angle: acos(cos_theta) with sign taken from the
// orientation of cross(n2, n1) against the edge direction
var_48 = df::cross(var_44, var_42);
var_49 = df::dot(var_48, var_46);
var_50 = df::sign(var_49);
var_51 = df::acos(var_40);
var_52 = df::mul(var_51, var_50);
// angle gradients w.r.t. the four vertices (var_53, var_54, var_61, var_68)
var_53 = df::mul(var_42, var_47);
var_54 = df::mul(var_44, var_47);
var_55 = df::sub(var_19, var_22);
var_56 = df::dot(var_55, var_46);
var_57 = df::mul(var_42, var_56);
var_58 = df::sub(var_20, var_22);
var_59 = df::dot(var_58, var_46);
var_60 = df::mul(var_44, var_59);
var_61 = df::add(var_57, var_60);
var_62 = df::sub(var_21, var_19);
var_63 = df::dot(var_62, var_46);
var_64 = df::mul(var_42, var_63);
var_65 = df::sub(var_21, var_20);
var_66 = df::dot(var_65, var_46);
var_67 = df::mul(var_44, var_66);
var_68 = df::add(var_64, var_67);
// elastic moment: ke * (theta - rest_angle)
var_69 = df::sub(var_52, var_18);
var_70 = df::mul(var_ke, var_69);
// damping moment: kd * dtheta/dt, via angle-gradient dot vertex velocities
var_71 = df::dot(var_53, var_23);
var_72 = df::dot(var_54, var_24);
var_73 = df::add(var_71, var_72);
var_74 = df::dot(var_61, var_25);
var_75 = df::add(var_73, var_74);
var_76 = df::dot(var_68, var_26);
var_77 = df::add(var_75, var_76);
var_78 = df::mul(var_kd, var_77);
// negated total moment scaled by edge length, then scattered to vertices
var_80 = df::add(var_70, var_78);
var_81 = df::mul(var_47, var_80);
var_82 = df::sub(var_79, var_81);
var_83 = df::mul(var_53, var_82);
df::atomic_add(var_f, var_5, var_83);
var_84 = df::mul(var_54, var_82);
df::atomic_add(var_f, var_9, var_84);
var_85 = df::mul(var_61, var_82);
df::atomic_add(var_f, var_13, var_85);
var_86 = df::mul(var_68, var_82);
df::atomic_add(var_f, var_17, var_86);
}
// Generated adjoint (reverse-mode) kernel for eval_bending_cpu_kernel_forward.
// It first replays the forward pass verbatim to materialize every primal
// intermediate, then sweeps the statements in reverse order, propagating
// gradients through the df::adj_* rules and accumulating them into the
// adj_* output buffers (adj_x, adj_v, adj_rest, ...).
void eval_bending_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
float* var_rest,
float var_ke,
float var_kd,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
int* adj_indices,
float* adj_rest,
float adj_ke,
float adj_kd,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
df::float3 var_19;
df::float3 var_20;
df::float3 var_21;
df::float3 var_22;
df::float3 var_23;
df::float3 var_24;
df::float3 var_25;
df::float3 var_26;
df::float3 var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
float var_33;
float var_34;
const float var_35 = 1.0;
float var_36;
float var_37;
float var_38;
float var_39;
float var_40;
df::float3 var_41;
df::float3 var_42;
df::float3 var_43;
df::float3 var_44;
df::float3 var_45;
df::float3 var_46;
float var_47;
df::float3 var_48;
float var_49;
float var_50;
float var_51;
float var_52;
df::float3 var_53;
df::float3 var_54;
df::float3 var_55;
float var_56;
df::float3 var_57;
df::float3 var_58;
float var_59;
df::float3 var_60;
df::float3 var_61;
df::float3 var_62;
float var_63;
df::float3 var_64;
df::float3 var_65;
float var_66;
df::float3 var_67;
df::float3 var_68;
float var_69;
float var_70;
float var_71;
float var_72;
float var_73;
float var_74;
float var_75;
float var_76;
float var_77;
float var_78;
const float var_79 = 0.0;
float var_80;
float var_81;
float var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
df::float3 var_86;
//---------
// dual vars
// one zero-initialized adjoint accumulator per primal temporary
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
int adj_16 = 0;
int adj_17 = 0;
float adj_18 = 0;
df::float3 adj_19 = 0;
df::float3 adj_20 = 0;
df::float3 adj_21 = 0;
df::float3 adj_22 = 0;
df::float3 adj_23 = 0;
df::float3 adj_24 = 0;
df::float3 adj_25 = 0;
df::float3 adj_26 = 0;
df::float3 adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
float adj_33 = 0;
float adj_34 = 0;
float adj_35 = 0;
float adj_36 = 0;
float adj_37 = 0;
float adj_38 = 0;
float adj_39 = 0;
float adj_40 = 0;
df::float3 adj_41 = 0;
df::float3 adj_42 = 0;
df::float3 adj_43 = 0;
df::float3 adj_44 = 0;
df::float3 adj_45 = 0;
df::float3 adj_46 = 0;
float adj_47 = 0;
df::float3 adj_48 = 0;
float adj_49 = 0;
float adj_50 = 0;
float adj_51 = 0;
float adj_52 = 0;
df::float3 adj_53 = 0;
df::float3 adj_54 = 0;
df::float3 adj_55 = 0;
float adj_56 = 0;
df::float3 adj_57 = 0;
df::float3 adj_58 = 0;
float adj_59 = 0;
df::float3 adj_60 = 0;
df::float3 adj_61 = 0;
df::float3 adj_62 = 0;
float adj_63 = 0;
df::float3 adj_64 = 0;
df::float3 adj_65 = 0;
float adj_66 = 0;
df::float3 adj_67 = 0;
df::float3 adj_68 = 0;
float adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
float adj_73 = 0;
float adj_74 = 0;
float adj_75 = 0;
float adj_76 = 0;
float adj_77 = 0;
float adj_78 = 0;
float adj_79 = 0;
float adj_80 = 0;
float adj_81 = 0;
float adj_82 = 0;
df::float3 adj_83 = 0;
df::float3 adj_84 = 0;
df::float3 adj_85 = 0;
df::float3 adj_86 = 0;
//---------
// forward
// replay of the primal kernel (identical statements) to recompute all
// intermediates needed by the reverse sweep below
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_rest, var_0);
var_19 = df::load(var_x, var_5);
var_20 = df::load(var_x, var_9);
var_21 = df::load(var_x, var_13);
var_22 = df::load(var_x, var_17);
var_23 = df::load(var_v, var_5);
var_24 = df::load(var_v, var_9);
var_25 = df::load(var_v, var_13);
var_26 = df::load(var_v, var_17);
var_27 = df::sub(var_21, var_19);
var_28 = df::sub(var_22, var_19);
var_29 = df::cross(var_27, var_28);
var_30 = df::sub(var_22, var_20);
var_31 = df::sub(var_21, var_20);
var_32 = df::cross(var_30, var_31);
var_33 = df::length(var_29);
var_34 = df::length(var_32);
var_36 = df::div(var_35, var_33);
var_37 = df::div(var_35, var_34);
var_38 = df::dot(var_29, var_32);
var_39 = df::mul(var_38, var_36);
var_40 = df::mul(var_39, var_37);
var_41 = df::mul(var_29, var_36);
var_42 = df::mul(var_41, var_36);
var_43 = df::mul(var_32, var_37);
var_44 = df::mul(var_43, var_37);
var_45 = df::sub(var_22, var_21);
var_46 = df::normalize(var_45);
var_47 = df::length(var_45);
var_48 = df::cross(var_44, var_42);
var_49 = df::dot(var_48, var_46);
var_50 = df::sign(var_49);
var_51 = df::acos(var_40);
var_52 = df::mul(var_51, var_50);
var_53 = df::mul(var_42, var_47);
var_54 = df::mul(var_44, var_47);
var_55 = df::sub(var_19, var_22);
var_56 = df::dot(var_55, var_46);
var_57 = df::mul(var_42, var_56);
var_58 = df::sub(var_20, var_22);
var_59 = df::dot(var_58, var_46);
var_60 = df::mul(var_44, var_59);
var_61 = df::add(var_57, var_60);
var_62 = df::sub(var_21, var_19);
var_63 = df::dot(var_62, var_46);
var_64 = df::mul(var_42, var_63);
var_65 = df::sub(var_21, var_20);
var_66 = df::dot(var_65, var_46);
var_67 = df::mul(var_44, var_66);
var_68 = df::add(var_64, var_67);
var_69 = df::sub(var_52, var_18);
var_70 = df::mul(var_ke, var_69);
var_71 = df::dot(var_53, var_23);
var_72 = df::dot(var_54, var_24);
var_73 = df::add(var_71, var_72);
var_74 = df::dot(var_61, var_25);
var_75 = df::add(var_73, var_74);
var_76 = df::dot(var_68, var_26);
var_77 = df::add(var_75, var_76);
var_78 = df::mul(var_kd, var_77);
var_80 = df::add(var_70, var_78);
var_81 = df::mul(var_47, var_80);
var_82 = df::sub(var_79, var_81);
var_83 = df::mul(var_53, var_82);
df::atomic_add(var_f, var_5, var_83);
var_84 = df::mul(var_54, var_82);
df::atomic_add(var_f, var_9, var_84);
var_85 = df::mul(var_61, var_82);
df::atomic_add(var_f, var_13, var_85);
var_86 = df::mul(var_68, var_82);
df::atomic_add(var_f, var_17, var_86);
//---------
// reverse
// visits the forward statements in reverse order; each df::adj_* call
// back-propagates the adjoint of its result into the adjoints of its inputs
df::adj_atomic_add(var_f, var_17, var_86, adj_f, adj_17, adj_86);
df::adj_mul(var_68, var_82, adj_68, adj_82, adj_86);
df::adj_atomic_add(var_f, var_13, var_85, adj_f, adj_13, adj_85);
df::adj_mul(var_61, var_82, adj_61, adj_82, adj_85);
df::adj_atomic_add(var_f, var_9, var_84, adj_f, adj_9, adj_84);
df::adj_mul(var_54, var_82, adj_54, adj_82, adj_84);
df::adj_atomic_add(var_f, var_5, var_83, adj_f, adj_5, adj_83);
df::adj_mul(var_53, var_82, adj_53, adj_82, adj_83);
df::adj_sub(var_79, var_81, adj_79, adj_81, adj_82);
df::adj_mul(var_47, var_80, adj_47, adj_80, adj_81);
df::adj_add(var_70, var_78, adj_70, adj_78, adj_80);
df::adj_mul(var_kd, var_77, adj_kd, adj_77, adj_78);
df::adj_add(var_75, var_76, adj_75, adj_76, adj_77);
df::adj_dot(var_68, var_26, adj_68, adj_26, adj_76);
df::adj_add(var_73, var_74, adj_73, adj_74, adj_75);
df::adj_dot(var_61, var_25, adj_61, adj_25, adj_74);
df::adj_add(var_71, var_72, adj_71, adj_72, adj_73);
df::adj_dot(var_54, var_24, adj_54, adj_24, adj_72);
df::adj_dot(var_53, var_23, adj_53, adj_23, adj_71);
df::adj_mul(var_ke, var_69, adj_ke, adj_69, adj_70);
df::adj_sub(var_52, var_18, adj_52, adj_18, adj_69);
df::adj_add(var_64, var_67, adj_64, adj_67, adj_68);
df::adj_mul(var_44, var_66, adj_44, adj_66, adj_67);
df::adj_dot(var_65, var_46, adj_65, adj_46, adj_66);
df::adj_sub(var_21, var_20, adj_21, adj_20, adj_65);
df::adj_mul(var_42, var_63, adj_42, adj_63, adj_64);
df::adj_dot(var_62, var_46, adj_62, adj_46, adj_63);
df::adj_sub(var_21, var_19, adj_21, adj_19, adj_62);
df::adj_add(var_57, var_60, adj_57, adj_60, adj_61);
df::adj_mul(var_44, var_59, adj_44, adj_59, adj_60);
df::adj_dot(var_58, var_46, adj_58, adj_46, adj_59);
df::adj_sub(var_20, var_22, adj_20, adj_22, adj_58);
df::adj_mul(var_42, var_56, adj_42, adj_56, adj_57);
df::adj_dot(var_55, var_46, adj_55, adj_46, adj_56);
df::adj_sub(var_19, var_22, adj_19, adj_22, adj_55);
df::adj_mul(var_44, var_47, adj_44, adj_47, adj_54);
df::adj_mul(var_42, var_47, adj_42, adj_47, adj_53);
df::adj_mul(var_51, var_50, adj_51, adj_50, adj_52);
df::adj_acos(var_40, adj_40, adj_51);
df::adj_sign(var_49, adj_49, adj_50);
df::adj_dot(var_48, var_46, adj_48, adj_46, adj_49);
df::adj_cross(var_44, var_42, adj_44, adj_42, adj_48);
df::adj_length(var_45, adj_45, adj_47);
df::adj_normalize(var_45, adj_45, adj_46);
df::adj_sub(var_22, var_21, adj_22, adj_21, adj_45);
df::adj_mul(var_43, var_37, adj_43, adj_37, adj_44);
df::adj_mul(var_32, var_37, adj_32, adj_37, adj_43);
df::adj_mul(var_41, var_36, adj_41, adj_36, adj_42);
df::adj_mul(var_29, var_36, adj_29, adj_36, adj_41);
df::adj_mul(var_39, var_37, adj_39, adj_37, adj_40);
df::adj_mul(var_38, var_36, adj_38, adj_36, adj_39);
df::adj_dot(var_29, var_32, adj_29, adj_32, adj_38);
df::adj_div(var_35, var_34, adj_35, adj_34, adj_37);
df::adj_div(var_35, var_33, adj_35, adj_33, adj_36);
df::adj_length(var_32, adj_32, adj_34);
df::adj_length(var_29, adj_29, adj_33);
df::adj_cross(var_30, var_31, adj_30, adj_31, adj_32);
df::adj_sub(var_21, var_20, adj_21, adj_20, adj_31);
df::adj_sub(var_22, var_20, adj_22, adj_20, adj_30);
df::adj_cross(var_27, var_28, adj_27, adj_28, adj_29);
df::adj_sub(var_22, var_19, adj_22, adj_19, adj_28);
df::adj_sub(var_21, var_19, adj_21, adj_19, adj_27);
df::adj_load(var_v, var_17, adj_v, adj_17, adj_26);
df::adj_load(var_v, var_13, adj_v, adj_13, adj_25);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_24);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_23);
df::adj_load(var_x, var_17, adj_x, adj_17, adj_22);
df::adj_load(var_x, var_13, adj_x, adj_13, adj_21);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_20);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_19);
df::adj_load(var_rest, var_0, adj_rest, adj_0, adj_18);
df::adj_load(var_indices, var_16, adj_indices, adj_16, adj_17);
df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_14);
df::adj_load(var_indices, var_12, adj_indices, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_10);
df::adj_load(var_indices, var_8, adj_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_indices, var_4, adj_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry point: serially emulates a parallel launch of the bending
// forward kernel, one invocation per thread index in [0, dim).
void eval_bending_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rest,
float var_ke,
float var_kd,
torch::Tensor var_f)
{
    for (int tid = 0; tid < dim; ++tid)
    {
        // the kernel reads its "thread index" from this global
        s_threadIdx = tid;

        eval_bending_cpu_kernel_forward(
            cast<df::float3*>(var_x), cast<df::float3*>(var_v),
            cast<int*>(var_indices), cast<float*>(var_rest),
            var_ke, var_kd,
            cast<df::float3*>(var_f));
    }
}
// Python entry point: serially emulates a parallel launch of the bending
// adjoint kernel; gradients accumulate into the adj_* tensors.
void eval_bending_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rest,
float var_ke,
float var_kd,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_rest,
float adj_ke,
float adj_kd,
torch::Tensor adj_f)
{
    for (int tid = 0; tid < dim; ++tid)
    {
        // the kernel reads its "thread index" from this global
        s_threadIdx = tid;

        eval_bending_cpu_kernel_backward(
            cast<df::float3*>(var_x), cast<df::float3*>(var_v),
            cast<int*>(var_indices), cast<float*>(var_rest),
            var_ke, var_kd,
            cast<df::float3*>(var_f),
            cast<df::float3*>(adj_x), cast<df::float3*>(adj_v),
            cast<int*>(adj_indices), cast<float*>(adj_rest),
            adj_ke, adj_kd,
            cast<df::float3*>(adj_f));
    }
}
// Python entry points
// NOTE(review): redundant forward declarations of the two entry points
// defined immediately above -- emitted by the code generator; harmless.
void eval_bending_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rest,
float var_ke,
float var_kd,
torch::Tensor var_f);
void eval_bending_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_rest,
float var_ke,
float var_kd,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_rest,
float adj_ke,
float adj_kd,
torch::Tensor adj_f);
// Generated kernel: elastic + damping forces for one tetrahedral FEM element
// per call.  The four vertex indices live at var_indices[tid*4 .. tid*4+3];
// three material parameters per element (stride 3) are read from
// var_materials, and a per-element activation from var_activation.
// NOTE(review): var_pose appears to hold the inverse rest-shape matrix
// (Dm^-1) so that var_58 = Ds * pose is the deformation gradient F, and the
// material triplet looks like (k_mu, k_lambda, k_damp) -- TODO confirm
// against the Python kernel source.  Forces are scattered to the four
// vertices of var_f via df::atomic_sub.
void eval_tetrahedra_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat33* var_pose,
float* var_activation,
float* var_materials,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
int var_19;
int var_20;
float var_21;
int var_22;
int var_23;
float var_24;
int var_25;
int var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
df::float3 var_36;
df::float3 var_37;
df::float3 var_38;
df::float3 var_39;
df::float3 var_40;
df::float3 var_41;
mat33 var_42;
mat33 var_43;
float var_44;
const float var_45 = 6.0;
float var_46;
const float var_47 = 1.0;
float var_48;
float var_49;
float var_50;
const float var_51 = 4.0;
float var_52;
float var_53;
float var_54;
float var_55;
float var_56;
float var_57;
mat33 var_58;
mat33 var_59;
mat33 var_60;
float var_61;
float var_62;
float var_63;
df::float3 var_64;
float var_65;
float var_66;
float var_67;
df::float3 var_68;
float var_69;
float var_70;
float var_71;
df::float3 var_72;
float var_73;
float var_74;
float var_75;
float var_76;
float var_77;
mat33 var_78;
float var_79;
float var_80;
float var_81;
mat33 var_82;
mat33 var_83;
mat33 var_84;
mat33 var_85;
mat33 var_86;
float var_87;
float var_88;
float var_89;
df::float3 var_90;
float var_91;
float var_92;
float var_93;
df::float3 var_94;
float var_95;
float var_96;
float var_97;
df::float3 var_98;
float var_99;
float var_100;
df::float3 var_101;
df::float3 var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
df::float3 var_106;
float var_107;
float var_108;
float var_109;
float var_110;
float var_111;
float var_112;
float var_113;
float var_114;
float var_115;
float var_116;
df::float3 var_117;
df::float3 var_118;
df::float3 var_119;
df::float3 var_120;
df::float3 var_121;
df::float3 var_122;
df::float3 var_123;
df::float3 var_124;
const float var_125 = 0.0;
float var_126;
df::float3 var_127;
//---------
// forward
// gather vertex indices (stride 4), activation, material triplet (stride 3),
// and the element's positions/velocities
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_activation, var_0);
var_19 = df::mul(var_0, var_15);
var_20 = df::add(var_19, var_3);
var_21 = df::load(var_materials, var_20);
var_22 = df::mul(var_0, var_15);
var_23 = df::add(var_22, var_7);
var_24 = df::load(var_materials, var_23);
var_25 = df::mul(var_0, var_15);
var_26 = df::add(var_25, var_11);
var_27 = df::load(var_materials, var_26);
var_28 = df::load(var_x, var_5);
var_29 = df::load(var_x, var_9);
var_30 = df::load(var_x, var_13);
var_31 = df::load(var_x, var_17);
var_32 = df::load(var_v, var_5);
var_33 = df::load(var_v, var_9);
var_34 = df::load(var_v, var_13);
var_35 = df::load(var_v, var_17);
// deformed edge matrix Ds (var_42) and velocity-edge matrix (var_59)
var_36 = df::sub(var_29, var_28);
var_37 = df::sub(var_30, var_28);
var_38 = df::sub(var_31, var_28);
var_39 = df::sub(var_33, var_32);
var_40 = df::sub(var_34, var_32);
var_41 = df::sub(var_35, var_32);
var_42 = df::mat33(var_36, var_37, var_38);
var_43 = df::load(var_pose, var_0);
// material coefficients scaled by inverse rest volume (var_48 = 1/(6*det))
var_44 = df::determinant(var_43);
var_46 = df::mul(var_44, var_45);
var_48 = df::div(var_47, var_46);
var_49 = df::div(var_21, var_24);
var_50 = df::add(var_47, var_49);
var_52 = df::mul(var_51, var_24);
var_53 = df::div(var_21, var_52);
var_54 = df::sub(var_50, var_53);
var_55 = df::mul(var_21, var_48);
var_56 = df::mul(var_24, var_48);
var_57 = df::mul(var_27, var_48);
// F = Ds * pose (and its velocity analogue var_60 = dDs * pose)
var_58 = df::mul(var_42, var_43);
var_59 = df::mat33(var_39, var_40, var_41);
var_60 = df::mul(var_59, var_43);
// columns of F and Ic = tr(F^T F) = sum of squared column norms (var_77)
var_61 = df::index(var_58, var_3, var_3);
var_62 = df::index(var_58, var_7, var_3);
var_63 = df::index(var_58, var_11, var_3);
var_64 = df::float3(var_61, var_62, var_63);
var_65 = df::index(var_58, var_3, var_7);
var_66 = df::index(var_58, var_7, var_7);
var_67 = df::index(var_58, var_11, var_7);
var_68 = df::float3(var_65, var_66, var_67);
var_69 = df::index(var_58, var_3, var_11);
var_70 = df::index(var_58, var_7, var_11);
var_71 = df::index(var_58, var_11, var_11);
var_72 = df::float3(var_69, var_70, var_71);
var_73 = df::dot(var_64, var_64);
var_74 = df::dot(var_68, var_68);
var_75 = df::add(var_73, var_74);
var_76 = df::dot(var_72, var_72);
var_77 = df::add(var_75, var_76);
// deviatoric stress (with 1 - 1/(Ic+1) attenuation) plus damping term,
// mapped back through pose^T into var_86
var_78 = df::mul(var_58, var_55);
var_79 = df::add(var_77, var_47);
var_80 = df::div(var_47, var_79);
var_81 = df::sub(var_47, var_80);
var_82 = df::mul(var_78, var_81);
var_83 = df::mul(var_60, var_57);
var_84 = df::add(var_82, var_83);
var_85 = df::transpose(var_43);
var_86 = df::mul(var_84, var_85);
var_87 = df::index(var_86, var_3, var_3);
var_88 = df::index(var_86, var_7, var_3);
var_89 = df::index(var_86, var_11, var_3);
var_90 = df::float3(var_87, var_88, var_89);
var_91 = df::index(var_86, var_3, var_7);
var_92 = df::index(var_86, var_7, var_7);
var_93 = df::index(var_86, var_11, var_7);
var_94 = df::float3(var_91, var_92, var_93);
var_95 = df::index(var_86, var_3, var_11);
var_96 = df::index(var_86, var_7, var_11);
var_97 = df::index(var_86, var_11, var_11);
var_98 = df::float3(var_95, var_96, var_97);
// volumetric term: J = det(F) (var_99); var_100 is the rest volume factor,
// var_102/104/106 are scaled face-normal gradients of J
var_99 = df::determinant(var_58);
var_100 = df::div(var_46, var_45);
var_101 = df::cross(var_37, var_38);
var_102 = df::mul(var_101, var_100);
var_103 = df::cross(var_38, var_36);
var_104 = df::mul(var_103, var_100);
var_105 = df::cross(var_36, var_37);
var_106 = df::mul(var_105, var_100);
// volume pressure (J - target + activation) * k_lambda-like coeff, plus
// volumetric damping from dJ/dt, combined into var_116
var_107 = df::sub(var_99, var_54);
var_108 = df::add(var_107, var_18);
var_109 = df::mul(var_108, var_56);
var_110 = df::dot(var_102, var_33);
var_111 = df::dot(var_104, var_34);
var_112 = df::add(var_110, var_111);
var_113 = df::dot(var_106, var_35);
var_114 = df::add(var_112, var_113);
var_115 = df::mul(var_114, var_57);
var_116 = df::add(var_109, var_115);
// per-vertex forces; vertex 0 receives minus the sum of the other three
// (var_127 = -(f1 + f2 + f3), then atomic_sub negates again)
var_117 = df::mul(var_102, var_116);
var_118 = df::add(var_90, var_117);
var_119 = df::mul(var_104, var_116);
var_120 = df::add(var_94, var_119);
var_121 = df::mul(var_106, var_116);
var_122 = df::add(var_98, var_121);
var_123 = df::add(var_118, var_120);
var_124 = df::add(var_123, var_122);
var_126 = df::sub(var_125, var_47);
var_127 = df::mul(var_124, var_126);
df::atomic_sub(var_f, var_5, var_127);
df::atomic_sub(var_f, var_9, var_118);
df::atomic_sub(var_f, var_13, var_120);
df::atomic_sub(var_f, var_17, var_122);
}
// Reverse-mode (adjoint) kernel for eval_tetrahedra, auto-generated in SSA
// form by the dflex code generator. It first re-runs the forward pass to
// recover every intermediate value (var_*), then replays the computation in
// reverse program order, accumulating gradients into the adj_* outputs.
// NOTE: the statement order here must stay in exact lockstep with the
// forward kernel above — do not reorder or hand-edit individual statements.
// Inputs mirror the forward kernel; each var_* array has a matching adj_*
// gradient array (adj_indices is unused numerically but kept for uniformity).
void eval_tetrahedra_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
int* var_indices,
mat33* var_pose,
float* var_activation,
float* var_materials,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
int* adj_indices,
mat33* adj_pose,
float* adj_activation,
float* adj_materials,
df::float3* adj_f)
{
//---------
// primal vars
// (forward intermediates, re-computed below so the reverse pass can use them)
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
int var_19;
int var_20;
float var_21;
int var_22;
int var_23;
float var_24;
int var_25;
int var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
df::float3 var_36;
df::float3 var_37;
df::float3 var_38;
df::float3 var_39;
df::float3 var_40;
df::float3 var_41;
mat33 var_42;
mat33 var_43;
float var_44;
const float var_45 = 6.0;
float var_46;
const float var_47 = 1.0;
float var_48;
float var_49;
float var_50;
const float var_51 = 4.0;
float var_52;
float var_53;
float var_54;
float var_55;
float var_56;
float var_57;
mat33 var_58;
mat33 var_59;
mat33 var_60;
float var_61;
float var_62;
float var_63;
df::float3 var_64;
float var_65;
float var_66;
float var_67;
df::float3 var_68;
float var_69;
float var_70;
float var_71;
df::float3 var_72;
float var_73;
float var_74;
float var_75;
float var_76;
float var_77;
mat33 var_78;
float var_79;
float var_80;
float var_81;
mat33 var_82;
mat33 var_83;
mat33 var_84;
mat33 var_85;
mat33 var_86;
float var_87;
float var_88;
float var_89;
df::float3 var_90;
float var_91;
float var_92;
float var_93;
df::float3 var_94;
float var_95;
float var_96;
float var_97;
df::float3 var_98;
float var_99;
float var_100;
df::float3 var_101;
df::float3 var_102;
df::float3 var_103;
df::float3 var_104;
df::float3 var_105;
df::float3 var_106;
float var_107;
float var_108;
float var_109;
float var_110;
float var_111;
float var_112;
float var_113;
float var_114;
float var_115;
float var_116;
df::float3 var_117;
df::float3 var_118;
df::float3 var_119;
df::float3 var_120;
df::float3 var_121;
df::float3 var_122;
df::float3 var_123;
df::float3 var_124;
const float var_125 = 0.0;
float var_126;
df::float3 var_127;
//---------
// dual vars
// (one gradient accumulator per primal var, zero-initialized)
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
int adj_16 = 0;
int adj_17 = 0;
float adj_18 = 0;
int adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
int adj_22 = 0;
int adj_23 = 0;
float adj_24 = 0;
int adj_25 = 0;
int adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
df::float3 adj_34 = 0;
df::float3 adj_35 = 0;
df::float3 adj_36 = 0;
df::float3 adj_37 = 0;
df::float3 adj_38 = 0;
df::float3 adj_39 = 0;
df::float3 adj_40 = 0;
df::float3 adj_41 = 0;
mat33 adj_42 = 0;
mat33 adj_43 = 0;
float adj_44 = 0;
float adj_45 = 0;
float adj_46 = 0;
float adj_47 = 0;
float adj_48 = 0;
float adj_49 = 0;
float adj_50 = 0;
float adj_51 = 0;
float adj_52 = 0;
float adj_53 = 0;
float adj_54 = 0;
float adj_55 = 0;
float adj_56 = 0;
float adj_57 = 0;
mat33 adj_58 = 0;
mat33 adj_59 = 0;
mat33 adj_60 = 0;
float adj_61 = 0;
float adj_62 = 0;
float adj_63 = 0;
df::float3 adj_64 = 0;
float adj_65 = 0;
float adj_66 = 0;
float adj_67 = 0;
df::float3 adj_68 = 0;
float adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
df::float3 adj_72 = 0;
float adj_73 = 0;
float adj_74 = 0;
float adj_75 = 0;
float adj_76 = 0;
float adj_77 = 0;
mat33 adj_78 = 0;
float adj_79 = 0;
float adj_80 = 0;
float adj_81 = 0;
mat33 adj_82 = 0;
mat33 adj_83 = 0;
mat33 adj_84 = 0;
mat33 adj_85 = 0;
mat33 adj_86 = 0;
float adj_87 = 0;
float adj_88 = 0;
float adj_89 = 0;
df::float3 adj_90 = 0;
float adj_91 = 0;
float adj_92 = 0;
float adj_93 = 0;
df::float3 adj_94 = 0;
float adj_95 = 0;
float adj_96 = 0;
float adj_97 = 0;
df::float3 adj_98 = 0;
float adj_99 = 0;
float adj_100 = 0;
df::float3 adj_101 = 0;
df::float3 adj_102 = 0;
df::float3 adj_103 = 0;
df::float3 adj_104 = 0;
df::float3 adj_105 = 0;
df::float3 adj_106 = 0;
float adj_107 = 0;
float adj_108 = 0;
float adj_109 = 0;
float adj_110 = 0;
float adj_111 = 0;
float adj_112 = 0;
float adj_113 = 0;
float adj_114 = 0;
float adj_115 = 0;
float adj_116 = 0;
df::float3 adj_117 = 0;
df::float3 adj_118 = 0;
df::float3 adj_119 = 0;
df::float3 adj_120 = 0;
df::float3 adj_121 = 0;
df::float3 adj_122 = 0;
df::float3 adj_123 = 0;
df::float3 adj_124 = 0;
float adj_125 = 0;
float adj_126 = 0;
df::float3 adj_127 = 0;
//---------
// forward
// Replay of the forward tetrahedral-FEM force evaluation. Reads the four
// vertex indices/positions/velocities of tet `tid`, the rest-pose inverse
// matrix (var_pose), per-tet activation and three material coefficients,
// then forms edge matrices (var_42/var_59), the strain quantities and the
// final per-vertex forces (var_118/120/122/127) that were atomically
// subtracted from var_f in the forward kernel.
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
var_18 = df::load(var_activation, var_0);
var_19 = df::mul(var_0, var_15);
var_20 = df::add(var_19, var_3);
var_21 = df::load(var_materials, var_20);
var_22 = df::mul(var_0, var_15);
var_23 = df::add(var_22, var_7);
var_24 = df::load(var_materials, var_23);
var_25 = df::mul(var_0, var_15);
var_26 = df::add(var_25, var_11);
var_27 = df::load(var_materials, var_26);
var_28 = df::load(var_x, var_5);
var_29 = df::load(var_x, var_9);
var_30 = df::load(var_x, var_13);
var_31 = df::load(var_x, var_17);
var_32 = df::load(var_v, var_5);
var_33 = df::load(var_v, var_9);
var_34 = df::load(var_v, var_13);
var_35 = df::load(var_v, var_17);
var_36 = df::sub(var_29, var_28);
var_37 = df::sub(var_30, var_28);
var_38 = df::sub(var_31, var_28);
var_39 = df::sub(var_33, var_32);
var_40 = df::sub(var_34, var_32);
var_41 = df::sub(var_35, var_32);
var_42 = df::mat33(var_36, var_37, var_38);
var_43 = df::load(var_pose, var_0);
var_44 = df::determinant(var_43);
var_46 = df::mul(var_44, var_45);
var_48 = df::div(var_47, var_46);
var_49 = df::div(var_21, var_24);
var_50 = df::add(var_47, var_49);
var_52 = df::mul(var_51, var_24);
var_53 = df::div(var_21, var_52);
var_54 = df::sub(var_50, var_53);
var_55 = df::mul(var_21, var_48);
var_56 = df::mul(var_24, var_48);
var_57 = df::mul(var_27, var_48);
var_58 = df::mul(var_42, var_43);
var_59 = df::mat33(var_39, var_40, var_41);
var_60 = df::mul(var_59, var_43);
var_61 = df::index(var_58, var_3, var_3);
var_62 = df::index(var_58, var_7, var_3);
var_63 = df::index(var_58, var_11, var_3);
var_64 = df::float3(var_61, var_62, var_63);
var_65 = df::index(var_58, var_3, var_7);
var_66 = df::index(var_58, var_7, var_7);
var_67 = df::index(var_58, var_11, var_7);
var_68 = df::float3(var_65, var_66, var_67);
var_69 = df::index(var_58, var_3, var_11);
var_70 = df::index(var_58, var_7, var_11);
var_71 = df::index(var_58, var_11, var_11);
var_72 = df::float3(var_69, var_70, var_71);
var_73 = df::dot(var_64, var_64);
var_74 = df::dot(var_68, var_68);
var_75 = df::add(var_73, var_74);
var_76 = df::dot(var_72, var_72);
var_77 = df::add(var_75, var_76);
var_78 = df::mul(var_58, var_55);
var_79 = df::add(var_77, var_47);
var_80 = df::div(var_47, var_79);
var_81 = df::sub(var_47, var_80);
var_82 = df::mul(var_78, var_81);
var_83 = df::mul(var_60, var_57);
var_84 = df::add(var_82, var_83);
var_85 = df::transpose(var_43);
var_86 = df::mul(var_84, var_85);
var_87 = df::index(var_86, var_3, var_3);
var_88 = df::index(var_86, var_7, var_3);
var_89 = df::index(var_86, var_11, var_3);
var_90 = df::float3(var_87, var_88, var_89);
var_91 = df::index(var_86, var_3, var_7);
var_92 = df::index(var_86, var_7, var_7);
var_93 = df::index(var_86, var_11, var_7);
var_94 = df::float3(var_91, var_92, var_93);
var_95 = df::index(var_86, var_3, var_11);
var_96 = df::index(var_86, var_7, var_11);
var_97 = df::index(var_86, var_11, var_11);
var_98 = df::float3(var_95, var_96, var_97);
var_99 = df::determinant(var_58);
var_100 = df::div(var_46, var_45);
var_101 = df::cross(var_37, var_38);
var_102 = df::mul(var_101, var_100);
var_103 = df::cross(var_38, var_36);
var_104 = df::mul(var_103, var_100);
var_105 = df::cross(var_36, var_37);
var_106 = df::mul(var_105, var_100);
var_107 = df::sub(var_99, var_54);
var_108 = df::add(var_107, var_18);
var_109 = df::mul(var_108, var_56);
var_110 = df::dot(var_102, var_33);
var_111 = df::dot(var_104, var_34);
var_112 = df::add(var_110, var_111);
var_113 = df::dot(var_106, var_35);
var_114 = df::add(var_112, var_113);
var_115 = df::mul(var_114, var_57);
var_116 = df::add(var_109, var_115);
var_117 = df::mul(var_102, var_116);
var_118 = df::add(var_90, var_117);
var_119 = df::mul(var_104, var_116);
var_120 = df::add(var_94, var_119);
var_121 = df::mul(var_106, var_116);
var_122 = df::add(var_98, var_121);
var_123 = df::add(var_118, var_120);
var_124 = df::add(var_123, var_122);
var_126 = df::sub(var_125, var_47);
var_127 = df::mul(var_124, var_126);
df::atomic_sub(var_f, var_5, var_127);
df::atomic_sub(var_f, var_9, var_118);
df::atomic_sub(var_f, var_13, var_120);
df::atomic_sub(var_f, var_17, var_122);
//---------
// reverse
// Each df::adj_* call is the adjoint of the matching forward statement,
// visited in strictly reverse order; gradients flow from adj_f back into
// adj_x, adj_v, adj_pose, adj_activation and adj_materials.
df::adj_atomic_sub(var_f, var_17, var_122, adj_f, adj_17, adj_122);
df::adj_atomic_sub(var_f, var_13, var_120, adj_f, adj_13, adj_120);
df::adj_atomic_sub(var_f, var_9, var_118, adj_f, adj_9, adj_118);
df::adj_atomic_sub(var_f, var_5, var_127, adj_f, adj_5, adj_127);
df::adj_mul(var_124, var_126, adj_124, adj_126, adj_127);
df::adj_sub(var_125, var_47, adj_125, adj_47, adj_126);
df::adj_add(var_123, var_122, adj_123, adj_122, adj_124);
df::adj_add(var_118, var_120, adj_118, adj_120, adj_123);
df::adj_add(var_98, var_121, adj_98, adj_121, adj_122);
df::adj_mul(var_106, var_116, adj_106, adj_116, adj_121);
df::adj_add(var_94, var_119, adj_94, adj_119, adj_120);
df::adj_mul(var_104, var_116, adj_104, adj_116, adj_119);
df::adj_add(var_90, var_117, adj_90, adj_117, adj_118);
df::adj_mul(var_102, var_116, adj_102, adj_116, adj_117);
df::adj_add(var_109, var_115, adj_109, adj_115, adj_116);
df::adj_mul(var_114, var_57, adj_114, adj_57, adj_115);
df::adj_add(var_112, var_113, adj_112, adj_113, adj_114);
df::adj_dot(var_106, var_35, adj_106, adj_35, adj_113);
df::adj_add(var_110, var_111, adj_110, adj_111, adj_112);
df::adj_dot(var_104, var_34, adj_104, adj_34, adj_111);
df::adj_dot(var_102, var_33, adj_102, adj_33, adj_110);
df::adj_mul(var_108, var_56, adj_108, adj_56, adj_109);
df::adj_add(var_107, var_18, adj_107, adj_18, adj_108);
df::adj_sub(var_99, var_54, adj_99, adj_54, adj_107);
df::adj_mul(var_105, var_100, adj_105, adj_100, adj_106);
df::adj_cross(var_36, var_37, adj_36, adj_37, adj_105);
df::adj_mul(var_103, var_100, adj_103, adj_100, adj_104);
df::adj_cross(var_38, var_36, adj_38, adj_36, adj_103);
df::adj_mul(var_101, var_100, adj_101, adj_100, adj_102);
df::adj_cross(var_37, var_38, adj_37, adj_38, adj_101);
df::adj_div(var_46, var_45, adj_46, adj_45, adj_100);
df::adj_determinant(var_58, adj_58, adj_99);
df::adj_float3(var_95, var_96, var_97, adj_95, adj_96, adj_97, adj_98);
df::adj_index(var_86, var_11, var_11, adj_86, adj_11, adj_11, adj_97);
df::adj_index(var_86, var_7, var_11, adj_86, adj_7, adj_11, adj_96);
df::adj_index(var_86, var_3, var_11, adj_86, adj_3, adj_11, adj_95);
df::adj_float3(var_91, var_92, var_93, adj_91, adj_92, adj_93, adj_94);
df::adj_index(var_86, var_11, var_7, adj_86, adj_11, adj_7, adj_93);
df::adj_index(var_86, var_7, var_7, adj_86, adj_7, adj_7, adj_92);
df::adj_index(var_86, var_3, var_7, adj_86, adj_3, adj_7, adj_91);
df::adj_float3(var_87, var_88, var_89, adj_87, adj_88, adj_89, adj_90);
df::adj_index(var_86, var_11, var_3, adj_86, adj_11, adj_3, adj_89);
df::adj_index(var_86, var_7, var_3, adj_86, adj_7, adj_3, adj_88);
df::adj_index(var_86, var_3, var_3, adj_86, adj_3, adj_3, adj_87);
df::adj_mul(var_84, var_85, adj_84, adj_85, adj_86);
df::adj_transpose(var_43, adj_43, adj_85);
df::adj_add(var_82, var_83, adj_82, adj_83, adj_84);
df::adj_mul(var_60, var_57, adj_60, adj_57, adj_83);
df::adj_mul(var_78, var_81, adj_78, adj_81, adj_82);
df::adj_sub(var_47, var_80, adj_47, adj_80, adj_81);
df::adj_div(var_47, var_79, adj_47, adj_79, adj_80);
df::adj_add(var_77, var_47, adj_77, adj_47, adj_79);
df::adj_mul(var_58, var_55, adj_58, adj_55, adj_78);
df::adj_add(var_75, var_76, adj_75, adj_76, adj_77);
df::adj_dot(var_72, var_72, adj_72, adj_72, adj_76);
df::adj_add(var_73, var_74, adj_73, adj_74, adj_75);
df::adj_dot(var_68, var_68, adj_68, adj_68, adj_74);
df::adj_dot(var_64, var_64, adj_64, adj_64, adj_73);
df::adj_float3(var_69, var_70, var_71, adj_69, adj_70, adj_71, adj_72);
df::adj_index(var_58, var_11, var_11, adj_58, adj_11, adj_11, adj_71);
df::adj_index(var_58, var_7, var_11, adj_58, adj_7, adj_11, adj_70);
df::adj_index(var_58, var_3, var_11, adj_58, adj_3, adj_11, adj_69);
df::adj_float3(var_65, var_66, var_67, adj_65, adj_66, adj_67, adj_68);
df::adj_index(var_58, var_11, var_7, adj_58, adj_11, adj_7, adj_67);
df::adj_index(var_58, var_7, var_7, adj_58, adj_7, adj_7, adj_66);
df::adj_index(var_58, var_3, var_7, adj_58, adj_3, adj_7, adj_65);
df::adj_float3(var_61, var_62, var_63, adj_61, adj_62, adj_63, adj_64);
df::adj_index(var_58, var_11, var_3, adj_58, adj_11, adj_3, adj_63);
df::adj_index(var_58, var_7, var_3, adj_58, adj_7, adj_3, adj_62);
df::adj_index(var_58, var_3, var_3, adj_58, adj_3, adj_3, adj_61);
df::adj_mul(var_59, var_43, adj_59, adj_43, adj_60);
df::adj_mat33(var_39, var_40, var_41, adj_39, adj_40, adj_41, adj_59);
df::adj_mul(var_42, var_43, adj_42, adj_43, adj_58);
df::adj_mul(var_27, var_48, adj_27, adj_48, adj_57);
df::adj_mul(var_24, var_48, adj_24, adj_48, adj_56);
df::adj_mul(var_21, var_48, adj_21, adj_48, adj_55);
df::adj_sub(var_50, var_53, adj_50, adj_53, adj_54);
df::adj_div(var_21, var_52, adj_21, adj_52, adj_53);
df::adj_mul(var_51, var_24, adj_51, adj_24, adj_52);
df::adj_add(var_47, var_49, adj_47, adj_49, adj_50);
df::adj_div(var_21, var_24, adj_21, adj_24, adj_49);
df::adj_div(var_47, var_46, adj_47, adj_46, adj_48);
df::adj_mul(var_44, var_45, adj_44, adj_45, adj_46);
df::adj_determinant(var_43, adj_43, adj_44);
df::adj_load(var_pose, var_0, adj_pose, adj_0, adj_43);
df::adj_mat33(var_36, var_37, var_38, adj_36, adj_37, adj_38, adj_42);
df::adj_sub(var_35, var_32, adj_35, adj_32, adj_41);
df::adj_sub(var_34, var_32, adj_34, adj_32, adj_40);
df::adj_sub(var_33, var_32, adj_33, adj_32, adj_39);
df::adj_sub(var_31, var_28, adj_31, adj_28, adj_38);
df::adj_sub(var_30, var_28, adj_30, adj_28, adj_37);
df::adj_sub(var_29, var_28, adj_29, adj_28, adj_36);
df::adj_load(var_v, var_17, adj_v, adj_17, adj_35);
df::adj_load(var_v, var_13, adj_v, adj_13, adj_34);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_33);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_32);
df::adj_load(var_x, var_17, adj_x, adj_17, adj_31);
df::adj_load(var_x, var_13, adj_x, adj_13, adj_30);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_29);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_28);
df::adj_load(var_materials, var_26, adj_materials, adj_26, adj_27);
df::adj_add(var_25, var_11, adj_25, adj_11, adj_26);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_25);
df::adj_load(var_materials, var_23, adj_materials, adj_23, adj_24);
df::adj_add(var_22, var_7, adj_22, adj_7, adj_23);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_22);
df::adj_load(var_materials, var_20, adj_materials, adj_20, adj_21);
df::adj_add(var_19, var_3, adj_19, adj_3, adj_20);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_19);
df::adj_load(var_activation, var_0, adj_activation, adj_0, adj_18);
df::adj_load(var_indices, var_16, adj_indices, adj_16, adj_17);
df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_14);
df::adj_load(var_indices, var_12, adj_indices, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_10);
df::adj_load(var_indices, var_8, adj_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_indices, var_4, adj_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
// Python entry point: executes the eval_tetrahedra forward kernel serially on
// the CPU, once per element (dim launches). The file-scope s_threadIdx stands
// in for the CUDA thread id that df::tid() reads inside the kernel body.
void eval_tetrahedra_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
torch::Tensor var_f)
{
    // Emulate a 1D launch: one serial "thread" per tetrahedron.
    for (int tid = 0; tid < dim; ++tid)
    {
        s_threadIdx = tid;

        eval_tetrahedra_cpu_kernel_forward(
            cast<df::float3*>(var_x),
            cast<df::float3*>(var_v),
            cast<int*>(var_indices),
            cast<mat33*>(var_pose),
            cast<float*>(var_activation),
            cast<float*>(var_materials),
            cast<df::float3*>(var_f));
    }
}
// Python entry point: executes the eval_tetrahedra adjoint kernel serially on
// the CPU, once per element. Gradient tensors (adj_*) mirror the primal
// arguments one-for-one; the kernel accumulates into them.
void eval_tetrahedra_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
torch::Tensor adj_materials,
torch::Tensor adj_f)
{
    // Emulate a 1D launch: one serial "thread" per tetrahedron.
    for (int tid = 0; tid < dim; ++tid)
    {
        s_threadIdx = tid;

        eval_tetrahedra_cpu_kernel_backward(
            cast<df::float3*>(var_x),
            cast<df::float3*>(var_v),
            cast<int*>(var_indices),
            cast<mat33*>(var_pose),
            cast<float*>(var_activation),
            cast<float*>(var_materials),
            cast<df::float3*>(var_f),
            cast<df::float3*>(adj_x),
            cast<df::float3*>(adj_v),
            cast<int*>(adj_indices),
            cast<mat33*>(adj_pose),
            cast<float*>(adj_activation),
            cast<float*>(adj_materials),
            cast<df::float3*>(adj_f));
    }
}
// Python entry points
// NOTE(review): these are re-declarations of the two entry points defined
// immediately above — the code generator emits the prototypes after the
// definitions. They are harmless but redundant.
void eval_tetrahedra_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
torch::Tensor var_f);
void eval_tetrahedra_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
torch::Tensor adj_materials,
torch::Tensor adj_f);
// Auto-generated (SSA form) forward kernel: ground-plane contact force for
// one particle per "thread". Computes a penalty normal force plus damping
// and a clamped tangential friction term, then subtracts the total from
// var_f[tid]. Statement order is mirrored by the backward kernel below —
// keep the two in lockstep.
// Parameters: var_x/var_v particle positions/velocities, var_ke/var_kd
// contact stiffness/damping, var_kf/var_mu friction stiffness/coefficient,
// var_f accumulated force output.
void eval_contacts_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
df::float3* var_f)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
const float var_3 = 0.0;
const float var_4 = 1.0;
df::float3 var_5;
float var_6;
const float var_7 = 0.01;
float var_8;
float var_9;
float var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
float var_15;
df::float3 var_16;
df::float3 var_17;
float var_18;
float var_19;
float var_20;
df::float3 var_21;
float var_22;
float var_23;
df::float3 var_24;
float var_25;
float var_26;
df::float3 var_27;
df::float3 var_28;
float var_29;
df::float3 var_30;
df::float3 var_31;
//---------
// forward
var_0 = df::tid();
var_1 = df::load(var_x, var_0);
var_2 = df::load(var_v, var_0);
// ground plane normal n = (0, 1, 0)
var_5 = df::float3(var_3, var_4, var_3);
var_6 = df::dot(var_5, var_1);
// penetration depth c = min(dot(n, x) - 0.01, 0)  (<= 0 when in contact)
var_8 = df::sub(var_6, var_7);
var_9 = df::min(var_8, var_3);
// split velocity into normal (var_11) and tangential (var_12) components
var_10 = df::dot(var_5, var_2);
var_11 = df::mul(var_5, var_10);
var_12 = df::sub(var_2, var_11);
// normal penalty force: n * c * ke
var_13 = df::mul(var_5, var_9);
var_14 = df::mul(var_13, var_ke);
// damping only on approaching velocity: n * min(vn, 0) * kd
var_15 = df::min(var_10, var_3);
var_16 = df::mul(var_5, var_15);
var_17 = df::mul(var_16, var_kd);
// Coulomb bound: |ft| <= -mu * c * ke  (var_19 = mu*c*ke <= 0, var_20 = -var_19)
var_18 = df::mul(var_mu, var_9);
var_19 = df::mul(var_18, var_ke);
var_20 = df::sub(var_3, var_19);
// tangential friction in x and z, each clamped to the Coulomb bound
var_21 = df::float3(var_kf, var_3, var_3);
var_22 = df::dot(var_21, var_12);
var_23 = df::clamp(var_22, var_19, var_20);
var_24 = df::float3(var_3, var_3, var_kf);
var_25 = df::dot(var_24, var_12);
var_26 = df::clamp(var_25, var_19, var_20);
var_27 = df::float3(var_23, var_3, var_26);
// damping + friction are gated by step(c) so they act only during contact
var_28 = df::add(var_17, var_27);
var_29 = df::step(var_9);
var_30 = df::mul(var_28, var_29);
var_31 = df::add(var_14, var_30);
df::atomic_sub(var_f, var_0, var_31);
}
// Reverse-mode (adjoint) kernel for eval_contacts, auto-generated. Re-runs
// the forward ground-contact computation to recover intermediates, then
// applies the adjoint of each statement in reverse order, accumulating
// gradients into adj_x, adj_v and (locally) the scalar material adjoints.
// NOTE: adj_ke/adj_kd/adj_kf/adj_mu are passed by value, so their
// accumulated gradients do not escape this function as written.
void eval_contacts_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
df::float3* var_f,
df::float3* adj_x,
df::float3* adj_v,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
df::float3* adj_f)
{
//---------
// primal vars
int var_0;
df::float3 var_1;
df::float3 var_2;
const float var_3 = 0.0;
const float var_4 = 1.0;
df::float3 var_5;
float var_6;
const float var_7 = 0.01;
float var_8;
float var_9;
float var_10;
df::float3 var_11;
df::float3 var_12;
df::float3 var_13;
df::float3 var_14;
float var_15;
df::float3 var_16;
df::float3 var_17;
float var_18;
float var_19;
float var_20;
df::float3 var_21;
float var_22;
float var_23;
df::float3 var_24;
float var_25;
float var_26;
df::float3 var_27;
df::float3 var_28;
float var_29;
df::float3 var_30;
df::float3 var_31;
//---------
// dual vars
// (one gradient accumulator per primal var, zero-initialized)
int adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
df::float3 adj_5 = 0;
float adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
float adj_10 = 0;
df::float3 adj_11 = 0;
df::float3 adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
float adj_15 = 0;
df::float3 adj_16 = 0;
df::float3 adj_17 = 0;
float adj_18 = 0;
float adj_19 = 0;
float adj_20 = 0;
df::float3 adj_21 = 0;
float adj_22 = 0;
float adj_23 = 0;
df::float3 adj_24 = 0;
float adj_25 = 0;
float adj_26 = 0;
df::float3 adj_27 = 0;
df::float3 adj_28 = 0;
float adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
//---------
// forward
// Replay of eval_contacts_cpu_kernel_forward (see its comments for the
// meaning of each intermediate).
var_0 = df::tid();
var_1 = df::load(var_x, var_0);
var_2 = df::load(var_v, var_0);
var_5 = df::float3(var_3, var_4, var_3);
var_6 = df::dot(var_5, var_1);
var_8 = df::sub(var_6, var_7);
var_9 = df::min(var_8, var_3);
var_10 = df::dot(var_5, var_2);
var_11 = df::mul(var_5, var_10);
var_12 = df::sub(var_2, var_11);
var_13 = df::mul(var_5, var_9);
var_14 = df::mul(var_13, var_ke);
var_15 = df::min(var_10, var_3);
var_16 = df::mul(var_5, var_15);
var_17 = df::mul(var_16, var_kd);
var_18 = df::mul(var_mu, var_9);
var_19 = df::mul(var_18, var_ke);
var_20 = df::sub(var_3, var_19);
var_21 = df::float3(var_kf, var_3, var_3);
var_22 = df::dot(var_21, var_12);
var_23 = df::clamp(var_22, var_19, var_20);
var_24 = df::float3(var_3, var_3, var_kf);
var_25 = df::dot(var_24, var_12);
var_26 = df::clamp(var_25, var_19, var_20);
var_27 = df::float3(var_23, var_3, var_26);
var_28 = df::add(var_17, var_27);
var_29 = df::step(var_9);
var_30 = df::mul(var_28, var_29);
var_31 = df::add(var_14, var_30);
df::atomic_sub(var_f, var_0, var_31);
//---------
// reverse
// Adjoint statements in strictly reverse program order.
df::adj_atomic_sub(var_f, var_0, var_31, adj_f, adj_0, adj_31);
df::adj_add(var_14, var_30, adj_14, adj_30, adj_31);
df::adj_mul(var_28, var_29, adj_28, adj_29, adj_30);
df::adj_step(var_9, adj_9, adj_29);
df::adj_add(var_17, var_27, adj_17, adj_27, adj_28);
df::adj_float3(var_23, var_3, var_26, adj_23, adj_3, adj_26, adj_27);
df::adj_clamp(var_25, var_19, var_20, adj_25, adj_19, adj_20, adj_26);
df::adj_dot(var_24, var_12, adj_24, adj_12, adj_25);
df::adj_float3(var_3, var_3, var_kf, adj_3, adj_3, adj_kf, adj_24);
df::adj_clamp(var_22, var_19, var_20, adj_22, adj_19, adj_20, adj_23);
df::adj_dot(var_21, var_12, adj_21, adj_12, adj_22);
df::adj_float3(var_kf, var_3, var_3, adj_kf, adj_3, adj_3, adj_21);
df::adj_sub(var_3, var_19, adj_3, adj_19, adj_20);
df::adj_mul(var_18, var_ke, adj_18, adj_ke, adj_19);
df::adj_mul(var_mu, var_9, adj_mu, adj_9, adj_18);
df::adj_mul(var_16, var_kd, adj_16, adj_kd, adj_17);
df::adj_mul(var_5, var_15, adj_5, adj_15, adj_16);
df::adj_min(var_10, var_3, adj_10, adj_3, adj_15);
df::adj_mul(var_13, var_ke, adj_13, adj_ke, adj_14);
df::adj_mul(var_5, var_9, adj_5, adj_9, adj_13);
df::adj_sub(var_2, var_11, adj_2, adj_11, adj_12);
df::adj_mul(var_5, var_10, adj_5, adj_10, adj_11);
df::adj_dot(var_5, var_2, adj_5, adj_2, adj_10);
df::adj_min(var_8, var_3, adj_8, adj_3, adj_9);
df::adj_sub(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_dot(var_5, var_1, adj_5, adj_1, adj_6);
df::adj_float3(var_3, var_4, var_3, adj_3, adj_4, adj_3, adj_5);
df::adj_load(var_v, var_0, adj_v, adj_0, adj_2);
df::adj_load(var_x, var_0, adj_x, adj_0, adj_1);
return;
}
// Python entry points
// Python entry point: runs the eval_contacts forward kernel serially on the
// CPU, once per particle (dim launches). s_threadIdx emulates the CUDA
// thread id read by df::tid() inside the kernel.
void eval_contacts_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_f)
{
    // Emulate a 1D launch: one serial "thread" per particle.
    for (int tid = 0; tid < dim; ++tid)
    {
        s_threadIdx = tid;

        eval_contacts_cpu_kernel_forward(
            cast<df::float3*>(var_x),
            cast<df::float3*>(var_v),
            var_ke,
            var_kd,
            var_kf,
            var_mu,
            cast<df::float3*>(var_f));
    }
}
// Python entry point: runs the eval_contacts adjoint kernel serially on the
// CPU, once per particle. Scalar material adjoints are passed by value and
// therefore not returned to the caller (matches the generated interface).
void eval_contacts_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
torch::Tensor adj_f)
{
    // Emulate a 1D launch: one serial "thread" per particle.
    for (int tid = 0; tid < dim; ++tid)
    {
        s_threadIdx = tid;

        eval_contacts_cpu_kernel_backward(
            cast<df::float3*>(var_x),
            cast<df::float3*>(var_v),
            var_ke,
            var_kd,
            var_kf,
            var_mu,
            cast<df::float3*>(var_f),
            cast<df::float3*>(adj_x),
            cast<df::float3*>(adj_v),
            adj_ke,
            adj_kd,
            adj_kf,
            adj_mu,
            cast<df::float3*>(adj_f));
    }
}
// Python entry points
// NOTE(review): redundant re-declarations of the two entry points defined
// immediately above, emitted by the code generator after the definitions.
void eval_contacts_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_f);
void eval_contacts_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_f,
torch::Tensor adj_x,
torch::Tensor adj_v,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
torch::Tensor adj_f);
// Auto-generated (SSA form) forward kernel: soft contact between particles
// and rigid-body collision shapes. Each "thread" handles one
// (shape, particle) pair: tid = shape_index * num_particles + particle_index.
// The particle is transformed into shape-local space, a signed distance and
// gradient are queried for the shape's geometry type (0 = sphere, 1 = box,
// 2 = capsule), and a penalty + damping + clamped-friction force is applied
// to the particle and (if the shape is attached to a body, body index >= 0)
// as an equal-and-opposite wrench on the body.
// NOTE(review): the generated if/select idiom computes branch results inside
// each `if` and then uses df::select to pick the branch value — presumably
// select(cond, a, b) yields b when cond is true; confirm against df runtime.
void eval_soft_contacts_cpu_kernel_forward(
int var_num_particles,
df::float3* var_particle_x,
df::float3* var_particle_v,
spatial_transform* var_body_X_sc,
spatial_vector* var_body_v_sc,
spatial_transform* var_shape_X_co,
int* var_shape_body,
int* var_shape_geo_type,
int* var_shape_geo_src,
df::float3* var_shape_geo_scale,
float* var_shape_materials,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
df::float3* var_particle_f,
spatial_vector* var_body_f)
{
//---------
// primal vars
int var_0;
int var_1;
int var_2;
int var_3;
df::float3 var_4;
df::float3 var_5;
spatial_transform var_6;
const int var_7 = 0;
bool var_8;
spatial_transform var_9;
spatial_transform var_10;
spatial_transform var_11;
spatial_transform var_12;
spatial_transform var_13;
df::float3 var_14;
int var_15;
df::float3 var_16;
const float var_17 = 0.01;
const float var_18 = 0.0;
df::float3 var_19;
bool var_20;
df::float3 var_21;
float var_22;
float var_23;
float var_24;
float var_25;
df::float3 var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
float var_30;
df::float3 var_31;
const int var_32 = 1;
bool var_33;
float var_34;
float var_35;
float var_36;
df::float3 var_37;
df::float3 var_38;
float var_39;
df::float3 var_40;
const int var_41 = 2;
bool var_42;
float var_43;
float var_44;
float var_45;
float var_46;
float var_47;
float var_48;
float var_49;
df::float3 var_50;
df::float3 var_51;
float var_52;
df::float3 var_53;
spatial_vector var_54;
bool var_55;
spatial_vector var_56;
spatial_vector var_57;
df::float3 var_58;
df::float3 var_59;
df::float3 var_60;
df::float3 var_61;
df::float3 var_62;
float var_63;
df::float3 var_64;
df::float3 var_65;
df::float3 var_66;
df::float3 var_67;
float var_68;
df::float3 var_69;
df::float3 var_70;
float var_71;
float var_72;
float var_73;
df::float3 var_74;
float var_75;
float var_76;
df::float3 var_77;
float var_78;
float var_79;
df::float3 var_80;
df::float3 var_81;
float var_82;
df::float3 var_83;
df::float3 var_84;
df::float3 var_85;
bool var_86;
spatial_vector var_87;
//---------
// forward
// decompose tid into (shape index var_1, particle index var_2)
var_0 = df::tid();
var_1 = df::div(var_0, var_num_particles);
var_2 = df::mod(var_0, var_num_particles);
var_3 = df::load(var_shape_body, var_1);
var_4 = df::load(var_particle_x, var_2);
var_5 = df::load(var_particle_v, var_2);
// body transform (identity for static shapes, i.e. body index < 0)
var_6 = df::spatial_transform_identity();
var_8 = (var_3 >= var_7);
if (var_8) {
var_9 = df::load(var_body_X_sc, var_3);
}
var_10 = df::select(var_8, var_6, var_9);
// compose body and shape-offset transforms, map particle into shape space
var_11 = df::load(var_shape_X_co, var_1);
var_12 = df::spatial_transform_multiply(var_10, var_11);
var_13 = spatial_transform_inverse_cpu_func(var_12);
var_14 = df::spatial_transform_point(var_13, var_4);
var_15 = df::load(var_shape_geo_type, var_1);
var_16 = df::load(var_shape_geo_scale, var_1);
var_19 = df::float3(var_18, var_18, var_18);
// geometry type 0: sphere (radius = scale[0]); c = min(sdf - 0.01, 0),
// gradient rotated back to world space
var_20 = (var_15 == var_7);
if (var_20) {
var_21 = df::float3(var_18, var_18, var_18);
var_22 = df::index(var_16, var_7);
var_23 = sphere_sdf_cpu_func(var_21, var_22, var_14);
var_24 = df::sub(var_23, var_17);
var_25 = df::min(var_24, var_18);
var_26 = df::float3(var_18, var_18, var_18);
var_27 = df::index(var_16, var_7);
var_28 = sphere_sdf_grad_cpu_func(var_26, var_27, var_14);
var_29 = df::spatial_transform_vector(var_12, var_28);
}
var_30 = df::select(var_20, var_18, var_25);
var_31 = df::select(var_20, var_19, var_29);
// geometry type 1: box (half-extents = scale)
var_33 = (var_15 == var_32);
if (var_33) {
var_34 = box_sdf_cpu_func(var_16, var_14);
var_35 = df::sub(var_34, var_17);
var_36 = df::min(var_35, var_18);
var_37 = box_sdf_grad_cpu_func(var_16, var_14);
var_38 = df::spatial_transform_vector(var_12, var_37);
}
var_39 = df::select(var_33, var_30, var_36);
var_40 = df::select(var_33, var_31, var_38);
// geometry type 2: capsule (radius = scale[0], half-height = scale[1])
var_42 = (var_15 == var_41);
if (var_42) {
var_43 = df::index(var_16, var_7);
var_44 = df::index(var_16, var_32);
var_45 = capsule_sdf_cpu_func(var_43, var_44, var_14);
var_46 = df::sub(var_45, var_17);
var_47 = df::min(var_46, var_18);
var_48 = df::index(var_16, var_7);
var_49 = df::index(var_16, var_32);
var_50 = capsule_sdf_grad_cpu_func(var_48, var_49, var_14);
var_51 = df::spatial_transform_vector(var_12, var_50);
}
var_52 = df::select(var_42, var_39, var_47);
var_53 = df::select(var_42, var_40, var_51);
// body spatial velocity (zero for static shapes); surface point velocity
// = bottom + top x r, relative velocity var_62 = particle v - surface v
var_54 = df::spatial_vector();
var_55 = (var_3 >= var_7);
if (var_55) {
var_56 = df::load(var_body_v_sc, var_3);
}
var_57 = df::select(var_55, var_54, var_56);
var_58 = df::spatial_top(var_57);
var_59 = df::spatial_bottom(var_57);
var_60 = df::cross(var_58, var_4);
var_61 = df::add(var_59, var_60);
var_62 = df::sub(var_5, var_61);
// split relative velocity along the contact normal (var_53)
var_63 = df::dot(var_53, var_62);
var_64 = df::mul(var_53, var_63);
var_65 = df::sub(var_62, var_64);
// normal penalty force: n * c * ke
var_66 = df::mul(var_53, var_52);
var_67 = df::mul(var_66, var_ke);
// damping only on approaching velocity: n * min(vn, 0) * kd
var_68 = df::min(var_63, var_18);
var_69 = df::mul(var_53, var_68);
var_70 = df::mul(var_69, var_kd);
// Coulomb bound: var_72 = mu*c*ke (<= 0), var_73 = -var_72
var_71 = df::mul(var_mu, var_52);
var_72 = df::mul(var_71, var_ke);
var_73 = df::sub(var_18, var_72);
// tangential friction in x and z, clamped to the Coulomb bound
var_74 = df::float3(var_kf, var_18, var_18);
var_75 = df::dot(var_74, var_65);
var_76 = df::clamp(var_75, var_72, var_73);
var_77 = df::float3(var_18, var_18, var_kf);
var_78 = df::dot(var_77, var_65);
var_79 = df::clamp(var_78, var_72, var_73);
var_80 = df::float3(var_76, var_18, var_79);
// total force; damping + friction gated by step(c) (active only in contact)
var_81 = df::add(var_70, var_80);
var_82 = df::step(var_52);
var_83 = df::mul(var_81, var_82);
var_84 = df::add(var_67, var_83);
// torque arm about the origin for the body wrench
var_85 = df::cross(var_4, var_84);
df::atomic_sub(var_particle_f, var_2, var_84);
// apply reaction wrench to the attached body, if any
var_86 = (var_3 >= var_7);
if (var_86) {
var_87 = df::spatial_vector(var_85, var_84);
df::atomic_sub(var_body_f, var_3, var_87);
}
}
// Reverse-mode (adjoint) kernel for the soft-contact force evaluation.
// Auto-generated SSA-style code: the primal computation is replayed in full
// so every intermediate (var_*) is live, then the adjoint of each forward
// statement is applied in strict reverse order, accumulating gradients into
// the adj_* accumulators and the adj_* output arrays.
// NOTE(review): generated code — do not hand-edit; statement order is
// significant for correctness of the adjoint sweep.
void eval_soft_contacts_cpu_kernel_backward(
    int var_num_particles,
    df::float3* var_particle_x,
    df::float3* var_particle_v,
    spatial_transform* var_body_X_sc,
    spatial_vector* var_body_v_sc,
    spatial_transform* var_shape_X_co,
    int* var_shape_body,
    int* var_shape_geo_type,
    int* var_shape_geo_src,
    df::float3* var_shape_geo_scale,
    float* var_shape_materials,
    float var_ke,
    float var_kd,
    float var_kf,
    float var_mu,
    df::float3* var_particle_f,
    spatial_vector* var_body_f,
    int adj_num_particles,
    df::float3* adj_particle_x,
    df::float3* adj_particle_v,
    spatial_transform* adj_body_X_sc,
    spatial_vector* adj_body_v_sc,
    spatial_transform* adj_shape_X_co,
    int* adj_shape_body,
    int* adj_shape_geo_type,
    int* adj_shape_geo_src,
    df::float3* adj_shape_geo_scale,
    float* adj_shape_materials,
    float adj_ke,
    float adj_kd,
    float adj_kf,
    float adj_mu,
    df::float3* adj_particle_f,
    spatial_vector* adj_body_f)
{
    //---------
    // primal vars
    // (recomputed forward intermediates; numbering matches the generated SSA form)
    int var_0;
    int var_1;
    int var_2;
    int var_3;
    df::float3 var_4;
    df::float3 var_5;
    spatial_transform var_6;
    const int var_7 = 0;
    bool var_8;
    spatial_transform var_9;
    spatial_transform var_10;
    spatial_transform var_11;
    spatial_transform var_12;
    spatial_transform var_13;
    df::float3 var_14;
    int var_15;
    df::float3 var_16;
    const float var_17 = 0.01;
    const float var_18 = 0.0;
    df::float3 var_19;
    bool var_20;
    df::float3 var_21;
    float var_22;
    float var_23;
    float var_24;
    float var_25;
    df::float3 var_26;
    float var_27;
    df::float3 var_28;
    df::float3 var_29;
    float var_30;
    df::float3 var_31;
    const int var_32 = 1;
    bool var_33;
    float var_34;
    float var_35;
    float var_36;
    df::float3 var_37;
    df::float3 var_38;
    float var_39;
    df::float3 var_40;
    const int var_41 = 2;
    bool var_42;
    float var_43;
    float var_44;
    float var_45;
    float var_46;
    float var_47;
    float var_48;
    float var_49;
    df::float3 var_50;
    df::float3 var_51;
    float var_52;
    df::float3 var_53;
    spatial_vector var_54;
    bool var_55;
    spatial_vector var_56;
    spatial_vector var_57;
    df::float3 var_58;
    df::float3 var_59;
    df::float3 var_60;
    df::float3 var_61;
    df::float3 var_62;
    float var_63;
    df::float3 var_64;
    df::float3 var_65;
    df::float3 var_66;
    df::float3 var_67;
    float var_68;
    df::float3 var_69;
    df::float3 var_70;
    float var_71;
    float var_72;
    float var_73;
    df::float3 var_74;
    float var_75;
    float var_76;
    df::float3 var_77;
    float var_78;
    float var_79;
    df::float3 var_80;
    df::float3 var_81;
    float var_82;
    df::float3 var_83;
    df::float3 var_84;
    df::float3 var_85;
    bool var_86;
    spatial_vector var_87;
    //---------
    // dual vars
    // (one zero-initialized adjoint accumulator per primal var above)
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    df::float3 adj_4 = 0;
    df::float3 adj_5 = 0;
    spatial_transform adj_6 = 0;
    int adj_7 = 0;
    bool adj_8 = 0;
    spatial_transform adj_9 = 0;
    spatial_transform adj_10 = 0;
    spatial_transform adj_11 = 0;
    spatial_transform adj_12 = 0;
    spatial_transform adj_13 = 0;
    df::float3 adj_14 = 0;
    int adj_15 = 0;
    df::float3 adj_16 = 0;
    float adj_17 = 0;
    float adj_18 = 0;
    df::float3 adj_19 = 0;
    bool adj_20 = 0;
    df::float3 adj_21 = 0;
    float adj_22 = 0;
    float adj_23 = 0;
    float adj_24 = 0;
    float adj_25 = 0;
    df::float3 adj_26 = 0;
    float adj_27 = 0;
    df::float3 adj_28 = 0;
    df::float3 adj_29 = 0;
    float adj_30 = 0;
    df::float3 adj_31 = 0;
    int adj_32 = 0;
    bool adj_33 = 0;
    float adj_34 = 0;
    float adj_35 = 0;
    float adj_36 = 0;
    df::float3 adj_37 = 0;
    df::float3 adj_38 = 0;
    float adj_39 = 0;
    df::float3 adj_40 = 0;
    int adj_41 = 0;
    bool adj_42 = 0;
    float adj_43 = 0;
    float adj_44 = 0;
    float adj_45 = 0;
    float adj_46 = 0;
    float adj_47 = 0;
    float adj_48 = 0;
    float adj_49 = 0;
    df::float3 adj_50 = 0;
    df::float3 adj_51 = 0;
    float adj_52 = 0;
    df::float3 adj_53 = 0;
    spatial_vector adj_54 = 0;
    bool adj_55 = 0;
    spatial_vector adj_56 = 0;
    spatial_vector adj_57 = 0;
    df::float3 adj_58 = 0;
    df::float3 adj_59 = 0;
    df::float3 adj_60 = 0;
    df::float3 adj_61 = 0;
    df::float3 adj_62 = 0;
    float adj_63 = 0;
    df::float3 adj_64 = 0;
    df::float3 adj_65 = 0;
    df::float3 adj_66 = 0;
    df::float3 adj_67 = 0;
    float adj_68 = 0;
    df::float3 adj_69 = 0;
    df::float3 adj_70 = 0;
    float adj_71 = 0;
    float adj_72 = 0;
    float adj_73 = 0;
    df::float3 adj_74 = 0;
    float adj_75 = 0;
    float adj_76 = 0;
    df::float3 adj_77 = 0;
    float adj_78 = 0;
    float adj_79 = 0;
    df::float3 adj_80 = 0;
    df::float3 adj_81 = 0;
    float adj_82 = 0;
    df::float3 adj_83 = 0;
    df::float3 adj_84 = 0;
    df::float3 adj_85 = 0;
    bool adj_86 = 0;
    spatial_vector adj_87 = 0;
    //---------
    // forward
    // Replay of the primal kernel (identical to the *_forward kernel) so the
    // reverse sweep below has access to every intermediate value.
    var_0 = df::tid();
    var_1 = df::div(var_0, var_num_particles);
    var_2 = df::mod(var_0, var_num_particles);
    var_3 = df::load(var_shape_body, var_1);
    var_4 = df::load(var_particle_x, var_2);
    var_5 = df::load(var_particle_v, var_2);
    var_6 = df::spatial_transform_identity();
    var_8 = (var_3 >= var_7);
    if (var_8) {
        var_9 = df::load(var_body_X_sc, var_3);
    }
    var_10 = df::select(var_8, var_6, var_9);
    var_11 = df::load(var_shape_X_co, var_1);
    var_12 = df::spatial_transform_multiply(var_10, var_11);
    var_13 = spatial_transform_inverse_cpu_func(var_12);
    var_14 = df::spatial_transform_point(var_13, var_4);
    var_15 = df::load(var_shape_geo_type, var_1);
    var_16 = df::load(var_shape_geo_scale, var_1);
    var_19 = df::float3(var_18, var_18, var_18);
    // geometry-type dispatch: var_15 == 0 sphere, == 1 box, == 2 capsule
    var_20 = (var_15 == var_7);
    if (var_20) {
        var_21 = df::float3(var_18, var_18, var_18);
        var_22 = df::index(var_16, var_7);
        var_23 = sphere_sdf_cpu_func(var_21, var_22, var_14);
        var_24 = df::sub(var_23, var_17);
        var_25 = df::min(var_24, var_18);
        var_26 = df::float3(var_18, var_18, var_18);
        var_27 = df::index(var_16, var_7);
        var_28 = sphere_sdf_grad_cpu_func(var_26, var_27, var_14);
        var_29 = df::spatial_transform_vector(var_12, var_28);
    }
    var_30 = df::select(var_20, var_18, var_25);
    var_31 = df::select(var_20, var_19, var_29);
    var_33 = (var_15 == var_32);
    if (var_33) {
        var_34 = box_sdf_cpu_func(var_16, var_14);
        var_35 = df::sub(var_34, var_17);
        var_36 = df::min(var_35, var_18);
        var_37 = box_sdf_grad_cpu_func(var_16, var_14);
        var_38 = df::spatial_transform_vector(var_12, var_37);
    }
    var_39 = df::select(var_33, var_30, var_36);
    var_40 = df::select(var_33, var_31, var_38);
    var_42 = (var_15 == var_41);
    if (var_42) {
        var_43 = df::index(var_16, var_7);
        var_44 = df::index(var_16, var_32);
        var_45 = capsule_sdf_cpu_func(var_43, var_44, var_14);
        var_46 = df::sub(var_45, var_17);
        var_47 = df::min(var_46, var_18);
        var_48 = df::index(var_16, var_7);
        var_49 = df::index(var_16, var_32);
        var_50 = capsule_sdf_grad_cpu_func(var_48, var_49, var_14);
        var_51 = df::spatial_transform_vector(var_12, var_50);
    }
    var_52 = df::select(var_42, var_39, var_47);
    var_53 = df::select(var_42, var_40, var_51);
    var_54 = df::spatial_vector();
    var_55 = (var_3 >= var_7);
    if (var_55) {
        var_56 = df::load(var_body_v_sc, var_3);
    }
    var_57 = df::select(var_55, var_54, var_56);
    var_58 = df::spatial_top(var_57);
    var_59 = df::spatial_bottom(var_57);
    var_60 = df::cross(var_58, var_4);
    var_61 = df::add(var_59, var_60);
    var_62 = df::sub(var_5, var_61);
    var_63 = df::dot(var_53, var_62);
    var_64 = df::mul(var_53, var_63);
    var_65 = df::sub(var_62, var_64);
    var_66 = df::mul(var_53, var_52);
    var_67 = df::mul(var_66, var_ke);
    var_68 = df::min(var_63, var_18);
    var_69 = df::mul(var_53, var_68);
    var_70 = df::mul(var_69, var_kd);
    var_71 = df::mul(var_mu, var_52);
    var_72 = df::mul(var_71, var_ke);
    var_73 = df::sub(var_18, var_72);
    var_74 = df::float3(var_kf, var_18, var_18);
    var_75 = df::dot(var_74, var_65);
    var_76 = df::clamp(var_75, var_72, var_73);
    var_77 = df::float3(var_18, var_18, var_kf);
    var_78 = df::dot(var_77, var_65);
    var_79 = df::clamp(var_78, var_72, var_73);
    var_80 = df::float3(var_76, var_18, var_79);
    var_81 = df::add(var_70, var_80);
    var_82 = df::step(var_52);
    var_83 = df::mul(var_81, var_82);
    var_84 = df::add(var_67, var_83);
    var_85 = df::cross(var_4, var_84);
    df::atomic_sub(var_particle_f, var_2, var_84);
    var_86 = (var_3 >= var_7);
    if (var_86) {
        var_87 = df::spatial_vector(var_85, var_84);
        df::atomic_sub(var_body_f, var_3, var_87);
    }
    //---------
    // reverse
    // Adjoint of every forward statement, applied in exact reverse order.
    // Conditional regions mirror the forward branches (body attachment and
    // the sphere / box / capsule geometry dispatch).
    if (var_86) {
        df::adj_atomic_sub(var_body_f, var_3, var_87, adj_body_f, adj_3, adj_87);
        df::adj_spatial_vector(var_85, var_84, adj_85, adj_84, adj_87);
    }
    df::adj_atomic_sub(var_particle_f, var_2, var_84, adj_particle_f, adj_2, adj_84);
    df::adj_cross(var_4, var_84, adj_4, adj_84, adj_85);
    df::adj_add(var_67, var_83, adj_67, adj_83, adj_84);
    df::adj_mul(var_81, var_82, adj_81, adj_82, adj_83);
    df::adj_step(var_52, adj_52, adj_82);
    df::adj_add(var_70, var_80, adj_70, adj_80, adj_81);
    df::adj_float3(var_76, var_18, var_79, adj_76, adj_18, adj_79, adj_80);
    df::adj_clamp(var_78, var_72, var_73, adj_78, adj_72, adj_73, adj_79);
    df::adj_dot(var_77, var_65, adj_77, adj_65, adj_78);
    df::adj_float3(var_18, var_18, var_kf, adj_18, adj_18, adj_kf, adj_77);
    df::adj_clamp(var_75, var_72, var_73, adj_75, adj_72, adj_73, adj_76);
    df::adj_dot(var_74, var_65, adj_74, adj_65, adj_75);
    df::adj_float3(var_kf, var_18, var_18, adj_kf, adj_18, adj_18, adj_74);
    df::adj_sub(var_18, var_72, adj_18, adj_72, adj_73);
    df::adj_mul(var_71, var_ke, adj_71, adj_ke, adj_72);
    df::adj_mul(var_mu, var_52, adj_mu, adj_52, adj_71);
    df::adj_mul(var_69, var_kd, adj_69, adj_kd, adj_70);
    df::adj_mul(var_53, var_68, adj_53, adj_68, adj_69);
    df::adj_min(var_63, var_18, adj_63, adj_18, adj_68);
    df::adj_mul(var_66, var_ke, adj_66, adj_ke, adj_67);
    df::adj_mul(var_53, var_52, adj_53, adj_52, adj_66);
    df::adj_sub(var_62, var_64, adj_62, adj_64, adj_65);
    df::adj_mul(var_53, var_63, adj_53, adj_63, adj_64);
    df::adj_dot(var_53, var_62, adj_53, adj_62, adj_63);
    df::adj_sub(var_5, var_61, adj_5, adj_61, adj_62);
    df::adj_add(var_59, var_60, adj_59, adj_60, adj_61);
    df::adj_cross(var_58, var_4, adj_58, adj_4, adj_60);
    df::adj_spatial_bottom(var_57, adj_57, adj_59);
    df::adj_spatial_top(var_57, adj_57, adj_58);
    df::adj_select(var_55, var_54, var_56, adj_55, adj_54, adj_56, adj_57);
    if (var_55) {
        df::adj_load(var_body_v_sc, var_3, adj_body_v_sc, adj_3, adj_56);
    }
    df::adj_select(var_42, var_40, var_51, adj_42, adj_40, adj_51, adj_53);
    df::adj_select(var_42, var_39, var_47, adj_42, adj_39, adj_47, adj_52);
    if (var_42) {
        df::adj_spatial_transform_vector(var_12, var_50, adj_12, adj_50, adj_51);
        adj_capsule_sdf_grad_cpu_func(var_48, var_49, var_14, adj_48, adj_49, adj_14, adj_50);
        df::adj_index(var_16, var_32, adj_16, adj_32, adj_49);
        df::adj_index(var_16, var_7, adj_16, adj_7, adj_48);
        df::adj_min(var_46, var_18, adj_46, adj_18, adj_47);
        df::adj_sub(var_45, var_17, adj_45, adj_17, adj_46);
        adj_capsule_sdf_cpu_func(var_43, var_44, var_14, adj_43, adj_44, adj_14, adj_45);
        df::adj_index(var_16, var_32, adj_16, adj_32, adj_44);
        df::adj_index(var_16, var_7, adj_16, adj_7, adj_43);
    }
    df::adj_select(var_33, var_31, var_38, adj_33, adj_31, adj_38, adj_40);
    df::adj_select(var_33, var_30, var_36, adj_33, adj_30, adj_36, adj_39);
    if (var_33) {
        df::adj_spatial_transform_vector(var_12, var_37, adj_12, adj_37, adj_38);
        adj_box_sdf_grad_cpu_func(var_16, var_14, adj_16, adj_14, adj_37);
        df::adj_min(var_35, var_18, adj_35, adj_18, adj_36);
        df::adj_sub(var_34, var_17, adj_34, adj_17, adj_35);
        adj_box_sdf_cpu_func(var_16, var_14, adj_16, adj_14, adj_34);
    }
    df::adj_select(var_20, var_19, var_29, adj_20, adj_19, adj_29, adj_31);
    df::adj_select(var_20, var_18, var_25, adj_20, adj_18, adj_25, adj_30);
    if (var_20) {
        df::adj_spatial_transform_vector(var_12, var_28, adj_12, adj_28, adj_29);
        adj_sphere_sdf_grad_cpu_func(var_26, var_27, var_14, adj_26, adj_27, adj_14, adj_28);
        df::adj_index(var_16, var_7, adj_16, adj_7, adj_27);
        df::adj_float3(var_18, var_18, var_18, adj_18, adj_18, adj_18, adj_26);
        df::adj_min(var_24, var_18, adj_24, adj_18, adj_25);
        df::adj_sub(var_23, var_17, adj_23, adj_17, adj_24);
        adj_sphere_sdf_cpu_func(var_21, var_22, var_14, adj_21, adj_22, adj_14, adj_23);
        df::adj_index(var_16, var_7, adj_16, adj_7, adj_22);
        df::adj_float3(var_18, var_18, var_18, adj_18, adj_18, adj_18, adj_21);
    }
    df::adj_float3(var_18, var_18, var_18, adj_18, adj_18, adj_18, adj_19);
    df::adj_load(var_shape_geo_scale, var_1, adj_shape_geo_scale, adj_1, adj_16);
    df::adj_load(var_shape_geo_type, var_1, adj_shape_geo_type, adj_1, adj_15);
    df::adj_spatial_transform_point(var_13, var_4, adj_13, adj_4, adj_14);
    adj_spatial_transform_inverse_cpu_func(var_12, adj_12, adj_13);
    df::adj_spatial_transform_multiply(var_10, var_11, adj_10, adj_11, adj_12);
    df::adj_load(var_shape_X_co, var_1, adj_shape_X_co, adj_1, adj_11);
    df::adj_select(var_8, var_6, var_9, adj_8, adj_6, adj_9, adj_10);
    if (var_8) {
        df::adj_load(var_body_X_sc, var_3, adj_body_X_sc, adj_3, adj_9);
    }
    df::adj_load(var_particle_v, var_2, adj_particle_v, adj_2, adj_5);
    df::adj_load(var_particle_x, var_2, adj_particle_x, adj_2, adj_4);
    df::adj_load(var_shape_body, var_1, adj_shape_body, adj_1, adj_3);
    df::adj_mod(var_0, var_num_particles, adj_0, adj_num_particles, adj_2);
    df::adj_div(var_0, var_num_particles, adj_0, adj_num_particles, adj_1);
    return;
}
// Python entry points
// Host-side launcher for the soft-contact forward kernel. Emulates a
// 'dim'-wide parallel launch by invoking the kernel serially once per
// logical thread index; raw data pointers are extracted from the tensors
// via cast<>() on each call.
void eval_soft_contacts_cpu_forward(int dim,
    int var_num_particles,
    torch::Tensor var_particle_x,
    torch::Tensor var_particle_v,
    torch::Tensor var_body_X_sc,
    torch::Tensor var_body_v_sc,
    torch::Tensor var_shape_X_co,
    torch::Tensor var_shape_body,
    torch::Tensor var_shape_geo_type,
    torch::Tensor var_shape_geo_src,
    torch::Tensor var_shape_geo_scale,
    torch::Tensor var_shape_materials,
    float var_ke,
    float var_kd,
    float var_kf,
    float var_mu,
    torch::Tensor var_particle_f,
    torch::Tensor var_body_f)
{
    // s_threadIdx presumably backs df::tid() inside the kernel — it stands
    // in for the GPU thread index on this serial CPU path (TODO confirm).
    for (int tid = 0; tid < dim; ++tid) {
        s_threadIdx = tid;
        eval_soft_contacts_cpu_kernel_forward(
            var_num_particles,
            cast<df::float3*>(var_particle_x),
            cast<df::float3*>(var_particle_v),
            cast<spatial_transform*>(var_body_X_sc),
            cast<spatial_vector*>(var_body_v_sc),
            cast<spatial_transform*>(var_shape_X_co),
            cast<int*>(var_shape_body),
            cast<int*>(var_shape_geo_type),
            cast<int*>(var_shape_geo_src),
            cast<df::float3*>(var_shape_geo_scale),
            cast<float*>(var_shape_materials),
            var_ke,
            var_kd,
            var_kf,
            var_mu,
            cast<df::float3*>(var_particle_f),
            cast<spatial_vector*>(var_body_f));
    }
}
void eval_soft_contacts_cpu_backward(int dim,
int var_num_particles,
torch::Tensor var_particle_x,
torch::Tensor var_particle_v,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_v_sc,
torch::Tensor var_shape_X_co,
torch::Tensor var_shape_body,
torch::Tensor var_shape_geo_type,
torch::Tensor var_shape_geo_src,
torch::Tensor var_shape_geo_scale,
torch::Tensor var_shape_materials,
float var_ke,
float var_kd,
float var_kf,
float var_mu,
torch::Tensor var_particle_f,
torch::Tensor var_body_f,
int adj_num_particles,
torch::Tensor adj_particle_x,
torch::Tensor adj_particle_v,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_v_sc,
torch::Tensor adj_shape_X_co,
torch::Tensor adj_shape_body,
torch::Tensor adj_shape_geo_type,
torch::Tensor adj_shape_geo_src,
torch::Tensor adj_shape_geo_scale,
torch::Tensor adj_shape_materials,
float adj_ke,
float adj_kd,
float adj_kf,
float adj_mu,
torch::Tensor adj_particle_f,
torch::Tensor adj_body_f)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_soft_contacts_cpu_kernel_backward(
var_num_particles,
cast<df::float3*>(var_particle_x),
cast<df::float3*>(var_particle_v),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_vector*>(var_body_v_sc),
cast<spatial_transform*>(var_shape_X_co),
cast<int*>(var_shape_body),
cast<int*>(var_shape_geo_type),
cast<int*>(var_shape_geo_src),
cast<df::float3*>(var_shape_geo_scale),
cast<float*>(var_shape_materials),
var_ke,
var_kd,
var_kf,
var_mu,
cast<df::float3*>(var_particle_f),
cast<spatial_vector*>(var_body_f),
adj_num_particles,
cast<df::float3*>(adj_particle_x),
cast<df::float3*>(adj_particle_v),
cast<spatial_transform*>(adj_body_X_sc),
cast<spatial_vector*>(adj_body_v_sc),
cast<spatial_transform*>(adj_shape_X_co),
cast<int*>(adj_shape_body),
cast<int*>(adj_shape_geo_type),
cast<int*>(adj_shape_geo_src),
cast<df::float3*>(adj_shape_geo_scale),
cast<float*>(adj_shape_materials),
adj_ke,
adj_kd,
adj_kf,
adj_mu,
cast<df::float3*>(adj_particle_f),
cast<spatial_vector*>(adj_body_f));
}
}
// Python entry points
// NOTE(review): redundant re-declarations emitted by the code generator —
// the definitions above already serve as declarations. Harmless in C++,
// but they could be dropped on the next generator pass.
void eval_soft_contacts_cpu_forward(int dim,
    int var_num_particles,
    torch::Tensor var_particle_x,
    torch::Tensor var_particle_v,
    torch::Tensor var_body_X_sc,
    torch::Tensor var_body_v_sc,
    torch::Tensor var_shape_X_co,
    torch::Tensor var_shape_body,
    torch::Tensor var_shape_geo_type,
    torch::Tensor var_shape_geo_src,
    torch::Tensor var_shape_geo_scale,
    torch::Tensor var_shape_materials,
    float var_ke,
    float var_kd,
    float var_kf,
    float var_mu,
    torch::Tensor var_particle_f,
    torch::Tensor var_body_f);
void eval_soft_contacts_cpu_backward(int dim,
    int var_num_particles,
    torch::Tensor var_particle_x,
    torch::Tensor var_particle_v,
    torch::Tensor var_body_X_sc,
    torch::Tensor var_body_v_sc,
    torch::Tensor var_shape_X_co,
    torch::Tensor var_shape_body,
    torch::Tensor var_shape_geo_type,
    torch::Tensor var_shape_geo_src,
    torch::Tensor var_shape_geo_scale,
    torch::Tensor var_shape_materials,
    float var_ke,
    float var_kd,
    float var_kf,
    float var_mu,
    torch::Tensor var_particle_f,
    torch::Tensor var_body_f,
    int adj_num_particles,
    torch::Tensor adj_particle_x,
    torch::Tensor adj_particle_v,
    torch::Tensor adj_body_X_sc,
    torch::Tensor adj_body_v_sc,
    torch::Tensor adj_shape_X_co,
    torch::Tensor adj_shape_body,
    torch::Tensor adj_shape_geo_type,
    torch::Tensor adj_shape_geo_src,
    torch::Tensor adj_shape_geo_scale,
    torch::Tensor adj_shape_materials,
    float adj_ke,
    float adj_kd,
    float adj_kf,
    float adj_mu,
    torch::Tensor adj_particle_f,
    torch::Tensor adj_body_f);
// Forward kernel: ground-plane contact forces for rigid bodies.
// One logical thread per contact: transforms the body-local contact point to
// world space, computes a penalty normal force (penetration * stiffness) plus
// velocity damping and a clamped friction force, then subtracts the resulting
// force and torque from var_rigid_f / var_rigid_t via atomic updates.
// Auto-generated SSA-style code — do not hand-edit.
void eval_rigid_contacts_cpu_kernel_forward(
    df::float3* var_rigid_x,
    quat* var_rigid_r,
    df::float3* var_rigid_v,
    df::float3* var_rigid_w,
    int* var_contact_body,
    df::float3* var_contact_point,
    float* var_contact_dist,
    int* var_contact_mat,
    float* var_materials,
    df::float3* var_rigid_f,
    df::float3* var_rigid_t)
{
    //---------
    // primal vars
    int var_0;
    int var_1;
    df::float3 var_2;
    float var_3;
    int var_4;
    const int var_5 = 4;
    int var_6;
    const int var_7 = 0;
    int var_8;
    float var_9;
    int var_10;
    const int var_11 = 1;
    int var_12;
    float var_13;
    int var_14;
    const int var_15 = 2;
    int var_16;
    float var_17;
    int var_18;
    const int var_19 = 3;
    int var_20;
    float var_21;
    df::float3 var_22;
    quat var_23;
    df::float3 var_24;
    df::float3 var_25;
    const float var_26 = 0.0;
    const float var_27 = 1.0;
    df::float3 var_28;
    df::float3 var_29;
    df::float3 var_30;
    df::float3 var_31;
    df::float3 var_32;
    df::float3 var_33;
    df::float3 var_34;
    df::float3 var_35;
    float var_36;
    float var_37;
    float var_38;
    df::float3 var_39;
    df::float3 var_40;
    float var_41;
    float var_42;
    float var_43;
    float var_44;
    float var_45;
    float var_46;
    float var_47;
    float var_48;
    df::float3 var_49;
    float var_50;
    float var_51;
    df::float3 var_52;
    float var_53;
    float var_54;
    df::float3 var_55;
    float var_56;
    df::float3 var_57;
    float var_58;
    df::float3 var_59;
    df::float3 var_60;
    df::float3 var_61;
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::load(var_contact_body, var_0);
    var_2 = df::load(var_contact_point, var_0);
    var_3 = df::load(var_contact_dist, var_0);
    var_4 = df::load(var_contact_mat, var_0);
    // materials is packed as 4 floats per material index; offsets 0..3 are
    // presumably ke, kd, kf, mu (matches the soft-contact kernel parameter
    // order) — TODO confirm against the model builder.
    var_6 = df::mul(var_4, var_5);
    var_8 = df::add(var_6, var_7);
    var_9 = df::load(var_materials, var_8);
    var_10 = df::mul(var_4, var_5);
    var_12 = df::add(var_10, var_11);
    var_13 = df::load(var_materials, var_12);
    var_14 = df::mul(var_4, var_5);
    var_16 = df::add(var_14, var_15);
    var_17 = df::load(var_materials, var_16);
    var_18 = df::mul(var_4, var_5);
    var_20 = df::add(var_18, var_19);
    var_21 = df::load(var_materials, var_20);
    var_22 = df::load(var_rigid_x, var_1);
    var_23 = df::load(var_rigid_r, var_1);
    var_24 = df::load(var_rigid_v, var_1);
    var_25 = df::load(var_rigid_w, var_1);
    // contact normal: world up axis (0, 1, 0), i.e. ground plane y = 0
    var_28 = df::float3(var_26, var_27, var_26);
    var_29 = df::rotate(var_23, var_2);
    var_30 = df::add(var_22, var_29);
    var_31 = df::mul(var_28, var_3);
    var_32 = df::sub(var_30, var_31);
    var_33 = df::sub(var_32, var_22);
    // contact point world velocity: v + w x r (r = moment arm from body origin)
    var_34 = df::cross(var_25, var_33);
    var_35 = df::add(var_24, var_34);
    // var_37: penetration depth (height along the normal, clamped to <= 0)
    var_36 = df::dot(var_28, var_32);
    var_37 = df::min(var_36, var_26);
    var_38 = df::dot(var_28, var_35);
    var_39 = df::mul(var_28, var_38);
    var_40 = df::sub(var_35, var_39);
    var_41 = df::mul(var_37, var_9);
    var_42 = df::min(var_38, var_26);
    var_43 = df::mul(var_42, var_13);
    var_44 = df::step(var_37);
    var_45 = df::mul(var_43, var_44);
    var_46 = df::add(var_41, var_45);
    // friction force clamped to [-mu*|fn|, +mu*|fn|] on the two tangent axes
    var_47 = df::mul(var_21, var_46);
    var_48 = df::sub(var_26, var_47);
    var_49 = df::float3(var_17, var_26, var_26);
    var_50 = df::dot(var_49, var_40);
    var_51 = df::clamp(var_50, var_47, var_48);
    var_52 = df::float3(var_26, var_26, var_17);
    var_53 = df::dot(var_52, var_40);
    var_54 = df::clamp(var_53, var_47, var_48);
    var_55 = df::float3(var_51, var_26, var_54);
    var_56 = df::step(var_37);
    var_57 = df::mul(var_55, var_56);
    var_58 = df::add(var_41, var_45);
    var_59 = df::mul(var_28, var_58);
    var_60 = df::add(var_59, var_57);
    var_61 = df::cross(var_33, var_60);
    // accumulate the negated force and torque on the body
    df::atomic_sub(var_rigid_f, var_1, var_60);
    df::atomic_sub(var_rigid_t, var_1, var_61);
}
// Reverse-mode (adjoint) kernel for the rigid ground-contact evaluation.
// Auto-generated SSA-style code: replays the primal computation so every
// intermediate (var_*) is available, then applies the adjoint of each
// forward statement in strict reverse order, accumulating gradients into
// the adj_* accumulators and adjoint output arrays.
// NOTE(review): generated code — do not hand-edit; statement order matters.
void eval_rigid_contacts_cpu_kernel_backward(
    df::float3* var_rigid_x,
    quat* var_rigid_r,
    df::float3* var_rigid_v,
    df::float3* var_rigid_w,
    int* var_contact_body,
    df::float3* var_contact_point,
    float* var_contact_dist,
    int* var_contact_mat,
    float* var_materials,
    df::float3* var_rigid_f,
    df::float3* var_rigid_t,
    df::float3* adj_rigid_x,
    quat* adj_rigid_r,
    df::float3* adj_rigid_v,
    df::float3* adj_rigid_w,
    int* adj_contact_body,
    df::float3* adj_contact_point,
    float* adj_contact_dist,
    int* adj_contact_mat,
    float* adj_materials,
    df::float3* adj_rigid_f,
    df::float3* adj_rigid_t)
{
    //---------
    // primal vars
    // (recomputed forward intermediates; numbering matches the generated SSA form)
    int var_0;
    int var_1;
    df::float3 var_2;
    float var_3;
    int var_4;
    const int var_5 = 4;
    int var_6;
    const int var_7 = 0;
    int var_8;
    float var_9;
    int var_10;
    const int var_11 = 1;
    int var_12;
    float var_13;
    int var_14;
    const int var_15 = 2;
    int var_16;
    float var_17;
    int var_18;
    const int var_19 = 3;
    int var_20;
    float var_21;
    df::float3 var_22;
    quat var_23;
    df::float3 var_24;
    df::float3 var_25;
    const float var_26 = 0.0;
    const float var_27 = 1.0;
    df::float3 var_28;
    df::float3 var_29;
    df::float3 var_30;
    df::float3 var_31;
    df::float3 var_32;
    df::float3 var_33;
    df::float3 var_34;
    df::float3 var_35;
    float var_36;
    float var_37;
    float var_38;
    df::float3 var_39;
    df::float3 var_40;
    float var_41;
    float var_42;
    float var_43;
    float var_44;
    float var_45;
    float var_46;
    float var_47;
    float var_48;
    df::float3 var_49;
    float var_50;
    float var_51;
    df::float3 var_52;
    float var_53;
    float var_54;
    df::float3 var_55;
    float var_56;
    df::float3 var_57;
    float var_58;
    df::float3 var_59;
    df::float3 var_60;
    df::float3 var_61;
    //---------
    // dual vars
    // (one zero-initialized adjoint accumulator per primal var above)
    int adj_0 = 0;
    int adj_1 = 0;
    df::float3 adj_2 = 0;
    float adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    int adj_7 = 0;
    int adj_8 = 0;
    float adj_9 = 0;
    int adj_10 = 0;
    int adj_11 = 0;
    int adj_12 = 0;
    float adj_13 = 0;
    int adj_14 = 0;
    int adj_15 = 0;
    int adj_16 = 0;
    float adj_17 = 0;
    int adj_18 = 0;
    int adj_19 = 0;
    int adj_20 = 0;
    float adj_21 = 0;
    df::float3 adj_22 = 0;
    quat adj_23 = 0;
    df::float3 adj_24 = 0;
    df::float3 adj_25 = 0;
    float adj_26 = 0;
    float adj_27 = 0;
    df::float3 adj_28 = 0;
    df::float3 adj_29 = 0;
    df::float3 adj_30 = 0;
    df::float3 adj_31 = 0;
    df::float3 adj_32 = 0;
    df::float3 adj_33 = 0;
    df::float3 adj_34 = 0;
    df::float3 adj_35 = 0;
    float adj_36 = 0;
    float adj_37 = 0;
    float adj_38 = 0;
    df::float3 adj_39 = 0;
    df::float3 adj_40 = 0;
    float adj_41 = 0;
    float adj_42 = 0;
    float adj_43 = 0;
    float adj_44 = 0;
    float adj_45 = 0;
    float adj_46 = 0;
    float adj_47 = 0;
    float adj_48 = 0;
    df::float3 adj_49 = 0;
    float adj_50 = 0;
    float adj_51 = 0;
    df::float3 adj_52 = 0;
    float adj_53 = 0;
    float adj_54 = 0;
    df::float3 adj_55 = 0;
    float adj_56 = 0;
    df::float3 adj_57 = 0;
    float adj_58 = 0;
    df::float3 adj_59 = 0;
    df::float3 adj_60 = 0;
    df::float3 adj_61 = 0;
    //---------
    // forward
    // Replay of the primal kernel (identical to the *_forward kernel) so the
    // reverse sweep below has access to every intermediate value.
    var_0 = df::tid();
    var_1 = df::load(var_contact_body, var_0);
    var_2 = df::load(var_contact_point, var_0);
    var_3 = df::load(var_contact_dist, var_0);
    var_4 = df::load(var_contact_mat, var_0);
    var_6 = df::mul(var_4, var_5);
    var_8 = df::add(var_6, var_7);
    var_9 = df::load(var_materials, var_8);
    var_10 = df::mul(var_4, var_5);
    var_12 = df::add(var_10, var_11);
    var_13 = df::load(var_materials, var_12);
    var_14 = df::mul(var_4, var_5);
    var_16 = df::add(var_14, var_15);
    var_17 = df::load(var_materials, var_16);
    var_18 = df::mul(var_4, var_5);
    var_20 = df::add(var_18, var_19);
    var_21 = df::load(var_materials, var_20);
    var_22 = df::load(var_rigid_x, var_1);
    var_23 = df::load(var_rigid_r, var_1);
    var_24 = df::load(var_rigid_v, var_1);
    var_25 = df::load(var_rigid_w, var_1);
    var_28 = df::float3(var_26, var_27, var_26);
    var_29 = df::rotate(var_23, var_2);
    var_30 = df::add(var_22, var_29);
    var_31 = df::mul(var_28, var_3);
    var_32 = df::sub(var_30, var_31);
    var_33 = df::sub(var_32, var_22);
    var_34 = df::cross(var_25, var_33);
    var_35 = df::add(var_24, var_34);
    var_36 = df::dot(var_28, var_32);
    var_37 = df::min(var_36, var_26);
    var_38 = df::dot(var_28, var_35);
    var_39 = df::mul(var_28, var_38);
    var_40 = df::sub(var_35, var_39);
    var_41 = df::mul(var_37, var_9);
    var_42 = df::min(var_38, var_26);
    var_43 = df::mul(var_42, var_13);
    var_44 = df::step(var_37);
    var_45 = df::mul(var_43, var_44);
    var_46 = df::add(var_41, var_45);
    var_47 = df::mul(var_21, var_46);
    var_48 = df::sub(var_26, var_47);
    var_49 = df::float3(var_17, var_26, var_26);
    var_50 = df::dot(var_49, var_40);
    var_51 = df::clamp(var_50, var_47, var_48);
    var_52 = df::float3(var_26, var_26, var_17);
    var_53 = df::dot(var_52, var_40);
    var_54 = df::clamp(var_53, var_47, var_48);
    var_55 = df::float3(var_51, var_26, var_54);
    var_56 = df::step(var_37);
    var_57 = df::mul(var_55, var_56);
    var_58 = df::add(var_41, var_45);
    var_59 = df::mul(var_28, var_58);
    var_60 = df::add(var_59, var_57);
    var_61 = df::cross(var_33, var_60);
    df::atomic_sub(var_rigid_f, var_1, var_60);
    df::atomic_sub(var_rigid_t, var_1, var_61);
    //---------
    // reverse
    // Adjoint of every forward statement, applied in exact reverse order.
    df::adj_atomic_sub(var_rigid_t, var_1, var_61, adj_rigid_t, adj_1, adj_61);
    df::adj_atomic_sub(var_rigid_f, var_1, var_60, adj_rigid_f, adj_1, adj_60);
    df::adj_cross(var_33, var_60, adj_33, adj_60, adj_61);
    df::adj_add(var_59, var_57, adj_59, adj_57, adj_60);
    df::adj_mul(var_28, var_58, adj_28, adj_58, adj_59);
    df::adj_add(var_41, var_45, adj_41, adj_45, adj_58);
    df::adj_mul(var_55, var_56, adj_55, adj_56, adj_57);
    df::adj_step(var_37, adj_37, adj_56);
    df::adj_float3(var_51, var_26, var_54, adj_51, adj_26, adj_54, adj_55);
    df::adj_clamp(var_53, var_47, var_48, adj_53, adj_47, adj_48, adj_54);
    df::adj_dot(var_52, var_40, adj_52, adj_40, adj_53);
    df::adj_float3(var_26, var_26, var_17, adj_26, adj_26, adj_17, adj_52);
    df::adj_clamp(var_50, var_47, var_48, adj_50, adj_47, adj_48, adj_51);
    df::adj_dot(var_49, var_40, adj_49, adj_40, adj_50);
    df::adj_float3(var_17, var_26, var_26, adj_17, adj_26, adj_26, adj_49);
    df::adj_sub(var_26, var_47, adj_26, adj_47, adj_48);
    df::adj_mul(var_21, var_46, adj_21, adj_46, adj_47);
    df::adj_add(var_41, var_45, adj_41, adj_45, adj_46);
    df::adj_mul(var_43, var_44, adj_43, adj_44, adj_45);
    df::adj_step(var_37, adj_37, adj_44);
    df::adj_mul(var_42, var_13, adj_42, adj_13, adj_43);
    df::adj_min(var_38, var_26, adj_38, adj_26, adj_42);
    df::adj_mul(var_37, var_9, adj_37, adj_9, adj_41);
    df::adj_sub(var_35, var_39, adj_35, adj_39, adj_40);
    df::adj_mul(var_28, var_38, adj_28, adj_38, adj_39);
    df::adj_dot(var_28, var_35, adj_28, adj_35, adj_38);
    df::adj_min(var_36, var_26, adj_36, adj_26, adj_37);
    df::adj_dot(var_28, var_32, adj_28, adj_32, adj_36);
    df::adj_add(var_24, var_34, adj_24, adj_34, adj_35);
    df::adj_cross(var_25, var_33, adj_25, adj_33, adj_34);
    df::adj_sub(var_32, var_22, adj_32, adj_22, adj_33);
    df::adj_sub(var_30, var_31, adj_30, adj_31, adj_32);
    df::adj_mul(var_28, var_3, adj_28, adj_3, adj_31);
    df::adj_add(var_22, var_29, adj_22, adj_29, adj_30);
    df::adj_rotate(var_23, var_2, adj_23, adj_2, adj_29);
    df::adj_float3(var_26, var_27, var_26, adj_26, adj_27, adj_26, adj_28);
    df::adj_load(var_rigid_w, var_1, adj_rigid_w, adj_1, adj_25);
    df::adj_load(var_rigid_v, var_1, adj_rigid_v, adj_1, adj_24);
    df::adj_load(var_rigid_r, var_1, adj_rigid_r, adj_1, adj_23);
    df::adj_load(var_rigid_x, var_1, adj_rigid_x, adj_1, adj_22);
    df::adj_load(var_materials, var_20, adj_materials, adj_20, adj_21);
    df::adj_add(var_18, var_19, adj_18, adj_19, adj_20);
    df::adj_mul(var_4, var_5, adj_4, adj_5, adj_18);
    df::adj_load(var_materials, var_16, adj_materials, adj_16, adj_17);
    df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
    df::adj_mul(var_4, var_5, adj_4, adj_5, adj_14);
    df::adj_load(var_materials, var_12, adj_materials, adj_12, adj_13);
    df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
    df::adj_mul(var_4, var_5, adj_4, adj_5, adj_10);
    df::adj_load(var_materials, var_8, adj_materials, adj_8, adj_9);
    df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
    df::adj_mul(var_4, var_5, adj_4, adj_5, adj_6);
    df::adj_load(var_contact_mat, var_0, adj_contact_mat, adj_0, adj_4);
    df::adj_load(var_contact_dist, var_0, adj_contact_dist, adj_0, adj_3);
    df::adj_load(var_contact_point, var_0, adj_contact_point, adj_0, adj_2);
    df::adj_load(var_contact_body, var_0, adj_contact_body, adj_0, adj_1);
    return;
}
// Python entry points
// Host-side launcher for the rigid-contact forward kernel: emulates a
// 'dim'-wide parallel launch by invoking the kernel serially once per
// logical thread index, with raw pointers extracted via cast<>().
void eval_rigid_contacts_cpu_forward(int dim,
    torch::Tensor var_rigid_x,
    torch::Tensor var_rigid_r,
    torch::Tensor var_rigid_v,
    torch::Tensor var_rigid_w,
    torch::Tensor var_contact_body,
    torch::Tensor var_contact_point,
    torch::Tensor var_contact_dist,
    torch::Tensor var_contact_mat,
    torch::Tensor var_materials,
    torch::Tensor var_rigid_f,
    torch::Tensor var_rigid_t)
{
    // s_threadIdx presumably backs df::tid() inside the kernel — it stands
    // in for the GPU thread index on this serial CPU path (TODO confirm).
    for (int tid = 0; tid < dim; ++tid) {
        s_threadIdx = tid;
        eval_rigid_contacts_cpu_kernel_forward(
            cast<df::float3*>(var_rigid_x),
            cast<quat*>(var_rigid_r),
            cast<df::float3*>(var_rigid_v),
            cast<df::float3*>(var_rigid_w),
            cast<int*>(var_contact_body),
            cast<df::float3*>(var_contact_point),
            cast<float*>(var_contact_dist),
            cast<int*>(var_contact_mat),
            cast<float*>(var_materials),
            cast<df::float3*>(var_rigid_f),
            cast<df::float3*>(var_rigid_t));
    }
}
void eval_rigid_contacts_cpu_backward(int dim,
torch::Tensor var_rigid_x,
torch::Tensor var_rigid_r,
torch::Tensor var_rigid_v,
torch::Tensor var_rigid_w,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_rigid_f,
torch::Tensor var_rigid_t,
torch::Tensor adj_rigid_x,
torch::Tensor adj_rigid_r,
torch::Tensor adj_rigid_v,
torch::Tensor adj_rigid_w,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_rigid_f,
torch::Tensor adj_rigid_t)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_contacts_cpu_kernel_backward(
cast<df::float3*>(var_rigid_x),
cast<quat*>(var_rigid_r),
cast<df::float3*>(var_rigid_v),
cast<df::float3*>(var_rigid_w),
cast<int*>(var_contact_body),
cast<df::float3*>(var_contact_point),
cast<float*>(var_contact_dist),
cast<int*>(var_contact_mat),
cast<float*>(var_materials),
cast<df::float3*>(var_rigid_f),
cast<df::float3*>(var_rigid_t),
cast<df::float3*>(adj_rigid_x),
cast<quat*>(adj_rigid_r),
cast<df::float3*>(adj_rigid_v),
cast<df::float3*>(adj_rigid_w),
cast<int*>(adj_contact_body),
cast<df::float3*>(adj_contact_point),
cast<float*>(adj_contact_dist),
cast<int*>(adj_contact_mat),
cast<float*>(adj_materials),
cast<df::float3*>(adj_rigid_f),
cast<df::float3*>(adj_rigid_t));
}
}
// Python entry points
// NOTE(review): redundant re-declarations emitted by the code generator —
// the definitions above already serve as declarations. Harmless in C++,
// but they could be dropped on the next generator pass.
void eval_rigid_contacts_cpu_forward(int dim,
    torch::Tensor var_rigid_x,
    torch::Tensor var_rigid_r,
    torch::Tensor var_rigid_v,
    torch::Tensor var_rigid_w,
    torch::Tensor var_contact_body,
    torch::Tensor var_contact_point,
    torch::Tensor var_contact_dist,
    torch::Tensor var_contact_mat,
    torch::Tensor var_materials,
    torch::Tensor var_rigid_f,
    torch::Tensor var_rigid_t);
void eval_rigid_contacts_cpu_backward(int dim,
    torch::Tensor var_rigid_x,
    torch::Tensor var_rigid_r,
    torch::Tensor var_rigid_v,
    torch::Tensor var_rigid_w,
    torch::Tensor var_contact_body,
    torch::Tensor var_contact_point,
    torch::Tensor var_contact_dist,
    torch::Tensor var_contact_mat,
    torch::Tensor var_materials,
    torch::Tensor var_rigid_f,
    torch::Tensor var_rigid_t,
    torch::Tensor adj_rigid_x,
    torch::Tensor adj_rigid_r,
    torch::Tensor adj_rigid_v,
    torch::Tensor adj_rigid_w,
    torch::Tensor adj_contact_body,
    torch::Tensor adj_contact_point,
    torch::Tensor adj_contact_dist,
    torch::Tensor adj_contact_mat,
    torch::Tensor adj_materials,
    torch::Tensor adj_rigid_f,
    torch::Tensor adj_rigid_t);
// One emulated CUDA thread per rigid contact: evaluates a penalty-based
// contact force against the ground plane y = 0 for an articulated body and
// accumulates the resulting spatial force on the contact's body.
//
// Generated code (dflex code generator): var_N are SSA primal temporaries
// whose numbering is mirrored by the adjoint kernel; do not hand-edit.
void eval_rigid_contacts_art_cpu_kernel_forward(
    spatial_transform* var_body_X_s,    // per-body world transforms
    spatial_vector* var_body_v_s,       // per-body spatial velocities
    int* var_contact_body,              // per-contact body index
    df::float3* var_contact_point,      // per-contact point (body frame)
    float* var_contact_dist,            // per-contact offset along the normal
    int* var_contact_mat,               // per-contact material index
    float* var_materials,               // flat material table, 4 floats per material
    spatial_vector* var_body_f_s)       // output: accumulated body spatial forces
{
    //---------
    // primal vars
    int var_0;
    int var_1;
    df::float3 var_2;
    float var_3;
    int var_4;
    const int var_5 = 4;
    int var_6;
    const int var_7 = 0;
    int var_8;
    float var_9;
    int var_10;
    const int var_11 = 1;
    int var_12;
    float var_13;
    int var_14;
    const int var_15 = 2;
    int var_16;
    float var_17;
    int var_18;
    const int var_19 = 3;
    int var_20;
    float var_21;
    spatial_transform var_22;
    spatial_vector var_23;
    const float var_24 = 0.0;
    const float var_25 = 1.0;
    df::float3 var_26;
    df::float3 var_27;
    df::float3 var_28;
    df::float3 var_29;
    df::float3 var_30;
    df::float3 var_31;
    df::float3 var_32;
    df::float3 var_33;
    float var_34;
    bool var_35;
    float var_36;
    df::float3 var_37;
    df::float3 var_38;
    float var_39;
    float var_40;
    float var_41;
    float var_42;
    float var_43;
    float var_44;
    float var_45;
    float var_46;
    float var_47;
    float var_48;
    df::float3 var_49;
    float var_50;
    float var_51;
    df::float3 var_52;
    float var_53;
    float var_54;
    df::float3 var_55;
    float var_56;
    float var_57;
    float var_58;
    float var_59;
    float var_60;
    float var_61;
    df::float3 var_62;
    float var_63;
    df::float3 var_64;
    float var_65;
    df::float3 var_66;
    df::float3 var_67;
    df::float3 var_68;
    spatial_vector var_69;
    //---------
    // forward
    var_0 = df::tid();                                    // contact index for this emulated thread
    var_1 = df::load(var_contact_body, var_0);            // body this contact acts on
    var_2 = df::load(var_contact_point, var_0);           // contact point in body frame
    var_3 = df::load(var_contact_dist, var_0);            // offset distance along the contact normal
    var_4 = df::load(var_contact_mat, var_0);             // material index
    // material table lookup: 4 floats per material. From the usage below these
    // are presumably ke (contact stiffness), kd (damping), kf (friction
    // velocity gain) and mu (friction coefficient) -- TODO confirm against the
    // code generator / model builder.
    var_6 = df::mul(var_4, var_5);
    var_8 = df::add(var_6, var_7);
    var_9 = df::load(var_materials, var_8);               // materials[mat*4+0]: scales penetration depth
    var_10 = df::mul(var_4, var_5);
    var_12 = df::add(var_10, var_11);
    var_13 = df::load(var_materials, var_12);             // materials[mat*4+1]: scales normal velocity
    var_14 = df::mul(var_4, var_5);
    var_16 = df::add(var_14, var_15);
    var_17 = df::load(var_materials, var_16);             // materials[mat*4+2]: scales tangential velocity
    var_18 = df::mul(var_4, var_5);
    var_20 = df::add(var_18, var_19);
    var_21 = df::load(var_materials, var_20);             // materials[mat*4+3]: scales normal force (friction bound)
    var_22 = df::load(var_body_X_s, var_1);               // body world transform
    var_23 = df::load(var_body_v_s, var_1);               // body spatial velocity
    var_26 = df::float3(var_24, var_25, var_24);          // contact normal n = (0,1,0): ground plane y = 0
    var_27 = df::spatial_transform_point(var_22, var_2);  // contact point in world space
    var_28 = df::mul(var_26, var_3);
    var_29 = df::sub(var_27, var_28);                     // moment arm p: point pulled back along n by contact_dist
    var_30 = df::spatial_top(var_23);                     // top half of the spatial velocity
    var_31 = df::spatial_bottom(var_23);                  // bottom half of the spatial velocity
    var_32 = df::cross(var_30, var_29);
    var_33 = df::add(var_31, var_32);                     // world velocity of the contact point
    var_34 = df::dot(var_26, var_29);                     // signed height c of p above the plane
    var_35 = (var_34 >= var_24);
    if (var_35) {
        return;                                           // not penetrating -> no contact force
    }
    var_36 = df::dot(var_26, var_33);                     // normal velocity vn
    var_37 = df::mul(var_26, var_36);
    var_38 = df::sub(var_33, var_37);                     // tangential (slip) velocity vt
    var_39 = df::mul(var_34, var_9);                      // elastic normal term c*ke (negative while in contact)
    var_40 = df::min(var_36, var_24);                     // damp only approaching motion (vn < 0)
    var_41 = df::mul(var_40, var_13);
    var_42 = df::step(var_34);
    var_43 = df::mul(var_41, var_42);
    var_44 = df::sub(var_24, var_34);
    var_45 = df::mul(var_43, var_44);                     // damping term scaled by penetration depth -c
    var_46 = df::add(var_39, var_45);                     // total signed normal force magnitude
    var_47 = df::mul(var_21, var_46);                     // friction bound mu*fn (negative)
    var_48 = df::sub(var_24, var_47);
    var_49 = df::float3(var_17, var_24, var_24);
    var_50 = df::dot(var_49, var_38);
    var_51 = df::clamp(var_50, var_47, var_48);           // per-axis (box) friction x: computed but unused below
    var_52 = df::float3(var_24, var_24, var_17);
    var_53 = df::dot(var_52, var_38);
    var_54 = df::clamp(var_53, var_47, var_48);           // per-axis (box) friction z: computed but unused below
    var_55 = df::normalize(var_38);                       // slip direction
    var_56 = df::length(var_38);
    var_57 = df::mul(var_17, var_56);                     // viscous friction magnitude kf*|vt|
    var_58 = df::mul(var_21, var_34);
    var_59 = df::mul(var_58, var_9);
    var_60 = df::sub(var_24, var_59);                     // Coulomb cap -mu*c*ke (positive in contact)
    var_61 = df::min(var_57, var_60);
    var_62 = df::mul(var_55, var_61);
    var_63 = df::step(var_34);
    var_64 = df::mul(var_62, var_63);                     // friction force, masked to the penetrating case
    var_65 = df::add(var_39, var_45);
    var_66 = df::mul(var_26, var_65);                     // normal force vector
    var_67 = df::add(var_66, var_64);                     // total contact force
    var_68 = df::cross(var_29, var_67);                   // torque p x f
    var_69 = df::spatial_vector(var_68, var_67);
    df::atomic_add(var_body_f_s, var_1, var_69);          // accumulate spatial force on the body
}
// Reverse-mode adjoint of eval_rigid_contacts_art_cpu_kernel_forward.
// First replays the forward pass to rebuild every primal temporary (var_N),
// then walks the tape backwards, propagating adjoints (adj_N) from
// adj_body_f_s into the adj_* input arrays. The forward early-out `return`
// becomes `goto label0` so the reverse sweep can resume at the matching
// point with the adjoints of the skipped region left at zero.
//
// Generated code (dflex code generator): do not hand-edit the numbering --
// each adj_* call below is the derivative of the forward line with the same
// temporaries, visited in reverse order.
void eval_rigid_contacts_art_cpu_kernel_backward(
    spatial_transform* var_body_X_s,
    spatial_vector* var_body_v_s,
    int* var_contact_body,
    df::float3* var_contact_point,
    float* var_contact_dist,
    int* var_contact_mat,
    float* var_materials,
    spatial_vector* var_body_f_s,
    spatial_transform* adj_body_X_s,
    spatial_vector* adj_body_v_s,
    int* adj_contact_body,
    df::float3* adj_contact_point,
    float* adj_contact_dist,
    int* adj_contact_mat,
    float* adj_materials,
    spatial_vector* adj_body_f_s)
{
    //---------
    // primal vars
    int var_0;
    int var_1;
    df::float3 var_2;
    float var_3;
    int var_4;
    const int var_5 = 4;
    int var_6;
    const int var_7 = 0;
    int var_8;
    float var_9;
    int var_10;
    const int var_11 = 1;
    int var_12;
    float var_13;
    int var_14;
    const int var_15 = 2;
    int var_16;
    float var_17;
    int var_18;
    const int var_19 = 3;
    int var_20;
    float var_21;
    spatial_transform var_22;
    spatial_vector var_23;
    const float var_24 = 0.0;
    const float var_25 = 1.0;
    df::float3 var_26;
    df::float3 var_27;
    df::float3 var_28;
    df::float3 var_29;
    df::float3 var_30;
    df::float3 var_31;
    df::float3 var_32;
    df::float3 var_33;
    float var_34;
    bool var_35;
    float var_36;
    df::float3 var_37;
    df::float3 var_38;
    float var_39;
    float var_40;
    float var_41;
    float var_42;
    float var_43;
    float var_44;
    float var_45;
    float var_46;
    float var_47;
    float var_48;
    df::float3 var_49;
    float var_50;
    float var_51;
    df::float3 var_52;
    float var_53;
    float var_54;
    df::float3 var_55;
    float var_56;
    float var_57;
    float var_58;
    float var_59;
    float var_60;
    float var_61;
    df::float3 var_62;
    float var_63;
    df::float3 var_64;
    float var_65;
    df::float3 var_66;
    df::float3 var_67;
    df::float3 var_68;
    spatial_vector var_69;
    //---------
    // dual vars (one adjoint accumulator per primal temporary)
    int adj_0 = 0;
    int adj_1 = 0;
    df::float3 adj_2 = 0;
    float adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    int adj_7 = 0;
    int adj_8 = 0;
    float adj_9 = 0;
    int adj_10 = 0;
    int adj_11 = 0;
    int adj_12 = 0;
    float adj_13 = 0;
    int adj_14 = 0;
    int adj_15 = 0;
    int adj_16 = 0;
    float adj_17 = 0;
    int adj_18 = 0;
    int adj_19 = 0;
    int adj_20 = 0;
    float adj_21 = 0;
    spatial_transform adj_22 = 0;
    spatial_vector adj_23 = 0;
    float adj_24 = 0;
    float adj_25 = 0;
    df::float3 adj_26 = 0;
    df::float3 adj_27 = 0;
    df::float3 adj_28 = 0;
    df::float3 adj_29 = 0;
    df::float3 adj_30 = 0;
    df::float3 adj_31 = 0;
    df::float3 adj_32 = 0;
    df::float3 adj_33 = 0;
    float adj_34 = 0;
    bool adj_35 = 0;
    float adj_36 = 0;
    df::float3 adj_37 = 0;
    df::float3 adj_38 = 0;
    float adj_39 = 0;
    float adj_40 = 0;
    float adj_41 = 0;
    float adj_42 = 0;
    float adj_43 = 0;
    float adj_44 = 0;
    float adj_45 = 0;
    float adj_46 = 0;
    float adj_47 = 0;
    float adj_48 = 0;
    df::float3 adj_49 = 0;
    float adj_50 = 0;
    float adj_51 = 0;
    df::float3 adj_52 = 0;
    float adj_53 = 0;
    float adj_54 = 0;
    df::float3 adj_55 = 0;
    float adj_56 = 0;
    float adj_57 = 0;
    float adj_58 = 0;
    float adj_59 = 0;
    float adj_60 = 0;
    float adj_61 = 0;
    df::float3 adj_62 = 0;
    float adj_63 = 0;
    df::float3 adj_64 = 0;
    float adj_65 = 0;
    df::float3 adj_66 = 0;
    df::float3 adj_67 = 0;
    df::float3 adj_68 = 0;
    spatial_vector adj_69 = 0;
    //---------
    // forward (identical replay of the primal kernel; rebuilds the tape)
    var_0 = df::tid();
    var_1 = df::load(var_contact_body, var_0);
    var_2 = df::load(var_contact_point, var_0);
    var_3 = df::load(var_contact_dist, var_0);
    var_4 = df::load(var_contact_mat, var_0);
    var_6 = df::mul(var_4, var_5);
    var_8 = df::add(var_6, var_7);
    var_9 = df::load(var_materials, var_8);
    var_10 = df::mul(var_4, var_5);
    var_12 = df::add(var_10, var_11);
    var_13 = df::load(var_materials, var_12);
    var_14 = df::mul(var_4, var_5);
    var_16 = df::add(var_14, var_15);
    var_17 = df::load(var_materials, var_16);
    var_18 = df::mul(var_4, var_5);
    var_20 = df::add(var_18, var_19);
    var_21 = df::load(var_materials, var_20);
    var_22 = df::load(var_body_X_s, var_1);
    var_23 = df::load(var_body_v_s, var_1);
    var_26 = df::float3(var_24, var_25, var_24);
    var_27 = df::spatial_transform_point(var_22, var_2);
    var_28 = df::mul(var_26, var_3);
    var_29 = df::sub(var_27, var_28);
    var_30 = df::spatial_top(var_23);
    var_31 = df::spatial_bottom(var_23);
    var_32 = df::cross(var_30, var_29);
    var_33 = df::add(var_31, var_32);
    var_34 = df::dot(var_26, var_29);
    var_35 = (var_34 >= var_24);
    if (var_35) {
        // forward early-out: jump straight to the matching spot in the
        // reverse sweep instead of returning
        goto label0;
    }
    var_36 = df::dot(var_26, var_33);
    var_37 = df::mul(var_26, var_36);
    var_38 = df::sub(var_33, var_37);
    var_39 = df::mul(var_34, var_9);
    var_40 = df::min(var_36, var_24);
    var_41 = df::mul(var_40, var_13);
    var_42 = df::step(var_34);
    var_43 = df::mul(var_41, var_42);
    var_44 = df::sub(var_24, var_34);
    var_45 = df::mul(var_43, var_44);
    var_46 = df::add(var_39, var_45);
    var_47 = df::mul(var_21, var_46);
    var_48 = df::sub(var_24, var_47);
    var_49 = df::float3(var_17, var_24, var_24);
    var_50 = df::dot(var_49, var_38);
    var_51 = df::clamp(var_50, var_47, var_48);
    var_52 = df::float3(var_24, var_24, var_17);
    var_53 = df::dot(var_52, var_38);
    var_54 = df::clamp(var_53, var_47, var_48);
    var_55 = df::normalize(var_38);
    var_56 = df::length(var_38);
    var_57 = df::mul(var_17, var_56);
    var_58 = df::mul(var_21, var_34);
    var_59 = df::mul(var_58, var_9);
    var_60 = df::sub(var_24, var_59);
    var_61 = df::min(var_57, var_60);
    var_62 = df::mul(var_55, var_61);
    var_63 = df::step(var_34);
    var_64 = df::mul(var_62, var_63);
    var_65 = df::add(var_39, var_45);
    var_66 = df::mul(var_26, var_65);
    var_67 = df::add(var_66, var_64);
    var_68 = df::cross(var_29, var_67);
    var_69 = df::spatial_vector(var_68, var_67);
    df::atomic_add(var_body_f_s, var_1, var_69);
    //---------
    // reverse (derivatives of the forward lines, in reverse order)
    df::adj_atomic_add(var_body_f_s, var_1, var_69, adj_body_f_s, adj_1, adj_69);
    df::adj_spatial_vector(var_68, var_67, adj_68, adj_67, adj_69);
    df::adj_cross(var_29, var_67, adj_29, adj_67, adj_68);
    df::adj_add(var_66, var_64, adj_66, adj_64, adj_67);
    df::adj_mul(var_26, var_65, adj_26, adj_65, adj_66);
    df::adj_add(var_39, var_45, adj_39, adj_45, adj_65);
    df::adj_mul(var_62, var_63, adj_62, adj_63, adj_64);
    df::adj_step(var_34, adj_34, adj_63);
    df::adj_mul(var_55, var_61, adj_55, adj_61, adj_62);
    df::adj_min(var_57, var_60, adj_57, adj_60, adj_61);
    df::adj_sub(var_24, var_59, adj_24, adj_59, adj_60);
    df::adj_mul(var_58, var_9, adj_58, adj_9, adj_59);
    df::adj_mul(var_21, var_34, adj_21, adj_34, adj_58);
    df::adj_mul(var_17, var_56, adj_17, adj_56, adj_57);
    df::adj_length(var_38, adj_38, adj_56);
    df::adj_normalize(var_38, adj_38, adj_55);
    df::adj_clamp(var_53, var_47, var_48, adj_53, adj_47, adj_48, adj_54);
    df::adj_dot(var_52, var_38, adj_52, adj_38, adj_53);
    df::adj_float3(var_24, var_24, var_17, adj_24, adj_24, adj_17, adj_52);
    df::adj_clamp(var_50, var_47, var_48, adj_50, adj_47, adj_48, adj_51);
    df::adj_dot(var_49, var_38, adj_49, adj_38, adj_50);
    df::adj_float3(var_17, var_24, var_24, adj_17, adj_24, adj_24, adj_49);
    df::adj_sub(var_24, var_47, adj_24, adj_47, adj_48);
    df::adj_mul(var_21, var_46, adj_21, adj_46, adj_47);
    df::adj_add(var_39, var_45, adj_39, adj_45, adj_46);
    df::adj_mul(var_43, var_44, adj_43, adj_44, adj_45);
    df::adj_sub(var_24, var_34, adj_24, adj_34, adj_44);
    df::adj_mul(var_41, var_42, adj_41, adj_42, adj_43);
    df::adj_step(var_34, adj_34, adj_42);
    df::adj_mul(var_40, var_13, adj_40, adj_13, adj_41);
    df::adj_min(var_36, var_24, adj_36, adj_24, adj_40);
    df::adj_mul(var_34, var_9, adj_34, adj_9, adj_39);
    df::adj_sub(var_33, var_37, adj_33, adj_37, adj_38);
    df::adj_mul(var_26, var_36, adj_26, adj_36, adj_37);
    df::adj_dot(var_26, var_33, adj_26, adj_33, adj_36);
    if (var_35) {
        // re-entry point for contacts that took the forward early-out
        label0:;
    }
    df::adj_dot(var_26, var_29, adj_26, adj_29, adj_34);
    df::adj_add(var_31, var_32, adj_31, adj_32, adj_33);
    df::adj_cross(var_30, var_29, adj_30, adj_29, adj_32);
    df::adj_spatial_bottom(var_23, adj_23, adj_31);
    df::adj_spatial_top(var_23, adj_23, adj_30);
    df::adj_sub(var_27, var_28, adj_27, adj_28, adj_29);
    df::adj_mul(var_26, var_3, adj_26, adj_3, adj_28);
    df::adj_spatial_transform_point(var_22, var_2, adj_22, adj_2, adj_27);
    df::adj_float3(var_24, var_25, var_24, adj_24, adj_25, adj_24, adj_26);
    df::adj_load(var_body_v_s, var_1, adj_body_v_s, adj_1, adj_23);
    df::adj_load(var_body_X_s, var_1, adj_body_X_s, adj_1, adj_22);
    df::adj_load(var_materials, var_20, adj_materials, adj_20, adj_21);
    df::adj_add(var_18, var_19, adj_18, adj_19, adj_20);
    df::adj_mul(var_4, var_5, adj_4, adj_5, adj_18);
    df::adj_load(var_materials, var_16, adj_materials, adj_16, adj_17);
    df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
    df::adj_mul(var_4, var_5, adj_4, adj_5, adj_14);
    df::adj_load(var_materials, var_12, adj_materials, adj_12, adj_13);
    df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
    df::adj_mul(var_4, var_5, adj_4, adj_5, adj_10);
    df::adj_load(var_materials, var_8, adj_materials, adj_8, adj_9);
    df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
    df::adj_mul(var_4, var_5, adj_4, adj_5, adj_6);
    df::adj_load(var_contact_mat, var_0, adj_contact_mat, adj_0, adj_4);
    df::adj_load(var_contact_dist, var_0, adj_contact_dist, adj_0, adj_3);
    df::adj_load(var_contact_point, var_0, adj_contact_point, adj_0, adj_2);
    df::adj_load(var_contact_body, var_0, adj_contact_body, adj_0, adj_1);
    return;
}
// Python entry points
void eval_rigid_contacts_art_cpu_forward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_body_f_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_contacts_art_cpu_kernel_forward(
cast<spatial_transform*>(var_body_X_s),
cast<spatial_vector*>(var_body_v_s),
cast<int*>(var_contact_body),
cast<df::float3*>(var_contact_point),
cast<float*>(var_contact_dist),
cast<int*>(var_contact_mat),
cast<float*>(var_materials),
cast<spatial_vector*>(var_body_f_s));
}
}
void eval_rigid_contacts_art_cpu_backward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_body_f_s,
torch::Tensor adj_body_X_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_body_f_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_contacts_art_cpu_kernel_backward(
cast<spatial_transform*>(var_body_X_s),
cast<spatial_vector*>(var_body_v_s),
cast<int*>(var_contact_body),
cast<df::float3*>(var_contact_point),
cast<float*>(var_contact_dist),
cast<int*>(var_contact_mat),
cast<float*>(var_materials),
cast<spatial_vector*>(var_body_f_s),
cast<spatial_transform*>(adj_body_X_s),
cast<spatial_vector*>(adj_body_v_s),
cast<int*>(adj_contact_body),
cast<df::float3*>(adj_contact_point),
cast<float*>(adj_contact_dist),
cast<int*>(adj_contact_mat),
cast<float*>(adj_materials),
cast<spatial_vector*>(adj_body_f_s));
}
}
// Python entry points
void eval_rigid_contacts_art_cpu_forward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_body_f_s);
void eval_rigid_contacts_art_cpu_backward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_contact_body,
torch::Tensor var_contact_point,
torch::Tensor var_contact_dist,
torch::Tensor var_contact_mat,
torch::Tensor var_materials,
torch::Tensor var_body_f_s,
torch::Tensor adj_body_X_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_contact_body,
torch::Tensor adj_contact_point,
torch::Tensor adj_contact_dist,
torch::Tensor adj_contact_mat,
torch::Tensor adj_materials,
torch::Tensor adj_body_f_s);
// One emulated CUDA thread per muscle: walks the muscle's point segments and
// lets compute_muscle_force_cpu_func accumulate forces on the attached
// bodies. Note: var_muscle_params is part of the generated signature but is
// not referenced here.
void eval_muscles_cpu_kernel_forward(
    spatial_transform* var_body_X_s,
    spatial_vector* var_body_v_s,
    int* var_muscle_start,
    float* var_muscle_params,
    int* var_muscle_links,
    df::float3* var_muscle_points,
    float* var_muscle_activation,
    spatial_vector* var_body_f_s)
{
    const int tid = df::tid();

    // segment range for this muscle: [start[tid], start[tid+1] - 1)
    const int seg_begin = df::load(var_muscle_start, tid);
    const int seg_end = df::sub(df::load(var_muscle_start, df::add(tid, 1)), 1);

    const float activation = df::load(var_muscle_activation, tid);

    for (int seg = seg_begin; seg < seg_end; ++seg)
    {
        (void)compute_muscle_force_cpu_func(seg, var_body_X_s, var_body_v_s, var_muscle_links, var_muscle_points, activation, var_body_f_s);
    }
}
// Reverse-mode adjoint of eval_muscles_cpu_kernel_forward: replays the scalar
// preamble (segment range + activation), then runs the per-segment adjoint in
// reverse segment order. Generated code: do not hand-edit.
// Note: var_muscle_params / adj_muscle_params are in the generated signature
// but unused here.
void eval_muscles_cpu_kernel_backward(
    spatial_transform* var_body_X_s,
    spatial_vector* var_body_v_s,
    int* var_muscle_start,
    float* var_muscle_params,
    int* var_muscle_links,
    df::float3* var_muscle_points,
    float* var_muscle_activation,
    spatial_vector* var_body_f_s,
    spatial_transform* adj_body_X_s,
    spatial_vector* adj_body_v_s,
    int* adj_muscle_start,
    float* adj_muscle_params,
    int* adj_muscle_links,
    df::float3* adj_muscle_points,
    float* adj_muscle_activation,
    spatial_vector* adj_body_f_s)
{
    //---------
    // primal vars
    int var_0;
    int var_1;
    const int var_2 = 1;
    int var_3;
    int var_4;
    int var_5;
    float var_6;
    int var_7;
    int var_8;
    //---------
    // dual vars
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    float adj_6 = 0;
    int adj_7 = 0;
    int adj_8 = 0;
    //---------
    // forward
    var_0 = df::tid();                                   // muscle index
    var_1 = df::load(var_muscle_start, var_0);           // first segment
    var_3 = df::add(var_0, var_2);
    var_4 = df::load(var_muscle_start, var_3);
    var_5 = df::sub(var_4, var_2);                       // one-past-last segment
    var_6 = df::load(var_muscle_activation, var_0);
    // dead replay of the forward loop body kept by the code generator; never
    // executed (presumably the adjoint func recomputes its own primal values
    // per segment -- TODO confirm against the generator)
    if (false) {
        var_8 = compute_muscle_force_cpu_func(var_7, var_body_X_s, var_body_v_s, var_muscle_links, var_muscle_points, var_6, var_body_f_s);
    }
    //---------
    // reverse: visit segments in reverse of the forward loop order
    for (var_7=var_5-1; var_7 >= var_1; --var_7) {
        adj_compute_muscle_force_cpu_func(var_7, var_body_X_s, var_body_v_s, var_muscle_links, var_muscle_points, var_6, var_body_f_s, adj_7, adj_body_X_s, adj_body_v_s, adj_muscle_links, adj_muscle_points, adj_6, adj_body_f_s, adj_8);
    }
    df::adj_load(var_muscle_activation, var_0, adj_muscle_activation, adj_0, adj_6);
    df::adj_sub(var_4, var_2, adj_4, adj_2, adj_5);
    df::adj_load(var_muscle_start, var_3, adj_muscle_start, adj_3, adj_4);
    df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
    df::adj_load(var_muscle_start, var_0, adj_muscle_start, adj_0, adj_1);
    return;
}
// Python entry points
void eval_muscles_cpu_forward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_muscle_start,
torch::Tensor var_muscle_params,
torch::Tensor var_muscle_links,
torch::Tensor var_muscle_points,
torch::Tensor var_muscle_activation,
torch::Tensor var_body_f_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_muscles_cpu_kernel_forward(
cast<spatial_transform*>(var_body_X_s),
cast<spatial_vector*>(var_body_v_s),
cast<int*>(var_muscle_start),
cast<float*>(var_muscle_params),
cast<int*>(var_muscle_links),
cast<df::float3*>(var_muscle_points),
cast<float*>(var_muscle_activation),
cast<spatial_vector*>(var_body_f_s));
}
}
void eval_muscles_cpu_backward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_muscle_start,
torch::Tensor var_muscle_params,
torch::Tensor var_muscle_links,
torch::Tensor var_muscle_points,
torch::Tensor var_muscle_activation,
torch::Tensor var_body_f_s,
torch::Tensor adj_body_X_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_muscle_start,
torch::Tensor adj_muscle_params,
torch::Tensor adj_muscle_links,
torch::Tensor adj_muscle_points,
torch::Tensor adj_muscle_activation,
torch::Tensor adj_body_f_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_muscles_cpu_kernel_backward(
cast<spatial_transform*>(var_body_X_s),
cast<spatial_vector*>(var_body_v_s),
cast<int*>(var_muscle_start),
cast<float*>(var_muscle_params),
cast<int*>(var_muscle_links),
cast<df::float3*>(var_muscle_points),
cast<float*>(var_muscle_activation),
cast<spatial_vector*>(var_body_f_s),
cast<spatial_transform*>(adj_body_X_s),
cast<spatial_vector*>(adj_body_v_s),
cast<int*>(adj_muscle_start),
cast<float*>(adj_muscle_params),
cast<int*>(adj_muscle_links),
cast<df::float3*>(adj_muscle_points),
cast<float*>(adj_muscle_activation),
cast<spatial_vector*>(adj_body_f_s));
}
}
// Python entry points
void eval_muscles_cpu_forward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_muscle_start,
torch::Tensor var_muscle_params,
torch::Tensor var_muscle_links,
torch::Tensor var_muscle_points,
torch::Tensor var_muscle_activation,
torch::Tensor var_body_f_s);
void eval_muscles_cpu_backward(int dim,
torch::Tensor var_body_X_s,
torch::Tensor var_body_v_s,
torch::Tensor var_muscle_start,
torch::Tensor var_muscle_params,
torch::Tensor var_muscle_links,
torch::Tensor var_muscle_points,
torch::Tensor var_muscle_activation,
torch::Tensor var_body_f_s,
torch::Tensor adj_body_X_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_muscle_start,
torch::Tensor adj_muscle_params,
torch::Tensor adj_muscle_links,
torch::Tensor adj_muscle_points,
torch::Tensor adj_muscle_activation,
torch::Tensor adj_body_f_s);
// One emulated CUDA thread per articulation: runs forward kinematics over the
// articulation's links by calling compute_link_transform_cpu_func for each
// link in order.
void eval_rigid_fk_cpu_kernel_forward(
    int* var_articulation_start,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    spatial_transform* var_joint_X_pj,
    spatial_transform* var_joint_X_cm,
    df::float3* var_joint_axis,
    spatial_transform* var_body_X_sc,
    spatial_transform* var_body_X_sm)
{
    const int tid = df::tid();

    // this articulation owns links [start[tid], start[tid+1])
    const int link_begin = df::load(var_articulation_start, tid);
    const int link_end = df::load(var_articulation_start, df::add(tid, 1));

    for (int link = link_begin; link < link_end; ++link)
    {
        (void)compute_link_transform_cpu_func(link, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_X_pj, var_joint_X_cm, var_joint_axis, var_body_X_sc, var_body_X_sm);
    }
}
// Reverse-mode adjoint of eval_rigid_fk_cpu_kernel_forward: replays the
// link-range preamble, then runs the per-link adjoint of
// compute_link_transform_cpu_func in reverse link order.
// Generated code: do not hand-edit.
void eval_rigid_fk_cpu_kernel_backward(
    int* var_articulation_start,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    spatial_transform* var_joint_X_pj,
    spatial_transform* var_joint_X_cm,
    df::float3* var_joint_axis,
    spatial_transform* var_body_X_sc,
    spatial_transform* var_body_X_sm,
    int* adj_articulation_start,
    int* adj_joint_type,
    int* adj_joint_parent,
    int* adj_joint_q_start,
    int* adj_joint_qd_start,
    float* adj_joint_q,
    spatial_transform* adj_joint_X_pj,
    spatial_transform* adj_joint_X_cm,
    df::float3* adj_joint_axis,
    spatial_transform* adj_body_X_sc,
    spatial_transform* adj_body_X_sm)
{
    //---------
    // primal vars
    int var_0;
    int var_1;
    const int var_2 = 1;
    int var_3;
    int var_4;
    int var_5;
    int var_6;
    //---------
    // dual vars
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    //---------
    // forward
    var_0 = df::tid();                                   // articulation index
    var_1 = df::load(var_articulation_start, var_0);     // first link
    var_3 = df::add(var_0, var_2);
    var_4 = df::load(var_articulation_start, var_3);     // one-past-last link
    // dead replay of the forward loop body kept by the code generator; never
    // executed
    if (false) {
        var_6 = compute_link_transform_cpu_func(var_5, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_X_pj, var_joint_X_cm, var_joint_axis, var_body_X_sc, var_body_X_sm);
    }
    //---------
    // reverse: visit links in reverse of the forward loop order
    for (var_5=var_4-1; var_5 >= var_1; --var_5) {
        adj_compute_link_transform_cpu_func(var_5, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_X_pj, var_joint_X_cm, var_joint_axis, var_body_X_sc, var_body_X_sm, adj_5, adj_joint_type, adj_joint_parent, adj_joint_q_start, adj_joint_qd_start, adj_joint_q, adj_joint_X_pj, adj_joint_X_cm, adj_joint_axis, adj_body_X_sc, adj_body_X_sm, adj_6);
    }
    df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
    df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
    df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
    return;
}
// Python entry points
void eval_rigid_fk_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_X_pj,
torch::Tensor var_joint_X_cm,
torch::Tensor var_joint_axis,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_fk_cpu_kernel_forward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<spatial_transform*>(var_joint_X_pj),
cast<spatial_transform*>(var_joint_X_cm),
cast<df::float3*>(var_joint_axis),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_transform*>(var_body_X_sm));
}
}
void eval_rigid_fk_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_X_pj,
torch::Tensor var_joint_X_cm,
torch::Tensor var_joint_axis,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_X_pj,
torch::Tensor adj_joint_X_cm,
torch::Tensor adj_joint_axis,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_X_sm)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_fk_cpu_kernel_backward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<spatial_transform*>(var_joint_X_pj),
cast<spatial_transform*>(var_joint_X_cm),
cast<df::float3*>(var_joint_axis),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_transform*>(var_body_X_sm),
cast<int*>(adj_articulation_start),
cast<int*>(adj_joint_type),
cast<int*>(adj_joint_parent),
cast<int*>(adj_joint_q_start),
cast<int*>(adj_joint_qd_start),
cast<float*>(adj_joint_q),
cast<spatial_transform*>(adj_joint_X_pj),
cast<spatial_transform*>(adj_joint_X_cm),
cast<df::float3*>(adj_joint_axis),
cast<spatial_transform*>(adj_body_X_sc),
cast<spatial_transform*>(adj_body_X_sm));
}
}
// Python entry points
void eval_rigid_fk_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_X_pj,
torch::Tensor var_joint_X_cm,
torch::Tensor var_joint_axis,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm);
void eval_rigid_fk_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_X_pj,
torch::Tensor var_joint_X_cm,
torch::Tensor var_joint_axis,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_X_pj,
torch::Tensor adj_joint_X_cm,
torch::Tensor adj_joint_axis,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_X_sm);
// One emulated CUDA thread per articulation: propagates link velocities (and
// related spatial quantities) over the articulation's links via
// compute_link_velocity_cpu_func. Several parameters (joint_q, target gains)
// are part of the generated signature but not referenced here.
void eval_rigid_id_cpu_kernel_forward(
    int* var_articulation_start,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    float* var_joint_qd,
    df::float3* var_joint_axis,
    float* var_joint_target_ke,
    float* var_joint_target_kd,
    spatial_matrix* var_body_I_m,
    spatial_transform* var_body_X_sc,
    spatial_transform* var_body_X_sm,
    spatial_transform* var_joint_X_pj,
    df::float3* var_gravity,
    spatial_vector* var_joint_S_s,
    spatial_matrix* var_body_I_s,
    spatial_vector* var_body_v_s,
    spatial_vector* var_body_f_s,
    spatial_vector* var_body_a_s)
{
    const int tid = df::tid();

    // this articulation owns links [start[tid], start[tid+1])
    const int link_begin = df::load(var_articulation_start, tid);
    const int link_end = df::load(var_articulation_start, df::add(tid, 1));

    // kept from the generated code (was var_5); unused in the forward pass
    const int link_count = df::sub(link_end, link_begin);
    (void)link_count;

    for (int link = link_begin; link < link_end; ++link)
    {
        (void)compute_link_velocity_cpu_func(link, var_joint_type, var_joint_parent, var_joint_qd_start, var_joint_qd, var_joint_axis, var_body_I_m, var_body_X_sc, var_body_X_sm, var_joint_X_pj, var_gravity, var_joint_S_s, var_body_I_s, var_body_v_s, var_body_f_s, var_body_a_s);
    }
}
// Reverse-mode adjoint of eval_rigid_id_cpu_kernel_forward: replays the
// link-range preamble, then runs the per-link adjoint of
// compute_link_velocity_cpu_func in reverse link order.
// Generated code: do not hand-edit.
void eval_rigid_id_cpu_kernel_backward(
    int* var_articulation_start,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    float* var_joint_qd,
    df::float3* var_joint_axis,
    float* var_joint_target_ke,
    float* var_joint_target_kd,
    spatial_matrix* var_body_I_m,
    spatial_transform* var_body_X_sc,
    spatial_transform* var_body_X_sm,
    spatial_transform* var_joint_X_pj,
    df::float3* var_gravity,
    spatial_vector* var_joint_S_s,
    spatial_matrix* var_body_I_s,
    spatial_vector* var_body_v_s,
    spatial_vector* var_body_f_s,
    spatial_vector* var_body_a_s,
    int* adj_articulation_start,
    int* adj_joint_type,
    int* adj_joint_parent,
    int* adj_joint_q_start,
    int* adj_joint_qd_start,
    float* adj_joint_q,
    float* adj_joint_qd,
    df::float3* adj_joint_axis,
    float* adj_joint_target_ke,
    float* adj_joint_target_kd,
    spatial_matrix* adj_body_I_m,
    spatial_transform* adj_body_X_sc,
    spatial_transform* adj_body_X_sm,
    spatial_transform* adj_joint_X_pj,
    df::float3* adj_gravity,
    spatial_vector* adj_joint_S_s,
    spatial_matrix* adj_body_I_s,
    spatial_vector* adj_body_v_s,
    spatial_vector* adj_body_f_s,
    spatial_vector* adj_body_a_s)
{
    //---------
    // primal vars
    int var_0;
    int var_1;
    const int var_2 = 1;
    int var_3;
    int var_4;
    int var_5;
    int var_6;
    int var_7;
    //---------
    // dual vars
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    int adj_7 = 0;
    //---------
    // forward
    var_0 = df::tid();                                   // articulation index
    var_1 = df::load(var_articulation_start, var_0);     // first link
    var_3 = df::add(var_0, var_2);
    var_4 = df::load(var_articulation_start, var_3);     // one-past-last link
    var_5 = df::sub(var_4, var_1);                       // link count (unused in primal)
    // dead replay of the forward loop body kept by the code generator; never
    // executed
    if (false) {
        var_7 = compute_link_velocity_cpu_func(var_6, var_joint_type, var_joint_parent, var_joint_qd_start, var_joint_qd, var_joint_axis, var_body_I_m, var_body_X_sc, var_body_X_sm, var_joint_X_pj, var_gravity, var_joint_S_s, var_body_I_s, var_body_v_s, var_body_f_s, var_body_a_s);
    }
    //---------
    // reverse: visit links in reverse of the forward loop order
    for (var_6=var_4-1; var_6 >= var_1; --var_6) {
        adj_compute_link_velocity_cpu_func(var_6, var_joint_type, var_joint_parent, var_joint_qd_start, var_joint_qd, var_joint_axis, var_body_I_m, var_body_X_sc, var_body_X_sm, var_joint_X_pj, var_gravity, var_joint_S_s, var_body_I_s, var_body_v_s, var_body_f_s, var_body_a_s, adj_6, adj_joint_type, adj_joint_parent, adj_joint_qd_start, adj_joint_qd, adj_joint_axis, adj_body_I_m, adj_body_X_sc, adj_body_X_sm, adj_joint_X_pj, adj_gravity, adj_joint_S_s, adj_body_I_s, adj_body_v_s, adj_body_f_s, adj_body_a_s, adj_7);
    }
    df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
    df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
    df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
    df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
    return;
}
// Python entry points
void eval_rigid_id_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_body_I_m,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor var_joint_X_pj,
torch::Tensor var_gravity,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_I_s,
torch::Tensor var_body_v_s,
torch::Tensor var_body_f_s,
torch::Tensor var_body_a_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_id_cpu_kernel_forward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<float*>(var_joint_qd),
cast<df::float3*>(var_joint_axis),
cast<float*>(var_joint_target_ke),
cast<float*>(var_joint_target_kd),
cast<spatial_matrix*>(var_body_I_m),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_transform*>(var_body_X_sm),
cast<spatial_transform*>(var_joint_X_pj),
cast<df::float3*>(var_gravity),
cast<spatial_vector*>(var_joint_S_s),
cast<spatial_matrix*>(var_body_I_s),
cast<spatial_vector*>(var_body_v_s),
cast<spatial_vector*>(var_body_f_s),
cast<spatial_vector*>(var_body_a_s));
}
}
void eval_rigid_id_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_body_I_m,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor var_joint_X_pj,
torch::Tensor var_gravity,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_I_s,
torch::Tensor var_body_v_s,
torch::Tensor var_body_f_s,
torch::Tensor var_body_a_s,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_qd,
torch::Tensor adj_joint_axis,
torch::Tensor adj_joint_target_ke,
torch::Tensor adj_joint_target_kd,
torch::Tensor adj_body_I_m,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_X_sm,
torch::Tensor adj_joint_X_pj,
torch::Tensor adj_gravity,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_body_I_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_body_f_s,
torch::Tensor adj_body_a_s)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
eval_rigid_id_cpu_kernel_backward(
cast<int*>(var_articulation_start),
cast<int*>(var_joint_type),
cast<int*>(var_joint_parent),
cast<int*>(var_joint_q_start),
cast<int*>(var_joint_qd_start),
cast<float*>(var_joint_q),
cast<float*>(var_joint_qd),
cast<df::float3*>(var_joint_axis),
cast<float*>(var_joint_target_ke),
cast<float*>(var_joint_target_kd),
cast<spatial_matrix*>(var_body_I_m),
cast<spatial_transform*>(var_body_X_sc),
cast<spatial_transform*>(var_body_X_sm),
cast<spatial_transform*>(var_joint_X_pj),
cast<df::float3*>(var_gravity),
cast<spatial_vector*>(var_joint_S_s),
cast<spatial_matrix*>(var_body_I_s),
cast<spatial_vector*>(var_body_v_s),
cast<spatial_vector*>(var_body_f_s),
cast<spatial_vector*>(var_body_a_s),
cast<int*>(adj_articulation_start),
cast<int*>(adj_joint_type),
cast<int*>(adj_joint_parent),
cast<int*>(adj_joint_q_start),
cast<int*>(adj_joint_qd_start),
cast<float*>(adj_joint_q),
cast<float*>(adj_joint_qd),
cast<df::float3*>(adj_joint_axis),
cast<float*>(adj_joint_target_ke),
cast<float*>(adj_joint_target_kd),
cast<spatial_matrix*>(adj_body_I_m),
cast<spatial_transform*>(adj_body_X_sc),
cast<spatial_transform*>(adj_body_X_sm),
cast<spatial_transform*>(adj_joint_X_pj),
cast<df::float3*>(adj_gravity),
cast<spatial_vector*>(adj_joint_S_s),
cast<spatial_matrix*>(adj_body_I_s),
cast<spatial_vector*>(adj_body_v_s),
cast<spatial_vector*>(adj_body_f_s),
cast<spatial_vector*>(adj_body_a_s));
}
}
// Python entry point declarations (redundant prototypes for the definitions above)
void eval_rigid_id_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_body_I_m,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor var_joint_X_pj,
torch::Tensor var_gravity,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_I_s,
torch::Tensor var_body_v_s,
torch::Tensor var_body_f_s,
torch::Tensor var_body_a_s);
void eval_rigid_id_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_body_I_m,
torch::Tensor var_body_X_sc,
torch::Tensor var_body_X_sm,
torch::Tensor var_joint_X_pj,
torch::Tensor var_gravity,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_I_s,
torch::Tensor var_body_v_s,
torch::Tensor var_body_f_s,
torch::Tensor var_body_a_s,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_qd,
torch::Tensor adj_joint_axis,
torch::Tensor adj_joint_target_ke,
torch::Tensor adj_joint_target_kd,
torch::Tensor adj_body_I_m,
torch::Tensor adj_body_X_sc,
torch::Tensor adj_body_X_sm,
torch::Tensor adj_joint_X_pj,
torch::Tensor adj_gravity,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_body_I_s,
torch::Tensor adj_body_v_s,
torch::Tensor adj_body_f_s,
torch::Tensor adj_body_a_s);
// Generated forward kernel: processes one articulation per thread (df::tid(),
// CPU-emulated via s_threadIdx) and invokes compute_link_tau_cpu_func for each
// link in the articulation's [start, end) index range. Outputs are written
// through the pointer arguments by that helper (presumably var_body_ft_s and
// var_tau — confirm against compute_link_tau_cpu_func).
void eval_rigid_tau_cpu_kernel_forward(
    int* var_articulation_start,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    float* var_joint_qd,
    float* var_joint_act,
    float* var_joint_target,
    float* var_joint_target_ke,
    float* var_joint_target_kd,
    float* var_joint_limit_lower,
    float* var_joint_limit_upper,
    float* var_joint_limit_ke,
    float* var_joint_limit_kd,
    df::float3* var_joint_axis,
    spatial_vector* var_joint_S_s,
    spatial_vector* var_body_fb_s,
    spatial_vector* var_body_ft_s,
    float* var_tau)
{
    //---------
    // primal vars
    int var_0;              // thread id == articulation index
    int var_1;              // articulation_start[var_0] (first link)
    const int var_2 = 1;
    int var_3;              // var_0 + 1
    int var_4;              // articulation_start[var_0 + 1] (one-past-last link)
    int var_5;              // link count = var_4 - var_1
    const int var_6 = 0;
    int var_7;              // loop counter over links
    int var_8;              // return value of compute_link_tau_cpu_func (unused)
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::load(var_articulation_start, var_0);
    var_3 = df::add(var_0, var_2);
    var_4 = df::load(var_articulation_start, var_3);
    var_5 = df::sub(var_4, var_1);
    // helper receives the loop index var_7 and the end marker var_4
    for (var_7=var_6; var_7 < var_5; ++var_7) {
        var_8 = compute_link_tau_cpu_func(var_7, var_4, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_target_ke, var_joint_target_kd, var_joint_limit_lower, var_joint_limit_upper, var_joint_limit_ke, var_joint_limit_kd, var_joint_S_s, var_body_fb_s, var_body_ft_s, var_tau);
    }
}
// Generated backward (adjoint) kernel: re-runs the scalar part of the forward
// pass, then replays the program in reverse order, accumulating gradients into
// the adj_* buffers. The statement ordering is the reverse-mode contract
// produced by the code generator — do not reorder by hand.
void eval_rigid_tau_cpu_kernel_backward(
    int* var_articulation_start,
    int* var_joint_type,
    int* var_joint_parent,
    int* var_joint_q_start,
    int* var_joint_qd_start,
    float* var_joint_q,
    float* var_joint_qd,
    float* var_joint_act,
    float* var_joint_target,
    float* var_joint_target_ke,
    float* var_joint_target_kd,
    float* var_joint_limit_lower,
    float* var_joint_limit_upper,
    float* var_joint_limit_ke,
    float* var_joint_limit_kd,
    df::float3* var_joint_axis,
    spatial_vector* var_joint_S_s,
    spatial_vector* var_body_fb_s,
    spatial_vector* var_body_ft_s,
    float* var_tau,
    int* adj_articulation_start,
    int* adj_joint_type,
    int* adj_joint_parent,
    int* adj_joint_q_start,
    int* adj_joint_qd_start,
    float* adj_joint_q,
    float* adj_joint_qd,
    float* adj_joint_act,
    float* adj_joint_target,
    float* adj_joint_target_ke,
    float* adj_joint_target_kd,
    float* adj_joint_limit_lower,
    float* adj_joint_limit_upper,
    float* adj_joint_limit_ke,
    float* adj_joint_limit_kd,
    df::float3* adj_joint_axis,
    spatial_vector* adj_joint_S_s,
    spatial_vector* adj_body_fb_s,
    spatial_vector* adj_body_ft_s,
    float* adj_tau)
{
    //---------
    // primal vars
    int var_0;              // thread id == articulation index
    int var_1;              // first link
    const int var_2 = 1;
    int var_3;              // var_0 + 1
    int var_4;              // one-past-last link
    int var_5;              // link count
    const int var_6 = 0;
    int var_7;              // loop counter over links
    int var_8;              // helper return value (unused)
    //---------
    // dual vars
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    int adj_7 = 0;
    int adj_8 = 0;
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::load(var_articulation_start, var_0);
    var_3 = df::add(var_0, var_2);
    var_4 = df::load(var_articulation_start, var_3);
    var_5 = df::sub(var_4, var_1);
    // primal replay of the link loop is disabled by the generator; the branch
    // is dead code (note var_7 would be uninitialized here if it ever ran)
    if (false) {
        var_8 = compute_link_tau_cpu_func(var_7, var_4, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_target_ke, var_joint_target_kd, var_joint_limit_lower, var_joint_limit_upper, var_joint_limit_ke, var_joint_limit_kd, var_joint_S_s, var_body_fb_s, var_body_ft_s, var_tau);
    }
    //---------
    // reverse
    // links are visited in the opposite order to the forward loop
    for (var_7=var_5-1; var_7 >= var_6; --var_7) {
        adj_compute_link_tau_cpu_func(var_7, var_4, var_joint_type, var_joint_parent, var_joint_q_start, var_joint_qd_start, var_joint_q, var_joint_qd, var_joint_act, var_joint_target, var_joint_target_ke, var_joint_target_kd, var_joint_limit_lower, var_joint_limit_upper, var_joint_limit_ke, var_joint_limit_kd, var_joint_S_s, var_body_fb_s, var_body_ft_s, var_tau, adj_7, adj_4, adj_joint_type, adj_joint_parent, adj_joint_q_start, adj_joint_qd_start, adj_joint_q, adj_joint_qd, adj_joint_act, adj_joint_target, adj_joint_target_ke, adj_joint_target_kd, adj_joint_limit_lower, adj_joint_limit_upper, adj_joint_limit_ke, adj_joint_limit_kd, adj_joint_S_s, adj_body_fb_s, adj_body_ft_s, adj_tau, adj_8);
    }
    df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
    df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
    df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
    df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
    return;
}
// Python entry points
// Python entry point: serially emulates "dim" kernel threads on the CPU,
// advancing the global s_threadIdx before each generated-kernel invocation.
void eval_rigid_tau_cpu_forward(int dim,
    torch::Tensor var_articulation_start, torch::Tensor var_joint_type,
    torch::Tensor var_joint_parent, torch::Tensor var_joint_q_start,
    torch::Tensor var_joint_qd_start, torch::Tensor var_joint_q,
    torch::Tensor var_joint_qd, torch::Tensor var_joint_act,
    torch::Tensor var_joint_target, torch::Tensor var_joint_target_ke,
    torch::Tensor var_joint_target_kd, torch::Tensor var_joint_limit_lower,
    torch::Tensor var_joint_limit_upper, torch::Tensor var_joint_limit_ke,
    torch::Tensor var_joint_limit_kd, torch::Tensor var_joint_axis,
    torch::Tensor var_joint_S_s, torch::Tensor var_body_fb_s,
    torch::Tensor var_body_ft_s, torch::Tensor var_tau)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_rigid_tau_cpu_kernel_forward(
            cast<int*>(var_articulation_start), cast<int*>(var_joint_type),
            cast<int*>(var_joint_parent), cast<int*>(var_joint_q_start),
            cast<int*>(var_joint_qd_start), cast<float*>(var_joint_q),
            cast<float*>(var_joint_qd), cast<float*>(var_joint_act),
            cast<float*>(var_joint_target), cast<float*>(var_joint_target_ke),
            cast<float*>(var_joint_target_kd), cast<float*>(var_joint_limit_lower),
            cast<float*>(var_joint_limit_upper), cast<float*>(var_joint_limit_ke),
            cast<float*>(var_joint_limit_kd), cast<df::float3*>(var_joint_axis),
            cast<spatial_vector*>(var_joint_S_s), cast<spatial_vector*>(var_body_fb_s),
            cast<spatial_vector*>(var_body_ft_s), cast<float*>(var_tau));
    }
}
// Python entry point for the adjoint pass: invokes the generated backward
// kernel once per emulated thread index, forwarding primal tensors and their
// matching adjoint (gradient) buffers.
void eval_rigid_tau_cpu_backward(int dim,
    torch::Tensor var_articulation_start, torch::Tensor var_joint_type,
    torch::Tensor var_joint_parent, torch::Tensor var_joint_q_start,
    torch::Tensor var_joint_qd_start, torch::Tensor var_joint_q,
    torch::Tensor var_joint_qd, torch::Tensor var_joint_act,
    torch::Tensor var_joint_target, torch::Tensor var_joint_target_ke,
    torch::Tensor var_joint_target_kd, torch::Tensor var_joint_limit_lower,
    torch::Tensor var_joint_limit_upper, torch::Tensor var_joint_limit_ke,
    torch::Tensor var_joint_limit_kd, torch::Tensor var_joint_axis,
    torch::Tensor var_joint_S_s, torch::Tensor var_body_fb_s,
    torch::Tensor var_body_ft_s, torch::Tensor var_tau,
    torch::Tensor adj_articulation_start, torch::Tensor adj_joint_type,
    torch::Tensor adj_joint_parent, torch::Tensor adj_joint_q_start,
    torch::Tensor adj_joint_qd_start, torch::Tensor adj_joint_q,
    torch::Tensor adj_joint_qd, torch::Tensor adj_joint_act,
    torch::Tensor adj_joint_target, torch::Tensor adj_joint_target_ke,
    torch::Tensor adj_joint_target_kd, torch::Tensor adj_joint_limit_lower,
    torch::Tensor adj_joint_limit_upper, torch::Tensor adj_joint_limit_ke,
    torch::Tensor adj_joint_limit_kd, torch::Tensor adj_joint_axis,
    torch::Tensor adj_joint_S_s, torch::Tensor adj_body_fb_s,
    torch::Tensor adj_body_ft_s, torch::Tensor adj_tau)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_rigid_tau_cpu_kernel_backward(
            cast<int*>(var_articulation_start), cast<int*>(var_joint_type),
            cast<int*>(var_joint_parent), cast<int*>(var_joint_q_start),
            cast<int*>(var_joint_qd_start), cast<float*>(var_joint_q),
            cast<float*>(var_joint_qd), cast<float*>(var_joint_act),
            cast<float*>(var_joint_target), cast<float*>(var_joint_target_ke),
            cast<float*>(var_joint_target_kd), cast<float*>(var_joint_limit_lower),
            cast<float*>(var_joint_limit_upper), cast<float*>(var_joint_limit_ke),
            cast<float*>(var_joint_limit_kd), cast<df::float3*>(var_joint_axis),
            cast<spatial_vector*>(var_joint_S_s), cast<spatial_vector*>(var_body_fb_s),
            cast<spatial_vector*>(var_body_ft_s), cast<float*>(var_tau),
            cast<int*>(adj_articulation_start), cast<int*>(adj_joint_type),
            cast<int*>(adj_joint_parent), cast<int*>(adj_joint_q_start),
            cast<int*>(adj_joint_qd_start), cast<float*>(adj_joint_q),
            cast<float*>(adj_joint_qd), cast<float*>(adj_joint_act),
            cast<float*>(adj_joint_target), cast<float*>(adj_joint_target_ke),
            cast<float*>(adj_joint_target_kd), cast<float*>(adj_joint_limit_lower),
            cast<float*>(adj_joint_limit_upper), cast<float*>(adj_joint_limit_ke),
            cast<float*>(adj_joint_limit_kd), cast<df::float3*>(adj_joint_axis),
            cast<spatial_vector*>(adj_joint_S_s), cast<spatial_vector*>(adj_body_fb_s),
            cast<spatial_vector*>(adj_body_ft_s), cast<float*>(adj_tau));
    }
}
// Python entry point declarations (redundant prototypes for the definitions above)
void eval_rigid_tau_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_act,
torch::Tensor var_joint_target,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_joint_limit_lower,
torch::Tensor var_joint_limit_upper,
torch::Tensor var_joint_limit_ke,
torch::Tensor var_joint_limit_kd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_fb_s,
torch::Tensor var_body_ft_s,
torch::Tensor var_tau);
void eval_rigid_tau_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_joint_type,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_q_start,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_q,
torch::Tensor var_joint_qd,
torch::Tensor var_joint_act,
torch::Tensor var_joint_target,
torch::Tensor var_joint_target_ke,
torch::Tensor var_joint_target_kd,
torch::Tensor var_joint_limit_lower,
torch::Tensor var_joint_limit_upper,
torch::Tensor var_joint_limit_ke,
torch::Tensor var_joint_limit_kd,
torch::Tensor var_joint_axis,
torch::Tensor var_joint_S_s,
torch::Tensor var_body_fb_s,
torch::Tensor var_body_ft_s,
torch::Tensor var_tau,
torch::Tensor adj_articulation_start,
torch::Tensor adj_joint_type,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_q_start,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_q,
torch::Tensor adj_joint_qd,
torch::Tensor adj_joint_act,
torch::Tensor adj_joint_target,
torch::Tensor adj_joint_target_ke,
torch::Tensor adj_joint_target_kd,
torch::Tensor adj_joint_limit_lower,
torch::Tensor adj_joint_limit_upper,
torch::Tensor adj_joint_limit_ke,
torch::Tensor adj_joint_limit_kd,
torch::Tensor adj_joint_axis,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_body_fb_s,
torch::Tensor adj_body_ft_s,
torch::Tensor adj_tau);
// Generated forward kernel: for one articulation per thread, delegates to
// df::spatial_jacobian to fill this articulation's block of the dense
// Jacobian var_J (output offset taken from var_articulation_J_start).
void eval_rigid_jacobian_cpu_kernel_forward(
    int* var_articulation_start,
    int* var_articulation_J_start,
    int* var_joint_parent,
    int* var_joint_qd_start,
    spatial_vector* var_joint_S_s,
    float* var_J)
{
    //---------
    // primal vars
    int var_0;              // thread id == articulation index
    int var_1;              // articulation_start[var_0] (first link)
    const int var_2 = 1;
    int var_3;              // var_0 + 1
    int var_4;              // articulation_start[var_0 + 1] (one-past-last link)
    int var_5;              // link count
    int var_6;              // articulation_J_start[var_0]: offset into var_J
    //---------
    // forward
    var_0 = df.tid();
    var_1 = df::load(var_articulation_start, var_0);
    var_3 = df::add(var_0, var_2);
    var_4 = df::load(var_articulation_start, var_3);
    var_5 = df::sub(var_4, var_1);
    var_6 = df::load(var_articulation_J_start, var_0);
    df::spatial_jacobian(var_joint_S_s, var_joint_parent, var_joint_qd_start, var_1, var_5, var_6, var_J);
}
// Generated backward (adjoint) kernel: recomputes the forward pass, then
// replays it in reverse, accumulating gradients into the adj_* buffers.
// Statement ordering is the reverse-mode contract emitted by the generator.
void eval_rigid_jacobian_cpu_kernel_backward(
    int* var_articulation_start,
    int* var_articulation_J_start,
    int* var_joint_parent,
    int* var_joint_qd_start,
    spatial_vector* var_joint_S_s,
    float* var_J,
    int* adj_articulation_start,
    int* adj_articulation_J_start,
    int* adj_joint_parent,
    int* adj_joint_qd_start,
    spatial_vector* adj_joint_S_s,
    float* adj_J)
{
    //---------
    // primal vars
    int var_0;              // thread id == articulation index
    int var_1;              // first link
    const int var_2 = 1;
    int var_3;              // var_0 + 1
    int var_4;              // one-past-last link
    int var_5;              // link count
    int var_6;              // offset into var_J
    //---------
    // dual vars
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::load(var_articulation_start, var_0);
    var_3 = df::add(var_0, var_2);
    var_4 = df::load(var_articulation_start, var_3);
    var_5 = df::sub(var_4, var_1);
    var_6 = df::load(var_articulation_J_start, var_0);
    df::spatial_jacobian(var_joint_S_s, var_joint_parent, var_joint_qd_start, var_1, var_5, var_6, var_J);
    //---------
    // reverse
    df::adj_spatial_jacobian(var_joint_S_s, var_joint_parent, var_joint_qd_start, var_1, var_5, var_6, var_J, adj_joint_S_s, adj_joint_parent, adj_joint_qd_start, adj_1, adj_5, adj_6, adj_J);
    df::adj_load(var_articulation_J_start, var_0, adj_articulation_J_start, adj_0, adj_6);
    df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
    df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
    df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
    df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
    return;
}
// Python entry points
// Python entry point: emulates "dim" kernel threads serially, stepping the
// global s_threadIdx before each generated-kernel invocation.
void eval_rigid_jacobian_cpu_forward(int dim,
    torch::Tensor var_articulation_start, torch::Tensor var_articulation_J_start,
    torch::Tensor var_joint_parent, torch::Tensor var_joint_qd_start,
    torch::Tensor var_joint_S_s, torch::Tensor var_J)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_rigid_jacobian_cpu_kernel_forward(
            cast<int*>(var_articulation_start), cast<int*>(var_articulation_J_start),
            cast<int*>(var_joint_parent), cast<int*>(var_joint_qd_start),
            cast<spatial_vector*>(var_joint_S_s), cast<float*>(var_J));
    }
}
// Python entry point for the adjoint pass: forwards primal tensors and their
// adjoint buffers to the generated backward kernel once per thread index.
void eval_rigid_jacobian_cpu_backward(int dim,
    torch::Tensor var_articulation_start, torch::Tensor var_articulation_J_start,
    torch::Tensor var_joint_parent, torch::Tensor var_joint_qd_start,
    torch::Tensor var_joint_S_s, torch::Tensor var_J,
    torch::Tensor adj_articulation_start, torch::Tensor adj_articulation_J_start,
    torch::Tensor adj_joint_parent, torch::Tensor adj_joint_qd_start,
    torch::Tensor adj_joint_S_s, torch::Tensor adj_J)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_rigid_jacobian_cpu_kernel_backward(
            cast<int*>(var_articulation_start), cast<int*>(var_articulation_J_start),
            cast<int*>(var_joint_parent), cast<int*>(var_joint_qd_start),
            cast<spatial_vector*>(var_joint_S_s), cast<float*>(var_J),
            cast<int*>(adj_articulation_start), cast<int*>(adj_articulation_J_start),
            cast<int*>(adj_joint_parent), cast<int*>(adj_joint_qd_start),
            cast<spatial_vector*>(adj_joint_S_s), cast<float*>(adj_J));
    }
}
// Python entry point declarations (redundant prototypes for the definitions above)
void eval_rigid_jacobian_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_J_start,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_S_s,
torch::Tensor var_J);
void eval_rigid_jacobian_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_J_start,
torch::Tensor var_joint_parent,
torch::Tensor var_joint_qd_start,
torch::Tensor var_joint_S_s,
torch::Tensor var_J,
torch::Tensor adj_articulation_start,
torch::Tensor adj_articulation_J_start,
torch::Tensor adj_joint_parent,
torch::Tensor adj_joint_qd_start,
torch::Tensor adj_joint_S_s,
torch::Tensor adj_J);
// Generated forward kernel: for one articulation per thread, delegates to
// df::spatial_mass to assemble this articulation's block of the dense mass
// matrix var_M from the per-body spatial inertias var_body_I_s.
void eval_rigid_mass_cpu_kernel_forward(
    int* var_articulation_start,
    int* var_articulation_M_start,
    spatial_matrix* var_body_I_s,
    float* var_M)
{
    //---------
    // primal vars
    int var_0;              // thread id == articulation index
    int var_1;              // articulation_start[var_0] (first link)
    const int var_2 = 1;
    int var_3;              // var_0 + 1
    int var_4;              // articulation_start[var_0 + 1] (one-past-last link)
    int var_5;              // link count
    int var_6;              // articulation_M_start[var_0]: offset into var_M
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::load(var_articulation_start, var_0);
    var_3 = df::add(var_0, var_2);
    var_4 = df::load(var_articulation_start, var_3);
    var_5 = df::sub(var_4, var_1);
    var_6 = df::load(var_articulation_M_start, var_0);
    df::spatial_mass(var_body_I_s, var_1, var_5, var_6, var_M);
}
// Generated backward (adjoint) kernel: recomputes the forward pass, then
// replays it in reverse, accumulating gradients into the adj_* buffers.
// Statement ordering is the reverse-mode contract emitted by the generator.
void eval_rigid_mass_cpu_kernel_backward(
    int* var_articulation_start,
    int* var_articulation_M_start,
    spatial_matrix* var_body_I_s,
    float* var_M,
    int* adj_articulation_start,
    int* adj_articulation_M_start,
    spatial_matrix* adj_body_I_s,
    float* adj_M)
{
    //---------
    // primal vars
    int var_0;              // thread id == articulation index
    int var_1;              // first link
    const int var_2 = 1;
    int var_3;              // var_0 + 1
    int var_4;              // one-past-last link
    int var_5;              // link count
    int var_6;              // offset into var_M
    //---------
    // dual vars
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::load(var_articulation_start, var_0);
    var_3 = df::add(var_0, var_2);
    var_4 = df::load(var_articulation_start, var_3);
    var_5 = df::sub(var_4, var_1);
    var_6 = df::load(var_articulation_M_start, var_0);
    df::spatial_mass(var_body_I_s, var_1, var_5, var_6, var_M);
    //---------
    // reverse
    df::adj_spatial_mass(var_body_I_s, var_1, var_5, var_6, var_M, adj_body_I_s, adj_1, adj_5, adj_6, adj_M);
    df::adj_load(var_articulation_M_start, var_0, adj_articulation_M_start, adj_0, adj_6);
    df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
    df::adj_load(var_articulation_start, var_3, adj_articulation_start, adj_3, adj_4);
    df::adj_add(var_0, var_2, adj_0, adj_2, adj_3);
    df::adj_load(var_articulation_start, var_0, adj_articulation_start, adj_0, adj_1);
    return;
}
// Python entry points
// Python entry point: emulates "dim" kernel threads serially, stepping the
// global s_threadIdx before each generated-kernel invocation.
void eval_rigid_mass_cpu_forward(int dim,
    torch::Tensor var_articulation_start, torch::Tensor var_articulation_M_start,
    torch::Tensor var_body_I_s, torch::Tensor var_M)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_rigid_mass_cpu_kernel_forward(
            cast<int*>(var_articulation_start), cast<int*>(var_articulation_M_start),
            cast<spatial_matrix*>(var_body_I_s), cast<float*>(var_M));
    }
}
// Python entry point for the adjoint pass: forwards primal tensors and their
// adjoint buffers to the generated backward kernel once per thread index.
void eval_rigid_mass_cpu_backward(int dim,
    torch::Tensor var_articulation_start, torch::Tensor var_articulation_M_start,
    torch::Tensor var_body_I_s, torch::Tensor var_M,
    torch::Tensor adj_articulation_start, torch::Tensor adj_articulation_M_start,
    torch::Tensor adj_body_I_s, torch::Tensor adj_M)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_rigid_mass_cpu_kernel_backward(
            cast<int*>(var_articulation_start), cast<int*>(var_articulation_M_start),
            cast<spatial_matrix*>(var_body_I_s), cast<float*>(var_M),
            cast<int*>(adj_articulation_start), cast<int*>(adj_articulation_M_start),
            cast<spatial_matrix*>(adj_body_I_s), cast<float*>(adj_M));
    }
}
// Python entry point declarations (redundant prototypes for the definitions above)
void eval_rigid_mass_cpu_forward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_M_start,
torch::Tensor var_body_I_s,
torch::Tensor var_M);
void eval_rigid_mass_cpu_backward(int dim,
torch::Tensor var_articulation_start,
torch::Tensor var_articulation_M_start,
torch::Tensor var_body_I_s,
torch::Tensor var_M,
torch::Tensor adj_articulation_start,
torch::Tensor adj_articulation_M_start,
torch::Tensor adj_body_I_s,
torch::Tensor adj_M);
// Generated forward kernel: thin pass-through to df::dense_gemm. Dimension
// arguments (m, n, p) and transpose flags (t1, t2) are interpreted by that
// helper — see df::dense_gemm for the exact layout convention.
void eval_dense_gemm_cpu_kernel_forward(
    int var_m,
    int var_n,
    int var_p,
    int var_t1,
    int var_t2,
    float* var_A,
    float* var_B,
    float* var_C)
{
    //---------
    // primal vars
    //---------
    // forward
    df::dense_gemm(var_m, var_n, var_p, var_t1, var_t2, var_A, var_B, var_C);
}
// Generated backward (adjoint) kernel: re-runs the primal GEMM, then calls the
// adjoint helper which accumulates gradients into the adj_* buffers.
void eval_dense_gemm_cpu_kernel_backward(
    int var_m,
    int var_n,
    int var_p,
    int var_t1,
    int var_t2,
    float* var_A,
    float* var_B,
    float* var_C,
    int adj_m,
    int adj_n,
    int adj_p,
    int adj_t1,
    int adj_t2,
    float* adj_A,
    float* adj_B,
    float* adj_C)
{
    //---------
    // primal vars
    //---------
    // dual vars
    //---------
    // forward
    df::dense_gemm(var_m, var_n, var_p, var_t1, var_t2, var_A, var_B, var_C);
    //---------
    // reverse
    df::adj_dense_gemm(var_m, var_n, var_p, var_t1, var_t2, var_A, var_B, var_C, adj_m, adj_n, adj_p, adj_t1, adj_t2, adj_A, adj_B, adj_C);
    return;
}
// Python entry points
// Python entry point: emulates "dim" kernel threads serially, stepping the
// global s_threadIdx before each generated-kernel invocation. Scalar
// dimension/transpose arguments are passed through by value.
void eval_dense_gemm_cpu_forward(int dim,
    int var_m, int var_n, int var_p, int var_t1, int var_t2,
    torch::Tensor var_A, torch::Tensor var_B, torch::Tensor var_C)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_dense_gemm_cpu_kernel_forward(
            var_m, var_n, var_p, var_t1, var_t2,
            cast<float*>(var_A), cast<float*>(var_B), cast<float*>(var_C));
    }
}
// Python entry point for the adjoint pass: forwards primal arguments and their
// adjoint counterparts to the generated backward kernel once per thread index.
void eval_dense_gemm_cpu_backward(int dim,
    int var_m, int var_n, int var_p, int var_t1, int var_t2,
    torch::Tensor var_A, torch::Tensor var_B, torch::Tensor var_C,
    int adj_m, int adj_n, int adj_p, int adj_t1, int adj_t2,
    torch::Tensor adj_A, torch::Tensor adj_B, torch::Tensor adj_C)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_dense_gemm_cpu_kernel_backward(
            var_m, var_n, var_p, var_t1, var_t2,
            cast<float*>(var_A), cast<float*>(var_B), cast<float*>(var_C),
            adj_m, adj_n, adj_p, adj_t1, adj_t2,
            cast<float*>(adj_A), cast<float*>(adj_B), cast<float*>(adj_C));
    }
}
// Python entry point declarations (redundant prototypes for the definitions above)
void eval_dense_gemm_cpu_forward(int dim,
int var_m,
int var_n,
int var_p,
int var_t1,
int var_t2,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C);
void eval_dense_gemm_cpu_backward(int dim,
int var_m,
int var_n,
int var_p,
int var_t1,
int var_t2,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C,
int adj_m,
int adj_n,
int adj_p,
int adj_t1,
int adj_t2,
torch::Tensor adj_A,
torch::Tensor adj_B,
torch::Tensor adj_C);
// Generated forward kernel: pass-through to df::dense_gemm_batched. Per-batch
// dimensions (var_m/n/p) and sub-matrix offsets (var_*_start) are arrays;
// transpose flags t1/t2 are shared across the batch.
void eval_dense_gemm_batched_cpu_kernel_forward(
    int* var_m,
    int* var_n,
    int* var_p,
    int var_t1,
    int var_t2,
    int* var_A_start,
    int* var_B_start,
    int* var_C_start,
    float* var_A,
    float* var_B,
    float* var_C)
{
    //---------
    // primal vars
    //---------
    // forward
    df::dense_gemm_batched(var_m, var_n, var_p, var_t1, var_t2, var_A_start, var_B_start, var_C_start, var_A, var_B, var_C);
}
// Generated backward (adjoint) kernel: re-runs the primal batched GEMM, then
// calls the adjoint helper which accumulates gradients into the adj_* buffers.
void eval_dense_gemm_batched_cpu_kernel_backward(
    int* var_m,
    int* var_n,
    int* var_p,
    int var_t1,
    int var_t2,
    int* var_A_start,
    int* var_B_start,
    int* var_C_start,
    float* var_A,
    float* var_B,
    float* var_C,
    int* adj_m,
    int* adj_n,
    int* adj_p,
    int adj_t1,
    int adj_t2,
    int* adj_A_start,
    int* adj_B_start,
    int* adj_C_start,
    float* adj_A,
    float* adj_B,
    float* adj_C)
{
    //---------
    // primal vars
    //---------
    // dual vars
    //---------
    // forward
    df::dense_gemm_batched(var_m, var_n, var_p, var_t1, var_t2, var_A_start, var_B_start, var_C_start, var_A, var_B, var_C);
    //---------
    // reverse
    df::adj_dense_gemm_batched(var_m, var_n, var_p, var_t1, var_t2, var_A_start, var_B_start, var_C_start, var_A, var_B, var_C, adj_m, adj_n, adj_p, adj_t1, adj_t2, adj_A_start, adj_B_start, adj_C_start, adj_A, adj_B, adj_C);
    return;
}
// Python entry points
// Python entry point: emulates "dim" kernel threads serially, stepping the
// global s_threadIdx before each generated-kernel invocation.
void eval_dense_gemm_batched_cpu_forward(int dim,
    torch::Tensor var_m, torch::Tensor var_n, torch::Tensor var_p,
    int var_t1, int var_t2,
    torch::Tensor var_A_start, torch::Tensor var_B_start, torch::Tensor var_C_start,
    torch::Tensor var_A, torch::Tensor var_B, torch::Tensor var_C)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_dense_gemm_batched_cpu_kernel_forward(
            cast<int*>(var_m), cast<int*>(var_n), cast<int*>(var_p),
            var_t1, var_t2,
            cast<int*>(var_A_start), cast<int*>(var_B_start), cast<int*>(var_C_start),
            cast<float*>(var_A), cast<float*>(var_B), cast<float*>(var_C));
    }
}
// Python entry point for the adjoint pass: forwards primal arguments and their
// adjoint counterparts to the generated backward kernel once per thread index.
void eval_dense_gemm_batched_cpu_backward(int dim,
    torch::Tensor var_m, torch::Tensor var_n, torch::Tensor var_p,
    int var_t1, int var_t2,
    torch::Tensor var_A_start, torch::Tensor var_B_start, torch::Tensor var_C_start,
    torch::Tensor var_A, torch::Tensor var_B, torch::Tensor var_C,
    torch::Tensor adj_m, torch::Tensor adj_n, torch::Tensor adj_p,
    int adj_t1, int adj_t2,
    torch::Tensor adj_A_start, torch::Tensor adj_B_start, torch::Tensor adj_C_start,
    torch::Tensor adj_A, torch::Tensor adj_B, torch::Tensor adj_C)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_dense_gemm_batched_cpu_kernel_backward(
            cast<int*>(var_m), cast<int*>(var_n), cast<int*>(var_p),
            var_t1, var_t2,
            cast<int*>(var_A_start), cast<int*>(var_B_start), cast<int*>(var_C_start),
            cast<float*>(var_A), cast<float*>(var_B), cast<float*>(var_C),
            cast<int*>(adj_m), cast<int*>(adj_n), cast<int*>(adj_p),
            adj_t1, adj_t2,
            cast<int*>(adj_A_start), cast<int*>(adj_B_start), cast<int*>(adj_C_start),
            cast<float*>(adj_A), cast<float*>(adj_B), cast<float*>(adj_C));
    }
}
// Python entry point declarations (redundant prototypes for the definitions above)
void eval_dense_gemm_batched_cpu_forward(int dim,
torch::Tensor var_m,
torch::Tensor var_n,
torch::Tensor var_p,
int var_t1,
int var_t2,
torch::Tensor var_A_start,
torch::Tensor var_B_start,
torch::Tensor var_C_start,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C);
void eval_dense_gemm_batched_cpu_backward(int dim,
torch::Tensor var_m,
torch::Tensor var_n,
torch::Tensor var_p,
int var_t1,
int var_t2,
torch::Tensor var_A_start,
torch::Tensor var_B_start,
torch::Tensor var_C_start,
torch::Tensor var_A,
torch::Tensor var_B,
torch::Tensor var_C,
torch::Tensor adj_m,
torch::Tensor adj_n,
torch::Tensor adj_p,
int adj_t1,
int adj_t2,
torch::Tensor adj_A_start,
torch::Tensor adj_B_start,
torch::Tensor adj_C_start,
torch::Tensor adj_A,
torch::Tensor adj_B,
torch::Tensor adj_C);
// Generated forward kernel: pass-through to df::dense_chol, which factors the
// n x n matrix var_A into var_L (regularization presumably added to the
// diagonal — see df::dense_chol for the exact convention).
void eval_dense_cholesky_cpu_kernel_forward(
    int var_n,
    float* var_A,
    float* var_regularization,
    float* var_L)
{
    //---------
    // primal vars
    //---------
    // forward
    df::dense_chol(var_n, var_A, var_regularization, var_L);
}
// Generated backward (adjoint) kernel: re-runs the primal factorization, then
// calls the adjoint helper which accumulates gradients into the adj_* buffers.
void eval_dense_cholesky_cpu_kernel_backward(
    int var_n,
    float* var_A,
    float* var_regularization,
    float* var_L,
    int adj_n,
    float* adj_A,
    float* adj_regularization,
    float* adj_L)
{
    //---------
    // primal vars
    //---------
    // dual vars
    //---------
    // forward
    df::dense_chol(var_n, var_A, var_regularization, var_L);
    //---------
    // reverse
    df::adj_dense_chol(var_n, var_A, var_regularization, var_L, adj_n, adj_A, adj_regularization, adj_L);
    return;
}
// Python entry points
// Python entry point: emulates "dim" kernel threads serially, stepping the
// global s_threadIdx before each generated-kernel invocation.
void eval_dense_cholesky_cpu_forward(int dim,
    int var_n,
    torch::Tensor var_A, torch::Tensor var_regularization, torch::Tensor var_L)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_dense_cholesky_cpu_kernel_forward(
            var_n,
            cast<float*>(var_A), cast<float*>(var_regularization), cast<float*>(var_L));
    }
}
// Python entry point for the adjoint pass: forwards primal arguments and their
// adjoint counterparts to the generated backward kernel once per thread index.
void eval_dense_cholesky_cpu_backward(int dim,
    int var_n,
    torch::Tensor var_A, torch::Tensor var_regularization, torch::Tensor var_L,
    int adj_n,
    torch::Tensor adj_A, torch::Tensor adj_regularization, torch::Tensor adj_L)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_dense_cholesky_cpu_kernel_backward(
            var_n,
            cast<float*>(var_A), cast<float*>(var_regularization), cast<float*>(var_L),
            adj_n,
            cast<float*>(adj_A), cast<float*>(adj_regularization), cast<float*>(adj_L));
    }
}
// Python entry point declarations (redundant prototypes for the definitions above)
void eval_dense_cholesky_cpu_forward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L);
void eval_dense_cholesky_cpu_backward(int dim,
int var_n,
torch::Tensor var_A,
torch::Tensor var_regularization,
torch::Tensor var_L,
int adj_n,
torch::Tensor adj_A,
torch::Tensor adj_regularization,
torch::Tensor adj_L);
// Generated forward kernel: pass-through to df::dense_chol_batched. Per-batch
// matrix offsets (var_A_start) and sizes (var_A_dim) are arrays; the factor is
// written to var_L.
void eval_dense_cholesky_batched_cpu_kernel_forward(
    int* var_A_start,
    int* var_A_dim,
    float* var_A,
    float* var_regularization,
    float* var_L)
{
    //---------
    // primal vars
    //---------
    // forward
    df::dense_chol_batched(var_A_start, var_A_dim, var_A, var_regularization, var_L);
}
// Generated backward (adjoint) kernel: re-runs the primal batched
// factorization, then calls the adjoint helper which accumulates gradients
// into the adj_* buffers.
void eval_dense_cholesky_batched_cpu_kernel_backward(
    int* var_A_start,
    int* var_A_dim,
    float* var_A,
    float* var_regularization,
    float* var_L,
    int* adj_A_start,
    int* adj_A_dim,
    float* adj_A,
    float* adj_regularization,
    float* adj_L)
{
    //---------
    // primal vars
    //---------
    // dual vars
    //---------
    // forward
    df::dense_chol_batched(var_A_start, var_A_dim, var_A, var_regularization, var_L);
    //---------
    // reverse
    df::adj_dense_chol_batched(var_A_start, var_A_dim, var_A, var_regularization, var_L, adj_A_start, adj_A_dim, adj_A, adj_regularization, adj_L);
    return;
}
// Python entry points
// Python entry point: emulates "dim" kernel threads serially, stepping the
// global s_threadIdx before each generated-kernel invocation.
void eval_dense_cholesky_batched_cpu_forward(int dim,
    torch::Tensor var_A_start, torch::Tensor var_A_dim,
    torch::Tensor var_A, torch::Tensor var_regularization, torch::Tensor var_L)
{
    int tid = 0;
    while (tid < dim)
    {
        s_threadIdx = tid++;
        eval_dense_cholesky_batched_cpu_kernel_forward(
            cast<int*>(var_A_start), cast<int*>(var_A_dim),
            cast<float*>(var_A), cast<float*>(var_regularization), cast<float*>(var_L));
    }
}
// Serial CPU "launch" of the batched Cholesky backward kernel: one kernel
// invocation per thread index in [0, dim), accumulating into the adj_* tensors.
void eval_dense_cholesky_batched_cpu_backward(int dim,
	torch::Tensor var_A_start,
	torch::Tensor var_A_dim,
	torch::Tensor var_A,
	torch::Tensor var_regularization,
	torch::Tensor var_L,
	torch::Tensor adj_A_start,
	torch::Tensor adj_A_dim,
	torch::Tensor adj_A,
	torch::Tensor adj_regularization,
	torch::Tensor adj_L)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        eval_dense_cholesky_batched_cpu_kernel_backward(cast<int*>(var_A_start),
                                                        cast<int*>(var_A_dim),
                                                        cast<float*>(var_A),
                                                        cast<float*>(var_regularization),
                                                        cast<float*>(var_L),
                                                        cast<int*>(adj_A_start),
                                                        cast<int*>(adj_A_dim),
                                                        cast<float*>(adj_A),
                                                        cast<float*>(adj_regularization),
                                                        cast<float*>(adj_L));
    }
}
// Python entry points
// NOTE(review): redundant re-declarations generated after the definitions above.
void eval_dense_cholesky_batched_cpu_forward(int dim,
	torch::Tensor var_A_start,
	torch::Tensor var_A_dim,
	torch::Tensor var_A,
	torch::Tensor var_regularization,
	torch::Tensor var_L);
void eval_dense_cholesky_batched_cpu_backward(int dim,
	torch::Tensor var_A_start,
	torch::Tensor var_A_dim,
	torch::Tensor var_A,
	torch::Tensor var_regularization,
	torch::Tensor var_L,
	torch::Tensor adj_A_start,
	torch::Tensor adj_A_dim,
	torch::Tensor adj_A,
	torch::Tensor adj_regularization,
	torch::Tensor adj_L);
// Generated CPU kernel: forward substitution solve x = solve(L, b) for an
// n x n factor L (semantics defined by df::dense_subs).
void eval_dense_subs_cpu_kernel_forward(
	int var_n,
	float* var_L,
	float* var_b,
	float* var_x)
{
    //---------
    // primal vars
    //---------
    // forward
    df::dense_subs(var_n, var_L, var_b, var_x);
}
// Generated CPU kernel: backward (adjoint) pass of the substitution solve.
// Replays the forward solve, then accumulates adjoints via df::adj_dense_subs.
void eval_dense_subs_cpu_kernel_backward(
	int var_n,
	float* var_L,
	float* var_b,
	float* var_x,
	int adj_n,
	float* adj_L,
	float* adj_b,
	float* adj_x)
{
    //---------
    // primal vars
    //---------
    // dual vars
    //---------
    // forward
    df::dense_subs(var_n, var_L, var_b, var_x);
    //---------
    // reverse
    df::adj_dense_subs(var_n, var_L, var_b, var_x, adj_n, adj_L, adj_b, adj_x);
    return;
}
// Python entry points
// Serial CPU "launch" of the substitution-solve forward kernel.
void eval_dense_subs_cpu_forward(int dim,
	int var_n,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_x)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        eval_dense_subs_cpu_kernel_forward(var_n,
                                           cast<float*>(var_L),
                                           cast<float*>(var_b),
                                           cast<float*>(var_x));
    }
}
// Serial CPU "launch" of the substitution-solve backward kernel.
void eval_dense_subs_cpu_backward(int dim,
	int var_n,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_x,
	int adj_n,
	torch::Tensor adj_L,
	torch::Tensor adj_b,
	torch::Tensor adj_x)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        eval_dense_subs_cpu_kernel_backward(var_n,
                                            cast<float*>(var_L),
                                            cast<float*>(var_b),
                                            cast<float*>(var_x),
                                            adj_n,
                                            cast<float*>(adj_L),
                                            cast<float*>(adj_b),
                                            cast<float*>(adj_x));
    }
}
// Python entry points
// NOTE(review): redundant re-declarations generated after the definitions above.
void eval_dense_subs_cpu_forward(int dim,
	int var_n,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_x);
void eval_dense_subs_cpu_backward(int dim,
	int var_n,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_x,
	int adj_n,
	torch::Tensor adj_L,
	torch::Tensor adj_b,
	torch::Tensor adj_x);
// Generated CPU kernel: forward dense linear solve of A x = b using the factor L
// and scratch buffer tmp (semantics defined by df::dense_solve).
void eval_dense_solve_cpu_kernel_forward(
	int var_n,
	float* var_A,
	float* var_L,
	float* var_b,
	float* var_tmp,
	float* var_x)
{
    //---------
    // primal vars
    //---------
    // forward
    df::dense_solve(var_n, var_A, var_L, var_b, var_tmp, var_x);
}
// Generated CPU kernel: backward (adjoint) pass of the dense linear solve.
// Replays the forward solve, then accumulates adjoints via df::adj_dense_solve.
void eval_dense_solve_cpu_kernel_backward(
	int var_n,
	float* var_A,
	float* var_L,
	float* var_b,
	float* var_tmp,
	float* var_x,
	int adj_n,
	float* adj_A,
	float* adj_L,
	float* adj_b,
	float* adj_tmp,
	float* adj_x)
{
    //---------
    // primal vars
    //---------
    // dual vars
    //---------
    // forward
    df::dense_solve(var_n, var_A, var_L, var_b, var_tmp, var_x);
    //---------
    // reverse
    df::adj_dense_solve(var_n, var_A, var_L, var_b, var_tmp, var_x, adj_n, adj_A, adj_L, adj_b, adj_tmp, adj_x);
    return;
}
// Python entry points
// Serial CPU "launch" of the dense-solve forward kernel.
void eval_dense_solve_cpu_forward(int dim,
	int var_n,
	torch::Tensor var_A,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_tmp,
	torch::Tensor var_x)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        eval_dense_solve_cpu_kernel_forward(var_n,
                                            cast<float*>(var_A),
                                            cast<float*>(var_L),
                                            cast<float*>(var_b),
                                            cast<float*>(var_tmp),
                                            cast<float*>(var_x));
    }
}
// Serial CPU "launch" of the dense-solve backward kernel.
void eval_dense_solve_cpu_backward(int dim,
	int var_n,
	torch::Tensor var_A,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_tmp,
	torch::Tensor var_x,
	int adj_n,
	torch::Tensor adj_A,
	torch::Tensor adj_L,
	torch::Tensor adj_b,
	torch::Tensor adj_tmp,
	torch::Tensor adj_x)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        eval_dense_solve_cpu_kernel_backward(var_n,
                                             cast<float*>(var_A),
                                             cast<float*>(var_L),
                                             cast<float*>(var_b),
                                             cast<float*>(var_tmp),
                                             cast<float*>(var_x),
                                             adj_n,
                                             cast<float*>(adj_A),
                                             cast<float*>(adj_L),
                                             cast<float*>(adj_b),
                                             cast<float*>(adj_tmp),
                                             cast<float*>(adj_x));
    }
}
// Python entry points
// NOTE(review): redundant re-declarations generated after the definitions above.
void eval_dense_solve_cpu_forward(int dim,
	int var_n,
	torch::Tensor var_A,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_tmp,
	torch::Tensor var_x);
void eval_dense_solve_cpu_backward(int dim,
	int var_n,
	torch::Tensor var_A,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_tmp,
	torch::Tensor var_x,
	int adj_n,
	torch::Tensor adj_A,
	torch::Tensor adj_L,
	torch::Tensor adj_b,
	torch::Tensor adj_tmp,
	torch::Tensor adj_x);
// Generated CPU kernel: forward batched dense solve.  b_start / A_start / A_dim
// describe each sub-problem's offsets and dimension inside the flat arrays
// (semantics defined by df::dense_solve_batched).
void eval_dense_solve_batched_cpu_kernel_forward(
	int* var_b_start,
	int* var_A_start,
	int* var_A_dim,
	float* var_A,
	float* var_L,
	float* var_b,
	float* var_tmp,
	float* var_x)
{
    //---------
    // primal vars
    //---------
    // forward
    df::dense_solve_batched(var_b_start, var_A_start, var_A_dim, var_A, var_L, var_b, var_tmp, var_x);
}
// Generated CPU kernel: backward (adjoint) pass of the batched dense solve.
// Replays the forward solve, then accumulates adjoints via
// df::adj_dense_solve_batched into the adj_* buffers.
void eval_dense_solve_batched_cpu_kernel_backward(
	int* var_b_start,
	int* var_A_start,
	int* var_A_dim,
	float* var_A,
	float* var_L,
	float* var_b,
	float* var_tmp,
	float* var_x,
	int* adj_b_start,
	int* adj_A_start,
	int* adj_A_dim,
	float* adj_A,
	float* adj_L,
	float* adj_b,
	float* adj_tmp,
	float* adj_x)
{
    //---------
    // primal vars
    //---------
    // dual vars
    //---------
    // forward
    df::dense_solve_batched(var_b_start, var_A_start, var_A_dim, var_A, var_L, var_b, var_tmp, var_x);
    //---------
    // reverse
    df::adj_dense_solve_batched(var_b_start, var_A_start, var_A_dim, var_A, var_L, var_b, var_tmp, var_x, adj_b_start, adj_A_start, adj_A_dim, adj_A, adj_L, adj_b, adj_tmp, adj_x);
    return;
}
// Python entry points
// Serial CPU "launch" of the batched dense-solve forward kernel.
void eval_dense_solve_batched_cpu_forward(int dim,
	torch::Tensor var_b_start,
	torch::Tensor var_A_start,
	torch::Tensor var_A_dim,
	torch::Tensor var_A,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_tmp,
	torch::Tensor var_x)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        eval_dense_solve_batched_cpu_kernel_forward(cast<int*>(var_b_start),
                                                    cast<int*>(var_A_start),
                                                    cast<int*>(var_A_dim),
                                                    cast<float*>(var_A),
                                                    cast<float*>(var_L),
                                                    cast<float*>(var_b),
                                                    cast<float*>(var_tmp),
                                                    cast<float*>(var_x));
    }
}
// Serial CPU "launch" of the batched dense-solve backward kernel.
void eval_dense_solve_batched_cpu_backward(int dim,
	torch::Tensor var_b_start,
	torch::Tensor var_A_start,
	torch::Tensor var_A_dim,
	torch::Tensor var_A,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_tmp,
	torch::Tensor var_x,
	torch::Tensor adj_b_start,
	torch::Tensor adj_A_start,
	torch::Tensor adj_A_dim,
	torch::Tensor adj_A,
	torch::Tensor adj_L,
	torch::Tensor adj_b,
	torch::Tensor adj_tmp,
	torch::Tensor adj_x)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        eval_dense_solve_batched_cpu_kernel_backward(cast<int*>(var_b_start),
                                                     cast<int*>(var_A_start),
                                                     cast<int*>(var_A_dim),
                                                     cast<float*>(var_A),
                                                     cast<float*>(var_L),
                                                     cast<float*>(var_b),
                                                     cast<float*>(var_tmp),
                                                     cast<float*>(var_x),
                                                     cast<int*>(adj_b_start),
                                                     cast<int*>(adj_A_start),
                                                     cast<int*>(adj_A_dim),
                                                     cast<float*>(adj_A),
                                                     cast<float*>(adj_L),
                                                     cast<float*>(adj_b),
                                                     cast<float*>(adj_tmp),
                                                     cast<float*>(adj_x));
    }
}
// Python entry points
// NOTE(review): redundant re-declarations generated after the definitions above.
void eval_dense_solve_batched_cpu_forward(int dim,
	torch::Tensor var_b_start,
	torch::Tensor var_A_start,
	torch::Tensor var_A_dim,
	torch::Tensor var_A,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_tmp,
	torch::Tensor var_x);
void eval_dense_solve_batched_cpu_backward(int dim,
	torch::Tensor var_b_start,
	torch::Tensor var_A_start,
	torch::Tensor var_A_dim,
	torch::Tensor var_A,
	torch::Tensor var_L,
	torch::Tensor var_b,
	torch::Tensor var_tmp,
	torch::Tensor var_x,
	torch::Tensor adj_b_start,
	torch::Tensor adj_A_start,
	torch::Tensor adj_A_dim,
	torch::Tensor adj_A,
	torch::Tensor adj_L,
	torch::Tensor adj_b,
	torch::Tensor adj_tmp,
	torch::Tensor adj_x);
// Generated CPU kernel: forward joint-state integration.  One thread per joint:
// looks up the joint type and its q/qd start offsets, then delegates to
// jcalc_integrate_cpu_func to advance joint_q/joint_qd into the *_new buffers
// using joint_qdd and the timestep dt.
void eval_rigid_integrate_cpu_kernel_forward(
	int* var_joint_type,
	int* var_joint_q_start,
	int* var_joint_qd_start,
	float* var_joint_q,
	float* var_joint_qd,
	float* var_joint_qdd,
	float var_dt,
	float* var_joint_q_new,
	float* var_joint_qd_new)
{
    //---------
    // primal vars
    int var_0;      // thread id (joint index)
    int var_1;      // joint type
    int var_2;      // coordinate (q) start offset for this joint
    int var_3;      // dof (qd) start offset for this joint
    int var_4;      // return value of jcalc_integrate_cpu_func (unused)
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::load(var_joint_type, var_0);
    var_2 = df::load(var_joint_q_start, var_0);
    var_3 = df::load(var_joint_qd_start, var_0);
    var_4 = jcalc_integrate_cpu_func(var_1, var_joint_q, var_joint_qd, var_joint_qdd, var_2, var_3, var_dt, var_joint_q_new, var_joint_qd_new);
}
// Generated CPU kernel: backward (adjoint) pass of joint-state integration.
// Replays the forward pass, then propagates adjoints in strict reverse
// statement order (integrator first, then the three index loads).
void eval_rigid_integrate_cpu_kernel_backward(
	int* var_joint_type,
	int* var_joint_q_start,
	int* var_joint_qd_start,
	float* var_joint_q,
	float* var_joint_qd,
	float* var_joint_qdd,
	float var_dt,
	float* var_joint_q_new,
	float* var_joint_qd_new,
	int* adj_joint_type,
	int* adj_joint_q_start,
	int* adj_joint_qd_start,
	float* adj_joint_q,
	float* adj_joint_qd,
	float* adj_joint_qdd,
	float adj_dt,
	float* adj_joint_q_new,
	float* adj_joint_qd_new)
{
    //---------
    // primal vars
    int var_0;      // thread id (joint index)
    int var_1;      // joint type
    int var_2;      // coordinate (q) start offset
    int var_3;      // dof (qd) start offset
    int var_4;      // return value of jcalc_integrate_cpu_func (unused)
    //---------
    // dual vars (adjoints of the corresponding var_*)
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    int adj_4 = 0;
    //---------
    // forward
    var_0 = df::tid();
    var_1 = df::load(var_joint_type, var_0);
    var_2 = df::load(var_joint_q_start, var_0);
    var_3 = df::load(var_joint_qd_start, var_0);
    var_4 = jcalc_integrate_cpu_func(var_1, var_joint_q, var_joint_qd, var_joint_qdd, var_2, var_3, var_dt, var_joint_q_new, var_joint_qd_new);
    //---------
    // reverse
    adj_jcalc_integrate_cpu_func(var_1, var_joint_q, var_joint_qd, var_joint_qdd, var_2, var_3, var_dt, var_joint_q_new, var_joint_qd_new, adj_1, adj_joint_q, adj_joint_qd, adj_joint_qdd, adj_2, adj_3, adj_dt, adj_joint_q_new, adj_joint_qd_new, adj_4);
    df::adj_load(var_joint_qd_start, var_0, adj_joint_qd_start, adj_0, adj_3);
    df::adj_load(var_joint_q_start, var_0, adj_joint_q_start, adj_0, adj_2);
    df::adj_load(var_joint_type, var_0, adj_joint_type, adj_0, adj_1);
    return;
}
// Python entry points
// Serial CPU "launch" of the joint-integration forward kernel.
void eval_rigid_integrate_cpu_forward(int dim,
	torch::Tensor var_joint_type,
	torch::Tensor var_joint_q_start,
	torch::Tensor var_joint_qd_start,
	torch::Tensor var_joint_q,
	torch::Tensor var_joint_qd,
	torch::Tensor var_joint_qdd,
	float var_dt,
	torch::Tensor var_joint_q_new,
	torch::Tensor var_joint_qd_new)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        eval_rigid_integrate_cpu_kernel_forward(cast<int*>(var_joint_type),
                                                cast<int*>(var_joint_q_start),
                                                cast<int*>(var_joint_qd_start),
                                                cast<float*>(var_joint_q),
                                                cast<float*>(var_joint_qd),
                                                cast<float*>(var_joint_qdd),
                                                var_dt,
                                                cast<float*>(var_joint_q_new),
                                                cast<float*>(var_joint_qd_new));
    }
}
// Serial CPU "launch" of the joint-integration backward kernel.
void eval_rigid_integrate_cpu_backward(int dim,
	torch::Tensor var_joint_type,
	torch::Tensor var_joint_q_start,
	torch::Tensor var_joint_qd_start,
	torch::Tensor var_joint_q,
	torch::Tensor var_joint_qd,
	torch::Tensor var_joint_qdd,
	float var_dt,
	torch::Tensor var_joint_q_new,
	torch::Tensor var_joint_qd_new,
	torch::Tensor adj_joint_type,
	torch::Tensor adj_joint_q_start,
	torch::Tensor adj_joint_qd_start,
	torch::Tensor adj_joint_q,
	torch::Tensor adj_joint_qd,
	torch::Tensor adj_joint_qdd,
	float adj_dt,
	torch::Tensor adj_joint_q_new,
	torch::Tensor adj_joint_qd_new)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        eval_rigid_integrate_cpu_kernel_backward(cast<int*>(var_joint_type),
                                                 cast<int*>(var_joint_q_start),
                                                 cast<int*>(var_joint_qd_start),
                                                 cast<float*>(var_joint_q),
                                                 cast<float*>(var_joint_qd),
                                                 cast<float*>(var_joint_qdd),
                                                 var_dt,
                                                 cast<float*>(var_joint_q_new),
                                                 cast<float*>(var_joint_qd_new),
                                                 cast<int*>(adj_joint_type),
                                                 cast<int*>(adj_joint_q_start),
                                                 cast<int*>(adj_joint_qd_start),
                                                 cast<float*>(adj_joint_q),
                                                 cast<float*>(adj_joint_qd),
                                                 cast<float*>(adj_joint_qdd),
                                                 adj_dt,
                                                 cast<float*>(adj_joint_q_new),
                                                 cast<float*>(adj_joint_qd_new));
    }
}
// Python entry points
// NOTE(review): redundant re-declarations generated after the definitions above.
void eval_rigid_integrate_cpu_forward(int dim,
	torch::Tensor var_joint_type,
	torch::Tensor var_joint_q_start,
	torch::Tensor var_joint_qd_start,
	torch::Tensor var_joint_q,
	torch::Tensor var_joint_qd,
	torch::Tensor var_joint_qdd,
	float var_dt,
	torch::Tensor var_joint_q_new,
	torch::Tensor var_joint_qd_new);
void eval_rigid_integrate_cpu_backward(int dim,
	torch::Tensor var_joint_type,
	torch::Tensor var_joint_q_start,
	torch::Tensor var_joint_qd_start,
	torch::Tensor var_joint_q,
	torch::Tensor var_joint_qd,
	torch::Tensor var_joint_qdd,
	float var_dt,
	torch::Tensor var_joint_q_new,
	torch::Tensor var_joint_qd_new,
	torch::Tensor adj_joint_type,
	torch::Tensor adj_joint_q_start,
	torch::Tensor adj_joint_qd_start,
	torch::Tensor adj_joint_q,
	torch::Tensor adj_joint_qd,
	torch::Tensor adj_joint_qdd,
	float adj_dt,
	torch::Tensor adj_joint_q_new,
	torch::Tensor adj_joint_qd_new);
// Generated CPU kernel (SSA form): forward pass of a position-based spring
// solver.  One thread per spring: reads the two particle indices, forms the
// stretch constraint C = |xij| - rest_length, and scatters mass-weighted
// position corrections into var_delta via atomics.
// NOTE(review): no guard against a zero-length spring (var_19 == 0) or zero
// combined inverse mass (var_27 == 0) -- either would produce inf/NaN deltas.
void solve_springs_cpu_kernel_forward(
	df::float3* var_x,
	df::float3* var_v,
	float* var_invmass,
	int* var_spring_indices,
	float* var_spring_rest_lengths,
	float* var_spring_stiffness,
	float* var_spring_damping,
	float var_dt,
	df::float3* var_delta)
{
    //---------
    // primal vars
    int var_0;              // thread id (spring index)
    const int var_1 = 2;
    int var_2;
    const int var_3 = 0;
    int var_4;
    int var_5;              // particle index i = spring_indices[2*tid+0]
    int var_6;
    const int var_7 = 1;
    int var_8;
    int var_9;              // particle index j = spring_indices[2*tid+1]
    float var_10;           // stiffness ke
    float var_11;           // damping kd (loaded but never used below)
    float var_12;           // rest length
    df::float3 var_13;      // xi
    df::float3 var_14;      // xj
    df::float3 var_15;      // vi
    df::float3 var_16;      // vj
    df::float3 var_17;      // xij = xi - xj
    df::float3 var_18;      // vij = vi - vj
    float var_19;           // l = |xij|
    const float var_20 = 1.0;
    float var_21;           // 1/l
    df::float3 var_22;      // unit direction xij/l
    float var_23;           // C = l - rest
    float var_24;           // dC/dt = dir . vij (computed but never used below)
    float var_25;           // inverse mass of i
    float var_26;           // inverse mass of j
    float var_27;           // wsum = wi + wj
    float var_28;
    float var_29;
    float var_30;           // 1/(ke*dt*dt) -- compliance-like term, computed but never used
    float var_31;           // multiplier = C / wsum
    df::float3 var_32;      // dir * multiplier
    df::float3 var_33;      // correction applied to particle i
    df::float3 var_34;      // correction applied to particle j
    //---------
    // forward
    var_0 = df::tid();
    var_2 = df::mul(var_0, var_1);
    var_4 = df::add(var_2, var_3);
    var_5 = df::load(var_spring_indices, var_4);
    var_6 = df::mul(var_0, var_1);
    var_8 = df::add(var_6, var_7);
    var_9 = df::load(var_spring_indices, var_8);
    var_10 = df::load(var_spring_stiffness, var_0);
    var_11 = df::load(var_spring_damping, var_0);
    var_12 = df::load(var_spring_rest_lengths, var_0);
    var_13 = df::load(var_x, var_5);
    var_14 = df::load(var_x, var_9);
    var_15 = df::load(var_v, var_5);
    var_16 = df::load(var_v, var_9);
    var_17 = df::sub(var_13, var_14);
    var_18 = df::sub(var_15, var_16);
    var_19 = df::length(var_17);
    var_21 = df::div(var_20, var_19);
    var_22 = df::mul(var_17, var_21);
    var_23 = df::sub(var_19, var_12);
    var_24 = df::dot(var_22, var_18);
    var_25 = df::load(var_invmass, var_5);
    var_26 = df::load(var_invmass, var_9);
    var_27 = df::add(var_25, var_26);
    var_28 = df::mul(var_10, var_dt);
    var_29 = df::mul(var_28, var_dt);
    var_30 = df::div(var_20, var_29);
    var_31 = df::div(var_23, var_27);
    var_32 = df::mul(var_22, var_31);
    var_33 = df::mul(var_32, var_25);
    df::atomic_sub(var_delta, var_5, var_33);
    var_34 = df::mul(var_32, var_26);
    df::atomic_add(var_delta, var_9, var_34);
}
// Generated CPU kernel (SSA form): backward (adjoint) pass of the spring
// position solver.  Replays the forward computation, then accumulates
// adjoints through every primal statement in strict reverse order.
// The var_* temporaries carry the same meanings as in solve_springs_cpu_kernel_forward;
// each adj_N is the adjoint of var_N.
void solve_springs_cpu_kernel_backward(
	df::float3* var_x,
	df::float3* var_v,
	float* var_invmass,
	int* var_spring_indices,
	float* var_spring_rest_lengths,
	float* var_spring_stiffness,
	float* var_spring_damping,
	float var_dt,
	df::float3* var_delta,
	df::float3* adj_x,
	df::float3* adj_v,
	float* adj_invmass,
	int* adj_spring_indices,
	float* adj_spring_rest_lengths,
	float* adj_spring_stiffness,
	float* adj_spring_damping,
	float adj_dt,
	df::float3* adj_delta)
{
    //---------
    // primal vars
    int var_0;
    const int var_1 = 2;
    int var_2;
    const int var_3 = 0;
    int var_4;
    int var_5;
    int var_6;
    const int var_7 = 1;
    int var_8;
    int var_9;
    float var_10;
    float var_11;
    float var_12;
    df::float3 var_13;
    df::float3 var_14;
    df::float3 var_15;
    df::float3 var_16;
    df::float3 var_17;
    df::float3 var_18;
    float var_19;
    const float var_20 = 1.0;
    float var_21;
    df::float3 var_22;
    float var_23;
    float var_24;
    float var_25;
    float var_26;
    float var_27;
    float var_28;
    float var_29;
    float var_30;
    float var_31;
    df::float3 var_32;
    df::float3 var_33;
    df::float3 var_34;
    //---------
    // dual vars (zero-initialized adjoints of the corresponding var_*)
    int adj_0 = 0;
    int adj_1 = 0;
    int adj_2 = 0;
    int adj_3 = 0;
    int adj_4 = 0;
    int adj_5 = 0;
    int adj_6 = 0;
    int adj_7 = 0;
    int adj_8 = 0;
    int adj_9 = 0;
    float adj_10 = 0;
    float adj_11 = 0;
    float adj_12 = 0;
    df::float3 adj_13 = 0;
    df::float3 adj_14 = 0;
    df::float3 adj_15 = 0;
    df::float3 adj_16 = 0;
    df::float3 adj_17 = 0;
    df::float3 adj_18 = 0;
    float adj_19 = 0;
    float adj_20 = 0;
    float adj_21 = 0;
    df::float3 adj_22 = 0;
    float adj_23 = 0;
    float adj_24 = 0;
    float adj_25 = 0;
    float adj_26 = 0;
    float adj_27 = 0;
    float adj_28 = 0;
    float adj_29 = 0;
    float adj_30 = 0;
    float adj_31 = 0;
    df::float3 adj_32 = 0;
    df::float3 adj_33 = 0;
    df::float3 adj_34 = 0;
    //---------
    // forward (identical replay of the primal kernel)
    var_0 = df::tid();
    var_2 = df::mul(var_0, var_1);
    var_4 = df::add(var_2, var_3);
    var_5 = df::load(var_spring_indices, var_4);
    var_6 = df::mul(var_0, var_1);
    var_8 = df::add(var_6, var_7);
    var_9 = df::load(var_spring_indices, var_8);
    var_10 = df::load(var_spring_stiffness, var_0);
    var_11 = df::load(var_spring_damping, var_0);
    var_12 = df::load(var_spring_rest_lengths, var_0);
    var_13 = df::load(var_x, var_5);
    var_14 = df::load(var_x, var_9);
    var_15 = df::load(var_v, var_5);
    var_16 = df::load(var_v, var_9);
    var_17 = df::sub(var_13, var_14);
    var_18 = df::sub(var_15, var_16);
    var_19 = df::length(var_17);
    var_21 = df::div(var_20, var_19);
    var_22 = df::mul(var_17, var_21);
    var_23 = df::sub(var_19, var_12);
    var_24 = df::dot(var_22, var_18);
    var_25 = df::load(var_invmass, var_5);
    var_26 = df::load(var_invmass, var_9);
    var_27 = df::add(var_25, var_26);
    var_28 = df::mul(var_10, var_dt);
    var_29 = df::mul(var_28, var_dt);
    var_30 = df::div(var_20, var_29);
    var_31 = df::div(var_23, var_27);
    var_32 = df::mul(var_22, var_31);
    var_33 = df::mul(var_32, var_25);
    df::atomic_sub(var_delta, var_5, var_33);
    var_34 = df::mul(var_32, var_26);
    df::atomic_add(var_delta, var_9, var_34);
    //---------
    // reverse (adjoint of each forward statement, in reverse order)
    df::adj_atomic_add(var_delta, var_9, var_34, adj_delta, adj_9, adj_34);
    df::adj_mul(var_32, var_26, adj_32, adj_26, adj_34);
    df::adj_atomic_sub(var_delta, var_5, var_33, adj_delta, adj_5, adj_33);
    df::adj_mul(var_32, var_25, adj_32, adj_25, adj_33);
    df::adj_mul(var_22, var_31, adj_22, adj_31, adj_32);
    df::adj_div(var_23, var_27, adj_23, adj_27, adj_31);
    df::adj_div(var_20, var_29, adj_20, adj_29, adj_30);
    df::adj_mul(var_28, var_dt, adj_28, adj_dt, adj_29);
    df::adj_mul(var_10, var_dt, adj_10, adj_dt, adj_28);
    df::adj_add(var_25, var_26, adj_25, adj_26, adj_27);
    df::adj_load(var_invmass, var_9, adj_invmass, adj_9, adj_26);
    df::adj_load(var_invmass, var_5, adj_invmass, adj_5, adj_25);
    df::adj_dot(var_22, var_18, adj_22, adj_18, adj_24);
    df::adj_sub(var_19, var_12, adj_19, adj_12, adj_23);
    df::adj_mul(var_17, var_21, adj_17, adj_21, adj_22);
    df::adj_div(var_20, var_19, adj_20, adj_19, adj_21);
    df::adj_length(var_17, adj_17, adj_19);
    df::adj_sub(var_15, var_16, adj_15, adj_16, adj_18);
    df::adj_sub(var_13, var_14, adj_13, adj_14, adj_17);
    df::adj_load(var_v, var_9, adj_v, adj_9, adj_16);
    df::adj_load(var_v, var_5, adj_v, adj_5, adj_15);
    df::adj_load(var_x, var_9, adj_x, adj_9, adj_14);
    df::adj_load(var_x, var_5, adj_x, adj_5, adj_13);
    df::adj_load(var_spring_rest_lengths, var_0, adj_spring_rest_lengths, adj_0, adj_12);
    df::adj_load(var_spring_damping, var_0, adj_spring_damping, adj_0, adj_11);
    df::adj_load(var_spring_stiffness, var_0, adj_spring_stiffness, adj_0, adj_10);
    df::adj_load(var_spring_indices, var_8, adj_spring_indices, adj_8, adj_9);
    df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
    df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
    df::adj_load(var_spring_indices, var_4, adj_spring_indices, adj_4, adj_5);
    df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
    df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
    return;
}
// Python entry points
// Serial CPU "launch" of the spring-solver forward kernel.
void solve_springs_cpu_forward(int dim,
	torch::Tensor var_x,
	torch::Tensor var_v,
	torch::Tensor var_invmass,
	torch::Tensor var_spring_indices,
	torch::Tensor var_spring_rest_lengths,
	torch::Tensor var_spring_stiffness,
	torch::Tensor var_spring_damping,
	float var_dt,
	torch::Tensor var_delta)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        solve_springs_cpu_kernel_forward(cast<df::float3*>(var_x),
                                         cast<df::float3*>(var_v),
                                         cast<float*>(var_invmass),
                                         cast<int*>(var_spring_indices),
                                         cast<float*>(var_spring_rest_lengths),
                                         cast<float*>(var_spring_stiffness),
                                         cast<float*>(var_spring_damping),
                                         var_dt,
                                         cast<df::float3*>(var_delta));
    }
}
// Serial CPU "launch" of the spring-solver backward kernel.
void solve_springs_cpu_backward(int dim,
	torch::Tensor var_x,
	torch::Tensor var_v,
	torch::Tensor var_invmass,
	torch::Tensor var_spring_indices,
	torch::Tensor var_spring_rest_lengths,
	torch::Tensor var_spring_stiffness,
	torch::Tensor var_spring_damping,
	float var_dt,
	torch::Tensor var_delta,
	torch::Tensor adj_x,
	torch::Tensor adj_v,
	torch::Tensor adj_invmass,
	torch::Tensor adj_spring_indices,
	torch::Tensor adj_spring_rest_lengths,
	torch::Tensor adj_spring_stiffness,
	torch::Tensor adj_spring_damping,
	float adj_dt,
	torch::Tensor adj_delta)
{
    for (int tid = 0; tid < dim; ++tid) {
        // s_threadIdx is the emulated thread id, presumably read back via df::tid().
        s_threadIdx = tid;
        solve_springs_cpu_kernel_backward(cast<df::float3*>(var_x),
                                          cast<df::float3*>(var_v),
                                          cast<float*>(var_invmass),
                                          cast<int*>(var_spring_indices),
                                          cast<float*>(var_spring_rest_lengths),
                                          cast<float*>(var_spring_stiffness),
                                          cast<float*>(var_spring_damping),
                                          var_dt,
                                          cast<df::float3*>(var_delta),
                                          cast<df::float3*>(adj_x),
                                          cast<df::float3*>(adj_v),
                                          cast<float*>(adj_invmass),
                                          cast<int*>(adj_spring_indices),
                                          cast<float*>(adj_spring_rest_lengths),
                                          cast<float*>(adj_spring_stiffness),
                                          cast<float*>(adj_spring_damping),
                                          adj_dt,
                                          cast<df::float3*>(adj_delta));
    }
}
// Python entry points
// NOTE(review): redundant re-declarations generated after the definitions above.
void solve_springs_cpu_forward(int dim,
	torch::Tensor var_x,
	torch::Tensor var_v,
	torch::Tensor var_invmass,
	torch::Tensor var_spring_indices,
	torch::Tensor var_spring_rest_lengths,
	torch::Tensor var_spring_stiffness,
	torch::Tensor var_spring_damping,
	float var_dt,
	torch::Tensor var_delta);
void solve_springs_cpu_backward(int dim,
	torch::Tensor var_x,
	torch::Tensor var_v,
	torch::Tensor var_invmass,
	torch::Tensor var_spring_indices,
	torch::Tensor var_spring_rest_lengths,
	torch::Tensor var_spring_stiffness,
	torch::Tensor var_spring_damping,
	float var_dt,
	torch::Tensor var_delta,
	torch::Tensor adj_x,
	torch::Tensor adj_v,
	torch::Tensor adj_invmass,
	torch::Tensor adj_spring_indices,
	torch::Tensor adj_spring_rest_lengths,
	torch::Tensor adj_spring_stiffness,
	torch::Tensor adj_spring_damping,
	float adj_dt,
	torch::Tensor adj_delta);
// Generated CPU kernel (SSA form): forward pass of a position-based tetrahedral
// FEM solver.  One thread per tetrahedron: loads the four particle indices,
// builds the deformed edge matrix, forms var_53 = edges * pose (pose is
// presumably the inverse rest-shape matrix, making var_53 the deformation
// gradient -- TODO confirm), then applies a deviatoric stretch constraint and a
// determinant (volume) constraint, scattering mass- and relaxation-weighted
// position corrections into var_delta via atomics.
// NOTE(review): var_18 (activation) and var_27 (third material parameter) are
// loaded but never used in this generated body; var_30, var_101-related terms
// use the sign convention of the generator's select().
void solve_tetrahedra_cpu_kernel_forward(
	df::float3* var_x,
	df::float3* var_v,
	float* var_inv_mass,
	int* var_indices,
	mat33* var_pose,
	float* var_activation,
	float* var_materials,
	float var_dt,
	float var_relaxation,
	df::float3* var_delta)
{
    //---------
    // primal vars
    int var_0;              // thread id (tet index)
    const int var_1 = 4;
    int var_2;
    const int var_3 = 0;
    int var_4;
    int var_5;              // particle index i = indices[4*tid+0]
    int var_6;
    const int var_7 = 1;
    int var_8;
    int var_9;              // particle index j = indices[4*tid+1]
    int var_10;
    const int var_11 = 2;
    int var_12;
    int var_13;             // particle index k = indices[4*tid+2]
    int var_14;
    const int var_15 = 3;
    int var_16;
    int var_17;             // particle index l = indices[4*tid+3]
    float var_18;           // activation (loaded but unused below)
    int var_19;
    int var_20;
    float var_21;           // materials[3*tid+0] -- presumably k_mu
    int var_22;
    int var_23;
    float var_24;           // materials[3*tid+1] -- presumably k_lambda
    int var_25;
    int var_26;
    float var_27;           // materials[3*tid+2] (loaded but unused below)
    df::float3 var_28;      // positions of the four particles
    df::float3 var_29;
    df::float3 var_30;
    df::float3 var_31;
    df::float3 var_32;      // velocities of the four particles
    df::float3 var_33;
    df::float3 var_34;
    df::float3 var_35;
    float var_36;           // inverse masses of the four particles
    float var_37;
    float var_38;
    float var_39;
    df::float3 var_40;      // edge vectors relative to particle i
    df::float3 var_41;
    df::float3 var_42;
    df::float3 var_43;      // relative velocities (unused below)
    df::float3 var_44;
    df::float3 var_45;
    mat33 var_46;           // deformed edge matrix
    mat33 var_47;           // rest pose matrix for this tet
    float var_48;           // det(pose)
    const float var_49 = 6.0;
    float var_50;           // 6*det(pose)
    const float var_51 = 1.0;
    float var_52;           // 1/(6*det(pose))
    mat33 var_53;           // edges * pose
    float var_54;           // columns of var_53 unpacked below
    float var_55;
    float var_56;
    df::float3 var_57;
    float var_58;
    float var_59;
    float var_60;
    df::float3 var_61;
    float var_62;
    float var_63;
    float var_64;
    df::float3 var_65;
    float var_66;
    float var_67;
    float var_68;
    float var_69;
    float var_70;           // sum of squared column norms (trace invariant)
    const float var_71 = 3.0;
    float var_72;           // invariant - 3
    float var_73;
    float var_74;           // r_s = sqrt(|invariant - 3|)
    const float var_75 = 0.0;
    bool var_76;
    bool var_77;
    float var_78;
    float var_79;           // signed r_s (negated when invariant < 3)
    mat33 var_80;
    mat33 var_81;
    float var_82;
    mat33 var_83;           // constraint gradient matrix (scaled)
    float var_84;           // var_21/var_24
    float var_85;           // 1 + var_21/var_24 -- rest-volume target multiplier
    float var_86;           // columns of var_83 unpacked below
    float var_87;
    float var_88;
    df::float3 var_89;
    float var_90;
    float var_91;
    float var_92;
    df::float3 var_93;
    float var_94;
    float var_95;
    float var_96;
    df::float3 var_97;
    df::float3 var_98;
    df::float3 var_99;
    float var_100;
    df::float3 var_101;     // gradient w.r.t. particle i (negated column sum)
    float var_102;
    float var_103;
    float var_104;
    float var_105;
    float var_106;
    float var_107;
    float var_108;
    float var_109;
    float var_110;
    float var_111;
    float var_112;          // weighted squared gradient sum (denominator)
    float var_113;
    float var_114;
    float var_115;
    float var_116;          // compliance-like term 1/(var_21*dt*dt*var_52)
    float var_117;
    float var_118;          // deviatoric multiplier
    df::float3 var_119;     // deviatoric deltas for the four particles
    df::float3 var_120;
    df::float3 var_121;
    df::float3 var_122;
    float var_123;          // det(var_53)
    float var_124;          // volume constraint value det - var_85
    float var_125;
    df::float3 var_126;     // volume-constraint gradients via cross products
    df::float3 var_127;
    df::float3 var_128;
    df::float3 var_129;
    df::float3 var_130;
    df::float3 var_131;
    df::float3 var_132;
    df::float3 var_133;
    float var_134;
    df::float3 var_135;     // volume gradient w.r.t. particle i
    float var_136;
    float var_137;
    float var_138;
    float var_139;
    float var_140;
    float var_141;
    float var_142;
    float var_143;
    float var_144;
    float var_145;
    float var_146;          // weighted squared gradient sum (denominator)
    float var_147;
    float var_148;
    float var_149;
    float var_150;          // compliance-like term 1/(var_24*dt*dt*var_52)
    float var_151;
    float var_152;          // volume multiplier
    df::float3 var_153;     // combined (deviatoric + volume) deltas
    df::float3 var_154;
    df::float3 var_155;
    df::float3 var_156;
    df::float3 var_157;
    df::float3 var_158;
    df::float3 var_159;
    df::float3 var_160;
    df::float3 var_161;     // final per-particle corrections (mass- and relaxation-scaled)
    df::float3 var_162;
    df::float3 var_163;
    df::float3 var_164;
    df::float3 var_165;
    df::float3 var_166;
    df::float3 var_167;
    df::float3 var_168;
    //---------
    // forward
    // load the four particle indices of this tetrahedron
    var_0 = df::tid();
    var_2 = df::mul(var_0, var_1);
    var_4 = df::add(var_2, var_3);
    var_5 = df::load(var_indices, var_4);
    var_6 = df::mul(var_0, var_1);
    var_8 = df::add(var_6, var_7);
    var_9 = df::load(var_indices, var_8);
    var_10 = df::mul(var_0, var_1);
    var_12 = df::add(var_10, var_11);
    var_13 = df::load(var_indices, var_12);
    var_14 = df::mul(var_0, var_1);
    var_16 = df::add(var_14, var_15);
    var_17 = df::load(var_indices, var_16);
    // load activation and the three per-tet material parameters (stride 3)
    var_18 = df::load(var_activation, var_0);
    var_19 = df::mul(var_0, var_15);
    var_20 = df::add(var_19, var_3);
    var_21 = df::load(var_materials, var_20);
    var_22 = df::mul(var_0, var_15);
    var_23 = df::add(var_22, var_7);
    var_24 = df::load(var_materials, var_23);
    var_25 = df::mul(var_0, var_15);
    var_26 = df::add(var_25, var_11);
    var_27 = df::load(var_materials, var_26);
    // load particle state
    var_28 = df::load(var_x, var_5);
    var_29 = df::load(var_x, var_9);
    var_30 = df::load(var_x, var_13);
    var_31 = df::load(var_x, var_17);
    var_32 = df::load(var_v, var_5);
    var_33 = df::load(var_v, var_9);
    var_34 = df::load(var_v, var_13);
    var_35 = df::load(var_v, var_17);
    var_36 = df::load(var_inv_mass, var_5);
    var_37 = df::load(var_inv_mass, var_9);
    var_38 = df::load(var_inv_mass, var_13);
    var_39 = df::load(var_inv_mass, var_17);
    // edge vectors and relative velocities w.r.t. particle i
    var_40 = df::sub(var_29, var_28);
    var_41 = df::sub(var_30, var_28);
    var_42 = df::sub(var_31, var_28);
    var_43 = df::sub(var_33, var_32);
    var_44 = df::sub(var_34, var_32);
    var_45 = df::sub(var_35, var_32);
    // deformed edge matrix and (presumed) deformation gradient var_53
    var_46 = df::mat33(var_40, var_41, var_42);
    var_47 = df::load(var_pose, var_0);
    var_48 = df::determinant(var_47);
    var_50 = df::mul(var_48, var_49);
    var_52 = df::div(var_51, var_50);
    var_53 = df::mul(var_46, var_47);
    // unpack the three columns of var_53
    var_54 = df::index(var_53, var_3, var_3);
    var_55 = df::index(var_53, var_7, var_3);
    var_56 = df::index(var_53, var_11, var_3);
    var_57 = df::float3(var_54, var_55, var_56);
    var_58 = df::index(var_53, var_3, var_7);
    var_59 = df::index(var_53, var_7, var_7);
    var_60 = df::index(var_53, var_11, var_7);
    var_61 = df::float3(var_58, var_59, var_60);
    var_62 = df::index(var_53, var_3, var_11);
    var_63 = df::index(var_53, var_7, var_11);
    var_64 = df::index(var_53, var_11, var_11);
    var_65 = df::float3(var_62, var_63, var_64);
    // deviatoric invariant and its square-rooted residual
    var_66 = df::dot(var_57, var_57);
    var_67 = df::dot(var_61, var_61);
    var_68 = df::add(var_66, var_67);
    var_69 = df::dot(var_65, var_65);
    var_70 = df::add(var_68, var_69);
    var_72 = df::sub(var_70, var_71);
    var_73 = df::abs(var_72);
    var_74 = df::sqrt(var_73);
    // early-out: no deviatoric residual
    var_76 = (var_74 == var_75);
    if (var_76) {
    	return;
    }
    var_77 = (var_70 < var_71);
    if (var_77) {
    	var_78 = df::sub(var_75, var_74);
    }
    var_79 = df::select(var_77, var_74, var_78);
    // gradient of the deviatoric constraint
    var_80 = df::transpose(var_47);
    var_81 = df::mul(var_53, var_80);
    var_82 = df::div(var_51, var_79);
    var_83 = df::mul(var_81, var_82);
    var_84 = df::div(var_21, var_24);
    var_85 = df::add(var_51, var_84);
    var_86 = df::index(var_83, var_3, var_3);
    var_87 = df::index(var_83, var_7, var_3);
    var_88 = df::index(var_83, var_11, var_3);
    var_89 = df::float3(var_86, var_87, var_88);
    var_90 = df::index(var_83, var_3, var_7);
    var_91 = df::index(var_83, var_7, var_7);
    var_92 = df::index(var_83, var_11, var_7);
    var_93 = df::float3(var_90, var_91, var_92);
    var_94 = df::index(var_83, var_3, var_11);
    var_95 = df::index(var_83, var_7, var_11);
    var_96 = df::index(var_83, var_11, var_11);
    var_97 = df::float3(var_94, var_95, var_96);
    var_98 = df::add(var_89, var_93);
    var_99 = df::add(var_98, var_97);
    var_100 = df::sub(var_75, var_51);
    var_101 = df::mul(var_99, var_100);
    // denominator: mass-weighted squared gradient norms + compliance term
    var_102 = df::dot(var_101, var_101);
    var_103 = df::mul(var_102, var_36);
    var_104 = df::dot(var_89, var_89);
    var_105 = df::mul(var_104, var_37);
    var_106 = df::add(var_103, var_105);
    var_107 = df::dot(var_93, var_93);
    var_108 = df::mul(var_107, var_38);
    var_109 = df::add(var_106, var_108);
    var_110 = df::dot(var_97, var_97);
    var_111 = df::mul(var_110, var_39);
    var_112 = df::add(var_109, var_111);
    var_113 = df::mul(var_21, var_dt);
    var_114 = df::mul(var_113, var_dt);
    var_115 = df::mul(var_114, var_52);
    var_116 = df::div(var_51, var_115);
    var_117 = df::add(var_112, var_116);
    var_118 = df::div(var_74, var_117);
    var_119 = df::mul(var_101, var_118);
    var_120 = df::mul(var_89, var_118);
    var_121 = df::mul(var_93, var_118);
    var_122 = df::mul(var_97, var_118);
    // volume (determinant) constraint: C = det(var_53) - var_85
    var_123 = df::determinant(var_53);
    var_124 = df::sub(var_123, var_85);
    var_125 = df::div(var_50, var_49);
    var_126 = df::cross(var_41, var_42);
    var_127 = df::mul(var_126, var_125);
    var_128 = df::cross(var_42, var_40);
    var_129 = df::mul(var_128, var_125);
    var_130 = df::cross(var_40, var_41);
    var_131 = df::mul(var_130, var_125);
    var_132 = df::add(var_127, var_129);
    var_133 = df::add(var_132, var_131);
    var_134 = df::sub(var_75, var_51);
    var_135 = df::mul(var_133, var_134);
    var_136 = df::dot(var_135, var_135);
    var_137 = df::mul(var_136, var_36);
    var_138 = df::dot(var_127, var_127);
    var_139 = df::mul(var_138, var_37);
    var_140 = df::add(var_137, var_139);
    var_141 = df::dot(var_129, var_129);
    var_142 = df::mul(var_141, var_38);
    var_143 = df::add(var_140, var_142);
    var_144 = df::dot(var_131, var_131);
    var_145 = df::mul(var_144, var_39);
    var_146 = df::add(var_143, var_145);
    var_147 = df::mul(var_24, var_dt);
    var_148 = df::mul(var_147, var_dt);
    var_149 = df::mul(var_148, var_52);
    var_150 = df::div(var_51, var_149);
    var_151 = df::add(var_146, var_150);
    var_152 = df::div(var_124, var_151);
    // combine deviatoric + volume deltas and scatter (mass- and relaxation-weighted)
    var_153 = df::mul(var_135, var_152);
    var_154 = df::add(var_119, var_153);
    var_155 = df::mul(var_127, var_152);
    var_156 = df::add(var_120, var_155);
    var_157 = df::mul(var_129, var_152);
    var_158 = df::add(var_121, var_157);
    var_159 = df::mul(var_131, var_152);
    var_160 = df::add(var_122, var_159);
    var_161 = df::mul(var_154, var_36);
    var_162 = df::mul(var_161, var_relaxation);
    df::atomic_sub(var_delta, var_5, var_162);
    var_163 = df::mul(var_156, var_37);
    var_164 = df::mul(var_163, var_relaxation);
    df::atomic_sub(var_delta, var_9, var_164);
    var_165 = df::mul(var_158, var_38);
    var_166 = df::mul(var_165, var_relaxation);
    df::atomic_sub(var_delta, var_13, var_166);
    var_167 = df::mul(var_160, var_39);
    var_168 = df::mul(var_167, var_relaxation);
    df::atomic_sub(var_delta, var_17, var_168);
}
// Reverse-mode (adjoint) kernel for solve_tetrahedra, emitted by the dflex
// adjoint code generator.  It first re-runs the forward pass to rebuild every
// intermediate (var_*), then sweeps the same statements in exactly reversed
// order, accumulating gradients into the matching adj_* duals.
// NOTE(review): do not hand-edit the arithmetic — the reverse section must
// mirror the forward section statement for statement; regenerate instead.
void solve_tetrahedra_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
float* var_inv_mass,
int* var_indices,
mat33* var_pose,
float* var_activation,
float* var_materials,
float var_dt,
float var_relaxation,
df::float3* var_delta,
df::float3* adj_x,
df::float3* adj_v,
float* adj_inv_mass,
int* adj_indices,
mat33* adj_pose,
float* adj_activation,
float* adj_materials,
float adj_dt,
float adj_relaxation,
df::float3* adj_delta)
{
//---------
// primal vars (recomputed forward intermediates)
int var_0;
const int var_1 = 4;
int var_2;
const int var_3 = 0;
int var_4;
int var_5;
int var_6;
const int var_7 = 1;
int var_8;
int var_9;
int var_10;
const int var_11 = 2;
int var_12;
int var_13;
int var_14;
const int var_15 = 3;
int var_16;
int var_17;
float var_18;
int var_19;
int var_20;
float var_21;
int var_22;
int var_23;
float var_24;
int var_25;
int var_26;
float var_27;
df::float3 var_28;
df::float3 var_29;
df::float3 var_30;
df::float3 var_31;
df::float3 var_32;
df::float3 var_33;
df::float3 var_34;
df::float3 var_35;
float var_36;
float var_37;
float var_38;
float var_39;
df::float3 var_40;
df::float3 var_41;
df::float3 var_42;
df::float3 var_43;
df::float3 var_44;
df::float3 var_45;
mat33 var_46;
mat33 var_47;
float var_48;
const float var_49 = 6.0;
float var_50;
const float var_51 = 1.0;
float var_52;
mat33 var_53;
float var_54;
float var_55;
float var_56;
df::float3 var_57;
float var_58;
float var_59;
float var_60;
df::float3 var_61;
float var_62;
float var_63;
float var_64;
df::float3 var_65;
float var_66;
float var_67;
float var_68;
float var_69;
float var_70;
const float var_71 = 3.0;
float var_72;
float var_73;
float var_74;
const float var_75 = 0.0;
bool var_76;
bool var_77;
float var_78;
float var_79;
mat33 var_80;
mat33 var_81;
float var_82;
mat33 var_83;
float var_84;
float var_85;
float var_86;
float var_87;
float var_88;
df::float3 var_89;
float var_90;
float var_91;
float var_92;
df::float3 var_93;
float var_94;
float var_95;
float var_96;
df::float3 var_97;
df::float3 var_98;
df::float3 var_99;
float var_100;
df::float3 var_101;
float var_102;
float var_103;
float var_104;
float var_105;
float var_106;
float var_107;
float var_108;
float var_109;
float var_110;
float var_111;
float var_112;
float var_113;
float var_114;
float var_115;
float var_116;
float var_117;
float var_118;
df::float3 var_119;
df::float3 var_120;
df::float3 var_121;
df::float3 var_122;
float var_123;
float var_124;
float var_125;
df::float3 var_126;
df::float3 var_127;
df::float3 var_128;
df::float3 var_129;
df::float3 var_130;
df::float3 var_131;
df::float3 var_132;
df::float3 var_133;
float var_134;
df::float3 var_135;
float var_136;
float var_137;
float var_138;
float var_139;
float var_140;
float var_141;
float var_142;
float var_143;
float var_144;
float var_145;
float var_146;
float var_147;
float var_148;
float var_149;
float var_150;
float var_151;
float var_152;
df::float3 var_153;
df::float3 var_154;
df::float3 var_155;
df::float3 var_156;
df::float3 var_157;
df::float3 var_158;
df::float3 var_159;
df::float3 var_160;
df::float3 var_161;
df::float3 var_162;
df::float3 var_163;
df::float3 var_164;
df::float3 var_165;
df::float3 var_166;
df::float3 var_167;
df::float3 var_168;
//---------
// dual vars (gradient accumulators, zero-initialised)
int adj_0 = 0;
int adj_1 = 0;
int adj_2 = 0;
int adj_3 = 0;
int adj_4 = 0;
int adj_5 = 0;
int adj_6 = 0;
int adj_7 = 0;
int adj_8 = 0;
int adj_9 = 0;
int adj_10 = 0;
int adj_11 = 0;
int adj_12 = 0;
int adj_13 = 0;
int adj_14 = 0;
int adj_15 = 0;
int adj_16 = 0;
int adj_17 = 0;
float adj_18 = 0;
int adj_19 = 0;
int adj_20 = 0;
float adj_21 = 0;
int adj_22 = 0;
int adj_23 = 0;
float adj_24 = 0;
int adj_25 = 0;
int adj_26 = 0;
float adj_27 = 0;
df::float3 adj_28 = 0;
df::float3 adj_29 = 0;
df::float3 adj_30 = 0;
df::float3 adj_31 = 0;
df::float3 adj_32 = 0;
df::float3 adj_33 = 0;
df::float3 adj_34 = 0;
df::float3 adj_35 = 0;
float adj_36 = 0;
float adj_37 = 0;
float adj_38 = 0;
float adj_39 = 0;
df::float3 adj_40 = 0;
df::float3 adj_41 = 0;
df::float3 adj_42 = 0;
df::float3 adj_43 = 0;
df::float3 adj_44 = 0;
df::float3 adj_45 = 0;
mat33 adj_46 = 0;
mat33 adj_47 = 0;
float adj_48 = 0;
float adj_49 = 0;
float adj_50 = 0;
float adj_51 = 0;
float adj_52 = 0;
mat33 adj_53 = 0;
float adj_54 = 0;
float adj_55 = 0;
float adj_56 = 0;
df::float3 adj_57 = 0;
float adj_58 = 0;
float adj_59 = 0;
float adj_60 = 0;
df::float3 adj_61 = 0;
float adj_62 = 0;
float adj_63 = 0;
float adj_64 = 0;
df::float3 adj_65 = 0;
float adj_66 = 0;
float adj_67 = 0;
float adj_68 = 0;
float adj_69 = 0;
float adj_70 = 0;
float adj_71 = 0;
float adj_72 = 0;
float adj_73 = 0;
float adj_74 = 0;
float adj_75 = 0;
bool adj_76 = 0;
bool adj_77 = 0;
float adj_78 = 0;
float adj_79 = 0;
mat33 adj_80 = 0;
mat33 adj_81 = 0;
float adj_82 = 0;
mat33 adj_83 = 0;
float adj_84 = 0;
float adj_85 = 0;
float adj_86 = 0;
float adj_87 = 0;
float adj_88 = 0;
df::float3 adj_89 = 0;
float adj_90 = 0;
float adj_91 = 0;
float adj_92 = 0;
df::float3 adj_93 = 0;
float adj_94 = 0;
float adj_95 = 0;
float adj_96 = 0;
df::float3 adj_97 = 0;
df::float3 adj_98 = 0;
df::float3 adj_99 = 0;
float adj_100 = 0;
df::float3 adj_101 = 0;
float adj_102 = 0;
float adj_103 = 0;
float adj_104 = 0;
float adj_105 = 0;
float adj_106 = 0;
float adj_107 = 0;
float adj_108 = 0;
float adj_109 = 0;
float adj_110 = 0;
float adj_111 = 0;
float adj_112 = 0;
float adj_113 = 0;
float adj_114 = 0;
float adj_115 = 0;
float adj_116 = 0;
float adj_117 = 0;
float adj_118 = 0;
df::float3 adj_119 = 0;
df::float3 adj_120 = 0;
df::float3 adj_121 = 0;
df::float3 adj_122 = 0;
float adj_123 = 0;
float adj_124 = 0;
float adj_125 = 0;
df::float3 adj_126 = 0;
df::float3 adj_127 = 0;
df::float3 adj_128 = 0;
df::float3 adj_129 = 0;
df::float3 adj_130 = 0;
df::float3 adj_131 = 0;
df::float3 adj_132 = 0;
df::float3 adj_133 = 0;
float adj_134 = 0;
df::float3 adj_135 = 0;
float adj_136 = 0;
float adj_137 = 0;
float adj_138 = 0;
float adj_139 = 0;
float adj_140 = 0;
float adj_141 = 0;
float adj_142 = 0;
float adj_143 = 0;
float adj_144 = 0;
float adj_145 = 0;
float adj_146 = 0;
float adj_147 = 0;
float adj_148 = 0;
float adj_149 = 0;
float adj_150 = 0;
float adj_151 = 0;
float adj_152 = 0;
df::float3 adj_153 = 0;
df::float3 adj_154 = 0;
df::float3 adj_155 = 0;
df::float3 adj_156 = 0;
df::float3 adj_157 = 0;
df::float3 adj_158 = 0;
df::float3 adj_159 = 0;
df::float3 adj_160 = 0;
df::float3 adj_161 = 0;
df::float3 adj_162 = 0;
df::float3 adj_163 = 0;
df::float3 adj_164 = 0;
df::float3 adj_165 = 0;
df::float3 adj_166 = 0;
df::float3 adj_167 = 0;
df::float3 adj_168 = 0;
//---------
// forward
// Re-run the primal computation so the reverse sweep can reference it.
// Indices of the tet's four particles: indices[4*tid + k], k = 0..3.
var_0 = df::tid();
var_2 = df::mul(var_0, var_1);
var_4 = df::add(var_2, var_3);
var_5 = df::load(var_indices, var_4);
var_6 = df::mul(var_0, var_1);
var_8 = df::add(var_6, var_7);
var_9 = df::load(var_indices, var_8);
var_10 = df::mul(var_0, var_1);
var_12 = df::add(var_10, var_11);
var_13 = df::load(var_indices, var_12);
var_14 = df::mul(var_0, var_1);
var_16 = df::add(var_14, var_15);
var_17 = df::load(var_indices, var_16);
// per-tet activation and the three material parameters (materials[3*tid + k])
var_18 = df::load(var_activation, var_0);
var_19 = df::mul(var_0, var_15);
var_20 = df::add(var_19, var_3);
var_21 = df::load(var_materials, var_20);
var_22 = df::mul(var_0, var_15);
var_23 = df::add(var_22, var_7);
var_24 = df::load(var_materials, var_23);
var_25 = df::mul(var_0, var_15);
var_26 = df::add(var_25, var_11);
var_27 = df::load(var_materials, var_26);
// gather particle positions, velocities and inverse masses
var_28 = df::load(var_x, var_5);
var_29 = df::load(var_x, var_9);
var_30 = df::load(var_x, var_13);
var_31 = df::load(var_x, var_17);
var_32 = df::load(var_v, var_5);
var_33 = df::load(var_v, var_9);
var_34 = df::load(var_v, var_13);
var_35 = df::load(var_v, var_17);
var_36 = df::load(var_inv_mass, var_5);
var_37 = df::load(var_inv_mass, var_9);
var_38 = df::load(var_inv_mass, var_13);
var_39 = df::load(var_inv_mass, var_17);
// deformed edge vectors (and relative velocities) of the tetrahedron
var_40 = df::sub(var_29, var_28);
var_41 = df::sub(var_30, var_28);
var_42 = df::sub(var_31, var_28);
var_43 = df::sub(var_33, var_32);
var_44 = df::sub(var_34, var_32);
var_45 = df::sub(var_35, var_32);
// deformation gradient F = Ds * pose
// NOTE(review): pose is presumably the inverse rest-shape matrix — confirm
// against the model-construction code.
var_46 = df::mat33(var_40, var_41, var_42);
var_47 = df::load(var_pose, var_0);
var_48 = df::determinant(var_47);
var_50 = df::mul(var_48, var_49);
var_52 = df::div(var_51, var_50);
var_53 = df::mul(var_46, var_47);
// columns of F
var_54 = df::index(var_53, var_3, var_3);
var_55 = df::index(var_53, var_7, var_3);
var_56 = df::index(var_53, var_11, var_3);
var_57 = df::float3(var_54, var_55, var_56);
var_58 = df::index(var_53, var_3, var_7);
var_59 = df::index(var_53, var_7, var_7);
var_60 = df::index(var_53, var_11, var_7);
var_61 = df::float3(var_58, var_59, var_60);
var_62 = df::index(var_53, var_3, var_11);
var_63 = df::index(var_53, var_7, var_11);
var_64 = df::index(var_53, var_11, var_11);
var_65 = df::float3(var_62, var_63, var_64);
// deviatoric constraint magnitude: var_74 = sqrt(|tr(F^T F) - 3|)
var_66 = df::dot(var_57, var_57);
var_67 = df::dot(var_61, var_61);
var_68 = df::add(var_66, var_67);
var_69 = df::dot(var_65, var_65);
var_70 = df::add(var_68, var_69);
var_72 = df::sub(var_70, var_71);
var_73 = df::abs(var_72);
var_74 = df::sqrt(var_73);
// constraint already satisfied: the forward kernel returned here, so the
// reverse sweep jumps straight to label0 (gradients all stay zero)
var_76 = (var_74 == var_75);
if (var_76) {
goto label0;
}
var_77 = (var_70 < var_71);
if (var_77) {
var_78 = df::sub(var_75, var_74);
}
var_79 = df::select(var_77, var_74, var_78);
// constraint gradient matrix (F * pose^T, normalised by the constraint value)
var_80 = df::transpose(var_47);
var_81 = df::mul(var_53, var_80);
var_82 = df::div(var_51, var_79);
var_83 = df::mul(var_81, var_82);
var_84 = df::div(var_21, var_24);
var_85 = df::add(var_51, var_84);
var_86 = df::index(var_83, var_3, var_3);
var_87 = df::index(var_83, var_7, var_3);
var_88 = df::index(var_83, var_11, var_3);
var_89 = df::float3(var_86, var_87, var_88);
var_90 = df::index(var_83, var_3, var_7);
var_91 = df::index(var_83, var_7, var_7);
var_92 = df::index(var_83, var_11, var_7);
var_93 = df::float3(var_90, var_91, var_92);
var_94 = df::index(var_83, var_3, var_11);
var_95 = df::index(var_83, var_7, var_11);
var_96 = df::index(var_83, var_11, var_11);
var_97 = df::float3(var_94, var_95, var_96);
var_98 = df::add(var_89, var_93);
var_99 = df::add(var_98, var_97);
var_100 = df::sub(var_75, var_51);
var_101 = df::mul(var_99, var_100);
// XPBD denominator: sum_i w_i |grad_i|^2 + compliance / (k * dt^2)
var_102 = df::dot(var_101, var_101);
var_103 = df::mul(var_102, var_36);
var_104 = df::dot(var_89, var_89);
var_105 = df::mul(var_104, var_37);
var_106 = df::add(var_103, var_105);
var_107 = df::dot(var_93, var_93);
var_108 = df::mul(var_107, var_38);
var_109 = df::add(var_106, var_108);
var_110 = df::dot(var_97, var_97);
var_111 = df::mul(var_110, var_39);
var_112 = df::add(var_109, var_111);
var_113 = df::mul(var_21, var_dt);
var_114 = df::mul(var_113, var_dt);
var_115 = df::mul(var_114, var_52);
var_116 = df::div(var_51, var_115);
var_117 = df::add(var_112, var_116);
var_118 = df::div(var_74, var_117);
var_119 = df::mul(var_101, var_118);
var_120 = df::mul(var_89, var_118);
var_121 = df::mul(var_93, var_118);
var_122 = df::mul(var_97, var_118);
// volume (hydrostatic) constraint: C = det(F) - (1 + materials[0]/materials[1])
var_123 = df::determinant(var_53);
var_124 = df::sub(var_123, var_85);
var_125 = df::div(var_50, var_49);
var_126 = df::cross(var_41, var_42);
var_127 = df::mul(var_126, var_125);
var_128 = df::cross(var_42, var_40);
var_129 = df::mul(var_128, var_125);
var_130 = df::cross(var_40, var_41);
var_131 = df::mul(var_130, var_125);
var_132 = df::add(var_127, var_129);
var_133 = df::add(var_132, var_131);
var_134 = df::sub(var_75, var_51);
var_135 = df::mul(var_133, var_134);
var_136 = df::dot(var_135, var_135);
var_137 = df::mul(var_136, var_36);
var_138 = df::dot(var_127, var_127);
var_139 = df::mul(var_138, var_37);
var_140 = df::add(var_137, var_139);
var_141 = df::dot(var_129, var_129);
var_142 = df::mul(var_141, var_38);
var_143 = df::add(var_140, var_142);
var_144 = df::dot(var_131, var_131);
var_145 = df::mul(var_144, var_39);
var_146 = df::add(var_143, var_145);
var_147 = df::mul(var_24, var_dt);
var_148 = df::mul(var_147, var_dt);
var_149 = df::mul(var_148, var_52);
var_150 = df::div(var_51, var_149);
var_151 = df::add(var_146, var_150);
var_152 = df::div(var_124, var_151);
var_153 = df::mul(var_135, var_152);
var_154 = df::add(var_119, var_153);
var_155 = df::mul(var_127, var_152);
var_156 = df::add(var_120, var_155);
var_157 = df::mul(var_129, var_152);
var_158 = df::add(var_121, var_157);
var_159 = df::mul(var_131, var_152);
var_160 = df::add(var_122, var_159);
// scatter the relaxed position deltas (same side effect as the forward kernel)
var_161 = df::mul(var_154, var_36);
var_162 = df::mul(var_161, var_relaxation);
df::atomic_sub(var_delta, var_5, var_162);
var_163 = df::mul(var_156, var_37);
var_164 = df::mul(var_163, var_relaxation);
df::atomic_sub(var_delta, var_9, var_164);
var_165 = df::mul(var_158, var_38);
var_166 = df::mul(var_165, var_relaxation);
df::atomic_sub(var_delta, var_13, var_166);
var_167 = df::mul(var_160, var_39);
var_168 = df::mul(var_167, var_relaxation);
df::atomic_sub(var_delta, var_17, var_168);
//---------
// reverse
// Propagate adjoints in exact reverse program order; each df::adj_* call is
// the vector-Jacobian product of the matching forward df::* call.
df::adj_atomic_sub(var_delta, var_17, var_168, adj_delta, adj_17, adj_168);
df::adj_mul(var_167, var_relaxation, adj_167, adj_relaxation, adj_168);
df::adj_mul(var_160, var_39, adj_160, adj_39, adj_167);
df::adj_atomic_sub(var_delta, var_13, var_166, adj_delta, adj_13, adj_166);
df::adj_mul(var_165, var_relaxation, adj_165, adj_relaxation, adj_166);
df::adj_mul(var_158, var_38, adj_158, adj_38, adj_165);
df::adj_atomic_sub(var_delta, var_9, var_164, adj_delta, adj_9, adj_164);
df::adj_mul(var_163, var_relaxation, adj_163, adj_relaxation, adj_164);
df::adj_mul(var_156, var_37, adj_156, adj_37, adj_163);
df::adj_atomic_sub(var_delta, var_5, var_162, adj_delta, adj_5, adj_162);
df::adj_mul(var_161, var_relaxation, adj_161, adj_relaxation, adj_162);
df::adj_mul(var_154, var_36, adj_154, adj_36, adj_161);
df::adj_add(var_122, var_159, adj_122, adj_159, adj_160);
df::adj_mul(var_131, var_152, adj_131, adj_152, adj_159);
df::adj_add(var_121, var_157, adj_121, adj_157, adj_158);
df::adj_mul(var_129, var_152, adj_129, adj_152, adj_157);
df::adj_add(var_120, var_155, adj_120, adj_155, adj_156);
df::adj_mul(var_127, var_152, adj_127, adj_152, adj_155);
df::adj_add(var_119, var_153, adj_119, adj_153, adj_154);
df::adj_mul(var_135, var_152, adj_135, adj_152, adj_153);
df::adj_div(var_124, var_151, adj_124, adj_151, adj_152);
df::adj_add(var_146, var_150, adj_146, adj_150, adj_151);
df::adj_div(var_51, var_149, adj_51, adj_149, adj_150);
df::adj_mul(var_148, var_52, adj_148, adj_52, adj_149);
df::adj_mul(var_147, var_dt, adj_147, adj_dt, adj_148);
df::adj_mul(var_24, var_dt, adj_24, adj_dt, adj_147);
df::adj_add(var_143, var_145, adj_143, adj_145, adj_146);
df::adj_mul(var_144, var_39, adj_144, adj_39, adj_145);
df::adj_dot(var_131, var_131, adj_131, adj_131, adj_144);
df::adj_add(var_140, var_142, adj_140, adj_142, adj_143);
df::adj_mul(var_141, var_38, adj_141, adj_38, adj_142);
df::adj_dot(var_129, var_129, adj_129, adj_129, adj_141);
df::adj_add(var_137, var_139, adj_137, adj_139, adj_140);
df::adj_mul(var_138, var_37, adj_138, adj_37, adj_139);
df::adj_dot(var_127, var_127, adj_127, adj_127, adj_138);
df::adj_mul(var_136, var_36, adj_136, adj_36, adj_137);
df::adj_dot(var_135, var_135, adj_135, adj_135, adj_136);
df::adj_mul(var_133, var_134, adj_133, adj_134, adj_135);
df::adj_sub(var_75, var_51, adj_75, adj_51, adj_134);
df::adj_add(var_132, var_131, adj_132, adj_131, adj_133);
df::adj_add(var_127, var_129, adj_127, adj_129, adj_132);
df::adj_mul(var_130, var_125, adj_130, adj_125, adj_131);
df::adj_cross(var_40, var_41, adj_40, adj_41, adj_130);
df::adj_mul(var_128, var_125, adj_128, adj_125, adj_129);
df::adj_cross(var_42, var_40, adj_42, adj_40, adj_128);
df::adj_mul(var_126, var_125, adj_126, adj_125, adj_127);
df::adj_cross(var_41, var_42, adj_41, adj_42, adj_126);
df::adj_div(var_50, var_49, adj_50, adj_49, adj_125);
df::adj_sub(var_123, var_85, adj_123, adj_85, adj_124);
df::adj_determinant(var_53, adj_53, adj_123);
df::adj_mul(var_97, var_118, adj_97, adj_118, adj_122);
df::adj_mul(var_93, var_118, adj_93, adj_118, adj_121);
df::adj_mul(var_89, var_118, adj_89, adj_118, adj_120);
df::adj_mul(var_101, var_118, adj_101, adj_118, adj_119);
df::adj_div(var_74, var_117, adj_74, adj_117, adj_118);
df::adj_add(var_112, var_116, adj_112, adj_116, adj_117);
df::adj_div(var_51, var_115, adj_51, adj_115, adj_116);
df::adj_mul(var_114, var_52, adj_114, adj_52, adj_115);
df::adj_mul(var_113, var_dt, adj_113, adj_dt, adj_114);
df::adj_mul(var_21, var_dt, adj_21, adj_dt, adj_113);
df::adj_add(var_109, var_111, adj_109, adj_111, adj_112);
df::adj_mul(var_110, var_39, adj_110, adj_39, adj_111);
df::adj_dot(var_97, var_97, adj_97, adj_97, adj_110);
df::adj_add(var_106, var_108, adj_106, adj_108, adj_109);
df::adj_mul(var_107, var_38, adj_107, adj_38, adj_108);
df::adj_dot(var_93, var_93, adj_93, adj_93, adj_107);
df::adj_add(var_103, var_105, adj_103, adj_105, adj_106);
df::adj_mul(var_104, var_37, adj_104, adj_37, adj_105);
df::adj_dot(var_89, var_89, adj_89, adj_89, adj_104);
df::adj_mul(var_102, var_36, adj_102, adj_36, adj_103);
df::adj_dot(var_101, var_101, adj_101, adj_101, adj_102);
df::adj_mul(var_99, var_100, adj_99, adj_100, adj_101);
df::adj_sub(var_75, var_51, adj_75, adj_51, adj_100);
df::adj_add(var_98, var_97, adj_98, adj_97, adj_99);
df::adj_add(var_89, var_93, adj_89, adj_93, adj_98);
df::adj_float3(var_94, var_95, var_96, adj_94, adj_95, adj_96, adj_97);
df::adj_index(var_83, var_11, var_11, adj_83, adj_11, adj_11, adj_96);
df::adj_index(var_83, var_7, var_11, adj_83, adj_7, adj_11, adj_95);
df::adj_index(var_83, var_3, var_11, adj_83, adj_3, adj_11, adj_94);
df::adj_float3(var_90, var_91, var_92, adj_90, adj_91, adj_92, adj_93);
df::adj_index(var_83, var_11, var_7, adj_83, adj_11, adj_7, adj_92);
df::adj_index(var_83, var_7, var_7, adj_83, adj_7, adj_7, adj_91);
df::adj_index(var_83, var_3, var_7, adj_83, adj_3, adj_7, adj_90);
df::adj_float3(var_86, var_87, var_88, adj_86, adj_87, adj_88, adj_89);
df::adj_index(var_83, var_11, var_3, adj_83, adj_11, adj_3, adj_88);
df::adj_index(var_83, var_7, var_3, adj_83, adj_7, adj_3, adj_87);
df::adj_index(var_83, var_3, var_3, adj_83, adj_3, adj_3, adj_86);
df::adj_add(var_51, var_84, adj_51, adj_84, adj_85);
df::adj_div(var_21, var_24, adj_21, adj_24, adj_84);
df::adj_mul(var_81, var_82, adj_81, adj_82, adj_83);
df::adj_div(var_51, var_79, adj_51, adj_79, adj_82);
df::adj_mul(var_53, var_80, adj_53, adj_80, adj_81);
df::adj_transpose(var_47, adj_47, adj_80);
df::adj_select(var_77, var_74, var_78, adj_77, adj_74, adj_78, adj_79);
if (var_77) {
df::adj_sub(var_75, var_74, adj_75, adj_74, adj_78);
}
// reverse-mode landing point for the forward early-out above
if (var_76) {
label0:;
}
df::adj_sqrt(var_73, adj_73, adj_74);
df::adj_abs(var_72, adj_72, adj_73);
df::adj_sub(var_70, var_71, adj_70, adj_71, adj_72);
df::adj_add(var_68, var_69, adj_68, adj_69, adj_70);
df::adj_dot(var_65, var_65, adj_65, adj_65, adj_69);
df::adj_add(var_66, var_67, adj_66, adj_67, adj_68);
df::adj_dot(var_61, var_61, adj_61, adj_61, adj_67);
df::adj_dot(var_57, var_57, adj_57, adj_57, adj_66);
df::adj_float3(var_62, var_63, var_64, adj_62, adj_63, adj_64, adj_65);
df::adj_index(var_53, var_11, var_11, adj_53, adj_11, adj_11, adj_64);
df::adj_index(var_53, var_7, var_11, adj_53, adj_7, adj_11, adj_63);
df::adj_index(var_53, var_3, var_11, adj_53, adj_3, adj_11, adj_62);
df::adj_float3(var_58, var_59, var_60, adj_58, adj_59, adj_60, adj_61);
df::adj_index(var_53, var_11, var_7, adj_53, adj_11, adj_7, adj_60);
df::adj_index(var_53, var_7, var_7, adj_53, adj_7, adj_7, adj_59);
df::adj_index(var_53, var_3, var_7, adj_53, adj_3, adj_7, adj_58);
df::adj_float3(var_54, var_55, var_56, adj_54, adj_55, adj_56, adj_57);
df::adj_index(var_53, var_11, var_3, adj_53, adj_11, adj_3, adj_56);
df::adj_index(var_53, var_7, var_3, adj_53, adj_7, adj_3, adj_55);
df::adj_index(var_53, var_3, var_3, adj_53, adj_3, adj_3, adj_54);
df::adj_mul(var_46, var_47, adj_46, adj_47, adj_53);
df::adj_div(var_51, var_50, adj_51, adj_50, adj_52);
df::adj_mul(var_48, var_49, adj_48, adj_49, adj_50);
df::adj_determinant(var_47, adj_47, adj_48);
df::adj_load(var_pose, var_0, adj_pose, adj_0, adj_47);
df::adj_mat33(var_40, var_41, var_42, adj_40, adj_41, adj_42, adj_46);
df::adj_sub(var_35, var_32, adj_35, adj_32, adj_45);
df::adj_sub(var_34, var_32, adj_34, adj_32, adj_44);
df::adj_sub(var_33, var_32, adj_33, adj_32, adj_43);
df::adj_sub(var_31, var_28, adj_31, adj_28, adj_42);
df::adj_sub(var_30, var_28, adj_30, adj_28, adj_41);
df::adj_sub(var_29, var_28, adj_29, adj_28, adj_40);
// scatter gradients back to the per-particle input arrays
df::adj_load(var_inv_mass, var_17, adj_inv_mass, adj_17, adj_39);
df::adj_load(var_inv_mass, var_13, adj_inv_mass, adj_13, adj_38);
df::adj_load(var_inv_mass, var_9, adj_inv_mass, adj_9, adj_37);
df::adj_load(var_inv_mass, var_5, adj_inv_mass, adj_5, adj_36);
df::adj_load(var_v, var_17, adj_v, adj_17, adj_35);
df::adj_load(var_v, var_13, adj_v, adj_13, adj_34);
df::adj_load(var_v, var_9, adj_v, adj_9, adj_33);
df::adj_load(var_v, var_5, adj_v, adj_5, adj_32);
df::adj_load(var_x, var_17, adj_x, adj_17, adj_31);
df::adj_load(var_x, var_13, adj_x, adj_13, adj_30);
df::adj_load(var_x, var_9, adj_x, adj_9, adj_29);
df::adj_load(var_x, var_5, adj_x, adj_5, adj_28);
df::adj_load(var_materials, var_26, adj_materials, adj_26, adj_27);
df::adj_add(var_25, var_11, adj_25, adj_11, adj_26);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_25);
df::adj_load(var_materials, var_23, adj_materials, adj_23, adj_24);
df::adj_add(var_22, var_7, adj_22, adj_7, adj_23);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_22);
df::adj_load(var_materials, var_20, adj_materials, adj_20, adj_21);
df::adj_add(var_19, var_3, adj_19, adj_3, adj_20);
df::adj_mul(var_0, var_15, adj_0, adj_15, adj_19);
df::adj_load(var_activation, var_0, adj_activation, adj_0, adj_18);
df::adj_load(var_indices, var_16, adj_indices, adj_16, adj_17);
df::adj_add(var_14, var_15, adj_14, adj_15, adj_16);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_14);
df::adj_load(var_indices, var_12, adj_indices, adj_12, adj_13);
df::adj_add(var_10, var_11, adj_10, adj_11, adj_12);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_10);
df::adj_load(var_indices, var_8, adj_indices, adj_8, adj_9);
df::adj_add(var_6, var_7, adj_6, adj_7, adj_8);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_6);
df::adj_load(var_indices, var_4, adj_indices, adj_4, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_mul(var_0, var_1, adj_0, adj_1, adj_2);
return;
}
// Python entry points
// Host entry point: runs the tetrahedron solve kernel serially on the CPU,
// emulating one GPU thread per tetrahedron (dim = number of tets).
void solve_tetrahedra_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
float var_dt,
float var_relaxation,
torch::Tensor var_delta)
{
for (int tid = 0; tid < dim; ++tid)
{
// the kernel reads this emulated thread index (presumably via df::tid())
s_threadIdx = tid;
solve_tetrahedra_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_inv_mass),
cast<int*>(var_indices),
cast<mat33*>(var_pose),
cast<float*>(var_activation),
cast<float*>(var_materials),
var_dt,
var_relaxation,
cast<df::float3*>(var_delta));
}
}
// Host entry point for the adjoint pass: replays every tetrahedron serially,
// accumulating gradients into the adj_* tensors.
void solve_tetrahedra_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
float var_dt,
float var_relaxation,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_inv_mass,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
torch::Tensor adj_materials,
float adj_dt,
float adj_relaxation,
torch::Tensor adj_delta)
{
for (int tid = 0; tid < dim; ++tid)
{
// the kernel reads this emulated thread index (presumably via df::tid())
s_threadIdx = tid;
solve_tetrahedra_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_inv_mass),
cast<int*>(var_indices),
cast<mat33*>(var_pose),
cast<float*>(var_activation),
cast<float*>(var_materials),
var_dt,
var_relaxation,
cast<df::float3*>(var_delta),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<float*>(adj_inv_mass),
cast<int*>(adj_indices),
cast<mat33*>(adj_pose),
cast<float*>(adj_activation),
cast<float*>(adj_materials),
adj_dt,
adj_relaxation,
cast<df::float3*>(adj_delta));
}
}
// Python entry points
void solve_tetrahedra_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
float var_dt,
float var_relaxation,
torch::Tensor var_delta);
void solve_tetrahedra_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
torch::Tensor var_indices,
torch::Tensor var_pose,
torch::Tensor var_activation,
torch::Tensor var_materials,
float var_dt,
float var_relaxation,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_inv_mass,
torch::Tensor adj_indices,
torch::Tensor adj_pose,
torch::Tensor adj_activation,
torch::Tensor adj_materials,
float adj_dt,
float adj_relaxation,
torch::Tensor adj_delta);
// Position-based ground-plane contact for a single particle.
// The ground is the plane y == 0 with normal n = (0, 1, 0) and a contact
// margin of 0.01: particles whose height exceeds the margin are skipped.
// For penetrating particles the kernel accumulates a position correction
// into var_delta: a normal push-out plus a Coulomb-clamped friction
// displacement opposing the tangential velocity.
void solve_contacts_cpu_kernel_forward(
df::float3* var_x,
df::float3* var_v,
float* var_inv_mass,
float var_mu,
float var_dt,
df::float3* var_delta)
{
const int tid = df::tid();
const df::float3 pos = df::load(var_x, tid);
const df::float3 vel = df::load(var_v, tid);
const float w = df::load(var_inv_mass, tid);    // loaded but unused (matches generated kernel)
(void)w;
const float zero = 0.0;
const float one = 1.0;
const float margin = 0.01;
// ground normal (up axis)
const df::float3 n = df::float3(zero, one, zero);
// signed penetration depth above the margin; positive means no contact
const float c = df::sub(df::dot(n, pos), margin);
if (c > zero) {
return;
}
// normal correction vector (c is negative here, so -n*c pushes the particle out)
const df::float3 dxn = df::mul(n, c);
// split velocity into normal and tangential parts
const float vn = df::dot(n, vel);
const df::float3 vt = df::sub(vel, df::mul(n, vn));
// friction magnitude: both candidates are <= 0; max() picks the one with the
// smaller magnitude, i.e. slip displacement clamped by the Coulomb cone mu*|c|
const float f_n = df::mul(var_mu, c);
const float f_t = df::sub(zero, df::mul(df::length(vt), var_dt));
const float f = df::max(f_n, f_t);
// friction displacement opposes the slip direction (f <= 0)
const df::float3 correction = df::sub(df::mul(df::normalize(vt), f), dxn);
df::atomic_add(var_delta, tid, correction);
}
// Reverse-mode (adjoint) kernel for solve_contacts, emitted by the dflex
// adjoint code generator.  The forward contact projection is recomputed in
// full (var_*), then each step is differentiated in reverse order into the
// adj_* accumulators.  The forward early-out becomes a goto so the reverse
// sweep still runs (with zero adjoints) for non-contacting particles.
void solve_contacts_cpu_kernel_backward(
df::float3* var_x,
df::float3* var_v,
float* var_inv_mass,
float var_mu,
float var_dt,
df::float3* var_delta,
df::float3* adj_x,
df::float3* adj_v,
float* adj_inv_mass,
float adj_mu,
float adj_dt,
df::float3* adj_delta)
{
//---------
// primal vars (recomputed forward intermediates)
int var_0;
df::float3 var_1;
df::float3 var_2;
float var_3;
const float var_4 = 0.0;
const float var_5 = 1.0;
df::float3 var_6;
float var_7;
const float var_8 = 0.01;
float var_9;
bool var_10;
df::float3 var_11;
float var_12;
df::float3 var_13;
df::float3 var_14;
float var_15;
float var_16;
float var_17;
float var_18;
float var_19;
df::float3 var_20;
df::float3 var_21;
df::float3 var_22;
//---------
// dual vars (gradient accumulators, zero-initialised)
int adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
float adj_3 = 0;
float adj_4 = 0;
float adj_5 = 0;
df::float3 adj_6 = 0;
float adj_7 = 0;
float adj_8 = 0;
float adj_9 = 0;
bool adj_10 = 0;
df::float3 adj_11 = 0;
float adj_12 = 0;
df::float3 adj_13 = 0;
df::float3 adj_14 = 0;
float adj_15 = 0;
float adj_16 = 0;
float adj_17 = 0;
float adj_18 = 0;
float adj_19 = 0;
df::float3 adj_20 = 0;
df::float3 adj_21 = 0;
df::float3 adj_22 = 0;
//---------
// forward
// Recompute the contact projection: n = (0,1,0), c = n.x - 0.01 (margin).
var_0 = df::tid();
var_1 = df::load(var_x, var_0);
var_2 = df::load(var_v, var_0);
var_3 = df::load(var_inv_mass, var_0);
var_6 = df::float3(var_4, var_5, var_4);
var_7 = df::dot(var_6, var_1);
var_9 = df::sub(var_7, var_8);
var_10 = (var_9 > var_4);
// no contact: skip to the reverse landing point (gradients stay zero)
if (var_10) {
goto label0;
}
// normal correction n*c, tangential velocity, and Coulomb-clamped friction
var_11 = df::mul(var_6, var_9);
var_12 = df::dot(var_6, var_2);
var_13 = df::mul(var_6, var_12);
var_14 = df::sub(var_2, var_13);
var_15 = df::mul(var_mu, var_9);
var_16 = df::length(var_14);
var_17 = df::mul(var_16, var_dt);
var_18 = df::sub(var_4, var_17);
var_19 = df::max(var_15, var_18);
var_20 = df::normalize(var_14);
var_21 = df::mul(var_20, var_19);
var_22 = df::sub(var_21, var_11);
df::atomic_add(var_delta, var_0, var_22);
//---------
// reverse
// Propagate adjoints in exact reverse program order; each df::adj_* call is
// the vector-Jacobian product of the matching forward df::* call.
df::adj_atomic_add(var_delta, var_0, var_22, adj_delta, adj_0, adj_22);
df::adj_sub(var_21, var_11, adj_21, adj_11, adj_22);
df::adj_mul(var_20, var_19, adj_20, adj_19, adj_21);
df::adj_normalize(var_14, adj_14, adj_20);
df::adj_max(var_15, var_18, adj_15, adj_18, adj_19);
df::adj_sub(var_4, var_17, adj_4, adj_17, adj_18);
df::adj_mul(var_16, var_dt, adj_16, adj_dt, adj_17);
df::adj_length(var_14, adj_14, adj_16);
df::adj_mul(var_mu, var_9, adj_mu, adj_9, adj_15);
df::adj_sub(var_2, var_13, adj_2, adj_13, adj_14);
df::adj_mul(var_6, var_12, adj_6, adj_12, adj_13);
df::adj_dot(var_6, var_2, adj_6, adj_2, adj_12);
df::adj_mul(var_6, var_9, adj_6, adj_9, adj_11);
// reverse-mode landing point for the forward early-out above
if (var_10) {
label0:;
}
df::adj_sub(var_7, var_8, adj_7, adj_8, adj_9);
df::adj_dot(var_6, var_1, adj_6, adj_1, adj_7);
df::adj_float3(var_4, var_5, var_4, adj_4, adj_5, adj_4, adj_6);
// scatter gradients back to the per-particle input arrays
df::adj_load(var_inv_mass, var_0, adj_inv_mass, adj_0, adj_3);
df::adj_load(var_v, var_0, adj_v, adj_0, adj_2);
df::adj_load(var_x, var_0, adj_x, adj_0, adj_1);
return;
}
// Python entry points
void solve_contacts_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
float var_mu,
float var_dt,
torch::Tensor var_delta)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
solve_contacts_cpu_kernel_forward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_inv_mass),
var_mu,
var_dt,
cast<df::float3*>(var_delta));
}
}
// Host entry point for the adjoint pass: replays every particle contact
// serially, accumulating gradients into the adj_* tensors.
void solve_contacts_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
float var_mu,
float var_dt,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_inv_mass,
float adj_mu,
float adj_dt,
torch::Tensor adj_delta)
{
for (int tid = 0; tid < dim; ++tid)
{
// the kernel reads this emulated thread index (presumably via df::tid())
s_threadIdx = tid;
solve_contacts_cpu_kernel_backward(
cast<df::float3*>(var_x),
cast<df::float3*>(var_v),
cast<float*>(var_inv_mass),
var_mu,
var_dt,
cast<df::float3*>(var_delta),
cast<df::float3*>(adj_x),
cast<df::float3*>(adj_v),
cast<float*>(adj_inv_mass),
adj_mu,
adj_dt,
cast<df::float3*>(adj_delta));
}
}
// Python entry points
void solve_contacts_cpu_forward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
float var_mu,
float var_dt,
torch::Tensor var_delta);
void solve_contacts_cpu_backward(int dim,
torch::Tensor var_x,
torch::Tensor var_v,
torch::Tensor var_inv_mass,
float var_mu,
float var_dt,
torch::Tensor var_delta,
torch::Tensor adj_x,
torch::Tensor adj_v,
torch::Tensor adj_inv_mass,
float adj_mu,
float adj_dt,
torch::Tensor adj_delta);
// Applies accumulated position corrections to one particle:
//   x_out = x_pred + delta
//   v_out = (x_out - x_orig) / dt
// i.e. the corrected position, with velocity rederived from the total
// displacement over the step (PBD-style update).  var_v_orig is part of the
// generated signature but is not read here.
void apply_deltas_cpu_kernel_forward(
df::float3* var_x_orig,
df::float3* var_v_orig,
df::float3* var_x_pred,
df::float3* var_delta,
float var_dt,
df::float3* var_x_out,
df::float3* var_v_out)
{
const int tid = df::tid();
const df::float3 x0 = df::load(var_x_orig, tid);
const df::float3 x_pred = df::load(var_x_pred, tid);
const df::float3 dx = df::load(var_delta, tid);
// corrected position, then velocity consistent with the full-step displacement
const df::float3 x_new = df::add(x_pred, dx);
const df::float3 v_new = df::div(df::sub(x_new, x0), var_dt);
df::store(var_x_out, tid, x_new);
df::store(var_v_out, tid, v_new);
}
// Reverse-mode (adjoint) kernel for apply_deltas.  Forward computes
//   x_out = x_pred + delta
//   v_out = (x_out - x_orig) / dt
// and the reverse sweep pushes adj_x_out / adj_v_out back onto
// adj_x_orig, adj_x_pred, adj_delta and adj_dt.
void apply_deltas_cpu_kernel_backward(
df::float3* var_x_orig,
df::float3* var_v_orig,
df::float3* var_x_pred,
df::float3* var_delta,
float var_dt,
df::float3* var_x_out,
df::float3* var_v_out,
df::float3* adj_x_orig,
df::float3* adj_v_orig,
df::float3* adj_x_pred,
df::float3* adj_delta,
float adj_dt,
df::float3* adj_x_out,
df::float3* adj_v_out)
{
//---------
// primal vars (recomputed forward intermediates)
int var_0;
df::float3 var_1;
df::float3 var_2;
df::float3 var_3;
df::float3 var_4;
df::float3 var_5;
df::float3 var_6;
//---------
// dual vars (gradient accumulators, zero-initialised)
int adj_0 = 0;
df::float3 adj_1 = 0;
df::float3 adj_2 = 0;
df::float3 adj_3 = 0;
df::float3 adj_4 = 0;
df::float3 adj_5 = 0;
df::float3 adj_6 = 0;
//---------
// forward
// Recompute the primal update so the reverse sweep can reference it.
var_0 = df::tid();
var_1 = df::load(var_x_orig, var_0);
var_2 = df::load(var_x_pred, var_0);
var_3 = df::load(var_delta, var_0);
var_4 = df::add(var_2, var_3);
var_5 = df::sub(var_4, var_1);
var_6 = df::div(var_5, var_dt);
df::store(var_x_out, var_0, var_4);
df::store(var_v_out, var_0, var_6);
//---------
// reverse
// Propagate adjoints in exact reverse program order.
df::adj_store(var_v_out, var_0, var_6, adj_v_out, adj_0, adj_6);
df::adj_store(var_x_out, var_0, var_4, adj_x_out, adj_0, adj_4);
df::adj_div(var_5, var_dt, adj_5, adj_dt, adj_6);
df::adj_sub(var_4, var_1, adj_4, adj_1, adj_5);
df::adj_add(var_2, var_3, adj_2, adj_3, adj_4);
df::adj_load(var_delta, var_0, adj_delta, adj_0, adj_3);
df::adj_load(var_x_pred, var_0, adj_x_pred, adj_0, adj_2);
df::adj_load(var_x_orig, var_0, adj_x_orig, adj_0, adj_1);
return;
}
// Python entry points
void apply_deltas_cpu_forward(int dim,
torch::Tensor var_x_orig,
torch::Tensor var_v_orig,
torch::Tensor var_x_pred,
torch::Tensor var_delta,
float var_dt,
torch::Tensor var_x_out,
torch::Tensor var_v_out)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
apply_deltas_cpu_kernel_forward(
cast<df::float3*>(var_x_orig),
cast<df::float3*>(var_v_orig),
cast<df::float3*>(var_x_pred),
cast<df::float3*>(var_delta),
var_dt,
cast<df::float3*>(var_x_out),
cast<df::float3*>(var_v_out));
}
}
void apply_deltas_cpu_backward(int dim,
torch::Tensor var_x_orig,
torch::Tensor var_v_orig,
torch::Tensor var_x_pred,
torch::Tensor var_delta,
float var_dt,
torch::Tensor var_x_out,
torch::Tensor var_v_out,
torch::Tensor adj_x_orig,
torch::Tensor adj_v_orig,
torch::Tensor adj_x_pred,
torch::Tensor adj_delta,
float adj_dt,
torch::Tensor adj_x_out,
torch::Tensor adj_v_out)
{
for (int i=0; i < dim; ++i)
{
s_threadIdx = i;
apply_deltas_cpu_kernel_backward(
cast<df::float3*>(var_x_orig),
cast<df::float3*>(var_v_orig),
cast<df::float3*>(var_x_pred),
cast<df::float3*>(var_delta),
var_dt,
cast<df::float3*>(var_x_out),
cast<df::float3*>(var_v_out),
cast<df::float3*>(adj_x_orig),
cast<df::float3*>(adj_v_orig),
cast<df::float3*>(adj_x_pred),
cast<df::float3*>(adj_delta),
adj_dt,
cast<df::float3*>(adj_x_out),
cast<df::float3*>(adj_v_out));
}
}
// Python entry points
void apply_deltas_cpu_forward(int dim,
torch::Tensor var_x_orig,
torch::Tensor var_v_orig,
torch::Tensor var_x_pred,
torch::Tensor var_delta,
float var_dt,
torch::Tensor var_x_out,
torch::Tensor var_v_out);
void apply_deltas_cpu_backward(int dim,
torch::Tensor var_x_orig,
torch::Tensor var_v_orig,
torch::Tensor var_x_pred,
torch::Tensor var_delta,
float var_dt,
torch::Tensor var_x_out,
torch::Tensor var_v_out,
torch::Tensor adj_x_orig,
torch::Tensor adj_v_orig,
torch::Tensor adj_x_pred,
torch::Tensor adj_delta,
float adj_dt,
torch::Tensor adj_x_out,
torch::Tensor adj_v_out);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("integrate_particles_cpu_forward", integrate_particles_cpu_forward, "integrate_particles_cpu_forward");
m.def("integrate_particles_cpu_backward", integrate_particles_cpu_backward, "integrate_particles_cpu_backward");
m.def("integrate_rigids_cpu_forward", integrate_rigids_cpu_forward, "integrate_rigids_cpu_forward");
m.def("integrate_rigids_cpu_backward", integrate_rigids_cpu_backward, "integrate_rigids_cpu_backward");
m.def("eval_springs_cpu_forward", eval_springs_cpu_forward, "eval_springs_cpu_forward");
m.def("eval_springs_cpu_backward", eval_springs_cpu_backward, "eval_springs_cpu_backward");
m.def("eval_triangles_cpu_forward", eval_triangles_cpu_forward, "eval_triangles_cpu_forward");
m.def("eval_triangles_cpu_backward", eval_triangles_cpu_backward, "eval_triangles_cpu_backward");
m.def("eval_triangles_contact_cpu_forward", eval_triangles_contact_cpu_forward, "eval_triangles_contact_cpu_forward");
m.def("eval_triangles_contact_cpu_backward", eval_triangles_contact_cpu_backward, "eval_triangles_contact_cpu_backward");
m.def("eval_triangles_rigid_contacts_cpu_forward", eval_triangles_rigid_contacts_cpu_forward, "eval_triangles_rigid_contacts_cpu_forward");
m.def("eval_triangles_rigid_contacts_cpu_backward", eval_triangles_rigid_contacts_cpu_backward, "eval_triangles_rigid_contacts_cpu_backward");
m.def("eval_bending_cpu_forward", eval_bending_cpu_forward, "eval_bending_cpu_forward");
m.def("eval_bending_cpu_backward", eval_bending_cpu_backward, "eval_bending_cpu_backward");
m.def("eval_tetrahedra_cpu_forward", eval_tetrahedra_cpu_forward, "eval_tetrahedra_cpu_forward");
m.def("eval_tetrahedra_cpu_backward", eval_tetrahedra_cpu_backward, "eval_tetrahedra_cpu_backward");
m.def("eval_contacts_cpu_forward", eval_contacts_cpu_forward, "eval_contacts_cpu_forward");
m.def("eval_contacts_cpu_backward", eval_contacts_cpu_backward, "eval_contacts_cpu_backward");
m.def("eval_soft_contacts_cpu_forward", eval_soft_contacts_cpu_forward, "eval_soft_contacts_cpu_forward");
m.def("eval_soft_contacts_cpu_backward", eval_soft_contacts_cpu_backward, "eval_soft_contacts_cpu_backward");
m.def("eval_rigid_contacts_cpu_forward", eval_rigid_contacts_cpu_forward, "eval_rigid_contacts_cpu_forward");
m.def("eval_rigid_contacts_cpu_backward", eval_rigid_contacts_cpu_backward, "eval_rigid_contacts_cpu_backward");
m.def("eval_rigid_contacts_art_cpu_forward", eval_rigid_contacts_art_cpu_forward, "eval_rigid_contacts_art_cpu_forward");
m.def("eval_rigid_contacts_art_cpu_backward", eval_rigid_contacts_art_cpu_backward, "eval_rigid_contacts_art_cpu_backward");
m.def("eval_muscles_cpu_forward", eval_muscles_cpu_forward, "eval_muscles_cpu_forward");
m.def("eval_muscles_cpu_backward", eval_muscles_cpu_backward, "eval_muscles_cpu_backward");
m.def("eval_rigid_fk_cpu_forward", eval_rigid_fk_cpu_forward, "eval_rigid_fk_cpu_forward");
m.def("eval_rigid_fk_cpu_backward", eval_rigid_fk_cpu_backward, "eval_rigid_fk_cpu_backward");
m.def("eval_rigid_id_cpu_forward", eval_rigid_id_cpu_forward, "eval_rigid_id_cpu_forward");
m.def("eval_rigid_id_cpu_backward", eval_rigid_id_cpu_backward, "eval_rigid_id_cpu_backward");
m.def("eval_rigid_tau_cpu_forward", eval_rigid_tau_cpu_forward, "eval_rigid_tau_cpu_forward");
m.def("eval_rigid_tau_cpu_backward", eval_rigid_tau_cpu_backward, "eval_rigid_tau_cpu_backward");
m.def("eval_rigid_jacobian_cpu_forward", eval_rigid_jacobian_cpu_forward, "eval_rigid_jacobian_cpu_forward");
m.def("eval_rigid_jacobian_cpu_backward", eval_rigid_jacobian_cpu_backward, "eval_rigid_jacobian_cpu_backward");
m.def("eval_rigid_mass_cpu_forward", eval_rigid_mass_cpu_forward, "eval_rigid_mass_cpu_forward");
m.def("eval_rigid_mass_cpu_backward", eval_rigid_mass_cpu_backward, "eval_rigid_mass_cpu_backward");
m.def("eval_dense_gemm_cpu_forward", eval_dense_gemm_cpu_forward, "eval_dense_gemm_cpu_forward");
m.def("eval_dense_gemm_cpu_backward", eval_dense_gemm_cpu_backward, "eval_dense_gemm_cpu_backward");
m.def("eval_dense_gemm_batched_cpu_forward", eval_dense_gemm_batched_cpu_forward, "eval_dense_gemm_batched_cpu_forward");
m.def("eval_dense_gemm_batched_cpu_backward", eval_dense_gemm_batched_cpu_backward, "eval_dense_gemm_batched_cpu_backward");
m.def("eval_dense_cholesky_cpu_forward", eval_dense_cholesky_cpu_forward, "eval_dense_cholesky_cpu_forward");
m.def("eval_dense_cholesky_cpu_backward", eval_dense_cholesky_cpu_backward, "eval_dense_cholesky_cpu_backward");
m.def("eval_dense_cholesky_batched_cpu_forward", eval_dense_cholesky_batched_cpu_forward, "eval_dense_cholesky_batched_cpu_forward");
m.def("eval_dense_cholesky_batched_cpu_backward", eval_dense_cholesky_batched_cpu_backward, "eval_dense_cholesky_batched_cpu_backward");
m.def("eval_dense_subs_cpu_forward", eval_dense_subs_cpu_forward, "eval_dense_subs_cpu_forward");
m.def("eval_dense_subs_cpu_backward", eval_dense_subs_cpu_backward, "eval_dense_subs_cpu_backward");
m.def("eval_dense_solve_cpu_forward", eval_dense_solve_cpu_forward, "eval_dense_solve_cpu_forward");
m.def("eval_dense_solve_cpu_backward", eval_dense_solve_cpu_backward, "eval_dense_solve_cpu_backward");
m.def("eval_dense_solve_batched_cpu_forward", eval_dense_solve_batched_cpu_forward, "eval_dense_solve_batched_cpu_forward");
m.def("eval_dense_solve_batched_cpu_backward", eval_dense_solve_batched_cpu_backward, "eval_dense_solve_batched_cpu_backward");
m.def("eval_rigid_integrate_cpu_forward", eval_rigid_integrate_cpu_forward, "eval_rigid_integrate_cpu_forward");
m.def("eval_rigid_integrate_cpu_backward", eval_rigid_integrate_cpu_backward, "eval_rigid_integrate_cpu_backward");
m.def("solve_springs_cpu_forward", solve_springs_cpu_forward, "solve_springs_cpu_forward");
m.def("solve_springs_cpu_backward", solve_springs_cpu_backward, "solve_springs_cpu_backward");
m.def("solve_tetrahedra_cpu_forward", solve_tetrahedra_cpu_forward, "solve_tetrahedra_cpu_forward");
m.def("solve_tetrahedra_cpu_backward", solve_tetrahedra_cpu_backward, "solve_tetrahedra_cpu_backward");
m.def("solve_contacts_cpu_forward", solve_contacts_cpu_forward, "solve_contacts_cpu_forward");
m.def("solve_contacts_cpu_backward", solve_contacts_cpu_backward, "solve_contacts_cpu_backward");
m.def("apply_deltas_cpu_forward", apply_deltas_cpu_forward, "apply_deltas_cpu_forward");
m.def("apply_deltas_cpu_backward", apply_deltas_cpu_backward, "apply_deltas_cpu_backward");
}
| 532,217 |
C++
| 29.40377 | 700 | 0.581105 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_humanoid.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
frame_dt = 1.0/60.0
episode_duration = 2.0 # seconds
episode_frames = int(episode_duration/frame_dt)
sim_substeps = 16
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
train_iters = 1024
train_rate = 0.05
ground = True
name = "humanoid"
regularization = 1.e-3
phase_count = 8
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
# humanoid
test_util.urdf_load(
builder,
"assets/humanoid.urdf",
df.transform((0.0, 1.35, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
#df.transform((0.0, 0.65, 0.0), df.quat_identity()),
floating=True,
shape_ke=1.e+3*5.0,
shape_kd=1.e+2*2.0,
shape_kf=1.e+2,
shape_mu=0.5)
# set pd-stiffness
for i in range(len(builder.joint_target_ke)):
builder.joint_target_ke[i] = 10.0
builder.joint_target_kd[i] = 1.0
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
#self.actions = torch.zeros((self.episode_frames, len(self.model.joint_qd)), dtype=torch.float32, device=adapter, requires_grad=True)
#self.actions = torch.zeros(1, device=adapter, requires_grad=True)
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, len(self.model.joint_qd), bias=False), torch.nn.Tanh()).to(adapter)
self.action_strength = 0.0
self.action_penalty = 0.01
self.balance_reward = 15.0
self.forward_reward = 1.0
self.discount_scale = 3.0
self.discount_factor = 0.5
self.target = torch.tensor((0.0, 0.65, 0.0, 0.0, 0.0, 0.0, 1.0), dtype=torch.float32, device=adapter, requires_grad=False)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, dtype=torch.float32, device=self.model.adapter)
for f in range(0, self.episode_frames):
# df.config.no_grad = True
#df.config.verify_fp = True
# simulate
with df.ScopedTimer("fk-id-dflex", detailed=False, active=False):
phases = torch.zeros(self.phase_count, device=self.model.adapter)
# build sinusoidal phase inputs
for p in range(self.phase_count):
phases[p] = math.sin(10.0 * (self.sim_time + 0.5 * p * math.pi))
actions = self.network(phases)*self.action_strength
for i in range(0, self.sim_substeps):
self.state.joint_act = actions
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
discount_time = self.sim_time
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
loss = loss - self.state.joint_q[1]*self.state.joint_q[1]*self.balance_reward*discount #torch.norm(actions)*self.action_penalty
# loss = loss + self.state.joint_qd[5]*self.state.joint_q[1]# + torch.norm(actions)*self.action_penalty
# render
with df.ScopedTimer("render", False):
if (self.render):
self.render_time += self.frame_dt
self.renderer.update(self.state, self.render_time)
if (self.render):
try:
self.stage.Save()
except:
print("USD save error")
return loss
def run(self):
df.config.no_grad = True
#with torch.no_grad():
l = self.loss()
def verify(self, eps=1.e-4):
frame = 60
params = self.actions[frame]
n = len(params)
# evaluate analytic gradient
l = self.loss(render=False)
l.backward()
# evaluate numeric gradient
grad_analytic = self.actions.grad[frame].numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(n):
mid = params[i].item()
params[i] = mid - eps
left = self.loss(render=False)
params[i] = mid + eps
right = self.loss(render=False)
# reset
params[i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = self.network.parameters()
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward", detailed=True):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward", detailed=True):
#with torch.autograd.detect_anomaly():
l.backward()
#print("vel: " + str(params[0]))
#print("grad: " + str(params[0].grad))
#print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
params[0] -= self.train_rate * params[0].grad
params[0].grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
def save(self):
torch.save(self.network, "outputs/" + self.name + ".pt")
def load(self):
self.network = torch.load("outputs/" + self.name + ".pt")
#---------
robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
robot.run()
#robot.load()
#robot.train(mode='adam')
| 9,237 |
Python
| 28.514377 | 144 | 0.533831 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_bending.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
import cProfile
import numpy as np
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Bending:
sim_duration = 10.0 # seconds
sim_substeps = 32
sim_dt = (1.0 / 60.0) / sim_substeps
sim_steps = int(sim_duration / sim_dt)
sim_time = 0.0
train_iters = 200
train_rate = 0.01
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
if (True):
mesh = Usd.Stage.Open("assets/icosphere_open.usda")
geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/Shell/Mesh"))
#mesh = Usd.Stage.Open("assets/cylinder_long_open.usda")
#geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/CylinderLong/CylinderLong"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
linear_vel = np.array((1.0, 0.0, 0.0))
angular_vel = np.array((0.0, 0.0, 0.0))
center = np.array((0.0, 1.6, 0.0))
radius = 0.5
r = df.quat_multiply(df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5))
builder.add_cloth_mesh(pos=center, rot=(0.0, 0.0, 0.0, 1.0), scale=radius, vel=(0.0, 0.0, 0.0), vertices=points, indices=indices, density=10.0)
for i in range(len(builder.particle_qd)):
v = np.cross(np.array(builder.particle_q) - center, angular_vel)
builder.particle_qd[i] = v + linear_vel
self.model = builder.finalize(adapter)
self.model.tri_ke = 2000.0
self.model.tri_ka = 2000.0
self.model.tri_kd = 3.0
self.model.tri_lift = 0.0
self.model.tri_drag = 0.0
self.model.edge_ke = 20.0
self.model.edge_kd = 0.3
self.model.gravity = torch.tensor((0.0, -10.0, 0.0), device=adapter)
else:
builder.add_particle(pos=(1.0, 2.0, 1.0), vel=(0.0, 0.0, 0.0), mass=0.0)
builder.add_particle(pos=(1.0, 2.0, -1.0), vel=(0.0, 0.0, 0.0), mass=0.0)
builder.add_particle(pos=(-1.0, 2.0, -1.0), vel=(0.0, 0.0, 0.0), mass=0.0)
builder.add_particle(pos=(-1.0, 2.0, 1.0), vel=(0.0, 0.0, 0.0), mass=1.0)
builder.add_triangle(0, 1, 2)
builder.add_triangle(0, 2, 3)
builder.add_edge(1, 3, 2, 0)
builder.edge_rest_angle[0] = -math.pi * 0.6
self.model = builder.finalize(adapter)
self.model.tri_ke = 2000.0
self.model.tri_ka = 2000.0
self.model.tri_kd = 3.0
self.model.tri_lift = 0.0
self.model.tri_drag = 0.0
self.model.edge_ke = 20.0
self.model.edge_kd = 1.7
self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
# contact params
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 5.0
self.model.particle_radius = 0.01
self.model.ground = True
# training params
self.target_pos = torch.tensor((4.0, 2.0, 0.0), device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/bending.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.renderer.add_sphere(self.target_pos.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
def loss(self, render=True):
#-----------------------
# run simulation
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True)
for i in range(0, self.sim_steps):
# forward dynamics
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
if (render and (i % self.sim_substeps == 0)):
self.sim_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.sim_time)
# loss
#com_loss = torch.mean(self.state.particle_qd*self.model.particle_mass[:, None], 0)
#act_loss = torch.norm(activation)*self.activation_penalty
#loss = loss - com_loss[1]
return loss
def run(self):
with torch.no_grad():
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
# param to train
self.step_count = 0
render_freq = 1
optimizer = None
def closure():
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
l = self.loss(render)
l.backward()
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
try:
if (render):
self.stage.Save()
except:
print("USD save error")
return l
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
param -= self.train_rate * param.grad
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.5)
# train
for i in range(self.train_iters):
optimizer.step(closure)
# final save
try:
if (render):
self.stage.Save()
except:
print("USD save error")
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
bending = Bending(adapter='cpu')
bending.run()
#bending.train('lbfgs')
#bending.train('sgd')
| 7,170 |
Python
| 28.755187 | 156 | 0.54212 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_ant.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
import xml.etree.ElementTree as ET
class Robot:
frame_dt = 1.0/60.0
episode_duration = 2.0 # seconds
episode_frames = int(episode_duration/frame_dt)
sim_substeps = 32
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
train_iters = 1024
train_rate = 0.001
phase_count = 8
phase_step = math.pi / phase_count * 2.0
phase_freq = 6.0
ground = True
name = "humanoid"
regularization = 1.e-3
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
self.parse_mjcf("./assets/" + self.name + ".xml", builder,
stiffness=0.0,
damping=0.0,
contact_ke=1.e+3,
contact_kd=1.e+3,
contact_kf=1.e+2,
contact_mu=0.75,
limit_ke=1.e+2,
limit_kd=1.e+1)
# base transform
# set joint targets to rest pose in mjcf
if (self.name == "ant"):
builder.joint_q[0:3] = [0.0, 0.70, 0.0]
builder.joint_q[3:7] = df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)
builder.joint_q[7:] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
builder.joint_target[7:] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
if (self.name == "humanoid"):
builder.joint_q[0:3] = [0.0, 1.70, 0.0]
builder.joint_q[3:7] = df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)
# width = 0.1
# radius = 0.05
# builder.add_articulation()
# body = -1
# for i in range(1):
# body = builder.add_link(
# parent=body,
# X_pj=df.transform((2.0*width, 0.0, 0.0), df.quat_identity()),
# axis=(0.0, 0.0, 1.0),
# damping=0.0,
# stiffness=0.0,
# limit_lower=np.deg2rad(-30.0),
# limit_upper=np.deg2rad(30.0),
# limit_ke=100.0,
# limit_kd=10.0,
# type=df.JOINT_REVOLUTE)
# shape = builder.add_shape_capsule(body, pos=(width, 0.0, 0.0), half_width=width, radius=radius)
# self.ground = False
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, len(self.model.joint_qd)-6, bias=False), torch.nn.Tanh()).to(adapter)
self.action_strength = 150.0
self.action_penalty = 0.01
self.balance_reward = 15.0
self.forward_reward = 1.0
self.discount_scale = 1.0
self.discount_factor = 0.5
self.target = torch.tensor((0.0, 0.65, 0.0, 0.0, 0.0, 0.0, 1.0), dtype=torch.float32, device=adapter, requires_grad=False)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
def parse_mjcf(
self,
filename,
builder,
density=1000.0,
stiffness=0.0,
damping=0.0,
contact_ke=1000.0,
contact_kd=100.0,
contact_kf=100.0,
contact_mu=0.5,
limit_ke=100.0,
limit_kd=10.0):
file = ET.parse(filename)
root = file.getroot()
# map node names to link indices
self.node_map = {}
self.xform_map = {}
self.mesh_map = {}
type_map = {
"ball": df.JOINT_BALL,
"hinge": df.JOINT_REVOLUTE,
"slide": df.JOINT_PRISMATIC,
"free": df.JOINT_FREE,
"fixed": df.JOINT_FIXED
}
def parse_float(node, key, default):
if key in node.attrib:
return float(node.attrib[key])
else:
return default
def parse_vec(node, key, default):
if key in node.attrib:
return np.fromstring(node.attrib[key], sep=" ")
else:
return np.array(default)
def parse_body(body, parent):
body_name = body.attrib["name"]
body_pos = np.fromstring(body.attrib["pos"], sep=" ")
#-----------------
# add body for each joint
for joint in body.findall("joint"):
joint_name = joint.attrib["name"],
joint_type = type_map[joint.attrib["type"]]
joint_axis = parse_vec(joint, "axis", (0.0, 0.0, 0.0))
joint_pos = parse_vec(joint, "pos", (0.0, 0.0, 0.0))
joint_range = parse_vec(joint, "range", (-3.0, 3.0))
joint_armature = parse_float(joint, "armature", 0.0)
joint_stiffness = parse_float(joint, "stiffness", stiffness)
joint_damping = parse_float(joint, "damping", damping)
joint_axis = df.normalize(joint_axis)
if (parent == -1):
body_pos = np.array((0.0, 0.0, 0.0))
link = builder.add_link(
parent,
X_pj=df.transform(body_pos, df.quat_identity()),
axis=joint_axis,
type=joint_type,
limit_lower=np.deg2rad(joint_range[0]),
limit_upper=np.deg2rad(joint_range[1]),
limit_ke=limit_ke,
limit_kd=limit_kd,
stiffness=joint_stiffness,
damping=joint_damping,
armature=joint_armature)
parent = link
body_pos = [0.0, 0.0, 0.0] # todo: assumes that serial joints are all aligned at the same point
#-----------------
# add shapes
for geom in body.findall("geom"):
geom_name = geom.attrib["name"]
geom_type = geom.attrib["type"]
geom_size = parse_vec(geom, "size", [1.0])
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
geom_rot = parse_vec(geom, "quat", (0.0, 0.0, 0.0, 1.0))
if (geom_type == "sphere"):
builder.add_shape_sphere(
link,
pos=geom_pos,
rot=geom_rot,
radius=geom_size[0],
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
elif (geom_type == "capsule"):
if ("fromto" in geom.attrib):
geom_fromto = parse_vec(geom, "fromto", (0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
start = geom_fromto[0:3]
end = geom_fromto[3:6]
# compute rotation to align dflex capsule (along x-axis), with mjcf fromto direction
axis = df.normalize(end-start)
angle = math.acos(np.dot(axis, (1.0, 0.0, 0.0)))
axis = df.normalize(np.cross(axis, (1.0, 0.0, 0.0)))
geom_pos = (start + end)*0.5
geom_rot = df.quat_from_axis_angle(axis, -angle)
geom_radius = geom_size[0]
geom_width = np.linalg.norm(end-start)*0.5
else:
geom_radius = geom_size[0]
geom_width = geom_size[1]
geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
builder.add_shape_capsule(
link,
pos=geom_pos,
rot=geom_rot,
radius=geom_radius,
half_width=geom_width,
density=density,
ke=contact_ke,
kd=contact_kd,
kf=contact_kf,
mu=contact_mu)
else:
print("Type: " + geom_type + " unsupported")
#-----------------
# recurse
for child in body.findall("body"):
parse_body(child, link)
#-----------------
# start articulation
builder.add_articulation()
world = root.find("worldbody")
for body in world.findall("body"):
parse_body(body, -1)
def set_target(self, x, name):
self.target = torch.tensor(x, device=self.adapter)
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
def loss(self, render=True):
#---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, dtype=torch.float32, device=self.model.adapter)
for f in range(0, self.episode_frames):
# build sinusoidal input phases
# with df.ScopedTimer("inference", False):
# phases = torch.zeros(self.phase_count, device=self.model.adapter)
# for p in range(self.phase_count):
# phases[p] = math.sin(self.phase_freq * self.sim_time + p * self.phase_step)
# # compute activations (joint torques)
# actions = self.network(phases) * self.action_strength
# simulate
with df.ScopedTimer("simulate", detailed=False, active=True):
for i in range(0, self.sim_substeps):
# apply actions
#self.state.joint_act[6:] = actions
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, i==0)
self.sim_time += self.sim_dt
discount_time = self.sim_time
discount = math.pow(self.discount_factor, discount_time*self.discount_scale)
pos = self.state.joint_q[0:3]
vel = df.get_body_linear_velocity(self.state.joint_qd[0:6], pos)
loss = loss - discount*vel[0] # + torch.norm(self.state.joint_q[1]-0.5)
# render
with df.ScopedTimer("render", False):
if (self.render):
self.render_time += self.frame_dt
self.renderer.update(self.state, self.render_time)
try:
self.stage.Save()
except:
print("USD save error")
return loss
def run(self):
df.config.no_grad = True
with torch.no_grad():
l = self.loss()
def verify(self, eps=1.e-4):
frame = 60
params = self.actions[frame]
n = len(params)
# evaluate analytic gradient
l = self.loss(render=False)
l.backward()
# evaluate numeric gradient
grad_analytic = self.actions.grad[frame].numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(n):
mid = params[i].item()
params[i] = mid - eps
left = self.loss(render=False)
params[i] = mid + eps
right = self.loss(render=False)
# reset
params[i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
# param to train
self.step_count = 0
self.best_loss = math.inf
render_freq = 1
optimizer = None
params = self.network.parameters()
def closure():
if (optimizer):
optimizer.zero_grad()
# render every N steps
render = False
if ((self.step_count % render_freq) == 0):
render = True
with df.ScopedTimer("forward"):
#with torch.autograd.detect_anomaly():
l = self.loss(render)
with df.ScopedTimer("backward"):
#with torch.autograd.detect_anomaly():
l.backward()
#print("vel: " + str(params[0]))
#print("grad: " + str(params[0].grad))
#print("--------")
print(str(self.step_count) + ": " + str(l))
self.step_count += 1
#df.util.mem_report()
# save best trajectory
if (l.item() < self.best_loss):
self.save()
self.best_loss = l.item()
return l
with df.ScopedTimer("step"):
if (mode == 'gd'):
# simple Gradient Descent
for i in range(self.train_iters):
closure()
with torch.no_grad():
for p in list(params):
p -= self.train_rate * p.grad
p.grad.zero_()
else:
# L-BFGS
if (mode == 'lbfgs'):
optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")
# Adam
if (mode == 'adam'):
optimizer = torch.optim.Adam(params, lr=self.train_rate)
# SGD
if (mode == 'sgd'):
optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)
# train
for i in range(self.train_iters):
print("Step: " + str(i))
optimizer.step(closure)
# final save
try:
if (render):
self.stage.Save()
except:
print("USD save error")
# Persist the whole network module (pickle-based torch.save; loading requires
# the same class definition to be importable -- NOTE(review): torch.load
# unpickles arbitrary objects, only load trusted files).
def save(self):
torch.save(self.network, "outputs/" + self.name + ".pt")
# Restore a previously saved network module from outputs/<name>.pt.
def load(self):
self.network = torch.load("outputs/" + self.name + ".pt")
#---------
# script entry point: build the robot on the GPU and run a single rollout
#robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
#robot.load()
#robot.run()
robot = Robot(depth=1, mode='dflex', render=True, adapter='cuda')
#robot.load()
#robot.train(mode='adam')
robot.run()
| 16,157 |
Python
| 29.486792 | 146 | 0.47744 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_jelly.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
import timeit
import cProfile
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Bending:
    """Jellyfish bending-actuation experiment: simulation and training config."""

    # simulation timing: 60 fps frames with 16 substeps each
    sim_duration = 5.0     # seconds
    sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    # optimization settings
    train_iters = 200
    train_rate = 0.01

    # sinusoidal phase inputs fed to the control network
    phase_count = 8
    phase_step = math.pi / phase_count * 2.0
    phase_freq = 2.5
# Build the jellyfish cloth model from USD, set material/contact parameters,
# create the control network, and set up the USD renderer and integrator.
# NOTE(review): indentation in this dump is flattened; code kept byte-identical.
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
# rotate mesh from its authored frame into the simulation frame
r = df.quat_multiply(df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5))
builder = df.sim.ModelBuilder()
mesh = Usd.Stage.Open("assets/jellyfish.usda")
geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/Icosphere/Icosphere"))
points = geom.GetPointsAttr().Get()
indices = geom.GetFaceVertexIndicesAttr().Get()
counts = geom.GetFaceVertexCountsAttr().Get()
# map each face to the index of the geom subset (material) containing it; -1 = none
face_materials = [-1] * len(counts)
face_subsets = UsdGeom.Subset.GetAllGeomSubsets(geom)
for i, s in enumerate(face_subsets):
face_subset_indices = s.GetIndicesAttr().Get()
for f in face_subset_indices:
face_materials[f] = i
active_material = 0
active_scale = []
# edge callback: activation enabled (1.0) only for edges whose two adjacent
# faces both belong to the active material subset, otherwise 0.0
def add_edge(f0, f1):
if (face_materials[f0] == active_material and face_materials[f1] == active_material):
active_scale.append(1.0)
else:
active_scale.append(0.0)
builder.add_cloth_mesh(pos=(0.0, 2.5, 0.0),
rot=r,
scale=1.0,
vel=(0.0, 0.0, 0.0),
vertices=points,
indices=indices,
edge_callback=add_edge,
density=100.0)
self.model = builder.finalize(adapter)
# cloth triangle and bending-edge material parameters
self.model.tri_ke = 5000.0
self.model.tri_ka = 5000.0
self.model.tri_kd = 100.0
self.model.tri_lift = 1000.0
self.model.tri_drag = 0.0
self.model.edge_ke = 20.0
self.model.edge_kd = 1.0 #2.5
# contact parameters (no ground plane, zero gravity: free-swimming)
self.model.contact_ke = 1.e+4
self.model.contact_kd = 0.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.01
self.model.ground = False
self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
# training params
self.target_pos = torch.tensor((4.0, 2.0, 0.0), device=adapter)
self.rest_angle = self.model.edge_rest_angle
# one fully connected layer + tanh activation
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.edge_count, bias=False), torch.nn.Tanh()).to(adapter)
self.activation_strength = math.pi * 0.3
self.activation_scale = torch.tensor(active_scale, device=adapter)
self.activation_penalty = 0.0
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/jelly.usd")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.renderer.add_sphere(self.target_pos.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
# Roll out the simulation, driving edge rest angles with the network, and
# accumulate the (negative) vertical momentum of the centre of mass as loss.
def loss(self, render=True):
#-----------------------
# run simulation
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# build sinusoidal input phases
phases = torch.zeros(self.phase_count, device=self.model.adapter)
for p in range(self.phase_count):
phases[p] = math.sin(self.phase_freq*self.sim_time + p * self.phase_step)
# compute activations (rest angles)
activation = (self.network(phases)) * self.activation_strength * self.activation_scale
self.model.edge_rest_angle = self.rest_angle + activation
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.sim_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.sim_time)
# loss
with df.ScopedTimer("loss", False):
com_loss = torch.mean(self.state.particle_qd * self.model.particle_mass[:, None], 0)
act_loss = torch.norm(activation) * self.activation_penalty
# reward upward momentum, penalize activation magnitude
loss = loss - com_loss[1] - act_loss
return loss
# Single evaluation rollout (no gradients) followed by a USD save.
def run(self):
with torch.no_grad():
l = self.loss()
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
    """Train the control network with the chosen optimizer.

    mode: 'gd' (manual gradient descent), 'lbfgs', 'adam', or 'sgd'.
    """
    self.step_count = 0
    render_freq = 1
    optimizer = None

    # materialize once so the 'gd' branch can reuse the list every iteration
    params = list(self.network.parameters())

    def closure():
        # BUGFIX: guard zero_grad() -- in 'gd' mode there is no optimizer and
        # the original unconditional call raised AttributeError on None.
        if (optimizer):
            optimizer.zero_grad()

        # render every N steps
        render = False
        if ((self.step_count % render_freq) == 0):
            render = True

        with df.ScopedTimer("forward"):
            l = self.loss(render)

        with df.ScopedTimer("backward"):
            l.backward()

        print(str(self.step_count) + ": " + str(l))
        self.step_count += 1

        with df.ScopedTimer("save"):
            try:
                if (render):
                    self.stage.Save()
            except:
                print("USD save error")

        return l

    with df.ScopedTimer("step"):
        if (mode == 'gd'):
            # simple gradient descent on the network weights
            for i in range(self.train_iters):
                closure()

                with torch.no_grad():
                    # BUGFIX: the original stepped an undefined name 'param'
                    # (NameError); step every parameter and clear its grad.
                    for p in params:
                        p -= self.train_rate * p.grad
                        p.grad.zero_()
        else:
            # L-BFGS
            if (mode == 'lbfgs'):
                optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

            # Adam
            if (mode == 'adam'):
                optimizer = torch.optim.Adam(params, lr=self.train_rate)

            # SGD
            if (mode == 'sgd'):
                optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

            # train
            for i in range(self.train_iters):
                optimizer.step(closure)

    # BUGFIX: final save.  The original tested the closure-local 'render'
    # here (a NameError silently swallowed by the bare except); save whenever
    # a stage exists instead.
    try:
        if (self.stage):
            self.stage.Save()
    except:
        print("USD save error")
# Persist the whole network module to an arbitrary path (pickle-based).
def save(self, file):
torch.save(self.network, file)
# Restore a network module and switch it to eval mode.
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
# script entry point: train the jellyfish controller with SGD on the CPU
bending = Bending(adapter='cpu')
#bending.load('jelly_10358.net')
#bending.run()
#bending.train('lbfgs')
bending.train('sgd')
| 8,059 |
Python
| 30.119691 | 160 | 0.540514 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_urdf.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
t = torch.tensor((1.0), requires_grad=True)
i = t.item()
print(i)
from urdfpy import URDF
#robot = URDF.load("assets/trifinger/urdf/trifinger_with_stage.urdf")
#robot = URDF.load("assets/franka_description/robots/franka_panda.urdf")
#robot = URDF.load("assets/anymal_b_simple_description/urdf/anymal.urdf")
#robot = URDF.load("assets/kinova_description/urdf/kinova.urdf")
#robot = URDF.load("assets/ur5/urdf/ur5_robot.urdf")
#robot = URDF.load("assets/kuka_allegro_description/allegro.urdf")
robot = URDF.load("assets/allegro_hand_description/allegro_hand_description_left.urdf")
for link in robot.links:
dir(link)
print(link)
for joint in robot.joints:
print(joint)
robot.show()
| 1,141 |
Python
| 29.052631 | 87 | 0.765118 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_cloth.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
# uncomment to output timers
# NOTE(review): despite the comment above, the flag is currently left enabled.
df.ScopedTimer.enabled = True
class Cloth:
    """Hanging-cloth experiment: simulation and training configuration."""

    # simulation timing: 60 fps frames with 16 substeps each
    sim_duration = 5.0     # seconds
    sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0
    render_time = 0.0

    # optimization settings
    train_iters = 100
    train_rate = 0.01

    phase_count = 4
# Build a dim x dim cloth grid suspended by two zero-mass anchor particles,
# mark the spring rest lengths as the optimization target, and set up the
# USD renderer and XPBD integrator.
# NOTE(review): indentation in this dump is flattened; code kept byte-identical.
def __init__(self, dim=20, mode="cloth", adapter='cpu'):
torch.manual_seed(42)
height = 2.5
builder = df.sim.ModelBuilder()
builder.add_cloth_grid(pos=(0.0, height, 0.0),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 1.04),
vel=(0.0, 0.0, 0.0),
dim_x=dim,
dim_y=dim,
cell_x=0.2,
cell_y=0.2,
mass=400 / (dim**2)) #, fix_left=True, fix_right=True, fix_top=True, fix_bottom=True)
# attach two corners of the cloth to fixed (zero-mass) anchor particles
attach0 = 0
attach1 = 20
anchor0 = builder.add_particle(pos=builder.particle_q[attach0] - (1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
anchor1 = builder.add_particle(pos=builder.particle_q[attach1] + (1.0, 0.0, 0.0), vel=(0.0, 0.0, 0.0), mass=0.0)
builder.add_spring(anchor0, attach0, 10000.0, 1000.0, 0)
builder.add_spring(anchor1, attach1, 10000.0, 1000.0, 0)
self.model = builder.finalize(adapter)
# cloth triangle material and contact parameters
self.model.tri_ke = 10000.0
self.model.tri_ka = 10000.0
self.model.tri_kd = 100.0
self.model.contact_ke = 1.e+4
self.model.contact_kd = 1000.0
self.model.contact_kf = 1000.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.01
self.model.ground = False
self.model.tri_collisions = False
# set optimization targets
self.model.spring_rest_length.requires_grad_()
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/cloth.usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
#self.integrator = df.sim.SemiImplicitIntegrator()
self.integrator = df.sim.XPBDIntegrator()
# Roll out the simulation and return the negated final centre-of-mass height
# (minimizing the loss raises the cloth).  Also NaN-checks particle positions.
def loss(self, render=True):
# reset state
self.sim_time = 0.0
self.state = self.model.state()
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
with df.ScopedTimer("forward", False):
# run simulation
for i in range(0, self.sim_steps):
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
with df.ScopedTimer("render", False):
if (render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
# NaN self-check: x != x is true only for NaN entries
if (self.state.particle_q != self.state.particle_q).sum() != 0:
print("NaN found in state")
import pdb
pdb.set_trace()
self.sim_time += self.sim_dt
# compute loss
with df.ScopedTimer("loss", False):
com_pos = torch.mean(self.state.particle_q, 0)
com_vel = torch.mean(self.state.particle_qd, 0)
# use integral of velocity over course of the run
loss = loss - com_pos[1]
return loss
# Single rollout with rendering, then write the USD stage.
def run(self):
l = self.loss()
self.stage.Save()
def train(self, mode='gd'):
    """Optimize the spring rest lengths with the chosen optimizer.

    mode: 'gd' (manual gradient descent), 'lbfgs', 'adam', or 'sgd'.
    """
    self.step_count = 0
    self.render_steps = 1

    optimizer = None

    def closure():
        # BUGFIX: gradients must be cleared inside the closure.  L-BFGS
        # evaluates the closure several times per step, and the original
        # outer-loop zero_grad() let gradients accumulate across those
        # evaluations.
        if (optimizer):
            optimizer.zero_grad()

        # render every N steps
        render = False
        if ((self.step_count % self.render_steps) == 0):
            render = True

        # with torch.autograd.detect_anomaly():
        with df.ScopedTimer("forward", False):
            l = self.loss(render)

        with df.ScopedTimer("backward", False):
            l.backward()

        with df.ScopedTimer("save", False):
            if (render):
                self.stage.Save()

        print(str(self.step_count) + ": " + str(l))
        self.step_count += 1

        return l

    with df.ScopedTimer("step"):
        if (mode == 'gd'):
            param = self.model.spring_rest_length

            # simple gradient descent
            for i in range(self.train_iters):
                closure()

                with torch.no_grad():
                    param -= self.train_rate * param.grad
                    # idiom: zero the gradient directly rather than through
                    # the deprecated .data attribute
                    param.grad.zero_()
        else:
            # L-BFGS
            if (mode == 'lbfgs'):
                optimizer = torch.optim.LBFGS([self.model.spring_rest_length],
                                              lr=0.01,
                                              tolerance_grad=1.e-5,
                                              tolerance_change=0.01,
                                              line_search_fn="strong_wolfe")

            # Adam
            if (mode == 'adam'):
                optimizer = torch.optim.Adam([self.model.spring_rest_length], lr=self.train_rate * 4.0)

            # SGD
            if (mode == 'sgd'):
                optimizer = torch.optim.SGD([self.model.spring_rest_length], lr=self.train_rate * 0.01, momentum=0.8)

            # train
            for i in range(self.train_iters):
                optimizer.step(closure)
# Persist the control network (pickle-based torch.save).
# NOTE(review): Cloth.__init__ never defines self.network -- save()/load()
# look copied from the other experiments and would raise AttributeError here;
# confirm before relying on them.
def save(self, file):
torch.save(self.network, file)
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
# script entry point: single forward rollout on the GPU
cloth = Cloth(adapter='cuda')
cloth.run()
#cloth.train('adam')
# for dim in range(20, 400, 20):
# cloth = Cloth(dim=dim)
# cloth.run()
Python
| 31.142202 | 157 | 0.515273 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_fem_contact.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import cProfile
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import test_util
from pxr import Usd, UsdGeom, Gf
class FEMContact:
    """Soft-body (FEM) contact experiment: simulation and training config."""

    # simulation timing: 60 fps frames with 64 substeps each
    sim_duration = 10.0    # seconds
    sim_substeps = 64
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    # optimization settings
    train_iters = 16
    train_rate = 0.01      # 1.0/(sim_dt*sim_dt)

    # sinusoidal phase inputs for the activation network
    phase_count = 8
    phase_step = math.pi / phase_count * 2.0
    phase_freq = 2.5
# Build either a soft FEM grid or a cloth grid, plus an articulated tree
# obstacle, configure materials/contacts, and set up renderer + integrator.
# NOTE(review): indentation in this dump is flattened; code kept byte-identical.
def __init__(self, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
# choose between a solid FEM block and a cloth sheet
solid = True
if (solid):
builder.add_soft_grid(
pos=(0.5, 1.0, 0.0),
rot=(0.0, 0.0, 0.0, 1.0),
vel=(0.0, 0.0, 0.0),
dim_x=3,
dim_y=10,
dim_z=3,
cell_x=0.1,
cell_y=0.1,
cell_z=0.1,
density=1000.0,
k_mu=10000.0,
k_lambda=10000.0,
k_damp=1.0)
else:
builder.add_cloth_grid(
pos=(-0.7, 1.0, -0.7),
rot=df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi*0.5),
vel=(0.0, 0.0, 0.0),
dim_x=20,
dim_y=20,
cell_x=0.075,
cell_y=0.075,
mass=0.2)
# art = builder.add_articulation()
# link = builder.add_link(
# parent=-1,
# X_pj=df.transform_identity(),
# axis=(0.0, 0.0, 0.0),
# type=df.JOINT_FREE,
# armature=0.0)
# builder.add_shape_sphere(
# body=link,
# pos=(0.0, 0.5, 0.0),
# rot=df.quat_identity(),
# radius=0.25)
# builder.add_shape_box(
# body=link,
# pos=(0.0, 0.5, 0.0),
# rot=df.quat_identity(),
# hx=0.5,
# hy=0.25,
# hz=0.5)
# add a stiff articulated tree as the contact obstacle
builder.add_articulation()
test_util.build_tree(builder, angle=0.0, length=0.25, width=0.1, max_depth=3, joint_stiffness=10000.0, joint_damping=100.0)
builder.joint_X_pj[0] = df.transform((-0.5, 0.5, 0.0), df.quat_identity())
# mesh = Usd.Stage.Open("assets/torus.stl.usda")
# geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/mesh"))
# points = geom.GetPointsAttr().Get()
# tet_indices = geom.GetPrim().GetAttribute("tetraIndices").Get()
# tri_indices = geom.GetFaceVertexIndicesAttr().Get()
# tri_counts = geom.GetFaceVertexCountsAttr().Get()
# r = df.quat_multiply(df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.5), df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.0))
# builder.add_soft_mesh(pos=(0.0, 2.0, 0.0),
# rot=r,
# scale=0.25,
# vel=(1.5, 0.0, 0.0),
# vertices=points,
# indices=tet_indices,
# density=10.0,
# k_mu=1000.0,
# k_lambda=1000.0,
# k_damp=1.0)
self.model = builder.finalize(adapter)
#self.model.tet_kl = 1000.0
#self.model.tet_km = 1000.0
#self.model.tet_kd = 1.0
# disable triangle dynamics (just used for rendering)
if (solid):
self.model.tri_ke = 0.0
self.model.tri_ka = 0.0
self.model.tri_kd = 0.0
self.model.tri_kb = 0.0
else:
self.model.tri_ke = 1000.0
self.model.tri_ka = 1000.0
self.model.tri_kd = 10.0
self.model.tri_kb = 0.0
self.model.edge_ke = 100.0
self.model.edge_kd = 0.1
# contact parameters; ground plane enabled for this experiment
self.model.contact_ke = 1.e+4*2.0
self.model.contact_kd = 10.0
self.model.contact_kf = 10.0
self.model.contact_mu = 0.5
self.model.particle_radius = 0.05
self.model.ground = True
# one fully connected layer + tanh activation
self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.tet_count, bias=False), torch.nn.Tanh()).to(adapter)
self.activation_strength = 0.3
self.activation_penalty = 0.0
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/fem_contact.usd")
if (self.stage):
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
#self.renderer.add_sphere((0.0, 0.5, 0.0), 0.25, "collider")
#self.renderer.add_box((0.0, 0.5, 0.0), (0.25, 0.25, 0.25), "collider")
self.integrator = df.sim.SemiImplicitIntegrator()
# Forward rollout only; loss is currently a constant zero (no objective term
# is accumulated), so this mainly exercises dynamics and rendering.
def loss(self, render=True):
#-----------------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
self.model.collide(self.state)
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# forward dynamics
with df.ScopedTimer("simulate", False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
# render
with df.ScopedTimer("render", False):
if (self.stage and render and (i % self.sim_substeps == 0)):
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
return loss
# Gradient-free rollout, optionally wrapped in cProfile for timing analysis.
def run(self, profile=False, render=True):
df.config.no_grad = True
with torch.no_grad():
with df.ScopedTimer("run"):
if profile:
cp = cProfile.Profile()
cp.clear()
cp.enable()
# run forward dynamics
if profile:
self.state = self.model.state()
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
self.sim_time += self.sim_dt
else:
l = self.loss(render)
if profile:
cp.disable()
cp.print_stats(sort='tottime')
if (self.stage):
self.stage.Save()
def train(self, mode='gd'):
    """Train the activation network with the chosen optimizer.

    mode: 'gd' (manual gradient descent), 'lbfgs', 'adam', or 'sgd'.
    """
    self.step_count = 0
    render_freq = 1
    optimizer = None

    # materialize once so the 'gd' branch can reuse the list every iteration
    params = list(self.network.parameters())

    def closure():
        # BUGFIX: guard zero_grad() -- in 'gd' mode there is no optimizer and
        # the original unconditional call raised AttributeError on None.
        if (optimizer):
            optimizer.zero_grad()

        # render every N steps
        render = False
        if ((self.step_count % render_freq) == 0):
            render = True

        with df.ScopedTimer("forward"):
            #with torch.autograd.detect_anomaly():
            l = self.loss(render)

        with df.ScopedTimer("backward"):
            #with torch.autograd.detect_anomaly():
            l.backward()

        print(str(self.step_count) + ": " + str(l))
        self.step_count += 1

        with df.ScopedTimer("save"):
            try:
                if (render):
                    self.stage.Save()
            except:
                print("USD save error")

        return l

    with df.ScopedTimer("step"):
        if (mode == 'gd'):
            # simple gradient descent on the network weights
            for i in range(self.train_iters):
                closure()

                with torch.no_grad():
                    # BUGFIX: the original stepped an undefined name 'param'
                    # (NameError); step every parameter and clear its grad.
                    for p in params:
                        p -= self.train_rate * p.grad
                        p.grad.zero_()
        else:
            # L-BFGS
            if (mode == 'lbfgs'):
                optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

            # Adam
            if (mode == 'adam'):
                optimizer = torch.optim.Adam(params, lr=self.train_rate)

            # SGD
            if (mode == 'sgd'):
                optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.5)

            # train
            for i in range(self.train_iters):
                optimizer.step(closure)

    # BUGFIX: final save.  The original tested the closure-local 'render'
    # here (a NameError swallowed by the bare except); save whenever a stage
    # exists instead.
    try:
        if (self.stage):
            self.stage.Save()
    except:
        print("USD save error")
# Persist the activation network module (pickle-based torch.save).
def save(self, file):
torch.save(self.network, file)
# Restore a network module and switch it to eval mode.
def load(self, file):
self.network = torch.load(file)
self.network.eval()
#---------
# script entry point: single gradient-free rollout on the GPU
fem = FEMContact(adapter='cuda')
fem.run(profile=False, render=True)
#fem.train('lbfgs')
#fem.train('sgd')
Python
| 29.28125 | 160 | 0.484467 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_ballistic.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import time
import math
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Ballistic:
    """Single-particle projectile-to-target experiment configuration."""

    # simulation timing: 60 fps frames with 10 substeps each
    sim_duration = 2.0     # seconds
    sim_substeps = 10
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    # optimization settings
    train_iters = 5
    train_rate = 0.1       # 1.0/(sim_dt*sim_dt)
# Build a one-particle model with an initial velocity, a render target sphere,
# and a semi-implicit integrator.
# NOTE(review): indentation in this dump is flattened; code kept byte-identical.
def __init__(self, adapter='cpu'):
builder = df.sim.ModelBuilder()
# single unit-mass particle launched with a small +x velocity
builder.add_particle((0, 1.0, 0.0), (0.1, 0.0, 0.0), 1.0)
self.model = builder.finalize(adapter)
self.target = torch.tensor((2.0, 1.0, 0.0), device=adapter)
#-----------------------
# set up Usd renderer
self.stage = Usd.Stage.CreateNew("outputs/ballistic.usda")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.renderer.add_sphere(self.target.tolist(), 0.1, "target")
self.integrator = df.sim.SemiImplicitIntegrator()
# Roll out the trajectory and return the final distance to the target.
def loss(self):
#-----------------------
# run simulation
self.state = self.model.state()
for i in range(0, self.sim_steps):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render once per frame (every sim_substeps substeps)
if (i % self.sim_substeps) == 0:
self.renderer.update(self.state, self.sim_time)
self.sim_time += self.sim_dt
loss = torch.norm(self.state.particle_q[0] - self.target)
return loss
def train(self, mode='gd'):
    """Fit the initial particle velocity so the particle reaches the target.

    mode selects the optimizer: 'gd', 'lbfgs', or 'sgd'.
    Writes the rendered USD stage when finished.
    """
    if (mode == 'gd'):
        # hand-rolled gradient descent on the initial velocity
        for _ in range(self.train_iters):
            loss = self.loss()
            loss.backward()

            print(loss)

            with torch.no_grad():
                self.model.particle_v -= self.train_rate * self.model.particle_v.grad
                self.model.particle_v.grad.zero_()

    if (mode == 'lbfgs'):
        optimizer = torch.optim.LBFGS([self.model.particle_v], self.train_rate, tolerance_grad=1.e-5, history_size=4, line_search_fn="strong_wolfe")

        # L-BFGS re-evaluates the objective via this closure
        def closure():
            optimizer.zero_grad()

            loss = self.loss()
            loss.backward()

            print(loss)
            return loss

        for _ in range(self.train_iters):
            optimizer.step(closure)

    if (mode == 'sgd'):
        optimizer = torch.optim.SGD([self.model.particle_v], lr=self.train_rate, momentum=0.8)

        for _ in range(self.train_iters):
            optimizer.zero_grad()

            loss = self.loss()
            loss.backward()

            print(loss)
            optimizer.step()

    # write out the recorded frames
    self.stage.Save()
#---------
# script entry point: optimize the launch velocity with L-BFGS on the CPU
ballistic = Ballistic(adapter='cpu')
ballistic.train('lbfgs')
| 3,437 |
Python
| 25.446154 | 152 | 0.566773 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_franka.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
    """Franka-arm trajectory-optimization experiment configuration."""

    # simulation timing: 60 fps frames with 4 substeps each
    sim_duration = 4.0     # seconds
    sim_substeps = 4
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    # optimization settings
    train_iters = 128
    train_rate = 10.0

    ground = False

    name = "franka"        # used for the output USD/checkpoint file names
    regularization = 1.e-3

    env_count = 1
    env_dofs = 2
# Load the Franka Panda URDF into a dflex model, enable joint gradients,
# allocate the per-step action tensor, and set up the USD renderer.
# NOTE(review): indentation in this dump is flattened; code kept byte-identical.
def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
torch.manual_seed(42)
builder = df.sim.ModelBuilder()
self.adapter = adapter
self.mode = mode
self.render = render
# cartpole
for i in range(self.env_count):
test_util.urdf_load(
builder,
"assets/franka_description/robots/franka_panda.urdf",
df.transform((0.0, 0.0, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
floating=False,
limit_ke=1.e+3,
limit_kd=1.e+2)
# uniform joint target damping across all joints
for i in range(len(builder.joint_target_kd)):
builder.joint_target_kd[i] = 1.0
# finalize model
self.model = builder.finalize(adapter)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)
#self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)
self.model.joint_q.requires_grad_()
self.model.joint_qd.requires_grad_()
# open-loop action sequence, one row per environment
self.actions = torch.zeros((self.env_count, self.sim_steps), device=adapter, requires_grad=True)
#self.actions = torch.zeros(1, device=adapter, requires_grad=True)
#-----------------------
# set up Usd renderer
if (self.render):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
self.integrator = df.sim.SemiImplicitIntegrator()
# Record a target position (CPU tensor) and draw it as a sphere marker.
def set_target(self, x, name):
self.target = torch.tensor(x, device='cpu')
self.renderer.add_sphere(self.target.tolist(), 0.1, name)
# Forward rollout of the arm.  The loss is currently a constant zero (no
# objective term is accumulated), so this mainly exercises the dynamics and
# rendering paths.
def loss(self):
#---------------
# run simulation
self.sim_time = 0.0
# initial state
self.state = self.model.state()
if (self.render):
traj = []
for e in range(self.env_count):
traj.append([])
loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)
for i in range(0, self.sim_steps):
# simulate
with df.ScopedTimer("fd", detailed=False, active=False):
self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# render
with df.ScopedTimer("render", False):
if (self.render and (i % self.sim_substeps == 0)):
with torch.no_grad():
# draw end effector tracer
# for e in range(self.env_count):
# X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[e*3 + self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
# traj[e].append((X_pole[0], X_pole[1], X_pole[2]))
# # render trajectory
# self.renderer.add_line_strip(traj[e], (1.0, 1.0, 1.0), self.render_time, "traj_" + str(e))
# render scene
self.render_time += self.sim_dt * self.sim_substeps
self.renderer.update(self.state, self.render_time)
self.sim_time += self.sim_dt
return loss
# Single rollout; save the stage if rendering is enabled.
def run(self):
l = self.loss()
if (self.render):
self.stage.Save()
# Finite-difference check of the analytic gradient of the loss w.r.t. the
# first action entry (central difference with step eps).
def verify(self, eps=1.e-4):
params = self.actions
n = 1#len(params)
self.render = False
# evaluate analytic gradient
l = self.loss()
l.backward()
# evaluate numeric gradient
grad_analytic = params.grad.cpu().numpy()
grad_numeric = np.zeros(n)
with torch.no_grad():
df.config.no_grad = True
for i in range(1):
mid = params[0][i].item()
params[0][i] = mid - eps
left = self.loss()
params[0][i] = mid + eps
right = self.loss()
# reset
params[0][i] = mid
# numeric grad
grad_numeric[i] = (right-left)/(2.0*eps)
# report
print("grad_numeric: " + str(grad_numeric))
print("grad_analytic: " + str(grad_analytic))
def train(self, mode='gd'):
    """Optimize the open-loop action sequence self.actions.

    mode: 'gd' (manual gradient descent), 'lbfgs', 'adam', or 'sgd'.
    The best-seen action tensor is checkpointed via self.save().
    """
    self.step_count = 0
    self.best_loss = math.inf

    render_freq = 1
    optimizer = None

    params = [self.actions]

    def closure():
        if (optimizer):
            optimizer.zero_grad()

        # render every N steps
        render = False
        if ((self.step_count % render_freq) == 0):
            render = True

        with df.ScopedTimer("forward"):
            #with torch.autograd.detect_anomaly():
            l = self.loss()

        with df.ScopedTimer("backward"):
            #with torch.autograd.detect_anomaly():
            l.backward()

        print(str(self.step_count) + ": " + str(l))
        self.step_count += 1

        with df.ScopedTimer("save"):
            try:
                if (render):
                    self.stage.Save()
            except:
                print("USD save error")

        # save best trajectory
        if (l.item() < self.best_loss):
            self.save()
            self.best_loss = l.item()

        return l

    with df.ScopedTimer("step"):
        if (mode == 'gd'):
            # simple gradient descent on the action tensor
            for i in range(self.train_iters):
                closure()

                with torch.no_grad():
                    params[0] -= self.train_rate * params[0].grad
                    params[0].grad.zero_()
        else:
            # L-BFGS
            if (mode == 'lbfgs'):
                optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")

            # Adam
            if (mode == 'adam'):
                optimizer = torch.optim.Adam(params, lr=self.train_rate)

            # SGD
            if (mode == 'sgd'):
                optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

            # train
            for i in range(self.train_iters):
                print("Step: " + str(i))
                optimizer.step(closure)

    # BUGFIX: final save.  The original tested the closure-local 'render'
    # here, which is undefined at this scope -- the NameError was silently
    # swallowed by the bare except.  Guard on self.render instead, which is
    # what determines whether a stage exists.
    try:
        if (self.render):
            self.stage.Save()
    except:
        print("USD save error")
# Checkpoint the action tensor to outputs/<name>.pt (pickle-based torch.save).
def save(self):
torch.save(self.actions, "outputs/" + self.name + ".pt")
# Restore a previously saved action tensor.
def load(self):
self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
# script entry point: single rollout on the CPU with rendering
robot = Robot(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
#robot.load()
robot.run()
#robot.train(mode='lbfgs')
#robot.verify(eps=1.e+1)
| 8,591 |
Python
| 27.54485 | 165 | 0.505762 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_util.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import urdfpy
import math
import numpy as np
import os
import xml.etree.ElementTree as ET
import dflex as df
# Convert a URDF link's collision geometries (box/sphere/cylinder/mesh) into
# dflex collision shapes attached to 'link', using the given contact params.
# NOTE(review): indentation in this dump is flattened; code kept byte-identical.
def urdf_add_collision(builder, link, collisions, shape_ke, shape_kd, shape_kf, shape_mu):
# add geometry
for collision in collisions:
# URDF origin matrix -> (xyz, rpy); rpy converted to a quaternion
origin = urdfpy.matrix_to_xyz_rpy(collision.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
geo = collision.geometry
if (geo.box):
# URDF box 'size' is a full extent; dflex takes half extents
builder.add_shape_box(
link,
pos,
rot,
geo.box.size[0]*0.5,
geo.box.size[1]*0.5,
geo.box.size[2]*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.sphere):
builder.add_shape_sphere(
link,
pos,
rot,
geo.sphere.radius,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.cylinder):
# cylinders in URDF are aligned with z-axis, while dFlex uses x-axis
r = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi*0.5)
builder.add_shape_capsule(
link,
pos,
df.quat_multiply(rot, r),
geo.cylinder.radius,
geo.cylinder.length*0.5,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
if (geo.mesh):
# flatten each trimesh into vertex/index buffers for df.Mesh
for m in geo.mesh.meshes:
faces = []
vertices = []
for v in m.vertices:
vertices.append(np.array(v))
for f in m.faces:
faces.append(int(f[0]))
faces.append(int(f[1]))
faces.append(int(f[2]))
mesh = df.Mesh(vertices, faces)
builder.add_shape_mesh(
link,
pos,
rot,
mesh,
ke=shape_ke,
kd=shape_kd,
kf=shape_kf,
mu=shape_mu)
# Load a URDF file into a dflex ModelBuilder: creates the base link (fixed or
# floating at 'xform'), then walks the joints adding links, limits, damping,
# and collision shapes.  NOTE(review): indentation flattened; code byte-identical.
def urdf_load(
builder,
filename,
xform,
floating=False,
armature=0.0,
shape_ke=1.e+4,
shape_kd=1.e+3,
shape_kf=1.e+2,
shape_mu=0.25,
limit_ke=100.0,
limit_kd=10.0):
robot = urdfpy.URDF.load(filename)
# maps from link name -> link index
link_index = {}
builder.add_articulation()
# add base
if (floating):
root = builder.add_link(-1, df.transform_identity(), (0,0,0), df.JOINT_FREE)
# set dofs to transform
start = builder.joint_q_start[root]
# free-joint dofs: 3 position + 4 quaternion components from xform
builder.joint_q[start + 0] = xform[0][0]
builder.joint_q[start + 1] = xform[0][1]
builder.joint_q[start + 2] = xform[0][2]
builder.joint_q[start + 3] = xform[1][0]
builder.joint_q[start + 4] = xform[1][1]
builder.joint_q[start + 5] = xform[1][2]
builder.joint_q[start + 6] = xform[1][3]
else:
root = builder.add_link(-1, xform, (0,0,0), df.JOINT_FIXED)
urdf_add_collision(builder, root, robot.links[0].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
link_index[robot.links[0].name] = root
# add children
for joint in robot.joints:
# map URDF joint types onto dflex joint types
type = None
axis = (0.0, 0.0, 0.0)
if (joint.joint_type == "revolute" or joint.joint_type == "continuous"):
type = df.JOINT_REVOLUTE
axis = joint.axis
if (joint.joint_type == "prismatic"):
type = df.JOINT_PRISMATIC
axis = joint.axis
if (joint.joint_type == "fixed"):
type = df.JOINT_FIXED
if (joint.joint_type == "floating"):
type = df.JOINT_FREE
parent = -1
if joint.parent in link_index:
parent = link_index[joint.parent]
origin = urdfpy.matrix_to_xyz_rpy(joint.origin)
pos = origin[0:3]
rot = df.rpy2quat(*origin[3:6])
# default joint limits when the URDF does not specify any
lower = -1.e+3
upper = 1.e+3
damping = 0.0
# limits
if (joint.limit):
if (joint.limit.lower != None):
lower = joint.limit.lower
if (joint.limit.upper != None):
upper = joint.limit.upper
# damping
if (joint.dynamics):
if (joint.dynamics.damping):
damping = joint.dynamics.damping
# add link
link = builder.add_link(
parent=parent,
X_pj=df.transform(pos, rot),
axis=axis,
type=type,
limit_lower=lower,
limit_upper=upper,
limit_ke=limit_ke,
limit_kd=limit_kd,
damping=damping)
# add collisions
urdf_add_collision(builder, link, robot.link_map[joint.child].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
# add ourselves to the index
link_index[joint.child] = link
# build an articulated tree
def build_tree(
        builder,
        angle,
        max_depth,
        width=0.05,
        length=0.25,
        density=1000.0,
        joint_stiffness=0.0,
        joint_damping=0.0,
        shape_ke = 1.e+4,
        shape_kd = 1.e+3,
        shape_kf = 1.e+2,
        shape_mu = 0.5,
        floating=False):
    """Append a serial chain of `max_depth` capsule segments to the builder.

    Each segment hangs off the previous one via a revolute joint rotated by
    `angle` about z; when `floating` is True the first segment is attached
    with a free joint at the origin instead.
    """
    joint_axis = (0.0, 0.0, 1.0)
    parent = -1

    # the original recursion is a simple tail call building a linear chain,
    # so an iterative loop produces the identical sequence of builder calls
    for depth in range(max_depth):
        if depth == 0 and floating:
            # free-floating root placed at the origin
            X_pj = df.transform((0.0, 0.0, 0.0), df.quat_identity())
            joint_type = df.JOINT_FREE
        else:
            # offset by one full segment length, rotated by `angle` about z
            X_pj = df.transform((length * 2.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), angle))
            joint_type = df.JOINT_REVOLUTE

        segment = builder.add_link(
            parent,
            X_pj,
            joint_axis,
            joint_type,
            stiffness=joint_stiffness,
            damping=joint_damping)

        # capsule shape centered on the segment (x-axis aligned)
        builder.add_shape_capsule(
            segment,
            pos=(length, 0.0, 0.0),
            radius=width,
            half_width=length,
            ke=shape_ke,
            kd=shape_kd,
            kf=shape_kf,
            mu=shape_mu)

        parent = segment
# SNU file format parser
class MuscleUnit:
    """A single muscle parsed from an SNU muscle file.

    Holds an ordered list of attachment waypoints: `bones[i]` is the link
    index and `points[i]` the attachment location for waypoint i.
    """

    def __init__(self):
        # name of the unit; bones/points are parallel per-waypoint lists
        self.name, self.bones, self.points = "", [], []
class Skeleton:
    """Loads an SNU skeleton + muscle description into a dFlex ModelBuilder.

    The skeleton pass creates one rigid link and one box collision shape per
    kept node; the muscle pass then routes MuscleUnit waypoints through the
    created links.  Nodes not in `filter` are skipped (recorded with link
    index -1) but still registered so children/muscles can look them up.
    """

    def __init__(self, skeleton_file, muscle_file, builder, filter):
        # order matters: muscles reference links created by the skeleton pass
        self.parse_skeleton(skeleton_file, builder, filter)
        self.parse_muscles(muscle_file, builder)

    def parse_skeleton(self, filename, builder, filter):
        """Parse the skeleton XML and add one link + box shape per kept node.

        filter: collection of node names to keep; empty keeps every node.
        """
        file = ET.parse(filename)
        root = file.getroot()

        self.node_map = {}       # map node names to link indices
        self.xform_map = {}      # map node names to parent transforms
        self.mesh_map = {}       # map mesh names to link indices objects

        # dof/coord offsets of this articulation within the builder
        self.coord_start = len(builder.joint_q)
        self.dof_start = len(builder.joint_qd)

        # XML joint-type string -> dFlex joint constant
        type_map = {
            "Ball": df.JOINT_BALL,
            "Revolute": df.JOINT_REVOLUTE,
            "Prismatic": df.JOINT_PRISMATIC,
            "Free": df.JOINT_FREE,
            "Fixed": df.JOINT_FIXED
        }

        builder.add_articulation()

        for child in root:
            if (child.tag == "Node"):
                body = child.find("Body")
                joint = child.find("Joint")

                name = child.attrib["name"]
                parent = child.attrib["parent"]
                parent_X_s = df.transform_identity()

                if parent in self.node_map:
                    parent_link = self.node_map[parent]
                    parent_X_s = self.xform_map[parent]
                else:
                    parent_link = -1

                body_xform = body.find("Transformation")
                joint_xform = joint.find("Transformation")

                body_mesh = body.attrib["obj"]
                body_size = np.fromstring(body.attrib["size"], sep=" ")
                body_type = body.attrib["type"]
                body_mass = body.attrib["mass"]     # parsed but currently unused

                # skeleton(world)-space rotation matrices and translations
                body_R_s = np.fromstring(body_xform.attrib["linear"], sep=" ").reshape((3,3))
                body_t_s = np.fromstring(body_xform.attrib["translation"], sep=" ")

                joint_R_s = np.fromstring(joint_xform.attrib["linear"], sep=" ").reshape((3,3))
                joint_t_s = np.fromstring(joint_xform.attrib["translation"], sep=" ")

                joint_type = type_map[joint.attrib["type"]]

                joint_lower = np.array([-1.e+3])
                joint_upper = np.array([1.e+3])

                try:
                    joint_lower = np.fromstring(joint.attrib["lower"], sep=" ")
                    joint_upper = np.fromstring(joint.attrib["upper"], sep=" ")
                except:
                    # limits are optional in the XML; keep the wide defaults
                    pass

                if ("axis" in joint.attrib):
                    joint_axis = np.fromstring(joint.attrib["axis"], sep=" ")
                else:
                    joint_axis = np.array((0.0, 0.0, 0.0))

                body_X_s = df.transform(body_t_s, df.quat_from_matrix(body_R_s))
                joint_X_s = df.transform(joint_t_s, df.quat_from_matrix(joint_R_s))

                mesh_base = os.path.splitext(body_mesh)[0]
                mesh_file = mesh_base + ".usd"

                #-----------------------------------
                # one time conversion, put meshes into local body space (and meter units)

                # stage = Usd.Stage.Open("./assets/snu/OBJ/" + mesh_file)
                # geom = UsdGeom.Mesh.Get(stage, "/" + mesh_base + "_obj/defaultobject/defaultobject")

                # body_X_bs = df.transform_inverse(body_X_s)
                # joint_X_bs = df.transform_inverse(joint_X_s)

                # points = geom.GetPointsAttr().Get()
                # for i in range(len(points)):
                #     p = df.transform_point(joint_X_bs, points[i]*0.01)
                #     points[i] = Gf.Vec3f(p.tolist())  # cm -> meters

                # geom.GetPointsAttr().Set(points)

                # extent = UsdGeom.Boundable.ComputeExtentFromPlugins(geom, 0.0)
                # geom.GetExtentAttr().Set(extent)
                # stage.Save()
                #--------------------------------------

                link = -1

                if len(filter) == 0 or name in filter:
                    # joint pose relative to the parent joint frame, and body
                    # pose relative to its own joint frame
                    joint_X_p = df.transform_multiply(df.transform_inverse(parent_X_s), joint_X_s)
                    body_X_c = df.transform_multiply(df.transform_inverse(joint_X_s), body_X_s)

                    if (parent_link == -1):
                        joint_X_p = df.transform_identity()

                    # add link
                    link = builder.add_link(
                        parent=parent_link,
                        X_pj=joint_X_p,
                        axis=joint_axis,
                        type=joint_type,
                        damping=2.0,
                        stiffness=10.0,
                        limit_lower=joint_lower[0],
                        limit_upper=joint_upper[0])

                    # add shape
                    shape = builder.add_shape_box(
                        body=link,
                        pos=body_X_c[0],
                        rot=body_X_c[1],
                        hx=body_size[0]*0.5,
                        hy=body_size[1]*0.5,
                        hz=body_size[2]*0.5,
                        ke=1.e+3*5.0,
                        kd=1.e+2*2.0,
                        kf=1.e+2,
                        mu=0.5)

                # add lookup in name->link map, even for filtered-out nodes
                # (link stays -1) so children and muscles can resolve them;
                # save parent transform
                self.xform_map[name] = joint_X_s
                self.node_map[name] = link
                self.mesh_map[mesh_base] = link

    def parse_muscles(self, filename, builder):
        """Parse the muscle XML; a muscle touching any filtered-out link
        (index -1) is dropped in its entirety."""
        # list of MuscleUnits
        muscles = []

        file = ET.parse(filename)
        root = file.getroot()

        self.muscle_start = len(builder.muscle_activation)

        for child in root:
            if (child.tag == "Unit"):
                unit_name = child.attrib["name"]
                unit_f0 = float(child.attrib["f0"])
                unit_lm = float(child.attrib["lm"])
                unit_lt = float(child.attrib["lt"])
                unit_lmax = float(child.attrib["lmax"])
                unit_pen = float(child.attrib["pen_angle"])

                m = MuscleUnit()
                m.name = unit_name

                incomplete = False

                for waypoint in child.iter("Waypoint"):
                    way_bone = waypoint.attrib["body"]
                    way_link = self.node_map[way_bone]
                    way_loc = np.fromstring(waypoint.attrib["p"], sep=" ", dtype=np.float32)

                    if (way_link == -1):
                        incomplete = True
                        break

                    # transform loc to joint local space
                    joint_X_s = self.xform_map[way_bone]

                    way_loc = df.transform_point(df.transform_inverse(joint_X_s), way_loc)

                    m.bones.append(way_link)
                    m.points.append(way_loc)

                if not incomplete:
                    muscles.append(m)
                    builder.add_muscle(m.bones, m.points, f0=unit_f0, lm=unit_lm, lt=unit_lt, lmax=unit_lmax, pen=unit_pen)

        self.muscles = muscles
| 14,815 |
Python
| 29.802495 | 130 | 0.468512 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_contact.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Contact:
    """Differentiable single-particle contact test: optimize the particle's
    initial velocity so that it comes to rest on a target position."""

    sim_duration = 2.0       # seconds
    sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    train_iters = 16
    train_rate = 0.1         #1.0/(sim_dt*sim_dt)

    def __init__(self, adapter='cpu'):
        # single particle dropped from 1.5m; its velocity is the trainable param
        builder = df.sim.ModelBuilder()
        builder.add_particle((0.0, 1.5, 0.0), (0.0, 0.0, 0.0), 0.25)

        self.target_pos = torch.tensor((3.0, 0.0, 0.0), device=adapter)
        self.target_index = 0

        self.model = builder.finalize(adapter)
        self.model.contact_ke = 1.e+3
        self.model.contact_kf = 10.0
        self.model.contact_kd = 10.0
        self.model.contact_mu = 0.25

        # differentiate the loss w.r.t. the initial particle velocities
        self.model.particle_qd.requires_grad = True

        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/contact.usda")

        self.renderer = df.render.UsdRenderer(self.model, self.stage)
        self.renderer.draw_points = True
        self.renderer.draw_springs = True
        self.renderer.draw_shapes = True
        self.renderer.add_sphere(self.target_pos.tolist(), 0.1, "target")

        self.integrator = df.sim.SemiImplicitIntegrator()

    def loss(self):
        """Run one full episode; return distance of the particle to the target."""
        #-----------------------
        # run simulation
        self.state = self.model.state()

        for i in range(0, self.sim_steps):
            self.state = self.integrator.forward(self.model, self.state, self.sim_dt)

            # render one frame per sim_substeps integrator steps
            if (i % self.sim_substeps) == 0:
                self.renderer.update(self.state, self.sim_time)

            self.sim_time += self.sim_dt

        self.stage.Save()

        loss = torch.norm(self.state.particle_q[self.target_index] - self.target_pos)
        return loss

    def run(self):
        """Single forward rollout without optimization."""
        l = self.loss()
        self.stage.Save()

    def train(self, mode='gd'):
        """Optimize the initial velocities; mode is 'gd', 'lbfgs', or 'sgd'."""
        # param to train
        param = self.model.particle_qd

        # Gradient Descent
        if (mode == 'gd'):
            for i in range(self.train_iters):
                l = self.loss()
                l.backward()

                print("loss: " + str(l.item()))
                print("v: " + str(param))
                print("vgrad: " + str(param.grad))
                print("--------------------")

                with torch.no_grad():
                    param -= self.train_rate * param.grad
                    param.grad.zero_()

        # L-BFGS
        if (mode == 'lbfgs'):
            optimizer = torch.optim.LBFGS([param], 1.0, tolerance_grad=1.e-5, history_size=4, line_search_fn="strong_wolfe")

            def closure():
                optimizer.zero_grad()
                l = self.loss()
                l.backward()
                print(l)
                return l

            for i in range(self.train_iters):
                optimizer.step(closure)

        # SGD
        if (mode == 'sgd'):
            optimizer = torch.optim.SGD([param], lr=self.train_rate, momentum=0.8)

            for i in range(self.train_iters):
                optimizer.zero_grad()

                l = self.loss()
                l.backward()

                print(l)
                print(param)

                optimizer.step()

        self.stage.Save()
#---------
# entry point: build the scene on CPU and optimize with L-BFGS
contact = Contact(adapter='cpu')
contact.train('lbfgs')
| 3,942 |
Python
| 25.112583 | 124 | 0.54693 |
RoboticExplorationLab/Deep-ILC/dflex/tests/kit_walker.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
# when True the script runs inside Omniverse Kit and uses its live USD stage;
# when False it runs standalone and opens kit_walker.usda from disk
use_omni = False

if use_omni:
    import omni.usd
class Experiment:
    """Soft-body locomotion experiment: a tetrahedral "bear" mesh whose per-tet
    activations are produced by a phase-conditioned linear network, trained to
    move in +x while penalizing vertical/lateral center-of-mass velocity."""

    name = "kit_walker"
    network_file = None     # optional pretrained network to load in reset()

    record = True
    render_time = 0.0
    render_enabled = True

    def __init__(self):
        # all setup is deferred to reset() so the host (e.g. Omniverse) can
        # control when the stage is available
        pass

    def reset(self, adapter='cuda'):
        """Build model, network, and renderer; call before run()/train()."""
        self.episode_duration = 5.0      # seconds
        self.frame_dt = 1.0/60.0
        self.frame_count = int(self.episode_duration/self.frame_dt)

        self.sim_substeps = 64
        self.sim_dt = self.frame_dt / self.sim_substeps
        self.sim_time = 0.0

        self.train_max_iters = 10000
        self.train_iter = 0
        self.train_rate = 0.025
        self.train_loss = []
        self.train_loss_best = math.inf

        # sinusoidal gait phases fed to the network
        self.phase_count = 8
        self.phase_step = math.pi / self.phase_count * 2.0
        self.phase_freq = 5.0

        self.render_time = 0.0

        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        self.train_loss = []
        self.optimizer = None

        #mesh = Usd.Stage.Open("assets/prop.usda")

        if use_omni == False:
            stage = Usd.Stage.Open("kit_walker.usda")
        else:
            stage = omni.usd.get_context().get_stage()

        # (commented-out "ostrich" soft-mesh variant removed for brevity)

        # bear: read points/tets from USD and bake the world transform into
        # the vertex positions
        geom = UsdGeom.Mesh(stage.GetPrimAtPath("/bear"))
        points = geom.GetPointsAttr().Get()

        xform = geom.ComputeLocalToWorldTransform(0.0)
        for i in range(len(points)):
            points[i] = xform.Transform(points[i])

        tet_indices = geom.GetPrim().GetAttribute("tetraIndices").Get()
        tri_indices = geom.GetFaceVertexIndicesAttr().Get()
        tri_counts = geom.GetFaceVertexCountsAttr().Get()

        builder.add_soft_mesh(pos=(0.0, 0.0, 0.0),
                              rot=df.quat_identity(),
                              scale=2.0,
                              vel=(0.0, 0.0, 0.0),
                              vertices=points,
                              indices=tet_indices,
                              density=1.0,
                              k_mu=2000.0,
                              k_lambda=2000.0,
                              k_damp=2.0)

        # (commented-out "table" mesh / soft-grid / single-tet debug setups removed)

        self.model = builder.finalize(adapter)

        # disable triangle dynamics (just used for rendering)
        self.model.tri_ke = 0.0
        self.model.tri_ka = 0.0
        self.model.tri_kd = 0.0
        self.model.tri_kb = 0.0

        self.model.contact_ke = 1.e+3*2.0
        self.model.contact_kd = 0.1
        self.model.contact_kf = 10.0
        self.model.contact_mu = 0.7

        self.model.particle_radius = 0.05
        self.model.ground = True

        # one fully connected layer + tanh activation
        self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.tet_count, bias=False), torch.nn.Tanh()).to(adapter)

        self.activation_strength = 0.3
        self.activation_penalty = 0.0

        #-----------------------
        # set up Usd renderer (reuses the stage the mesh was loaded from)
        self.stage = stage

        if (self.stage):
            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True
            self.render_time = 0.0

        self.integrator = df.sim.SemiImplicitIntegrator()
        self.state = self.model.state()

        if self.network_file:
            self.load(self.network_file)

    def inference(self):
        """Evaluate the network at the current sim time to set tet activations."""
        # build sinusoidal input phases
        with df.ScopedTimer("inference", False):
            phases = torch.zeros(self.phase_count, device=self.model.adapter)
            for p in range(self.phase_count):
                phases[p] = math.sin(self.phase_freq*self.sim_time + p * self.phase_step)

            # compute activations
            self.model.tet_activations = self.network(phases) * self.activation_strength

    def simulate(self, no_grad=False):
        """Advance one frame, i.e. sim_substeps integrator sub-steps."""
        # set grad mode
        df.config.no_grad = no_grad

        for i in range(self.sim_substeps):
            self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
            self.sim_time += self.sim_dt

    def render(self):
        """Push the current state to the USD renderer (when recording)."""
        with df.ScopedTimer("render", False):
            if (self.record):
                self.render_time += self.frame_dt

            if (self.stage):
                self.renderer.update(self.state, self.render_time)

    def loss(self, render=True):
        """Roll out a full episode and accumulate the locomotion loss."""
        #-----------------------
        # run simulation
        self.sim_time = 0.0
        self.state = self.model.state()

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for f in range(self.frame_count):
            self.inference()
            self.simulate()

            if (self.render_enabled):
                self.render()

            # loss: reward +x COM velocity, penalize y/z drift per frame
            with df.ScopedTimer("loss", False):
                com_loss = torch.mean(self.state.particle_qd, 0)
                #act_loss = torch.norm(self.model.tet_activations)*self.activation_penalty

                loss = loss - com_loss[0] + torch.norm(com_loss[1]) + torch.norm(com_loss[2])# + act_loss

        return loss

    def run(self, profile=False):
        """Single gradient-free frame (used when driven interactively)."""
        self.inference()
        self.simulate(no_grad=True)

        if (self.render_enabled):
            self.render()

    def train(self, mode='gd'):
        """One optimization step; mode is 'gd', 'lbfgs', 'adam', or 'sgd'."""
        # create optimizer if requested (kept across calls)
        if (self.optimizer == None):
            # L-BFGS
            if (mode == 'lbfgs'):
                self.optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

            # Adam
            if (mode == 'adam'):
                self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)

            # SGD
            if (mode == 'sgd'):
                self.optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.5, nesterov=True)

        # closure for evaluating loss (called by optimizers)
        def closure():
            if (self.optimizer):
                self.optimizer.zero_grad()

            # render every N steps
            render = True

            with df.ScopedTimer("forward"):
                l = self.loss(render)

            # save best network so far
            if (l < self.train_loss_best):
                self.train_loss_best = float(l)
                self.save()

            self.train_loss.append(float(l))

            df.log("Iteration: {} Loss: {}".format(len(self.train_loss), l.item()))

            # save USD file
            if use_omni == False:
                try:
                    self.stage.Save()
                except:
                    print("Usd save error")

            # calculate gradient
            with df.ScopedTimer("backward"):
                l.backward()

            return l

        # perform optimization step
        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                closure()

                with torch.no_grad():
                    params = self.network.parameters()

                    for p in params:
                        if p.grad is None:
                            continue

                        p -= self.train_rate * p.grad
                        p.grad.zero_()
            else:
                self.optimizer.step(closure)

        self.train_iter += 1

    def save(self):
        """Serialize the whole network module, tagged with the iteration count."""
        torch.save(self.network, self.name + str(self.train_iter) + ".pt")

    def load(self, file):
        """Load a pretrained network and switch it to eval mode."""
        self.network = torch.load(file)
        self.network.eval()

        df.log("Loaded pretrained network: " + file)
#---------
experiment = Experiment()

# NOTE(review): indentation reconstructed — the standalone branch appears to
# reset and train here, while under Omniverse the extension drives the
# experiment instead; confirm against the original file layout.
if use_omni == False:
    experiment.reset(adapter='cuda')

    #experiment.load("kit_walker19.pt")
    #experiment.train_iter = 19

    # with df.ScopedTimer("update", detailed=False):
    #     for i in range(experiment.frame_count):
    #         experiment.run()
    #     experiment.stage.Save()

    # disable per-frame rendering during training for speed
    experiment.render_enabled = False

    #with torch.autograd.profiler.profile() as prof:
    with df.ScopedTimer("train", detailed=True):
        #for i in range(experiment.train_max_iters):
        experiment.train('adam')

    #print(prof.key_averages().table())
| 11,009 |
Python
| 29.414365 | 174 | 0.512762 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_lift_drag.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
from torch.utils.tensorboard import SummaryWriter
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
# global switch for df.ScopedTimer output; set to True to print timings
df.ScopedTimer.enabled = False
class Cloth:
    """Differentiable cloth lift/drag test: optimize a cloth sheet's initial
    velocity so its center of mass reaches a target position."""

    sim_duration = 2.0       # seconds
    sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0
    render_time = 0.0

    train_iters = 4
    train_rate = 0.01 / sim_substeps

    phase_count = 4

    def __init__(self, adapter='cpu'):
        torch.manual_seed(42)

        height = 2.5

        # 16x16 cloth grid tilted so lift/drag forces act on it during flight
        builder = df.sim.ModelBuilder()
        builder.add_cloth_grid(pos=(0.0, height, 0.0),
                               rot=df.quat_from_axis_angle((1.0, 0.5, 0.0), math.pi * 0.5),
                               vel=(0.0, 0.0, 0.0),
                               dim_x=16,
                               dim_y=16,
                               cell_x=0.125,
                               cell_y=0.125,
                               mass=1.0)  #, fix_left=True, fix_right=True, fix_top=True, fix_bottom=True)

        self.model = builder.finalize(adapter)
        self.model.tri_ke = 10000.0
        self.model.tri_ka = 10000.0
        self.model.tri_kd = 100.0

        # aerodynamic coefficients driving the behavior under test
        self.model.tri_lift = 10.0
        self.model.tri_drag = 5.0

        self.model.contact_ke = 1.e+4
        self.model.contact_kd = 1000.0
        self.model.contact_kf = 1000.0
        self.model.contact_mu = 0.5

        self.model.particle_radius = 0.01
        self.model.ground = False

        self.target = torch.tensor((8.0, 0.0, 0.0), device=adapter)

        # trainable parameter: uniform initial velocity added to all particles
        self.initial_velocity = torch.tensor((1.0, 0.0, 0.0), requires_grad=True, device=adapter)

        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/drag.usd")

        self.renderer = df.render.UsdRenderer(self.model, self.stage)
        self.renderer.draw_points = True
        self.renderer.draw_springs = True
        self.renderer.draw_shapes = True
        self.renderer.add_sphere(self.target.tolist(), 0.1, "target")

        self.integrator = df.sim.SemiImplicitIntegrator()

    def loss(self, render=True):
        """Run one episode; accumulate COM distance-to-target every step."""
        # reset state
        self.sim_time = 0.0
        self.state = self.model.state()
        self.state.particle_qd = self.state.particle_qd + self.initial_velocity

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        # run simulation
        for i in range(0, self.sim_steps):
            with df.ScopedTimer("simulate", False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)

            with df.ScopedTimer("render", False):
                if (render and (i % self.sim_substeps == 0)):
                    self.render_time += self.sim_dt * self.sim_substeps
                    self.renderer.update(self.state, self.render_time)

            self.sim_time += self.sim_dt

            # compute loss
            with df.ScopedTimer("loss", False):
                com_pos = torch.mean(self.state.particle_q, 0)
                com_vel = torch.mean(self.state.particle_qd, 0)

                # use integral of velocity over course of the run
                loss = loss + torch.norm(com_pos - self.target)

        return loss

    def run(self):
        """Single forward rollout without optimization."""
        l = self.loss()
        self.stage.Save()

    def train(self, mode='gd'):
        """Optimize the initial velocity; logs to TensorBoard via SummaryWriter."""
        writer = SummaryWriter()
        writer.add_hparams({"lr": self.train_rate, "mode": mode}, {})

        # param to train
        self.step_count = 0
        self.render_steps = 1

        optimizer = None

        param = self.initial_velocity

        def closure():
            if (optimizer):
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % self.render_steps) == 0):
                render = True

            with df.ScopedTimer("forward"):
                l = self.loss(render)

            with df.ScopedTimer("backward"):
                l.backward()

            with df.ScopedTimer("save"):
                if (render):
                    self.stage.Save()

            print(str(self.step_count) + ": " + str(l))
            writer.add_scalar("loss", l.item(), self.step_count)
            writer.flush()

            self.step_count += 1
            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        param -= self.train_rate * param.grad
                        param.grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS([param], lr=0.1, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam([param], lr=self.train_rate * 4.0)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD([param], lr=self.train_rate * (1.0 / 32.0), momentum=0.8)

                # train
                for i in range(self.train_iters):
                    optimizer.step(closure)

        writer.close()

    # NOTE(review): self.network is never assigned anywhere in this class,
    # so save()/load() only work if a network is attached externally — confirm.
    def save(self, file):
        torch.save(self.network, file)

    def load(self, file):
        self.network = torch.load(file)
        self.network.eval()
#---------
# entry point: build the cloth scene on CPU and optimize with L-BFGS
cloth = Cloth(adapter='cpu')
cloth.train('lbfgs')
#cloth.run()
| 6,236 |
Python
| 28.559242 | 156 | 0.533355 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_cartpole.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
    """Differentiable cartpole swing-up: optimize per-environment cart force
    trajectories so the poles balance upright after `reward_start` seconds."""

    sim_duration = 4.0       # seconds
    sim_substeps = 4
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    train_iters = 128
    train_rate = 10.0

    ground = True

    name = "cartpole"

    regularization = 1.e-3

    env_count = 16
    env_dofs = 2         # cart translation + pole rotation per environment

    def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
        """Build env_count cartpoles from the URDF and allocate the action
        trajectories (one force sequence per environment) to optimize."""
        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        self.adapter = adapter
        self.mode = mode
        self.render = render

        link_width = 0.5
        max_depth = depth

        # cartpole: one URDF instance per environment, spaced along z
        for i in range(self.env_count):
            test_util.urdf_load(builder, "assets/" + self.name + ".urdf", df.transform((0.0, 2.5, -2.0 + i*2.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)), floating=False)

            # start with the pole hanging down (swing-up task)
            builder.joint_q[i*2 + 0] = 0
            builder.joint_q[i*2 + 1] = -math.pi*0.5   # + i*0.25

        self.pole_angle_penalty = 10.0
        self.pole_velocity_penalty = 0.5

        self.cart_action_penalty = 1.e-7
        self.cart_velocity_penalty = 1.0
        self.cart_position_penalty = 2.0

        if self.name == "cartpole":
            self.marker_body = 2
            self.marker_offset = 1.0
            self.discount_scale = 2.0
            self.discount_factor = 0.5

        if self.name == "cartpole_double":
            self.marker_body = 3
            self.marker_offset = 0.5
            self.discount_scale = 6.0
            self.discount_factor = 0.5

        # (commented-out humanoid variant removed for brevity)

        # finalize model
        self.model = builder.finalize(adapter)
        self.model.ground = self.ground
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), device=adapter)

        self.model.joint_q.requires_grad_()
        self.model.joint_qd.requires_grad_()

        # trainable parameter: one cart-force value per environment per step
        self.actions = torch.zeros((self.env_count, self.sim_steps), device=adapter, requires_grad=True)

        #-----------------------
        # set up Usd renderer
        if (self.render):
            self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")

            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True

            self.render_time = 0.0

        self.integrator = df.sim.SemiImplicitIntegrator()

    def set_target(self, x, name):
        """Place a visual target sphere; requires render=True."""
        self.target = torch.tensor(x, device='cpu')

        self.renderer.add_sphere(self.target.tolist(), 0.1, name)

    def loss(self):
        """Roll out all environments and return the discounted balancing loss."""
        #---------------
        # run simulation
        self.sim_time = 0.0

        # initial state
        self.state = self.model.state()

        if (self.render):
            traj = []
            for e in range(self.env_count):
                traj.append([])

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for i in range(0, self.sim_steps):
            # apply actions: even dof indices (0, 2, 4, ...) are the cart dofs
            self.state.joint_act[::2] = self.actions[:, i]

            # simulate
            with df.ScopedTimer("fd", detailed=False, active=False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt, update_mass_matrix=(i%1==0))

            # render
            with df.ScopedTimer("render", False):
                if (self.render and (i % self.sim_substeps == 0)):
                    with torch.no_grad():
                        # (end-effector tracer drawing kept disabled)
                        self.render_time += self.sim_dt * self.sim_substeps
                        self.renderer.update(self.state, self.render_time)

            self.sim_time += self.sim_dt

            # reward only after reward_start seconds, discounted over time
            reward_start = 2.0

            if self.sim_time > reward_start:
                discount_time = (self.sim_time - reward_start)
                discount = math.pow(self.discount_factor, discount_time*self.discount_scale)

                pole_rot = self.state.joint_q[1::2]      # odd dofs: pole angles
                pole_vel = self.state.joint_qd[1::2]

                cart_pos = self.state.joint_q[0::2]      # even dofs: cart positions
                cart_vel = self.state.joint_qd[0::2]

                actions = self.actions.view(-1)

                loss = loss + (torch.dot(pole_rot, pole_rot)*self.pole_angle_penalty +
                               torch.dot(pole_vel, pole_vel)*self.pole_velocity_penalty +
                               torch.dot(cart_pos, cart_pos)*self.cart_position_penalty +
                               torch.dot(cart_vel, cart_vel)*self.cart_velocity_penalty)*discount

                loss = loss + torch.dot(actions, actions)*self.cart_action_penalty

        return loss

    def run(self):
        """Single forward rollout without optimization."""
        l = self.loss()

        if (self.render):
            self.stage.Save()

    def verify(self, eps=1.e-4):
        """Compare the analytic gradient of the first action against a
        central finite difference (first entry only)."""
        params = self.actions
        n = 1  # len(params)

        self.render = False

        # evaluate analytic gradient
        l = self.loss()
        l.backward()

        # evaluate numeric gradient
        grad_analytic = params.grad.cpu().numpy()
        grad_numeric = np.zeros(n)

        with torch.no_grad():
            df.config.no_grad = True

            for i in range(1):
                mid = params[0][i].item()

                params[0][i] = mid - eps
                left = self.loss()

                params[0][i] = mid + eps
                right = self.loss()

                # reset
                params[0][i] = mid

                # numeric grad
                grad_numeric[i] = (right-left)/(2.0*eps)

        # report
        print("grad_numeric: " + str(grad_numeric))
        print("grad_analytic: " + str(grad_analytic))

    def train(self, mode='gd'):
        """Optimize the action trajectories; mode is 'gd', 'lbfgs', 'adam', or 'sgd'."""
        # param to train
        self.step_count = 0
        self.best_loss = math.inf

        render_freq = 1

        optimizer = None

        params = [self.actions]

        def closure():
            if (optimizer):
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            with df.ScopedTimer("forward"):
                #with torch.autograd.detect_anomaly():
                l = self.loss()

            with df.ScopedTimer("backward"):
                #with torch.autograd.detect_anomaly():
                l.backward()

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    if (render):
                        self.stage.Save()
                except:
                    print("USD save error")

            # save best trajectory
            if (l.item() < self.best_loss):
                self.save()
                self.best_loss = l.item()

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

                # train
                for i in range(self.train_iters):
                    print("Step: " + str(i))
                    optimizer.step(closure)

        # final save
        # FIX: the original guarded on `render`, a closure-local that is
        # undefined at method scope — the resulting NameError was silently
        # swallowed by the bare except, so the final save never happened.
        # Gate on whether rendering/USD output is enabled instead.
        try:
            if (self.render):
                self.stage.Save()
        except:
            print("USD save error")

    def save(self):
        """Persist the current action trajectories."""
        torch.save(self.actions, "outputs/" + self.name + ".pt")

    def load(self):
        """Restore previously saved action trajectories."""
        self.actions = torch.load("outputs/" + self.name + ".pt")
#---------
# entry point: 16 cartpole environments on GPU, optimized with L-BFGS
robot = Robot(depth=1, mode='dflex', render=True, adapter='cuda')

#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True

#robot.load()
#df.config.no_grad = False
#robot.run()

robot.train(mode='lbfgs')

#robot.verify(eps=1.e+1)
| 10,960 |
Python
| 29.030137 | 185 | 0.506296 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_snu.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class HumanoidSNU:
    """Differentiable-simulation test for the SNU musculoskeletal model.

    Builds a partial human skeleton (arm or neck, selected by `name`) with
    muscles, simulates it with dflex, and optimizes per-muscle activations
    against a task-specific loss (reach a target / orient the head).
    """

    # simulation timing: 1 second at 60 fps with 16 substeps per frame
    sim_duration = 1.0      # seconds
    sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    train_iters = 128
    train_rate = 0.01

    ground = False

    # selects which sub-skeleton and loss are used ("humanoid_snu_arm" or "humanoid_snu_neck")
    name = "humanoid_snu_neck"

    regularization = 1.e-3

    env_count = 16
    env_dofs = 2

    def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
        """Build the model, renderer, and trainable activation tensor.

        depth   -- unused here; kept for interface parity with sibling tests
        mode    -- backend label stored on the instance (e.g. 'dflex')
        render  -- when True, create a USD stage and renderer
        adapter -- torch device string ('cpu' or 'cuda')
        """
        torch.manual_seed(41)

        builder = df.sim.ModelBuilder()

        self.adapter = adapter
        self.mode = mode
        self.render = render

        # `filter` restricts which skeleton nodes are parsed from the XML
        self.filter = {}

        if self.name == "humanoid_snu_arm":
            self.filter = { "ShoulderR", "ArmR", "ForeArmR", "HandR", "Torso", "Neck" }
            self.ground = False

        if self.name == "humanoid_snu_neck":
            self.filter = { "Torso", "Neck", "Head", "ShoulderR", "ShoulderL"}
            self.ground = False

        self.node_map, self.xform_map, self.mesh_map = test_util.parse_skeleton("assets/snu/arm.xml", builder, self.filter)
        self.muscles = test_util.parse_muscles("assets/snu/muscle284.xml", builder, self.node_map, self.xform_map)

        # set initial position 1m off the ground
        if self.name == "humanoid_snu":
            builder.joint_q[1] = 1.0

        # finalize model
        self.model = builder.finalize(adapter)
        self.model.ground = self.ground
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
        #self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)

        # trainable per-muscle activations, shape (1, muscle_count)
        self.activations = torch.zeros((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)
        #self.activations = torch.rand((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)

        self.model.joint_q.requires_grad = True
        self.model.joint_qd.requires_grad = True
        self.model.muscle_activation.requires_grad = True

        # loss weights
        self.target_penalty = 1.0
        self.velocity_penalty = 0.1
        self.action_penalty = 0.0

        self.muscle_strength = 40.0

        self.discount_scale = 2.0
        self.discount_factor = 1.0

        #-----------------------
        # set up Usd renderer
        if (self.render):
            self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")

            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True
            self.render_time = 0.0
        else:
            self.renderer = None

        self.set_target((-0.1, 0.1, 0.5), "target")

        self.integrator = df.sim.SemiImplicitIntegrator()

    def set_target(self, x, name):
        """Set the reach target position `x` and draw it as a small sphere."""
        self.target = torch.tensor(x, dtype=torch.float32, device=self.adapter)

        if (self.renderer):
            self.renderer.add_sphere(self.target.tolist(), 0.05, name)

    def loss(self):
        """Run the full simulation and return the task loss (differentiable)."""
        #---------------
        # run simulation
        self.sim_time = 0.0

        # initial state
        self.state = self.model.state()

        self.model.collide(self.state)

        if (self.render):
            traj = []
            for e in range(self.env_count):
                traj.append([])

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for i in range(0, self.sim_steps):

            # apply actions: squash raw parameters into [0, muscle_strength]
            self.model.muscle_activation = (torch.tanh(4.0*self.activations[0] - 2.0)*0.5 + 0.5)*self.muscle_strength
            #self.model.muscle_activation = self.activations[0]*self.muscle_strength

            # simulate
            with df.ScopedTimer("fd", detailed=False, active=False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)

            # render (no grad needed for visualization)
            with df.ScopedTimer("render", False):
                if (self.render and (i % self.sim_substeps == 0)):
                    with torch.no_grad():
                        # draw end effector tracer
                        # for e in range(self.env_count):
                        #     X_pole = df.transform_point(df.transform_expand(self.state.body_X_sc[e*3 + self.marker_body].tolist()), (0.0, 0.0, self.marker_offset))
                        #     traj[e].append((X_pole[0], X_pole[1], X_pole[2]))
                        #     # render trajectory
                        #     self.renderer.add_line_strip(traj[e], (1.0, 1.0, 1.0), self.render_time, "traj_" + str(e))

                        # draw skeleton meshes at their current world transforms
                        for mesh, link in self.mesh_map.items():
                            if link != -1:
                                X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())
                                #self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
                                self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)

                        # draw muscle fibers colored by activation level
                        for m in range(self.model.muscle_count):
                            start = self.model.muscle_start[m]
                            end = self.model.muscle_start[m+1]

                            points = []

                            for w in range(start, end):
                                link = self.model.muscle_links[w]
                                point = self.model.muscle_points[w].cpu()

                                X_sc = df.transform_expand(self.state.body_X_sc[link].cpu().tolist())

                                points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))

                            self.renderer.add_line_strip(points, name=self.muscles[m].name, radius=0.0075, color=(self.model.muscle_activation[m]/self.muscle_strength, 0.2, 0.5), time=self.render_time)

                        # render scene
                        self.render_time += self.sim_dt * self.sim_substeps
                        self.renderer.update(self.state, self.render_time)

            self.sim_time += self.sim_dt

            # loss
            if self.name == "humanoid_snu_arm":
                # distance of the right hand to the target, accumulated per step
                hand_pos = self.state.body_X_sc[self.node_map["HandR"]][0:3]

                discount_time = self.sim_time
                discount = math.pow(self.discount_factor, discount_time*self.discount_scale)

                # loss = loss + (torch.norm(hand_pos - self.target)*self.target_penalty +
                #                torch.norm(self.state.joint_qd)*self.velocity_penalty +
                #                torch.norm(self.model.muscle_activation)*self.action_penalty)*discount

                #loss = loss + torch.norm(self.state.joint_qd)
                loss = loss + torch.norm(hand_pos - self.target)*self.target_penalty

            if self.name == "humanoid_snu_neck":

                # rotate a vector by the body transform's quaternion (t[3:7])
                def transform_vector_torch(t, x):
                    axis = t[3:6]
                    w = t[6]
                    return x * (2.0 *w*w - 1.0) + torch.cross(axis, x) * w * 2.0 + axis * torch.dot(axis, x) * 2.0

                forward_dir = torch.tensor((0.0, 0.0, 1.0), dtype=torch.float32, device=self.adapter)
                up_dir = torch.tensor((0.0, 1.0, 0.0), dtype=torch.float32, device=self.adapter)
                target_dir = torch.tensor((1.0, 0.0, 0.1), dtype=torch.float32, device=self.adapter)

                head_forward = transform_vector_torch(self.state.body_X_sc[self.node_map["Head"]], forward_dir)
                head_up = transform_vector_torch(self.state.body_X_sc[self.node_map["Head"]], up_dir)

                # reward (negative loss) for aligning head forward/up with targets
                loss = loss - torch.dot(head_forward, target_dir)*self.target_penalty - torch.dot(head_up, up_dir)*self.target_penalty

        return loss

    def run(self):
        """Evaluate the loss once without gradients and save the USD stage."""
        df.config.no_grad = True

        with torch.no_grad():
            l = self.loss()

        if (self.render):
            self.stage.Save()

    def verify(self, eps=1.e-4):
        """Compare the analytic gradient against a central finite difference.

        NOTE(review): reads self.actions, which is never set by this class
        (__init__ creates self.activations) — presumably a stale name from a
        sibling test; verify() as written would raise AttributeError. Confirm.
        """
        params = self.actions
        n = 1#len(params)

        self.render = False

        # evaluate analytic gradient
        l = self.loss()
        l.backward()

        # evaluate numeric gradient
        grad_analytic = params.grad.cpu().numpy()
        grad_numeric = np.zeros(n)

        with torch.no_grad():
            df.config.no_grad = True

            for i in range(1):
                mid = params[0][i].item()

                params[0][i] = mid - eps
                left = self.loss()

                params[0][i] = mid + eps
                right = self.loss()

                # reset
                params[0][i] = mid

                # numeric grad (central difference)
                grad_numeric[i] = (right-left)/(2.0*eps)

        # report
        print("grad_numeric: " + str(grad_numeric))
        print("grad_analytic: " + str(grad_analytic))

    def train(self, mode='gd'):
        """Optimize the muscle activations with the chosen optimizer.

        mode -- 'gd' (manual gradient descent), 'lbfgs', 'adam', or 'sgd'
        """
        # param to train
        self.step_count = 0
        self.best_loss = math.inf

        render_freq = 1

        optimizer = None

        params = [self.activations]

        def closure():
            # shared forward/backward step used by all optimizer modes
            if (optimizer):
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            with df.ScopedTimer("forward"):
                #with torch.autograd.detect_anomaly():
                l = self.loss()

            with df.ScopedTimer("backward"):
                #with torch.autograd.detect_anomaly():
                l.backward()

            # for e in range(self.env_count):
            #     print(self.actions.grad[e][0:20])
            #print(self.activations.grad)

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    if (render):
                        self.stage.Save()
                except:
                    print("USD save error")

            # save best trajectory
            if (l.item() < self.best_loss):
                self.save()
                self.best_loss = l.item()

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9)#, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

                # train
                for i in range(self.train_iters):
                    print("Step: " + str(i))
                    optimizer.step(closure)

        # final save
        # NOTE(review): `render` is only defined inside closure(); the NameError
        # raised here is silently swallowed by the bare except — confirm intent.
        try:
            if (render):
                self.stage.Save()
        except:
            print("USD save error")

    def save(self):
        # persist the trainable activations for later reload
        torch.save(self.activations, "outputs/" + self.name + ".pt")

    def load(self):
        # restore previously saved activations
        self.activations = torch.load("outputs/" + self.name + ".pt")
#---------
# Entry point: build the neck model on CPU and optimize activations with Adam.
env = HumanoidSNU(depth=1, mode='dflex', render=True, adapter='cpu')
#df.config.no_grad = True
#df.config.check_grad = True
#df.config.verify_fp = True
#robot.load()
#env.run()
#env.load()
env.train(mode='adam')
#robot.verify(eps=1.e+1)
| 12,760 |
Python
| 31.306329 | 201 | 0.518103 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_walker.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Walker:
    """Soft-body walker driven by a small network.

    A triangle mesh is loaded from USD; a single linear layer + tanh maps
    sinusoidal phase inputs to per-triangle activations. Training maximizes
    the centre-of-mass forward velocity via dflex's differentiable simulator.

    Fixes vs. original:
      * closure() guarded `optimizer.zero_grad()` (optimizer is None in 'gd'
        mode and the unguarded call crashed on the first iteration)
      * 'gd' branch iterated over an undefined `param`; it now updates every
        network parameter and zeroes its gradient
      * final save referenced `render`, undefined at train() scope
    """

    # simulation timing: 5 seconds at 60 fps with 16 substeps per frame
    sim_duration = 5.0       # seconds
    sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0
    render_time = 0.0

    train_iters = 50
    train_rate = 0.0001

    def __init__(self, mode="walker", adapter='cpu'):
        """Load the walker mesh, build the dflex model, network, and renderer.

        mode    -- unused; kept for interface parity with sibling tests
        adapter -- torch device string ('cpu' or 'cuda')
        """
        self.phase_count = 8
        self.phase_step = math.pi / self.phase_count * 2.0
        self.phase_freq = 20.0

        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        walker = Usd.Stage.Open("assets/walker.usda")
        mesh = UsdGeom.Mesh(walker.GetPrimAtPath("/Grid/Grid"))

        points = mesh.GetPointsAttr().Get()
        indices = mesh.GetFaceVertexIndicesAttr().Get()

        # one unit-mass particle per mesh vertex
        for p in points:
            builder.add_particle(tuple(p), (0.0, 0.0, 0.0), 1.0)

        # one deformable triangle per mesh face
        for t in range(0, len(indices), 3):
            i = indices[t + 0]
            j = indices[t + 1]
            k = indices[t + 2]
            builder.add_triangle(i, j, k)

        self.model = builder.finalize(adapter)
        self.model.tri_ke = 10000.0
        self.model.tri_ka = 10000.0
        self.model.tri_kd = 100.0
        self.model.tri_lift = 0.0
        self.model.tri_drag = 0.0

        # NOTE(review): these set attributes on the Walker instance, not on the
        # model; self.model.edge_ke / edge_kd were presumably intended. Left
        # as-is to preserve current simulation behavior — confirm.
        self.edge_ke = 0.0
        self.edge_kd = 0.0

        self.model.contact_ke = 1.e+4
        self.model.contact_kd = 1000.0
        self.model.contact_kf = 1000.0
        self.model.contact_mu = 0.5

        self.model.particle_radius = 0.01

        # one fully connected layer + tanh activation: phases -> tri activations
        self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.tri_count, bias=False), torch.nn.Tanh()).to(adapter)

        self.activation_strength = 0.2
        self.activation_penalty = 0.1

        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/walker.usd")

        self.renderer = df.render.UsdRenderer(self.model, self.stage)
        self.renderer.draw_points = True
        self.renderer.draw_springs = True
        self.renderer.draw_shapes = True

        self.integrator = df.sim.SemiImplicitIntegrator()

    def loss(self, render=True):
        """Simulate the full episode and return the (negative-reward) loss.

        The loss integrates -v_x of the centre of mass over the run, plus an
        L2 penalty on the triangle activations.
        """
        #-----------------------
        # run simulation
        self.sim_time = 0.0
        self.state = self.model.state()

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for i in range(0, self.sim_steps):

            # build sinusoidal phase inputs
            phases = torch.zeros(self.phase_count, device=self.model.adapter)
            for p in range(self.phase_count):
                phases[p] = math.cos(4.0*self.sim_time*math.pi/(2.0*self.phase_count)*(2.0*p + 1.0)) #self.phase_freq*self.sim_time + p * self.phase_step)

            # network output drives per-triangle activations
            self.model.tri_activations = self.network(phases) * self.activation_strength
            self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
            self.sim_time += self.sim_dt

            if (render and (i % self.sim_substeps == 0)):
                self.render_time += self.sim_dt * self.sim_substeps
                self.renderer.update(self.state, self.render_time)

            com_pos = torch.mean(self.state.particle_q, 0)
            com_vel = torch.mean(self.state.particle_qd, 0)

            # use integral of velocity over course of the run
            loss = loss - com_vel[0] + torch.norm(self.model.tri_activations) * self.activation_penalty

        return loss

    def run(self):
        """Evaluate the loss once and save the USD stage."""
        l = self.loss()
        self.stage.Save()

    def train(self, mode='gd'):
        """Train the network with the chosen optimizer.

        mode -- 'gd' (manual gradient descent), 'lbfgs', 'adam', or 'sgd'
        """
        self.step_count = 0
        render_freq = 1

        optimizer = None

        def closure():
            # shared forward/backward step used by all optimizer modes
            if (optimizer):
                # fixed: was unconditional and crashed in 'gd' mode (optimizer is None)
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            l = self.loss(render)
            l.backward()

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            try:
                if (render):
                    self.stage.Save()
            except:
                print("USD save error")

            return l

        if (mode == 'gd'):
            # simple Gradient Descent over all network parameters
            # fixed: original referenced an undefined `param` and never zeroed grads
            for i in range(self.train_iters):
                closure()

                with torch.no_grad():
                    for param in self.network.parameters():
                        param -= self.train_rate * param.grad
                        param.grad.zero_()
        else:
            # L-BFGS
            if (mode == 'lbfgs'):
                optimizer = torch.optim.LBFGS(self.network.parameters(), lr=0.1, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

            # Adam
            if (mode == 'adam'):
                optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)

            # SGD
            if (mode == 'sgd'):
                optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.25)

            # train
            for i in range(self.train_iters):
                optimizer.step(closure)

        # final save
        # fixed: original tested an undefined `render` (NameError was masked by the bare except)
        try:
            self.stage.Save()
        except:
            print("USD save error")

    def save(self, file):
        # persist the policy network
        torch.save(self.network, file)

    def load(self, file):
        # restore a saved policy network and switch to eval mode
        self.network = torch.load(file)
        self.network.eval()
#---------
# Entry point: build the walker on CPU and train the network with L-BFGS.
walker = Walker(adapter='cpu')
walker.train('lbfgs')
#walker.run()
| 6,109 |
Python
| 27.685446 | 158 | 0.560485 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_cage.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import time
# include parent path
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Cage:
    """Spring-cage inverse problem: optimize spring rest lengths so that a
    suspended particle ends the simulation at a target position.

    Three topologies are supported via the `mode` constructor argument:
    'quad' (4 anchors), 'box' (8 anchors), and 'chain' (serial links).
    """

    # simulation timing: 2 seconds at 60 fps with 8 substeps per frame
    sim_duration = 2.0       # seconds
    sim_substeps = 8
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    train_iters = 20
    train_rate = 0.1 #1.0/(sim_dt*sim_dt)

    def __init__(self, mode="quad", adapter='cpu'):
        """Build the chosen spring topology, model, and USD renderer."""
        builder = df.sim.ModelBuilder()

        if (mode == "quad"):

            # anchors (mass 0.0 = fixed)
            builder.add_particle((-1.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((1.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((1.0, -1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((-1.0, -1.0, 0.0), (0.0, 0.0, 0.0), 0.0)

            # ball (free, unit mass)
            builder.add_particle((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)

            ke = 1.e+2
            kd = 10.0

            # springs from each anchor to the ball
            builder.add_spring(0, 4, ke, kd, 0)
            builder.add_spring(1, 4, ke, kd, 0)
            builder.add_spring(2, 4, ke, kd, 0)
            builder.add_spring(3, 4, ke, kd, 0)

            self.target_pos = torch.tensor((0.85, 0.5, 0.0), device=adapter)
            self.target_index = 4

        if (mode == "box"):

            # anchors (cube corners, fixed)
            builder.add_particle((-1.0, -1.0, -1.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((-1.0, -1.0, 1.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((-1.0, 1.0, -1.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((-1.0, 1.0, 1.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((1.0, -1.0, -1.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((1.0, -1.0, 1.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((1.0, 1.0, -1.0), (0.0, 0.0, 0.0), 0.0)
            builder.add_particle((1.0, 1.0, 1.0), (0.0, 0.0, 0.0), 0.0)

            # ball
            builder.add_particle((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)

            ke = 1.e+2
            kd = 10.0

            target = 8

            # springs from each corner to the ball
            builder.add_spring(0, target, ke, kd, 0)
            builder.add_spring(1, target, ke, kd, 0)
            builder.add_spring(2, target, ke, kd, 0)
            builder.add_spring(3, target, ke, kd, 0)
            builder.add_spring(4, target, ke, kd, 0)
            builder.add_spring(5, target, ke, kd, 0)
            builder.add_spring(6, target, ke, kd, 0)
            builder.add_spring(7, target, ke, kd, 0)

            self.target_pos = torch.tensor((0.85, 0.5, -0.75), device=adapter)
            self.target_index = target

        if (mode == "chain"):

            # anchor
            builder.add_particle((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), 0.0)

            segments = 4
            segment_length = 1.0

            ke = 1.e+2
            kd = 10.0

            for i in range(1, segments + 1):
                builder.add_particle((segment_length * i, 0.0, 0.0), (0.0, 0.0, 0.0), 1.0)
                builder.add_spring(i - 1, i, ke, kd, 0)

                # bending spring (skips one particle to resist folding)
                if (i > 1):
                    builder.add_spring(i - 2, i, ke * 4.0, kd, 0)

            self.target_pos = torch.tensor((3.0, 0.0, 0.0), device=adapter)
            self.target_index = segments

        self.model = builder.finalize(adapter)
        self.model.particle_radius = 0.05
        self.model.ground = False
        self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)

        # set optimization targets: spring rest lengths are the trainable params
        self.model.spring_rest_length.requires_grad_()

        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/cage.usda")

        self.renderer = df.render.UsdRenderer(self.model, self.stage)
        self.renderer.draw_points = True
        self.renderer.draw_springs = True
        self.renderer.draw_shapes = True

        self.renderer.add_sphere(self.target_pos.tolist(), 0.1, "target")

        self.integrator = df.sim.SemiImplicitIntegrator()

    def loss(self):
        """Simulate and return the distance of the ball to the target."""
        #-----------------------
        # run simulation
        self.state = self.model.state()

        for i in range(0, self.sim_steps):
            self.state = self.integrator.forward(self.model, self.state, self.sim_dt)

            # print("state: ", self.state.particle_q[self.target_index])
            if (i % self.sim_substeps) == 0:
                self.renderer.update(self.state, self.sim_time)

            self.sim_time += self.sim_dt

        # print(self.state.particle_q[self.target_index])
        loss = torch.norm(self.state.particle_q[self.target_index] - self.target_pos)
        return loss

    def run(self):
        """Evaluate the loss once and save the USD stage."""
        l = self.loss()
        self.stage.Save()

    def train(self, mode='gd'):
        """Optimize the spring rest lengths with the chosen optimizer.

        mode -- 'gd' (manual gradient descent), 'lbfgs', or 'sgd'
        """
        # param to train
        param = self.model.spring_rest_length

        # Gradient Descent
        if (mode == 'gd'):
            for i in range(self.train_iters):
                # with torch.autograd.detect_anomaly():
                l = self.loss()
                print(l)

                l.backward()

                with torch.no_grad():
                    param -= self.train_rate * param.grad
                    param.grad.zero_()

        # L-BFGS
        if (mode == 'lbfgs'):

            optimizer = torch.optim.LBFGS([param], self.train_rate, tolerance_grad=1.e-5, history_size=4, line_search_fn="strong_wolfe")

            def closure():
                optimizer.zero_grad()
                l = self.loss()
                l.backward()

                print(l)

                return l

            for i in range(self.train_iters):
                optimizer.step(closure)

        # SGD
        if (mode == 'sgd'):
            optimizer = torch.optim.SGD([param], lr=self.train_rate, momentum=0.8)

            for i in range(self.train_iters):
                optimizer.zero_grad()

                l = self.loss()
                l.backward()

                print(l)

                optimizer.step()

        self.stage.Save()
#---------
# Entry point: build the 8-anchor box cage and train with gradient descent.
cage = Cage("box", adapter='cpu')
cage.train('gd')
#cage.run()
| 6,656 |
Python
| 29.122172 | 136 | 0.510968 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_articulation_fk.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import tinyobjloader
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class Robot:
    """Inverse-kinematics test on a branched articulated tree.

    Evaluates the rigid-body forward-kinematics kernel differentiably and
    optimizes joint angles so a chosen body reaches a target position.
    """

    # simulation timing: 10 seconds at 60 fps with 4 substeps per frame
    sim_duration = 10.0      # seconds
    sim_substeps = 4
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    train_iters = 64
    train_rate = 0.05 #1.0/(sim_dt*sim_dt)

    def __init__(self, adapter='cpu'):
        """Build the tree articulation, model, and USD renderer."""
        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        x = 0.0
        w = 0.5
        max_depth = 3

        # create a branched tree
        builder.add_articulation()
        test_util.build_tree(builder, angle=0.0, width=w, max_depth=max_depth)

        # add weight (extra revolute link with a sphere shape at the tip)
        if (True):
            radius = 0.1

            X_pj = df.transform((w * 2.0, 0.0, 0.0), df.quat_from_axis_angle( (0.0, 0.0, 1.0), 0.0))
            X_cm = df.transform((radius, 0.0, 0.0), df.quat_identity())

            parent = len(builder.body_mass)-1
            link = builder.add_link(parent, X_pj, (0.0, 0.0, 1.0), df.JOINT_REVOLUTE)
            shape = builder.add_shape_sphere(link, pos=(0.0, 0.0, 0.0), radius=radius)

        self.model = builder.finalize(adapter)
        self.model.contact_ke = 1.e+4
        self.model.contact_kd = 1000.0
        self.model.contact_kf = 100.0
        self.model.contact_mu = 0.75

        self.model.ground = False
        self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)

        # base state; joint angles are the optimization variables
        self.state = self.model.state()
        self.state.joint_q.requires_grad_()

        # ik target
        self.target = torch.tensor((1.0, 2.0, 0.0), device=adapter)

        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/articulation_fk.usda")

        if (self.stage):
            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True
            self.render_time = 0.0

        self.integrator = df.sim.SemiImplicitIntegrator()

    def set_target(self, x, name):
        """Set the IK target position and draw it as a sphere."""
        self.target = torch.tensor(x, device='cpu')
        self.renderer.add_sphere(self.target.tolist(), 0.1, name)

    def loss(self, render=True):
        """Run forward kinematics and return the end-effector distance to target."""
        #---------------
        # run simulation
        self.sim_time = 0.0

        if (True):
            # evaluate FK through the differentiable adjoint launch so the
            # error backpropagates to joint_q
            self.state.body_X_sc, self.state.body_X_sm = df.adjoint.launch(
                df.eval_rigid_fk,
                1,
                [   # inputs
                    self.model.articulation_start,
                    self.model.joint_type,
                    self.model.joint_parent,
                    self.model.joint_q_start,
                    self.model.joint_qd_start,
                    self.state.joint_q,
                    self.model.joint_X_pj,
                    self.model.joint_X_cm,
                    self.model.joint_axis
                ],
                [   # outputs
                    self.state.body_X_sc,
                    self.state.body_X_sm
                ],
                adapter='cpu',
                preserve_output=True)

        # distance of body 3's centre of mass to the target
        p = self.state.body_X_sm[3][0:3]
        err = torch.norm(p - self.target)

        # try:
        #     art_start = self.art.articulation_start.clone()
        #     art_end = self.art.articulation_end.clone()
        #     joint_type = self.art.joint_type.clone()
        #     joint_parent = self.art.joint_parent.clone()
        #     joint_q_start = self.art.joint_q_start.clone()
        #     joint_qd_start = self.art.joint_qd_start.clone()
        #     joint_q = self.art.joint_q.clone()
        #     joint_X_pj = self.art.joint_X_pj.clone()
        #     joint_X_cm = self.art.joint_X_cm.clone()
        #     joint_axis = self.art.joint_axis.clone()
        #     torch.autograd.gradcheck(df.EvalRigidFowardKinematicsFunc.apply, (
        #         art_start, art_end, joint_type, joint_parent, joint_q_start,
        #         joint_qd_start, joint_q, joint_X_pj, joint_X_cm, joint_axis,
        #         'cpu'), eps=1e-3, atol=1e-3, raise_exception=True)
        # except Exception as e:
        #     print("failed: " + str(e))

        # render
        with df.ScopedTimer("render", False):
            if (self.stage):
                self.render_time += self.sim_dt * self.sim_substeps
                self.renderer.update(self.state, self.render_time)
                #self.stage.Save()

        self.sim_time += self.sim_dt

        return err

    def run(self):
        """Evaluate the loss once and save the USD stage."""
        #with torch.no_grad():
        l = self.loss()

        if (self.stage):
            self.stage.Save()

    def train(self, mode='gd'):
        """Optimize joint angles toward the current target.

        mode -- 'gd' (manual gradient descent), 'lbfgs', 'adam', or 'sgd'
        """
        # param to train
        self.step_count = 0
        render_freq = 1

        optimizer = None

        params = [self.state.joint_q]

        def closure():
            # shared forward/backward step used by all optimizer modes
            if (optimizer):
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            with df.ScopedTimer("forward"):
                #with torch.autograd.detect_anomaly():
                l = self.loss(render)

            with df.ScopedTimer("backward"):
                #with torch.autograd.detect_anomaly():
                l.backward()

            print("vel: " + str(params[0]))
            print("grad: " + str(params[0].grad))
            print("--------")

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    if (render):
                        self.stage.Save()
                except:
                    print("USD save error")

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=0.2, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8)

                # train
                for i in range(self.train_iters):
                    optimizer.step(closure)

        # final save
        # NOTE(review): `render` is only defined inside closure(); the NameError
        # raised here is silently swallowed by the bare except — confirm intent.
        try:
            if (render):
                self.stage.Save()
        except:
            print("USD save error")

    def save(self, file):
        # NOTE(review): this class never defines self.network; save/load look
        # copied from a network-based sibling test and would raise — confirm.
        torch.save(self.network, file)

    def load(self, file):
        self.network = torch.load(file)
        self.network.eval()
#---------
# Entry point: solve IK for four successive targets with L-BFGS, appending
# each solve to the same USD stage.
robot = Robot(adapter='cpu')
#robot.run()

mode = 'lbfgs'

robot.set_target((1.0, 2.0, 0.0), "target_1")
robot.train(mode)

robot.set_target((1.0, -2.0, 0.0), "target_2")
robot.train(mode)

robot.set_target((-1.0, -2.0, 0.0), "target_3")
robot.train(mode)

robot.set_target((-2.0, 2.0, 0.0), "target_4")
robot.train(mode)

#rigid.stage.Save()
| 8,503 |
Python
| 28.425605 | 141 | 0.49359 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_fem.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import cProfile
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class FEM:
    """Soft-body FEM prop driven by a small network.

    A tetrahedral mesh is loaded from USD; a single linear layer + tanh maps
    sinusoidal phase inputs to per-tet activations (rest-angle changes).
    Training maximizes the centre-of-mass forward velocity.

    Fixes vs. original:
      * closure() guarded `optimizer.zero_grad()` (optimizer is None in 'gd'
        mode and the unguarded call crashed on the first iteration)
      * 'gd' branch referenced an undefined `param`; it now updates every
        network parameter and zeroes its gradient
      * final save referenced `render`, undefined at train() scope
    """

    # simulation timing: 5 seconds at 60 fps with 32 substeps per frame
    sim_duration = 5.0       # seconds
    sim_substeps = 32
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    train_iters = 16
    train_rate = 0.01 #1.0/(sim_dt*sim_dt)

    phase_count = 8
    phase_step = math.pi / phase_count * 2.0
    phase_freq = 2.5

    def __init__(self, adapter='cpu'):
        """Load the tet mesh, build the dflex model, network, and renderer.

        adapter -- torch device string ('cpu' or 'cuda')
        """
        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        mesh = Usd.Stage.Open("assets/prop.usda")
        geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/mesh"))

        points = geom.GetPointsAttr().Get()
        tet_indices = geom.GetPrim().GetAttribute("tetraIndices").Get()
        tri_indices = geom.GetFaceVertexIndicesAttr().Get()
        tri_counts = geom.GetFaceVertexCountsAttr().Get()

        # identity rotation kept in the composed form used during tuning
        r = df.quat_multiply(df.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.0))

        builder.add_soft_mesh(pos=(0.0, 2.0, 0.0),
                              rot=r,
                              scale=1.0,
                              vel=(1.5, 0.0, 0.0),
                              vertices=points,
                              indices=tet_indices,
                              density=1.0,
                              k_mu=1000.0,
                              k_lambda=1000.0,
                              k_damp=1.0)

        #builder.add_soft_grid(pos=(0.0, 0.5, 0.0), rot=(0.0, 0.0, 0.0, 1.0), vel=(0.0, 0.0, 0.0), dim_x=1, dim_y=2, dim_z=1, cell_x=0.5, cell_y=0.5, cell_z=0.5, density=1.0)

        self.model = builder.finalize(adapter)

        # disable triangle dynamics (just used for rendering)
        self.model.tri_ke = 0.0
        self.model.tri_ka = 0.0
        self.model.tri_kd = 0.0
        self.model.tri_kb = 0.0

        self.model.contact_ke = 1.e+4
        self.model.contact_kd = 1.0
        self.model.contact_kf = 10.0
        self.model.contact_mu = 0.5

        self.model.particle_radius = 0.05
        self.model.ground = True

        # one fully connected layer + tanh activation: phases -> tet activations
        self.network = torch.nn.Sequential(torch.nn.Linear(self.phase_count, self.model.tet_count, bias=False), torch.nn.Tanh()).to(adapter)

        self.activation_strength = 0.3
        self.activation_penalty = 0.0

        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/fem.usd")

        if (self.stage):
            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True
            self.render_time = 0.0

        self.integrator = df.sim.SemiImplicitIntegrator()

    def loss(self, render=True):
        """Simulate the full episode and return the (negative-reward) loss.

        The loss integrates -v_x of the centre of mass over the run.
        """
        #-----------------------
        # run simulation
        self.sim_time = 0.0
        self.state = self.model.state()

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for i in range(0, self.sim_steps):

            # build sinusoidal input phases
            with df.ScopedTimer("inference", False):
                phases = torch.zeros(self.phase_count, device=self.model.adapter)
                for p in range(self.phase_count):
                    phases[p] = math.sin(self.phase_freq * self.sim_time + p * self.phase_step)

                # compute activations (rest angles)
                self.model.tet_activations = self.network(phases) * self.activation_strength

            # forward dynamics
            with df.ScopedTimer("simulate", False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
                self.sim_time += self.sim_dt

            # render
            with df.ScopedTimer("render", False):
                if (self.stage and render and (i % self.sim_substeps == 0)):
                    self.render_time += self.sim_dt * self.sim_substeps
                    self.renderer.update(self.state, self.render_time)

            # loss
            with df.ScopedTimer("loss", False):
                com_loss = torch.mean(self.state.particle_qd, 0)
                #act_loss = torch.norm(self.model.tet_activations)*self.activation_penalty

                loss = loss - com_loss[0]  # - act_loss

        return loss

    def run(self, profile=False, render=True):
        """Evaluate the model once without gradients.

        profile -- when True, run bare forward dynamics under cProfile
        render  -- forwarded to loss() when profiling is off
        """
        df.config.no_grad = True

        with torch.no_grad():
            with df.ScopedTimer("run"):
                if profile:
                    cp = cProfile.Profile()
                    cp.clear()
                    cp.enable()

                # run forward dynamics
                if profile:
                    self.state = self.model.state()
                    for i in range(0, self.sim_steps):
                        self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
                        self.sim_time += self.sim_dt
                else:
                    l = self.loss(render)

                if profile:
                    cp.disable()
                    cp.print_stats(sort='tottime')

        if (self.stage):
            self.stage.Save()

    def train(self, mode='gd'):
        """Train the network with the chosen optimizer.

        mode -- 'gd' (manual gradient descent), 'lbfgs', 'adam', or 'sgd'
        """
        self.step_count = 0
        render_freq = 1

        optimizer = None

        def closure():
            # shared forward/backward step used by all optimizer modes
            if (optimizer):
                # fixed: was unconditional and crashed in 'gd' mode (optimizer is None)
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            with df.ScopedTimer("forward"):
                #with torch.autograd.detect_anomaly():
                l = self.loss(render)

            with df.ScopedTimer("backward"):
                #with torch.autograd.detect_anomaly():
                l.backward()

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    if (render):
                        self.stage.Save()
                except:
                    print("USD save error")

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent over all network parameters
                # fixed: original referenced an undefined `param` and never zeroed grads
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        for param in self.network.parameters():
                            param -= self.train_rate * param.grad
                            param.grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(self.network.parameters(), lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(self.network.parameters(), lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(self.network.parameters(), lr=self.train_rate, momentum=0.5)

                # train
                for i in range(self.train_iters):
                    optimizer.step(closure)

        # final save
        # fixed: original tested an undefined `render` (NameError was masked by the bare except)
        try:
            self.stage.Save()
        except:
            print("USD save error")

    def save(self, file):
        # persist the policy network
        torch.save(self.network, file)

    def load(self, file):
        # restore a saved policy network and switch to eval mode
        self.network = torch.load(file)
        self.network.eval()
#---------
# Entry point: run one forward rollout on the GPU (no training by default).
fem = FEM(adapter='cuda')
fem.run(profile=False, render=True)
#fem.train('lbfgs')
#fem.train('sgd')
| 8,649 |
Python
| 30.918819 | 174 | 0.513239 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_beam.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
from pxr import Usd, UsdGeom, Gf
class Beam:
    """Differentiable soft-body beam (FEM tet grid clamped at both ends).

    The trainable quantity is a single (k_mu, k_lambda, k_damp) triple shared
    by every tet; the loss drives the beam's vertical center of mass toward
    ``self.target``, so the optimizer effectively tunes the material stiffness.
    """

    # simulation timing: 3 s at 60 Hz frames, 32 substeps per frame
    sim_duration = 3.0       # seconds
    sim_substeps = 32
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    # optimization hyper-parameters
    train_iters = 64
    train_rate = 1.0

    def __init__(self, device='cpu'):
        """Build the tet-grid model, the trainable material, and the USD renderer.

        Args:
            device: torch/dflex adapter string ('cpu' or 'cuda').
        """
        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        # 20x2x2 cell grid, 0.1 m cells, fixed on both ends
        builder.add_soft_grid(pos=(0.0, 0.0, 0.0),
                              rot=df.quat_identity(),
                              vel=(0.0, 0.0, 0.0),
                              dim_x=20,
                              dim_y=2,
                              dim_z=2,
                              cell_x=0.1,
                              cell_y=0.1,
                              cell_z=0.1,
                              density=10.0,
                              k_mu=1000.0,
                              k_lambda=1000.0,
                              k_damp=5.0,
                              fix_left=True,
                              fix_right=True)

        self.model = builder.finalize(device)

        # disable triangle dynamics (just used for rendering)
        self.model.tri_ke = 0.0
        self.model.tri_ka = 0.0
        self.model.tri_kd = 0.0
        self.model.tri_kb = 0.0

        self.model.particle_radius = 0.05
        self.model.ground = False

        # target height for the beam's center of mass (y component only is used)
        self.target = torch.tensor((-0.5)).to(device)

        # trainable (k_mu, k_lambda, k_damp), broadcast to all tets in loss()
        self.material = torch.tensor((100.0, 50.0, 5.0), requires_grad=True, device=device)

        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/beam.usd")

        if (self.stage):
            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True
            self.render_time = 0.0

        self.integrator = df.sim.SemiImplicitIntegrator()

    def loss(self, render=True):
        """Roll out the simulation and return the scalar training loss.

        Each step the material estimate is clamped to a sane range, broadcast
        to every tet, and the loss accumulates -||com_y - target||, so
        minimizing the loss pulls the vertical center of mass to the target.
        """
        #-----------------------
        # run simulation
        self.sim_time = 0.0
        self.state = self.model.state()

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for i in range(0, self.sim_steps):
            # clamp material params to reasonable range (damping pinned at 5.0)
            mat_min = torch.tensor((1.e+1, 1.e+1, 5.0), device=self.model.adapter)
            mat_max = torch.tensor((1.e+5, 1.e+5, 5.0), device=self.model.adapter)
            mat_val = torch.max(torch.min(mat_max, self.material), mat_min)

            # broadcast stiffness params to all tets
            self.model.tet_materials = mat_val.expand((self.model.tet_count, 3)).contiguous()

            # forward dynamics
            with df.ScopedTimer("simulate", False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
                self.sim_time += self.sim_dt

            # render one USD frame per full 60 Hz frame
            with df.ScopedTimer("render", False):
                if (self.stage and render and (i % self.sim_substeps == 0)):
                    self.render_time += self.sim_dt * self.sim_substeps
                    self.renderer.update(self.state, self.render_time)

            # loss
            with df.ScopedTimer("loss", False):
                com_loss = torch.mean(self.state.particle_q, 0)

                # minimize y
                loss = loss - torch.norm(com_loss[1] - self.target)

        return loss

    def run(self):
        """Single gradient-free rollout; saves the USD stage for inspection."""
        with torch.no_grad():
            l = self.loss()

        if (self.stage):
            self.stage.Save()

    def train(self, mode='gd'):
        """Optimize self.material; mode is 'gd', 'lbfgs', 'adam', or 'sgd'."""
        # param to train
        self.step_count = 0
        render_freq = 1

        optimizer = None

        params = [
            self.material,
        ]

        def closure():
            # one forward/backward pass; returns the loss for the optimizer
            if optimizer:
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            # with torch.autograd.detect_anomaly():
            with df.ScopedTimer("forward"):
                l = self.loss(render)

            with df.ScopedTimer("backward"):
                l.backward()

            print(self.material)
            print(self.material.grad)
            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    if (render):
                        self.stage.Save()
                except:
                    print("USD save error")

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                # NOTE(review): grads are never zeroed in this branch, so they
                # accumulate across iterations — confirm this is intended.
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        for param in params:
                            param -= self.train_rate * param.grad
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.5, nesterov=True)

                # train
                for i in range(self.train_iters):
                    optimizer.step(closure)

        # final save
        # NOTE(review): 'render' is local to closure(), so this raises a
        # NameError that the bare except reports as "USD save error" — the
        # final stage is effectively never saved here; confirm intent.
        try:
            if (render):
                self.stage.Save()
        except:
            print("USD save error")

    def save(self, file):
        """NOTE(review): Beam never defines self.network — this looks like a
        copy-paste from a network-training test and would raise AttributeError."""
        torch.save(self.network, file)

    def load(self, file):
        """Load a network from ``file`` and switch it to eval mode (see save note)."""
        self.network = torch.load(file)
        self.network.eval()
#---------
# entry point: build the beam on the CPU and optimize the material with
# plain gradient descent
beam = Beam(device='cpu')

#beam.run()
#beam.train('lbfgs')
beam.train('gd')
| 6,623 |
Python
| 28.704036 | 141 | 0.495697 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_rigid_bounce.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class RigidBounce:
    """Spinning rigid sphere dropped onto the ground plane.

    The trainable quantity is the sphere's initial spatial twist
    (model.joint_qd); the loss is the distance between the final position
    and ``self.target``.
    """

    # timing: 2 s episode at 60 Hz frames, 16 substeps per frame
    frame_dt = 1.0/60.0

    episode_duration = 2.0      # seconds
    episode_frames = int(episode_duration/frame_dt)

    sim_substeps = 16
    sim_dt = frame_dt / sim_substeps
    sim_steps = int(episode_duration / sim_dt)

    sim_time = 0.0

    # optimization hyper-parameters
    train_iters = 128
    train_rate = 0.01

    ground = True

    name = "rigid_bounce"

    def __init__(self, depth=1, mode='numpy', render=True, adapter='cpu'):
        """Build a single free-joint sphere articulation and (optionally) a USD renderer."""
        torch.manual_seed(42)

        builder = df.sim.ModelBuilder()

        self.adapter = adapter
        self.mode = mode
        self.render = render

        builder.add_articulation()

        # add sphere (free joint -> full 6-DOF state)
        link = builder.add_link(-1, df.transform((0.0, 0.0, 0.0), df.quat_identity()), (0,0,0), df.JOINT_FREE)

        shape = builder.add_shape_sphere(
            link,
            (0.0, 0.0, 0.0),
            df.quat_identity(),
            radius=0.1,
            ke=1.e+4,
            kd=10.0,
            kf=1.e+2,
            mu=0.25)

        # start 1 m above the ground
        builder.joint_q[1] = 1.0

        #v_s = df.get_body_twist((0.0, 0.0, 0.0), (1.0, -1.0, 0.0), builder.joint_q[0:3])

        w_m = (0.0, 0.0, 3.0)       # angular velocity (expressed in world space)
        v_m = (0.0, 0.0, 0.0)       # linear velocity at center of mass (expressed in world space)
        p_m = builder.joint_q[0:3]  # position of the center of mass (expressed in world space)

        # set body0 twist
        builder.joint_qd[0:6] = df.get_body_twist(w_m, v_m, p_m)

        # get decomposed velocities (sanity-check printout)
        print(df.get_body_angular_velocity(builder.joint_qd[0:6]))
        print(df.get_body_linear_velocity(builder.joint_qd[0:6], p_m))

        # finalize model
        self.model = builder.finalize(adapter)
        self.model.ground = self.ground
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)

        # initial velocity
        #self.model.joint_qd[3] = 0.5
        #self.model.joint_qd[4] = -0.5
        #self.model.joint_qd[2] = 1.0

        # the initial twist is the quantity being optimized
        self.model.joint_qd.requires_grad_()

        self.target = torch.tensor((1.0, 1.0, 0.0), dtype=torch.float32, device=adapter)

        #-----------------------
        # set up Usd renderer
        if (self.render):
            self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")

            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True
            self.render_time = 0.0

            self.renderer.add_sphere(self.target.tolist(), 0.1, "target")

        self.integrator = df.sim.SemiImplicitIntegrator()

    def set_target(self, x, name):
        """Replace the optimization target and add a marker sphere to the stage."""
        self.target = torch.tensor(x, device='cpu')
        self.renderer.add_sphere(self.target.tolist(), 0.1, name)

    def loss(self, render=True):
        """Simulate one episode; the loss is the terminal distance to the target.

        NOTE(review): the 'render' argument is not used here — rendering is
        gated on self.render instead; confirm that is intended.
        """
        #---------------
        # run simulation
        self.sim_time = 0.0
        self.state = self.model.state()

        # build contacts once at startup (only needed with a ground plane)
        if (self.model.ground):
            self.model.collide(self.state)

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for f in range(0, self.episode_frames):
            # df.config.no_grad = True
            #df.config.verify_fp = True

            # simulate one frame worth of substeps
            with df.ScopedTimer("fk-id-dflex", detailed=False, active=False):
                for i in range(0, self.sim_substeps):
                    self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
                    self.sim_time += self.sim_dt

            # render
            with df.ScopedTimer("render", False):
                if (self.render):
                    self.render_time += self.frame_dt
                    self.renderer.update(self.state, self.render_time)

                    try:
                        self.stage.Save()
                    except:
                        print("USD save error")

        #loss = loss + torch.dot(self.state.joint_qd[3:6], self.state.joint_qd[3:6])*self.balance_penalty*discount

        # terminal cost only: distance of the free joint's position to the target
        pos = self.state.joint_q[0:3]
        loss = torch.norm(pos-self.target)

        return loss

    def run(self):
        """Single gradient-free rollout (df.config.no_grad disables adjoints)."""
        df.config.no_grad = True

        #with torch.no_grad():
        l = self.loss()

    def verify(self, eps=1.e-4):
        """Compare the analytic joint_qd gradient against central finite differences."""
        frame = 60

        params = self.model.joint_qd
        n = len(params)

        # evaluate analytic gradient
        l = self.loss(render=False)
        l.backward()

        # evaluate numeric gradient
        grad_analytic = self.model.joint_qd.grad.tolist()
        grad_numeric = np.zeros(n)

        with torch.no_grad():
            df.config.no_grad = True

            for i in range(n):
                mid = params[i].item()

                params[i] = mid - eps
                left = self.loss(render=False)

                params[i] = mid + eps
                right = self.loss(render=False)

                # reset
                params[i] = mid

                # numeric grad (central difference)
                grad_numeric[i] = (right-left)/(2.0*eps)

        # report
        print("grad_numeric: " + str(grad_numeric))
        print("grad_analytic: " + str(grad_analytic))

    def train(self, mode='gd'):
        """Optimize the initial twist; checkpoints the best trajectory seen so far."""
        # param to train
        self.step_count = 0
        self.best_loss = math.inf

        render_freq = 1

        optimizer = None

        params = [self.model.joint_qd]

        def closure():
            # single forward/backward pass shared by all optimizer modes
            if (optimizer):
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            with df.ScopedTimer("forward"):
                #with torch.autograd.detect_anomaly():
                l = self.loss(render)

            with df.ScopedTimer("backward"):
                #with torch.autograd.detect_anomaly():
                l.backward()

            print("vel: " + str(params[0]))
            print("grad: " + str(params[0].grad))
            print("--------")

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            # save best trajectory
            if (l.item() < self.best_loss):
                self.save()
                self.best_loss = l.item()

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

                # train
                for i in range(self.train_iters):
                    print("Step: " + str(i))
                    optimizer.step(closure)

        # final save
        # NOTE(review): 'render' is only defined inside closure(); the bare
        # except masks the resulting NameError — confirm intended behavior.
        try:
            if (render):
                self.stage.Save()
        except:
            print("USD save error")

    def save(self):
        """Checkpoint the current initial twist to outputs/<name>.pt."""
        torch.save(self.model.joint_qd, "outputs/" + self.name + ".pt")

    def load(self):
        """Restore the initial twist from outputs/<name>.pt."""
        self.model.joint_qd = torch.load("outputs/" + self.name + ".pt")
#---------
# entry point: forward rollout only on the CPU; training/verification
# entry points are left commented out
robot = RigidBounce(depth=1, mode='dflex', render=True, adapter='cpu')

#df.config.check_grad = True
#df.config.no_grad = True

robot.run()

#df.config.verify_fp = True
#robot.load()
#robot.train(mode='lbfgs')

#robot.verify(eps=1.e-3)
| 8,881 |
Python
| 27.196825 | 118 | 0.516158 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_rigid_slide.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import tinyobjloader
import numpy as np
from pxr import Usd, UsdGeom, Gf
class RigidSlide:
    """Rigid box sliding on the ground plane.

    The trainable quantity is the body's initial linear velocity
    (``self.vel``); the loss accumulates the distance of the body's center
    of mass to ``self.target`` over the whole episode.
    """

    # simulation timing: 3 s at 60 Hz frames, 16 substeps per frame
    sim_duration = 3.0       # seconds
    sim_substeps = 16
    sim_dt = (1.0 / 60.0) / sim_substeps
    sim_steps = int(sim_duration / sim_dt)
    sim_time = 0.0

    # optimization hyper-parameters
    train_iters = 64
    train_rate = 0.1

    # discounting constants (declared but not used in the methods below)
    discount_scale = 1.0
    discount_factor = 0.5

    def __init__(self, adapter='cpu'):
        """Load the Suzanne mesh, build one free rigid body with a box
        collision shape, and set up the USD renderer."""
        torch.manual_seed(42)

        # load mesh
        usd = Usd.Stage.Open("assets/suzanne.usda")
        geom = UsdGeom.Mesh(usd.GetPrimAtPath("/Suzanne/Suzanne"))

        points = geom.GetPointsAttr().Get()
        indices = geom.GetFaceVertexIndicesAttr().Get()
        counts = geom.GetFaceVertexCountsAttr().Get()

        builder = df.sim.ModelBuilder()

        mesh = df.sim.Mesh(points, indices)

        articulation = builder.add_articulation()

        # free joint so the body carries full 6-DOF state
        rigid = builder.add_link(
            parent=-1,
            X_pj=df.transform((0.0, 0.0, 0.0), df.quat_identity()),
            axis=(0.0, 0.0, 0.0),
            type=df.JOINT_FREE)

        # contact material parameters
        ke = 1.e+4
        kd = 1.e+3
        kf = 1.e+3
        mu = 0.5

        # shape = builder.add_shape_mesh(
        #     rigid,
        #     mesh=mesh,
        #     scale=(0.2, 0.2, 0.2),
        #     density=1000.0,
        #     ke=1.e+4,
        #     kd=1000.0,
        #     kf=1000.0,
        #     mu=0.75)

        radius = 0.1

        #shape = builder.add_shape_sphere(rigid, pos=(0.0, 0.0, 0.0), ke=ke, kd=kd, kf=kf, mu=mu, radius=radius)
        #shape = builder.add_shape_capsule(rigid, pos=(0.0, 0.0, 0.0), radius=radius, half_width=0.5)
        shape = builder.add_shape_box(rigid, pos=(0.0, 0.0, 0.0), hx=radius, hy=radius, hz=radius, ke=ke, kd=kd, kf=kf, mu=mu)

        # rest the box on the ground plane
        builder.joint_q[1] = radius

        self.model = builder.finalize(adapter)
        self.model.joint_qd.requires_grad = True

        # trainable initial linear velocity and the goal position
        self.vel = torch.tensor((1.0, 0.0, 0.0), dtype=torch.float32, device=adapter, requires_grad=True)
        self.target = torch.tensor((3.0, 0.2, 0.0), device=adapter)

        #-----------------------
        # set up Usd renderer
        self.stage = Usd.Stage.CreateNew("outputs/rigid_slide.usda")

        if (self.stage):
            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True
            self.render_time = 0.0

            self.renderer.add_sphere(self.target.tolist(), 0.1, "target")

        self.integrator = df.sim.SemiImplicitIntegrator()

    def loss(self, render=True):
        """Roll out the episode; accumulate per-step distance of the body's
        center of mass to the target."""
        #---------------
        # run simulation

        # construct contacts once at startup

        # write the trainable velocity into the free joint's twist
        # (zero angular part, self.vel as the linear part)
        self.model.joint_qd = torch.cat((torch.tensor((0.0, 0.0, 0.0), dtype=torch.float32, device=self.model.adapter), self.vel))

        self.sim_time = 0.0
        self.state = self.model.state()

        self.model.collide(self.state)

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        for i in range(0, self.sim_steps):
            # forward dynamics
            with df.ScopedTimer("simulate", False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
                self.sim_time += self.sim_dt

            # render one USD frame per full 60 Hz frame
            with df.ScopedTimer("render", False):
                if (self.stage and render and (i % self.sim_substeps == 0)):
                    self.render_time += self.sim_dt * self.sim_substeps
                    self.renderer.update(self.state, self.render_time)

            # running cost on the body's center of mass
            #com = self.state.joint_q[0:3]
            com = self.state.body_X_sm[0, 0:3]
            loss = loss + torch.norm(com - self.target)

        return loss

    def run(self):
        """Single rollout (gradients enabled), then save the USD stage."""
        #with torch.no_grad():
        l = self.loss()

        if (self.stage):
            self.stage.Save()

    def train(self, mode='gd'):
        """Optimize self.vel; mode is 'gd', 'lbfgs', 'adam', or 'sgd'."""
        # param to train
        self.step_count = 0
        render_freq = 1

        optimizer = None

        params = [self.vel]

        def closure():
            # one forward/backward pass; returns the loss for the optimizer
            if (optimizer):
                optimizer.zero_grad()

            # render every N steps
            render = False
            if ((self.step_count % render_freq) == 0):
                render = True

            with df.ScopedTimer("forward"):
                #with torch.autograd.detect_anomaly():
                l = self.loss(render)

            with df.ScopedTimer("backward"):
                #with torch.autograd.detect_anomaly():
                l.backward()

            print("vel: " + str(params[0]))
            print("grad: " + str(params[0].grad))
            print("--------")

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    if (render):
                        self.stage.Save()
                except:
                    print("USD save error")

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-5, tolerance_change=0.01, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    optimizer = torch.optim.Adam(params, lr=self.train_rate)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

                # train
                for i in range(self.train_iters):
                    optimizer.step(closure)

        # final save
        # NOTE(review): 'render' is local to closure(); the bare except masks
        # the resulting NameError as "USD save error" — confirm intent.
        try:
            if (render):
                self.stage.Save()
        except:
            print("USD save error")

    def save(self, file):
        """NOTE(review): RigidSlide never defines self.network — this looks
        like copy-paste from a network-training test; calling it would raise
        AttributeError."""
        torch.save(self.network, file)

    def load(self, file):
        """Load a network from ``file`` and switch it to eval mode (see save note)."""
        self.network = torch.load(file)
        self.network.eval()
#---------
# entry point: build the scene on the CPU and optimize the initial
# velocity with Adam
rigid = RigidSlide(adapter='cpu')

#rigid.run()
rigid.train('adam')
| 7,018 |
Python
| 28.124481 | 141 | 0.526503 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_snu_mlp.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import os
import sys
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
# to allow tests to import the module they belong to
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
from pxr import Usd, UsdGeom, Gf
import test_util
class MultiLayerPerceptron(nn.Module):
def __init__(self, n_in, n_out, n_hd, adapter, inference=False):
super(MultiLayerPerceptron,self).__init__()
self.n_in = n_in
self.n_out = n_out
self.n_hd = n_hd
#self.ll = nn.Linear(n_in, n_out)
self.fc1 = nn.Linear(n_in, n_hd).to(adapter)
self.fc2 = nn.Linear(n_hd, n_hd).to(adapter)
self.fc3 = nn.Linear(n_hd, n_out).to(adapter)
self.bn1 = nn.LayerNorm(n_in, elementwise_affine=False).to(adapter)
self.bn2 = nn.LayerNorm(n_hd, elementwise_affine=False).to(adapter)
self.bn3 = nn.LayerNorm(n_out, elementwise_affine=False).to(adapter)
def forward(self, x: torch.Tensor):
x = F.leaky_relu(self.bn2(self.fc1(x)))
x = F.leaky_relu(self.bn2(self.fc2(x)))
x = torch.tanh(self.bn3(self.fc3(x))-2.0)
return x
class HumanoidSNU:
    """Muscle-actuated SNU humanoid driven by an MLP policy.

    A batch of skeletons is simulated side by side; the network maps a 3D
    target point to per-muscle activations. Which body parts are loaded, and
    which loss branch applies, is selected by the class-level ``name``.
    """

    # optimization hyper-parameters
    train_iters = 100000000
    train_rate = 0.001
    train_size = 128
    train_batch_size = 4
    train_batch_iters = 128
    train_batch_count = int(train_size/train_batch_size)
    train_data = None

    ground = True

    # selects the body subset and loss branch below
    name = "humanoid_snu_lower"

    regularization = 1.e-3

    inference = False
    initial_y = 1.0

    def __init__(self, depth=1, mode='numpy', render=True, sim_duration=1.0, adapter='cpu', inference=False):
        """Build a batch of skeletons, the policy network, random training
        targets, and (optionally) a USD renderer.

        Args:
            sim_duration: rollout length in seconds.
            adapter: torch/dflex adapter string ('cpu' or 'cuda').
            inference: run-mode flag (stored; also settable via run()).
        """
        self.sim_duration = sim_duration      # seconds
        self.sim_substeps = 16
        self.sim_dt = (1.0 / 60.0) / self.sim_substeps
        self.sim_steps = int(self.sim_duration / self.sim_dt)
        self.sim_time = 0.0

        torch.manual_seed(41)
        np.random.seed(41)

        builder = df.sim.ModelBuilder()

        self.adapter = adapter
        self.mode = mode
        self.render = render

        # body-name filter controls which links/muscles are loaded
        self.filter = {}

        if self.name == "humanoid_snu_arm":
            self.filter = { "ShoulderR", "ArmR", "ForeArmR", "HandR", "Torso", "Neck" }
            self.ground = False

        if self.name == "humanoid_snu_neck":
            self.filter = { "Torso", "Neck", "Head", "ShoulderR", "ShoulderL" }
            self.ground = False

        if self.name == "humanoid_snu_lower":
            self.filter = { "Pelvis", "FemurR", "TibiaR", "TalusR", "FootThumbR", "FootPinkyR", "FemurL", "TibiaL", "TalusL", "FootThumbL", "FootPinkyL"}
            self.ground = True
            self.initial_y = 1.0

        if self.name == "humanoid_snu":
            self.filter = {}
            self.ground = True

        self.skeletons = []
        self.inference = inference

        # if (self.inference):
        #     self.train_batch_size = 1

        # one skeleton per batch element, spaced 1.5 m apart on x
        for i in range(self.train_batch_size):
            skeleton = test_util.Skeleton("assets/snu/arm.xml", "assets/snu/muscle284.xml", builder, self.filter)

            # set initial position 1m off the ground
            builder.joint_q[skeleton.coord_start + 0] = i*1.5
            builder.joint_q[skeleton.coord_start + 1] = self.initial_y

            # offset on z-axis
            #builder.joint_q[skeleton.coord_start + 2] = 10.0

            # initial velocity
            #builder.joint_qd[skeleton.dof_start + 5] = 3.0

            self.skeletons.append(skeleton)

        # finalize model
        self.model = builder.finalize(adapter)
        self.model.ground = self.ground
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=adapter)
        #self.model.gravity = torch.tensor((0.0, 0.0, 0.0), device=adapter)

        #self.activations = torch.zeros((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)
        #self.activations = torch.rand((1, len(self.muscles)), dtype=torch.float32, device=adapter, requires_grad=True)

        # policy: 3D target point -> per-muscle activation for one skeleton
        self.network = MultiLayerPerceptron(3, len(self.skeletons[0].muscles), 128, adapter)

        self.model.joint_q.requires_grad = True
        self.model.joint_qd.requires_grad = True
        self.model.muscle_activation.requires_grad = True

        # loss weights
        self.target_penalty = 1.0
        self.velocity_penalty = 0.1
        self.action_penalty = 0.0
        self.muscle_strength = 40.0

        self.discount_scale = 2.0
        self.discount_factor = 1.0

        # generate training data
        targets = []
        for i in range(self.train_size):
            # generate a random point in -1, 1 away from the head
            t = np.random.rand(2)*2.0 - 1.0
            t[1] += 0.5

            targets.append((t[0], t[1] + 0.5, 1.0))

        self.train_data = torch.tensor(targets, dtype=torch.float32, device=self.adapter)

        #-----------------------
        # set up Usd renderer
        if (self.render):
            self.stage = Usd.Stage.CreateNew("outputs/" + self.name + ".usd")

            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True
            self.render_time = 0.0
        else:
            self.renderer = None

        self.set_target(torch.tensor((0.75, 0.4, 0.5), dtype=torch.float32, device=self.adapter), "target")

        self.integrator = df.sim.SemiImplicitIntegrator()

    def set_target(self, x, name):
        """Set the current target point and add a marker sphere when rendering."""
        self.target = x

        if (self.renderer):
            self.renderer.add_sphere(self.target.tolist(), 0.05, name, self.render_time)

    def loss(self):
        """Roll out the batch once under the current policy and return the loss.

        Activations are computed once from the first train_batch_size targets
        and held fixed for the whole rollout. The loss branch is selected by
        self.name; with the default "humanoid_snu_lower" neither branch fires
        and the returned loss stays zero.
        """
        #---------------
        # run simulation
        self.sim_time = 0.0

        # initial state
        self.state = self.model.state()

        loss = torch.zeros(1, requires_grad=True, device=self.model.adapter)

        # apply actions
        #self.model.muscle_activation = self.activations[0]*self.muscle_strength

        # compute activations for each target in the batch
        targets = self.train_data[0:self.train_batch_size]
        activations = torch.flatten(self.network(targets))

        # map network output (-1, 1) to (0, muscle_strength)
        self.model.muscle_activation = (activations*0.5 + 0.5)*self.muscle_strength

        # one time collision
        self.model.collide(self.state)

        for i in range(self.sim_steps):
            # apply random actions per-frame
            #self.model.muscle_activation = (activations*0.5 + 0.5 + torch.rand_like(activations,dtype=torch.float32, device=self.model.adapter))*self.muscle_strength

            # simulate
            with df.ScopedTimer("fd", detailed=False, active=False):
                self.state = self.integrator.forward(self.model, self.state, self.sim_dt)

            #if self.inference:
                #x = math.cos(self.sim_time*0.5)*0.5
                #y = math.sin(self.sim_time*0.5)*0.5

                # t = self.sim_time*0.5
                # x = math.sin(t)*0.5
                # y = math.sin(t)*math.cos(t)*0.5

                # self.set_target(torch.tensor((x, y + 0.5, 1.0), dtype=torch.float32, device=self.adapter), "target")

                # activations = self.network(self.target)
                # self.model.muscle_activation = (activations*0.5 + 0.5)*self.muscle_strength

            # render skeleton meshes and muscle line strips once per 60 Hz frame
            with df.ScopedTimer("render", False):
                if (self.render and (i % self.sim_substeps == 0)):
                    with torch.no_grad():
                        muscle_start = 0
                        skel_index = 0

                        for s in self.skeletons:
                            # body meshes
                            for mesh, link in s.mesh_map.items():
                                if link != -1:
                                    X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())

                                    #self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)
                                    self.renderer.add_mesh(mesh, "../assets/snu/OBJ/" + mesh + ".usd", X_sc, 1.0, self.render_time)

                            # muscle paths, colored by normalized activation
                            for m in range(len(s.muscles)):#.self.model.muscle_count):
                                start = self.model.muscle_start[muscle_start + m].item()
                                end = self.model.muscle_start[muscle_start + m + 1].item()

                                points = []

                                for w in range(start, end):
                                    link = self.model.muscle_links[w].item()
                                    point = self.model.muscle_points[w].cpu().numpy()

                                    X_sc = df.transform_expand(self.state.body_X_sc[link].cpu().tolist())

                                    points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))

                                self.renderer.add_line_strip(points, name=s.muscles[m].name + str(skel_index), radius=0.0075, color=(self.model.muscle_activation[muscle_start + m]/self.muscle_strength, 0.2, 0.5), time=self.render_time)

                            muscle_start += len(s.muscles)
                            skel_index += 1

                        # render scene
                        self.render_time += self.sim_dt * self.sim_substeps
                        self.renderer.update(self.state, self.render_time)

            self.sim_time += self.sim_dt

            # loss
            if self.name == "humanoid_snu_arm":
                hand_pos = self.state.body_X_sc[self.node_map["HandR"]][0:3]

                discount_time = self.sim_time
                discount = math.pow(self.discount_factor, discount_time*self.discount_scale)

                # loss = loss + (torch.norm(hand_pos - self.target)*self.target_penalty +
                #                torch.norm(self.state.joint_qd)*self.velocity_penalty +
                #                torch.norm(self.model.muscle_activation)*self.action_penalty)*discount

                #loss = loss + torch.norm(self.state.joint_qd)
                loss = loss + torch.norm(hand_pos - self.target)*self.target_penalty

            if self.name == "humanoid_snu_neck":
                # rotate a vector by the transform's quaternion part
                def transform_vector_torch(t, x):
                    axis = t[3:6]
                    w = t[6]

                    return x * (2.0 *w*w - 1.0) + torch.cross(axis, x) * w * 2.0 + axis * torch.dot(axis, x) * 2.0

                forward_dir = torch.tensor((0.0, 0.0, 1.0), dtype=torch.float32, device=self.adapter)
                up_dir = torch.tensor((0.0, 1.0, 0.0), dtype=torch.float32, device=self.adapter)

                # reward facing the per-skeleton target while staying upright
                for i in range(self.train_batch_size):
                    skel = self.skeletons[i]

                    head_pos = self.state.body_X_sc[skel.node_map["Head"]][0:3]
                    head_forward = transform_vector_torch(self.state.body_X_sc[skel.node_map["Head"]], forward_dir)
                    head_up = transform_vector_torch(self.state.body_X_sc[skel.node_map["Head"]], up_dir)

                    target_dir = self.train_data[i] - head_pos

                    loss_forward = torch.dot(head_forward, target_dir)*self.target_penalty
                    loss_up = torch.dot(head_up, up_dir)*self.target_penalty*0.5
                    loss_penalty = torch.dot(activations, activations)*self.action_penalty

                    loss = loss - loss_forward - loss_up + loss_penalty

                    #self.writer.add_scalar("loss_forward", loss_forward.item(), self.step_count)
                    #self.writer.add_scalar("loss_up", loss_up.item(), self.step_count)
                    #self.writer.add_scalar("loss_penalty", loss_penalty.item(), self.step_count)

        return loss

    def run(self):
        """Single gradient-free rollout in inference mode; saves the USD stage."""
        df.config.no_grad = True
        self.inference = True

        with torch.no_grad():
            l = self.loss()

        if (self.render):
            self.stage.Save()

    def verify(self, eps=1.e-4):
        """Finite-difference check of the first action gradient.

        NOTE(review): self.actions is never assigned in this class (only a
        commented-out self.activations exists) — calling this would raise
        AttributeError; confirm before use.
        """
        params = self.actions
        n = 1#len(params)

        self.render = False

        # evaluate analytic gradient
        l = self.loss()
        l.backward()

        # evaluate numeric gradient
        grad_analytic = params.grad.cpu().numpy()
        grad_numeric = np.zeros(n)

        with torch.no_grad():
            df.config.no_grad = True

            for i in range(1):
                mid = params[0][i].item()

                params[0][i] = mid - eps
                left = self.loss()

                params[0][i] = mid + eps
                right = self.loss()

                # reset
                params[0][i] = mid

                # numeric grad (central difference)
                grad_numeric[i] = (right-left)/(2.0*eps)

        # report
        print("grad_numeric: " + str(grad_numeric))
        print("grad_analytic: " + str(grad_analytic))

    def train(self, mode='gd'):
        """Train the policy network; logs batch loss to TensorBoard and
        checkpoints whenever the loss improves."""
        self.writer = SummaryWriter()
        self.writer.add_hparams({"lr": self.train_rate, "mode": mode}, {})

        # param to train
        self.step_count = 0
        self.best_loss = math.inf

        optimizer = None
        scheduler = None

        params = self.network.parameters()#[self.activations]

        def closure():
            # one forward/backward pass over the current batch
            batch = int(self.step_count/self.train_batch_iters)%self.train_batch_count
            print("Batch: " + str(batch) + " Iter: " + str(self.step_count%self.train_batch_iters))

            if (optimizer):
                optimizer.zero_grad()

            # compute loss on all examples
            with df.ScopedTimer("forward"):#, detailed=True):
                l = self.loss()

            # compute gradient
            with df.ScopedTimer("backward"):#, detailed=True):
                l.backward()

            # batch stats
            self.writer.add_scalar("loss_batch", l.item(), self.step_count)
            self.writer.flush()

            print(str(self.step_count) + ": " + str(l))
            self.step_count += 1

            with df.ScopedTimer("save"):
                try:
                    self.stage.Save()
                except:
                    print("USD save error")

            # save network
            if (l < self.best_loss):
                self.save()
                self.best_loss = l

            return l

        with df.ScopedTimer("step"):
            if (mode == 'gd'):
                # simple Gradient Descent
                # NOTE(review): params is a generator here, so params[0] would
                # raise TypeError — this branch appears broken; confirm.
                for i in range(self.train_iters):
                    closure()

                    with torch.no_grad():
                        params[0] -= self.train_rate * params[0].grad
                        params[0].grad.zero_()
            else:
                # L-BFGS
                if (mode == 'lbfgs'):
                    optimizer = torch.optim.LBFGS(params, lr=1.0, tolerance_grad=1.e-9)#, line_search_fn="strong_wolfe")

                # Adam
                if (mode == 'adam'):
                    last_LR = 1e-5
                    init_LR = 1e-3
                    decay_LR_steps = 2000
                    gamma = math.exp(math.log(last_LR/init_LR)/decay_LR_steps)

                    optimizer = torch.optim.Adam(params, lr=self.train_rate, weight_decay=1e-5)
                    #scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma = gamma)

                # SGD
                if (mode == 'sgd'):
                    optimizer = torch.optim.SGD(params, lr=self.train_rate, momentum=0.8, nesterov=True)

                # train
                for i in range(self.train_iters):
                    print("Step: " + str(i))
                    if optimizer:
                        optimizer.step(closure)
                    if scheduler:
                        scheduler.step()

        # final save
        try:
            self.stage.Save()
        except:
            print("USD save error")

    def save(self):
        """Serialize the policy network to outputs/<name>.pt."""
        torch.save(self.network, "outputs/" + self.name + ".pt")

    def load(self, suffix=""):
        """Load outputs/<name><suffix>.pt and set train/eval mode from self.inference."""
        self.network = torch.load("outputs/" + self.name + suffix + ".pt")

        if self.inference:
            self.network.eval()
        else:
            self.network.train()
#---------
#env = HumanoidSNU(depth=1, mode='dflex', render=True, sim_duration=2.0, adapter='cuda')
#env.train(mode='adam')

# inference entry point: render a 2 s rollout on the GPU (policy weights are
# randomly initialized unless env.load() is uncommented)
env = HumanoidSNU(depth=1, mode='dflex', render=True, sim_duration=2.0, adapter='cuda', inference=True)

#env.load()
env.run()
| 17,357 |
Python
| 32.445087 | 235 | 0.526358 |
RoboticExplorationLab/Deep-ILC/dflex/tests/test_adjoint.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import torch
import time
import cProfile
import numpy as np
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
| 626 |
Python
| 25.124999 | 82 | 0.785942 |
RoboticExplorationLab/Deep-ILC/dflex/tests/assets/humanoid.xml
|
<!-- ======================================================
This file is part of MuJoCo.
Copyright 2009-2015 Roboti LLC.
Model :: Humanoid
Mujoco :: Advanced physics simulation engine
Source : www.roboti.us
Version : 1.31
Released : 23Apr16
Author :: Vikash Kumar
Contacts : kumar@roboti.us
Last edits : 30Apr'16, 30Nov'15, 26Sept'15
====================================================== -->
<mujoco model='humanoid (v1.31)'>
<compiler inertiafromgeom='true' angle='degree'/>
<default>
<joint limited='true' damping='1' armature='0' />
<geom contype='1' conaffinity='1' condim='1' rgba='0.8 0.6 .4 1'
margin="0.001" solref=".02 1" solimp=".8 .8 .01" material="geom"/>
<motor ctrlrange='-.4 .4' ctrllimited='true'/>
</default>
<option timestep='0.002' iterations="50" solver="PGS">
<flag energy="enable"/>
</option>
<size nkey='5'/>
<visual>
<map fogstart="3" fogend="5" force="0.1"/>
<quality shadowsize="2048"/>
</visual>
<asset>
<texture type="skybox" builtin="gradient" width="100" height="100" rgb1=".4 .6 .8"
rgb2="0 0 0"/>
<texture name="texgeom" type="cube" builtin="flat" mark="cross" width="127" height="1278"
rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" markrgb="1 1 1" random="0.01"/>
<texture name="texplane" type="2d" builtin="checker" rgb1=".2 .3 .4" rgb2=".1 0.15 0.2"
width="100" height="100"/>
<material name='MatPlane' reflectance='0.5' texture="texplane" texrepeat="1 1" texuniform="true"/>
<material name='geom' texture="texgeom" texuniform="true"/>
</asset>
<worldbody>
<geom name='floor' pos='0 0 0' size='10 10 0.125' type='plane' material="MatPlane" condim='3'/>
<body name='torso' pos='0 0 1.4'>
<light mode='trackcom' directional='false' diffuse='.8 .8 .8' specular='0.3 0.3 0.3' pos='0 0 4.0' dir='0 0 -1'/>
<joint name='root' type='free' pos='0 0 0' limited='false' damping='0' armature='0' stiffness='0'/>
<geom name='torso1' type='capsule' fromto='0 -.07 0 0 .07 0' size='0.07' />
<geom name='head' type='sphere' pos='0 0 .19' size='.09'/>
<geom name='uwaist' type='capsule' fromto='-.01 -.06 -.12 -.01 .06 -.12' size='0.06'/>
<body name='lwaist' pos='-.01 0 -0.260' quat='1.000 0 -0.002 0' >
<geom name='lwaist' type='capsule' fromto='0 -.06 0 0 .06 0' size='0.06' />
<joint name='abdomen_z' type='hinge' pos='0 0 0.065' axis='0 0 1' range='-45 45' damping='5' stiffness='20' armature='0.02' />
<joint name='abdomen_y' type='hinge' pos='0 0 0.065' axis='0 1 0' range='-75 30' damping='5' stiffness='10' armature='0.02' />
<body name='pelvis' pos='0 0 -0.165' quat='1.000 0 -0.002 0' >
<joint name='abdomen_x' type='hinge' pos='0 0 0.1' axis='1 0 0' range='-35 35' damping='5' stiffness='10' armature='0.02' />
<geom name='butt' type='capsule' fromto='-.02 -.07 0 -.02 .07 0' size='0.09' />
<body name='right_thigh' pos='0 -0.1 -0.04' >
<joint name='right_hip_x' type='hinge' pos='0 0 0' axis='1 0 0' range='-25 5' damping='5' stiffness='10' armature='0.01' />
<joint name='right_hip_z' type='hinge' pos='0 0 0' axis='0 0 1' range='-60 35' damping='5' stiffness='10' armature='0.01' />
<joint name='right_hip_y' type='hinge' pos='0 0 0' axis='0 1 0' range='-120 20' damping='5' stiffness='20' armature='0.01' />
<geom name='right_thigh1' type='capsule' fromto='0 0 0 0 0.01 -.34' size='0.06' />
<body name='right_shin' pos='0 0.01 -0.403' >
<joint name='right_knee' type='hinge' pos='0 0 .02' axis='0 -1 0' range='-160 -2' stiffness='1' armature='0.006' />
<geom name='right_shin1' type='capsule' fromto='0 0 0 0 0 -.3' size='0.049' />
<body name='right_foot' pos='0 0 -.39' >
<joint name='right_ankle_y' type='hinge' pos='0 0 0.08' axis='0 1 0' range='-50 50' damping='5' stiffness='4' armature='0.008' />
<joint name='right_ankle_x' type='hinge' pos='0 0 0.08' axis='1 0 0.5' range='-50 50' damping='5' stiffness='1' armature='0.006' />
<geom name='right_foot_cap1' type='capsule' fromto='-.07 -0.02 0 0.14 -0.04 0' size='0.027' />
<geom name='right_foot_cap2' type='capsule' fromto='-.07 0 0 0.14 0.02 0' size='0.027' />
</body>
</body>
</body>
<body name='left_thigh' pos='0 0.1 -0.04' >
<joint name='left_hip_x' type='hinge' pos='0 0 0' axis='-1 0 0' range='-25 5' damping='5' stiffness='10' armature='0.01' />
<joint name='left_hip_z' type='hinge' pos='0 0 0' axis='0 0 -1' range='-60 35' damping='5' stiffness='10' armature='0.01' />
<joint name='left_hip_y' type='hinge' pos='0 0 0' axis='0 1 0' range='-120 20' damping='5' stiffness='20' armature='0.01' />
<geom name='left_thigh1' type='capsule' fromto='0 0 0 0 -0.01 -.34' size='0.06' />
<body name='left_shin' pos='0 -0.01 -0.403' >
<joint name='left_knee' type='hinge' pos='0 0 .02' axis='0 -1 0' range='-160 -2' stiffness='1' armature='0.006' />
<geom name='left_shin1' type='capsule' fromto='0 0 0 0 0 -.3' size='0.049' />
<body name='left_foot' pos='0 0 -.39' >
<joint name='left_ankle_y' type='hinge' pos='0 0 0.08' axis='0 1 0' range='-50 50' damping='5' stiffness='4' armature='0.008' />
<joint name='left_ankle_x' type='hinge' pos='0 0 0.08' axis='1 0 0.5' range='-50 50' damping='5' stiffness='1' armature='0.006' />
<geom name='left_foot_cap1' type='capsule' fromto='-.07 0.02 0 0.14 0.04 0' size='0.027' />
<geom name='left_foot_cap2' type='capsule' fromto='-.07 0 0 0.14 -0.02 0' size='0.027' />
</body>
</body>
</body>
</body>
</body>
<body name='right_upper_arm' pos='0 -0.17 0.06' >
<joint name='right_shoulder1' type='hinge' pos='0 0 0' axis='2 1 1' range='-85 60' stiffness='1' armature='0.0068' />
<joint name='right_shoulder2' type='hinge' pos='0 0 0' axis='0 -1 1' range='-85 60' stiffness='1' armature='0.0051' />
<geom name='right_uarm1' type='capsule' fromto='0 0 0 .16 -.16 -.16' size='0.04 0.16' />
<body name='right_lower_arm' pos='.18 -.18 -.18' >
<joint name='right_elbow' type='hinge' pos='0 0 0' axis='0 -1 1' range='-90 50' stiffness='0' armature='0.0028' />
<geom name='right_larm' type='capsule' fromto='0.01 0.01 0.01 .17 .17 .17' size='0.031' />
<geom name='right_hand' type='sphere' pos='.18 .18 .18' size='0.04'/>
</body>
</body>
<body name='left_upper_arm' pos='0 0.17 0.06' >
<joint name='left_shoulder1' type='hinge' pos='0 0 0' axis='2 -1 1' range='-60 85' stiffness='1' armature='0.0068' />
<joint name='left_shoulder2' type='hinge' pos='0 0 0' axis='0 1 1' range='-60 85' stiffness='1' armature='0.0051' />
<geom name='left_uarm1' type='capsule' fromto='0 0 0 .16 .16 -.16' size='0.04 0.16' />
<body name='left_lower_arm' pos='.18 .18 -.18' >
<joint name='left_elbow' type='hinge' pos='0 0 0' axis='0 -1 -1' range='-90 50' stiffness='0' armature='0.0028' />
<geom name='left_larm' type='capsule' fromto='0.01 -0.01 0.01 .17 -.17 .17' size='0.031' />
<geom name='left_hand' type='sphere' pos='.18 -.18 .18' size='0.04'/>
</body>
</body>
</body>
</worldbody>
<tendon>
<fixed name='left_hipknee'>
<joint joint='left_hip_y' coef='-1'/>
<joint joint='left_knee' coef='1'/>
</fixed>
<fixed name='right_hipknee'>
<joint joint='right_hip_y' coef='-1'/>
<joint joint='right_knee' coef='1'/>
</fixed>
</tendon>
<keyframe>
<key qpos='-0.0233227 0.00247283 0.0784829 0.728141 0.00223397 -0.685422 -0.00181805 -0.000580139 -0.245119 0.0329713 -0.0461148 0.0354257 0.252234 -0.0347763 -0.4663 -0.0313013 0.0285638 0.0147285 0.264063 -0.0346441 -0.559198 0.021724 -0.0333332 -0.718563 0.872778 0.000260393 0.733088 0.872748' />
<key qpos='0.0168601 -0.00192002 0.127167 0.762693 0.00191588 0.646754 -0.00210291 -0.000199049 0.0573113 -4.05731e-005 0.0134177 -0.00468944 0.0985945 -0.282695 -0.0469067 0.00874203 0.0263262 -0.00295056 0.0984851 -0.282098 -0.044293 0.00475795 0.127371 -0.42895 0.882402 -0.0980573 0.428506 0.88193' />
<key qpos='0.000471586 0.0317577 0.210587 0.758805 -0.583984 0.254155 0.136322 -0.0811633 0.0870309 -0.0935227 0.0904958 -0.0278004 -0.00978614 -0.359193 0.139761 -0.240168 0.060149 0.237062 -0.00622109 -0.252598 -0.00376874 -0.160597 0.25253 -0.278634 0.834376 -0.990444 -0.169065 0.652876' />
<key qpos='-0.0602175 0.048078 0.194579 -0.377418 -0.119412 -0.675073 -0.622553 0.139093 0.0710746 -0.0506027 0.0863461 0.196165 -0.0276685 -0.521954 -0.267784 0.179051 0.0371897 0.0560134 -0.032595 -0.0480022 0.0357436 0.108502 0.963806 0.157805 0.873092 -1.01145 -0.796409 0.24736' />
</keyframe>
<actuator>
<motor name='abdomen_y' gear='200' joint='abdomen_y' />
<motor name='abdomen_z' gear='200' joint='abdomen_z' />
<motor name='abdomen_x' gear='200' joint='abdomen_x' />
<motor name='right_hip_x' gear='200' joint='right_hip_x' />
<motor name='right_hip_z' gear='200' joint='right_hip_z' />
<motor name='right_hip_y' gear='600' joint='right_hip_y' />
<motor name='right_knee' gear='400' joint='right_knee' />
<motor name='right_ankle_x' gear='100' joint='right_ankle_x' />
<motor name='right_ankle_y' gear='100' joint='right_ankle_y' />
<motor name='left_hip_x' gear='200' joint='left_hip_x' />
<motor name='left_hip_z' gear='200' joint='left_hip_z' />
<motor name='left_hip_y' gear='600' joint='left_hip_y' />
<motor name='left_knee' gear='400' joint='left_knee' />
<motor name='left_ankle_x' gear='100' joint='left_ankle_x' />
<motor name='left_ankle_y' gear='100' joint='left_ankle_y' />
<motor name='right_shoulder1' gear='100' joint='right_shoulder1' />
<motor name='right_shoulder2' gear='100' joint='right_shoulder2' />
<motor name='right_elbow' gear='200' joint='right_elbow' />
<motor name='left_shoulder1' gear='100' joint='left_shoulder1' />
<motor name='left_shoulder2' gear='100' joint='left_shoulder2' />
<motor name='left_elbow' gear='200' joint='left_elbow' />
</actuator>
</mujoco>
| 11,517 |
XML
| 68.385542 | 314 | 0.528784 |
RoboticExplorationLab/Deep-ILC/dflex/docs/index.rst
|
Welcome to dFlex's documentation!
==================================
dFlex is a differentiable multiphysics engine for PyTorch. It is written entirely in Python and supports reverse mode differentiation w.r.t. to any simulation inputs.
It includes a USD-based visualization module (:class:`dflex.render`), which can generate time-sampled USD files, or update an existing stage on-the-fly.
Prerequisites
-------------
* Python 3.6
* PyTorch 1.4.0 or higher
* Pixar USD lib (for visualization)
Pre-built USD Python libraries can be downloaded from https://developer.nvidia.com/usd, once they are downloaded you should follow the instructions to add them to your PYTHONPATH environment variable.
.. toctree::
:maxdepth: 3
:caption: Contents:
modules/model
modules/sim
modules/render
Quick Start
-----------------
First ensure that the package is installed in your local Python environment (use the -e option if you will be doing development):
.. code-block::
pip install -e dflex
Then, to use the engine you can import the simulation module as follows:
.. code-block::
import dflex
To build physical models there is a helper class available in :class:`dflex.model.ModelBuilder`. This can be used to create models programmatically from Python. For example, to create a chain of particles:
.. code-block::
builder = dflex.model.ModelBuilder()
# anchor point (zero mass)
builder.add_particle((0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
# build chain
for i in range(1,10):
builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), 1.0)
builder.add_spring(i-1, i, 1.e+3, 0.0, 0)
# add ground plane
builder.add_shape_plane((0.0, 1.0, 0.0, 0.0), 0)
Once you have built your model you must convert it to a finalized PyTorch simulation data structure using :func:`dflex.model.ModelBuilder.finalize()`:
.. code-block::
model = builder.finalize('cpu')
The model object represents static (non-time varying) data such as constraints, collision shapes, etc. The model is stored in PyTorch tensors, allowing differentiation with respect to both model and state.
Time Stepping
-------------
To advance the simulation forward in time (forward dynamics), we use an `integrator` object. dFlex currently offers semi-implicit integration (fully implicit integration is planned) via the :class:`dflex.sim.SemiImplicitIntegrator` class, used as follows:
.. code-block::
sim_dt = 1.0/60.0
sim_steps = 100
integrator = dflex.sim.SemiImplicitIntegrator()
for i in range(0, sim_steps):
state = integrator.forward(model, state, sim_dt)
Rendering
---------
To visualize the scene, dFlex supports USD-based rendering via the :class:`dflex.render.UsdRenderer` class. To create a renderer you must first create the USD stage and the physical model.
.. code-block::
import dflex.render
stage = Usd.Stage.CreateNew("test.usda")
renderer = dflex.render.UsdRenderer(model, stage)
renderer.draw_points = True
renderer.draw_springs = True
renderer.draw_shapes = True
Each frame the renderer should be updated with the current model state and the current elapsed simulation time:
.. code-block::
renderer.update(state, sim_time)
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 3,311 |
reStructuredText
| 27.8 | 228 | 0.700393 |
RoboticExplorationLab/Deep-ILC/dflex/docs/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../dflex'))
# -- Project information -----------------------------------------------------
project = 'dFlex'
copyright = '2020, NVIDIA'
author = 'NVIDIA'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
# 'sphinx.ext.autosummary',
'sphinx.ext.todo',
'autodocsumm'
]
# put type hints inside the description instead of the signature (easier to read)
autodoc_typehints = 'description'
# document class *and* __init__ methods
autoclass_content = 'both' #
todo_include_todos = True
intersphinx_mapping = {
'python': ("https://docs.python.org/3", None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'PyTorch': ('http://pytorch.org/docs/master/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_rtd_theme"
# html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| 2,515 |
Python
| 32.105263 | 81 | 0.659245 |
RoboticExplorationLab/Deep-ILC/dflex/docs/modules/sim.rst
|
dflex.sim
===========
.. currentmodule:: dflex.sim
.. toctree::
:maxdepth: 2
.. automodule:: dflex.sim
:members:
:undoc-members:
:show-inheritance:
| 171 |
reStructuredText
| 12.230768 | 28 | 0.567251 |
RoboticExplorationLab/Deep-ILC/dflex/docs/modules/model.rst
|
dflex.model
===========
.. currentmodule:: dflex.model
.. toctree::
:maxdepth: 2
model.modelbuilder
model.model
model.state
| 151 |
reStructuredText
| 10.692307 | 30 | 0.569536 |
RoboticExplorationLab/Deep-ILC/dflex/docs/modules/model.model.rst
|
dflex.model.Model
========================
.. autoclasssumm:: dflex.model.Model
.. autoclass:: dflex.model.Model
:members:
:undoc-members:
:show-inheritance:
| 173 |
reStructuredText
| 14.81818 | 36 | 0.583815 |
RoboticExplorationLab/Deep-ILC/dflex/docs/modules/model.modelbuilder.rst
|
dflex.model.ModelBuilder
========================
.. autoclasssumm:: dflex.model.ModelBuilder
.. autoclass:: dflex.model.ModelBuilder
:members:
:undoc-members:
:show-inheritance:
| 194 |
reStructuredText
| 16.727271 | 43 | 0.628866 |
RoboticExplorationLab/Deep-ILC/env_utils/common.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
# if there's overlap between args_list and commandline input, use commandline input
def solve_argv_conflict(args_list):
    """Strip from ``args_list`` any flag (plus its values) also given on the CLI.

    Command-line input wins: for every '-'-prefixed token found in
    ``sys.argv[1:]``, the matching flag inside ``args_list`` is removed
    together with the run of non-flag value tokens that follows it.
    ``args_list`` is modified in place.
    """
    doomed = []  # (flag, token_count) pairs scheduled for removal
    for cli_token in sys.argv[1:]:
        if not cli_token.startswith('-'):
            continue
        for pos, flag in enumerate(args_list):
            if flag != cli_token:
                continue
            # count the flag itself plus its trailing value tokens
            span = 1
            for trailing in args_list[pos + 1:]:
                if trailing.startswith('-'):
                    break
                span += 1
            doomed.append((flag, span))
            break
    for flag, span in doomed:
        start = args_list.index(flag)
        del args_list[start:start + span]
def _print_colored(code, *message):
    # Shared ANSI wrapper: emit the color code, the message parts, then reset.
    print(code, *message, '\033[0m')

def print_error(*message):
    """Print *message* in red with an 'ERROR' prefix, then raise RuntimeError."""
    _print_colored('\033[91m', 'ERROR ', *message)
    raise RuntimeError

def print_ok(*message):
    """Print *message* in green (success)."""
    _print_colored('\033[92m', *message)

def print_warning(*message):
    """Print *message* in yellow (warning)."""
    _print_colored('\033[93m', *message)

def print_info(*message):
    """Print *message* in cyan (informational)."""
    _print_colored('\033[96m', *message)
from datetime import datetime
def get_time_stamp():
    """Return the current local time formatted as 'MM-DD-YYYY-HH-MM-SS'."""
    # Single strftime call produces the same month-day-year-time layout as
    # formatting each field separately.
    return datetime.now().strftime('%m-%d-%Y-%H-%M-%S')
import argparse
def parse_model_args(model_args_path):
    """Load saved model arguments from a file into an argparse.Namespace.

    The file must contain a single Python dict expression mapping argument
    names to values.

    Args:
        model_args_path: path to the args file written by a previous run.

    Returns:
        argparse.Namespace with one attribute per dict key.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(model_args_path, 'r') as fp:
        # NOTE(review): eval of file contents executes arbitrary code --
        # only load args files from trusted sources.
        model_args = eval(fp.read())
    return argparse.Namespace(**model_args)
import torch
import numpy as np
import random
import os
def seeding(seed=0, torch_deterministic=False):
    """Seed python/numpy/torch RNGs (CPU and CUDA) and return the seed used.

    When ``torch_deterministic`` is True, additionally force deterministic
    cuDNN/cuBLAS behavior (reproducible results at some performance cost);
    otherwise enable cuDNN autotuning.
    """
    print("Setting seed: {}".format(seed))
    os.environ['PYTHONHASHSEED'] = str(seed)
    for sow in (random.seed, np.random.seed, torch.manual_seed,
                torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        sow(seed)
    if torch_deterministic:
        # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        torch.use_deterministic_algorithms(True)
    else:
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
    return seed
| 2,965 |
Python
| 31.23913 | 91 | 0.629005 |
RoboticExplorationLab/Deep-ILC/env_utils/torch_utils.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import timeit
import math
import numpy as np
import gc
import torch
import cProfile
log_output = ""
def log(s):
    # Print s and also append it (plus a newline) to the module-level
    # log_output buffer so the full log can be retrieved later.
    print(s)
    global log_output
    log_output = log_output + s + "\n"
# short hands
# torch quat/vector utils
def to_torch(x, dtype=torch.float, device='cuda:0', requires_grad=False):
    # Copy array-like x into a new tensor (defaults to float32 on CUDA device 0).
    return torch.tensor(x, dtype=dtype, device=device, requires_grad=requires_grad)
@torch.jit.script
def quat_mul(a, b):
    # Quaternion product a*b with quaternions stored as (x, y, z, w).
    # Uses a reduced-multiplication factorization rather than the naive
    # 16-multiply form; shapes must match exactly (no broadcasting).
    assert a.shape == b.shape
    shape = a.shape
    a = a.reshape(-1, 4)
    b = b.reshape(-1, 4)
    x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
    x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
    # intermediate products shared between output components
    ww = (z1 + x1) * (x2 + y2)
    yy = (w1 - y1) * (w2 + z2)
    zz = (w1 + y1) * (w2 - z2)
    xx = ww + yy + zz
    qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
    w = qq - ww + (z1 - y1) * (y2 - z2)
    x = qq - xx + (x1 + w1) * (x2 + w2)
    y = qq - yy + (w1 - x1) * (y2 + z2)
    z = qq - zz + (z1 + y1) * (w2 - x2)
    quat = torch.stack([x, y, z, w], dim=-1).view(shape)
    return quat
@torch.jit.script
def normalize(x, eps: float = 1e-9):
    # L2-normalize along the last dim; the norm is clamped at eps so a
    # zero vector does not divide by zero.
    return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def quat_apply(a, b):
    # Rotate vector(s) b by quaternion(s) a (x, y, z, w layout) using
    # v' = v + w*t + q_vec x t  with  t = 2*(q_vec x v).
    shape = b.shape
    a = a.reshape(-1, 4)
    b = b.reshape(-1, 3)
    xyz = a[:, :3]
    t = xyz.cross(b, dim=-1) * 2
    return (b + a[:, 3:] * t + xyz.cross(t, dim=-1)).view(shape)
@torch.jit.script
def quat_rotate(q, v):
    # Rotate batched vectors v (N, 3) by batched quaternions q (N, 4),
    # w component last: v' = v*(2w^2-1) + 2w*(q_vec x v) + 2*q_vec*(q_vec . v).
    shape = q.shape
    q_w = q[:, -1]
    q_vec = q[:, :3]
    a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
    b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
    c = q_vec * \
        torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
            shape[0], 3, 1)).squeeze(-1) * 2.0
    return a + b + c
@torch.jit.script
def quat_rotate_inverse(q, v):
    # Rotate v by the inverse of q; same expansion as quat_rotate with the
    # cross-product term negated (conjugate rotation).
    shape = q.shape
    q_w = q[:, -1]
    q_vec = q[:, :3]
    a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
    b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
    c = q_vec * \
        torch.bmm(q_vec.view(shape[0], 1, 3), v.view(
            shape[0], 3, 1)).squeeze(-1) * 2.0
    return a - b + c
@torch.jit.script
def quat_axis(q, axis=0):
    # type: (Tensor, int) -> Tensor
    # Return the given world-frame basis axis (0=x, 1=y, 2=z) rotated by q.
    basis_vec = torch.zeros(q.shape[0], 3, device=q.device)
    basis_vec[:, axis] = 1
    return quat_rotate(q, basis_vec)
@torch.jit.script
def quat_conjugate(a):
    # Conjugate: negate the vector part (x, y, z), keep w.
    shape = a.shape
    a = a.reshape(-1, 4)
    return torch.cat((-a[:, :3], a[:, -1:]), dim=-1).view(shape)
@torch.jit.script
def quat_unit(a):
    # Normalize a quaternion to unit length (alias for normalize).
    return normalize(a)
@torch.jit.script
def quat_from_angle_axis(angle, axis):
    # Build unit quaternions (x, y, z, w) from rotation angles (radians)
    # and rotation axes: (sin(angle/2)*axis_hat, cos(angle/2)).
    theta = (angle / 2).unsqueeze(-1)
    xyz = normalize(axis) * theta.sin()
    w = theta.cos()
    return quat_unit(torch.cat([xyz, w], dim=-1))
@torch.jit.script
def normalize_angle(x):
    # Wrap angles into (-pi, pi] via atan2(sin(x), cos(x)).
    return torch.atan2(torch.sin(x), torch.cos(x))
@torch.jit.script
def tf_inverse(q, t):
    # Invert a rigid transform (q, t): returns (q^-1, -(q^-1 applied to t)).
    q_inv = quat_conjugate(q)
    return q_inv, -quat_apply(q_inv, t)
@torch.jit.script
def tf_apply(q, t, v):
    # Transform point(s) v: rotate by q, then translate by t.
    return quat_apply(q, v) + t
@torch.jit.script
def tf_vector(q, v):
    # Transform direction(s) v: rotation only, no translation.
    return quat_apply(q, v)
@torch.jit.script
def tf_combine(q1, t1, q2, t2):
    # Compose transforms: (q1, t1) applied after (q2, t2).
    return quat_mul(q1, q2), quat_apply(q1, t2) + t1
@torch.jit.script
def get_basis_vector(q, v):
    # Rotate basis vector v by quaternion q (delegates to quat_rotate).
    return quat_rotate(q, v)
def mem_report():
    '''Report the memory usage of the tensor.storage in pytorch
    Both on CPUs and GPUs are reported'''
    def _mem_report(tensors, mem_type):
        '''Print the selected tensors of type
        There are two major storage types in our major concern:
            - GPU: tensors transferred to CUDA devices
            - CPU: tensors remaining on the system memory (usually unimportant)
        Args:
            - tensors: the tensors of specified type
            - mem_type: 'CPU' or 'GPU' in current implementation '''
        total_numel = 0
        total_mem = 0
        # track storage pointers already counted so tensors sharing one
        # allocation (views, slices) are not double-counted
        visited_data = []
        for tensor in tensors:
            if tensor.is_sparse:
                continue
            # a data_ptr indicates a memory block allocated
            data_ptr = tensor.storage().data_ptr()
            if data_ptr in visited_data:
                continue
            visited_data.append(data_ptr)
            numel = tensor.storage().size()
            total_numel += numel
            element_size = tensor.storage().element_size()
            mem = numel*element_size /1024/1024 # 32bit=4Byte, MByte
            total_mem += mem
            element_type = type(tensor).__name__
            size = tuple(tensor.size())
            # print('%s\t\t%s\t\t%.2f' % (
            # element_type,
            # size,
            # mem) )
        print('Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes' % (mem_type, total_numel, total_mem) )
    gc.collect()
    LEN = 65
    # scan every live Python object for tensors, split by device
    objects = gc.get_objects()
    #print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
    tensors = [obj for obj in objects if torch.is_tensor(obj)]
    cuda_tensors = [t for t in tensors if t.is_cuda]
    host_tensors = [t for t in tensors if not t.is_cuda]
    _mem_report(cuda_tensors, 'GPU')
    _mem_report(host_tensors, 'CPU')
    print('='*LEN)
def grad_norm(params):
    """Return the global L2 norm of gradients over ``params``.

    Parameters without a gradient are skipped. Matches the original
    behavior exactly, including raising if no parameter has a gradient
    (sqrt of a plain float).
    """
    total_sq = 0.
    for param in params:
        if param.grad is not None:
            total_sq = total_sq + (param.grad ** 2).sum()
    return torch.sqrt(total_sq)
def print_leaf_nodes(grad_fn, id_set):
    # Recursively walk an autograd graph starting at grad_fn, printing each
    # attached variable the first time it is encountered; id_set accumulates
    # ids of variables already printed so shared nodes are shown only once.
    if grad_fn is None:
        return
    if hasattr(grad_fn, 'variable'):
        mem_id = id(grad_fn.variable)
        if not(mem_id in id_set):
            print('is leaf:', grad_fn.variable.is_leaf)
            print(grad_fn.variable)
            id_set.add(mem_id)
    # print(grad_fn)
    for i in range(len(grad_fn.next_functions)):
        print_leaf_nodes(grad_fn.next_functions[i][0], id_set)
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma):
    """Mean KL divergence KL(p0 || p1) between diagonal Gaussian policies.

    Per-dimension terms are summed over the last (action) axis, then
    averaged over all remaining (batch/step) dimensions. The 1e-5 terms
    guard against division by zero / log of zero.
    """
    log_ratio = torch.log(p1_sigma / p0_sigma + 1e-5)
    quadratic = (p0_sigma ** 2 + (p1_mu - p0_mu) ** 2) / (2.0 * (p1_sigma ** 2 + 1e-5))
    per_action = log_ratio + quadratic - 0.5
    return per_action.sum(dim=-1).mean()
| 6,536 |
Python
| 27.176724 | 114 | 0.568696 |
RoboticExplorationLab/Deep-ILC/env_utils/average_meter.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
import numpy as np
class AverageMeter(nn.Module):
    """Running mean of batched values over a window of at most ``max_size`` samples.

    The mean is stored as a registered buffer so it moves with the module
    across devices and is included in state_dict.
    """
    def __init__(self, in_shape, max_size):
        super(AverageMeter, self).__init__()
        self.max_size = max_size          # window capacity in samples
        self.current_size = 0             # samples currently represented
        self.register_buffer("mean", torch.zeros(in_shape, dtype=torch.float32))

    def update(self, values):
        """Fold a batch of values (first dim = batch) into the running mean."""
        n_new = values.size()[0]
        if n_new == 0:
            return
        batch_mean = torch.mean(values.float(), dim=0)
        n_new = np.clip(n_new, 0, self.max_size)
        # shrink the weight of history so the window never exceeds max_size
        n_old = min(self.max_size - n_new, self.current_size)
        total = n_old + n_new
        self.current_size = total
        self.mean = (self.mean * n_old + batch_mean * n_new) / total

    def clear(self):
        """Reset the meter to its initial empty state."""
        self.current_size = 0
        self.mean.fill_(0)

    def __len__(self):
        return self.current_size

    def get_mean(self):
        """Return the current mean as a numpy array (leading dim squeezed)."""
        return self.mean.squeeze(0).cpu().numpy()
| 1,368 |
Python
| 34.102563 | 82 | 0.65424 |
RoboticExplorationLab/Deep-ILC/env_utils/load_utils.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import urdfpy
import math
import numpy as np
import os
import torch
import random
import xml.etree.ElementTree as ET
import dflex as df
def set_np_formatting():
    """Configure numpy print options for wide, compact array output."""
    options = dict(edgeitems=30, infstr='inf', linewidth=4000, nanstr='nan',
                   precision=2, suppress=False, threshold=10000, formatter=None)
    np.set_printoptions(**options)
def set_seed(seed, torch_deterministic=False):
    """Resolve and apply a global RNG seed; returns the seed actually used.

    A ``seed`` of -1 selects a random seed in [0, 10000), or the fixed
    value 42 when ``torch_deterministic`` is True (so runs stay
    reproducible). Seeds python, numpy and torch (CPU and CUDA) RNGs.
    """
    if seed == -1:
        seed = 42 if torch_deterministic else np.random.randint(0, 10000)
    print("Setting seed: {}".format(seed))
    os.environ['PYTHONHASHSEED'] = str(seed)
    for sow in (random.seed, np.random.seed, torch.manual_seed,
                torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        sow(seed)
    if torch_deterministic:
        # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        torch.use_deterministic_algorithms(True)
    else:
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
    return seed
def urdf_add_collision(builder, link, collisions, shape_ke, shape_kd, shape_kf, shape_mu):
    # Attach URDF collision geometries (box/sphere/cylinder/mesh) to an
    # existing dFlex link. shape_ke/kd/kf/mu are contact stiffness, damping,
    # friction stiffness and friction coefficient forwarded to every shape.
    # NOTE(review): a collision with an unrecognized geometry type is
    # silently skipped -- confirm that is acceptable.
    # add geometry
    for collision in collisions:
        # collision.origin is a 4x4 matrix; convert to position + rpy
        origin = urdfpy.matrix_to_xyz_rpy(collision.origin)
        pos = origin[0:3]
        rot = df.rpy2quat(*origin[3:6])
        geo = collision.geometry
        if (geo.box):
            # URDF gives full extents; dFlex expects half-extents
            builder.add_shape_box(
                link,
                pos,
                rot,
                geo.box.size[0]*0.5,
                geo.box.size[1]*0.5,
                geo.box.size[2]*0.5,
                ke=shape_ke,
                kd=shape_kd,
                kf=shape_kf,
                mu=shape_mu)
        if (geo.sphere):
            builder.add_shape_sphere(
                link,
                pos,
                rot,
                geo.sphere.radius,
                ke=shape_ke,
                kd=shape_kd,
                kf=shape_kf,
                mu=shape_mu)
        if (geo.cylinder):
            # cylinders in URDF are aligned with z-axis, while dFlex uses x-axis
            r = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi*0.5)
            builder.add_shape_capsule(
                link,
                pos,
                df.quat_multiply(rot, r),
                geo.cylinder.radius,
                geo.cylinder.length*0.5,
                ke=shape_ke,
                kd=shape_kd,
                kf=shape_kf,
                mu=shape_mu)
        if (geo.mesh):
            # flatten each trimesh into vertex/index lists for df.Mesh
            for m in geo.mesh.meshes:
                faces = []
                vertices = []
                for v in m.vertices:
                    vertices.append(np.array(v))
                for f in m.faces:
                    faces.append(int(f[0]))
                    faces.append(int(f[1]))
                    faces.append(int(f[2]))
                mesh = df.Mesh(vertices, faces)
                builder.add_shape_mesh(
                    link,
                    pos,
                    rot,
                    mesh,
                    ke=shape_ke,
                    kd=shape_kd,
                    kf=shape_kf,
                    mu=shape_mu)
def urdf_load(
    builder,
    filename,
    xform,
    floating=False,
    armature=0.0,
    shape_ke=1.e+4,
    shape_kd=1.e+4,
    shape_kf=1.e+2,
    shape_mu=0.25,
    limit_ke=100.0,
    limit_kd=1.0):
    # Parse a URDF file with urdfpy and add the robot as one dFlex
    # articulation. xform is the root transform (position, quaternion);
    # floating selects a free-joint base instead of a fixed one.
    # NOTE(review): the armature parameter is never used in this body --
    # confirm whether it should be forwarded to add_link.
    robot = urdfpy.URDF.load(filename)
    # maps from link name -> link index
    link_index = {}
    builder.add_articulation()
    # add base
    if (floating):
        root = builder.add_link(-1, df.transform_identity(), (0,0,0), df.JOINT_FREE)
        # set dofs to transform
        start = builder.joint_q_start[root]
        builder.joint_q[start + 0] = xform[0][0]
        builder.joint_q[start + 1] = xform[0][1]
        builder.joint_q[start + 2] = xform[0][2]
        builder.joint_q[start + 3] = xform[1][0]
        builder.joint_q[start + 4] = xform[1][1]
        builder.joint_q[start + 5] = xform[1][2]
        builder.joint_q[start + 6] = xform[1][3]
    else:
        root = builder.add_link(-1, xform, (0,0,0), df.JOINT_FIXED)
    urdf_add_collision(builder, root, robot.links[0].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
    link_index[robot.links[0].name] = root
    # add children
    for joint in robot.joints:
        # map URDF joint types onto dFlex joint types
        type = None
        axis = (0.0, 0.0, 0.0)
        if (joint.joint_type == "revolute" or joint.joint_type == "continuous"):
            type = df.JOINT_REVOLUTE
            axis = joint.axis
        if (joint.joint_type == "prismatic"):
            type = df.JOINT_PRISMATIC
            axis = joint.axis
        if (joint.joint_type == "fixed"):
            type = df.JOINT_FIXED
        if (joint.joint_type == "floating"):
            type = df.JOINT_FREE
        parent = -1
        if joint.parent in link_index:
            parent = link_index[joint.parent]
        origin = urdfpy.matrix_to_xyz_rpy(joint.origin)
        pos = origin[0:3]
        rot = df.rpy2quat(*origin[3:6])
        # wide default limits used when the URDF specifies none
        lower = -1.e+3
        upper = 1.e+3
        damping = 0.0
        # limits
        if (joint.limit):
            if (joint.limit.lower != None):
                lower = joint.limit.lower
            if (joint.limit.upper != None):
                upper = joint.limit.upper
        # damping
        if (joint.dynamics):
            if (joint.dynamics.damping):
                damping = joint.dynamics.damping
        # add link
        link = builder.add_link(
            parent=parent,
            X_pj=df.transform(pos, rot),
            axis=axis,
            type=type,
            limit_lower=lower,
            limit_upper=upper,
            limit_ke=limit_ke,
            limit_kd=limit_kd,
            damping=damping)
        # add collisions
        urdf_add_collision(builder, link, robot.link_map[joint.child].collisions, shape_ke, shape_kd, shape_kf, shape_mu)
        # add ourselves to the index
        link_index[joint.child] = link
# build an articulated tree
def build_tree(
    builder,
    angle,
    max_depth,
    width=0.05,
    length=0.25,
    density=1000.0,
    joint_stiffness=0.0,
    joint_damping=0.0,
    shape_ke = 1.e+4,
    shape_kd = 1.e+3,
    shape_kf = 1.e+2,
    shape_mu = 0.5,
    floating=False):
    # Build a serial chain ("tree" of depth max_depth with branching factor 1)
    # of capsule links, each connected to its parent by a revolute joint about
    # z rotated by `angle`; the root becomes a free joint when floating=True.
    # NOTE(review): the density parameter is never used in this body --
    # confirm whether it should be forwarded to the builder.
    def build_recursive(parent, depth):
        # one capsule link per recursion level, attached to `parent`
        if (depth >= max_depth):
            return
        X_pj = df.transform((length * 2.0, 0.0, 0.0), df.quat_from_axis_angle((0.0, 0.0, 1.0), angle))
        type = df.JOINT_REVOLUTE
        axis = (0.0, 0.0, 1.0)
        if (depth == 0 and floating == True):
            X_pj = df.transform((0.0, 0.0, 0.0), df.quat_identity())
            type = df.JOINT_FREE
        link = builder.add_link(
            parent,
            X_pj,
            axis,
            type,
            stiffness=joint_stiffness,
            damping=joint_damping)
        # capsule
        shape = builder.add_shape_capsule(
            link,
            pos=(length, 0.0, 0.0),
            radius=width,
            half_width=length,
            ke=shape_ke,
            kd=shape_kd,
            kf=shape_kf,
            mu=shape_mu)
        # recurse
        #build_tree_recursive(builder, link, angle, width, depth + 1, max_depth, shape_ke, shape_kd, shape_kf, shape_mu, floating)
        build_recursive(link, depth + 1)
    #
    build_recursive(-1, 0)
# Mujoco file format parser
def parse_mjcf(
    filename,
    builder,
    density=1000.0,
    stiffness=0.0,
    damping=1.0,
    contact_ke=1e4,
    contact_kd=1e4,
    contact_kf=1e3,
    contact_mu=0.5,
    limit_ke=100.0,
    limit_kd=10.0,
    armature=0.01,
    radians=False,
    load_stiffness=False,
    load_armature=False):
    """Parses a MuJoCo MJCF XML file and adds its bodies, joints and geoms to `builder`.

    Each MJCF joint becomes one simulation link; geoms attached to a body are added
    to the body's last joint link. Only "sphere" and "capsule" geoms are supported.

    Args:
        filename: path to the MJCF XML file.
        builder: model builder receiving links/shapes (df.sim.ModelBuilder-like).
        density: shape density used for mass computation.
        stiffness, damping, armature: default joint parameters; per-joint XML
            values are used instead when load_stiffness / load_armature are True
            (damping is always read from the XML when present).
        contact_ke/kd/kf/mu: contact stiffness/damping/friction parameters.
        limit_ke, limit_kd: joint-limit stiffness and damping.
        radians: if True, joint "range" values are already in radians; otherwise
            they are converted from degrees.
    """
    file = ET.parse(filename)
    root = file.getroot()
    # MJCF joint type -> dflex joint type
    type_map = {
        "ball": df.JOINT_BALL,
        "hinge": df.JOINT_REVOLUTE,
        "slide": df.JOINT_PRISMATIC,
        "free": df.JOINT_FREE,
        "fixed": df.JOINT_FIXED
    }

    def parse_float(node, key, default):
        # Attribute as float, or `default` when absent.
        if key in node.attrib:
            return float(node.attrib[key])
        else:
            return default

    def parse_bool(node, key, default):
        # Attribute as bool ("true" -> True, anything else -> False), or `default` when absent.
        if key in node.attrib:
            if node.attrib[key] == "true":
                return True
            else:
                return False
        else:
            return default

    def parse_vec(node, key, default):
        # Space-separated attribute as an np.array, or `default` when absent.
        if key in node.attrib:
            return np.fromstring(node.attrib[key], sep=" ")
        else:
            return np.array(default)

    def parse_body(body, parent, last_joint_pos):
        body_name = body.attrib["name"]
        body_pos = np.fromstring(body.attrib["pos"], sep=" ")

        #-----------------
        # add one simulation link per joint; we assume joints attached to one
        # body share the same joint_pos
        for i, joint in enumerate(body.findall("joint")):
            joint_name = joint.attrib["name"]
            joint_type = type_map[joint.attrib.get("type", 'hinge')]
            joint_axis = parse_vec(joint, "axis", (0.0, 0.0, 0.0))
            joint_pos = parse_vec(joint, "pos", (0.0, 0.0, 0.0))
            joint_limited = parse_bool(joint, "limited", True)
            if joint_limited:
                if radians:
                    joint_range = parse_vec(joint, "range", (np.deg2rad(-170.), np.deg2rad(170.)))
                else:
                    joint_range = np.deg2rad(parse_vec(joint, "range", (-170.0, 170.0)))
            else:
                # effectively unlimited
                joint_range = np.array([-1.e+6, 1.e+6])
            if load_stiffness:
                joint_stiffness = parse_float(joint, 'stiffness', stiffness)
            else:
                joint_stiffness = stiffness
            joint_damping = parse_float(joint, 'damping', damping)
            if load_armature:
                joint_armature = parse_float(joint, "armature", armature)
            else:
                joint_armature = armature
            joint_axis = df.normalize(joint_axis)
            if (parent == -1):
                # root body: position comes from the joint transform only
                body_pos = np.array((0.0, 0.0, 0.0))
            #-----------------
            # add body
            link = builder.add_link(
                parent,
                X_pj=df.transform(body_pos + joint_pos - last_joint_pos, df.quat_identity()),
                axis=joint_axis,
                type=joint_type,
                limit_lower=joint_range[0],
                limit_upper=joint_range[1],
                limit_ke=limit_ke,
                limit_kd=limit_kd,
                stiffness=joint_stiffness,
                damping=joint_damping,
                armature=joint_armature)
            # each joint is one body in simulation; chain subsequent joints off this link
            parent = link
            body_pos = [0.0, 0.0, 0.0]
            last_joint_pos = joint_pos

        #-----------------
        # add shapes to the last joint in the body
        for geom in body.findall("geom"):
            geom_name = geom.attrib["name"]
            geom_type = geom.attrib["type"]
            geom_size = parse_vec(geom, "size", [1.0])
            geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
            geom_rot = parse_vec(geom, "quat", (0.0, 0.0, 0.0, 1.0))
            if (geom_type == "sphere"):
                builder.add_shape_sphere(
                    link,
                    pos=geom_pos - last_joint_pos,  # position relative to the parent frame
                    rot=geom_rot,
                    radius=geom_size[0],
                    density=density,
                    ke=contact_ke,
                    kd=contact_kd,
                    kf=contact_kf,
                    mu=contact_mu)
            elif (geom_type == "capsule"):
                if ("fromto" in geom.attrib):
                    geom_fromto = parse_vec(geom, "fromto", (0.0, 0.0, 0.0, 1.0, 0.0, 0.0))
                    start = geom_fromto[0:3]
                    end = geom_fromto[3:6]
                    # compute rotation to align dflex capsule (along x-axis) with mjcf fromto direction;
                    # clamp the dot product so float error cannot push acos outside its [-1, 1] domain (NaN)
                    axis = df.normalize(end - start)
                    angle = math.acos(np.clip(np.dot(axis, (1.0, 0.0, 0.0)), -1.0, 1.0))
                    # NOTE(review): if fromto is exactly parallel to x, this cross product is zero
                    # and the normalized axis is degenerate — TODO confirm df.normalize handles it.
                    axis = df.normalize(np.cross(axis, (1.0, 0.0, 0.0)))
                    geom_pos = (start + end) * 0.5
                    geom_rot = df.quat_from_axis_angle(axis, -angle)
                    geom_radius = geom_size[0]
                    geom_width = np.linalg.norm(end - start) * 0.5
                else:
                    geom_radius = geom_size[0]
                    geom_width = geom_size[1]
                    geom_pos = parse_vec(geom, "pos", (0.0, 0.0, 0.0))
                    if ("axisangle" in geom.attrib):
                        axis_angle = parse_vec(geom, "axisangle", (0.0, 1.0, 0.0, 0.0))
                        geom_rot = df.quat_from_axis_angle(axis_angle[0:3], axis_angle[3])
                    if ("quat" in geom.attrib):
                        q = parse_vec(geom, "quat", df.quat_identity())
                        geom_rot = q
                    # re-orient: dflex capsules lie along x
                    geom_rot = df.quat_multiply(geom_rot, df.quat_from_axis_angle((0.0, 1.0, 0.0), -math.pi * 0.5))
                builder.add_shape_capsule(
                    link,
                    pos=geom_pos - last_joint_pos,
                    rot=geom_rot,
                    radius=geom_radius,
                    half_width=geom_width,
                    density=density,
                    ke=contact_ke,
                    kd=contact_kd,
                    kf=contact_kf,
                    mu=contact_mu)
            else:
                print("Type: " + geom_type + " unsupported")

        #-----------------
        # recurse into child bodies
        for child in body.findall("body"):
            parse_body(child, link, last_joint_pos)

    #-----------------
    # start articulation
    builder.add_articulation()
    world = root.find("worldbody")
    for body in world.findall("body"):
        parse_body(body, -1, np.zeros(3))
# SNU file format parser
class MuscleUnit:
    """Container for one muscle unit parsed from an SNU muscle file.

    Fields are populated externally (see Skeleton.parse_muscles).
    """

    def __init__(self):
        # identifier from the XML "name" attribute
        self.name = ""
        # link indices of the bodies this muscle routes through
        self.bones = []
        # waypoint locations, one per entry in `bones`
        self.points = []
        # strength scalar (set from the unit's "f0" attribute by the parser)
        self.muscle_strength = 0.0
class Skeleton:
    """Loads an SNU skeleton file (and optionally a muscle file) into a builder.

    Builds one link per skeleton Node and records name->link / name->transform
    maps so muscle waypoints can be expressed in joint-local coordinates.
    """
    def __init__(self, skeleton_file, muscle_file, builder,
            filter={},
            visualize_shapes=True,
            stiffness=5.0,
            damping=2.0,
            contact_ke=5000.0,
            contact_kd=2000.0,
            contact_kf=1000.0,
            contact_mu=0.5,
            limit_ke=1000.0,
            limit_kd=10.0,
            armature = 0.05):
        # store joint/contact parameters used while parsing
        self.armature = armature
        self.stiffness = stiffness
        self.damping = damping
        self.contact_ke = contact_ke
        self.contact_kd = contact_kd
        self.contact_kf = contact_kf
        self.limit_ke = limit_ke
        self.limit_kd = limit_kd
        self.contact_mu = contact_mu
        self.visualize_shapes = visualize_shapes
        self.parse_skeleton(skeleton_file, builder, filter)
        # muscles are optional; they reference links created by parse_skeleton
        if muscle_file != None:
            self.parse_muscles(muscle_file, builder)

    def parse_skeleton(self, filename, builder, filter):
        """Parses <Node> elements and adds a link (and box shape) for each.

        `filter`: if non-empty, only nodes whose name is in `filter` get a link;
        filtered-out nodes are still recorded in node_map with link index -1.
        """
        file = ET.parse(filename)
        root = file.getroot()

        self.node_map = {}     # map node names to link indices
        self.xform_map = {}    # map node names to joint transforms (world/skeleton space)
        self.mesh_map = {}     # map mesh names to link indices objects

        self.coord_start = len(builder.joint_q)
        self.dof_start = len(builder.joint_qd)

        # SNU joint type -> dflex joint type
        type_map = {
            "Ball": df.JOINT_BALL,
            "Revolute": df.JOINT_REVOLUTE,
            "Prismatic": df.JOINT_PRISMATIC,
            "Free": df.JOINT_FREE,
            "Fixed": df.JOINT_FIXED
        }

        builder.add_articulation()

        for child in root:
            if (child.tag == "Node"):
                body = child.find("Body")
                joint = child.find("Joint")
                name = child.attrib["name"]
                parent = child.attrib["parent"]
                parent_X_s = df.transform_identity()

                # resolve parent link and its joint transform (if already parsed)
                if parent in self.node_map:
                    parent_link = self.node_map[parent]
                    parent_X_s = self.xform_map[parent]
                else:
                    parent_link = -1

                body_xform = body.find("Transformation")
                joint_xform = joint.find("Transformation")

                body_mesh = body.attrib["obj"]
                body_size = np.fromstring(body.attrib["size"], sep=" ")
                body_type = body.attrib["type"]
                body_mass = float(body.attrib["mass"])

                x = body_size[0]
                y = body_size[1]
                z = body_size[2]
                # uniform density from given mass and box volume
                density = body_mass / (x*y*z)

                # scale limit/stiffness parameters by relative body mass
                max_body_mass = 15.0
                mass_scale = body_mass / max_body_mass

                # transforms are given as a 3x3 linear part plus translation
                body_R_s = np.fromstring(body_xform.attrib["linear"], sep=" ").reshape((3,3))
                body_t_s = np.fromstring(body_xform.attrib["translation"], sep=" ")

                joint_R_s = np.fromstring(joint_xform.attrib["linear"], sep=" ").reshape((3,3))
                joint_t_s = np.fromstring(joint_xform.attrib["translation"], sep=" ")

                joint_type = type_map[joint.attrib["type"]]

                joint_lower = -1.e+3
                joint_upper = 1.e+3
                # limits only apply to revolute joints in this format
                if (joint_type == type_map["Revolute"]):
                    if ("lower" in joint.attrib):
                        joint_lower = np.fromstring(joint.attrib["lower"], sep=" ")[0]
                    if ("upper" in joint.attrib):
                        joint_upper = np.fromstring(joint.attrib["upper"], sep=" ")[0]
                # print(joint_type, joint_lower, joint_upper)

                if ("axis" in joint.attrib):
                    joint_axis = np.fromstring(joint.attrib["axis"], sep=" ")
                else:
                    joint_axis = np.array((0.0, 0.0, 0.0))

                body_X_s = df.transform(body_t_s, df.quat_from_matrix(body_R_s))
                joint_X_s = df.transform(joint_t_s, df.quat_from_matrix(joint_R_s))

                mesh_base = os.path.splitext(body_mesh)[0]
                mesh_file = mesh_base + ".usd"

                link = -1
                if len(filter) == 0 or name in filter:
                    # joint transform relative to parent joint; body transform relative to joint
                    joint_X_p = df.transform_multiply(df.transform_inverse(parent_X_s), joint_X_s)
                    body_X_c = df.transform_multiply(df.transform_inverse(joint_X_s), body_X_s)

                    if (parent_link == -1):
                        joint_X_p = df.transform_identity()

                    # add link
                    link = builder.add_link(
                        parent=parent_link,
                        X_pj=joint_X_p,
                        axis=joint_axis,
                        type=joint_type,
                        limit_lower=joint_lower,
                        limit_upper=joint_upper,
                        limit_ke=self.limit_ke * mass_scale,
                        limit_kd=self.limit_kd * mass_scale,
                        damping=self.damping,
                        stiffness=self.stiffness * math.sqrt(mass_scale),
                        armature=self.armature)
                        # armature=self.armature * math.sqrt(mass_scale))

                    # add box shape matching the body's reported size
                    shape = builder.add_shape_box(
                        body=link,
                        pos=body_X_c[0],
                        rot=body_X_c[1],
                        hx=x*0.5,
                        hy=y*0.5,
                        hz=z*0.5,
                        density=density,
                        ke=self.contact_ke,
                        kd=self.contact_kd,
                        kf=self.contact_kf,
                        mu=self.contact_mu)

                # add lookup in name->link map (link stays -1 for filtered nodes)
                # save parent transform
                self.xform_map[name] = joint_X_s
                self.node_map[name] = link
                self.mesh_map[mesh_base] = link

    def parse_muscles(self, filename, builder):
        """Parses <Unit> elements; a muscle touching any filtered-out link
        (index -1) is marked incomplete and skipped."""

        # list of MuscleUnits
        muscles = []

        file = ET.parse(filename)
        root = file.getroot()

        self.muscle_start = len(builder.muscle_activation)

        for child in root:
            if (child.tag == "Unit"):
                unit_name = child.attrib["name"]
                unit_f0 = float(child.attrib["f0"])
                unit_lm = float(child.attrib["lm"])
                unit_lt = float(child.attrib["lt"])
                unit_lmax = float(child.attrib["lmax"])
                unit_pen = float(child.attrib["pen_angle"])

                m = MuscleUnit()
                m.name = unit_name
                m.muscle_strength = unit_f0

                incomplete = False
                for waypoint in child.iter("Waypoint"):
                    way_bone = waypoint.attrib["body"]
                    # NOTE(review): raises KeyError if the bone name never appeared
                    # in the skeleton file — TODO confirm inputs are always consistent
                    way_link = self.node_map[way_bone]
                    way_loc = np.fromstring(waypoint.attrib["p"], sep=" ", dtype=np.float32)

                    if (way_link == -1):
                        incomplete = True
                        break

                    # transform loc to joint local space
                    joint_X_s = self.xform_map[way_bone]
                    way_loc = df.transform_point(df.transform_inverse(joint_X_s), way_loc)

                    m.bones.append(way_link)
                    m.points.append(way_loc)

                if not incomplete:
                    muscles.append(m)
                    builder.add_muscle(m.bones, m.points, f0=unit_f0, lm=unit_lm, lt=unit_lt, lmax=unit_lmax, pen=unit_pen)

        self.muscles = muscles
| 22,759 |
Python
| 30.523546 | 130 | 0.482622 |
RoboticExplorationLab/Deep-ILC/env_utils/dataset.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
class CriticDataset:
    """Minibatch view over flattened critic-training tensors.

    All inputs are flattened over their leading dimensions so one row is one
    sample. `states_grad` and `early_term` are optional companions to
    `states_joint`; they are sliced alongside `obs`/`target_values` when present.
    """
    def __init__(self, batch_size, obs, target_values, states_grad=None, states_joint=None, early_term=None, shuffle = False, drop_last = False):
        self.obs = obs.view(-1, obs.shape[-1])
        self.states = states_joint.view(-1, states_joint.shape[-1]) if states_joint is not None else None
        self.states_grad = states_grad.view(-1, states_grad.shape[-1]) if states_grad is not None else None
        # Fix: always define early_term (the original only assigned it when the
        # argument was given, so shuffle()/__getitem__ raised AttributeError when
        # states_joint was supplied without early_term).
        self.early_term = early_term.view(-1) if early_term is not None else None
        self.target_values = target_values.view(-1)
        self.batch_size = batch_size

        if shuffle:
            self.shuffle()

        if drop_last:
            # incomplete trailing batch is discarded
            self.length = self.obs.shape[0] // self.batch_size
        else:
            # ceil-division: trailing partial batch counts
            self.length = ((self.obs.shape[0] - 1) // self.batch_size) + 1

    def shuffle(self):
        """Applies one shared random permutation to all stored tensors."""
        index = np.random.permutation(self.obs.shape[0])
        self.obs = self.obs[index, :]
        self.target_values = self.target_values[index]
        if self.states is not None:
            self.states = self.states[index]
        if self.states_grad is not None:
            self.states_grad = self.states_grad[index]
        if self.early_term is not None:
            self.early_term = self.early_term[index]

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        start_idx = index * self.batch_size
        # clamp so the last (possibly partial) batch stays in range
        end_idx = min((index + 1) * self.batch_size, self.obs.shape[0])
        batch = {
            'obs': self.obs[start_idx:end_idx, :],
            'target_values': self.target_values[start_idx:end_idx],
        }
        if self.states is not None:
            batch['states'] = self.states[start_idx:end_idx]
            # include companions only when they were supplied
            if self.states_grad is not None:
                batch['states_grad'] = self.states_grad[start_idx:end_idx]
            if self.early_term is not None:
                batch['early_term'] = self.early_term[start_idx:end_idx]
        return batch
| 2,391 |
Python
| 43.296295 | 258 | 0.627771 |
RoboticExplorationLab/Deep-ILC/env_utils/time_report.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import time
from utils.common import *
class Timer:
    """Named accumulating stopwatch: on()/off() cycles add to time_total."""

    def __init__(self, name):
        self.name = name
        self.start_time = None   # wall-clock stamp while running, None when stopped
        self.time_total = 0.0    # accumulated seconds across all on()/off() cycles

    def on(self):
        # starting a running timer is a programming error
        assert self.start_time is None, "Timer {} is already turned on!".format(self.name)
        self.start_time = time.time()

    def off(self):
        assert self.start_time is not None, "Timer {} not started yet!".format(self.name)
        elapsed = time.time() - self.start_time
        self.time_total += elapsed
        self.start_time = None

    def report(self):
        print_info('Time report [{}]: {:.2f} seconds'.format(self.name, self.time_total))

    def clear(self):
        """Resets the timer to its initial stopped state."""
        self.start_time = None
        self.time_total = 0.0
class TimeReport:
    """Registry of named Timers with start/stop/report/clear helpers."""

    def __init__(self):
        self.timers = {}   # name -> Timer

    def add_timer(self, name):
        assert name not in self.timers, "Timer {} already exists!".format(name)
        self.timers[name] = Timer(name = name)

    def start_timer(self, name):
        assert name in self.timers, "Timer {} does not exist!".format(name)
        self.timers[name].on()

    def end_timer(self, name):
        assert name in self.timers, "Timer {} does not exist!".format(name)
        self.timers[name].off()

    def report(self, name = None):
        """Reports one timer by name, or all timers when name is None."""
        if name is not None:
            assert name in self.timers, "Timer {} does not exist!".format(name)
            self.timers[name].report()
        else:
            print_info("------------Time Report------------")
            for timer_name in self.timers.keys():
                self.timers[timer_name].report()
            print_info("-----------------------------------")

    def clear_timer(self, name = None):
        """Resets one timer by name, or every registered timer when name is None."""
        if name is not None:
            assert name in self.timers, "Timer {} does not exist!".format(name)
            self.timers[name].clear()
        else:
            for timer_name in self.timers.keys():
                self.timers[timer_name].clear()

    def pop_timer(self, name = None):
        """Reports then removes one timer, or reports and removes all of them."""
        if name is not None:
            assert name in self.timers, "Timer {} does not exist!".format(name)
            self.timers[name].report()
            del self.timers[name]
        else:
            self.report()
            self.timers = {}
| 2,688 |
Python
| 34.853333 | 90 | 0.58631 |
RoboticExplorationLab/Deep-ILC/env_utils/running_mean_std.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Tuple
import torch
class RunningMeanStd(object):
    def __init__(self, epsilon: float = 1e-4, shape: Tuple[int, ...] = (), device = 'cuda:0'):
        """
        Calulates the running mean and std of a data stream
        https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
        :param epsilon: helps with arithmetic issues
        :param shape: the shape of the data stream's output
        """
        self.mean = torch.zeros(shape, dtype = torch.float32, device = device)
        self.var = torch.ones(shape, dtype = torch.float32, device = device)
        self.count = epsilon   # pseudo-count; starts tiny to avoid division by zero

    def to(self, device):
        """Returns a copy of these statistics living on `device`."""
        copied = RunningMeanStd(device = device)
        copied.mean = self.mean.to(device).clone()
        copied.var = self.var.to(device).clone()
        copied.count = self.count
        return copied

    @torch.no_grad()
    def update(self, arr: torch.tensor) -> None:
        """Folds one batch (dim 0 = samples) into the running statistics."""
        batch_mean = torch.mean(arr, dim = 0)
        batch_var = torch.var(arr, dim = 0, unbiased = False)
        batch_count = arr.shape[0]
        self.update_from_moments(batch_mean, batch_var, batch_count)

    def update_from_moments(self, batch_mean: torch.tensor, batch_var: torch.tensor, batch_count: int) -> None:
        # Chan et al. parallel-variance merge of (mean, var, count) pairs
        diff = batch_mean - self.mean
        total = self.count + batch_count
        merged_mean = self.mean + diff * batch_count / total
        own_m2 = self.var * self.count
        batch_m2 = batch_var * batch_count
        merged_m2 = own_m2 + batch_m2 + torch.square(diff) * self.count * batch_count / (self.count + batch_count)
        merged_var = merged_m2 / (self.count + batch_count)
        self.mean = merged_mean
        self.var = merged_var
        self.count = batch_count + self.count

    def normalize(self, arr:torch.tensor, un_norm = False) -> torch.tensor:
        """Whitens `arr` with the running stats; `un_norm=True` inverts the mapping."""
        if un_norm:
            return arr * torch.sqrt(self.var + 1e-5) + self.mean
        return (arr - self.mean) / torch.sqrt(self.var + 1e-5)
| 2,462 |
Python
| 40.745762 | 111 | 0.638099 |
RoboticExplorationLab/CGAC/diffrl_conda.yml
|
name: shac
channels:
- pytorch
- defaults
dependencies:
- python=3.8.13=h12debd9_0
- pytorch=1.11.0=py3.8_cuda11.3_cudnn8.2.0_0
- torchvision=0.12.0=py38_cu113
- pip:
- pyyaml==6.0
- tensorboard==2.8.0
- tensorboardx==2.5
- urdfpy==0.0.22
- usd-core==22.3
| 288 |
YAML
| 18.266665 | 46 | 0.611111 |
RoboticExplorationLab/CGAC/README.md
|
# SHAC
This repository contains the implementation for the paper [Practical Critic Gradient based Actor Critic for On-Policy Reinforcement Learning](https://openreview.net/forum?id=ddl_4qQKFmY) (L4DC 2023).
In this paper, we present a different class of on-policy algorithms based on SARSA, which estimate the policy gradient using the critic-action gradients. We show that they are better suited than existing baselines (like PPO) especially when using highly parallelizable simulators. We observe that the critic gradient based on-policy method (CGAC) consistently achieves higher episode returns. Furthermore, in environments with high dimensional action space, CGAC also trains much faster (in wall-clock time) than the corresponding baselines.
## Installation
- `git clone https://github.com/NVlabs/DiffRL.git --recursive`
#### Prerequisites
- In the project folder, create a virtual environment in Anaconda:
```
conda env create -f cgac.yml
conda activate cgac
```
- dflex
```
cd dflex
pip install -e .
```
- rl_games, forked from [rl-games](https://github.com/Denys88/rl_games) (used for PPO and SAC training):
````
cd externals/rl_games
pip install -e .
````
- Install an older version of protobuf required for TensorboardX:
````
pip install protobuf==3.20.0
````
Install IsaacGym and IsaacGymEnvs by following the instructions in their [github repo](https://github.com/NVIDIA-Omniverse/IsaacGymEnvs).
## Training
The results might differ slightly from the paper due to CUDA nondeterminism and differences in operating system, GPU, and Python versions. The experiments in this paper were run on CentOS 7 with an NVIDIA GTX 2080 GPU.
#### CGAC (Our Method)
Run the following commands with the appropriate flags to train a model corresponding to each environment from inside the `cgac` folder.
```
python main.py --env-name AntEnv --cuda --num_actors 4096 --batch_size_update 4096 --critic_hidden 1024 1024 256 --actor_hidden 256 256 --critic_act elu --actor_act elu --num_critic_updates 2 --grad_norm 20 --final_targ_ent_coeff 3.5 --no_automatic_entropy_tuning False --alpha_schedule constant --alpha 1e-1 --final_lr 1e-5 --clip_actor_gn --seed 1 --final
python main.py --env-name HumanoidEnv --cuda --num_actors 4096 --batch_size_update 4096 --critic_hidden 1024 1024 256 --actor_hidden 256 256 --critic_act elu --actor_act elu --num_critic_updates 2 --grad_norm 20 --final_targ_ent_coeff 7.5 --seed 0 --final
python main.py --env-name SNUHumanoidEnv --cuda --num_actors 4096 --batch_size_update 4096 --critic_hidden 512 512 256 --actor_hidden 256 256 --critic_act elu --actor_act elu --num_critic_updates 6 --grad_norm 20 --final_targ_ent_coeff 3.5 --seed 0 --val_horizon 8 --final
python main.py --env-name AllegroHand --cuda --num_actors 16384 --batch_size_update 32768 --critic_hidden 1024 512 256 --actor_hidden 512 256 --critic_act elu --actor_act elu --num_critic_updates 2 --final_targ_ent_coeff 5 --alpha 0.2 --init_targ_ent_coeff 0.1 --val_horizon 8 --tau_value 0.05 --tau_policy 1.0 --final
```
#### Baseline Algorithms
In order to train the baselines for the Dflex environments (Ant, Humanoid, SNUHumanoid), simply run the following commands in the `examples` folder while switching the env-name of choice.
```
python train_script.py --env Ant --algo sac --num-seeds 5
python train_script.py --env Ant --algo ppo --num-seeds 5
```
### Acknowledgments
The dflex env and code for testing the baselines was borrowed from the [DiffRL](https://github.com/NVlabs/DiffRL.git) repo. We also build on code from (https://github.com/pranz24/pytorch-soft-actor-critic) for various aspects of the model. We thank the authors of these repos for making their code public.
## Citation
If you find our paper or code is useful, please consider citing:
```
@inproceedings{
gurumurthy2023practical,
title={Practical Critic Gradient based Actor Critic for On-Policy Reinforcement Learning},
author={Swaminathan Gurumurthy and Zachary Manchester and J Zico Kolter},
booktitle={5th Annual Learning for Dynamics {\&} Control Conference},
year={2023},
url={https://openreview.net/forum?id=ddl_4qQKFmY}
}
```
| 4,164 |
Markdown
| 48.583333 | 541 | 0.745677 |
RoboticExplorationLab/CGAC/examples/train_script.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import argparse
# Launches train_rl.py once per seed for the chosen env/algorithm.

# env name -> rl_games config file under ./cfg/<algo>/
configs = {'Ant': 'ant.yaml', 'CartPole': 'cartpole_swing_up.yaml', 'Hopper': 'hopper.yaml', 'Cheetah': 'cheetah.yaml', 'Humanoid': 'humanoid.yaml', 'SNUHumanoid': 'snu_humanoid.yaml'}

parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Ant', choices=['Ant', 'CartPole', 'Hopper', 'Cheetah', 'Humanoid', 'SNUHumanoid'])
parser.add_argument('--algo', type=str, default='sac', choices=['ppo', 'sac'])
parser.add_argument('--num-seeds', type=int, default=5)
parser.add_argument('--save-dir', type=str, default='./logs/')

args = parser.parse_args()

''' generate seeds '''
# seeds are simply 0..num_seeds-1 for reproducibility
seeds = []
for i in range(args.num_seeds):
    seeds.append(i)

''' generate commands '''
commands = []
for i in range(len(seeds)):
    seed = seeds[i]
    # per-seed log dir; note the 'varplots' suffix appended to the seed number
    save_dir = os.path.join(args.save_dir, args.env, args.algo, str(seed)+'varplots')
    config_path = os.path.join('./cfg', args.algo, configs[args.env])
    script_name = 'train_rl.py'
    cmd = 'python {} '\
        '--cfg {} '\
        '--seed {} '\
        '--logdir {} '\
        '--no-time-stamp'\
        .format(script_name, config_path, seed, save_dir)
    commands.append(cmd)

# run the seeds sequentially in this process
for command in commands:
    os.system(command)
| 1,656 |
Python
| 36.65909 | 184 | 0.669686 |
RoboticExplorationLab/CGAC/examples/train_rl.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
from rl_games.common import env_configurations, experiment, vecenv
from rl_games.common.algo_observer import AlgoObserver
from rl_games.torch_runner import Runner
from rl_games.algos_torch import torch_ext
import argparse
import envs
import os
import sys
import yaml
import numpy as np
import copy
import torch
from utils.common import *
def create_dflex_env(**kwargs):
    # Builds a dflex environment from the module-level `cfg_train`/`args`
    # globals (populated in __main__ below); the env class is looked up by
    # name from the `envs` package.
    env_fn = getattr(envs, cfg_train["params"]["diff_env"]["name"])

    env = env_fn(num_envs=cfg_train["params"]["config"]["num_actors"], \
                render=args.render, seed=args.seed, \
                episode_length=cfg_train["params"]["diff_env"].get("episode_length", 1000), \
                no_grad=True, stochastic_init=cfg_train['params']['diff_env']['stochastic_env'], \
                MM_caching_frequency=cfg_train['params']['diff_env'].get('MM_caching_frequency', 1))

    print('num_envs = ', env.num_envs)
    print('num_actions = ', env.num_actions)
    print('num_obs = ', env.num_obs)

    # optional frame stacking requested via kwargs
    frames = kwargs.pop('frames', 1)
    if frames > 1:
        # NOTE(review): `wrappers` is not imported in the visible part of this
        # file — this branch would raise NameError unless it is defined
        # elsewhere. TODO confirm.
        env = wrappers.FrameStack(env, frames, False)

    return env
class RLGPUEnv(vecenv.IVecEnv):
    """rl_games vecenv adapter that wraps a dflex env and keeps observations
    on the RL device ("cuda:0")."""
    def __init__(self, config_name, num_actors, **kwargs):
        # env_creator is registered below in env_configurations.register
        self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)

        self.full_state = {}
        self.rl_device = "cuda:0"

        # initial observation, moved onto the RL device
        self.full_state["obs"] = self.env.reset(force_reset=True).to(self.rl_device)
        print(self.full_state["obs"].shape)

    def step(self, actions):
        # actions arrive on the RL device; the env may live on a different device
        self.full_state["obs"], reward, is_done, info = self.env.step(actions.to(self.env.device))

        return self.full_state["obs"].to(self.rl_device), reward.to(self.rl_device), is_done.to(self.rl_device), info

    def reset(self):
        self.full_state["obs"] = self.env.reset(force_reset=True)

        return self.full_state["obs"].to(self.rl_device)

    def get_number_of_agents(self):
        return self.env.get_number_of_agents()

    def get_env_info(self):
        # rl_games queries this to size its networks
        info = {}
        info['action_space'] = self.env.action_space
        info['observation_space'] = self.env.observation_space

        print(info['action_space'], info['observation_space'])

        return info
# Register the dflex vecenv type and the 'dflex' env configuration with rl_games
# so config files can refer to them by name.
vecenv.register('DFLEX', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('dflex', {
    'env_creator': lambda **kwargs: create_dflex_env(**kwargs),
    'vecenv_type': 'DFLEX'})
def parse_arguments(description="Testing Args", custom_parameters=[]):
    """Builds an ArgumentParser from a list of parameter dicts and parses argv.

    Each dict needs a "name" plus either "type" (with "default") or "action";
    "help" is optional. After parsing, derives args.train from args.test /
    args.play (so those two flags must be among custom_parameters).
    Note: `description` is currently unused.
    """
    parser = argparse.ArgumentParser()

    for argument in custom_parameters:
        if ("name" in argument) and ("type" in argument or "action" in argument):
            help_str = argument.get("help", "")

            if "type" in argument:
                if "default" in argument:
                    parser.add_argument(argument["name"], type=argument["type"], default=argument["default"], help=help_str)
                else:
                    print("ERROR: default must be specified if using type")
            elif "action" in argument:
                parser.add_argument(argument["name"], action=argument["action"], help=help_str)
        else:
            print()
            print("ERROR: command line argument name, type/action must be defined, argument not added to parser")
            print("supported keys: name, type, default, action, help")
            print()

    args = parser.parse_args()

    # --test implies --play; training happens only when neither is requested
    if args.test:
        args.play = args.test
        args.train = False
    else:
        args.train = not args.play

    return args
def get_args():  # TODO: delve into the arguments
    """Declares the command-line interface for this script and parses argv.

    Returns the namespace produced by parse_arguments (including the derived
    args.train flag).
    """
    custom_parameters = [
        # mode flags
        {"name": "--test", "action": "store_true", "default": False,
            "help": "Run trained policy, no training"},
        {"name": "--play", "action": "store_true", "default": False,
            "help": "Run trained policy, the same as test"},
        # environment / config
        {"name": "--num_envs", "type": int, "default": 0, "help": "Number of envirnments"},
        {"name": "--cfg", "type": str, "default": "./cfg/rl/ant.yaml",
            "help": "Configuration file for training/playing"},
        {"name": "--checkpoint", "type": str, "default": "Base",
            "help": "Path to the saved weights, only for rl_games RL library"},
        # devices and reproducibility
        {"name": "--rl_device", "type": str, "default": "cuda:0",
            "help": "Choose CPU or GPU device for inferencing policy network"},
        {"name": "--seed", "type": int, "default": 0, "help": "Random seed"},
        # output
        {"name": "--render", "action": "store_true", "default": False,
            "help": "whether generate rendering file."},
        {"name": "--logdir", "type": str, "default": "logs/tmp/rl/"},
        {"name": "--no-time-stamp", "action": "store_true", "default": False,
            "help": "whether not add time stamp at the log path"}]

    return parse_arguments(
        description="RL Policy",
        custom_parameters=custom_parameters)
if __name__ == '__main__':
    args = get_args()

    # load training configuration
    with open(args.cfg, 'r') as f:
        cfg_train = yaml.load(f, Loader=yaml.SafeLoader)

    if args.play or args.test:
        # playback uses the player's actor count (default 1) instead of training's
        cfg_train["params"]["config"]["num_actors"] = cfg_train["params"]["config"].get("player", {}).get("num_actors", 1)

    if not args.no_time_stamp:
        args.logdir = os.path.join(args.logdir, get_time_stamp())

    # CLI override of the env count
    if args.num_envs > 0:
        cfg_train["params"]["config"]["num_actors"] = args.num_envs

    # mirror all CLI args into the config under params.general
    vargs = vars(args)
    cfg_train["params"]["general"] = {}
    for key in vargs.keys():
        cfg_train["params"]["general"][key] = vargs[key]

    # save config
    if cfg_train['params']['general']['train']:
        log_dir = cfg_train["params"]["general"]["logdir"]
        os.makedirs(log_dir, exist_ok = True)
        # save config
        # NOTE(review): this file handle is never closed explicitly
        yaml.dump(cfg_train, open(os.path.join(log_dir, 'cfg.yaml'), 'w'))

    # hand off to the rl_games runner
    runner = Runner()
    runner.load(cfg_train)
    runner.reset()
    runner.run(vargs)
| 6,658 |
Python
| 34.801075 | 124 | 0.611745 |
RoboticExplorationLab/CGAC/envs/cartpole_swing_up.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
import ipdb
class CartPoleSwingUpEnv(DFlexEnv):
    def __init__(self, render=False, device='cuda:0', num_envs=1024, seed=0, episode_length=240, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = False):
        # observation: 5 values; action: 1 value (cart force)
        num_obs = 5
        num_act = 1

        super(CartPoleSwingUpEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)

        self.stochastic_init = stochastic_init
        self.early_termination = early_termination

        self.init_sim()

        # action parameters
        self.action_strength = 1000.

        # loss related
        # reward penalty weights (see calculateReward)
        self.pole_angle_penalty = 1.0
        self.pole_velocity_penalty = 0.1

        self.cart_position_penalty = 0.05
        self.cart_velocity_penalty = 0.1

        self.cart_action_penalty = 0.0

        #-----------------------
        # set up Usd renderer (only when rendering was requested)
        if (self.visualize):
            self.stage = Usd.Stage.CreateNew("outputs/" + "CartPoleSwingUp_" + str(self.num_envs) + ".usd")

            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True

            self.render_time = 0.0
    def init_sim(self):
        """Builds the dflex model: one cartpole URDF per environment, pole
        starting hanging down (-pi), no ground plane."""
        self.builder = df.sim.ModelBuilder()

        # 60 Hz control step with 4 integrator substeps
        self.dt = 1. / 60.
        self.sim_substeps = 4
        self.sim_dt = self.dt

        if self.visualize:
            # spread envs apart so they are distinguishable in the render
            self.env_dist = 1.0
        else:
            self.env_dist = 0.0

        self.num_joint_q = 2    # cart position + pole angle
        self.num_joint_qd = 2   # their velocities

        asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
        for i in range(self.num_environments):
            lu.urdf_load(self.builder,
                        os.path.join(asset_folder, 'cartpole.urdf'),
                        df.transform((0.0, 2.5, 0.0 + self.env_dist * i), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
                        floating=False,
                        shape_kd=1e4,
                        limit_kd=1.)
            # start each pole hanging straight down
            self.builder.joint_q[i * self.num_joint_q + 1] = -math.pi

        self.model = self.builder.finalize(self.device)
        self.model.ground = False
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype = torch.float, device = self.device)

        self.integrator = df.sim.SemiImplicitIntegrator()

        self.state = self.model.state()
        # initial generalized coordinates, used by reset()
        self.start_joint_q = self.state.joint_q.clone()
        self.start_joint_qd = self.state.joint_qd.clone()
    def render(self, mode = 'human'):
        # no-op unless the env was created with render=True
        if self.visualize:
            self.render_time += self.dt
            self.renderer.update(self.state, self.render_time)

            # flush the USD stage to disk every 40 frames
            if (self.num_frames == 40):
                try:
                    self.stage.Save()
                except:
                    print('USD save error')

                self.num_frames -= 40
def step(self, actions):
with df.ScopedTimer("simulate", active=False, detailed=False):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
self.actions = actions
self.state.joint_act.view(self.num_envs, -1)[:, 0:1] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
# ipdb.set_trace()
try:
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
except:
ipdb.set_trace()
#self.obs_buf_before_reset = self.obs_buf.clone()
with df.ScopedTimer("reset", active=False, detailed=False):
if len(env_ids) > 0:
self.reset(env_ids)
with df.ScopedTimer("render", active=False, detailed=False):
self.render()
#self.extras = {'obs_before_reset': self.obs_buf_before_reset}
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids=None, force_reset=True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# fixed start state
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = self.start_joint_q.view(-1, self.num_joint_q)[env_ids, :].clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = self.start_joint_qd.view(-1, self.num_joint_qd)[env_ids, :].clone()
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = \
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] \
+ np.pi * (torch.rand(size=(len(env_ids), self.num_joint_q), device=self.device) - 0.5)
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = \
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] \
+ 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad(): # TODO: check with Miles
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.state.joint_act.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
self.state = self.model.state()
self.state.joint_q = checkpoint['joint_q'].clone().detach()
self.state.joint_qd = checkpoint['joint_qd'].clone().detach()
self.state.joint_act = checkpoint['actions'].clone().detach()
self.progress_buf = checkpoint['progress_buf'].clone().detach()
self.state.joint_q.requires_grad_(True)
self.state.joint_qd.requires_grad_(True)
# self.state.joint_act.requires_grad_(True)
'''
This function starts collecting a new trajectory from the current states but cut off the computation graph to the previous states.
It has to be called every time the algorithm starts an episode and return the observation vectors
'''
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def calculateObservations(self):
x = self.state.joint_q.view(self.num_envs, -1)[:, 0:1]
theta = self.state.joint_q.view(self.num_envs, -1)[:, 1:2]
xdot = self.state.joint_qd.view(self.num_envs, -1)[:, 0:1]
theta_dot = self.state.joint_qd.view(self.num_envs, -1)[:, 1:2]
# observations: [x, xdot, sin(theta), cos(theta), theta_dot]
self.obs_buf = torch.cat([x, xdot, torch.sin(theta), torch.cos(theta), theta_dot], dim = -1)
def calculateReward(self):
x = self.state.joint_q.view(self.num_envs, -1)[:, 0]
theta = tu.normalize_angle(self.state.joint_q.view(self.num_envs, -1)[:, 1])
xdot = self.state.joint_qd.view(self.num_envs, -1)[:, 0]
theta_dot = self.state.joint_qd.view(self.num_envs, -1)[:, 1]
self.rew_buf = -torch.pow(theta, 2.) * self.pole_angle_penalty \
- torch.pow(theta_dot, 2.) * self.pole_velocity_penalty \
- torch.pow(x, 2.) * self.cart_position_penalty \
- torch.pow(xdot, 2.) * self.cart_velocity_penalty \
- torch.sum(self.actions ** 2, dim = -1) * self.cart_action_penalty
# reset agents
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
| 9,908 |
Python
| 39.117409 | 187 | 0.581348 |
RoboticExplorationLab/CGAC/envs/__init__.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
from envs.ant import AntEnv
from envs.cheetah import CheetahEnv
from envs.hopper import HopperEnv
from envs.snu_humanoid import SNUHumanoidEnv
from envs.cartpole_swing_up import CartPoleSwingUpEnv
from envs.humanoid import HumanoidEnv
| 694 |
Python
| 48.642854 | 76 | 0.832853 |
RoboticExplorationLab/CGAC/envs/snu_humanoid.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd, UsdGeom, Gf
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class SNUHumanoidEnv(DFlexEnv):
    """Muscle-actuated lower-body humanoid built from the SNU skeleton/muscle data.

    Actions are muscle activations in [0, 1] when `mtu_actuations` is True,
    otherwise direct joint torques. Rewards forward progress, uprightness, and
    heading alignment with a small action penalty.
    """

    def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1):
        # Bodies to keep from the full SNU skeleton (pelvis + legs/feet).
        self.filter = { "Pelvis", "FemurR", "TibiaR", "TalusR", "FootThumbR", "FootPinkyR", "FemurL", "TibiaL", "TalusL", "FootThumbL", "FootPinkyL"}

        self.skeletons = []
        self.muscle_strengths = []
        self.mtu_actuations = True
        self.inv_control_freq = 1

        # "humanoid_snu_lower"
        self.num_joint_q = 29
        self.num_joint_qd = 24
        self.num_dof = self.num_joint_q - 7  # 22
        self.num_muscles = 152
        self.str_scale = 0.6

        num_act = self.num_joint_qd - 6  # 18
        num_obs = 71                     # 13 + 22 + 18 + 18
        if self.mtu_actuations:
            num_obs = 53                 # 71 - 18
            num_act = self.num_muscles

        super(SNUHumanoidEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)

        self.stochastic_init = stochastic_init
        self.progress_buf_mask = torch.zeros_like(self.progress_buf)
        self.ep_lens = torch.zeros_like(self.progress_buf) + 10

        self.init_sim()

        # reward / termination parameters
        self.termination_height = 0.46
        self.termination_tolerance = 0.05
        self.height_rew_scale = 4.0
        self.action_strength = 100.0
        self.action_penalty = -0.001
        self.joint_vel_obs_scaling = 0.1

        # set up Usd renderer
        if self.visualize:
            self.stage = Usd.Stage.CreateNew("outputs/" + self.name + "HumanoidSNU_Low_" + str(self.num_envs) + ".usd")
            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True
            self.render_time = 0.0

    def init_sim(self):
        """Load one skeleton per env, build the dflex model, and finalize on device."""
        self.builder = df.sim.ModelBuilder()

        self.dt = 1.0 / 60.0
        self.sim_substeps = 48
        self.sim_dt = self.dt
        self.ground = True

        self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
        self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
        self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))

        self.start_rot = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi * 0.5)
        self.start_rotation = tu.to_torch(self.start_rot, device=self.device, requires_grad=False)

        # initialize some data used later on
        # todo - switch to z-up
        self.up_vec = self.y_unit_tensor.clone()
        self.heading_vec = self.x_unit_tensor.clone()
        self.inv_start_rot = tu.quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
        self.basis_vec0 = self.heading_vec.clone()
        self.basis_vec1 = self.up_vec.clone()

        # Far-away target along +x drives the heading/progress reward.
        self.targets = tu.to_torch([10000.0, 0.0, 0.0], device=self.device, requires_grad=False).repeat((self.num_envs, 1))

        self.start_pos = []
        # Zero spacing when training, for numerical consistency across envs.
        self.env_dist = 2.0 if self.visualize else 0.

        start_height = 1.0
        self.asset_folder = os.path.join(os.path.dirname(__file__), 'assets/snu')
        asset_path = os.path.join(self.asset_folder, "human.xml")
        muscle_path = os.path.join(self.asset_folder, "muscle284.xml")

        for i in range(self.num_environments):
            # Same skeleton either way; muscles are only attached for MTU control.
            muscles = muscle_path if self.mtu_actuations else None
            skeleton = lu.Skeleton(asset_path, muscles, self.builder, self.filter,
                                   stiffness=5.0,
                                   damping=2.0,
                                   contact_ke=5e3,
                                   contact_kd=2e3,
                                   contact_kf=1e3,
                                   contact_mu=0.5,
                                   limit_ke=1e3,
                                   limit_kd=1e1,
                                   armature=0.05)

            # set initial position 1m off the ground, offset along z per env
            self.builder.joint_q[skeleton.coord_start + 2] = i * self.env_dist
            self.builder.joint_q[skeleton.coord_start + 1] = start_height
            self.builder.joint_q[skeleton.coord_start + 3:skeleton.coord_start + 7] = self.start_rot

            self.start_pos.append([self.builder.joint_q[skeleton.coord_start], start_height, self.builder.joint_q[skeleton.coord_start + 2]])
            self.skeletons.append(skeleton)

        num_muscles = len(self.skeletons[0].muscles)
        num_q = int(len(self.builder.joint_q) / self.num_environments)
        num_qd = int(len(self.builder.joint_qd) / self.num_environments)
        print(num_q, num_qd)
        print("Start joint_q: ", self.builder.joint_q[0:num_q])
        print("Num muscles: ", num_muscles)

        self.start_joint_q = self.builder.joint_q[7:num_q].copy()
        self.start_joint_target = self.start_joint_q.copy()

        for m in self.skeletons[0].muscles:
            self.muscle_strengths.append(self.str_scale * m.muscle_strength)
        # NOTE(review): strengths are scaled by str_scale a second time below,
        # giving an effective factor of str_scale**2 — kept as in the original;
        # confirm this is intentional.
        for mi in range(len(self.muscle_strengths)):
            self.muscle_strengths[mi] = self.str_scale * self.muscle_strengths[mi]

        self.muscle_strengths = tu.to_torch(self.muscle_strengths, device=self.device).repeat(self.num_envs)

        self.start_pos = tu.to_torch(self.start_pos, device=self.device)
        self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
        self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)

        # finalize model
        self.model = self.builder.finalize(self.device)
        self.model.ground = self.ground
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)

        self.integrator = df.sim.SemiImplicitIntegrator()
        self.state = self.model.state()

        if self.model.ground:
            self.model.collide(self.state)

    def render(self, mode='human'):
        """Draw skeleton meshes and muscle line strips into the USD stage."""
        if not self.visualize:
            return
        with torch.no_grad():
            muscle_start = 0
            skel_index = 0
            for s in self.skeletons:
                # Rigid body meshes.
                for mesh, link in s.mesh_map.items():
                    if link != -1:
                        X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())
                        mesh_path = os.path.join(self.asset_folder, "OBJ/" + mesh + ".usd")
                        self.renderer.add_mesh(mesh, mesh_path, X_sc, 1.0, self.render_time)
                # Muscles as line strips, colored by relative activation.
                for m in range(len(s.muscles)):
                    start = self.model.muscle_start[muscle_start + m].item()
                    end = self.model.muscle_start[muscle_start + m + 1].item()
                    points = []
                    for w in range(start, end):
                        link = self.model.muscle_links[w].item()
                        point = self.model.muscle_points[w].cpu().numpy()
                        X_sc = df.transform_expand(self.state.body_X_sc[link].cpu().tolist())
                        points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))
                    self.renderer.add_line_strip(points, name=s.muscles[m].name + str(skel_index), radius=0.0075, color=(self.model.muscle_activation[muscle_start + m]/self.muscle_strengths[m], 0.2, 0.5), time=self.render_time)
                muscle_start += len(s.muscles)
                skel_index += 1

        self.render_time += self.dt * self.inv_control_freq
        self.renderer.update(self.state, self.render_time)
        if self.num_frames == 1:
            try:
                self.stage.Save()
            except:
                print("USD save error")
            self.num_frames -= 1

    def step(self, actions, force_done_ids=None):
        """Apply muscle activations (or torques), simulate, and handle resets."""
        actions = actions.view((self.num_envs, self.num_actions))
        actions = torch.clip(actions, -1., 1.)
        actions = actions * 0.5 + 0.5  # map [-1, 1] -> [0, 1] activations

        ##### an ugly fix for simulation nan values #### # reference: https://github.com/pytorch/pytorch/issues/15131
        def create_hook():
            def hook(grad):
                torch.nan_to_num(grad, 0.0, 0.0, 0.0, out=grad)
            return hook

        if self.state.joint_q.requires_grad:
            self.state.joint_q.register_hook(create_hook())
        if self.state.joint_qd.requires_grad:
            self.state.joint_qd.register_hook(create_hook())
        if actions.requires_grad:
            actions.register_hook(create_hook())
        #################################################

        self.actions = actions.clone()

        for _ in range(self.inv_control_freq):
            if self.mtu_actuations:
                self.model.muscle_activation = actions.view(-1) * self.muscle_strengths
            else:
                self.state.joint_act.view(self.num_envs, -1)[:, 6:] = actions * self.action_strength
            self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
            self.sim_time += self.sim_dt

        self.reset_buf = torch.zeros_like(self.reset_buf)
        self.progress_buf += 1
        self.num_frames += 1
        self.progress_buf_mask *= 0

        self.calculateObservations()
        self.calculateReward()

        if force_done_ids is not None:
            self.reset_buf[force_done_ids] = 1

        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)

        # if self.no_grad == False:
        self.obs_buf_before_reset = self.obs_buf.clone()
        self.extras = {
            'obs_before_reset': self.obs_buf_before_reset,
            'episode_end': self.termination_buf
        }

        if len(env_ids) > 0:
            self.reset(env_ids)
        if force_done_ids is not None:
            self.progress_buf_mask[force_done_ids] = self.episode_length

        with df.ScopedTimer("render", False):
            self.render()

        return self.obs_buf, self.rew_buf, self.reset_buf, self.extras

    def reset(self, env_ids=None, force_reset=True, eplenupdate=True):
        """Restore the start pose for selected envs, with optional randomization."""
        if env_ids is None and force_reset == True:
            env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)

        if env_ids is not None:
            # clone the state to avoid gradient error
            self.state.joint_q = self.state.joint_q.clone().contiguous()
            self.state.joint_qd = self.state.joint_qd.clone().contiguous()

            joint_q = self.state.joint_q.view(self.num_envs, -1)
            joint_qd = self.state.joint_qd.view(self.num_envs, -1)

            # fixed start state
            joint_q[env_ids, 0:3] = self.start_pos[env_ids, :].clone()
            joint_q[env_ids, 3:7] = self.start_rotation.unsqueeze(0).expand(len(env_ids), -1).clone()
            joint_q[env_ids, 7:] = self.start_joint_q.unsqueeze(0).expand(len(env_ids), -1).clone()
            joint_qd[env_ids, :] = 0.

            # randomization
            if self.stochastic_init:
                joint_q[env_ids, 0:3] = joint_q[env_ids, 0:3] + 0.1 * (torch.rand(size=(len(env_ids), 3), device=self.device) - 0.5) * 2.
                angle = (torch.rand(len(env_ids), device=self.device) - 0.5) * np.pi / 12.
                axis = torch.nn.functional.normalize(torch.rand((len(env_ids), 3), device=self.device) - 0.5)
                joint_q[env_ids, 3:7] = tu.quat_mul(joint_q[env_ids, 3:7], tu.quat_from_angle_axis(angle, axis))
                joint_qd[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)

            # clear action
            self.actions = self.actions.clone()
            self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device=self.device, dtype=torch.float)

            self.progress_buf_mask[env_ids] = self.progress_buf[env_ids].clone()
            if eplenupdate:
                # rolling window of the 200 most recent episode lengths
                self.ep_lens = torch.cat([self.progress_buf[env_ids].clone(), self.ep_lens], dim=0)[:200]
            self.progress_buf[env_ids] = 0
            self.calculateObservations()

        return self.obs_buf

    # Cut off the gradient from the current state to previous states.
    def clear_grad(self, checkpoint=None):
        """Rebuild the sim state from a checkpoint, detached from the old graph."""
        with torch.no_grad():
            if checkpoint is None:
                checkpoint = {}  # NOTE: any other things to restore?
                checkpoint['joint_q'] = self.state.joint_q.clone()
                checkpoint['joint_qd'] = self.state.joint_qd.clone()
                checkpoint['actions'] = self.actions.clone()
                checkpoint['progress_buf'] = self.progress_buf.clone()

            restored_q = checkpoint['joint_q'].clone()
            restored_qd = checkpoint['joint_qd'].clone()

            # Fresh state object so no autograd edges to earlier steps survive.
            self.state = self.model.state()
            self.state.joint_q = restored_q
            self.state.joint_qd = restored_qd
            self.actions = checkpoint['actions'].clone()
            self.progress_buf = checkpoint['progress_buf'].clone()

            self.state.joint_q.requires_grad_(True)
            self.state.joint_qd.requires_grad_(True)

    # Starts collecting a new trajectory from the current state while cutting the
    # computation graph to previous states. Call at each episode start; returns
    # the observation vectors.
    def initialize_trajectory(self):
        """Detach the state from the previous episode's graph and return fresh obs."""
        self.clear_grad()
        self.calculateObservations()
        return self.obs_buf

    def get_checkpoint(self):
        """Snapshot joint state, last actions, and progress counters (all cloned)."""
        snapshot = {}
        snapshot['joint_q'] = self.state.joint_q.clone()
        snapshot['joint_qd'] = self.state.joint_qd.clone()
        snapshot['actions'] = self.actions.clone()
        snapshot['progress_buf'] = self.progress_buf.clone()
        return snapshot

    def calculateObservations(self, state=None, actions=None):
        """Assemble the 53-dim observation; updates self.obs_buf when called on self.state."""
        update_self_obs = False
        if state is None:
            state = self.state
            actions = self.actions
            update_self_obs = True

        q = state.joint_q.view(self.num_envs, -1)
        qd = state.joint_qd.view(self.num_envs, -1)
        torso_pos = q[:, 0:3]
        torso_rot = q[:, 3:7]
        lin_vel = qd[:, 3:6]
        ang_vel = qd[:, 0:3]

        # convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
        lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim=-1)

        to_target = self.targets + self.start_pos - torso_pos
        to_target[:, 1] = 0.0
        target_dirs = tu.normalize(to_target)

        torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot)
        up_vec = tu.quat_rotate(torso_quat, self.basis_vec1)
        heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0)

        obs_buf = torch.cat([
            torso_pos[:, 1:2],                       # 0: torso height
            torso_rot,                               # 1:5
            lin_vel,                                 # 5:8
            ang_vel,                                 # 8:11
            q[:, 7:],                                # 11:33 joint angles
            self.joint_vel_obs_scaling * qd[:, 6:],  # 33:51 scaled joint velocities
            up_vec[:, 1:2],                          # 51: uprightness
            (heading_vec * target_dirs).sum(dim=-1).unsqueeze(-1)],  # 52: heading alignment
            dim=-1)

        if update_self_obs:
            self.obs_buf = obs_buf
        return obs_buf

    def calculateObservationsEx(self, state=None, actions=None):
        """Like calculateObservations but for an arbitrary batch size (from actions)."""
        update_self_obs = False
        bsz = actions.shape[0]

        q = state.joint_q.view(bsz, -1)
        qd = state.joint_qd.view(bsz, -1)
        torso_pos = q[:, 0:3]
        torso_rot = q[:, 3:7]
        lin_vel = qd[:, 3:6]
        ang_vel = qd[:, 0:3]

        # convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
        lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim=-1)

        to_target = self.targets[:1] + self.start_pos[:1] - torso_pos
        to_target[:, 1] = 0.0
        target_dirs = tu.normalize(to_target)

        torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot[:1].expand(bsz, -1))
        up_vec = tu.quat_rotate(torso_quat, self.basis_vec1[:1].expand(bsz, -1))
        heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0[:1].expand(bsz, -1))

        obs_buf = torch.cat([
            torso_pos[:, 1:2],                       # 0
            torso_rot,                               # 1:5
            lin_vel,                                 # 5:8
            ang_vel,                                 # 8:11
            q[:, 7:],                                # 11:33
            self.joint_vel_obs_scaling * qd[:, 6:],  # 33:51
            up_vec[:, 1:2],                          # 51
            (heading_vec * target_dirs).sum(dim=-1).unsqueeze(-1)],  # 52
            dim=-1)

        return obs_buf

    def calculateReward(self):
        """Progress + uprightness + heading reward; flags terminations and bad states."""
        up_reward = 0.1 * self.obs_buf[:, 51]
        heading_reward = self.obs_buf[:, 52]

        # NOTE(review): height_reward is computed but not added into rew_buf —
        # kept identical to the original; confirm whether it should be included.
        height_diff = self.obs_buf[:, 0] - (self.termination_height + self.termination_tolerance)
        height_reward = torch.clip(height_diff, -1.0, self.termination_tolerance)
        height_reward = torch.where(height_reward < 0.0, -200.0 * height_reward * height_reward, height_reward)  # JIE: not smooth
        height_reward = torch.where(height_reward > 0.0, self.height_rew_scale * height_reward, height_reward)

        act_penalty = torch.sum(torch.abs(self.actions), dim=-1) * self.action_penalty  # torch.sum(self.actions ** 2, dim = -1) * self.action_penalty

        progress_reward = self.obs_buf[:, 5]

        self.rew_buf = progress_reward + up_reward + heading_reward + act_penalty

        # reset agents
        self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)
        self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)

        # an ugly fix for simulation nan values
        joint_q = self.state.joint_q.view(self.num_environments, -1)
        joint_qd = self.state.joint_qd.view(self.num_environments, -1)
        nan_masks = torch.logical_or(torch.isnan(self.obs_buf).sum(-1) > 0,
                                     torch.logical_or(torch.isnan(joint_q).sum(-1) > 0,
                                                      torch.isnan(joint_qd).sum(-1) > 0))
        inf_masks = torch.logical_or(torch.isinf(self.obs_buf).sum(-1) > 0,
                                     torch.logical_or(torch.isinf(joint_q).sum(-1) > 0,
                                                      torch.isinf(joint_qd).sum(-1) > 0))
        invalid_value_masks = torch.logical_or((torch.abs(joint_q) > 1e6).sum(-1) > 0,
                                               (torch.abs(joint_qd) > 1e6).sum(-1) > 0)
        invalid_masks = torch.logical_or(invalid_value_masks, torch.logical_or(nan_masks, inf_masks))

        self.reset_buf = torch.where(invalid_masks, torch.ones_like(self.reset_buf), self.reset_buf)
        self.rew_buf[invalid_masks] = 0.
| 21,635 |
Python
| 43.518518 | 248 | 0.560296 |
RoboticExplorationLab/CGAC/envs/humanoid.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class HumanoidEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1):
num_obs = 76
num_act = 21
super(HumanoidEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.progress_buf_mask = torch.zeros_like(self.progress_buf)
self.ep_lens = torch.zeros_like(self.progress_buf) + 10
self.init_sim()
# other parameters
self.termination_height = 0.74
self.motor_strengths = [
200,
200,
200,
200,
200,
600,
400,
100,
100,
200,
200,
600,
400,
100,
100,
100,
100,
200,
100,
100,
200]
self.motor_scale = 0.35
self.motor_strengths = tu.to_torch(self.motor_strengths, dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.action_penalty = -0.002
self.joint_vel_obs_scaling = 0.1
self.termination_tolerance = 0.1
self.height_rew_scale = 10.0
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "Humanoid_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 48
self.sim_dt = self.dt
self.ground = True
self.num_joint_q = 28
self.num_joint_qd = 27
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rot = df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)
self.start_rotation = tu.to_torch(self.start_rot, device=self.device, requires_grad=False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.heading_vec = self.x_unit_tensor.clone()
self.inv_start_rot = tu.quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = tu.to_torch([200.0, 0.0, 0.0], device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_pos = []
if self.visualize:
self.env_dist = 2.5
else:
self.env_dist = 0. # set to zero for training for numerical consistency
start_height = 1.35
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
lu.parse_mjcf(os.path.join(asset_folder, "humanoid.xml"), self.builder,
stiffness=5.0,
damping=0.1,
contact_ke=2.e+4,
contact_kd=5.e+3,
contact_kf=1.e+3,
contact_mu=0.75,
limit_ke=1.e+3,
limit_kd=1.e+1,
armature=0.007,
load_stiffness=True,
load_armature=True)
# base transform
start_pos_z = i*self.env_dist
self.start_pos.append([0.0, start_height, start_pos_z])
self.builder.joint_q[i*self.num_joint_q:i*self.num_joint_q + 3] = self.start_pos[-1]
self.builder.joint_q[i*self.num_joint_q + 3:i*self.num_joint_q + 7] = self.start_rot
num_q = int(len(self.builder.joint_q)/self.num_environments)
num_qd = int(len(self.builder.joint_qd)/self.num_environments)
print(num_q, num_qd)
print("Start joint_q: ", self.builder.joint_q[0:num_q])
self.start_joint_q = self.builder.joint_q[7:num_q].copy()
self.start_joint_target = self.start_joint_q.copy()
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
num_act = int(len(self.state.joint_act) / self.num_environments) - 6
print('num_act = ', num_act)
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
if (self.num_frames == 1):
try:
self.stage.Save()
except:
print("USD save error")
self.num_frames -= 1
def step(self, actions, force_done_ids=None):
actions = actions.view((self.num_envs, self.num_actions))
# todo - make clip range a parameter
actions = torch.clip(actions, -1., 1.)
##### an ugly fix for simulation nan values #### # reference: https://github.com/pytorch/pytorch/issues/15131
def create_hook():
def hook(grad):
torch.nan_to_num(grad, 0.0, 0.0, 0.0, out = grad)
return hook
if self.state.joint_q.requires_grad:
self.state.joint_q.register_hook(create_hook())
if self.state.joint_qd.requires_grad:
self.state.joint_qd.register_hook(create_hook())
if actions.requires_grad:
actions.register_hook(create_hook())
#################################################
self.actions = actions.clone()
self.state.joint_act.view(self.num_envs, -1)[:, 6:] = actions * self.motor_scale * self.motor_strengths
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.progress_buf_mask *= 0
self.calculateObservations()
self.calculateReward()
if force_done_ids is not None:
self.reset_buf[force_done_ids] = 1
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
# if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
if force_done_ids is not None:
self.progress_buf_mask[force_done_ids] = self.episode_length
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True, eplenupdate=True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone().contiguous()
self.state.joint_qd = self.state.joint_qd.clone().contiguous()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = self.start_rotation.unsqueeze(0).expand(len(env_ids),-1).clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.start_joint_q.unsqueeze(0).expand(len(env_ids),-1).clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] + 0.1 * (torch.rand(size=(len(env_ids), 3), device=self.device) - 0.5) * 2.
angle = (torch.rand(len(env_ids), device = self.device) - 0.5) * np.pi / 12.
axis = torch.nn.functional.normalize(torch.rand((len(env_ids), 3), device = self.device) - 0.5)
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = tu.quat_mul(self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7], tu.quat_from_angle_axis(angle, axis))
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] + 0.2 * (torch.rand(size=(len(env_ids), self.num_joint_q - 7), device = self.device) - 0.5) * 2.
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf_mask[env_ids] = self.progress_buf[env_ids].clone()
if eplenupdate:
self.ep_lens = torch.cat([self.progress_buf[env_ids].clone(), self.ep_lens], dim=0)[:200]
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
self.state.joint_q.requires_grad_(True)
self.state.joint_qd.requires_grad_(True)
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
It has to be called every time the algorithm starts an episode and it returns the observation vectors
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
    def calculateObservations(self, state=None, actions=None):
        """Build the observation tensor from a simulation state.

        Args:
            state: state whose joint_q / joint_qd are observed; defaults to
                self.state, in which case self.obs_buf is also updated in place.
            actions: actions appended to the observation; defaults to
                self.actions when state is None.

        Returns:
            (num_envs, obs_dim) tensor; the inline comments on the torch.cat
            call give the column layout.
        """
        update_self_obs = False
        if state is None:
            # observing (and caching into) the environment's own live state
            state = self.state
            actions = self.actions
            update_self_obs = True
        torso_pos = state.joint_q.view(self.num_envs, -1)[:, 0:3]
        torso_rot = state.joint_q.view(self.num_envs, -1)[:, 3:7]
        lin_vel = state.joint_qd.view(self.num_envs, -1)[:, 3:6]
        ang_vel = state.joint_qd.view(self.num_envs, -1)[:, 0:3]
        # convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
        lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim = -1)
        # horizontal direction to the target; component 1 is zeroed, and index 1
        # is also used as the height below — the up axis is y here
        to_target = self.targets + self.start_pos - torso_pos
        to_target[:, 1] = 0.0
        target_dirs = tu.normalize(to_target)
        # orientation relative to the canonical start rotation
        torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot)
        up_vec = tu.quat_rotate(torso_quat, self.basis_vec1)
        heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0)
        obs_buf = torch.cat([torso_pos[:, 1:2], # 0
                            torso_rot, # 1:5
                            lin_vel, # 5:8
                            ang_vel, # 8:11
                            state.joint_q.view(self.num_envs, -1)[:, 7:], # 11:32
                            self.joint_vel_obs_scaling * state.joint_qd.view(self.num_envs, -1)[:, 6:], # 32:53
                            up_vec[:, 1:2], # 53:54
                            (heading_vec * target_dirs).sum(dim = -1).unsqueeze(-1), # 54:55
                            actions.clone()], # 55:76
                            dim = -1)
        if update_self_obs:
            self.obs_buf = obs_buf
        return obs_buf
    def calculateObservationsEx(self, state=None, actions=None):
        """Batched variant of calculateObservations for an explicit (state, actions) pair.

        The batch size comes from actions.shape[0] rather than num_envs, and the
        per-env constants (targets, start_pos, inv_start_rot, basis vectors) are
        broadcast from their first row. Never writes self.obs_buf.
        """
        update_self_obs = False  # NOTE(review): never used in this variant — dead assignment
        bsz = actions.shape[0]
        torso_pos = state.joint_q.view(bsz, -1)[:, 0:3]
        torso_rot = state.joint_q.view(bsz, -1)[:, 3:7]
        lin_vel = state.joint_qd.view(bsz, -1)[:, 3:6]
        ang_vel = state.joint_qd.view(bsz, -1)[:, 0:3]
        # convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
        lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim = -1)
        to_target = self.targets[:1] + self.start_pos[:1] - torso_pos
        to_target[:, 1] = 0.0
        target_dirs = tu.normalize(to_target)
        torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot[:1].expand(bsz, -1))
        up_vec = tu.quat_rotate(torso_quat, self.basis_vec1[:1].expand(bsz, -1))
        heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0[:1].expand(bsz, -1))
        obs_buf = torch.cat([torso_pos[:, 1:2], # 0
                            torso_rot, # 1:5
                            lin_vel, # 5:8
                            ang_vel, # 8:11
                            state.joint_q.view(bsz, -1)[:, 7:], # 11:32
                            self.joint_vel_obs_scaling * state.joint_qd.view(bsz, -1)[:, 6:], # 32:53
                            up_vec[:, 1:2], # 53:54
                            (heading_vec * target_dirs).sum(dim = -1).unsqueeze(-1), # 54:55
                            actions.clone()], # 55:76
                            dim = -1)
        return obs_buf
    def calculateObservationsExt(self, state_joint, obs):
        """Rebuild observations from a flat concatenated [joint_q | joint_qd] tensor.

        Args:
            state_joint: (B, joint_q_dim + joint_qd_dim) concatenated state.
            obs: previous observation batch; only its trailing action columns
                are reused (actions sit at the end of the observation layout).
        """
        actions = obs[:, -self.actions.shape[-1]:]
        bsz = obs.shape[0]
        # split widths are taken from the live state's flattened layout
        joint_q_shape = self.state.joint_q.view(self.num_envs, -1).shape[-1]
        joint_qd_shape = self.state.joint_qd.view(self.num_envs, -1).shape[-1]  # NOTE(review): unused
        joint_q = state_joint[:, :joint_q_shape]
        joint_qd = state_joint[:, joint_q_shape:]
        torso_pos = joint_q[:, 0:3]
        torso_rot = joint_q[:, 3:7]
        lin_vel = joint_qd[:, 3:6]
        ang_vel = joint_qd[:, 0:3]
        # convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
        lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim = -1)
        to_target = self.targets[:1] + self.start_pos[:1] - torso_pos
        to_target[:, 1] = 0.0
        target_dirs = tu.normalize(to_target)
        torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot[:1].expand(bsz, -1))
        up_vec = tu.quat_rotate(torso_quat, self.basis_vec1[:1].expand(bsz, -1))
        heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0[:1].expand(bsz, -1))
        obs_buf = torch.cat([torso_pos[:, 1:2], # 0
                            torso_rot, # 1:5
                            lin_vel, # 5:8
                            ang_vel, # 8:11
                            joint_q[:, 7:], # 11:32
                            self.joint_vel_obs_scaling * joint_qd[:, 6:], # 32:53
                            up_vec[:, 1:2], # 53:54
                            (heading_vec * target_dirs).sum(dim = -1).unsqueeze(-1), # 54:55
                            actions.clone()], # 55:76
                            dim = -1)
        # if update_self_obs:
        #     self.obs_buf = obs_buf
        return obs_buf
    def calculateReward(self):
        """Compute per-env rewards into self.rew_buf and update reset flags.

        Reward = forward progress (obs 5, the x linear velocity per the layout
        in calculateObservations) + 0.1 * uprightness (obs 53) + heading
        alignment (obs 54) + height shaping + squared-action term scaled by
        self.action_penalty. Envs are flagged for reset on falling below the
        termination height, on timeout, or when the simulation produced
        non-finite / absurd values (the mask is kept in self.invalid_masks).
        """
        up_reward = 0.1 * self.obs_buf[:, 53]
        heading_reward = self.obs_buf[:, 54]
        # height shaping: quadratic penalty below the termination band, scaled bonus above it
        height_diff = self.obs_buf[:, 0] - (self.termination_height + self.termination_tolerance)
        height_reward = torch.clip(height_diff, -1.0, self.termination_tolerance)
        height_reward = torch.where(height_reward < 0.0, -200.0 * height_reward * height_reward, height_reward)
        height_reward = torch.where(height_reward > 0.0, self.height_rew_scale * height_reward, height_reward)
        progress_reward = self.obs_buf[:, 5]
        # NOTE(review): the squared-action term is ADDED, so action_penalty is presumably negative — confirm config
        self.rew_buf = progress_reward + up_reward + heading_reward + height_reward + torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
        # reset agents
        self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)
        self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
        # an ugly fix for simulation nan values
        # NOTE(review): uses self.num_environments here vs self.num_envs elsewhere — presumably aliases; confirm
        nan_masks = torch.logical_or(torch.isnan(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isnan(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isnan(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
        inf_masks = torch.logical_or(torch.isinf(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isinf(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isinf(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
        invalid_value_masks = torch.logical_or((torch.abs(self.state.joint_q.view(self.num_environments, -1)) > 1e6).sum(-1) > 0,
                                                (torch.abs(self.state.joint_qd.view(self.num_environments, -1)) > 1e6).sum(-1) > 0)
        invalid_masks = torch.logical_or(invalid_value_masks, torch.logical_or(nan_masks, inf_masks))
        self.reset_buf = torch.where(invalid_masks, torch.ones_like(self.reset_buf), self.reset_buf)
        self.invalid_masks = invalid_masks
        # zero out corrupted envs so they do not pollute gradients/statistics
        self.rew_buf[invalid_masks] = 0.
def calculateRewardEx(self, obs_buf, actions):
up_reward = 0.1 * obs_buf[:, 53]
heading_reward = obs_buf[:, 54]
height_diff = obs_buf[:, 0] - (self.termination_height + self.termination_tolerance)
height_reward = torch.clip(height_diff, -1.0, self.termination_tolerance)
height_reward = torch.where(height_reward < 0.0, -200.0 * height_reward * height_reward, height_reward)
height_reward = torch.where(height_reward > 0.0, self.height_rew_scale * height_reward, height_reward)
progress_reward = obs_buf[:, 5]
rew_buf = progress_reward + up_reward + heading_reward + height_reward + torch.sum(actions ** 2, dim = -1) * self.action_penalty
rew_buf[invalid_masks] = 0.
return rew_buf
| 21,064 |
Python
| 42.794179 | 248 | 0.569218 |
RoboticExplorationLab/CGAC/externals/rl_games/setup.py
|
"""Setup script for rl_games"""
import sys
import os
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
print(find_packages())
setup(name='rl-games',
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/Denys88/rl_games",
packages = ['.','rl_games','docs'],
package_data={'rl_games':['*'],'docs':['*'],},
version='1.1.0',
author='Denys Makoviichuk, Viktor Makoviichuk',
author_email='trrrrr97@gmail.com, victor.makoviychuk@gmail.com',
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
#packages=["rlg"],
include_package_data=True,
install_requires=[
# this setup is only for pytorch
#
'gym>=0.17.2',
'numpy>=1.16.0',
'tensorboard>=1.14.0',
'tensorboardX>=1.6',
'setproctitle',
'psutil',
'pyyaml'
],
)
| 1,300 |
Python
| 27.91111 | 70 | 0.559231 |
RoboticExplorationLab/CGAC/externals/rl_games/runner.py
|
import numpy as np
import argparse, copy, os, yaml
import ray, signal
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
#import warnings
#warnings.filterwarnings("error")
if __name__ == '__main__':
    # CLI entry point: parse args, load the yaml config, pick the TF or torch
    # backend, and dispatch to training or playing via Runner.run().
    ap = argparse.ArgumentParser()
    ap.add_argument("-tf", "--tf", required=False, help="run tensorflow runner", action='store_true')
    ap.add_argument("-t", "--train", required=False, help="train network", action='store_true')
    ap.add_argument("-p", "--play", required=False, help="play(test) network", action='store_true')
    ap.add_argument("-c", "--checkpoint", required=False, help="path to checkpoint")
    ap.add_argument("-f", "--file", required=True, help="path to config")
    ap.add_argument("-na", "--num_actors", type=int, default=0, required=False,
                    help="number of envs running in parallel, if larger than 0 will overwrite the value in yaml config")
    # output directories for checkpoints ("nn") and tensorboard logs ("runs")
    os.makedirs("nn", exist_ok=True)
    os.makedirs("runs", exist_ok=True)
    args = vars(ap.parse_args())
    config_name = args['file']
    print('Loading config: ', config_name)
    with open(config_name, 'r') as stream:
        config = yaml.safe_load(stream)
        # command-line actor count overrides the yaml value when > 0
        if args['num_actors'] > 0:
            config['params']['config']['num_actors'] = args['num_actors']
        # backend selection: TF1.x runner with --tf, otherwise the PyTorch runner
        if args['tf']:
            from rl_games.tf14_runner import Runner
        else:
            from rl_games.torch_runner import Runner
        ray.init(object_store_memory=1024*1024*1000)
        #signal.signal(signal.SIGINT, exit_gracefully)
        runner = Runner()
        try:
            runner.load(config)
        except yaml.YAMLError as exc:
            print(exc)
        runner.reset()
        runner.run(args)
        ray.shutdown()
| 1,739 |
Python
| 33.799999 | 120 | 0.615296 |
RoboticExplorationLab/CGAC/externals/rl_games/README.md
|
# RL Games: High performance RL library
## Papers and related links
* Isaac Gym: High Performance GPU-Based Physics Simulation For Robot Learning: https://arxiv.org/abs/2108.10470
* Transferring Dexterous Manipulation from GPU Simulation to a Remote Real-World TriFinger: https://s2r2-ig.github.io/ https://arxiv.org/abs/2108.09779
* Is Independent Learning All You Need in the StarCraft Multi-Agent Challenge? <https://arxiv.org/abs/2011.09533>
## Some results on interesting environments
* [NVIDIA Isaac Gym](docs/ISAAC_GYM.md)




* [Starcraft 2 Multi Agents](docs/SMAC.md)
* [BRAX](docs/BRAX.md)
* [Old TF1.x results](docs/BRAX.md)
## Config file
* [Configuration](docs/CONFIG_PARAMS.md)
Implemented in Pytorch:
* PPO with the support of asymmetric actor-critic variant
* Support of end-to-end GPU accelerated training pipeline with Isaac Gym and Brax
* Masked actions support
* Multi-agent training, decentralized and centralized critic variants
* Self-play
Implemented in Tensorflow 1.x (not updates now):
* Rainbow DQN
* A2C
* PPO
# Installation
For maximum training performance a preliminary installation of Pytorch 1.9+ with CUDA 11.1 is highly recommended:
```conda install pytorch torchvision cudatoolkit=11.1 -c pytorch -c nvidia``` or:
```pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html```
Then:
```pip install rl-games```
# Training
**NVIDIA Isaac Gym**
Download and follow the installation instructions from https://developer.nvidia.com/isaac-gym
Run from ```python/rlgpu``` directory:
Ant
```python rlg_train.py --task Ant --headless```
```python rlg_train.py --task Ant --play --checkpoint nn/Ant.pth --num_envs 100```
Humanoid
```python rlg_train.py --task Humanoid --headless```
```python rlg_train.py --task Humanoid --play --checkpoint nn/Humanoid.pth --num_envs 100```
Shadow Hand block orientation task
```python rlg_train.py --task ShadowHand --headless```
```python rlg_train.py --task ShadowHand --play --checkpoint nn/ShadowHand.pth --num_envs 100```
**Atari Pong**
```python runner.py --train --file rl_games/configs/atari/ppo_pong.yaml```
```python runner.py --play --file rl_games/configs/atari/ppo_pong.yaml --checkpoint nn/PongNoFrameskip.pth```
**Brax Ant**
```python runner.py --train --file rl_games/configs/brax/ppo_ant.yaml```
```python runner.py --play --file rl_games/configs/atari/ppo_ant.yaml --checkpoint nn/Ant_brax.pth```
# Release Notes
1.1.0
* Added to pypi: ```pip install rl-games```
* Added reporting env (sim) step fps, without policy inference. Improved naming.
* Renames in yaml config for better readability: steps_num to horizon_length and lr_threshold to kl_threshold
# Troubleshooting
* Some of the supported envs are not installed with setup.py, you need to manually install them
* Starting from rl-games 1.1.0 old yaml configs won't be compatible with the new version:
* ```steps_num``` should be changed to ```horizon_length``` and ```lr_threshold``` to ```kl_threshold```
| 3,558 |
Markdown
| 35.690721 | 151 | 0.737493 |
RoboticExplorationLab/CGAC/externals/rl_games/tests/simple_test.py
|
import pytest
def test_true():
    """Smoke test: verifies the pytest harness collects and runs tests."""
    always = True
    assert always
| 48 |
Python
| 8.799998 | 16 | 0.6875 |
RoboticExplorationLab/CGAC/externals/rl_games/docs/SMAC.md
|
## Starcraft 2 Multiple Agents Results
* Starcraft 2 Multiple Agents Results with PPO (https://github.com/oxwhirl/smac)
* Every agent was controlled independently and has restricted information
* All the environments were trained with a default difficulty level 7
* No curriculum, just baseline PPO
* Full state information wasn't used for critic, actor and critic received the same agent observations
* Most results are significantly better by win rate and were trained on a single PC much faster than QMIX (https://arxiv.org/pdf/1902.04043.pdf), MAVEN (https://arxiv.org/pdf/1910.07483.pdf) or QTRAN
* No hyperparameter search
* 4 frames + conv1d actor-critic network
* Miniepoch num was set to 1, higher numbers didn't work
* Simple MLP networks did not work well on hard envs
[](https://www.youtube.com/watch?v=F_IfFz-s-iQ)
# How to run configs:
# Pytorch
* ```python runner.py --train --file rl_games/configs/smac/3m_torch.yaml```
* ```python runner.py --play --file rl_games/configs/smac/3m_torch.yaml --checkpoint 'nn/3m_cnn'```
# Tensorflow
* ```python runner.py --tf --train --file rl_games/configs/smac/3m_torch.yaml```
* ```python runner.py --tf --play --file rl_games/configs/smac/3m_torch.yaml --checkpoint 'nn/3m_cnn'```
* ```tensorboard --logdir runs```
# Results on some environments:
* 2m_vs_1z took nearly 2 minutes to achieve 100% WR
* corridor took near 2 hours for 95+% WR
* MMM2 4 hours for 90+% WR
* 6h_vs_8z got 82% WR after 8 hours of training
* 5m_vs_6m got 72% WR after 8 hours of training
# Plots:
FPS in these plots is calculated on a per-env basis except MMM2 (it was scaled by the number of agents, which is 10). To get a win rate per number of environmental steps — the same as used in the plots in the QMIX, MAVEN, QTRAN or Deep Coordination Graphs (https://arxiv.org/pdf/1910.00091.pdf) papers — the FPS numbers under the horizontal axis should be divided by the number of agents in the player's team.
* 2m_vs_1z:

* 3s5z_vs_3s6z:

* 3s_vs_5z:

* corridor:

* 5m_vs_6m:

* MMM2:

| 2,266 |
Markdown
| 48.282608 | 384 | 0.735658 |
RoboticExplorationLab/CGAC/externals/rl_games/docs/OTHER.md
|
## Old Tensorflow results
* Double dueling DQN vs DQN with the same parameters

Near 90 minutes to learn with this setup.
* Different DQN Configurations tests
Light grey is noisy 1-step dddqn.
Noisy 3-step dddqn was even faster.
Best network (configuration 5) needs near 20 minutes to learn, on NVIDIA 1080.
Currently the best setup for pong is noisy 3-step double dueling network.
In pong_runs.py different experiments could be found.
Less than 200k frames to reach a score > 18.

DQN has more optimistic Q value estimations.
# Other Games Results
These results are not stable — just the best games; for good average results you need to train the network for more than 10 million steps.
Some games need 50m steps.
* 5 million frames two step noisy double dueling dqn:
[](https://youtu.be/Lu9Cm9K_6ms)
* Random lucky game in Space Invaders after less then one hour learning:
[](https://www.youtube.com/watch?v=LO0RL437rh4)
# A2C and PPO Results
* More than 2 hours for Pong to achieve 20 score with one actor playing.
* 8 Hours for Supermario lvl1
[](https://www.youtube.com/watch?v=T9ujS3HIvMY)
* PPO with LSTM layers
[](https://www.youtube.com/watch?v=fjY4AWbmhHg)

| 1,627 |
Markdown
| 36.860464 | 124 | 0.75968 |
RoboticExplorationLab/CGAC/externals/rl_games/docs/BRAX.md
|
# Brax (https://github.com/google/brax)
## How to run:
* **Ant** ```python runner.py --train --file rl_games/configs/brax/ppo_ant.yaml```
* **Humanoid** ```python runner.py --train --file rl_games/configs/brax/ppo_humanoid.yaml```
## Visualization:
* run **brax_visualization.ipynb**
## Results:
* **Ant** fps step: 1692066.6 fps total: 885603.1

* **Humanoid** fps step: 1244450.3 fps total: 661064.5

* **ur5e** fps step: 1116872.3 fps total: 627117.0



| 672 |
Markdown
| 34.421051 | 92 | 0.671131 |
RoboticExplorationLab/CGAC/externals/rl_games/docs/ISAAC_GYM.md
|
## Isaac Gym Results
https://developer.nvidia.com/isaac-gym
Coming.
| 69 |
Markdown
| 12.999997 | 38 | 0.753623 |
RoboticExplorationLab/CGAC/externals/rl_games/docs/CONFIG_PARAMS.md
|
# Yaml Config Description
Coming.
| 37 |
Markdown
| 8.499998 | 27 | 0.72973 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/torch_runner.py
|
import numpy as np
import copy
import torch
import yaml
from rl_games import envs
from rl_games.common import object_factory
from rl_games.common import env_configurations
from rl_games.common import experiment
from rl_games.common import tr_helpers
from rl_games.algos_torch import network_builder
from rl_games.algos_torch import model_builder
from rl_games.algos_torch import a2c_continuous
from rl_games.algos_torch import a2c_discrete
from rl_games.algos_torch import players
from rl_games.common.algo_observer import DefaultAlgoObserver
from rl_games.algos_torch import sac_agent
class Runner:
    """Wires yaml-derived configs to PyTorch algorithms and players.

    Holds two factories — one mapping an algo name to its trainer agent
    (a2c_continuous, a2c_discrete, sac) and one mapping it to an evaluation
    player — plus the model/network builders used by load_config().
    """
    def __init__(self, algo_observer=None):
        # algo name -> trainer agent constructor
        self.algo_factory = object_factory.ObjectFactory()
        self.algo_factory.register_builder('a2c_continuous', lambda **kwargs : a2c_continuous.A2CAgent(**kwargs))
        self.algo_factory.register_builder('a2c_discrete', lambda **kwargs : a2c_discrete.DiscreteA2CAgent(**kwargs))
        self.algo_factory.register_builder('sac', lambda **kwargs: sac_agent.SACAgent(**kwargs))
        #self.algo_factory.register_builder('dqn', lambda **kwargs : dqnagent.DQNAgent(**kwargs))
        # algo name -> inference/evaluation player constructor
        self.player_factory = object_factory.ObjectFactory()
        self.player_factory.register_builder('a2c_continuous', lambda **kwargs : players.PpoPlayerContinuous(**kwargs))
        self.player_factory.register_builder('a2c_discrete', lambda **kwargs : players.PpoPlayerDiscrete(**kwargs))
        self.player_factory.register_builder('sac', lambda **kwargs : players.SACPlayer(**kwargs))
        #self.player_factory.register_builder('dqn', lambda **kwargs : players.DQNPlayer(**kwargs))
        self.model_builder = model_builder.ModelBuilder()
        self.network_builder = network_builder.NetworkBuilder()
        self.algo_observer = algo_observer
        # enable the cudnn autotuner (beneficial for fixed-size workloads)
        torch.backends.cudnn.benchmark = True
    def reset(self):
        # kept for interface parity with the TF runner, which rebuilds its session here
        pass
    def load_config(self, params):
        """Consume a params dict: seed RNGs, build the network, populate self.config."""
        self.seed = params.get('seed', None)
        self.algo_params = params['algo']
        self.algo_name = self.algo_params['name']
        self.load_check_point = params['load_checkpoint']
        self.exp_config = None
        if self.seed:
            torch.manual_seed(self.seed)
            torch.cuda.manual_seed_all(self.seed)
            np.random.seed(self.seed)
        if self.load_check_point:
            print('Found checkpoint')
            print(params['load_path'])
            self.load_path = params['load_path']
        self.model = self.model_builder.load(params)
        self.config = copy.deepcopy(params['config'])
        self.config['reward_shaper'] = tr_helpers.DefaultRewardsShaper(**self.config['reward_shaper'])
        self.config['network'] = self.model
        self.config['logdir'] = params['general'].get('logdir', './')
        # optional auxiliary networks declared in the config
        has_rnd_net = self.config.get('rnd_config', None) != None
        if has_rnd_net:
            print('Adding RND Network')
            network = self.model_builder.network_factory.create(params['config']['rnd_config']['network']['name'])
            network.load(params['config']['rnd_config']['network'])
            self.config['rnd_config']['network'] = network
        has_central_value_net = self.config.get('central_value_config', None) != None
        if has_central_value_net:
            print('Adding Central Value Network')
            network = self.model_builder.network_factory.create(params['config']['central_value_config']['network']['name'])
            network.load(params['config']['central_value_config']['network'])
            self.config['central_value_config']['network'] = network
    def load(self, yaml_conf):
        """Load the top-level yaml dict; keeps a pristine copy in default_config."""
        self.default_config = yaml_conf['params']
        self.load_config(copy.deepcopy(self.default_config))
        if 'experiment_config' in yaml_conf:
            self.exp_config = yaml_conf['experiment_config']
    def get_prebuilt_config(self):
        return self.config
    def run_train(self):
        """Train either a sweep of experiment configs or the single default config."""
        print('Started to train')
        if self.algo_observer is None:
            self.algo_observer = DefaultAlgoObserver()
        if self.exp_config:
            # experiment sweep: iterate over generated configs until exhausted
            self.experiment = experiment.Experiment(self.default_config, self.exp_config)
            exp_num = 0
            exp = self.experiment.get_next_config()
            while exp is not None:
                exp_num += 1
                print('Starting experiment number: ' + str(exp_num))
                self.reset()
                self.load_config(exp)
                if 'features' not in self.config:
                    self.config['features'] = {}
                self.config['features']['observer'] = self.algo_observer
                #if 'soft_augmentation' in self.config['features']:
                #    self.config['features']['soft_augmentation'] = SoftAugmentation(**self.config['features']['soft_augmentation'])
                agent = self.algo_factory.create(self.algo_name, base_name='run', config=self.config)
                self.experiment.set_results(*agent.train())
                exp = self.experiment.get_next_config()
        else:
            self.reset()
            self.load_config(self.default_config)
            if 'features' not in self.config:
                self.config['features'] = {}
            self.config['features']['observer'] = self.algo_observer
            #if 'soft_augmentation' in self.config['features']:
            #    self.config['features']['soft_augmentation'] = SoftAugmentation(**self.config['features']['soft_augmentation'])
            agent = self.algo_factory.create(self.algo_name, base_name='run', config=self.config)
            # restore only when a checkpoint was both requested and resolved
            if self.load_check_point and (self.load_path is not None):
                agent.restore(self.load_path)
            agent.train()
    def create_player(self):
        return self.player_factory.create(self.algo_name, config=self.config)
    def create_agent(self, obs_space, action_space):
        return self.algo_factory.create(self.algo_name, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
    def run(self, args):
        """Dispatch on parsed CLI args: --train / --play (default: train)."""
        # a non-empty --checkpoint argument overrides the config's load_path
        if 'checkpoint' in args and args['checkpoint'] is not None:
            if len(args['checkpoint']) > 0:
                self.load_path = args['checkpoint']
        if args['train']:
            self.run_train()
        elif args['play']:
            print('Started to play')
            player = self.create_player()
            player.restore(self.load_path)
            player.run()
        else:
            self.run_train()
| 6,556 |
Python
| 43.304054 | 148 | 0.624619 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/tf14_runner.py
|
import tensorflow as tf
import numpy as np
import yaml
import ray
import copy
from rl_games.common import object_factory
from rl_games.common import env_configurations
from rl_games.common import experiment
from rl_games.common import tr_helpers
from rl_games.algos_tf14 import network_builder
from rl_games.algos_tf14 import model_builder
from rl_games.algos_tf14 import a2c_continuous
from rl_games.algos_tf14 import a2c_discrete
from rl_games.algos_tf14 import dqnagent
from rl_games.algos_tf14 import players
class Runner:
    """TF1.x counterpart of the torch Runner: wires configs to TF agents/players.

    Unlike the torch version, reset() owns a TF interactive session that is
    rebuilt for every experiment, and run_train() initializes ray itself.
    """
    def __init__(self):
        # algo name -> trainer agent constructor
        self.algo_factory = object_factory.ObjectFactory()
        self.algo_factory.register_builder('a2c_continuous', lambda **kwargs : a2c_continuous.A2CAgent(**kwargs))
        self.algo_factory.register_builder('a2c_discrete', lambda **kwargs : a2c_discrete.A2CAgent(**kwargs))
        self.algo_factory.register_builder('dqn', lambda **kwargs : dqnagent.DQNAgent(**kwargs))
        # algo name -> inference/evaluation player constructor
        self.player_factory = object_factory.ObjectFactory()
        self.player_factory.register_builder('a2c_continuous', lambda **kwargs : players.PpoPlayerContinuous(**kwargs))
        self.player_factory.register_builder('a2c_discrete', lambda **kwargs : players.PpoPlayerDiscrete(**kwargs))
        self.player_factory.register_builder('dqn', lambda **kwargs : players.DQNPlayer(**kwargs))
        self.model_builder = model_builder.ModelBuilder()
        self.network_builder = network_builder.NetworkBuilder()
        self.sess = None
    def reset(self):
        """Tear down any existing TF session/graph and open a fresh one."""
        gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.8)
        config = tf.ConfigProto(gpu_options=gpu_options)
        tf.reset_default_graph()
        if self.sess:
            self.sess.close()
        self.sess = tf.InteractiveSession(config=config)
    def load_config(self, params):
        """Consume a params dict: seed RNGs, build the network, populate self.config."""
        self.seed = params.get('seed', None)
        self.algo_params = params['algo']
        self.algo_name = self.algo_params['name']
        self.load_check_point = params['load_checkpoint']
        self.exp_config = None
        if self.seed:
            tf.set_random_seed(self.seed)
            np.random.seed(self.seed)
        # NOTE(review): self.load_path is only assigned when load_checkpoint is
        # set (or via run()); run_train() may read it before assignment — confirm
        if self.load_check_point:
            self.load_path = params['load_path']
        self.model = self.model_builder.load(params)
        self.config = copy.deepcopy(params['config'])
        self.config['reward_shaper'] = tr_helpers.DefaultRewardsShaper(**self.config['reward_shaper'], is_torch=False)
        self.config['network'] = self.model
    def load(self, yaml_conf):
        """Load the top-level yaml dict; keeps a pristine copy in default_config."""
        self.default_config = yaml_conf['params']
        self.load_config(copy.deepcopy(self.default_config))
        if 'experiment_config' in yaml_conf:
            self.exp_config = yaml_conf['experiment_config']
    def get_prebuilt_config(self):
        return self.config
    def run_train(self):
        """Train either a sweep of experiment configs or the single default config."""
        print('Started to train')
        ray.init(object_store_memory=1024*1024*1000)
        shapes = env_configurations.get_obs_and_action_spaces_from_config(self.config)
        obs_space = shapes['observation_space']
        action_space = shapes['action_space']
        print('obs_space:', obs_space)
        print('action_space:', action_space)
        if self.exp_config:
            # experiment sweep: iterate over generated configs until exhausted
            self.experiment = experiment.Experiment(self.default_config, self.exp_config)
            exp_num = 0
            exp = self.experiment.get_next_config()
            while exp is not None:
                exp_num += 1
                print('Starting experiment number: ' + str(exp_num))
                self.reset()
                self.load_config(exp)
                agent = self.algo_factory.create(self.algo_name, sess=self.sess, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
                self.experiment.set_results(*agent.train())
                exp = self.experiment.get_next_config()
        else:
            self.reset()
            self.load_config(self.default_config)
            agent = self.algo_factory.create(self.algo_name, sess=self.sess, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
            # NOTE(review): `or` here vs `and` in torch_runner.Runner.run_train — confirm which is intended
            if self.load_check_point or (self.load_path is not None):
                agent.restore(self.load_path)
            agent.train()
    def create_player(self):
        return self.player_factory.create(self.algo_name, sess=self.sess, config=self.config)
    def create_agent(self, obs_space, action_space):
        return self.algo_factory.create(self.algo_name, sess=self.sess, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
    def run(self, args):
        """Dispatch on parsed CLI args: --train / --play; always shuts ray down."""
        if 'checkpoint' in args:
            self.load_path = args['checkpoint']
        if args['train']:
            self.run_train()
        elif args['play']:
            print('Started to play')
            player = self.player_factory.create(self.algo_name, sess=self.sess, config=self.config)
            player.restore(self.load_path)
            player.run()
        ray.shutdown()
| 5,099 |
Python
| 39.8 | 175 | 0.643656 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/test_network.py
|
import torch
from torch import nn
import torch.nn.functional as F
class TestNet(nn.Module):
    """Small MLP with a policy head and a value head for dict observations.

    Expects ``obs`` as ``{'obs': {'pos': (B, P), 'info': (B, I)}}``; the two
    parts are concatenated before a 256-128-64 ReLU trunk.

    Kwargs:
        actions_num: width of the policy head (required).
        input_shape: dict of input part name -> shape tuple; the first dim of
            every entry is summed to get the trunk input width (required).
        value_size: width of the value head (default 1).
    """
    def __init__(self, params, **kwargs):
        nn.Module.__init__(self)
        actions_num = kwargs.pop('actions_num')
        input_shape = kwargs.pop('input_shape')
        num_inputs = 0
        assert(type(input_shape) is dict)
        # total input width = sum of the leading dim of every observation part
        for k, v in input_shape.items():
            num_inputs += v[0]
        self.central_value = params.get('central_value', False)
        self.value_size = kwargs.pop('value_size', 1)
        self.linear1 = nn.Linear(num_inputs, 256)
        self.linear2 = nn.Linear(256, 128)
        self.linear3 = nn.Linear(128, 64)
        self.mean_linear = nn.Linear(64, actions_num)
        # fix: honour value_size (was hard-coded to 1 even though value_size was accepted)
        self.value_linear = nn.Linear(64, self.value_size)

    def is_rnn(self):
        # no recurrent state
        return False

    def forward(self, obs):
        """Return (action, value, None), or (value, None) in central-value mode."""
        obs = obs['obs']
        obs = torch.cat([obs['pos'], obs['info']], axis=-1)
        x = F.relu(self.linear1(obs))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        action = self.mean_linear(x)
        value = self.value_linear(x)
        if self.central_value:
            return value, None
        return action, value, None
from rl_games.algos_torch.network_builder import NetworkBuilder
class TestNetBuilder(NetworkBuilder):
    """NetworkBuilder adapter that constructs TestNet instances.

    load() stores the yaml params; build()/__call__() forward them plus the
    runtime kwargs (actions_num, input_shape, ...) to TestNet.
    """
    def __init__(self, **kwargs):
        NetworkBuilder.__init__(self)

    def load(self, params):
        # keep the raw params dict; consumed lazily at build time
        self.params = params

    def build(self, name, **kwargs):
        return TestNet(self.params, **kwargs)

    def __call__(self, name, **kwargs):
        return self.build(name, **kwargs)
| 1,596 |
Python
| 28.036363 | 63 | 0.589599 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/smac_env.py
|
import gym
import numpy as np
from smac.env import StarCraft2Env
class SMACEnv(gym.Env):
def __init__(self, name="3m", **kwargs):
gym.Env.__init__(self)
self.seed = kwargs.pop('seed', None)
self.reward_sparse = kwargs.get('reward_sparse', False)
self.use_central_value = kwargs.pop('central_value', False)
self.random_invalid_step = kwargs.pop('random_invalid_step', False)
self.replay_save_freq = kwargs.pop('replay_save_freq', 10000)
self.apply_agent_ids = kwargs.pop('apply_agent_ids', False)
self.env = StarCraft2Env(map_name=name, seed=self.seed, **kwargs)
self.env_info = self.env.get_env_info()
self._game_num = 0
self.n_actions = self.env_info["n_actions"]
self.n_agents = self.env_info["n_agents"]
self.action_space = gym.spaces.Discrete(self.n_actions)
one_hot_agents = 0
if self.apply_agent_ids:
one_hot_agents = self.n_agents
self.observation_space = gym.spaces.Box(low=0, high=1, shape=(self.env_info['obs_shape']+one_hot_agents, ), dtype=np.float32)
self.state_space = gym.spaces.Box(low=0, high=1, shape=(self.env_info['state_shape'], ), dtype=np.float32)
self.obs_dict = {}
def _preproc_state_obs(self, state, obs):
# todo: remove from self
if self.apply_agent_ids:
num_agents = self.n_agents
obs = np.array(obs)
all_ids = np.eye(num_agents, dtype=np.float32)
obs = np.concatenate([obs, all_ids], axis=-1)
self.obs_dict["obs"] = np.array(obs)
self.obs_dict["state"] = np.array(state)
if self.use_central_value:
return self.obs_dict
else:
return self.obs_dict["obs"]
def get_number_of_agents(self):
return self.n_agents
def reset(self):
if self._game_num % self.replay_save_freq == 1:
print('saving replay')
self.env.save_replay()
self._game_num += 1
obs, state = self.env.reset() # rename, to think remove
obs_dict = self._preproc_state_obs(state, obs)
return obs_dict
def _preproc_actions(self, actions):
actions = actions.copy()
rewards = np.zeros_like(actions)
mask = self.get_action_mask()
for ind, action in enumerate(actions, start=0):
avail_actions = np.nonzero(mask[ind])[0]
if action not in avail_actions:
actions[ind] = np.random.choice(avail_actions)
#rewards[ind] = -0.05
return actions, rewards
def step(self, actions):
fixed_rewards = None
if self.random_invalid_step:
actions, fixed_rewards = self._preproc_actions(actions)
reward, done, info = self.env.step(actions)
if done:
battle_won = info.get('battle_won', False)
if not battle_won and self.reward_sparse:
reward = -1.0
obs = self.env.get_obs()
state = self.env.get_state()
obses = self._preproc_state_obs(state, obs)
rewards = np.repeat (reward, self.n_agents)
dones = np.repeat (done, self.n_agents)
if fixed_rewards is not None:
rewards += fixed_rewards
return obses, rewards, dones, info
def get_action_mask(self):
return np.array(self.env.get_avail_actions(), dtype=np.bool)
    def has_action_mask(self):
        # Masks are only meaningful when invalid actions are not randomized away.
        return not self.random_invalid_step
| 3,500 |
Python
| 34.01 | 133 | 0.587714 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/connect4_selfplay.py
|
import gym
import numpy as np
from pettingzoo.classic import connect_four_v0
import yaml
from rl_games.torch_runner import Runner
import os
from collections import deque
class ConnectFourSelfPlay(gym.Env):
    """Connect-four self-play wrapper over PettingZoo's connect_four_v0.

    The learner faces a frozen copy of its own policy (or a random agent,
    or a human via stdin) depending on kwargs. Observations are the two
    most recent board frames stacked on the channel axis, scaled to uint8
    [0, 255].
    """

    def __init__(self, name="connect_four_v0", **kwargs):
        gym.Env.__init__(self)
        self.name = name
        # NOTE: misspelled 'determenistic' kept — external configs use this key.
        self.is_determenistic = kwargs.pop('is_determenistic', False)
        self.is_human = kwargs.pop('is_human', False)
        self.random_agent = kwargs.pop('random_agent', False)
        self.config_path = kwargs.pop('config_path')
        self.agent = None  # frozen opponent, built lazily on first reset()
        self.env = connect_four_v0.env()#gym.make(name, **kwargs)
        self.action_space = self.env.action_spaces['player_0']
        observation_space = self.env.observation_spaces['player_0']
        shp = observation_space.shape
        # A frame stack of 2 doubles the channel dimension.
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=(shp[:-1] + (shp[-1] * 2,)), dtype=np.uint8)
        self.obs_deque = deque([], maxlen=2)
        self.agent_id = 0  # seat occupied by the learner this episode

    def _get_legal_moves(self, agent_id):
        """Return (boolean mask over all actions, list of legal action ids)."""
        name = 'player_0' if agent_id == 0 else 'player_1'
        action_ids = self.env.infos[name]['legal_moves']
        # builtin bool: np.bool was deprecated in NumPy 1.20 and removed in 1.24
        mask = np.zeros(self.action_space.n, dtype=bool)
        mask[action_ids] = True
        return mask, action_ids

    def env_step(self, action):
        """Advance the underlying env one ply; reward/done are reported for
        the learner's seat."""
        obs = self.env.step(action)
        info = {}
        name = 'player_0' if self.agent_id == 0 else 'player_1'
        reward = self.env.rewards[name]
        done = self.env.dones[name]
        return obs, reward, done, info

    def get_obs(self):
        """Stack the two most recent frames on the channel axis as uint8 [0, 255]."""
        return np.concatenate(self.obs_deque, -1).astype(np.uint8) * 255

    def reset(self):
        """Start a new game; randomly pick the learner's seat and, when the
        opponent moves first, play its opening move."""
        if self.agent == None:
            self.create_agent(self.config_path)
        self.agent_id = np.random.randint(2)
        obs = self.env.reset()
        # Duplicate the first frame so the stacked observation is well-defined.
        self.obs_deque.append(obs)
        self.obs_deque.append(obs)
        if self.agent_id == 1:
            op_obs = self.get_obs()
            op_obs = self.agent.obs_to_torch(op_obs)
            mask, ids = self._get_legal_moves(0)
            if self.is_human:
                self.render()
                opponent_action = int(input())
            else:
                if self.random_agent:
                    opponent_action = np.random.choice(ids, 1)[0]
                else:
                    opponent_action = self.agent.get_masked_action(op_obs, mask, self.is_determenistic).item()
            obs, _, _, _ = self.env_step(opponent_action)
            self.obs_deque.append(obs)
        return self.get_obs()

    def create_agent(self, config):
        """Load the frozen opponent policy from a YAML run config."""
        with open(config, 'r') as stream:
            config = yaml.safe_load(stream)
        runner = Runner()
        runner.load(config)
        config = runner.get_prebuilt_config()
        # RAYLIB has a bug here: CUDA_VISIBLE_DEVICES becomes unset.
        if 'CUDA_VISIBLE_DEVICES' in os.environ:
            os.environ.pop('CUDA_VISIBLE_DEVICES')
        self.agent = runner.create_player()
        self.agent.model.eval()

    def step(self, action):
        """Apply the learner's action, then (unless the game ended) the
        opponent's reply; `battle_won` is set on terminal steps."""
        obs, reward, done, info = self.env_step(action)
        self.obs_deque.append(obs)
        if done:
            info['battle_won'] = 1 if reward == 1 else 0
            return self.get_obs(), reward, done, info
        op_obs = self.get_obs()
        op_obs = self.agent.obs_to_torch(op_obs)
        mask, ids = self._get_legal_moves(1 - self.agent_id)
        if self.is_human:
            self.render()
            opponent_action = int(input())
        else:
            if self.random_agent:
                opponent_action = np.random.choice(ids, 1)[0]
            else:
                opponent_action = self.agent.get_masked_action(op_obs, mask, self.is_determenistic).item()
        obs, reward, done, _ = self.env_step(opponent_action)
        if done:
            info['battle_won'] = 0 if reward == -1 else 1
        self.obs_deque.append(obs)
        return self.get_obs(), reward, done, info

    def render(self, mode='ansi'):
        self.env.render(mode)

    def update_weights(self, weigths):
        # (sic) parameter name kept for external callers
        self.agent.set_weights(weigths)

    def get_action_mask(self):
        mask, _ = self._get_legal_moves(self.agent_id)
        return mask

    def has_action_mask(self):
        return True
| 4,505 |
Python
| 33.396946 | 113 | 0.552719 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/__init__.py
|
# Register the custom network builders with rl_games' model factory so they
# can be referenced by name ('connect4net', 'testnet') from YAML configs.
from rl_games.envs.connect4_network import ConnectBuilder
from rl_games.envs.test_network import TestNetBuilder
from rl_games.algos_torch import model_builder
model_builder.register_network('connect4net', ConnectBuilder)
model_builder.register_network('testnet', TestNetBuilder)
| 282 |
Python
| 30.444441 | 61 | 0.833333 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/connect4_network.py
|
import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
    """Input stem: 3x3 conv (4 -> 128 channels) + batch norm + ReLU.

    NOTE(review): forward() indexes its argument with ['obs'], i.e. it
    expects a dict-like input, while ConnectNet.forward appears to pass a
    permuted tensor — confirm the actual call path through rl_games'
    model wrapper.
    """
    def __init__(self):
        super(ConvBlock, self).__init__()
        self.action_size = 7
        self.conv1 = nn.Conv2d(4, 128, 3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(128)
    def forward(self, s):
        # Unpack the observation tensor; contiguous() before the conv.
        s = s['obs'].contiguous()
        #s = s.view(-1, 3, 6, 7)  # batch_size x channels x board_x x board_y
        s = F.relu(self.bn1(self.conv1(s)))
        return s
class ResBlock(nn.Module):
    """Residual block: conv-bn-relu, conv-bn, identity skip add, final ReLU.

    `downsample` is accepted for signature compatibility but unused.
    """

    def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
        super(ResBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

    def forward(self, x):
        shortcut = x
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # Identity skip connection followed by the final activation.
        return F.relu(y + shortcut)
class OutBlock(nn.Module):
    """Dual-head output: scalar value head and 7-way policy-logit head.

    Returns (policy_logits, value, None); the trailing None is an RNN-state
    placeholder. NOTE(review): the value passes ReLU before tanh, so v is
    confined to [0, 1) rather than (-1, 1) — confirm this is intended.
    """
    def __init__(self):
        super(OutBlock, self).__init__()
        self.conv = nn.Conv2d(128, 3, kernel_size=1) # value head
        self.bn = nn.BatchNorm2d(3)
        self.fc1 = nn.Linear(3*6*7, 32)
        self.fc2 = nn.Linear(32, 1)
        self.conv1 = nn.Conv2d(128, 32, kernel_size=1) # policy head
        self.bn1 = nn.BatchNorm2d(32)
        self.fc = nn.Linear(6*7*32, 7)
    def forward(self,s):
        v = F.relu(self.bn(self.conv(s))) # value head
        v = v.view(-1, 3*6*7)  # flatten channel x height x width
        v = F.relu(self.fc1(v))
        v = F.relu(self.fc2(v))
        v = torch.tanh(v)
        p = F.relu(self.bn1(self.conv1(s))) # policy head
        p = p.view(-1, 6*7*32)
        p = self.fc(p)
        return p, v, None
class ConnectNet(nn.Module):
    """AlphaZero-style net: conv stem + `blocks` residual blocks + dual head.

    NOTE(review): forward permutes NHWC->NCHW on its argument, but the stem
    (ConvBlock) indexes the input with ['obs'] — these two appear
    inconsistent; verify against the rl_games calling convention.
    """
    def __init__(self, blocks):
        super(ConnectNet, self).__init__()
        self.blocks = blocks
        self.conv = ConvBlock()
        # Residual blocks registered as attributes res_0 .. res_{blocks-1}.
        for block in range(self.blocks):
            setattr(self, "res_%i" % block,ResBlock())
        self.outblock = OutBlock()
    def is_rnn(self):
        # Queried by rl_games to decide whether to manage RNN state.
        return False
    def forward(self,s):
        # NHWC -> NCHW
        s = s.permute((0, 3, 1, 2))
        s = self.conv(s)
        for block in range(self.blocks):
            s = getattr(self, "res_%i" % block)(s)
        s = self.outblock(s)
        return s
from rl_games.algos_torch.network_builder import NetworkBuilder
class ConnectBuilder(NetworkBuilder):
    """rl_games network builder that instantiates ConnectNet from YAML params
    (only the residual-block count is configurable)."""
    def __init__(self, **kwargs):
        NetworkBuilder.__init__(self)
    def load(self, params):
        # Called by rl_games with the 'network' section of the config.
        self.params = params
        self.blocks = params['blocks']
    def build(self, name, **kwargs):
        return ConnectNet(self.blocks)
    def __call__(self, name, **kwargs):
        return self.build(name, **kwargs)
| 2,992 |
Python
| 28.93 | 78 | 0.558489 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/brax.py
|
from rl_games.common.ivecenv import IVecEnv
import gym
import numpy as np
import torch
import torch.utils.dlpack as tpack
def jax_to_torch(tensor):
    """Convert a JAX array to a torch tensor via DLPack (zero copy)."""
    from jax._src.dlpack import (to_dlpack,)
    capsule = to_dlpack(tensor)
    return tpack.from_dlpack(capsule)
def torch_to_jax(tensor):
    """Convert a torch tensor to a JAX array via DLPack (zero copy)."""
    from jax._src.dlpack import (from_dlpack,)
    capsule = tpack.to_dlpack(tensor)
    return from_dlpack(capsule)
class BraxEnv(IVecEnv):
    """Vectorized Brax environment adapter.

    Runs `num_actors` env copies batched on GPU (jit-compiled step/reset)
    and exchanges tensors with torch via DLPack, avoiding host round-trips.
    """
    def __init__(self, config_name, num_actors, **kwargs):
        # Imports are local so the module can be imported without brax/jax.
        import brax
        from brax import envs
        import jax
        import jax.numpy as jnp
        self.batch_size = num_actors
        env_fn = envs.create_fn(env_name=kwargs.pop('env_name', 'ant'))
        self.env = env_fn(
            action_repeat=1,
            batch_size=num_actors,
            episode_length=kwargs.pop('episode_length', 1000))
        obs_high = np.inf * np.ones(self.env.observation_size)
        self.observation_space = gym.spaces.Box(-obs_high, obs_high, dtype=np.float32)
        action_high = np.ones(self.env.action_size)
        self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
        def step(first_state, state, action):
            def test_done(a, b):
                # Keep done/metrics/reward from the fresh step; for all other
                # leaves, actors whose episode finished are reset to their
                # first-state values (auto-reset semantics).
                if a is first_state.done or a is first_state.metrics or a is first_state.reward:
                    return b
                test_shape = [a.shape[0],] + [1 for _ in range(len(a.shape) - 1)]
                return jnp.where(jnp.reshape(state.done, test_shape), a, b)
            state = self.env.step(state, action)
            state = jax.tree_multimap(test_done, first_state, state)
            return state, state.obs, state.reward, state.done, {}
        def reset(key):
            state = self.env.reset(key)
            return state, state.obs
        # jit-compile once; reused every frame.
        self._reset = jax.jit(reset, backend='gpu')
        self._step = jax.jit(step, backend='gpu')
    def step(self, action):
        """Step all actors with a torch action batch; returns torch tensors."""
        action = torch_to_jax(action)
        self.state, next_obs, reward, is_done, info = self._step(self.first_state, self.state, action)
        #next_obs = np.asarray(next_obs).astype(np.float32)
        #reward = np.asarray(reward).astype(np.float32)
        #is_done = np.asarray(is_done).astype(np.long)
        next_obs = jax_to_torch(next_obs)
        reward = jax_to_torch(reward)
        is_done = jax_to_torch(is_done)
        return next_obs, reward, is_done, info
    def reset(self):
        """Reset all actors (fixed seed 0) and cache the pristine first state
        used for per-actor auto-reset."""
        import jax
        import jax.numpy as jnp
        rng = jax.random.PRNGKey(seed=0)
        rng = jax.random.split(rng, self.batch_size)
        self.first_state, _ = self._reset(rng)
        self.state, obs = self._reset(rng)
        #obs = np.asarray(obs).astype(np.float32)
        return jax_to_torch(obs)
    def get_number_of_agents(self):
        return 1
    def get_env_info(self):
        info = {}
        info['action_space'] = self.action_space
        info['observation_space'] = self.observation_space
        return info
def create_brax_env(**kwargs):
    """Factory used by rl_games env configurations; defaults to 256 actors."""
    return BraxEnv("", kwargs.pop('num_actors', 256), **kwargs)
| 3,131 |
Python
| 32.677419 | 102 | 0.600767 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/multiwalker.py
|
import gym
import numpy as np
from pettingzoo.sisl import multiwalker_v6
import yaml
from rl_games.torch_runner import Runner
import os
from collections import deque
import rl_games.envs.connect4_network
class MultiWalker(gym.Env):
    """Adapter for PettingZoo's multiwalker_v6 (3 walkers) to rl_games'
    multi-agent interface: per-agent stacked obs/reward/done arrays, with
    optional one-hot agent ids, previous-action features, and a central
    (concatenated) critic state.

    NOTE(review): `add_timeouts` is popped from kwargs but never used in the
    visible code — confirm whether it is dead or consumed elsewhere.
    """
    def __init__(self, name="multiwalker", **kwargs):
        gym.Env.__init__(self)
        self.name = name
        self.env = multiwalker_v6.parallel_env()
        self.use_central_value = kwargs.pop('central_value', False)
        self.use_prev_actions = kwargs.pop('use_prev_actions', False)
        self.apply_agent_ids = kwargs.pop('apply_agent_ids', False)
        self.add_timeouts = kwargs.pop('add_timeouts', False)
        self.action_space = self.env.action_spaces['walker_0']
        self.steps_count = 0
        obs_len = self.env.observation_spaces['walker_0'].shape[0]
        add_obs = 0
        if self.apply_agent_ids:
            # 3 walkers -> 3 extra one-hot id features per observation.
            add_obs = 3
        if self.use_prev_actions:
            obs_len += self.action_space.shape[0]
        self.observation_space = gym.spaces.Box(-1, 1, shape =(obs_len + add_obs,))
        if self.use_central_value:
            # Critic state = concatenation of all three walkers' observations.
            self.state_space = gym.spaces.Box(-1, 1, shape =(obs_len*3,))
    def step(self, action):
        """Step all three walkers; `action` is indexable per agent."""
        self.steps_count += 1
        actions = {'walker_0' : action[0], 'walker_1' : action[1], 'walker_2' : action[2],}
        obs, reward, done, info = self.env.step(actions)
        if self.use_prev_actions:
            # Append each agent's own action to its observation.
            obs = {
                k: np.concatenate([v, actions[k]]) for k,v in obs.items()
            }
        obses = np.stack([obs['walker_0'], obs['walker_1'], obs['walker_2']])
        rewards = np.stack([reward['walker_0'], reward['walker_1'], reward['walker_2']])
        dones = np.stack([done['walker_0'], done['walker_1'], done['walker_2']])
        if self.apply_agent_ids:
            num_agents = 3
            all_ids = np.eye(num_agents, dtype=np.float32)
            obses = np.concatenate([obses, all_ids], axis=-1)
        if self.use_central_value:
            states = np.concatenate([obs['walker_0'], obs['walker_1'], obs['walker_2']])
            obses = {
                'obs' : obses,
                'state': states
            }
        return obses, rewards, dones, info
    def reset(self):
        """Reset the env; mirrors step()'s observation post-processing."""
        obs = self.env.reset()
        self.steps_count = 0
        if self.use_prev_actions:
            # No previous action yet — pad with zeros.
            zero_actions = np.zeros(self.action_space.shape[0])
            obs = {
                k: np.concatenate([v, zero_actions]) for k,v in obs.items()
            }
        obses = np.stack([obs['walker_0'], obs['walker_1'], obs['walker_2']])
        if self.apply_agent_ids:
            num_agents = 3
            all_ids = np.eye(num_agents, dtype=np.float32)
            obses = np.concatenate([obses, all_ids], axis=-1)
        if self.use_central_value:
            states = np.concatenate([obs['walker_0'], obs['walker_1'], obs['walker_2']])
            obses = {
                'obs' : obses,
                'state': states
            }
        return obses
    def render(self, mode='ansi'):
        self.env.render(mode)
    def get_number_of_agents(self):
        return 3
    def has_action_mask(self):
        return False
| 3,195 |
Python
| 37.047619 | 91 | 0.554617 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/slimevolley_selfplay.py
|
import gym
import numpy as np
import slimevolleygym
import yaml
from rl_games.torch_runner import Runner
import os
class SlimeVolleySelfplay(gym.Env):
    """SlimeVolley self-play wrapper: the opponent is a frozen policy loaded
    from `config_path`; negative rewards can be scaled by `neg_scale`."""

    def __init__(self, name="SlimeVolleyDiscrete-v0", **kwargs):
        gym.Env.__init__(self)
        self.name = name
        # NOTE: misspelled 'determenistic' kept — external configs use this key.
        self.is_determenistic = kwargs.pop('is_determenistic', False)
        self.config_path = kwargs.pop('config_path')
        self.agent = None  # frozen opponent, built lazily on first reset()
        self.pos_scale = 1
        self.neg_scale = kwargs.pop('neg_scale', 1)
        self.sum_rewards = 0  # net episode score, used for battle_won
        self.env = gym.make(name, **kwargs)
        self.observation_space = self.env.observation_space
        self.action_space = self.env.action_space

    def reset(self):
        if self.agent == None:
            self.create_agent(self.config_path)
        obs = self.env.reset()
        self.opponent_obs = obs
        self.sum_rewards = 0
        return obs

    def create_agent(self, config='rl_games/configs/ma/ppo_slime_self_play.yaml'):
        """Build the frozen opponent player from a YAML run config."""
        with open(config, 'r') as stream:
            config = yaml.safe_load(stream)
        runner = Runner()
        from rl_games.common.env_configurations import get_env_info
        config['params']['config']['env_info'] = get_env_info(self)
        runner.load(config)
        config = runner.get_prebuilt_config()
        # RAYLIB has a bug here: CUDA_VISIBLE_DEVICES becomes unset.
        # (this note was a stray no-op string expression in the original)
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        self.agent = runner.create_player()

    def step(self, action):
        """Play one learner action against the frozen opponent's action."""
        op_obs = self.agent.obs_to_torch(self.opponent_obs)
        opponent_action = self.agent.get_action(op_obs, self.is_determenistic).item()
        obs, reward, done, info = self.env.step(action, opponent_action)
        self.sum_rewards += reward
        if reward < 0:
            # Optionally soften losses to ease early training.
            reward = reward * self.neg_scale
        self.opponent_obs = info['otherObs']
        if done:
            # +1 / -1 / 0 depending on the episode's net (unscaled) score.
            info['battle_won'] = np.sign(self.sum_rewards)
        return obs, reward, done, info

    def render(self, mode):
        self.env.render(mode)

    def update_weights(self, weigths):
        # (sic) parameter name kept for external callers
        self.agent.set_weights(weigths)
| 2,148 |
Python
| 32.578124 | 85 | 0.607542 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/test/__init__.py
|
import gym
# Register the synthetic test environments with gym so they can be created
# by id from rl_games configs.
gym.envs.register(
    id='TestRnnEnv-v0',
    entry_point='rl_games.envs.test.rnn_env:TestRNNEnv',
    max_episode_steps=100500,
)
gym.envs.register(
    id='TestAsymmetricEnv-v0',
    entry_point='rl_games.envs.test.test_asymmetric_env:TestAsymmetricCritic'
)
| 279 |
Python
| 22.333331 | 78 | 0.709677 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/test/rnn_env.py
|
import gym
import numpy as np
class TestRNNEnv(gym.Env):
    """Synthetic grid-navigation env for testing RNN policies.

    The agent starts at the origin and must reach a random goal; the goal
    coordinates can be hidden after the first steps (`hide_object`) so the
    policy must memorize them. Supports multi-discrete actions, dict
    observation spaces, a central value state, and a 2-head value.
    """
    def __init__(self, **kwargs):
        gym.Env.__init__(self)
        self.obs_dict = {}
        self.max_steps = kwargs.pop('max_steps', 21)
        self.show_time = kwargs.pop('show_time', 1)
        self.min_dist = kwargs.pop('min_dist', 2)
        self.max_dist = kwargs.pop('max_dist', 8)
        self.hide_object = kwargs.pop('hide_object', False)
        self.use_central_value = kwargs.pop('use_central_value', False)
        self.apply_dist_reward = kwargs.pop('apply_dist_reward', False)
        self.apply_exploration_reward = kwargs.pop('apply_exploration_reward', False)
        self.multi_head_value = kwargs.pop('multi_head_value', False)
        # Two value heads: [goal reward, distance shaping].
        if self.multi_head_value:
            self.value_size = 2
        else:
            self.value_size = 1
        self.multi_discrete_space = kwargs.pop('multi_discrete_space', False)
        if self.multi_discrete_space:
            self.action_space = gym.spaces.Tuple([gym.spaces.Discrete(2),gym.spaces.Discrete(3)])
        else:
            self.action_space = gym.spaces.Discrete(4)
        self.multi_obs_space = kwargs.pop('multi_obs_space', False)
        if self.multi_obs_space:
            spaces = {
                'pos': gym.spaces.Box(low=0, high=1, shape=(2, ), dtype=np.float32),
                'info': gym.spaces.Box(low=0, high=1, shape=(4, ), dtype=np.float32),
            }
            self.observation_space = gym.spaces.Dict(spaces)
        else:
            # [pos(2), goal(2), show_flag, step_count]
            self.observation_space = gym.spaces.Box(low=0, high=1, shape=(6, ), dtype=np.float32)
        self.state_space = self.observation_space
        if self.apply_exploration_reward:
            pass
        self.reset()
    def get_number_of_agents(self):
        return 1
    def reset(self):
        """Place the agent at the origin and sample a goal at Chebyshev
        distance in [min_dist, max_dist] with random sign per axis."""
        self._curr_steps = 0
        self._current_pos = [0,0]
        # (removed unused local `bound = self.max_dist - self.min_dist`)
        rand_dir = - 2 * np.random.randint(0, 2, (2,)) + 1
        self._goal_pos = rand_dir * np.random.randint(self.min_dist, self.max_dist+1, (2,))
        obs = np.concatenate([self._current_pos, self._goal_pos, [1, 0]], axis=None)
        obs = obs.astype(np.float32)
        if self.multi_obs_space:
            obs = {
                'pos': obs[:2],
                'info': obs[2:]
            }
        if self.use_central_value:
            obses = {}
            obses["obs"] = obs
            obses["state"] = obs
        else:
            obses = obs
        return obses
    def step_categorical(self, action):
        """Move one cell in one of 4 directions (first 2 steps are no-ops)."""
        if self._curr_steps > 1:
            if action == 0:
                self._current_pos[0] += 1
            if action == 1:
                self._current_pos[0] -= 1
            if action == 2:
                self._current_pos[1] += 1
            if action == 3:
                self._current_pos[1] -= 1
    def step_multi_categorical(self, action):
        """Two sub-actions: x move (2-way) and y move (3-way incl. no-op)."""
        if self._curr_steps > 1:
            if action[0] == 0:
                self._current_pos[0] += 1
            if action[0] == 1:
                self._current_pos[0] -= 1
            if action[1] == 0:
                self._current_pos[1] += 1
            if action[1] == 1:
                self._current_pos[1] -= 1
            if action[1] == 2:
                pass
    def step(self, action):
        info = {}
        self._curr_steps += 1
        if self.multi_discrete_space:
            self.step_multi_categorical(action)
        else:
            self.step_categorical(action)
        # reward[0]: goal bonus; reward[1]: optional distance shaping.
        reward = [0.0, 0.0]
        done = False
        dist = self._current_pos - self._goal_pos
        if (dist**2).sum() < 0.0001:
            reward[0] = 1.0
            info = {'scores' : 1}
            done = True
        elif self._curr_steps == self.max_steps:
            info = {'scores' : 0}
            done = True
        dist_coef = -0.1
        if self.apply_dist_reward:
            reward[1] = dist_coef * np.abs(dist).sum() / self.max_dist
        show_object = 0
        if self.hide_object:
            obs = np.concatenate([self._current_pos, [0,0], [show_object, self._curr_steps]], axis=None)
        else:
            show_object = 1
            obs = np.concatenate([self._current_pos, self._goal_pos, [show_object, self._curr_steps]], axis=None)
        obs = obs.astype(np.float32)
        #state = state.astype(np.float32)
        if self.multi_obs_space:
            obs = {
                'pos': obs[:2],
                'info': obs[2:]
            }
        if self.use_central_value:
            # The critic state always contains the (unhidden) goal position.
            state = np.concatenate([self._current_pos, self._goal_pos, [show_object, self._curr_steps]], axis=None)
            obses = {}
            obses["obs"] = obs
            if self.multi_obs_space:
                obses["state"] = {
                    'pos': state[:2],
                    'info': state[2:]
                }
            else:
                obses["state"] = state.astype(np.float32)
        else:
            obses = obs
        if self.multi_head_value:
            pass
        else:
            reward = reward[0] + reward[1]
        return obses, np.array(reward).astype(np.float32), done, info
    def has_action_mask(self):
        return False
| 5,217 |
Python
| 34.020134 | 115 | 0.500096 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/test/test_asymmetric_env.py
|
import gym
import numpy as np
from rl_games.common.wrappers import MaskVelocityWrapper
class TestAsymmetricCritic(gym.Env):
    """Wraps a gym env for asymmetric-critic testing: the actor sees a
    velocity-masked observation while the critic state is unmasked."""

    def __init__(self, wrapped_env_name, **kwargs):
        gym.Env.__init__(self)
        self.apply_mask = kwargs.pop('apply_mask', True)
        self.use_central_value = kwargs.pop('use_central_value', True)
        self.env = gym.make(wrapped_env_name)
        if self.apply_mask:
            if wrapped_env_name not in ["CartPole-v1", "Pendulum-v0", "LunarLander-v2", "LunarLanderContinuous-v2"]:
                # Fixed: the original raised a plain string, which is a
                # TypeError in Python 3 (exceptions must derive from BaseException).
                raise ValueError('unsupported env')
            self.mask = MaskVelocityWrapper(self.env, wrapped_env_name).mask
        else:
            self.mask = 1
        self.n_agents = 1
        self.use_central_value = True  # NOTE: unconditionally overrides the kwarg above
        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space
        self.state_space = self.env.observation_space

    def get_number_of_agents(self):
        return self.n_agents

    def reset(self):
        obs = self.env.reset()
        obs_dict = {}
        obs_dict["obs"] = obs * self.mask  # masked view for the actor
        obs_dict["state"] = obs            # full view for the critic
        if self.use_central_value:
            obses = obs_dict
        else:
            obses = obs_dict["obs"].astype(np.float32)
        return obses

    def step(self, actions):
        obs, rewards, dones, info = self.env.step(actions)
        obs_dict = {}
        obs_dict["obs"] = obs * self.mask
        obs_dict["state"] = obs
        if self.use_central_value:
            obses = obs_dict
        else:
            obses = obs_dict["obs"].astype(np.float32)
        return obses, rewards, dones, info

    def has_action_mask(self):
        return False
| 1,715 |
Python
| 31.377358 | 116 | 0.580758 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/envs/diambra/diambra.py
|
import gym
import numpy as np
import os
import random
from diambra_environment.diambraGym import diambraGym
from diambra_environment.makeDiambraEnv import make_diambra_env
class DiambraEnv(gym.Env):
    """Wrapper for the DIAMBRA arcade-fighting environment (game 'doapp').

    Builds the wrapped env through make_diambra_env with fixed game/wrapper
    settings; only difficulty, character, frame stack, and attack-button
    combinations are exposed via kwargs.
    """
    def __init__(self, **kwargs):
        gym.Env.__init__(self)
        self.seed = kwargs.pop('seed', None)
        self.difficulty = kwargs.pop('difficulty', 3)
        self.env_path = kwargs.pop('env_path', "/home/trrrrr/Documents/github/ml/diambra/DIAMBRAenvironment-main")
        self.character = kwargs.pop('character', 'Raidou')
        self.frame_stack = kwargs.pop('frame_stack', 3)
        self.attacks_buttons = kwargs.pop('attacks_buttons', False)
        self._game_num = 0
        self.n_agents = 1
        # Random rank keeps parallel workers' env instances distinct.
        self.rank = random.randint(0, 100500)
        repo_base_path = os.path.abspath(self.env_path) # Absolute path to your DIAMBRA environment
        env_kwargs = {}
        env_kwargs["gameId"] = "doapp"
        env_kwargs["roms_path"] = os.path.join(repo_base_path, "roms/") # Absolute path to roms
        env_kwargs["mame_diambra_step_ratio"] = 6
        env_kwargs["render"] = False
        env_kwargs["lock_fps"] = False # Locks to 60 FPS
        env_kwargs["sound"] = env_kwargs["lock_fps"] and env_kwargs["render"]
        env_kwargs["player"] = "Random"
        env_kwargs["difficulty"] = self.difficulty
        env_kwargs["characters"] = [[self.character, "Random"], [self.character, "Random"]]
        env_kwargs["charOutfits"] = [2, 2]
        gym_kwargs = {}
        gym_kwargs["P2brain"] = None
        gym_kwargs["continue_game"] = 0.0
        gym_kwargs["show_final"] = False
        gym_kwargs["gamePads"] = [None, None]
        gym_kwargs["actionSpace"] = ["discrete", "multiDiscrete"]
        #gym_kwargs["attackButCombinations"] = [False, False]
        gym_kwargs["attackButCombinations"] = [self.attacks_buttons, self.attacks_buttons]
        gym_kwargs["actBufLen"] = 12
        wrapper_kwargs = {}
        wrapper_kwargs["hwc_obs_resize"] = [128, 128, 1]
        wrapper_kwargs["normalize_rewards"] = True
        wrapper_kwargs["clip_rewards"] = False
        wrapper_kwargs["frame_stack"] = self.frame_stack
        wrapper_kwargs["dilation"] = 1
        wrapper_kwargs["scale"] = True
        wrapper_kwargs["scale_mod"] = 0
        # Extra scalar features appended to the visual observation.
        key_to_add = []
        key_to_add.append("actionsBuf")
        key_to_add.append("ownHealth")
        key_to_add.append("oppHealth")
        key_to_add.append("ownPosition")
        key_to_add.append("oppPosition")
        key_to_add.append("stage")
        key_to_add.append("character")
        self.env = make_diambra_env(diambraGym, env_prefix="Train" + str(self.rank), seed= self.rank,
                                    diambra_kwargs=env_kwargs,
                                    diambra_gym_kwargs=gym_kwargs,
                                    wrapper_kwargs=wrapper_kwargs,
                                    key_to_add=key_to_add)
        self.observation_space = self.env.observation_space
        self.action_space = self.env.action_space
    def _preproc_state_obs(self, obs):
        # Currently a passthrough; kept as an extension point.
        return obs
    def reset(self):
        self._game_num += 1
        obs = self.env.reset() # rename, to think remove
        obs_dict = self._preproc_state_obs(obs)
        return obs_dict
    def step(self, actions):
        obs, reward, done, info = self.env.step(actions)
        return obs, reward, done, info
    def has_action_mask(self):
        return False
| 3,496 |
Python
| 38.292134 | 114 | 0.588673 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/torch_ext.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.optimizer import Optimizer
# Mapping between NumPy dtypes and their torch equivalents (and the inverse),
# used when converting buffers between the two frameworks.
numpy_to_torch_dtype_dict = {
    np.dtype('bool')       : torch.bool,
    np.dtype('uint8')      : torch.uint8,
    np.dtype('int8')       : torch.int8,
    np.dtype('int16')      : torch.int16,
    np.dtype('int32')      : torch.int32,
    np.dtype('int64')      : torch.int64,
    np.dtype('float16')    : torch.float16,
    np.dtype('float32')    : torch.float32,
    np.dtype('float64')    : torch.float64,
    np.dtype('complex64')  : torch.complex64,
    np.dtype('complex128') : torch.complex128,
}
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma, reduce=True):
    """KL(p0 || p1) between diagonal Gaussians, summed over the action dims.

    Returns the batch mean when `reduce` is True, per-sample KL otherwise.
    The 1e-5 terms guard against division by / log of zero sigmas.
    """
    log_ratio = torch.log(p1_sigma / p0_sigma + 1e-5)
    quad_term = (p0_sigma ** 2 + (p1_mu - p0_mu) ** 2) / (2.0 * (p1_sigma ** 2 + 1e-5))
    kl = (log_ratio + quad_term - 0.5).sum(dim=-1)
    return kl.mean() if reduce else kl
def mean_mask(input, mask, sum_mask):
    """Masked mean: sum of `input * mask` divided by `sum_mask`.

    Fixes a NameError in the original, which referenced an undefined
    `rnn_masks` instead of the `mask` parameter.
    """
    return (input * mask).sum() / sum_mask
def shape_whc_to_cwh(shape):
    """Move the channel axis of an HWC shape tuple to the front (CHW).

    Shapes that are not 3-dimensional are returned unchanged.
    """
    if len(shape) != 3:
        return shape
    return (shape[2], shape[0], shape[1])
def safe_filesystem_op(func, *args, **kwargs):
    """
    Execute `func(*args, **kwargs)` with up to 5 attempts and exponential
    backoff. This is to prevent spurious crashes related to saving
    checkpoints or restoring from checkpoints in a Network Filesystem
    environment (i.e. NGC cloud or SLURM).

    Raises RuntimeError after the final failed attempt.
    """
    import time  # local import: this module does not import `time` at top level
    num_attempts = 5
    for attempt in range(num_attempts):
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            print(f'Exception {exc} when trying to execute {func} with args:{args} and kwargs:{kwargs}...')
            wait_sec = 2 ** attempt
            print(f'Waiting {wait_sec} before trying again...')
            time.sleep(wait_sec)
    raise RuntimeError(f'Could not execute {func}, give up after {num_attempts} attempts...')
def safe_save(state, filename):
    # torch.save with NFS-safe retries (see safe_filesystem_op).
    return safe_filesystem_op(torch.save, state, filename)
def safe_load(filename):
    # torch.load with NFS-safe retries (see safe_filesystem_op).
    return safe_filesystem_op(torch.load, filename)
def save_checkpoint(filename, state):
    """Save `state` to '<filename>.pth' with filesystem retries."""
    print("=> saving checkpoint '{}'".format(filename + '.pth'))
    safe_save(state, filename + '.pth')
def load_checkpoint(filename):
    """Load a checkpoint saved by save_checkpoint, with filesystem retries."""
    print("=> loading checkpoint '{}'".format(filename))
    state = safe_load(filename)
    return state
def parameterized_truncated_normal(uniform, mu, sigma, a, b):
    """Map U(0,1) samples to a normal truncated to [a, b] via inverse-CDF.

    `uniform` is a torch tensor of U(0,1) samples; mu/sigma/a/b are scalars.
    """
    normal = torch.distributions.normal.Normal(0, 1)
    # Truncation bounds in standard-normal units.
    alpha = (a - mu) / sigma
    beta = (b - mu) / sigma
    alpha_normal_cdf = normal.cdf(torch.from_numpy(np.array(alpha)))
    p = alpha_normal_cdf + (normal.cdf(torch.from_numpy(np.array(beta))) - alpha_normal_cdf) * uniform
    p = p.numpy()
    one = np.array(1, dtype=p.dtype)
    epsilon = np.array(np.finfo(p.dtype).eps, dtype=p.dtype)
    # Clamp to the open interval (-1, 1) so erfinv stays finite.
    v = np.clip(2 * p - 1, -one + epsilon, one - epsilon)
    x = mu + sigma * np.sqrt(2) * torch.erfinv(torch.from_numpy(v))
    x = torch.clamp(x, a, b)
    return x
def truncated_normal(uniform, mu=0.0, sigma=1.0, a=-2, b=2):
    # Convenience wrapper with standard-normal defaults truncated to [-2, 2].
    return parameterized_truncated_normal(uniform, mu, sigma, a, b)
def sample_truncated_normal(shape=(), mu=0.0, sigma=1.0, a=-2, b=2):
    # Draw fresh U(0,1) samples of `shape` (via np.random) and map them
    # through the truncated normal.
    return truncated_normal(torch.from_numpy(np.random.uniform(0, 1, shape)), mu, sigma, a, b)
def variance_scaling_initializer(tensor, mode='fan_in', scale=2.0):
    """In-place truncated-normal init with fan-scaled variance (He-style).

    sigma = sqrt(scale / fan); samples are truncated to +/- 2 sigma.
    Returns the tensor for chaining.
    """
    fan = torch.nn.init._calculate_correct_fan(tensor, mode)
    # removed stray debug `print(fan, scale)` from the original
    sigma = np.sqrt(scale / fan)
    with torch.no_grad():
        tensor[:] = sample_truncated_normal(tensor.size(), sigma=sigma)
        return tensor
def random_sample(obs_batch, prob):
    """Select a random subset of rows from obs_batch.

    The subset size is floor(prob * batch); rows are chosen without
    replacement via a random permutation on the batch's device.
    """
    batch_size = obs_batch.size()[0]
    count = int(prob * batch_size)
    perm = torch.randperm(batch_size, device=obs_batch.device)
    return torch.index_select(obs_batch, 0, perm[:count])
def mean_list(val):
    """Mean of a sequence of same-shape tensors, returned as a tensor."""
    stacked = torch.stack(val)
    return stacked.mean()
def apply_masks(losses, mask=None):
    """Reduce each loss tensor to a scalar, optionally weighted by `mask`.

    With a mask, each loss is multiplied elementwise by mask.unsqueeze(1)
    and divided by the mask's total element count (numel, not the count of
    non-zero entries). Returns (reduced_losses, sum_mask); sum_mask is None
    when no mask is given.
    """
    if mask is None:
        return [torch.mean(l) for l in losses], None
    mask = mask.unsqueeze(1)
    sum_mask = mask.numel()
    #sum_mask = mask.sum()
    reduced = [(l * mask).sum() / sum_mask for l in losses]
    return reduced, sum_mask
def normalization_with_masks(values, masks):
    """Standardize `values` using mean/std computed over masked entries only.

    Masked-out positions contribute zero to the statistics but are still
    normalized in the output (they become -mean/std).
    """
    sum_mask = masks.sum()
    masked = values * masks
    mean = masked.sum() / sum_mask
    # Var = E[x^2] - (E[x])^2 over the masked entries, then Bessel-corrected.
    min_sqr = ((masked ** 2) / sum_mask).sum() - ((masked / sum_mask).sum()) ** 2
    std = torch.sqrt(min_sqr * sum_mask / (sum_mask - 1))
    return (masked - mean) / (std + 1e-8)
class CoordConv2d(nn.Conv2d):
    """Conv2d that appends two coordinate channels to its input (CoordConv).

    Coordinate grids are cached in a class-level pool keyed by
    (batch, H, W, dtype) to avoid regenerating them every forward pass.
    """
    pool = {}
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # +2 input channels for the appended x/y coordinate maps.
        super().__init__(in_channels + 2, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias)
    @staticmethod
    def get_coord(x):
        key = int(x.size(0)), int(x.size(2)), int(x.size(3)), x.type()
        if key not in CoordConv2d.pool:
            # Identity affine grid yields normalized [-1, 1] x/y coordinates.
            theta = torch.Tensor([[[1, 0, 0], [0, 1, 0]]])
            coord = torch.nn.functional.affine_grid(theta, torch.Size([1, 1, x.size(2), x.size(3)])).permute([0, 3, 1, 2]).repeat(
                x.size(0), 1, 1, 1).type_as(x)
            CoordConv2d.pool[key] = coord
        return CoordConv2d.pool[key]
    def forward(self, x):
        return torch.nn.functional.conv2d(torch.cat([x, self.get_coord(x).type_as(x)], 1), self.weight, self.bias, self.stride,
                                          self.padding, self.dilation, self.groups)
class LayerNorm2d(nn.Module):
    """
    Layer norm that just works on the channel axis for a Conv2d.
    Ref:
    - code modified from https://github.com/Scitator/Run-Skeleton-Run/blob/master/common/modules/LayerNorm.py
    - paper: https://arxiv.org/abs/1607.06450
    Usage:
        ln = LayerNormConv(3)
        x = Variable(torch.rand((1,3,4,2)))
        ln(x).size()

    NOTE(review): beta (the shift) is registered as ones, not the
    conventional zeros — confirm this offset is intended. Also, gamma/beta
    are buffers, so they are not learnable parameters.
    """
    def __init__(self, features, eps=1e-6):
        super().__init__()
        self.register_buffer("gamma", torch.ones(features).unsqueeze(-1).unsqueeze(-1))
        self.register_buffer("beta", torch.ones(features).unsqueeze(-1).unsqueeze(-1))
        self.eps = eps
        self.features = features
    def _check_input_dim(self, input):
        # Channel count must match the configured feature size.
        if input.size(1) != self.gamma.nelement():
            raise ValueError('got {}-feature tensor, expected {}'
                             .format(input.size(1), self.features))
    def forward(self, x):
        self._check_input_dim(x)
        # Statistics are computed per channel over (batch, H, W).
        x_flat = x.transpose(1,-1).contiguous().view((-1, x.size(1)))
        mean = x_flat.mean(0).unsqueeze(-1).unsqueeze(-1).expand_as(x)
        std = x_flat.std(0).unsqueeze(-1).unsqueeze(-1).expand_as(x)
        return self.gamma.expand_as(x) * (x - mean) / (std + self.eps) + self.beta.expand_as(x)
class DiscreteActionsEncoder(nn.Module):
    """Encode per-agent discrete actions into a single feature vector.

    Each action is embedded (learned embedding or one-hot), the per-agent
    embeddings are flattened together, and a linear layer maps them to
    `mlp_out` features.
    """
    def __init__(self, actions_max, mlp_out, emb_size, num_agents, use_embedding):
        super().__init__()
        self.actions_max = actions_max
        self.emb_size = emb_size
        self.num_agents = num_agents
        self.use_embedding = use_embedding
        if use_embedding:
            self.embedding = torch.nn.Embedding(actions_max, emb_size)
        else:
            # One-hot width replaces the embedding size.
            self.emb_size = actions_max
        self.linear = torch.nn.Linear(self.emb_size * self.num_agents, mlp_out)
    def forward(self, discrete_actions):
        if self.use_embedding:
            emb = self.embedding(discrete_actions)
        else:
            emb = torch.nn.functional.one_hot(discrete_actions, num_classes=self.actions_max)
        # Flatten all agents' embeddings into one row per sample.
        emb = emb.view( -1, self.emb_size * self.num_agents).float()
        emb = self.linear(emb)
        return emb
def get_model_gradients(model):
    """Collect .grad of every parameter, in parameter order.

    Entries are None for parameters that have not received gradients yet.
    """
    return [param.grad for param in model.parameters()]
def get_mean(v):
    """np.mean of a sequence, or 0 when the sequence is empty."""
    return np.mean(v) if len(v) > 0 else 0
class CategoricalMaskedNaive(torch.distributions.Categorical):
    """Categorical distribution with optional action masking via log-mask.

    Invalid actions receive logit -inf (log(0)); entropy() zeroes the
    resulting NaN terms. Fixes the original's super() calls, which named
    the wrong class (CategoricalMasked) and crashed at runtime.
    """
    def __init__(self, probs=None, logits=None, validate_args=None, masks=None):
        self.masks = masks
        if self.masks is None:
            super().__init__(probs, logits, validate_args)
        else:
            # log(0) = -inf disables masked-out actions.
            inf_mask = torch.log(masks.float())
            logits = logits + inf_mask
            super().__init__(probs, logits, validate_args)
    def entropy(self):
        if self.masks is None:
            return super().entropy()
        p_log_p = self.logits * self.probs
        # -inf * 0 yields NaN for masked actions; zero those terms out.
        p_log_p[p_log_p != p_log_p] = 0
        return -p_log_p.sum(-1)
class CategoricalMasked(torch.distributions.Categorical):
    """Categorical distribution with boolean action masking.

    Masked-out logits are replaced with -1e8 (a large finite negative, so
    arithmetic stays finite); `masks` must be a bool tensor broadcastable
    to `logits`.
    """
    def __init__(self, probs=None, logits=None, validate_args=None, masks=None):
        self.masks = masks
        if masks is None:
            super(CategoricalMasked, self).__init__(probs, logits, validate_args)
        else:
            self.device = self.masks.device
            logits = torch.where(self.masks, logits, torch.tensor(-1e+8).to(self.device))
            super(CategoricalMasked, self).__init__(probs, logits, validate_args)
    def rsample(self):
        # Gumbel-max trick: argmax(logits + Gumbel noise) samples the categorical.
        u = torch.distributions.Uniform(low=torch.zeros_like(self.logits, device = self.logits.device), high=torch.ones_like(self.logits, device = self.logits.device)).sample()
        #print(u.size(), self.logits.size())
        rand_logits = self.logits -(-u.log()).log()
        return torch.max(rand_logits, axis=-1)[1]
    def entropy(self):
        if self.masks is None:
            return super(CategoricalMasked, self).entropy()
        p_log_p = self.logits * self.probs
        # Contributions from masked actions are excluded from the sum.
        p_log_p = torch.where(self.masks, p_log_p, torch.tensor(0.0).to(self.device))
        return -p_log_p.sum(-1)
class AverageMeter(nn.Module):
    """Running mean over a sliding window of at most `max_size` samples.

    The mean is stored as a module buffer so it follows the module across
    devices and is included in state_dict.
    """
    def __init__(self, in_shape, max_size):
        super(AverageMeter, self).__init__()
        self.max_size = max_size
        self.current_size = 0
        self.register_buffer("mean", torch.zeros(in_shape, dtype = torch.float32))

    def update(self, values):
        """Fold a batch of values (batch-first) into the running mean."""
        batch = values.size()[0]
        if batch == 0:
            return
        batch_mean = torch.mean(values.float(), dim=0)
        batch = np.clip(batch, 0, self.max_size)
        # How many previously-seen samples still fit in the window.
        kept = min(self.max_size - batch, self.current_size)
        total = kept + batch
        self.current_size = total
        self.mean = (self.mean * kept + batch_mean * batch) / total

    def clear(self):
        self.current_size = 0
        self.mean.fill_(0)

    def __len__(self):
        return self.current_size

    def get_mean(self):
        return self.mean.squeeze(0).cpu().numpy()
class IdentityRNN(nn.Module):
    """Stand-in for an RNN cell: passes input and hidden state through unchanged."""

    def __init__(self, in_shape, out_shape):
        super(IdentityRNN, self).__init__()
        # An identity "RNN" cannot change the feature size.
        assert (in_shape == out_shape)
        self.identity = torch.nn.Identity()

    def forward(self, x, h):
        # Return the input untouched, hidden state unchanged.
        return self.identity(x), h
| 11,332 |
Python
| 35.092357 | 176 | 0.607395 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/sac_agent.py
|
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import vecenv
from rl_games.common import schedulers
from rl_games.common import experience
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from torch import optim
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import time
import os
class SACAgent:
    def __init__(self, base_name, config):
        """Build the SAC agent: networks, optimizers, replay buffer, stats.

        Args:
            base_name: run name used for logging.
            config: algorithm config dict (common keys handled in base_init).
        """
        print(config)
        # TODO: Get obs shape and self.network
        self.base_init(base_name, config)
        self.num_seed_steps = config["num_seed_steps"]
        self.gamma = config["gamma"]
        self.critic_tau = config["critic_tau"]
        self.batch_size = config["batch_size"]
        self.init_alpha = config["init_alpha"]
        self.learnable_temperature = config["learnable_temperature"]
        self.replay_buffer_size = config["replay_buffer_size"]
        self.num_steps_per_episode = config.get("num_steps_per_episode", 1)
        self.normalize_input = config.get("normalize_input", False)
        self.max_env_steps = config.get("max_env_steps", 1000) # temporary, in future we will use other approach
        print(self.batch_size, self.num_actors, self.num_agents)
        self.num_frames_per_epoch = self.num_actors * self.num_steps_per_episode
        # Temperature is optimized in log space (see the `alpha` property).
        self.log_alpha = torch.tensor(np.log(self.init_alpha)).float().to(self.sac_device)
        self.log_alpha.requires_grad = True
        action_space = self.env_info['action_space']
        self.actions_num = action_space.shape[0]
        self.action_range = [
            float(self.env_info['action_space'].low.min()),
            float(self.env_info['action_space'].high.max())
        ]
        obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape)
        net_config = {
            'obs_dim': self.env_info["observation_space"].shape[0],
            'action_dim': self.env_info["action_space"].shape[0],
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape
        }
        self.model = self.network.build(net_config)
        self.model.to(self.sac_device)
        print("Number of Agents", self.num_actors, "Batch Size", self.batch_size)
        # Separate optimizers for actor, critic and the temperature parameter.
        self.actor_optimizer = torch.optim.Adam(self.model.sac_network.actor.parameters(),
                                                lr=self.config['actor_lr'],
                                                betas=self.config.get("actor_betas", [0.9, 0.999]))
        self.critic_optimizer = torch.optim.Adam(self.model.sac_network.critic.parameters(),
                                                 lr=self.config["critic_lr"],
                                                 betas=self.config.get("critic_betas", [0.9, 0.999]))
        self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha],
                                                    lr=self.config["alpha_lr"],
                                                    betas=self.config.get("alphas_betas", [0.9, 0.999]))
        self.replay_buffer = experience.VectorizedReplayBuffer(self.env_info['observation_space'].shape,
                                                               self.env_info['action_space'].shape,
                                                               self.replay_buffer_size,
                                                               self.sac_device)
        # Target entropy is a (negative) fraction of the action dimensionality.
        self.target_entropy_coef = config.get("target_entropy_coef", 0.5)
        self.target_entropy = self.target_entropy_coef * -self.env_info['action_space'].shape[0]
        print("Target entropy", self.target_entropy)
        self.step = 0
        self.algo_observer = config['features']['observer']

        # TODO: Is there a better way to get the maximum number of episodes?
        self.max_episodes = torch.ones(self.num_actors, device=self.sac_device)*self.num_steps_per_episode
        # self.episode_lengths = np.zeros(self.num_actors, dtype=int)
        if self.normalize_input:
            self.running_mean_std = RunningMeanStd(obs_shape).to(self.sac_device)
    def base_init(self, base_name, config):
        """Shared setup: environment, device, logging dirs and bookkeeping state."""
        self.config = config
        self.env_config = config.get('env_config', {})
        self.num_actors = config.get('num_actors', 1)
        self.env_name = config['env_name']
        print("Env name:", self.env_name)

        self.env_info = config.get('env_info')
        if self.env_info is None:
            # No pre-built env info supplied: create the vec env and query it.
            self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
            self.env_info = self.vec_env.get_env_info()

        self.sac_device = config.get('device', 'cuda:0')

        #temporary:
        self.ppo_device = self.sac_device
        print('Env info:')
        print(self.env_info)

        self.rewards_shaper = config['reward_shaper']
        self.observation_space = self.env_info['observation_space']
        self.weight_decay = config.get('weight_decay', 0.0)
        #self.use_action_masks = config.get('use_action_masks', False)
        self.is_train = config.get('is_train', True)

        self.c_loss = nn.MSELoss()
        # self.c2_loss = nn.SmoothL1Loss()

        self.save_best_after = config.get('save_best_after', 500)
        self.print_stats = config.get('print_stats', True)
        self.rnn_states = None
        self.name = base_name

        self.max_epochs = self.config.get('max_epochs', 1e6)

        self.network = config['network']
        self.rewards_shaper = config['reward_shaper']
        self.num_agents = self.env_info.get('agents', 1)
        self.obs_shape = self.observation_space.shape

        self.games_to_track = self.config.get('games_to_track', 100)
        # Sliding-window reward/length statistics over recent finished games.
        self.game_rewards = torch_ext.AverageMeter(1, self.games_to_track).to(self.sac_device)
        self.game_lengths = torch_ext.AverageMeter(1, self.games_to_track).to(self.sac_device)
        self.obs = None

        self.min_alpha = torch.tensor(np.log(1)).float().to(self.sac_device)

        self.frame = 0
        self.update_time = 0
        self.last_mean_rewards = -100500
        self.play_time = 0
        self.epoch_num = 0

        # self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
        # print("Run Directory:", config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
        self.experiment_dir = config.get('logdir', './')
        self.nn_dir = os.path.join(self.experiment_dir, 'nn')
        self.summaries_dir = os.path.join(self.experiment_dir, 'runs')
        os.makedirs(self.experiment_dir, exist_ok=True)
        os.makedirs(self.nn_dir, exist_ok=True)
        os.makedirs(self.summaries_dir, exist_ok=True)
        self.writer = SummaryWriter(self.summaries_dir)
        print("Run Directory:", self.summaries_dir)

        # Set lazily on the first env transition (tensor vs numpy envs).
        self.is_tensor_obses = None
        self.is_rnn = False
        self.last_rnn_indices = None
        self.last_state_indices = None
def init_tensors(self):
if self.observation_space.dtype == np.uint8:
torch_dtype = torch.uint8
else:
torch_dtype = torch.float32
batch_size = self.num_agents * self.num_actors
self.current_rewards = torch.zeros(batch_size, dtype=torch.float32, device=self.sac_device)
self.current_lengths = torch.zeros(batch_size, dtype=torch.long, device=self.sac_device)
self.dones = torch.zeros((batch_size,), dtype=torch.uint8, device=self.sac_device)
@property
def alpha(self):
return self.log_alpha.exp()
    @property
    def device(self):
        # Torch device used by this agent (alias of sac_device for API parity).
        return self.sac_device
    def get_full_state_weights(self):
        """Network weights plus optimizer state and step counter, for checkpointing."""
        state = self.get_weights()
        # NOTE(review): saved under key 'steps' — the loader must read the same key.
        state['steps'] = self.step
        state['actor_optimizer'] = self.actor_optimizer.state_dict()
        state['critic_optimizer'] = self.critic_optimizer.state_dict()
        state['log_alpha_optimizer'] = self.log_alpha_optimizer.state_dict()
        return state
def get_weights(self):
state = {'actor': self.model.sac_network.actor.state_dict(),
'critic': self.model.sac_network.critic.state_dict(),
'critic_target': self.model.sac_network.critic_target.state_dict()}
return state
    def save(self, fn):
        """Serialize the full training state (weights + optimizers) to `fn`."""
        state = self.get_full_state_weights()
        torch_ext.save_checkpoint(fn, state)
def set_weights(self, weights):
self.model.sac_network.actor.load_state_dict(weights['actor'])
self.model.sac_network.critic.load_state_dict(weights['critic'])
self.model.sac_network.critic_target.load_state_dict(weights['critic_target'])
if self.normalize_input:
self.running_mean_std.load_state_dict(weights['running_mean_std'])
def set_full_state_weights(self, weights):
self.set_weights(weights)
self.step = weights['step']
self.actor_optimizer.load_state_dict(weights['actor_optimizer'])
self.critic_optimizer.load_state_dict(weights['critic_optimizer'])
self.log_alpha_optimizer.load_state_dict(weights['log_alpha_optimizer'])
    def restore(self, fn):
        """Load a checkpoint file produced by save()."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.set_full_state_weights(checkpoint)
def get_masked_action_values(self, obs, action_masks):
assert False
def set_eval(self):
self.model.eval()
if self.normalize_input:
self.running_mean_std.eval()
def set_train(self):
self.model.train()
if self.normalize_input:
self.running_mean_std.train()
    def update_critic(self, obs, action, reward, next_obs, not_done,
                      step):
        """One gradient step on both Q-networks toward the soft Bellman target.

        Returns (total_loss, q1_loss, q2_loss), all detached.
        """
        with torch.no_grad():
            # Soft target: min of the twin target critics minus entropy bonus.
            dist = self.model.actor(next_obs)
            next_action = dist.rsample()
            log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
            target_Q1, target_Q2 = self.model.critic_target(next_obs, next_action)
            target_V = torch.min(target_Q1, target_Q2) - self.alpha * log_prob
            # not_done zeroes the bootstrap term at true episode terminations.
            target_Q = reward + (not_done * self.gamma * target_V)
            target_Q = target_Q.detach()

        # get current Q estimates
        current_Q1, current_Q2 = self.model.critic(obs, action)

        critic1_loss = self.c_loss(current_Q1, target_Q)
        critic2_loss = self.c_loss(current_Q2, target_Q)
        critic_loss = critic1_loss + critic2_loss
        self.critic_optimizer.zero_grad(set_to_none=True)
        critic_loss.backward()
        self.critic_optimizer.step()

        return critic_loss.detach(), critic1_loss.detach(), critic2_loss.detach()
    def update_actor_and_alpha(self, obs, step):
        """One gradient step on the policy and (optionally) the temperature.

        Critic parameters are frozen for the actor step so the actor gradient
        does not flow into the Q-networks. Returns detached actor loss,
        entropy, alpha, and alpha_loss (None when temperature is fixed).
        """
        for p in self.model.sac_network.critic.parameters():
            p.requires_grad = False
        dist = self.model.actor(obs)
        action = dist.rsample()
        log_prob = dist.log_prob(action).sum(-1, keepdim=True)
        entropy = dist.entropy().sum(-1, keepdim=True).mean()
        actor_Q1, actor_Q2 = self.model.critic(obs, action)
        actor_Q = torch.min(actor_Q1, actor_Q2)

        # Temperature is clamped from below by min_alpha in the actor objective.
        actor_loss = (torch.max(self.alpha.detach(), self.min_alpha) * log_prob - actor_Q)
        actor_loss = actor_loss.mean()

        self.actor_optimizer.zero_grad(set_to_none=True)
        actor_loss.backward()
        self.actor_optimizer.step()

        for p in self.model.sac_network.critic.parameters():
            p.requires_grad = True

        if self.learnable_temperature:
            # Dual (temperature) loss drives entropy toward the target.
            alpha_loss = (self.alpha *
                          (-log_prob - self.target_entropy).detach()).mean()
            self.log_alpha_optimizer.zero_grad(set_to_none=True)
            alpha_loss.backward()
            self.log_alpha_optimizer.step()
        else:
            alpha_loss = None

        return actor_loss.detach(), entropy.detach(), self.alpha.detach(), alpha_loss # TODO: maybe not self.alpha
def soft_update_params(self, net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
    def update(self, step):
        """Sample a replay minibatch and update critic, actor and target network."""
        obs, action, reward, next_obs, done = self.replay_buffer.sample(self.batch_size)
        not_done = ~done

        obs = self.preproc_obs(obs)
        next_obs = self.preproc_obs(next_obs)

        critic_loss, critic1_loss, critic2_loss = self.update_critic(obs, action, reward, next_obs, not_done, step)

        actor_loss, entropy, alpha, alpha_loss = self.update_actor_and_alpha(obs, step)

        actor_loss_info = actor_loss, entropy, alpha, alpha_loss
        # Polyak-average the online critic into the target critic.
        self.soft_update_params(self.model.sac_network.critic, self.model.sac_network.critic_target,
                                self.critic_tau)
        return actor_loss_info, critic1_loss, critic2_loss
def preproc_obs(self, obs):
if isinstance(obs, dict):
obs = obs['obs']
if self.normalize_input:
obs = self.running_mean_std(obs)
return obs
    def env_step(self, actions):
        """Step the vectorized env; results are returned as tensors on the SAC device."""
        obs, rewards, dones, infos = self.vec_env.step(actions) # (obs_space) -> (n, obs_space)

        # One frame per actor per call.
        self.step += self.num_actors
        if self.is_tensor_obses:
            return obs, rewards, dones, infos
        else:
            # Numpy-based env: convert everything to device tensors.
            return torch.from_numpy(obs).to(self.sac_device), torch.from_numpy(rewards).to(self.sac_device), torch.from_numpy(dones).to(self.sac_device), infos
    def env_reset(self):
        """Reset the env; detects (once) whether it returns tensors or numpy arrays."""
        with torch.no_grad():
            obs = self.vec_env.reset()

        if self.is_tensor_obses is None:
            self.is_tensor_obses = torch.is_tensor(obs)
            print("Observations are tensors:", self.is_tensor_obses)

        if self.is_tensor_obses:
            return obs.to(self.sac_device)
        else:
            return torch.from_numpy(obs).to(self.sac_device)
    def act(self, obs, action_dim, sample=False):
        """Query the policy: stochastic sample when `sample`, else the dist mean.

        Actions are clamped to the env action range. `action_dim` is unused here.
        """
        obs = self.preproc_obs(obs)
        dist = self.model.actor(obs)
        actions = dist.sample() if sample else dist.mean
        actions = actions.clamp(*self.action_range)
        assert actions.ndim == 2
        return actions
    def extract_actor_stats(self, actor_losses, entropies, alphas, alpha_losses, actor_loss_info):
        """Append one update's actor statistics to the provided lists."""
        actor_loss, entropy, alpha, alpha_loss = actor_loss_info

        actor_losses.append(actor_loss)
        entropies.append(entropy)
        # NOTE(review): this tests the *list* (which is never None), so it is
        # always true and a None alpha_loss is appended when temperature is not
        # learnable. train() depends on that via `alpha_losses[0] is not None`,
        # so changing this to `alpha_loss is not None` would break that check.
        if alpha_losses is not None:
            alphas.append(alpha)
            alpha_losses.append(alpha_loss)
    def play_steps(self, random_exploration=False):
        """Collect num_steps_per_episode transitions and (optionally) train.

        With random_exploration=True, actions are uniform in [-1, 1] and no
        gradient updates happen (seeding phase). Returns timing statistics and
        the per-step loss lists.
        """
        total_time_start = time.time()
        total_update_time = 0
        total_time = 0
        step_time = 0.0
        actor_losses = []
        entropies = []
        alphas = []
        alpha_losses = []
        critic1_losses = []
        critic2_losses = []

        obs = self.obs
        for _ in range(self.num_steps_per_episode):
            self.set_eval()
            if random_exploration:
                # Uniform actions in [-1, 1] during the seeding phase.
                action = torch.rand((self.num_actors, *self.env_info["action_space"].shape), device=self.sac_device) * 2 - 1
            else:
                with torch.no_grad():
                    action = self.act(obs.float(), self.env_info["action_space"].shape, sample=True)

            step_start = time.time()
            with torch.no_grad():
                next_obs, rewards, dones, infos = self.env_step(action)
            step_end = time.time()

            self.current_rewards += rewards
            self.current_lengths += 1

            total_time += step_end - step_start
            step_time += (step_end - step_start)

            # One representative index per env (first agent of each group).
            all_done_indices = dones.nonzero(as_tuple=False)
            done_indices = all_done_indices[::self.num_agents]
            self.game_rewards.update(self.current_rewards[done_indices])
            self.game_lengths.update(self.current_lengths[done_indices])

            not_dones = 1.0 - dones.float()
            self.algo_observer.process_infos(infos, done_indices)

            # Timeouts are not true terminations: mask them out of `dones`
            # so truncated episodes keep their bootstrap value in the buffer.
            no_timeouts = self.current_lengths != self.max_env_steps
            dones = dones * no_timeouts

            self.current_rewards = self.current_rewards * not_dones
            self.current_lengths = self.current_lengths * not_dones

            if isinstance(obs, dict):
                obs = obs['obs']
            if isinstance(next_obs, dict):
                next_obs = next_obs['obs']

            rewards = self.rewards_shaper(rewards)
            #if torch.min(obs) < -150 or torch.max(obs) > 150:
            #    print('ATATATA')
            #else:
            self.replay_buffer.add(obs, action, torch.unsqueeze(rewards, 1), next_obs, torch.unsqueeze(dones, 1))

            self.obs = obs = next_obs.clone()

            if not random_exploration:
                self.set_train()
                update_time_start = time.time()
                actor_loss_info, critic1_loss, critic2_loss = self.update(self.epoch_num)
                update_time_end = time.time()
                update_time = update_time_end - update_time_start

                self.extract_actor_stats(actor_losses, entropies, alphas, alpha_losses, actor_loss_info)
                critic1_losses.append(critic1_loss)
                critic2_losses.append(critic2_loss)
            else:
                update_time = 0

            total_update_time += update_time

        total_time_end = time.time()
        total_time = total_time_end - total_time_start
        play_time = total_time - total_update_time

        return step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses
def train_epoch(self):
if self.epoch_num < self.num_seed_steps:
step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses = self.play_steps(random_exploration=True)
else:
step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses = self.play_steps(random_exploration=False)
return step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses
    def train(self):
        """Main loop: run epochs until max_epochs, logging and checkpointing best runs."""
        self.init_tensors()
        self.algo_observer.after_init(self)
        self.last_mean_rewards = -100500
        total_time = 0
        # rep_count = 0
        self.frame = 0
        self.obs = self.env_reset()

        while True:
            self.epoch_num += 1
            step_time, play_time, update_time, epoch_total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses = self.train_epoch()

            total_time += epoch_total_time

            scaled_time = epoch_total_time
            scaled_play_time = play_time
            curr_frames = self.num_frames_per_epoch
            self.frame += curr_frames
            frame = self.frame #TODO: Fix frame
            # print(frame)

            self.writer.add_scalar('performance/step_inference_rl_update_fps', curr_frames / scaled_time, frame)
            self.writer.add_scalar('performance/step_inference_fps', curr_frames / scaled_play_time, frame)
            self.writer.add_scalar('performance/step_fps', curr_frames / step_time, frame)
            self.writer.add_scalar('performance/rl_update_time', update_time, frame)
            self.writer.add_scalar('performance/step_inference_time', play_time, frame)
            self.writer.add_scalar('performance/step_time', step_time, frame)

            if self.epoch_num >= self.num_seed_steps:
                # Loss lists are only populated once the seeding phase is over.
                self.writer.add_scalar('losses/a_loss', torch_ext.mean_list(actor_losses).item(), frame)
                self.writer.add_scalar('losses/c1_loss', torch_ext.mean_list(critic1_losses).item(), frame)
                self.writer.add_scalar('losses/c2_loss', torch_ext.mean_list(critic2_losses).item(), frame)
                self.writer.add_scalar('losses/entropy', torch_ext.mean_list(entropies).item(), frame)
                if alpha_losses[0] is not None:
                    self.writer.add_scalar('losses/alpha_loss', torch_ext.mean_list(alpha_losses).item(), frame)
                self.writer.add_scalar('info/alpha', torch_ext.mean_list(alphas).item(), frame)

            self.writer.add_scalar('info/epochs', self.epoch_num, frame)
            self.algo_observer.after_print_stats(frame, self.epoch_num, total_time)

            mean_rewards = 0
            mean_lengths = 0
            if self.game_rewards.current_size > 0:
                mean_rewards = self.game_rewards.get_mean()
                mean_lengths = self.game_lengths.get_mean()

                self.writer.add_scalar('rewards/step', mean_rewards, frame)
                self.writer.add_scalar('rewards/iter', mean_rewards, self.epoch_num)
                self.writer.add_scalar('rewards/time', mean_rewards, total_time)
                self.writer.add_scalar('episode_lengths/step', mean_lengths, frame)
                # self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num)
                self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)

                # Checkpoint whenever we beat the best mean reward so far.
                if mean_rewards > self.last_mean_rewards and self.epoch_num >= self.save_best_after:
                    print('saving next best rewards: ', mean_rewards)
                    self.last_mean_rewards = mean_rewards
                    # self.save("./nn/" + self.config['name'])
                    self.save(os.path.join(self.nn_dir, self.config['name']))
                    # if self.last_mean_rewards > self.config.get('score_to_win', float('inf')):
                    #     print('Network won!')
                    #     self.save("./nn/" + self.config['name'] + 'ep=' + str(self.epoch_num) + 'rew=' + str(mean_rewards))
                    #     return self.last_mean_rewards, self.epoch_num

            if self.epoch_num > self.max_epochs:
                # self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(self.epoch_num) + 'rew=' + str(mean_rewards))
                self.save(os.path.join(self.nn_dir, 'last_' + self.config['name'] + 'ep=' + str(self.epoch_num) + 'rew=' + str(mean_rewards)))
                print('MAX EPOCHS NUM!')
                return self.last_mean_rewards, self.epoch_num

            update_time = 0
            if self.print_stats:
                fps_step = curr_frames / scaled_play_time
                fps_total = curr_frames / scaled_time
                print(f'epoch: {self.epoch_num} fps step: {fps_step:.1f} fps total: {fps_total:.1f} reward: {mean_rewards:.3f} episode len: {mean_lengths:.3f}')
| 22,630 |
Python
| 41.943074 | 186 | 0.595095 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/d2rl.py
|
import torch
class D2RLNet(torch.nn.Module):
    """MLP with D2RL-style dense connections: the raw input is concatenated
    onto every hidden layer's input except the first."""

    def __init__(self, input_size,
                 units,
                 activations,
                 norm_func_name = None):
        torch.nn.Module.__init__(self)
        self.activations = torch.nn.ModuleList(activations)
        self.linears = torch.nn.ModuleList([])
        self.norm_layers = torch.nn.ModuleList([])
        self.num_layers = len(units)
        in_features = input_size
        for width in units:
            self.linears.append(torch.nn.Linear(in_features, width))
            # The next layer consumes this layer's output plus the skip input.
            in_features = width + input_size
            if norm_func_name == 'layer_norm':
                self.norm_layers.append(torch.nn.LayerNorm(width))
            elif norm_func_name == 'batch_norm':
                self.norm_layers.append(torch.nn.BatchNorm1d(width))
            else:
                self.norm_layers.append(torch.nn.Identity())

    def forward(self, input):
        # First layer: linear -> activation -> norm (order as in the original).
        x = self.norm_layers[0](self.activations[0](self.linears[0](input)))
        # Remaining layers: concat skip input, then linear -> norm -> activation.
        for i in range(1, self.num_layers):
            x = torch.cat([x, input], dim=1)
            x = self.activations[i](self.norm_layers[i](self.linears[i](x)))
        return x
| 1,259 |
Python
| 37.181817 | 71 | 0.544083 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/players.py
|
from rl_games.common.player import BasePlayer
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.tr_helpers import unsqueeze_obs
import gym
import torch
from torch import nn
import numpy as np
def rescale_actions(low, high, action):
    """Map an action from [-1, 1] into the box [low, high], element-wise."""
    half_span = (high - low) / 2.0
    midpoint = (high + low) / 2.0
    return action * half_span + midpoint
class PpoPlayerContinuous(BasePlayer):
    """Inference-only wrapper for a continuous-action PPO policy checkpoint."""

    def __init__(self, config):
        BasePlayer.__init__(self, config)
        self.network = config['network']
        self.actions_num = self.action_space.shape[0]
        self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device)
        self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device)
        self.mask = [False]

        self.normalize_input = self.config['normalize_input']
        obs_shape = self.obs_shape
        # Local network-build config (note: shadows the `config` argument).
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_agents
        }
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()
        if self.normalize_input:
            self.running_mean_std = RunningMeanStd(obs_shape).to(self.device)
            self.running_mean_std.eval()

    def get_action(self, obs, is_determenistic = False):
        """Return an env-scaled action; the distribution mean when deterministic."""
        if self.has_batch_dimension == False:
            obs = unsqueeze_obs(obs)
        obs = self._preproc_obs(obs)
        input_dict = {
            'is_train': False,
            'prev_actions': None,
            'obs' : obs,
            'rnn_states' : self.states
        }
        with torch.no_grad():
            res_dict = self.model(input_dict)
        mu = res_dict['mus']
        action = res_dict['actions']
        self.states = res_dict['rnn_states']
        if is_determenistic:
            current_action = mu
        else:
            current_action = action
        current_action = torch.squeeze(current_action.detach())
        # Network outputs live in [-1, 1]; rescale into the env action box.
        return rescale_actions(self.actions_low, self.actions_high, torch.clamp(current_action, -1.0, 1.0))

    def restore(self, fn):
        """Load model (and normalizer) weights from a checkpoint file."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.model.load_state_dict(checkpoint['model'])
        if self.normalize_input:
            self.running_mean_std.load_state_dict(checkpoint['running_mean_std'])

    def reset(self):
        # Re-initialize RNN hidden state between episodes.
        self.init_rnn()
class PpoPlayerDiscrete(BasePlayer):
    def __init__(self, config):
        BasePlayer.__init__(self, config)
        self.network = config['network']
        # Plain Discrete vs multi-discrete (Tuple of Discretes) action spaces.
        if type(self.action_space) is gym.spaces.Discrete:
            self.actions_num = self.action_space.n
            self.is_multi_discrete = False
        if type(self.action_space) is gym.spaces.Tuple:
            self.actions_num = [action.n for action in self.action_space]
            self.is_multi_discrete = True
        self.mask = [False]
        self.normalize_input = self.config['normalize_input']

        obs_shape = self.obs_shape
        # Local network-build config (note: shadows the `config` argument).
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_agents,
            'value_size': self.value_size
        }
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()
        if self.normalize_input:
            self.running_mean_std = RunningMeanStd(obs_shape).to(self.device)
            self.running_mean_std.eval()
def get_masked_action(self, obs, action_masks, is_determenistic = True):
if self.has_batch_dimension == False:
obs = unsqueeze_obs(obs)
obs = self._preproc_obs(obs)
action_masks = torch.Tensor(action_masks).to(self.device)
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : obs,
'action_masks' : action_masks,
'rnn_states' : self.states
}
self.model.eval()
with torch.no_grad():
neglogp, value, action, logits, self.states = self.model(input_dict)
logits = res_dict['logits']
action = res_dict['actions']
self.states = res_dict['rnn_states']
if self.is_multi_discrete:
if is_determenistic:
action = [torch.argmax(logit.detach(), axis=-1).squeeze() for logit in logits]
return torch.stack(action,dim=-1)
else:
return action.squeeze().detach()
else:
if is_determenistic:
return torch.argmax(logits.detach(), axis=-1).squeeze()
else:
return action.squeeze().detach()
    def get_action(self, obs, is_determenistic = False):
        """Return a discrete action: argmax of logits when deterministic, else a sample."""
        if self.has_batch_dimension == False:
            obs = unsqueeze_obs(obs)
        obs = self._preproc_obs(obs)
        self.model.eval()
        input_dict = {
            'is_train': False,
            'prev_actions': None,
            'obs' : obs,
            'rnn_states' : self.states
        }
        with torch.no_grad():
            res_dict = self.model(input_dict)
        logits = res_dict['logits']
        action = res_dict['actions']
        self.states = res_dict['rnn_states']
        if self.is_multi_discrete:
            if is_determenistic:
                # One argmax per action component, stacked into a single tensor.
                action = [torch.argmax(logit.detach(), axis=1).squeeze() for logit in logits]
                return torch.stack(action,dim=-1)
            else:
                return action.squeeze().detach()
        else:
            if is_determenistic:
                return torch.argmax(logits.detach(), axis=-1).squeeze()
            else:
                return action.squeeze().detach()
    def restore(self, fn):
        """Load model (and normalizer) weights from a checkpoint file."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.model.load_state_dict(checkpoint['model'])
        if self.normalize_input:
            self.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
    def reset(self):
        # Re-initialize RNN hidden state between episodes.
        self.init_rnn()
class SACPlayer(BasePlayer):
    """Inference-only wrapper for a SAC policy checkpoint."""

    def __init__(self, config):
        BasePlayer.__init__(self, config)
        self.network = config['network']
        self.actions_num = self.action_space.shape[0]
        self.action_range = [
            float(self.env_info['action_space'].low.min()),
            float(self.env_info['action_space'].high.max())
        ]

        obs_shape = torch_ext.shape_whc_to_cwh(self.state_shape)
        # NOTE(review): input normalization is hard-coded off here, which makes
        # the running_mean_std branch in restore() unreachable — confirm intent.
        self.normalize_input = False
        # Local network-build config (note: shadows the `config` argument).
        config = {
            'obs_dim': self.env_info["observation_space"].shape[0],
            'action_dim': self.env_info["action_space"].shape[0],
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape
        }
        self.model = self.network.build(config)
        self.model.to(self.device)
        self.model.eval()
        self.is_rnn = self.model.is_rnn()

        # if self.normalize_input:
        #     self.running_mean_std = RunningMeanStd(obs_shape).to(self.device)
        #     self.running_mean_std.eval()

    def restore(self, fn):
        """Load actor/critic/target weights from a checkpoint file."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.model.sac_network.actor.load_state_dict(checkpoint['actor'])
        self.model.sac_network.critic.load_state_dict(checkpoint['critic'])
        self.model.sac_network.critic_target.load_state_dict(checkpoint['critic_target'])
        if self.normalize_input:
            self.running_mean_std.load_state_dict(checkpoint['running_mean_std'])

    def get_action(self, obs, sample=False):
        """Sample (or take the mean of) the policy and clamp to the action range.

        NOTE(review): unlike the PPO players, this does not preprocess `obs`
        before the actor call — confirm callers pass device tensors.
        """
        dist = self.model.actor(obs)
        actions = dist.sample() if sample else dist.mean
        actions = actions.clamp(*self.action_range).to(self.device)
        assert actions.ndim == 2
        return actions

    def reset(self):
        pass
| 7,933 |
Python
| 36.074766 | 108 | 0.576831 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/self_play_manager.py
|
import numpy as np
class SelfPlayManager:
    """Pushes the learner's weights into opponent environments once its score
    over recent games clears a configured threshold."""

    def __init__(self, config, writter):
        self.config = config
        self.writter = writter
        self.update_score = self.config['update_score']
        self.games_to_check = self.config['games_to_check']
        self.check_scores = self.config.get('check_scores', False)
        self.env_update_num = self.config.get('env_update_num', 1)
        # Rotating window of env indices that receive the next weight push.
        self.env_indexes = np.arange(start=0, stop=self.env_update_num)
        self.updates_num = 0

    def update(self, algo):
        """Called once per training iteration; distributes weights when due."""
        self.updates_num += 1
        # Score source: game scores when configured, otherwise raw rewards.
        data = algo.game_scores if self.check_scores else algo.game_rewards
        if len(data) < self.games_to_check:
            return
        mean_scores = data.get_mean()
        mean_rewards = algo.game_rewards.get_mean()
        if mean_scores > self.update_score:
            print('Mean scores: ', mean_scores, ' mean rewards: ', mean_rewards, ' updating weights')
            algo.clear_stats()
            self.writter.add_scalar('selfplay/iters_update_weigths', self.updates_num, algo.frame)
            algo.vec_env.set_weights(self.env_indexes, algo.get_weights())
            # Advance the rotation so different envs are updated next time.
            self.env_indexes = (self.env_indexes + 1) % (algo.num_actors)
            self.updates_num = 0
| 1,332 |
Python
| 40.656249 | 105 | 0.572072 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/sac_helper.py
|
# from rl_games.algos_torch.network_builder import NetworkBuilder
from torch import distributions as pyd
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
class TanhTransform(pyd.transforms.Transform):
    """Bijective tanh squashing R -> (-1, 1) for TransformedDistribution."""
    domain = pyd.constraints.real
    codomain = pyd.constraints.interval(-1.0, 1.0)
    bijective = True
    sign = +1

    def __init__(self, cache_size=1):
        # cache_size=1 caches the forward input so the inverse can reuse it.
        super().__init__(cache_size=cache_size)

    @staticmethod
    def atanh(x):
        # Inverse hyperbolic tangent via log1p for numerical stability.
        return 0.5 * (x.log1p() - (-x).log1p())

    def __eq__(self, other):
        # Parameter-free transform: any two TanhTransforms are equal.
        return isinstance(other, TanhTransform)

    def _call(self, x):
        return x.tanh()

    def _inverse(self, y):
        # We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
        # one should use `cache_size=1` instead
        return self.atanh(y)

    def log_abs_det_jacobian(self, x, y):
        # log|d tanh(x)/dx| = log(1 - tanh^2(x)) = 2*(log 2 - x - softplus(-2x)).
        # We use a formula that is more numerically stable, see details in the following link
        # https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
        return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
    """Normal distribution squashed through tanh; support is (-1, 1)."""

    def __init__(self, loc, scale):
        self.loc = loc
        self.scale = scale
        self.base_dist = pyd.Normal(loc, scale)
        super().__init__(self.base_dist, [TanhTransform()])

    @property
    def mean(self):
        """Push the base mean through the transform chain (i.e. tanh(loc))."""
        value = self.loc
        for transform in self.transforms:
            value = transform(value)
        return value

    def entropy(self):
        # NOTE(review): entropy of the *base* Gaussian, not of the squashed
        # distribution — preserved as-is.
        return self.base_dist.entropy()
| 1,720 |
Python
| 28.169491 | 137 | 0.647093 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/a2c_discrete.py
|
from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.algos_torch import ppg_aux
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class DiscreteA2CAgent(a2c_common.DiscreteA2CBase):
    def __init__(self, base_name, config):
        a2c_common.DiscreteA2CBase.__init__(self, base_name, config)
        obs_shape = self.obs_shape

        # Local network-build config (note: shadows the `config` argument).
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_actors * self.num_agents,
            'value_size': self.env_info.get('value_size',1)
        }

        self.model = self.network.build(config)
        self.model.to(self.ppo_device)
        self.init_rnn_from_model(self.model)
        self.last_lr = float(self.last_lr)

        self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)

        if self.normalize_input:
            # Dict observation spaces need the per-key normalizer variant.
            if isinstance(self.observation_space, gym.spaces.Dict):
                self.running_mean_std = RunningMeanStdObs(obs_shape).to(self.ppo_device)
            else:
                self.running_mean_std = RunningMeanStd(obs_shape).to(self.ppo_device)

        if self.has_central_value:
            cv_config = {
                'state_shape' : self.state_shape,
                'value_size' : self.value_size,
                'ppo_device' : self.ppo_device,
                'num_agents' : self.num_agents,
                'num_steps' : self.horizon_length,
                'num_actors' : self.num_actors,
                'num_actions' : self.actions_num,
                'seq_len' : self.seq_len,
                'model' : self.central_value_config['network'],
                'config' : self.central_value_config,
                'writter' : self.writer,
                'max_epochs' : self.max_epochs,
                'multi_gpu' : self.multi_gpu
            }
            self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)

        self.use_experimental_cv = self.config.get('use_experimental_cv', False)
        self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
        if 'phasic_policy_gradients' in self.config:
            self.has_phasic_policy_gradients = True
            self.ppg_aux_loss = ppg_aux.PPGAux(self, self.config['phasic_policy_gradients'])
        # NOTE(review): when the key is absent, `self.has_phasic_policy_gradients`
        # must already exist (presumably set False by the base class) or the
        # next statement raises AttributeError — confirm in a2c_common.
        self.has_value_loss = (self.has_central_value \
                                and self.use_experimental_cv) \
                                or not self.has_phasic_policy_gradients
        self.algo_observer.after_init(self)
    def update_epoch(self):
        """Advance and return the epoch counter."""
        self.epoch_num += 1
        return self.epoch_num
    def save(self, fn):
        """Serialize full training state (weights + optimizers) to `fn`."""
        state = self.get_full_state_weights()
        torch_ext.save_checkpoint(fn, state)
    def restore(self, fn):
        """Load a checkpoint produced by save()."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.set_full_state_weights(checkpoint)
def get_masked_action_values(self, obs, action_masks):
    """Evaluate the policy on `obs` with invalid actions masked out.

    Used by discrete / multi-discrete agents during rollout collection.
    Returns the model's result dict with 'action_masks' attached, and with
    the value estimate replaced by the central critic's when one is used.
    """
    processed_obs = self._preproc_obs(obs['obs'])
    action_masks = torch.BoolTensor(action_masks).to(self.ppo_device)
    input_dict = {
        'is_train': False,
        'prev_actions': None,
        'obs' : processed_obs,
        'action_masks' : action_masks,
        'rnn_states' : self.rnn_states
    }

    # inference only — no gradients needed during rollout
    with torch.no_grad():
        res_dict = self.model(input_dict)
        if self.has_central_value:
            input_dict = {
                'is_train': False,
                'states' : obs['states'],
                #'actions' : action,
            }
            value = self.get_central_value(input_dict)
            res_dict['values'] = value
        if self.normalize_value:
            # NOTE(review): the normalized value is only bound to the local
            # `value` and never written back into res_dict — and `value` is
            # undefined here when has_central_value is False. Looks like a
            # latent bug; confirm against the upstream rl_games version.
            value = self.value_mean_std(value, True)
    if self.is_multi_discrete:
        # per-head masks arrive as a list of tensors; concatenate for storage
        action_masks = torch.cat(action_masks, dim=-1)
    res_dict['action_masks'] = action_masks
    return res_dict
def train_actor_critic(self, input_dict):
    """Run one optimization step on a minibatch.

    Switches the model to train mode, computes and applies gradients, then
    pushes the (possibly updated) learning rate into every optimizer param
    group. Returns the stats tuple produced by `calc_gradients`.
    """
    self.set_train()
    self.calc_gradients(input_dict)
    current_lr = self.last_lr
    for group in self.optimizer.param_groups:
        group['lr'] = current_lr
    return self.train_result
def calc_gradients(self, input_dict):
    """Compute PPO losses on one minibatch and apply an optimizer step.

    Expects `input_dict` with keys: 'old_values', 'old_logp_actions',
    'advantages', 'returns', 'actions', 'obs' (plus 'action_masks' and the
    RNN keys when enabled). Results are published via self.train_result as
    (a_loss, c_loss, entropy, kl_dist, last_lr, lr_mul).
    """
    value_preds_batch = input_dict['old_values']
    old_action_log_probs_batch = input_dict['old_logp_actions']
    advantage = input_dict['advantages']
    return_batch = input_dict['returns']
    actions_batch = input_dict['actions']
    obs_batch = input_dict['obs']
    obs_batch = self._preproc_obs(obs_batch)

    lr = self.last_lr
    kl = 1.0
    lr_mul = 1.0  # placeholder multiplier; reported in train_result
    curr_e_clip = lr_mul * self.e_clip

    batch_dict = {
        'is_train': True,
        'prev_actions': actions_batch,
        'obs' : obs_batch,
    }
    if self.use_action_masks:
        batch_dict['action_masks'] = input_dict['action_masks']
    rnn_masks = None
    if self.is_rnn:
        rnn_masks = input_dict['rnn_masks']
        batch_dict['rnn_states'] = input_dict['rnn_states']
        batch_dict['seq_length'] = self.seq_len

    # forward + loss under autocast so mixed precision covers the whole graph
    with torch.cuda.amp.autocast(enabled=self.mixed_precision):
        res_dict = self.model(batch_dict)
        action_log_probs = res_dict['prev_neglogp']
        values = res_dict['values']
        entropy = res_dict['entropy']
        a_loss = common_losses.actor_loss(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)

        if self.has_value_loss:
            c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
        else:
            # value head trained elsewhere (e.g. PPG aux phase) — zero it out
            c_loss = torch.zeros(1, device=self.ppo_device)
        # mask out padded RNN steps before averaging the per-sample losses
        losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1)], rnn_masks)
        a_loss, c_loss, entropy = losses[0], losses[1], losses[2]

        loss = a_loss + 0.5 *c_loss * self.critic_coef - entropy * self.entropy_coef
        if self.multi_gpu:
            self.optimizer.zero_grad()
        else:
            # setting grads to None is cheaper than zeroing in-place
            for param in self.model.parameters():
                param.grad = None

    # backward outside autocast; scaler handles loss scaling for fp16
    self.scaler.scale(loss).backward()
    if self.truncate_grads:
        if self.multi_gpu:
            # horovod-style path: sync grads, unscale, clip, then step
            self.optimizer.synchronize()
            self.scaler.unscale_(self.optimizer)
            nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
            with self.optimizer.skip_synchronize():
                self.scaler.step(self.optimizer)
                self.scaler.update()
        else:
            self.scaler.unscale_(self.optimizer)
            nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
            self.scaler.step(self.optimizer)
            self.scaler.update()
    else:
        self.scaler.step(self.optimizer)
        self.scaler.update()

    with torch.no_grad():
        # cheap KL proxy: squared diff of (neg) log-probs, not an exact KL
        kl_dist = 0.5 * ((old_action_log_probs_batch - action_log_probs)**2)
        if self.is_rnn:
            kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel()  # / sum_mask
        else:
            kl_dist = kl_dist.mean()
    if self.has_phasic_policy_gradients:
        # PPG: value head is trained by the auxiliary loss instead
        c_loss = self.ppg_aux_loss.train_value(self,input_dict)

    self.train_result = (a_loss, c_loss, entropy, kl_dist,self.last_lr, lr_mul)
| 7,889 |
Python
| 38.848485 | 142 | 0.566865 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/ppg_aux.py
|
from rl_games.common import tr_helpers
from rl_games.algos_torch import torch_ext
from rl_games.common import common_losses
from rl_games.common.datasets import DatasetList
import torch
from torch import nn
from torch import optim
import copy
class PPGAux:
    """Phasic Policy Gradient auxiliary trainer.

    Accumulates rollout datasets over several policy epochs and, every
    `n_aux` epochs, runs an auxiliary phase that trains the value head and
    regularizes the policy toward a frozen snapshot (KL term).
    `algo` is the owning A2C-style agent; its model/optimizer settings are
    reused here.
    """
    def __init__(self, algo, config):
        self.config = config
        self.writer = algo.writer
        self.mini_epoch = config['mini_epochs']
        self.mini_batch = config['minibatch_size']
        self.mixed_precision = algo.mixed_precision
        self.is_rnn = algo.network.is_rnn()
        self.kl_coef = config.get('kl_coef', 1.0)
        # run the auxiliary phase every n_aux policy epochs
        self.n_aux = config.get('n_aux', 16)
        self.is_continuous = True
        self.last_lr = config['learning_rate']
        # optimizer over ALL model params — used by the aux phase (calc_gradients)
        self.optimizer = optim.Adam(algo.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=algo.weight_decay)
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.mixed_precision)
        # value_optimizer only sees the value head: temporarily freeze the rest,
        # snapshot the trainable set, then restore requires_grad
        self._freeze_grads(algo.model)
        self.value_optimizer = optim.Adam(filter(lambda p: p.requires_grad, algo.model.parameters()), float(self.last_lr), eps=1e-08, weight_decay=algo.weight_decay)
        self.value_scaler = torch.cuda.amp.GradScaler(enabled=self.mixed_precision)
        self._unfreeze_grads(algo.model)
        self.dataset_list = DatasetList()
        # NOTE(review): self.seq_len is never set on this class, but the RNN
        # branches below read it — an RNN config would raise AttributeError.
        # Probably algo.seq_len was intended; confirm upstream.

    def _freeze_grads(self, model):
        # freeze everything except the value head's weight/bias
        for param in model.parameters():
            param.requires_grad = False
        model.a2c_network.value.weight.requires_grad = True
        model.a2c_network.value.bias.requires_grad = True

    def _unfreeze_grads(self, model):
        # re-enable gradients for the whole model
        for param in model.parameters():
            param.requires_grad = True

    def train_value(self, algo, input_dict):
        """One clipped value-loss step on a minibatch using value_optimizer
        (which only updates the value head). Returns the detached loss."""
        value_preds_batch = input_dict['old_values']
        return_batch = input_dict['returns']
        obs_batch = input_dict['obs']
        actions_batch = input_dict['actions']
        obs_batch = algo._preproc_obs(obs_batch)

        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs' : obs_batch,
        }
        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len  # see NOTE(review) in __init__

        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = algo.model(batch_dict)
            values = res_dict['values']
            c_loss = common_losses.critic_loss(value_preds_batch, values, algo.e_clip, return_batch, algo.clip_value)
            losses, sum_mask = torch_ext.apply_masks([c_loss], rnn_masks)
            c_loss = losses[0]
            loss = c_loss
            if algo.multi_gpu:
                self.value_optimizer.zero_grad()
            else:
                for param in algo.model.parameters():
                    param.grad = None

        self.value_scaler.scale(loss).backward()
        if algo.truncate_grads:
            if algo.multi_gpu:
                # horovod-style: sync, unscale, clip, step inside skip_synchronize
                self.value_optimizer.synchronize()
                self.value_scaler.unscale_(self.value_optimizer)
                nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
                with self.value_optimizer.skip_synchronize():
                    self.value_scaler.step(self.value_optimizer)
                    self.value_scaler.update()
            else:
                self.value_scaler.unscale_(self.value_optimizer)
                nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
                self.value_scaler.step(self.value_optimizer)
                self.value_scaler.update()
        else:
            self.value_scaler.step(self.value_optimizer)
            self.value_scaler.update()

        return loss.detach()

    def update(self, algo):
        """Stash the agent's current rollout dataset for the next aux phase."""
        self.dataset_list.add_dataset(algo.dataset)

    def train_net(self, algo):
        """Run the auxiliary phase if due (every n_aux epochs); otherwise
        just accumulate the dataset and return."""
        self.update(algo)
        if algo.epoch_num % self.n_aux != 0:
            return
        # frozen policy snapshot the KL term pulls toward
        self.old_model = copy.deepcopy(algo.model)
        self.old_model.eval()
        dataset = self.dataset_list

        for _ in range(self.mini_epoch):
            for idx in range(len(dataset)):
                loss_c, loss_kl = self.calc_gradients(algo, dataset[idx])
                # NOTE(review): avg is recomputed from only the LAST minibatch
                # loss; earlier losses are discarded. Looks unintended.
                avg_loss_c = loss_c / len(dataset)
                avg_loss_kl = loss_kl / len(dataset)

        if self.writer != None:
            self.writer.add_scalar('losses/pgg_loss_c', avg_loss_c, algo.frame)
            self.writer.add_scalar('losses/pgg_loss_kl', avg_loss_kl, algo.frame)
        self.dataset_list.clear()

    def calc_gradients(self, algo, input_dict):
        """Joint aux step: clipped value loss + kl_coef * policy-distance to
        the frozen snapshot, applied through self.optimizer (all params).
        Returns (c_loss, kl_loss)."""
        value_preds_batch = input_dict['old_values']
        return_batch = input_dict['returns']
        obs_batch = input_dict['obs']
        actions_batch = input_dict['actions']
        obs_batch = algo._preproc_obs(obs_batch)

        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs' : obs_batch,
        }
        #if self.use_action_masks:
        #    batch_dict['action_masks'] = input_dict['action_masks']

        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len  # see NOTE(review) in __init__

        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            with torch.no_grad():
                # copy() so the old model can't mutate the shared batch dict
                old_dict = self.old_model(batch_dict.copy())

            res_dict = algo.model(batch_dict)
            values = res_dict['values']

            if 'mu' in res_dict:
                old_mu_batch = input_dict['mu']
                old_sigma_batch = input_dict['sigma']
                mu = res_dict['mus']
                sigma = res_dict['sigmas']
                #kl_loss = torch_ext.policy_kl(mu, sigma.detach(), old_mu_batch, old_sigma_batch, False)
                # L1 distance on means used as a cheap stand-in for the KL
                kl_loss = torch.abs(mu - old_mu_batch)
            else:
                kl_loss = algo.model.kl(res_dict, old_dict)
            c_loss = common_losses.critic_loss(value_preds_batch, values, algo.e_clip, return_batch, algo.clip_value)
            losses, sum_mask = torch_ext.apply_masks([c_loss, kl_loss.unsqueeze(1)], rnn_masks)
            c_loss, kl_loss = losses[0], losses[1]

            loss = c_loss + kl_loss * self.kl_coef

            if algo.multi_gpu:
                self.optimizer.zero_grad()
            else:
                for param in algo.model.parameters():
                    param.grad = None

        self.scaler.scale(loss).backward()
        if algo.truncate_grads:
            if algo.multi_gpu:
                self.optimizer.synchronize()
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
                with self.optimizer.skip_synchronize():
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
            else:
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
                self.scaler.step(self.optimizer)
                self.scaler.update()
        else:
            self.scaler.step(self.optimizer)
            self.scaler.update()

        return c_loss, kl_loss
| 7,361 |
Python
| 39.010869 | 165 | 0.570439 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/central_value.py
|
import torch
from torch import nn
import numpy as np
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.common import schedulers
class CentralValueTrain(nn.Module):
    """Centralized critic for multi-agent PPO.

    Owns its own value network, optimizer, LR schedule and (optionally) an
    input normalizer; trained on the global `state` rather than per-agent
    observations. The per-agent value estimate is the shared central value
    repeated across agents.
    """
    def __init__(self, state_shape, value_size, ppo_device, num_agents, num_steps, num_actors, num_actions, seq_len, model, config, writter, max_epochs, multi_gpu):
        nn.Module.__init__(self)
        self.ppo_device = ppo_device
        self.num_agents, self.num_steps, self.num_actors, self.seq_len = num_agents, num_steps, num_actors, seq_len
        self.num_actions = num_actions
        self.state_shape = state_shape
        self.value_size = value_size
        self.max_epochs = max_epochs
        self.multi_gpu = multi_gpu
        self.truncate_grads = config.get('truncate_grads', False)
        state_config = {
            'value_size' : value_size,
            'input_shape' : state_shape,
            'actions_num' : num_actions,
            'num_agents' : num_agents,
            'num_seqs' : num_actors
        }
        self.config = config
        self.model = model.build('cvalue', **state_config)
        self.lr = float(config['learning_rate'])
        self.linear_lr = config.get('lr_schedule') == 'linear'
        if self.linear_lr:
            self.scheduler = schedulers.LinearScheduler(self.lr,
                max_steps=self.max_epochs,
                apply_to_entropy=False,
                start_entropy_coef=0)
        else:
            self.scheduler = schedulers.IdentityScheduler()
        self.mini_epoch = config['mini_epochs']
        self.mini_batch = config['minibatch_size']
        self.num_minibatches = self.num_steps * self.num_actors // self.mini_batch
        self.clip_value = config['clip_value']
        self.normalize_input = config['normalize_input']
        self.writter = writter  # (sic) tensorboard writer; name kept from config key
        self.weight_decay = config.get('weight_decay', 0.0)
        self.optimizer = torch.optim.Adam(self.model.parameters(), float(self.lr), eps=1e-08, weight_decay=self.weight_decay)
        self.frame = 0
        self.epoch_num = 0
        self.running_mean_std = None
        self.grad_norm = config.get('grad_norm', 1)
        # NOTE(review): truncate_grads is assigned three times (here, above,
        # and as `truncate_grad`); only self.truncate_grads is read below.
        self.truncate_grads = config.get('truncate_grads', False)
        self.e_clip = config.get('e_clip', 0.2)
        self.truncate_grad = self.config.get('truncate_grads', False)
        if self.normalize_input:
            self.running_mean_std = RunningMeanStd(state_shape)

        self.is_rnn = self.model.is_rnn()
        self.rnn_states = None
        self.batch_size = self.num_steps * self.num_actors
        if self.is_rnn:
            self.rnn_states = self.model.get_default_rnn_state()
            self.rnn_states = [s.to(self.ppo_device) for s in self.rnn_states]
            num_seqs = self.num_steps * self.num_actors // self.seq_len
            assert((self.num_steps * self.num_actors // self.num_minibatches) % self.seq_len == 0)
            # per-sequence hidden-state snapshots gathered during rollout
            self.mb_rnn_states = [torch.zeros((s.size()[0], num_seqs, s.size()[2]), dtype = torch.float32, device=self.ppo_device) for s in self.rnn_states]

        self.dataset = datasets.PPODataset(self.batch_size, self.mini_batch, True, self.is_rnn, self.ppo_device, self.seq_len)

    def update_lr(self, lr):
        """Set the optimizer learning rate (broadcast across workers first
        when multi_gpu)."""
        if self.multi_gpu:
            # NOTE(review): self.hvd is never assigned in this class — the
            # multi_gpu path would raise AttributeError. Confirm upstream.
            lr_tensor = torch.tensor([lr])
            self.hvd.broadcast_value(lr_tensor, 'cv_learning_rate')
            lr = lr_tensor.item()

        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

    def get_stats_weights(self):
        """Return the input-normalizer state dict (empty when disabled)."""
        if self.normalize_input:
            return self.running_mean_std.state_dict()
        else:
            return {}

    def set_stats_weights(self, weights):
        # NOTE(review): unconditionally dereferences running_mean_std, which
        # is None when normalize_input is False.
        self.running_mean_std.load_state_dict(weights)

    def update_dataset(self, batch_dict):
        """Fold a finished rollout into the training dataset, reshaping
        multi-agent tensors down to one shared-critic row per env."""
        value_preds = batch_dict['old_values']
        returns = batch_dict['returns']
        actions = batch_dict['actions']
        rnn_masks = batch_dict['rnn_masks']
        if self.num_agents > 1:
            res = self.update_multiagent_tensors(value_preds, returns, actions, rnn_masks)
            batch_dict['old_values'] = res[0]
            batch_dict['returns'] = res[1]
            batch_dict['actions'] = res[2]

        if self.is_rnn:
            batch_dict['rnn_states'] = self.mb_rnn_states
        if self.num_agents > 1:
            rnn_masks = res[3]
            batch_dict['rnn_masks'] = rnn_masks
        self.dataset.update_values_dict(batch_dict)

    def _preproc_obs(self, obs_batch):
        """Recursively scale uint8 images to [0,1] and apply input
        normalization when enabled."""
        if type(obs_batch) is dict:
            for k,v in obs_batch.items():
                obs_batch[k] = self._preproc_obs(v)
        else:
            if obs_batch.dtype == torch.uint8:
                obs_batch = obs_batch.float() / 255.0
        if self.normalize_input:
            obs_batch = self.running_mean_std(obs_batch)
        return obs_batch

    def pre_step_rnn(self, rnn_indices, state_indices):
        """Snapshot current hidden states into the minibatch buffers before
        an env step (one slot per sequence)."""
        if self.num_agents > 1:
            # collapse agent dimension: one shared critic state per env
            rnn_indices = rnn_indices[::self.num_agents]
            shifts = rnn_indices % (self.num_steps // self.seq_len)
            rnn_indices = (rnn_indices - shifts) // self.num_agents + shifts
            state_indices = state_indices[::self.num_agents] // self.num_agents

        for s, mb_s in zip(self.rnn_states, self.mb_rnn_states):
            mb_s[:, rnn_indices, :] = s[:, state_indices, :]

    def post_step_rnn(self, all_done_indices):
        """Zero the hidden state of every env that just finished an episode."""
        all_done_indices = all_done_indices[::self.num_agents] // self.num_agents
        for s in self.rnn_states:
            s[:,all_done_indices,:] = s[:,all_done_indices,:] * 0.0

    def forward(self, input_dict):
        value, rnn_states = self.model(input_dict)
        return value, rnn_states

    def get_value(self, input_dict):
        """Inference-path value estimate for the rollout; repeats the shared
        value across agents so callers get one row per agent."""
        self.eval()
        obs_batch = input_dict['states']
        actions = input_dict.get('actions', None)
        obs_batch = self._preproc_obs(obs_batch)
        value, self.rnn_states = self.forward({'obs' : obs_batch, 'actions': actions,
                                                'rnn_states': self.rnn_states})
        if self.num_agents > 1:
            value = value.repeat(1, self.num_agents)
            value = value.view(value.size()[0]*self.num_agents, -1)

        return value

    def train_critic(self, input_dict):
        """One gradient step on one minibatch; returns the scalar loss."""
        self.train()
        loss = self.calc_gradients(input_dict)
        return loss.item()

    def update_multiagent_tensors(self, value_preds, returns, actions, rnn_masks):
        """Reorder (actor, agent, step) tensors agent-major and keep only the
        first batch_size rows, i.e. one shared-critic row per env-step."""
        batch_size = self.batch_size
        ma_batch_size = self.num_actors * self.num_agents * self.num_steps
        value_preds = value_preds.view(self.num_actors, self.num_agents, self.num_steps, self.value_size).transpose(0,1)
        returns = returns.view(self.num_actors, self.num_agents, self.num_steps, self.value_size).transpose(0,1)
        value_preds = value_preds.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
        returns = returns.contiguous().view(ma_batch_size, self.value_size)[:batch_size]

        if self.is_rnn:
            rnn_masks = rnn_masks.view(self.num_actors, self.num_agents, self.num_steps).transpose(0,1)
            rnn_masks = rnn_masks.flatten(0)[:batch_size]
        return value_preds, returns, actions, rnn_masks

    def train_net(self):
        """Full training pass: mini_epoch sweeps over the dataset, then LR
        schedule update and tensorboard logging. Returns the average loss."""
        self.train()
        loss = 0
        for _ in range(self.mini_epoch):
            for idx in range(len(self.dataset)):
                loss += self.train_critic(self.dataset[idx])
        avg_loss = loss / (self.mini_epoch * self.num_minibatches)
        self.epoch_num += 1
        self.lr, _ = self.scheduler.update(self.lr, 0, self.epoch_num, 0, 0)
        self.update_lr(self.lr)
        self.frame += self.batch_size
        if self.writter != None:
            self.writter.add_scalar('losses/cval_loss', avg_loss, self.frame)
            self.writter.add_scalar('info/cval_lr', self.lr, self.frame)
        return avg_loss

    def calc_gradients(self, batch):
        """Clipped value loss on one minibatch + optimizer step; returns the
        (attached) loss tensor."""
        obs_batch = self._preproc_obs(batch['obs'])
        value_preds_batch = batch['old_values']
        returns_batch = batch['returns']
        actions_batch = batch['actions']
        rnn_masks_batch = batch.get('rnn_masks')

        batch_dict = {'obs' : obs_batch,
                    'actions' : actions_batch,
                    'seq_length' : self.seq_len }
        if self.is_rnn:
            batch_dict['rnn_states'] = batch['rnn_states']

        values, _ = self.forward(batch_dict)
        loss = common_losses.critic_loss(value_preds_batch, values, self.e_clip, returns_batch, self.clip_value)
        losses, _ = torch_ext.apply_masks([loss], rnn_masks_batch)
        loss = losses[0]
        if self.multi_gpu:
            self.optimizer.zero_grad()
        else:
            for param in self.model.parameters():
                param.grad = None
        loss.backward()

        #TODO: Refactor this ugliest code of they year
        if self.truncate_grads:
            if self.multi_gpu:
                self.optimizer.synchronize()
                #self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                with self.optimizer.skip_synchronize():
                    self.optimizer.step()
            else:
                #self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                self.optimizer.step()
        else:
            self.optimizer.step()

        return loss
| 9,703 |
Python
| 41.561403 | 164 | 0.587241 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/models.py
|
import rl_games.algos_torch.layers
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
import rl_games.common.divergence as divergence
from rl_games.algos_torch.torch_ext import CategoricalMasked
from torch.distributions import Categorical
from rl_games.algos_torch.sac_helper import SquashedNormal
class BaseModel:
    """Minimal interface shared by all model wrappers.

    Concrete models override these flags; the defaults describe a plain
    feed-forward model with a shared actor-critic trunk.
    """

    def __init__(self):
        pass

    def is_rnn(self):
        """Feed-forward unless a subclass says otherwise."""
        return False

    def is_separate_critic(self):
        """Shared trunk by default; no separate critic network."""
        return False
class ModelA2C(BaseModel):
    """Discrete-action A2C/PPO model: wraps a network builder and produces a
    Network module that samples from a (masked) categorical distribution."""
    def __init__(self, network):
        BaseModel.__init__(self)
        self.network_builder = network

    def build(self, config):
        """Build the underlying a2c network and wrap it."""
        return ModelA2C.Network(self.network_builder.build('a2c', **config))

    class Network(nn.Module):
        def __init__(self, a2c_network):
            nn.Module.__init__(self)
            self.a2c_network = a2c_network

        def is_rnn(self):
            return self.a2c_network.is_rnn()

        def get_default_rnn_state(self):
            return self.a2c_network.get_default_rnn_state()

        def kl(self, p_dict, q_dict):
            """KL divergence between two categorical policies given their logits."""
            p = p_dict['logits']
            q = q_dict['logits']
            return divergence.d_kl_discrete(p, q)

        def forward(self, input_dict):
            """Train mode: score 'prev_actions' (neglogp, entropy).
            Eval mode: sample an action from the masked categorical."""
            is_train = input_dict.get('is_train', True)
            action_masks = input_dict.get('action_masks', None)
            prev_actions = input_dict.get('prev_actions', None)
            logits, value, states = self.a2c_network(input_dict)
            if is_train:
                categorical = CategoricalMasked(logits=logits, masks=action_masks)
                prev_neglogp = -categorical.log_prob(prev_actions)
                entropy = categorical.entropy()
                result = {
                    'prev_neglogp' : torch.squeeze(prev_neglogp),
                    'logits' : categorical.logits,
                    'values' : value,
                    'entropy' : entropy,
                    'rnn_states' : states
                }
                return result
            else:
                categorical = CategoricalMasked(logits=logits, masks=action_masks)
                selected_action = categorical.sample().long()
                neglogp = -categorical.log_prob(selected_action)
                result = {
                    'neglogpacs' : torch.squeeze(neglogp),
                    'values' : value,
                    'actions' : selected_action,
                    'logits' : categorical.logits,
                    'rnn_states' : states
                }
                return result
class ModelA2CMultiDiscrete(BaseModel):
    """Multi-discrete A2C/PPO model: one categorical head per action
    dimension; per-head neglogp/entropy are summed over heads."""
    def __init__(self, network):
        BaseModel.__init__(self)
        self.network_builder = network

    def build(self, config):
        return ModelA2CMultiDiscrete.Network(self.network_builder.build('a2c', **config))

    class Network(nn.Module):
        def __init__(self, a2c_network):
            nn.Module.__init__(self)
            self.a2c_network = a2c_network

        def is_rnn(self):
            return self.a2c_network.is_rnn()

        def get_default_rnn_state(self):
            return self.a2c_network.get_default_rnn_state()

        def kl(self, p_dict, q_dict):
            """Summed KL over the per-head categorical distributions."""
            p = p_dict['logits']
            q = q_dict['logits']
            return divergence.d_kl_discrete_list(p, q)

        def forward(self, input_dict):
            """`logits` here is a list of per-head logits tensors; masks (when
            present) are a parallel list."""
            is_train = input_dict.get('is_train', True)
            action_masks = input_dict.get('action_masks', None)
            prev_actions = input_dict.get('prev_actions', None)
            logits, value, states = self.a2c_network(input_dict)
            if is_train:
                if action_masks is None:
                    categorical = [Categorical(logits=logit) for logit in logits]
                else:
                    categorical = [CategoricalMasked(logits=logit, masks=mask) for logit, mask in zip(logits, action_masks)]
                # split the joint action column-wise, one column per head
                prev_actions = torch.split(prev_actions, 1, dim=-1)
                prev_neglogp = [-c.log_prob(a.squeeze()) for c,a in zip(categorical, prev_actions)]
                prev_neglogp = torch.stack(prev_neglogp, dim=-1).sum(dim=-1)
                entropy = [c.entropy() for c in categorical]
                entropy = torch.stack(entropy, dim=-1).sum(dim=-1)
                result = {
                    'prev_neglogp' : torch.squeeze(prev_neglogp),
                    'logits' : [c.logits for c in categorical],
                    'values' : value,
                    'entropy' : torch.squeeze(entropy),
                    'rnn_states' : states
                }
                return result
            else:
                if action_masks is None:
                    categorical = [Categorical(logits=logit) for logit in logits]
                else:
                    categorical = [CategoricalMasked(logits=logit, masks=mask) for logit, mask in zip(logits, action_masks)]
                selected_action = [c.sample().long() for c in categorical]
                neglogp = [-c.log_prob(a.squeeze()) for c,a in zip(categorical, selected_action)]
                selected_action = torch.stack(selected_action, dim=-1)
                neglogp = torch.stack(neglogp, dim=-1).sum(dim=-1)
                result = {
                    'neglogpacs' : torch.squeeze(neglogp),
                    'values' : value,
                    'actions' : selected_action,
                    'logits' : [c.logits for c in categorical],
                    'rnn_states' : states
                }
                return result
class ModelA2CContinuous(BaseModel):
    """Continuous-action A2C/PPO model where the network outputs (mu, sigma)
    of a diagonal Gaussian directly."""
    def __init__(self, network):
        BaseModel.__init__(self)
        self.network_builder = network

    def build(self, config):
        return ModelA2CContinuous.Network(self.network_builder.build('a2c', **config))

    class Network(nn.Module):
        def __init__(self, a2c_network):
            nn.Module.__init__(self)
            self.a2c_network = a2c_network

        def is_rnn(self):
            return self.a2c_network.is_rnn()

        def get_default_rnn_state(self):
            return self.a2c_network.get_default_rnn_state()

        def kl(self, p_dict, q_dict):
            """KL divergence between two diagonal Gaussians given (mu, sigma)."""
            p = p_dict['mu'], p_dict['sigma']
            q = q_dict['mu'], q_dict['sigma']
            return divergence.d_kl_normal(p, q)

        def forward(self, input_dict):
            """Train mode: score 'prev_actions'. Eval mode: sample an action.
            Both modes report per-sample entropy summed over action dims."""
            is_train = input_dict.get('is_train', True)
            prev_actions = input_dict.get('prev_actions', None)
            mu, sigma, value, states = self.a2c_network(input_dict)
            distr = torch.distributions.Normal(mu, sigma)
            # fix: entropy was only computed in the train branch, so the eval
            # branch raised NameError when building its result dict
            entropy = distr.entropy().sum(dim=-1)

            if is_train:
                prev_neglogp = -distr.log_prob(prev_actions).sum(dim=-1)
                result = {
                    'prev_neglogp' : torch.squeeze(prev_neglogp),
                    # fix: key was 'value'; every sibling model and the PPO
                    # trainer (calc_gradients) read res_dict['values']
                    'values' : value,
                    'entropy' : entropy,
                    'rnn_states' : states,
                    'mus' : mu,
                    'sigmas' : sigma
                }
                return result
            else:
                selected_action = distr.sample().squeeze()
                neglogp = -distr.log_prob(selected_action).sum(dim=-1)
                result = {
                    'neglogpacs' : torch.squeeze(neglogp),
                    'values' : torch.squeeze(value),
                    'actions' : selected_action,
                    'entropy' : entropy,
                    'rnn_states' : states,
                    'mus' : mu,
                    'sigmas' : sigma
                }
                return result
class ModelA2CContinuousLogStd(BaseModel):
    """Continuous-action A2C/PPO model where the network outputs log-std
    (state-independent in typical builders); sigma = exp(logstd)."""
    def __init__(self, network):
        BaseModel.__init__(self)
        self.network_builder = network

    def build(self, config):
        net = self.network_builder.build('a2c', **config)
        # NOTE(review): debug leftover — prints every parameter name on build
        for name, _ in net.named_parameters():
            print(name)
        return ModelA2CContinuousLogStd.Network(net)

    class Network(nn.Module):
        def __init__(self, a2c_network):
            nn.Module.__init__(self)
            self.a2c_network = a2c_network

        def is_rnn(self):
            return self.a2c_network.is_rnn()

        def get_default_rnn_state(self):
            return self.a2c_network.get_default_rnn_state()

        def forward(self, input_dict):
            """Train mode: score 'prev_actions' via the closed-form Gaussian
            neglogp. Eval mode: sample an action and score it."""
            is_train = input_dict.get('is_train', True)
            prev_actions = input_dict.get('prev_actions', None)
            mu, logstd, value, states = self.a2c_network(input_dict)
            sigma = torch.exp(logstd)
            distr = torch.distributions.Normal(mu, sigma)
            if is_train:
                entropy = distr.entropy().sum(dim=-1)
                prev_neglogp = self.neglogp(prev_actions, mu, sigma, logstd)
                result = {
                    'prev_neglogp' : torch.squeeze(prev_neglogp),
                    'values' : value,
                    'entropy' : entropy,
                    'rnn_states' : states,
                    'mus' : mu,
                    'sigmas' : sigma
                }
                return result
            else:
                selected_action = distr.sample()
                neglogp = self.neglogp(selected_action, mu, sigma, logstd)
                result = {
                    'neglogpacs' : torch.squeeze(neglogp),
                    'values' : value,
                    'actions' : selected_action,
                    'rnn_states' : states,
                    'mus' : mu,
                    'sigmas' : sigma
                }
                return result

        def neglogp(self, x, mean, std, logstd):
            """Negative log-likelihood of x under N(mean, std), summed over
            the action dimension (standard diagonal-Gaussian formula)."""
            return 0.5 * (((x - mean) / std)**2).sum(dim=-1) \
                + 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
                + logstd.sum(dim=-1)
class ModelSACContinuous(BaseModel):
    """SAC model wrapper: exposes actor / twin critics / target critics and a
    tanh-squashed Gaussian policy distribution."""
    def __init__(self, network):
        BaseModel.__init__(self)
        self.network_builder = network

    def build(self, config):
        return ModelSACContinuous.Network(self.network_builder.build('sac', **config))

    class Network(nn.Module):
        def __init__(self, sac_network):
            nn.Module.__init__(self)
            self.sac_network = sac_network

        def critic(self, obs, action):
            return self.sac_network.critic(obs, action)

        def critic_target(self, obs, action):
            return self.sac_network.critic_target(obs, action)

        def actor(self, obs):
            return self.sac_network.actor(obs)

        def is_rnn(self):
            return False

        def forward(self, input_dict):
            """Return the squashed-Gaussian action distribution for `obs`.
            NOTE(review): pop() mutates the caller's input_dict ('is_train'
            is removed) — intentional? Other models use get()."""
            is_train = input_dict.pop('is_train', True)
            mu, sigma = self.sac_network(input_dict)
            dist = SquashedNormal(mu, sigma)
            return dist
| 10,919 |
Python
| 36.142857 | 140 | 0.514699 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/model_builder.py
|
from rl_games.common import object_factory
import rl_games.algos_torch
from rl_games.algos_torch import network_builder
from rl_games.algos_torch import models
# Global registry of user-registered network builder classes, keyed by name.
NETWORK_REGISTRY = {}


def register_network(name, target_class):
    """Register `target_class` under `name`.

    Stored as a zero-config factory: it accepts (and ignores) arbitrary
    keyword arguments and returns a fresh instance of the class.
    """
    def _factory(**kwargs):
        return target_class()

    NETWORK_REGISTRY[name] = _factory
class ModelBuilder:
    """Factory façade: maps config names to model wrappers and network
    builders, including any networks added via register_network()."""
    def __init__(self):
        self.model_factory = object_factory.ObjectFactory()
        self.model_factory.register_builder('discrete_a2c', lambda network, **kwargs : models.ModelA2C(network))
        self.model_factory.register_builder('multi_discrete_a2c', lambda network, **kwargs : models.ModelA2CMultiDiscrete(network))
        self.model_factory.register_builder('continuous_a2c', lambda network, **kwargs : models.ModelA2CContinuous(network))
        self.model_factory.register_builder('continuous_a2c_logstd', lambda network, **kwargs : models.ModelA2CContinuousLogStd(network))
        self.model_factory.register_builder('soft_actor_critic', lambda network, **kwargs : models.ModelSACContinuous(network))
        #self.model_factory.register_builder('dqn', lambda network, **kwargs : models.AtariDQN(network))
        self.network_factory = object_factory.ObjectFactory()
        # seed with externally registered networks, then the built-ins
        self.network_factory.set_builders(NETWORK_REGISTRY)
        self.network_factory.register_builder('actor_critic', lambda **kwargs : network_builder.A2CBuilder())
        self.network_factory.register_builder('resnet_actor_critic', lambda **kwargs : network_builder.A2CResnetBuilder())
        self.network_factory.register_builder('rnd_curiosity', lambda **kwargs : network_builder.RNDCuriosityBuilder())
        self.network_factory.register_builder('soft_actor_critic', lambda **kwargs: network_builder.SACBuilder())

    def load(self, params):
        """Build a model from a config dict with 'model'/'network' sections:
        create the network builder, load its params, wrap it in the model."""
        self.model_name = params['model']['name']
        self.network_name = params['network']['name']

        network = self.network_factory.create(self.network_name)
        network.load(params['network'])

        model = self.model_factory.create(self.model_name, network=network)

        return model
| 2,062 |
Python
| 53.289472 | 137 | 0.723084 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/moving_mean_std.py
|
import torch
import torch.nn as nn
import numpy as np
'''
updates moving statistics with momentum
'''
class MovingMeanStd(nn.Module):
    """Input normalizer with exponential-moving-average statistics.

    In training mode each forward() call updates moving mean/var with
    momentum before normalizing. `unnorm=True` inverts the transform.
    NOTE(review): `norm_only` is stored but never used in forward().
    """
    def __init__(self, insize, momentum = 0.9998, epsilon=1e-05, per_channel=False, norm_only=False):
        super(MovingMeanStd, self).__init__()
        self.insize = insize
        self.epsilon = epsilon
        self.momentum = momentum
        self.norm_only = norm_only
        self.per_channel = per_channel
        if per_channel:
            # reduce over batch and spatial dims, keep the channel dim
            if len(self.insize) == 3:
                self.axis = [0,2,3]
            if len(self.insize) == 2:
                self.axis = [0,2]
            if len(self.insize) == 1:
                self.axis = [0]
            in_size = self.insize[0]
        else:
            self.axis = [0]
            in_size = insize

        # float64 buffers to reduce accumulation error over long runs
        self.register_buffer("moving_mean", torch.zeros(in_size, dtype = torch.float64))
        self.register_buffer("moving_var", torch.ones(in_size, dtype = torch.float64))

    def forward(self, input, unnorm=False):
        if self.training:
            mean = input.mean(self.axis) # along channel axis
            var = input.var(self.axis)
            # EMA update: new = old*momentum + batch*(1-momentum)
            self.moving_mean = self.moving_mean * self.momentum + mean * (1 - self.momentum)
            self.moving_var = self.moving_var * self.momentum + var * (1 - self.momentum)

        # change shape
        if self.per_channel:
            if len(self.insize) == 3:
                current_mean = self.moving_mean.view([1, self.insize[0], 1, 1]).expand_as(input)
                current_var = self.moving_var.view([1, self.insize[0], 1, 1]).expand_as(input)
            if len(self.insize) == 2:
                current_mean = self.moving_mean.view([1, self.insize[0], 1]).expand_as(input)
                current_var = self.moving_var.view([1, self.insize[0], 1]).expand_as(input)
            if len(self.insize) == 1:
                current_mean = self.moving_mean.view([1, self.insize[0]]).expand_as(input)
                current_var = self.moving_var.view([1, self.insize[0]]).expand_as(input)
        else:
            current_mean = self.moving_mean
            current_var = self.moving_var
        # get output
        if unnorm:
            # inverse transform: clamp the normalized input, then de-normalize
            y = torch.clamp(input, min=-5.0, max=5.0)
            y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
        else:
            y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
            y = torch.clamp(y, min=-5.0, max=5.0)
        return y
| 2,521 |
Python
| 42.482758 | 101 | 0.554145 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/layers.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class NoisyLinear(nn.Linear):
    """Linear layer with learned independent Gaussian noise on weights and
    bias (NoisyNet, Fortunato et al. 2017, independent-noise variant).

    Fresh noise epsilon ~ N(0, 1) is drawn on every forward pass; the
    effective weight is `weight + sigma_weight * epsilon_weight`.
    """
    def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):
        super(NoisyLinear, self).__init__(in_features, out_features, bias=bias)
        self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
        self.register_buffer("epsilon_weight", torch.zeros(out_features, in_features))
        if bias:
            self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))
            self.register_buffer("epsilon_bias", torch.zeros(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform(-std, std) init with std = sqrt(3 / fan_in).

        Also called by nn.Linear.__init__ before sigma params exist, so it
        must only touch weight/bias.
        """
        std = math.sqrt(3 / self.in_features)
        self.weight.data.uniform_(-std, std)
        # fix: nn.Linear sets self.bias to None when bias=False; the original
        # dereferenced it unconditionally and crashed on construction
        if self.bias is not None:
            self.bias.data.uniform_(-std, std)

    def forward(self, input):
        # resample noise every call (training and eval alike)
        self.epsilon_weight.normal_()
        bias = self.bias
        if bias is not None:
            self.epsilon_bias.normal_()
            bias = bias + self.sigma_bias * self.epsilon_bias.data
        return F.linear(input, self.weight + self.sigma_weight * self.epsilon_weight.data, bias)
class NoisyFactorizedLinear(nn.Linear):
    """NoisyNet linear layer with factorized Gaussian noise.

    Noise for the (out x in) weight matrix is the outer product of an
    out-sized and an in-sized noise vector, each transformed by
    f(x) = sign(x) * sqrt(|x|), which cuts the number of random samples from
    out*in to out+in (Fortunato et al. 2017).
    """
    def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):
        super(NoisyFactorizedLinear, self).__init__(in_features, out_features, bias=bias)
        sigma_init = sigma_zero / math.sqrt(in_features)
        self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
        self.register_buffer("epsilon_input", torch.zeros(1, in_features))
        self.register_buffer("epsilon_output", torch.zeros(out_features, 1))
        if bias:
            self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))

    def forward(self, input):
        # fix: typo `self.epsison_input` raised AttributeError on every call
        self.epsilon_input.normal_()
        self.epsilon_output.normal_()

        # factorized-noise transform f(x) = sign(x) * sqrt(|x|)
        func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x))
        eps_in = func(self.epsilon_input.data)
        eps_out = func(self.epsilon_output.data)

        bias = self.bias
        if bias is not None:
            bias = bias + self.sigma_bias * eps_out.t()
        # outer product of the two noise vectors -> full weight-noise matrix
        noise_v = torch.mul(eps_in, eps_out)
        return F.linear(input, self.weight + self.sigma_weight * noise_v, bias)
class LSTMWithDones(nn.Module):
    """Hand-rolled LSTM that resets hidden/cell state wherever `dones` is 1,
    so episode boundaries inside a rollout do not leak state across episodes.
    """
    def __init__(self, input_sz: int, hidden_sz: int):
        super().__init__()
        self.input_sz = input_sz
        self.hidden_size = hidden_sz
        # all four gates batched into one matmul: columns = [i, f, g, o]
        self.weight_ih = nn.Parameter(torch.Tensor(input_sz, hidden_sz * 4))
        self.weight_hh = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz * 4))
        self.bias = nn.Parameter(torch.Tensor(hidden_sz * 4))
        self.init_weights()

    def init_weights(self):
        """Xavier-uniform for matrices, zeros for vectors (bias)."""
        for p in self.parameters():
            if p.data.ndimension() >= 2:
                nn.init.xavier_uniform_(p.data)
            else:
                nn.init.zeros_(p.data)

    def forward(self, x, dones, init_states):
        """Assumes x is of shape (batch, sequence, feature).

        `dones` must broadcast against (batch, hidden) per step — e.g.
        (batch, sequence, 1). `init_states` = (h_0, c_0), each (batch, hidden);
        required (asserted). Returns (hidden_seq, (h_t, c_t)) with hidden_seq
        of shape (batch, sequence, hidden).
        """
        bs, seq_sz, _ = x.size()
        hidden_seq = []
        assert(init_states)
        h_t, c_t = init_states
        HS = self.hidden_size
        for t in range(seq_sz):
            d = dones[:, t]
            # zero the carried state at episode boundaries
            h_t = h_t * (1 - d)
            c_t = c_t * (1 - d)
            x_t = x[:, t, :]
            # batch the computations into a single matrix multiplication
            gates = x_t @ self.weight_ih + h_t @ self.weight_hh + self.bias
            i_t, f_t, g_t, o_t = (
                torch.sigmoid(gates[:, :HS]),  # input
                torch.sigmoid(gates[:, HS:HS*2]),  # forget
                torch.tanh(gates[:, HS*2:HS*3]),
                torch.sigmoid(gates[:, HS*3:]),  # output
            )
            c_t = f_t * c_t + i_t * g_t
            h_t = o_t * torch.tanh(c_t)
            hidden_seq.append(h_t.unsqueeze(0))
        # fix: each element is (1, batch, hidden); stacking along the time
        # axis requires dim=0 (dim=1 produced (1, seq*batch, hidden), which
        # contradicts the reshape comment below)
        hidden_seq = torch.cat(hidden_seq, dim=0)
        # reshape from shape (sequence, batch, feature) to (batch, sequence, feature)
        hidden_seq = hidden_seq.transpose(1, 0).contiguous()
        return hidden_seq, (h_t, c_t)
| 4,148 |
Python
| 39.67647 | 96 | 0.578833 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/network_builder.py
|
from rl_games.common import object_factory
from rl_games.algos_torch import torch_ext
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
import numpy as np
from rl_games.algos_torch.d2rl import D2RLNet
from rl_games.algos_torch.sac_helper import SquashedNormal
import ipdb
def _create_initializer(func, **kwargs):
return lambda v : func(v, **kwargs)
class NetworkBuilder:
    """Factory base class: subclasses parse a config dict via `load` and
    construct torch networks via `build`."""
    def __init__(self, **kwargs):
        pass
    def load(self, params):
        pass
    def build(self, name, **kwargs):
        pass
    def __call__(self, name, **kwargs):
        return self.build(name, **kwargs)
    class BaseNetwork(nn.Module):
        """nn.Module base providing name->factory registries for activations and
        weight initializers, plus helpers that assemble MLP / CNN / RNN
        sub-modules from config values."""
        def __init__(self, **kwargs):
            nn.Module.__init__(self, **kwargs)
            # maps config activation names to module factories
            self.activations_factory = object_factory.ObjectFactory()
            self.activations_factory.register_builder('relu', lambda **kwargs : nn.ReLU(**kwargs))
            self.activations_factory.register_builder('tanh', lambda **kwargs : nn.Tanh(**kwargs))
            self.activations_factory.register_builder('sigmoid', lambda **kwargs : nn.Sigmoid(**kwargs))
            self.activations_factory.register_builder('elu', lambda  **kwargs : nn.ELU(**kwargs))
            self.activations_factory.register_builder('selu', lambda **kwargs : nn.SELU(**kwargs))
            self.activations_factory.register_builder('softplus', lambda **kwargs : nn.Softplus(**kwargs))
            self.activations_factory.register_builder('None', lambda **kwargs : nn.Identity())
            # maps config initializer names to weight-init callables
            self.init_factory = object_factory.ObjectFactory()
            #self.init_factory.register_builder('normc_initializer', lambda **kwargs : normc_initializer(**kwargs))
            self.init_factory.register_builder('const_initializer', lambda **kwargs : _create_initializer(nn.init.constant_,**kwargs))
            self.init_factory.register_builder('orthogonal_initializer', lambda **kwargs : _create_initializer(nn.init.orthogonal_,**kwargs))
            self.init_factory.register_builder('glorot_normal_initializer', lambda **kwargs : _create_initializer(nn.init.xavier_normal_,**kwargs))
            self.init_factory.register_builder('glorot_uniform_initializer', lambda **kwargs : _create_initializer(nn.init.xavier_uniform_,**kwargs))
            self.init_factory.register_builder('variance_scaling_initializer', lambda **kwargs : _create_initializer(torch_ext.variance_scaling_initializer,**kwargs))
            self.init_factory.register_builder('random_uniform_initializer', lambda **kwargs : _create_initializer(nn.init.uniform_,**kwargs))
            self.init_factory.register_builder('kaiming_normal', lambda **kwargs : _create_initializer(nn.init.kaiming_normal_,**kwargs))
            self.init_factory.register_builder('orthogonal', lambda **kwargs : _create_initializer(nn.init.orthogonal_,**kwargs))
            self.init_factory.register_builder('default', lambda **kwargs : nn.Identity() )
        def is_separate_critic(self):
            return False
        def is_rnn(self):
            return False
        def get_default_rnn_state(self):
            return None
        def _calc_input_size(self, input_shape, cnn_layers=None):
            """Flattened feature size of the observation after the (optional) conv stack."""
            if cnn_layers is None:
                assert(len(input_shape) == 1)
                return input_shape[0]
            else:
                # probe with a dummy batch of one to infer the flattened width
                return nn.Sequential(*cnn_layers)(torch.rand(1, *(input_shape))).flatten(1).data.size(1)
        def _noisy_dense(self, inputs, units):
            # bug fix: the original returned `layers.NoisyFactorizedLinear(...)`,
            # but no `layers` module exists in this file (TF-era leftover);
            # use the NoisyFactorizedLinear class defined in this module.
            return NoisyFactorizedLinear(inputs, units)
        def _build_rnn(self, name, input, units, layers):
            """Construct an RNN module by config name ('identity'|'lstm'|'gru'|'sru')."""
            if name == 'identity':
                return torch_ext.IdentityRNN(input, units)
            if name == 'lstm':
                return torch.nn.LSTM(input, units, layers, batch_first=True)
            if name == 'gru':
                return torch.nn.GRU(input, units, layers, batch_first=True)
            if name == 'sru':
                from sru import SRU
                return SRU(input, units, layers, dropout=0, layer_norm=False)
        def _build_sequential_mlp(self,
        input_size,
        units,
        activation,
        dense_func,
        norm_only_first_layer=False,
        norm_func_name = None):
            """Stack of dense layers with activations and optional per-layer normalization."""
            print('build mlp:', input_size)
            in_size = input_size
            layers = []
            need_norm = True
            for unit in units:
                layers.append(dense_func(in_size, unit))
                layers.append(self.activations_factory.create(activation))
                if not need_norm:
                    continue
                if norm_only_first_layer and norm_func_name is not None:
                   need_norm = False
                if norm_func_name == 'layer_norm':
                    layers.append(torch.nn.LayerNorm(unit))
                elif norm_func_name == 'batch_norm':
                    layers.append(torch.nn.BatchNorm1d(unit))
                in_size = unit
            return nn.Sequential(*layers)
        def _build_mlp(self,
        input_size,
        units,
        activation,
        dense_func,
        norm_only_first_layer=False,
        norm_func_name = None,
        d2rl=False):
            """Build an MLP; dispatches to D2RL when configured."""
            if d2rl:
                act_layers = [self.activations_factory.create(activation) for i in range(len(units))]
                return D2RLNet(input_size, units, act_layers, norm_func_name)
            else:
                # bug fix: the original hard-coded norm_func_name=None and dropped
                # norm_only_first_layer, so `normalization` config never applied
                # to MLPs built through this path.
                return self._build_sequential_mlp(input_size, units, activation, dense_func,
                                                  norm_only_first_layer=norm_only_first_layer,
                                                  norm_func_name=norm_func_name)
        def _build_conv(self, ctype, **kwargs):
            """Dispatch conv-stack construction by config type name."""
            print('conv_name:', ctype)
            if ctype == 'conv2d':
                return self._build_cnn2d(**kwargs)
            if ctype == 'coord_conv2d':
                return self._build_cnn2d(conv_func=torch_ext.CoordConv2d, **kwargs)
            if ctype == 'conv1d':
                return self._build_cnn1d(**kwargs)
        def _build_cnn2d(self, input_shape, convs, activation, conv_func=torch.nn.Conv2d, norm_func_name=None):
            """2D conv stack from a list of conv config dicts (filters/kernel/strides/padding)."""
            in_channels = input_shape[0]
            layers = []
            for conv in convs:
                layers.append(conv_func(in_channels=in_channels,
                out_channels=conv['filters'],
                kernel_size=conv['kernel_size'],
                stride=conv['strides'], padding=conv['padding']))
                # reverts to plain Conv2d after the first layer — presumably so a
                # CoordConv applies to the first layer only; TODO confirm intent
                conv_func=torch.nn.Conv2d
                act = self.activations_factory.create(activation)
                layers.append(act)
                in_channels = conv['filters']
                if norm_func_name == 'layer_norm':
                    layers.append(torch_ext.LayerNorm2d(in_channels))
                elif norm_func_name == 'batch_norm':
                    layers.append(torch.nn.BatchNorm2d(in_channels))
            return nn.Sequential(*layers)
        def _build_cnn1d(self, input_shape, convs, activation, norm_func_name=None):
            """1D conv stack from a list of conv config dicts."""
            print('conv1d input shape:', input_shape)
            in_channels = input_shape[0]
            layers = []
            for conv in convs:
                layers.append(torch.nn.Conv1d(in_channels, conv['filters'], conv['kernel_size'], conv['strides'], conv['padding']))
                act = self.activations_factory.create(activation)
                layers.append(act)
                in_channels = conv['filters']
                if norm_func_name == 'layer_norm':
                    # NOTE(review): LayerNorm(in_channels) normalizes the trailing
                    # (length) dimension of a (N, C, L) tensor, which only matches
                    # channels if L == C — looks suspicious; left as-is.
                    layers.append(torch.nn.LayerNorm(in_channels))
                elif norm_func_name == 'batch_norm':
                    # bug fix: the original appended BatchNorm2d, which rejects the
                    # 3-D (N, C, L) output of Conv1d; BatchNorm1d is the correct op.
                    layers.append(torch.nn.BatchNorm1d(in_channels))
            return nn.Sequential(*layers)
class A2CBuilder(NetworkBuilder):
    """Builds actor-critic networks from a config dict.

    Supports an optional CNN frontend, an optional RNN (LSTM/GRU/SRU/identity,
    placed before or after the MLP), shared or separate actor/critic towers,
    and discrete / multi-discrete / continuous action heads.
    """
    def __init__(self, **kwargs):
        NetworkBuilder.__init__(self)
    def load(self, params):
        # stash the raw config; the inner Network parses it in its own load()
        self.params = params
    class Network(NetworkBuilder.BaseNetwork):
        def __init__(self, params, **kwargs):
            # kwargs supplied by the runner: action/observation specs and batching info
            actions_num = kwargs.pop('actions_num')
            input_shape = kwargs.pop('input_shape')
            self.value_size = kwargs.pop('value_size', 1)
            self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
            NetworkBuilder.BaseNetwork.__init__(self)
            self.load(params)
            # default every tower to an empty Sequential so forward() can always call them
            self.actor_cnn = nn.Sequential()
            self.critic_cnn = nn.Sequential()
            self.actor_mlp = nn.Sequential()
            self.critic_mlp = nn.Sequential()
            if self.has_cnn:
                # config gives (W, H, C); torch convolutions expect channels first
                input_shape = torch_ext.shape_whc_to_cwh(input_shape)
                cnn_args = {
                    'ctype' : self.cnn['type'],
                    'input_shape' : input_shape,
                    'convs' :self.cnn['convs'],
                    'activation' : self.cnn['activation'],
                    'norm_func_name' : self.normalization,
                }
                self.actor_cnn = self._build_conv(**cnn_args)
                if self.separate:
                    self.critic_cnn = self._build_conv( **cnn_args)
            mlp_input_shape = self._calc_input_size(input_shape, self.actor_cnn)
            in_mlp_shape = mlp_input_shape
            if len(self.units) == 0:
                out_size = mlp_input_shape
            else:
                out_size = self.units[-1]
            if self.has_rnn:
                if not self.is_rnn_before_mlp:
                    # MLP feeds the RNN; heads read the RNN output
                    rnn_in_size = out_size
                    out_size = self.rnn_units
                    if self.rnn_concat_input:
                        rnn_in_size += in_mlp_shape
                else:
                    # RNN feeds the MLP; heads read the MLP output
                    rnn_in_size = in_mlp_shape
                    in_mlp_shape = self.rnn_units
                if self.separate:
                    self.a_rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
                    self.c_rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
                    if self.rnn_ln:
                        self.a_layer_norm = torch.nn.LayerNorm(self.rnn_units)
                        self.c_layer_norm = torch.nn.LayerNorm(self.rnn_units)
                else:
                    self.rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
                    if self.rnn_ln:
                        self.layer_norm = torch.nn.LayerNorm(self.rnn_units)
            mlp_args = {
                'input_size' : in_mlp_shape,
                'units' : self.units,
                'activation' : self.activation,
                'norm_func_name' : self.normalization,
                'dense_func' : torch.nn.Linear,
                'd2rl' : self.is_d2rl,
                'norm_only_first_layer' : self.norm_only_first_layer
            }
            # # ipdb.set_trace()
            # actor_args = {
            #     'input_size' : in_mlp_shape,
            #     'units' : [256, 256],
            #     'activation' : self.activation,
            #     'norm_func_name' : self.normalization,
            #     'dense_func' : torch.nn.Linear,
            #     'd2rl' : self.is_d2rl,
            #     'norm_only_first_layer' : self.norm_only_first_layer
            # }
            self.actor_mlp = self._build_mlp(**mlp_args)
            if self.separate:
                self.critic_mlp = self._build_mlp(**mlp_args)
            # value head (and its optional output activation)
            self.value = torch.nn.Linear(out_size, self.value_size)
            self.value_act = self.activations_factory.create(self.value_activation)
            if self.is_discrete:
                self.logits = torch.nn.Linear(out_size, actions_num)
            '''
                for multidiscrete actions num is a tuple
            '''
            if self.is_multi_discrete:
                self.logits = torch.nn.ModuleList([torch.nn.Linear(out_size, num) for num in actions_num])
            if self.is_continuous:
                # Gaussian policy head: mean plus fixed (state-independent) or
                # state-dependent sigma
                self.mu = torch.nn.Linear(out_size, actions_num)
                self.mu_act = self.activations_factory.create(self.space_config['mu_activation'])
                mu_init = self.init_factory.create(**self.space_config['mu_init'])
                self.sigma_act = self.activations_factory.create(self.space_config['sigma_activation'])
                sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
                if self.space_config['fixed_sigma']:
                    self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=True, dtype=torch.float32), requires_grad=True)
                else:
                    self.sigma = torch.nn.Linear(out_size, actions_num)
            # apply configured initializers to all conv/linear weights
            mlp_init = self.init_factory.create(**self.initializer)
            if self.has_cnn:
                cnn_init = self.init_factory.create(**self.cnn['initializer'])
            for m in self.modules():
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                    cnn_init(m.weight)
                    if getattr(m, "bias", None) is not None:
                        torch.nn.init.zeros_(m.bias)
                if isinstance(m, nn.Linear):
                    mlp_init(m.weight)
                    if getattr(m, "bias", None) is not None:
                        torch.nn.init.zeros_(m.bias)
            if self.is_continuous:
                # policy-head initializers run last so they override mlp_init
                mu_init(self.mu.weight)
                if self.space_config['fixed_sigma']:
                    sigma_init(self.sigma)
                else:
                    sigma_init(self.sigma.weight)
        def forward(self, obs_dict):
            """Run the network; returns head outputs + value + new RNN states.

            obs_dict keys: 'obs' (required), 'rnn_states', 'seq_length'.
            """
            obs = obs_dict['obs']
            states = obs_dict.get('rnn_states', None)
            seq_length = obs_dict.get('seq_length', 1)
            if self.has_cnn:
                # for obs shape 4
                # input expected shape (B, W, H, C)
                # convert to (B, C, W, H)
                if len(obs.shape) == 4:
                    obs = obs.permute((0, 3, 1, 2))
            if self.separate:
                # separate actor and critic towers
                a_out = c_out = obs
                a_out = self.actor_cnn(a_out)
                a_out = a_out.contiguous().view(a_out.size(0), -1)
                c_out = self.critic_cnn(c_out)
                c_out = c_out.contiguous().view(c_out.size(0), -1)
                if self.has_rnn:
                    if not self.is_rnn_before_mlp:
                        a_out_in = a_out
                        c_out_in = c_out
                        a_out = self.actor_mlp(a_out_in)
                        c_out = self.critic_mlp(c_out_in)
                        if self.rnn_concat_input:
                            a_out = torch.cat([a_out, a_out_in], dim=1)
                            c_out = torch.cat([c_out, c_out_in], dim=1)
                    # fold the flat batch into (num_seqs, seq_length, features)
                    batch_size = a_out.size()[0]
                    num_seqs = batch_size // seq_length
                    a_out = a_out.reshape(num_seqs, seq_length, -1)
                    c_out = c_out.reshape(num_seqs, seq_length, -1)
                    if self.rnn_name == 'sru':
                        # SRU expects time-major input
                        a_out =a_out.transpose(0,1)
                        c_out =c_out.transpose(0,1)
                    # split incoming states between actor and critic RNNs
                    if len(states) == 2:
                        a_states = states[0]
                        c_states = states[1]
                    else:
                        a_states = states[:2]
                        c_states = states[2:]
                    a_out, a_states = self.a_rnn(a_out, a_states)
                    c_out, c_states = self.c_rnn(c_out, c_states)
                    if self.rnn_name == 'sru':
                        a_out = a_out.transpose(0,1)
                        c_out = c_out.transpose(0,1)
                    else:
                        if self.rnn_ln:
                            a_out = self.a_layer_norm(a_out)
                            c_out = self.c_layer_norm(c_out)
                    # flatten back to (batch, features)
                    a_out = a_out.contiguous().reshape(a_out.size()[0] * a_out.size()[1], -1)
                    c_out = c_out.contiguous().reshape(c_out.size()[0] * c_out.size()[1], -1)
                    if type(a_states) is not tuple:
                        a_states = (a_states,)
                        c_states = (c_states,)
                    states = a_states + c_states
                    if self.is_rnn_before_mlp:
                        a_out = self.actor_mlp(a_out)
                        c_out = self.critic_mlp(c_out)
                else:
                    a_out = self.actor_mlp(a_out)
                    c_out = self.critic_mlp(c_out)
                value = self.value_act(self.value(c_out))
                if self.is_discrete:
                    logits = self.logits(a_out)
                    return logits, value, states
                if self.is_multi_discrete:
                    logits = [logit(a_out) for logit in self.logits]
                    return logits, value, states
                if self.is_continuous:
                    mu = self.mu_act(self.mu(a_out))
                    if self.space_config['fixed_sigma']:
                        # broadcast the shared sigma parameter to the batch shape
                        sigma = mu * 0.0 + self.sigma_act(self.sigma)
                    else:
                        sigma = self.sigma_act(self.sigma(a_out))
                    return mu, sigma, value, states
            else:
                # shared tower for actor and critic
                out = obs
                out = self.actor_cnn(out)
                out = out.flatten(1)
                if self.has_rnn:
                    out_in = out
                    if not self.is_rnn_before_mlp:
                        out_in = out
                        out = self.actor_mlp(out)
                        if self.rnn_concat_input:
                            out = torch.cat([out, out_in], dim=1)
                    batch_size = out.size()[0]
                    num_seqs = batch_size // seq_length
                    out = out.reshape(num_seqs, seq_length, -1)
                    if len(states) == 1:
                        states = states[0]
                    if self.rnn_name == 'sru':
                        out = out.transpose(0,1)
                    out, states = self.rnn(out, states)
                    out = out.contiguous().reshape(out.size()[0] * out.size()[1], -1)
                    if self.rnn_name == 'sru':
                        out = out.transpose(0,1)
                    if self.rnn_ln:
                        out = self.layer_norm(out)
                    if self.is_rnn_before_mlp:
                        out = self.actor_mlp(out)
                    if type(states) is not tuple:
                        states = (states,)
                else:
                    out = self.actor_mlp(out)
                value = self.value_act(self.value(out))
                if self.central_value:
                    # central value network: only the value estimate is needed
                    return value, states
                if self.is_discrete:
                    logits = self.logits(out)
                    return logits, value, states
                if self.is_multi_discrete:
                    logits = [logit(out) for logit in self.logits]
                    return logits, value, states
                if self.is_continuous:
                    mu = self.mu_act(self.mu(out))
                    if self.space_config['fixed_sigma']:
                        sigma = self.sigma_act(self.sigma)
                    else:
                        sigma = self.sigma_act(self.sigma(out))
                    return mu, mu*0 + sigma, value, states
        def is_separate_critic(self):
            return self.separate
        def is_rnn(self):
            return self.has_rnn
        def get_default_rnn_state(self):
            """Zero-filled initial RNN states; LSTM gets (h, c) per tower."""
            if not self.has_rnn:
                return None
            num_layers = self.rnn_layers
            if self.rnn_name == 'identity':
                rnn_units = 1
            else:
                rnn_units = self.rnn_units
            if self.rnn_name == 'lstm':
                if self.separate:
                    return (torch.zeros((num_layers, self.num_seqs, rnn_units)),
                            torch.zeros((num_layers, self.num_seqs, rnn_units)),
                            torch.zeros((num_layers, self.num_seqs, rnn_units)),
                            torch.zeros((num_layers, self.num_seqs, rnn_units)))
                else:
                    return (torch.zeros((num_layers, self.num_seqs, rnn_units)),
                            torch.zeros((num_layers, self.num_seqs, rnn_units)))
            else:
                if self.separate:
                    return (torch.zeros((num_layers, self.num_seqs, rnn_units)),
                            torch.zeros((num_layers, self.num_seqs, rnn_units)))
                else:
                    return (torch.zeros((num_layers, self.num_seqs, rnn_units)),)
        def load(self, params):
            """Parse the config dict into attributes used during construction."""
            self.separate = params.get('separate', False)
            self.units = params['mlp']['units']
            self.activation = params['mlp']['activation']
            self.initializer = params['mlp']['initializer']
            self.is_d2rl = params['mlp'].get('d2rl', False)
            self.norm_only_first_layer = params['mlp'].get('norm_only_first_layer', False)
            self.value_activation = params.get('value_activation', 'None')
            self.normalization = params.get('normalization', None)
            self.has_rnn = 'rnn' in params
            self.has_space = 'space' in params
            self.central_value = params.get('central_value', False)
            self.joint_obs_actions_config = params.get('joint_obs_actions', None)
            if self.has_space:
                self.is_multi_discrete = 'multi_discrete'in params['space']
                self.is_discrete = 'discrete' in params['space']
                self.is_continuous = 'continuous'in params['space']
                if self.is_continuous:
                    self.space_config = params['space']['continuous']
                elif self.is_discrete:
                    self.space_config = params['space']['discrete']
                elif self.is_multi_discrete:
                    self.space_config = params['space']['multi_discrete']
            else:
                self.is_discrete = False
                self.is_continuous = False
                self.is_multi_discrete = False
            if self.has_rnn:
                self.rnn_units = params['rnn']['units']
                self.rnn_layers = params['rnn']['layers']
                self.rnn_name = params['rnn']['name']
                self.rnn_ln = params['rnn'].get('layer_norm', False)
                self.is_rnn_before_mlp = params['rnn'].get('before_mlp', False)
                self.rnn_concat_input = params['rnn'].get('concat_input', False)
            if 'cnn' in params:
                self.has_cnn = True
                self.cnn = params['cnn']
            else:
                self.has_cnn = False
    def build(self, name, **kwargs):
        net = A2CBuilder.Network(self.params, **kwargs)
        return net
class Conv2dAuto(nn.Conv2d):
    """nn.Conv2d that derives 'same'-style padding from its kernel size."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        kh, kw = self.kernel_size
        self.padding = (kh // 2, kw // 2)  # keeps spatial size for stride 1
class ConvBlock(nn.Module):
    """3x3 stride-1 'same' convolution with optional batch norm.

    The conv bias is dropped when batch norm is enabled (BN has its own shift).
    """
    def __init__(self, in_channels, out_channels, use_bn=False):
        super().__init__()
        self.use_bn = use_bn
        self.conv = Conv2dAuto(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, bias=not use_bn)
        if use_bn:
            self.bn = nn.BatchNorm2d(out_channels)
    def forward(self, x):
        out = self.conv(x)
        return self.bn(out) if self.use_bn else out
class ResidualBlock(nn.Module):
    """Pre-activation residual block: out = gate * F(x) + x.

    With use_zero_init the residual branch is scaled by a learnable alpha that
    starts at zero, so the block is initially the identity map.
    """
    def __init__(self, channels, activation='relu', use_bn=False, use_zero_init=True, use_attention=False):
        super().__init__()
        self.use_zero_init = use_zero_init
        self.use_attention = use_attention
        if use_zero_init:
            # learnable residual gate, zero at init
            self.alpha = nn.Parameter(torch.zeros(1))
        # NOTE(review): `activation` is stored but ELU is used below regardless
        self.activation = activation
        self.conv1 = ConvBlock(channels, channels, use_bn)
        self.conv2 = ConvBlock(channels, channels, use_bn)
        self.activate1 = nn.ELU()
        self.activate2 = nn.ELU()
        if use_attention:
            # NOTE(review): ChannelAttention / SpatialAttention are not defined
            # in this file — verify they exist before enabling use_attention
            self.ca = ChannelAttention(channels)
            self.sa = SpatialAttention()
    def forward(self, x):
        shortcut = x
        out = self.conv1(self.activate1(x))
        out = self.conv2(self.activate2(out))
        if self.use_attention:
            out = self.ca(out) * out
            out = self.sa(out) * out
        if self.use_zero_init:
            return out * self.alpha + shortcut
        return out + shortcut
class ImpalaSequential(nn.Module):
    """IMPALA-style stage: conv -> 3x3/stride-2 max-pool -> two residual blocks."""
    def __init__(self, in_channels, out_channels, activation='elu', use_bn=True, use_zero_init=False):
        super().__init__()
        self.conv = ConvBlock(in_channels, out_channels, use_bn)
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.res_block1 = ResidualBlock(out_channels, activation=activation, use_bn=use_bn, use_zero_init=use_zero_init)
        self.res_block2 = ResidualBlock(out_channels, activation=activation, use_bn=use_bn, use_zero_init=use_zero_init)
    def forward(self, x):
        out = self.max_pool(self.conv(x))
        return self.res_block2(self.res_block1(out))
class A2CResnetBuilder(NetworkBuilder):
    """Builds an actor-critic network with an IMPALA-style ResNet CNN frontend,
    an optional RNN, and a shared tower (no separate critic)."""
    def __init__(self, **kwargs):
        NetworkBuilder.__init__(self)
    def load(self, params):
        # stash the raw config; the inner Network parses it in its own load()
        self.params = params
    class Network(NetworkBuilder.BaseNetwork):
        def __init__(self, params, **kwargs):
            # kwargs supplied by the runner: action/observation specs and batching info
            actions_num = kwargs.pop('actions_num')
            input_shape = kwargs.pop('input_shape')
            # config gives (W, H, C); torch convolutions expect channels first
            input_shape = torch_ext.shape_whc_to_cwh(input_shape)
            self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
            self.value_size = kwargs.pop('value_size', 1)
            # NOTE(review): leftover kwargs are forwarded into nn.Module.__init__ here
            NetworkBuilder.BaseNetwork.__init__(self, **kwargs)
            self.load(params)
            # IMPALA conv stack; depths come from cnn.conv_depths in the config
            self.cnn = self._build_impala(input_shape, self.conv_depths)
            mlp_input_shape = self._calc_input_size(input_shape, self.cnn)
            in_mlp_shape = mlp_input_shape
            if len(self.units) == 0:
                out_size = mlp_input_shape
            else:
                out_size = self.units[-1]
            if self.has_rnn:
                if not self.is_rnn_before_mlp:
                    # MLP feeds the RNN; heads read the RNN output
                    rnn_in_size = out_size
                    out_size = self.rnn_units
                else:
                    # RNN feeds the MLP; heads read the MLP output
                    rnn_in_size = in_mlp_shape
                    in_mlp_shape = self.rnn_units
                self.rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
                #self.layer_norm = torch.nn.LayerNorm(self.rnn_units)
            mlp_args = {
                'input_size' : in_mlp_shape,
                'units' :self.units,
                'activation' : self.activation,
                'norm_func_name' : self.normalization,
                'dense_func' : torch.nn.Linear
            }
            self.mlp = self._build_mlp(**mlp_args)
            # value head and activations
            self.value = torch.nn.Linear(out_size, self.value_size)
            self.value_act = self.activations_factory.create(self.value_activation)
            self.flatten_act = self.activations_factory.create(self.activation)
            if self.is_discrete:
                self.logits = torch.nn.Linear(out_size, actions_num)
            if self.is_continuous:
                # Gaussian policy head: mean plus fixed or state-dependent sigma
                self.mu = torch.nn.Linear(out_size, actions_num)
                self.mu_act = self.activations_factory.create(self.space_config['mu_activation'])
                mu_init = self.init_factory.create(**self.space_config['mu_init'])
                self.sigma_act = self.activations_factory.create(self.space_config['sigma_activation'])
                sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
                if self.space_config['fixed_sigma']:
                    self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=True, dtype=torch.float32), requires_grad=True)
                else:
                    self.sigma = torch.nn.Linear(out_size, actions_num)
            # conv layers use kaiming; MLP and heads use configured initializers
            mlp_init = self.init_factory.create(**self.initializer)
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out')
                    #nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('elu'))
            for m in self.mlp:
                if isinstance(m, nn.Linear):
                    mlp_init(m.weight)
            if self.is_discrete:
                mlp_init(self.logits.weight)
            if self.is_continuous:
                mu_init(self.mu.weight)
                if self.space_config['fixed_sigma']:
                    sigma_init(self.sigma)
                else:
                    sigma_init(self.sigma.weight)
            mlp_init(self.value.weight)
        def forward(self, obs_dict):
            """Run the network; returns head outputs + value + new RNN states.

            Expects image observations as (B, W, H, C); permutes to channels-first.
            """
            obs = obs_dict['obs']
            obs = obs.permute((0, 3, 1, 2))
            states = obs_dict.get('rnn_states', None)
            seq_length = obs_dict.get('seq_length', 1)
            out = obs
            out = self.cnn(out)
            out = out.flatten(1)
            out = self.flatten_act(out)
            if self.has_rnn:
                if not self.is_rnn_before_mlp:
                    out = self.mlp(out)
                # fold the flat batch into (num_seqs, seq_length, features)
                batch_size = out.size()[0]
                num_seqs = batch_size // seq_length
                out = out.reshape(num_seqs, seq_length, -1)
                if len(states) == 1:
                    states = states[0]
                out, states = self.rnn(out, states)
                out = out.contiguous().reshape(out.size()[0] * out.size()[1], -1)
                #out = self.layer_norm(out)
                if type(states) is not tuple:
                    states = (states,)
                if self.is_rnn_before_mlp:
                    for l in self.mlp:
                        out = l(out)
            else:
                for l in self.mlp:
                    out = l(out)
            value = self.value_act(self.value(out))
            if self.is_discrete:
                logits = self.logits(out)
                return logits, value, states
            if self.is_continuous:
                mu = self.mu_act(self.mu(out))
                if self.space_config['fixed_sigma']:
                    sigma = self.sigma_act(self.sigma)
                else:
                    sigma = self.sigma_act(self.sigma(out))
                # broadcast sigma to the batch shape via mu*0 + sigma
                return mu, mu*0 + sigma, value, states
        def load(self, params):
            """Parse the config dict into attributes used during construction."""
            self.separate = params['separate']
            self.units = params['mlp']['units']
            self.activation = params['mlp']['activation']
            self.initializer = params['mlp']['initializer']
            self.is_discrete = 'discrete' in params['space']
            self.is_continuous = 'continuous' in params['space']
            self.is_multi_discrete = 'multi_discrete'in params['space']
            self.value_activation = params.get('value_activation', 'None')
            self.normalization = params.get('normalization', None)
            if self.is_continuous:
                self.space_config = params['space']['continuous']
            elif self.is_discrete:
                self.space_config = params['space']['discrete']
            elif self.is_multi_discrete:
                self.space_config = params['space']['multi_discrete']
            self.has_rnn = 'rnn' in params
            if self.has_rnn:
                self.rnn_units = params['rnn']['units']
                self.rnn_layers = params['rnn']['layers']
                self.rnn_name = params['rnn']['name']
                self.is_rnn_before_mlp = params['rnn'].get('before_mlp', False)
            # this builder always has a CNN frontend
            self.has_cnn = True
            self.conv_depths = params['cnn']['conv_depths']
        def _build_impala(self, input_shape, depths):
            """Chain one ImpalaSequential stage per entry in `depths`."""
            in_channels = input_shape[0]
            layers = nn.ModuleList()
            for d in depths:
                layers.append(ImpalaSequential(in_channels, d))
                in_channels = d
            return nn.Sequential(*layers)
        def is_separate_critic(self):
            return False
        def is_rnn(self):
            return self.has_rnn
        def get_default_rnn_state(self):
            """Zero-filled initial RNN states ((h, c) for LSTM)."""
            num_layers = self.rnn_layers
            if self.rnn_name == 'lstm':
                return (torch.zeros((num_layers, self.num_seqs, self.rnn_units)),
                        torch.zeros((num_layers, self.num_seqs, self.rnn_units)))
            else:
                # NOTE(review): the parentheses here do NOT create a tuple — this
                # returns a bare tensor, unlike the LSTM branch; confirm callers
                # accept that before changing it
                return (torch.zeros((num_layers, self.num_seqs, self.rnn_units)))
    def build(self, name, **kwargs):
        net = A2CResnetBuilder.Network(self.params, **kwargs)
        return net
class DiagGaussianActor(NetworkBuilder.BaseNetwork):
    """Squashed diagonal-Gaussian policy head on top of an MLP trunk.

    The trunk outputs 2 * action_dim values, split into mean and log-std;
    log-std is clamped to `log_std_bounds` before exponentiation.
    """
    def __init__(self, output_dim, log_std_bounds, **mlp_args):
        super().__init__()
        self.log_std_bounds = log_std_bounds
        base = self._build_mlp(**mlp_args)
        # width of the trunk's final Linear (children alternate Linear/activation)
        hidden_dim = list(base.children())[-2].out_features
        self.trunk = nn.Sequential(*list(base.children()), nn.Linear(hidden_dim, output_dim))
    def forward(self, obs):
        mu, log_std = self.trunk(obs).chunk(2, dim=-1)
        # constrain log_std inside [log_std_min, log_std_max]
        lo, hi = self.log_std_bounds
        std = torch.clamp(log_std, lo, hi).exp()
        return SquashedNormal(mu, std)
class DoubleQCritic(NetworkBuilder.BaseNetwork):
    """Twin Q-networks over concat(obs, action), for double Q-learning."""
    def __init__(self, output_dim, **mlp_args):
        super().__init__()
        self.Q1 = self._with_head(self._build_mlp(**mlp_args), output_dim)
        self.Q2 = self._with_head(self._build_mlp(**mlp_args), output_dim)
    @staticmethod
    def _with_head(trunk, output_dim):
        # second-to-last child is the trunk's final Linear; its width feeds the head
        width = list(trunk.children())[-2].out_features
        return nn.Sequential(*list(trunk.children()), nn.Linear(width, output_dim))
    def forward(self, obs, action):
        assert obs.size(0) == action.size(0)
        pair = torch.cat([obs, action], dim=-1)
        return self.Q1(pair), self.Q2(pair)
class SACBuilder(NetworkBuilder):
    """Builds SAC networks: a squashed-Gaussian actor plus (optionally) twin-Q
    critic and a frozen-copy critic target."""
    def __init__(self, **kwargs):
        NetworkBuilder.__init__(self)
    def load(self, params):
        # stash the raw config; the inner Network parses it in its own load()
        self.params = params
    def build(self, name, **kwargs):
        net = SACBuilder.Network(self.params, **kwargs)
        return net
    class Network(NetworkBuilder.BaseNetwork):
        def __init__(self, params, **kwargs):
            actions_num = kwargs.pop('actions_num')
            input_shape = kwargs.pop('input_shape')
            obs_dim = kwargs.pop('obs_dim')
            action_dim = kwargs.pop('action_dim')
            self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
            NetworkBuilder.BaseNetwork.__init__(self)
            self.load(params)
            mlp_input_shape = input_shape
            actor_mlp_args = {
                'input_size' : obs_dim,
                'units' : self.units,
                'activation' : self.activation,
                'norm_func_name' : self.normalization,
                'dense_func' : torch.nn.Linear,
                'd2rl' : self.is_d2rl,
                'norm_only_first_layer' : self.norm_only_first_layer
            }
            # critic consumes concat(obs, action)
            critic_mlp_args = {
                'input_size' : obs_dim + action_dim,
                'units' : self.units,
                'activation' : self.activation,
                'norm_func_name' : self.normalization,
                'dense_func' : torch.nn.Linear,
                'd2rl' : self.is_d2rl,
                'norm_only_first_layer' : self.norm_only_first_layer
            }
            print("Building Actor")
            # actor head outputs 2*action_dim values (mean and log-std chunks)
            self.actor = self._build_actor(2*action_dim, self.log_std_bounds, **actor_mlp_args)
            if self.separate:
                print("Building Critic")
                self.critic = self._build_critic(1, **critic_mlp_args)
                print("Building Critic Target")
                self.critic_target = self._build_critic(1, **critic_mlp_args)
                # target starts as an exact copy of the online critic
                self.critic_target.load_state_dict(self.critic.state_dict())
            mlp_init = self.init_factory.create(**self.initializer)
            for m in self.modules():
                # bug fix: the original called an undefined `cnn_init` here, which
                # would raise NameError if any conv layer were present. SAC builds
                # no conv layers, so initializing them with mlp_init preserves
                # behavior while removing the latent crash.
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                    mlp_init(m.weight)
                    if getattr(m, "bias", None) is not None:
                        torch.nn.init.zeros_(m.bias)
                if isinstance(m, nn.Linear):
                    mlp_init(m.weight)
                    if getattr(m, "bias", None) is not None:
                        torch.nn.init.zeros_(m.bias)
        def _build_critic(self, output_dim, **mlp_args):
            return DoubleQCritic(output_dim, **mlp_args)
        def _build_actor(self, output_dim, log_std_bounds, **mlp_args):
            return DiagGaussianActor(output_dim, log_std_bounds, **mlp_args)
        def forward(self, obs_dict):
            """TODO"""
            obs = obs_dict['obs']
            # NOTE(review): DiagGaussianActor.forward returns a distribution
            # object, so this tuple-unpack looks broken — confirm against callers
            # before relying on this path (left as in the original TODO).
            mu, sigma = self.actor(obs)
            return mu, sigma
        def is_separate_critic(self):
            return self.separate
        def load(self, params):
            """Parse the config dict into attributes used during construction."""
            self.separate = params.get('separate', True)
            self.units = params['mlp']['units']
            self.activation = params['mlp']['activation']
            self.initializer = params['mlp']['initializer']
            self.is_d2rl = params['mlp'].get('d2rl', False)
            self.norm_only_first_layer = params['mlp'].get('norm_only_first_layer', False)
            self.value_activation = params.get('value_activation', 'None')
            self.normalization = params.get('normalization', None)
            self.has_space = 'space' in params
            self.value_shape = params.get('value_shape', 1)
            self.central_value = params.get('central_value', False)
            self.joint_obs_actions_config = params.get('joint_obs_actions', None)
            self.log_std_bounds = params.get('log_std_bounds', None)
            if self.has_space:
                self.is_discrete = 'discrete' in params['space']
                self.is_continuous = 'continuous'in params['space']
                if self.is_continuous:
                    self.space_config = params['space']['continuous']
                elif self.is_discrete:
                    self.space_config = params['space']['discrete']
            else:
                self.is_discrete = False
                self.is_continuous = False
'''
class DQNBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_dueling = params['dueling']
self.atoms = params['atoms']
self.is_noisy = params['noisy']
self.normalization = params.get('normalization', None)
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
is_train = kwargs.pop('is_train', True)
if self.is_noisy:
dense_layer = self._noisy_dense
else:
dense_layer = torch.nn.Linear
with tf.variable_scope(name, reuse=reuse):
out = input
if self.has_cnn:
cnn_args = {
'name' :'dqn_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out = self._build_conv(**cnn_args)
out = tf.contrib.layers.flatten(out)
mlp_args = {
'name' :'dqn_mlp',
'input' : out,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train,
'dense_func' : dense_layer
}
if self.is_dueling:
if len(self.units) > 1:
mlp_args['units'] = self.units[:-1]
out = self._build_mlp(**mlp_args)
hidden_value = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_val')
hidden_advantage = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_adv')
value = dense_layer(inputs=hidden_value, units=self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), activation=tf.identity, kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='value')
advantage = dense_layer(inputs=hidden_advantage, units= actions_num * self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='advantage')
advantage = tf.reshape(advantage, shape = [-1, actions_num, self.atoms])
value = tf.reshape(value, shape = [-1, 1, self.atoms])
q_values = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
else:
mlp_args['units'] = self.units
out = self._build_mlp('dqn_mlp', out, self.units, self.activation, self.initializer, self.regularizer)
q_values = dense_layer(inputs=out, units=actions_num *self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='q_vals')
q_values = tf.reshape(q_values, shape = [-1, actions_num, self.atoms])
if self.atoms == 1:
return tf.squeeze(q_values)
else:
return q_values
'''
| 43,953 |
Python
| 42.176817 | 301 | 0.519623 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/a2c_continuous.py
|
from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.algos_torch import ppg_aux
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class A2CAgent(a2c_common.ContinuousA2CBase):
    """PPO/A2C agent for continuous action spaces (PyTorch backend).

    Builds the actor-critic model, optional observation normalizers and an
    optional central value network, then provides the per-minibatch gradient
    step used by the common training loop in `a2c_common`.
    """
    def __init__(self, base_name, config):
        a2c_common.ContinuousA2CBase.__init__(self, base_name, config)
        obs_shape = self.obs_shape
        # Network build config (note: intentionally shadows the `config` argument,
        # which is no longer needed after the base-class init).
        config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_actors * self.num_agents,
            'value_size': self.env_info.get('value_size',1)
        }
        self.model = self.network.build(config)
        self.model.to(self.ppo_device)
        self.states = None
        self.init_rnn_from_model(self.model)
        self.last_lr = float(self.last_lr)
        self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)
        if self.normalize_input:
            # Dict observation spaces get a per-key normalizer.
            if isinstance(self.observation_space,gym.spaces.Dict):
                self.running_mean_std = RunningMeanStdObs(obs_shape).to(self.ppo_device)
            else:
                self.running_mean_std = RunningMeanStd(obs_shape).to(self.ppo_device)
        if self.has_central_value:
            cv_config = {
                'state_shape' : self.state_shape,
                'value_size' : self.value_size,
                'ppo_device' : self.ppo_device,
                'num_agents' : self.num_agents,
                'num_steps' : self.horizon_length,
                'num_actors' : self.num_actors,
                'num_actions' : self.actions_num,
                'seq_len' : self.seq_len,
                'model' : self.central_value_config['network'],
                'config' : self.central_value_config,
                'writter' : self.writer,
                'max_epochs' : self.max_epochs,
                'multi_gpu' : self.multi_gpu
            }
            self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)
        self.use_experimental_cv = self.config.get('use_experimental_cv', True)
        self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
        # ROBUSTNESS FIX: default to False so the attribute always exists.
        # Previously it was only assigned inside the `if` below, so computing
        # `self.has_value_loss` raised AttributeError whenever the config had
        # no 'phasic_policy_gradients' key.
        self.has_phasic_policy_gradients = False
        if 'phasic_policy_gradients' in self.config:
            self.has_phasic_policy_gradients = True
            self.ppg_aux_loss = ppg_aux.PPGAux(self, self.config['phasic_policy_gradients'])
        self.has_value_loss = (self.has_central_value \
            and self.use_experimental_cv) \
            or not self.has_phasic_policy_gradients
        self.algo_observer.after_init(self)

    def update_epoch(self):
        """Advance and return the epoch counter."""
        self.epoch_num += 1
        return self.epoch_num

    def save(self, fn):
        """Serialize the full training state to checkpoint file `fn`."""
        state = self.get_full_state_weights()
        torch_ext.save_checkpoint(fn, state)

    def restore(self, fn):
        """Load the full training state from checkpoint file `fn`."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.set_full_state_weights(checkpoint)

    def get_masked_action_values(self, obs, action_masks):
        # Action masking is not meaningful for continuous actions.
        assert False

    def calc_gradients(self, input_dict):
        """Run one PPO gradient step on a minibatch and stash stats in self.train_result.

        input_dict carries the rollout minibatch: old values/log-probs, advantages,
        returns, actions, observations and (for RNNs) states/masks.
        """
        value_preds_batch = input_dict['old_values']
        old_action_log_probs_batch = input_dict['old_logp_actions']
        advantage = input_dict['advantages']
        old_mu_batch = input_dict['mu']
        old_sigma_batch = input_dict['sigma']
        return_batch = input_dict['returns']
        actions_batch = input_dict['actions']
        obs_batch = input_dict['obs']
        obs_batch = self._preproc_obs(obs_batch)
        # lr_mul is a fixed multiplier here (adaptive schedules live elsewhere).
        lr_mul = 1.0
        curr_e_clip = lr_mul * self.e_clip
        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs' : obs_batch,
        }
        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len
        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = self.model(batch_dict)
            action_log_probs = res_dict['prev_neglogp']
            values = res_dict['values']
            entropy = res_dict['entropy']
            mu = res_dict['mus']
            sigma = res_dict['sigmas']
            a_loss = common_losses.actor_loss(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)
            if self.has_value_loss:
                c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
            else:
                c_loss = torch.zeros(1, device=self.ppo_device)
            b_loss = self.bound_loss(mu)
            losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks)
            a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]
            loss = a_loss + 0.5 * c_loss * self.critic_coef - entropy * self.entropy_coef + b_loss * self.bounds_loss_coef
            # Signal-to-noise ratio of the policy gradient w.r.t. the action means,
            # used as a diagnostic. NOTE(review): assumes self.snr_ppo (a list) is
            # created elsewhere — confirm against the base class / training script.
            pol_grad = torch.autograd.grad(loss, mu, retain_graph=True)[0]
            snr_ppo = (pol_grad.mean(dim=0).abs()/pol_grad.std(dim=0)).mean().item()
            self.snr_ppo.append(snr_ppo)
            if self.multi_gpu:
                self.optimizer.zero_grad()
            else:
                # Setting grads to None is cheaper than zeroing them.
                for param in self.model.parameters():
                    param.grad = None
        self.scaler.scale(loss).backward()
        # TODO: refactor this optimizer-step logic (multi-GPU vs single-GPU paths).
        if self.truncate_grads:
            if self.multi_gpu:
                self.optimizer.synchronize()
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                with self.optimizer.skip_synchronize():
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
            else:
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                self.scaler.step(self.optimizer)
                self.scaler.update()
        else:
            self.scaler.step(self.optimizer)
            self.scaler.update()
        with torch.no_grad():
            reduce_kl = not self.is_rnn
            kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
            if self.is_rnn:
                kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask
        self.train_result = (a_loss, c_loss, entropy, \
            kl_dist, self.last_lr, lr_mul, \
            mu.detach(), sigma.detach(), b_loss)

    def train_actor_critic(self, input_dict):
        """One training step; returns the stats tuple built by calc_gradients."""
        self.calc_gradients(input_dict)
        return self.train_result

    def bound_loss(self, mu):
        """Quadratic penalty on action means outside [-soft_bound, soft_bound].

        Zero inside the bounds; grows quadratically with the violation.
        """
        if self.bounds_loss_coef is not None:
            soft_bound = 1.1
            # BUG FIX: the original used clamp_max on both terms, which made the
            # penalty non-zero everywhere *inside* the bounds and effectively
            # pulled mu toward +soft_bound. The correct penalty is zero inside
            # the bounds: clamp_min for the upper bound, clamp_max for the lower.
            mu_loss_high = torch.clamp_min(mu - soft_bound, 0.0)**2
            mu_loss_low = torch.clamp_max(mu + soft_bound, 0.0)**2
            b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
        else:
            b_loss = 0
        return b_loss
| 7,600 |
Python
| 39.647059 | 142 | 0.570263 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_torch/running_mean_std.py
|
import torch
import torch.nn as nn
import numpy as np
'''
updates statistic from a full data
'''
class RunningMeanStd(nn.Module):
    """Online input normalizer: tracks running mean/var (Welford-style parallel
    update) in float64 buffers and returns a clamped, normalized float32 tensor.

    Stats are updated only in training mode (`self.training`); in eval mode the
    stored statistics are applied without being modified.
    """
    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        super(RunningMeanStd, self).__init__()
        print('RunningMeanStd: ', insize)
        self.insize = insize
        self.epsilon = epsilon
        self.norm_only = norm_only
        self.per_channel = per_channel
        if per_channel:
            # Reduce over batch and all spatial/temporal axes, keeping the
            # channel axis (assumed to be dim 1, i.e. insize[0]).
            if len(self.insize) == 3:
                self.axis = [0,2,3]
            if len(self.insize) == 2:
                self.axis = [0,2]
            if len(self.insize) == 1:
                self.axis = [0]
            in_size = self.insize[0]
        else:
            self.axis = [0]
            in_size = insize
        # Buffers (not parameters): saved with state_dict but not trained.
        # float64 keeps the incremental moment updates numerically stable.
        self.register_buffer("running_mean", torch.zeros(in_size, dtype = torch.float64))
        self.register_buffer("running_var", torch.ones(in_size, dtype = torch.float64))
        self.register_buffer("count", torch.ones((), dtype = torch.float64))
    def _update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
        # Chan et al. parallel-variance update: merge (mean, var, count) with a
        # new batch's moments and return the combined statistics.
        delta = batch_mean - mean
        tot_count = count + batch_count
        new_mean = mean + delta * batch_count / tot_count
        m_a = var * count
        m_b = batch_var * batch_count
        M2 = m_a + m_b + delta**2 * count * batch_count / tot_count
        new_var = M2 / tot_count
        new_count = tot_count
        return new_mean, new_var, new_count
    def forward(self, input, unnorm=False):
        # In training mode, fold this batch's moments into the running stats
        # *before* normalizing, so normalization always uses the latest stats.
        if self.training:
            mean = input.mean(self.axis) # along channel axis
            var = input.var(self.axis)
            self.running_mean, self.running_var, self.count = self._update_mean_var_count_from_moments(self.running_mean, self.running_var, self.count,
                                                    mean, var, input.size()[0] )
        # change shape
        # Broadcast per-channel stats back to the input's layout.
        if self.per_channel:
            if len(self.insize) == 3:
                current_mean = self.running_mean.view([1, self.insize[0], 1, 1]).expand_as(input)
                current_var = self.running_var.view([1, self.insize[0], 1, 1]).expand_as(input)
            if len(self.insize) == 2:
                current_mean = self.running_mean.view([1, self.insize[0], 1]).expand_as(input)
                current_var = self.running_var.view([1, self.insize[0], 1]).expand_as(input)
            if len(self.insize) == 1:
                current_mean = self.running_mean.view([1, self.insize[0]]).expand_as(input)
                current_var = self.running_var.view([1, self.insize[0]]).expand_as(input)
        else:
            current_mean = self.running_mean
            current_var = self.running_var
        # get output
        if unnorm:
            # Inverse transform: map a normalized (clamped) value back to the
            # original scale. Used e.g. to denormalize predicted values.
            y = torch.clamp(input, min=-5.0, max=5.0)
            y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
        else:
            if self.norm_only:
                y = input/ torch.sqrt(current_var.float() + self.epsilon)
            else:
                y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
                y = torch.clamp(y, min=-5.0, max=5.0)
        return y
class RunningMeanStdObs(nn.Module):
    """Per-key running normalization for dict observations.

    Wraps one `RunningMeanStd` per observation key in an `nn.ModuleDict` and
    applies the matching normalizer to each entry of the input dict.
    """
    def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        # BUG FIX: the original `assert(insize is dict)` compared identity with
        # the dict *type* itself, so it failed for every real dict argument.
        assert isinstance(insize, dict)
        super(RunningMeanStdObs, self).__init__()
        self.running_mean_std = nn.ModuleDict({
            k : RunningMeanStd(v, epsilon, per_channel, norm_only) for k,v in insize.items()
        })

    def forward(self, input, unnorm=False):
        # BUG FIX: nn.ModuleDict is not callable; each value must be normalized
        # by the sub-module registered under its own key.
        res = {k : self.running_mean_std[k](v, unnorm) for k,v in input.items()}
        return res
| 3,757 |
Python
| 41.224719 | 152 | 0.558957 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/tensorflow_utils.py
|
import tensorflow as tf
import numpy as np
import collections
from collections import deque, OrderedDict
def unflatten(vector, shapes):
    """Split a flat 1-D array into a list of arrays with the given shapes.

    Args:
        vector: 1-D array whose length equals the total number of elements
            across all `shapes`.
        shapes: iterable of target shapes, consumed in order.

    Returns:
        List of arrays, one per shape, each a reshaped slice of `vector`.

    Raises:
        AssertionError: if `vector` does not contain exactly the number of
            elements implied by `shapes`.
    """
    i = 0
    arrays = []
    for shape in shapes:
        # BUG FIX: `np.int` was removed in NumPy 1.24 (deprecated since 1.20);
        # use an explicit 64-bit accumulator and a plain Python int.
        size = int(np.prod(shape, dtype=np.int64))
        array = vector[i:(i + size)].reshape(shape)
        arrays.append(array)
        i += size
    assert len(vector) == i, "Passed weight does not have the correct shape."
    return arrays
class TensorFlowVariables(object):
    """A class used to set and get weights for Tensorflow networks.
    Attributes:
        sess (tf.Session): The tensorflow session used to run assignment.
        variables (Dict[str, tf.Variable]): Extracted variables from the loss
            or additional variables that are passed in.
        placeholders (Dict[str, tf.placeholders]): Placeholders for weights.
        assignment_nodes (Dict[str, tf.Tensor]): Nodes that assign weights.
    """
    def __init__(self, output, sess=None, input_variables=None):
        """Creates TensorFlowVariables containing extracted variables.
        The variables are extracted by performing a BFS search on the
        dependency graph with loss as the root node. After the tree is
        traversed and those variables are collected, we append input_variables
        to the collected variables. For each variable in the list, the
        variable has a placeholder and assignment operation created for it.
        Args:
            output (tf.Operation, List[tf.Operation]): The tensorflow
                operation to extract all variables from.
            sess (tf.Session): Session used for running the get and set
                methods.
            input_variables (List[tf.Variables]): Variables to include in the
                list.
        """
        self.sess = sess
        # Normalize to a list so a single op/tensor can be passed directly.
        if not isinstance(output, (list, tuple)):
            output = [output]
        queue = deque(output)
        variable_names = []
        explored_inputs = set(output)
        # We do a BFS on the dependency graph of the input function to find
        # the variables.
        while len(queue) != 0:
            tf_obj = queue.popleft()
            if tf_obj is None:
                continue
            # The object put into the queue is not necessarily an operation,
            # so we want the op attribute to get the operation underlying the
            # object. Only operations contain the inputs that we can explore.
            if hasattr(tf_obj, "op"):
                tf_obj = tf_obj.op
            for input_op in tf_obj.inputs:
                if input_op not in explored_inputs:
                    queue.append(input_op)
                    explored_inputs.add(input_op)
            # Tensorflow control inputs can be circular, so we keep track of
            # explored operations.
            for control in tf_obj.control_inputs:
                if control not in explored_inputs:
                    queue.append(control)
                    explored_inputs.add(control)
            # Any op whose type mentions "Variable" (Variable, VariableV2, ...)
            # is treated as a trainable/storable variable node.
            if "Variable" in tf_obj.node_def.op:
                variable_names.append(tf_obj.node_def.name)
        self.variables = OrderedDict()
        # Map discovered node names back to the actual tf.Variable objects.
        variable_list = [
            v for v in tf.global_variables()
            if v.op.node_def.name in variable_names
        ]
        if input_variables is not None:
            variable_list += input_variables
        for v in variable_list:
            self.variables[v.op.node_def.name] = v
        self.placeholders = {}
        self.assignment_nodes = {}
        # Create new placeholders to put in custom weights.
        # One placeholder + assign op per variable; built once here so set_flat
        # and set_weights never add nodes to the graph at call time.
        for k, var in self.variables.items():
            self.placeholders[k] = tf.placeholder(
                var.value().dtype,
                var.get_shape().as_list(),
                name="Placeholder_" + k)
            self.assignment_nodes[k] = var.assign(self.placeholders[k])
    def set_session(self, sess):
        """Sets the current session used by the class.
        Args:
            sess (tf.Session): Session to set the attribute with.
        """
        self.sess = sess
    def get_flat_size(self):
        """Returns the total length of all of the flattened variables.
        Returns:
            The length of all flattened variables concatenated.
        """
        return sum(
            np.prod(v.get_shape().as_list()) for v in self.variables.values())
    def _check_sess(self):
        """Checks if the session is set, and if not throw an error message."""
        assert self.sess is not None, ("The session is not set. Set the "
                                       "session either by passing it into the "
                                       "TensorFlowVariables constructor or by "
                                       "calling set_session(sess).")
    def get_flat(self):
        """Gets the weights and returns them as a flat array.
        Returns:
            1D Array containing the flattened weights.
        """
        self._check_sess()
        return np.concatenate([
            v.eval(session=self.sess).flatten()
            for v in self.variables.values()
        ])
    def set_flat(self, new_weights):
        """Sets the weights to new_weights, converting from a flat array.
        Note:
            You can only set all weights in the network using this function,
            i.e., the length of the array must match get_flat_size.
        Args:
            new_weights (np.ndarray): Flat array containing weights.
        """
        self._check_sess()
        shapes = [v.get_shape().as_list() for v in self.variables.values()]
        arrays = unflatten(new_weights, shapes)
        placeholders = [
            self.placeholders[k] for k, v in self.variables.items()
        ]
        self.sess.run(
            list(self.assignment_nodes.values()),
            feed_dict=dict(zip(placeholders, arrays)))
    def get_weights(self):
        """Returns a dictionary containing the weights of the network.
        Returns:
            Dictionary mapping variable names to their weights.
        """
        self._check_sess()
        return {
            k: v.eval(session=self.sess)
            for k, v in self.variables.items()
        }
    def set_weights(self, new_weights):
        """Sets the weights to new_weights.
        Note:
            Can set subsets of variables as well, by only passing in the
            variables you want to be set.
        Args:
            new_weights (Dict): Dictionary mapping variable names to their
                weights.
        """
        self._check_sess()
        # Only assign variables that exist in this graph; silently skipping
        # unknown keys allows loading partial weight dicts.
        assign_list = [
            self.assignment_nodes[name] for name in new_weights.keys()
            if name in self.assignment_nodes
        ]
        assert assign_list, ("No variables in the input matched those in the "
                             "network. Possible cause: Two networks were "
                             "defined in the same TensorFlow graph. To fix "
                             "this, place each network definition in its own "
                             "tf.Graph.")
        self.sess.run(
            assign_list,
            feed_dict={
                self.placeholders[name]: value
                for (name, value) in new_weights.items()
                if name in self.placeholders
            })
| 7,289 |
Python
| 39.5 | 79 | 0.571409 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/tf_moving_mean_std.py
|
import tensorflow as tf
from tensorflow.python.training.moving_averages import assign_moving_average
class MovingMeanStd(object):
    """TF1 graph-mode running mean/std normalizer.

    Keeps float64 moving mean/variance variables and, in training mode, folds
    each batch's moments into them (parallel-variance update) before
    normalizing. The normalized output is clamped to [-clamp, clamp].
    """
    def __init__(self, shape, epsilon, decay, clamp = 5.0):
        self.moving_mean = tf.Variable(tf.constant(0.0, shape=shape, dtype=tf.float64), trainable=False)#, name='moving_mean')
        self.moving_variance = tf.Variable(tf.constant(1.0, shape=shape, dtype=tf.float64), trainable=False)#, name='moving_variance' )
        self.epsilon = epsilon
        self.shape = shape
        self.decay = decay
        # NOTE(review): count is created with `shape=shape` (one counter per
        # element) and initialized to epsilon, not to a scalar — confirm this
        # is intended; `decay` is stored but never used in this class.
        self.count = tf.Variable(tf.constant(epsilon, shape=shape, dtype=tf.float64), trainable=False)
        self.clamp = clamp
    def update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
        # Chan et al. parallel-variance merge of running stats with batch moments.
        delta = batch_mean - mean
        tot_count = count + batch_count
        new_mean = mean + delta * batch_count / tot_count
        m_a = var * count
        m_b = batch_var * batch_count
        M2 = m_a + m_b + tf.square(delta) * count * batch_count / tot_count
        new_var = M2 / tot_count
        new_count = tot_count
        return new_mean, new_var, new_count
    def normalize(self, x, train=True):
        # All statistics are kept in float64 for numerical stability.
        x64 = tf.cast(x, tf.float64)
        if train:
            # Reduce over all axes except the last (feature) axis.
            shape = x.get_shape().as_list()
            if (len(shape) == 2):
                axis = [0]
            if (len(shape) == 3):
                axis = [0, 1]
            if (len(shape) == 4):
                axis = [0, 1, 2]
            mean, var = tf.nn.moments(x64, axis)
            new_mean, new_var, new_count = self.update_mean_var_count_from_moments(self.moving_mean, self.moving_variance, self.count, mean, var, tf.cast(tf.shape(x)[0], tf.float64))
            mean_op = self.moving_mean.assign(new_mean)
            # Variance is floored at 1e-2 to avoid division blow-ups.
            var_op = self.moving_variance.assign(tf.maximum(new_var, 1e-2))
            count_op = self.count.assign(new_count)
            # Control dependencies guarantee the stat updates run before the
            # normalized output is computed in the same session step.
            with tf.control_dependencies([mean_op, var_op, count_op]):
                res = tf.cast((x64 - self.moving_mean) / (tf.sqrt(self.moving_variance)), tf.float32)
                return tf.clip_by_value(res, -self.clamp, self.clamp)
        else:
            res = tf.cast((x64 - self.moving_mean) / (tf.sqrt(self.moving_variance)), tf.float32)
            return tf.clip_by_value(res, -self.clamp, self.clamp)
| 2,361 |
Python
| 49.255318 | 182 | 0.581957 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/players.py
|
from rl_games.common import env_configurations
from rl_games.algos_tf14 import dqnagent
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
def rescale_actions(low, high, action):
    """Affinely map an action from [-1, 1] to [low, high] (elementwise)."""
    half_range = (high - low) / 2.0
    midpoint = (high + low) / 2.0
    return action * half_range + midpoint
class BasePlayer(object):
    """Base class for evaluation players: owns the env/session handles and the
    generic evaluation loop (`run`); subclasses implement action selection,
    checkpoint restore and reset.
    """
    def __init__(self, sess, config):
        self.config = config
        self.sess = sess
        self.env_name = self.config['env_name']
        # Query env metadata (spaces, agent count) without creating the env yet.
        self.env_spaces = env_configurations.get_env_info(self.config)
        self.obs_space, self.action_space, self.num_agents = self.env_spaces['observation_space'], self.env_spaces['action_space'], self.env_spaces['agents']
        self.env = None
        self.env_config = self.config.get('env_config', None)
    def restore(self, fn):
        # Subclasses load checkpoint weights from file `fn`.
        raise NotImplementedError('restore')
    def get_weights(self):
        # Relies on a subclass assigning self.variables (TensorFlowVariables).
        return self.variables.get_flat()
    def set_weights(self, weights):
        return self.variables.set_flat(weights)
    def create_env(self):
        # Instantiate the env via the registered creator for this env name.
        return env_configurations.configurations[self.env_name]['env_creator']()
    def get_action(self, obs, is_determenistic = False):
        raise NotImplementedError('step')
    def get_masked_action(self, obs, mask, is_determenistic = False):
        raise NotImplementedError('step')
    def reset(self):
        raise NotImplementedError('raise')
    def run(self, n_games=1000, n_game_life = 1, render= False):
        """Play `n_games * n_game_life` episodes and print average reward/steps.

        Uses masked actions when the env exposes an action mask; episodes are
        capped at 5000 steps each.
        """
        self.env = self.create_env()
        sum_rewards = 0
        sum_steps = 0
        sum_game_res = 0
        n_games = n_games * n_game_life
        has_masks = False
        has_masks_func = getattr(self.env, "has_action_mask", None) is not None
        if has_masks_func:
            has_masks = self.env.has_action_mask()
        is_determenistic = True
        for _ in range(n_games):
            cr = 0
            steps = 0
            s = self.env.reset()
            for _ in range(5000):
                if has_masks:
                    masks = self.env.get_action_mask()
                    action = self.get_masked_action(s, masks, is_determenistic)
                else:
                    action = self.get_action(s, is_determenistic)
                s, r, done, info = self.env.step(action)
                cr += r
                steps += 1
                if render:
                    self.env.render(mode = 'human')
                # Multi-agent envs may return a vector of dones; finish the
                # episode as soon as any agent is done.
                if not np.isscalar(done):
                    done = done.any()
                if done:
                    game_res = 0.0
                    if isinstance(info, dict):
                        if 'battle_won' in info:
                            game_res = info['battle_won']
                        if 'scores' in info:
                            game_res = info['scores']
                    print('reward:', np.mean(cr), 'steps:', steps, 'scores:', game_res)
                    sum_game_res += game_res
                    sum_rewards += np.mean(cr)
                    sum_steps += steps
                    break
        print('av reward:', sum_rewards / n_games * n_game_life, 'av steps:', sum_steps / n_games * n_game_life, 'scores:', sum_game_res / n_games * n_game_life)
class PpoPlayerContinuous(BasePlayer):
    """Evaluation player for a continuous-action PPO policy (TF1 graph mode).

    Builds the inference graph once in __init__ and samples (or takes the mean
    of) the policy's Gaussian, rescaling from [-1, 1] to the env action range.
    """
    def __init__(self, sess, config):
        BasePlayer.__init__(self, sess, config)
        self.network = config['network']
        self.obs_ph = tf.placeholder('float32', (None, ) + self.obs_space.shape, name = 'obs')
        self.actions_num = self.action_space.shape[0]
        self.actions_low = self.action_space.low
        self.actions_high = self.action_space.high
        # Done-mask fed to RNN policies; single-game evaluation so one entry.
        self.mask = [False]
        self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
        self.normalize_input = self.config['normalize_input']
        self.input_obs = self.obs_ph
        # Image observations are converted to floats in [0, 1].
        if self.obs_space.dtype == np.uint8:
            self.input_obs = tf.to_float(self.input_obs) / 255.0
        if self.normalize_input:
            # train=False: use stored statistics, never update them at play time.
            self.moving_mean_std = MovingMeanStd(shape = self.obs_space.shape, epsilon = 1e-5, decay = 0.99)
            self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=False)
        self.run_dict = {
            'name' : 'agent',
            'inputs' : self.input_obs,
            'batch_num' : 1,
            'games_num' : 1,
            'actions_num' : self.actions_num,
            'prev_actions_ph' : None
        }
        self.last_state = None
        # RNN networks return extra state tensors alongside the policy outputs.
        if self.network.is_rnn():
            self.neglop, self.value, self.action, _, self.mu, _, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.run_dict, reuse=False)
            self.last_state = self.initial_state
        else:
            self.neglop, self.value, self.action, _, self.mu, _ = self.network(self.run_dict, reuse=False)
        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
    def get_action(self, obs, is_determenistic = True):
        # Deterministic play uses the Gaussian mean; stochastic play samples.
        if is_determenistic:
            ret_action = self.mu
        else:
            ret_action = self.action
        if self.network.is_rnn():
            action, self.last_state = self.sess.run([ret_action, self.lstm_state], {self.obs_ph : obs, self.states_ph : self.last_state, self.masks_ph : self.mask})
        else:
            action = self.sess.run([ret_action], {self.obs_ph : obs})
        action = np.squeeze(action)
        # Clip to the canonical range, then rescale to the env action bounds.
        return  rescale_actions(self.actions_low, self.actions_high, np.clip(action, -1.0, 1.0))
    def restore(self, fn):
        self.saver.restore(self.sess, fn)
    def reset(self):
        # Reset recurrent state between episodes; feed-forward policies are stateless.
        if self.network.is_rnn():
            self.last_state = self.initial_state
        #self.mask = [True]
class PpoPlayerDiscrete(BasePlayer):
    """Evaluation player for a discrete-action PPO policy (TF1 graph mode).

    Supports optional per-step action masks and multi-agent batched inference
    (one row per agent).
    """
    def __init__(self, sess, config):
        BasePlayer.__init__(self, sess, config)
        self.network = config['network']
        self.use_action_masks = config.get('use_action_masks', False)
        self.obs_ph = tf.placeholder(self.obs_space.dtype, (None, ) + self.obs_space.shape, name = 'obs')
        self.actions_num = self.action_space.n
        if self.use_action_masks:
            print('using masks for action')
            self.action_mask_ph = tf.placeholder('int32', (None, self.actions_num), name = 'actions_mask')
        else:
            self.action_mask_ph = None
        # One done-mask entry per agent for RNN policies.
        self.mask = [False] * self.num_agents
        self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
        self.normalize_input = self.config['normalize_input']
        self.input_obs = self.obs_ph
        # Image observations are converted to floats in [0, 1].
        if self.obs_space.dtype == np.uint8:
            self.input_obs = tf.to_float(self.input_obs) / 255.0
        if self.normalize_input:
            # train=False: apply stored statistics without updating them.
            self.moving_mean_std = MovingMeanStd(shape = self.obs_space.shape, epsilon = 1e-5, decay = 0.99)
            self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=False)
        self.run_dict = {
            'name' : 'agent',
            'inputs' : self.input_obs,
            'batch_num' : self.num_agents,
            'games_num' : self.num_agents,
            'actions_num' : self.actions_num,
            'prev_actions_ph' : None,
            'action_mask_ph' : self.action_mask_ph
        }
        self.last_state = None
        if self.network.is_rnn():
            self.neglop , self.value, self.action, _,self.states_ph, self.masks_ph, self.lstm_state, self.initial_state, self.logits = self.network(self.run_dict, reuse=False)
            # NOTE(review): multiplying the initial state by num_agents looks
            # like it is meant to *tile* it per agent — confirm the intended
            # semantics against the network's state layout.
            self.last_state = self.initial_state * self.num_agents
        else:
            self.neglop , self.value, self.action, _, self.logits = self.network(self.run_dict, reuse=False)
        self.variables = TensorFlowVariables([self.neglop, self.value, self.action], self.sess)
        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
    def get_action(self, obs, is_determenistic = True):
        ret_action = self.action
        if self.network.is_rnn():
            action, self.last_state, logits = self.sess.run([ret_action, self.lstm_state, self.logits], {self.obs_ph : obs, self.states_ph : self.last_state, self.masks_ph : self.mask})
        else:
            action, logits = self.sess.run([ret_action, self.logits], {self.obs_ph : obs})
        # Deterministic play = argmax over logits; stochastic play = sampled action.
        if is_determenistic:
            return np.argmax(logits, axis = -1).astype(np.int32)
        else:
            return int(np.squeeze(action))
    def get_masked_action(self, obs, mask, is_determenistic = False):
        #if is_determenistic:
        ret_action = self.action
        if self.network.is_rnn():
            action, self.last_state, logits = self.sess.run([ret_action, self.lstm_state, self.logits], {self.action_mask_ph : mask, self.obs_ph : obs, self.states_ph : self.last_state, self.masks_ph : self.mask})
        else:
            action, logits = self.sess.run([ret_action, self.logits], {self.action_mask_ph : mask, self.obs_ph : obs})
        if is_determenistic:
            logits = np.array(logits)
            return np.argmax(logits, axis = -1).astype(np.int32)
        else:
            return np.squeeze(action).astype(np.int32)
    def restore(self, fn):
        self.saver.restore(self.sess, fn)
    def reset(self):
        # Reset recurrent state between episodes; feed-forward policies are stateless.
        if self.network.is_rnn():
            self.last_state = self.initial_state
class DQNPlayer(BasePlayer):
    """Evaluation wrapper around a trained DQN agent."""
    def __init__(self, sess, config):
        BasePlayer.__init__(self, sess, config)
        self.dqn = dqnagent.DQNAgent(sess, 'player', self.obs_space, self.action_space, config)

    def get_action(self, obs, is_determenistic = False):
        # epsilon = 0.0: act fully greedily during evaluation.
        return self.dqn.get_action(np.squeeze(obs), 0.0)

    def restore(self, fn):
        self.dqn.restore(fn)

    def reset(self):
        # BUG FIX: this class never defines self.network, so the RNN-reset code
        # copied from the PPO players always raised AttributeError. The DQN
        # player keeps no recurrent state, so only reset if a network with RNN
        # state was ever attached.
        network = getattr(self, 'network', None)
        if network is not None and network.is_rnn():
            self.last_state = self.initial_state
| 10,057 |
Python
| 38.754941 | 213 | 0.577608 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/networks.py
|
import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions
def normc_initializer(std=1.0):
    """Return an initializer whose weight columns have L2 norm `std`."""
    def _initializer(shape, dtype=None, partition_info=None):  # pylint: disable=W0613
        weights = np.random.randn(*shape).astype(np.float32)
        column_norms = np.sqrt(np.square(weights).sum(axis=0, keepdims=True))
        weights *= std / column_norms
        return tf.constant(weights)
    return _initializer
def sample_noise(shape, mean = 0.0, std = 1.0):
    """Draw a tensor of Gaussian noise with the given shape (used by NoisyNet)."""
    return tf.random_normal(shape, mean = mean, stddev = std)
# Added by Andrew Liao
# for NoisyNet-DQN (using Factorised Gaussian noise)
# modified from ```dense``` function
def noisy_dense(inputs, units, name, bias=True, activation=tf.identity, mean = 0.0, std = 1.0):
    """NoisyNet dense layer with factorised Gaussian noise (Fortunato et al.).

    Weights are w = w_mu + w_sigma * eps, where eps is the outer product of two
    factorised noise vectors; fresh noise is sampled on every graph evaluation.
    """
    # the function used in eq.7,8
    def f(x):
        return tf.multiply(tf.sign(x), tf.pow(tf.abs(x), 0.5))
    # Initializer of \mu and \sigma
    mu_init = tf.random_uniform_initializer(minval=-1*1/np.power(inputs.get_shape().as_list()[1], 0.5),
                                                maxval=1*1/np.power(inputs.get_shape().as_list()[1], 0.5))
    sigma_init = tf.constant_initializer(0.4/np.power(inputs.get_shape().as_list()[1], 0.5))
    # Sample noise from gaussian
    # Factorised noise: one vector per input (p) and one per output (q).
    p = sample_noise([inputs.get_shape().as_list()[1], 1], mean = 0.0, std = 1.0)
    q = sample_noise([1, units], mean = 0.0, std = 1.0)
    f_p = f(p); f_q = f(q)
    w_epsilon = f_p*f_q; b_epsilon = tf.squeeze(f_q)
    # w = w_mu + w_sigma*w_epsilon
    w_mu = tf.get_variable(name + "/w_mu", [inputs.get_shape()[1], units], initializer=mu_init)
    w_sigma = tf.get_variable(name + "/w_sigma", [inputs.get_shape()[1], units], initializer=sigma_init)
    w = w_mu + tf.multiply(w_sigma, w_epsilon)
    ret = tf.matmul(inputs, w)
    if bias:
        # b = b_mu + b_sigma*b_epsilon
        b_mu = tf.get_variable(name + "/b_mu", [units], initializer=mu_init)
        b_sigma = tf.get_variable(name + "/b_sigma", [units], initializer=sigma_init)
        b = b_mu + tf.multiply(b_sigma, b_epsilon)
        return activation(ret + b)
    else:
        return activation(ret)
def batch_to_seq(h, nbatch, nsteps, flat=False):
    """Split a flat (nbatch*nsteps) batch into a list of nsteps per-step tensors."""
    if flat:
        reshaped = tf.reshape(h, [nbatch, nsteps])
    else:
        reshaped = tf.reshape(h, [nbatch, nsteps, -1])
    step_slices = tf.split(axis=1, num_or_size_splits=nsteps, value=reshaped)
    return [tf.squeeze(step, [1]) for step in step_slices]
def seq_to_batch(h, flat = False):
    """Inverse of batch_to_seq: merge a list of per-step tensors into one batch."""
    step_shape = h[0].get_shape().as_list()
    if flat:
        return tf.reshape(tf.stack(values=h, axis=1), [-1])
    assert(len(step_shape) > 1)
    feature_dim = h[0].get_shape()[-1].value
    return tf.reshape(tf.concat(axis=1, values=h), [-1, feature_dim])
def ortho_init(scale=1.0):
    """Orthogonal weight initializer (lasagne-style) for 2-D dense and 4-D NHWC
    conv kernels; returns a float32 numpy array scaled by `scale`."""
    def _ortho_init(shape, dtype, partition_info=None):
        shape = tuple(shape)
        if len(shape) == 2:
            flat_shape = shape
        elif len(shape) == 4:  # assumes NHWC
            flat_shape = (np.prod(shape[:-1]), shape[-1])
        else:
            raise NotImplementedError
        gaussian = np.random.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        # pick the factor with the correct shape
        basis = u if u.shape == flat_shape else v
        basis = basis.reshape(shape)
        return (scale * basis[:shape[0], :shape[1]]).astype(np.float32)
    return _ortho_init
def lstm(xs, ms, s, scope, nh, nin):
    """Step-unrolled LSTM over a list of per-step inputs (OpenAI Baselines style).

    Args:
        xs: list of per-step input tensors (mutated in place with hidden outputs).
        ms: list of per-step done masks; a done (m=1) zeroes the carried state.
        s: concatenated [cell, hidden] state tensor.
        scope: variable scope name; nh: hidden units; nin: input feature size.
    Returns:
        (xs, s): hidden outputs per step and the final concatenated state.
    """
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(), dtype=tf.float32 )
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init() )
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        # Zero the recurrent state where the previous step ended an episode.
        c = c*(1-m)
        h = h*(1-m)
        # Single matmul produces all four gate pre-activations.
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
    """Layer normalization: standardize x along `axes`, then apply gain g and bias b."""
    mean, variance = tf.nn.moments(x, axes=axes, keep_dims=True)
    standardized = (x - mean) / tf.sqrt(variance + e)
    return standardized * g + b
def lnlstm(xs, ms, s, scope, nh, nin):
    """Step-unrolled layer-normalized LSTM (OpenAI Baselines style).

    Same contract as `lstm`, but layer normalization is applied to the input
    and recurrent projections and to the cell state before the output gate.

    Args:
        xs: list of per-step input tensors (mutated in place with hidden outputs).
        ms: list of per-step done masks; a done (m=1) zeroes the carried state.
        s: concatenated [cell, hidden] state tensor.
        scope: variable scope name; nh: hidden units; nin: input feature size.
    Returns:
        (xs, s): hidden outputs per step and the final concatenated state.
    """
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init())
        gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
        bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init())
        gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
        bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
        gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
        bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    # CLEANUP: removed leftover debug counter `tk` and its print() inside the
    # unroll loop, which spammed stdout during graph construction.
    for idx, (x, m) in enumerate(zip(xs, ms)):
        # Zero the recurrent state where the previous step ended an episode.
        c = c*(1-m)
        h = h*(1-m)
        z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(_ln(c, gc, bc))
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
'''
Uses the LSTM implementation from OpenAI Baselines, as it is the most convenient way to handle episode-done masks.
TODO: replace it with a more efficient native TensorFlow implementation.
'''
def openai_lstm(name, inputs, states_ph, dones_ph, units, env_num, batch_num, layer_norm=True):
    """Wrap the Baselines-style (ln)lstm for a flat (env_num*nsteps) batch.

    Args:
        name: variable scope for the recurrent weights.
        inputs: flat batch of per-step features, shape (batch_num, features).
        states_ph: placeholder for the concatenated [cell, hidden] state.
        dones_ph: per-step done flags used to reset the recurrent state.
        units: hidden size; env_num: parallel envs; batch_num: env_num * nsteps.
        layer_norm: use the layer-normalized variant when True.
    Returns:
        [hidden, final_state, initial_state] where `hidden` is flattened back
        to the (batch_num, units) layout and `initial_state` is a zero array.
    """
    nbatch = batch_num
    nsteps = nbatch // env_num
    # CLEANUP: removed leftover debug print() calls that ran at graph build time.
    dones_ph = tf.to_float(dones_ph)
    inputs_seq = batch_to_seq(inputs, env_num, nsteps)
    dones_seq = batch_to_seq(dones_ph, env_num, nsteps)
    nin = inputs.get_shape()[1].value
    with tf.variable_scope(name):
        if layer_norm:
            hidden_seq, final_state = lnlstm(inputs_seq, dones_seq, states_ph, scope='lnlstm', nin=nin, nh=units)
        else:
            hidden_seq, final_state = lstm(inputs_seq, dones_seq, states_ph, scope='lstm', nin=nin, nh=units)
    hidden = seq_to_batch(hidden_seq)
    initial_state = np.zeros(states_ph.shape.as_list(), dtype=float)
    return [hidden, final_state, initial_state]
def distributional_output(inputs, actions_num, atoms_num):
    """C51-style head: per-action categorical distribution over `atoms_num` atoms.

    BUG FIX: the dense layer previously used a softmax activation over the
    flattened (actions*atoms) axis and then a second softmax was applied per
    atom axis, double-normalizing the logits. The dense layer now emits raw
    logits and a single softmax normalizes over the atom axis.
    """
    logits = tf.layers.dense(inputs=inputs, units=atoms_num * actions_num)
    logits = tf.reshape(logits, shape = [-1, actions_num, atoms_num])
    return tf.nn.softmax(logits, dim = -1)
def distributional_noisy_output(inputs, actions_num, atoms_num, name, mean = 0.0, std = 1.0):
    """NoisyNet variant of the C51 distributional head.

    BUG FIX: as in distributional_output, the noisy layer previously applied a
    softmax over the flattened (actions*atoms) axis before the per-atom softmax,
    double-normalizing the logits. The noisy layer now emits raw logits.
    """
    logits = noisy_dense(inputs=inputs, name=name, activation=tf.identity, units=atoms_num * actions_num, mean=mean, std=std)
    logits = tf.reshape(logits, shape = [-1, actions_num, atoms_num])
    return tf.nn.softmax(logits, dim = -1)
def atari_conv_net(inputs):
    """Standard 3-layer DQN convolutional stack (8x8/4 -> 4x4/2 -> 3x3/1, ReLU)."""
    layer_specs = (
        (32, [8, 8], (4, 4)),
        (64, [4, 4], (2, 2)),
        (64, [3, 3], (1, 1)),
    )
    out = inputs
    for num_filters, kernel, stride in layer_specs:
        out = tf.layers.conv2d(inputs=out,
                               filters=num_filters,
                               kernel_size=kernel,
                               strides=stride,
                               activation=tf.nn.relu)
    return out
def dqn_network(name, inputs, actions_num, atoms_num = 1, reuse=False):
    """DQN head over the Atari conv trunk.

    With atoms_num == 1 returns plain per-action Q-values; otherwise returns a
    distributional (C51-style) output via distributional_output.
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES = 512
        conv3 = atari_conv_net(inputs)
        flatten = tf.contrib.layers.flatten(inputs = conv3)
        hidden = tf.layers.dense(inputs=flatten,
                                 units=NUM_HIDDEN_NODES,
                                 activation=tf.nn.relu)
        if atoms_num == 1:
            # plain Q-values, one per action
            logits = tf.layers.dense(inputs=hidden, units=actions_num)
        else:
            logits = distributional_output(inputs=hidden, actions_num=actions_num, atoms_num=atoms_num)
        return logits
'''
dueling_type = 'SIMPLE', 'AVERAGE', 'MAX'
'''
def dueling_dqn_network(name, inputs, actions_num, reuse=False, dueling_type = 'AVERAGE'):
    """Dueling DQN head: separate value and advantage streams over the Atari trunk.

    dueling_type selects how the streams are merged:
      'SIMPLE'  -> V + A
      'AVERAGE' -> V + A - mean(A)   (the standard dueling formulation)
      'MAX'     -> V + A - max(A)

    Raises:
        ValueError: for an unrecognized dueling_type (the original silently
        returned None, which only failed later at session run time).
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES = 512
        conv3 = atari_conv_net(inputs)
        flatten = tf.contrib.layers.flatten(inputs = conv3)
        hidden_value = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
        hidden_advantage = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
        value = tf.layers.dense(inputs=hidden_value, units=1)
        advantage = tf.layers.dense(inputs=hidden_advantage, units=actions_num)
        if dueling_type == 'SIMPLE':
            outputs = value + advantage
        elif dueling_type == 'AVERAGE':
            outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
        elif dueling_type == 'MAX':
            outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
        else:
            raise ValueError("dueling_dqn_network: unknown dueling_type '{}', "
                             "expected 'SIMPLE', 'AVERAGE' or 'MAX'".format(dueling_type))
        return outputs
def dueling_dqn_network_with_batch_norm(name, inputs, actions_num, reuse=False, dueling_type = 'AVERAGE', is_train=True):
    """Dueling DQN head using the batch-normalized Atari trunk.

    Identical to dueling_dqn_network except the conv trunk applies batch norm
    (is_train toggles BN's training mode).

    Raises:
        ValueError: for an unrecognized dueling_type (previously returned None
        silently, deferring the failure to session run time).
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES = 512
        conv3 = atari_conv_net_batch_norm(inputs, is_train)
        flatten = tf.contrib.layers.flatten(inputs = conv3)
        hidden_value = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
        hidden_advantage = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
        value = tf.layers.dense(inputs=hidden_value, units=1)
        advantage = tf.layers.dense(inputs=hidden_advantage, units=actions_num)
        if dueling_type == 'SIMPLE':
            outputs = value + advantage
        elif dueling_type == 'AVERAGE':
            outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
        elif dueling_type == 'MAX':
            outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
        else:
            raise ValueError("dueling_dqn_network_with_batch_norm: unknown dueling_type '{}', "
                             "expected 'SIMPLE', 'AVERAGE' or 'MAX'".format(dueling_type))
        return outputs
def noisy_dqn_network(name, inputs, actions_num, mean, std, atoms_num = 1, reuse=False):
    """NoisyNet DQN head: noisy dense layers for parameter-space exploration.

    With atoms_num == 1 returns Q-values; otherwise a distributional output.
    NOTE(review): the first noisy layer ('noisy_fc1') does not receive the
    mean/std arguments and so uses noisy_dense defaults, unlike the output
    layer — confirm this asymmetry is intended.
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES = 512
        conv3 = atari_conv_net(inputs)
        flatten = tf.contrib.layers.flatten(inputs = conv3)
        hidden = noisy_dense(inputs=flatten,
                             units=NUM_HIDDEN_NODES,
                             activation=tf.nn.relu, name = 'noisy_fc1')
        if atoms_num == 1:
            logits = noisy_dense(inputs=hidden, units=actions_num, name = 'noisy_fc2', mean = mean, std = std)
        else:
            logits = distributional_noisy_output(inputs=hidden, actions_num=actions_num, atoms_num = atoms_num, name = 'noisy_fc2', mean = mean, std = std)
        return logits
'''
dueling_type = 'SIMPLE', 'AVERAGE', 'MAX'
'''
def noisy_dueling_dqn_network(name, inputs, actions_num, mean, std, reuse=False, dueling_type = 'AVERAGE'):
    """Dueling DQN head with NoisyNet dense layers in both streams.

    dueling_type in {'SIMPLE', 'AVERAGE', 'MAX'} selects the merge rule.
    NOTE(review): an unrecognized dueling_type falls through and returns None;
    the failure then surfaces only at graph-use time.
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES = 512
        conv3 = atari_conv_net(inputs)
        flatten = tf.contrib.layers.flatten(inputs = conv3)
        hidden_value = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_v1', mean = mean, std = std)
        hidden_advantage = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_a1', mean = mean, std = std)
        value = noisy_dense(inputs=hidden_value, units=1, name = 'noisy_v2', mean = mean, std = std)
        advantage = noisy_dense(inputs=hidden_advantage, units=actions_num, name = 'noisy_a2', mean = mean, std = std)
        outputs = None
        if dueling_type == 'SIMPLE':
            outputs = value + advantage
        if dueling_type == 'AVERAGE':
            outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
        if dueling_type == 'MAX':
            outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
        return outputs
def noisy_dueling_dqn_network_with_batch_norm(name, inputs, actions_num, mean, std, reuse=False, dueling_type = 'AVERAGE', is_train=True):
    """Noisy dueling DQN head over the batch-normalized Atari trunk.

    Same as noisy_dueling_dqn_network but the conv trunk uses batch norm
    (is_train selects BN training mode).
    NOTE(review): an unrecognized dueling_type falls through and returns None.
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES = 512
        conv3 = atari_conv_net_batch_norm(inputs, is_train)
        flatten = tf.contrib.layers.flatten(inputs = conv3)
        hidden_value = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_v1', mean = mean, std = std)
        hidden_advantage = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_a1', mean = mean, std = std)
        value = noisy_dense(inputs=hidden_value, units=1, name = 'noisy_v2', mean = mean, std = std)
        advantage = noisy_dense(inputs=hidden_advantage, units=actions_num, name = 'noisy_a2', mean = mean, std = std)
        outputs = None
        if dueling_type == 'SIMPLE':
            outputs = value + advantage
        if dueling_type == 'AVERAGE':
            outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
        if dueling_type == 'MAX':
            outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
        return outputs
def normc_initializer(std=1.0):
    """Build a TF weight initializer with columns normalized to L2-norm `std`.

    Each column (axis 0) of the random normal sample is rescaled so its
    Euclidean norm equals `std` — the initializer popularized by OpenAI
    Baselines for policy networks.
    """
    def _initializer(shape, dtype=None, partition_info=None):
        weights = np.random.randn(*shape).astype(np.float32)
        col_norms = np.sqrt(np.square(weights).sum(axis=0, keepdims=True))
        weights *= std / col_norms
        return tf.constant(weights)
    return _initializer
def default_small_a2c_network_separated(name, inputs, actions_num, continuous=False, reuse=False, activation=tf.nn.elu):
    """Small A2C network with separate actor and critic MLP towers (128-64-32).

    Continuous mode returns (mu, var, value); discrete mode returns
    (logits, value). All hidden layers use normc initialization.
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES0 = 128
        NUM_HIDDEN_NODES1 = 64
        NUM_HIDDEN_NODES2 = 32
        # critic tower
        hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
        hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
        hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
        # actor tower
        hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
        hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
        hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
        value = tf.layers.dense(inputs=hidden2c, units=1, activation=None)
        if continuous:
            # small-init mu head stabilizes early training
            mu = tf.layers.dense(inputs=hidden2a, units=actions_num, kernel_initializer=normc_initializer(0.01), activation=None)
            var = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
            return mu, var, value
        else:
            logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
            return logits, value
def default_a2c_network_separated(name, inputs, actions_num, continuous=False, reuse=False, activation=tf.nn.elu):
    """Default A2C network with separate actor and critic MLP towers (256-128-64).

    Continuous mode returns (mu, var, value); discrete mode returns
    (logits, value).

    Bug fix: the value layer referenced an undefined name `hidden_init`,
    raising NameError at graph construction; it is now defined as
    normc_initializer(1.0), matching default_a2c_network_separated_logstd.
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES0 = 256
        NUM_HIDDEN_NODES1 = 128
        NUM_HIDDEN_NODES2 = 64
        hidden_init = normc_initializer(1.0)
        # critic tower
        hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
        hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
        hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
        # actor tower
        hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
        hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
        hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
        value = tf.layers.dense(inputs=hidden2c, units=1, activation=None, kernel_initializer=hidden_init)
        if continuous:
            # small-init mu head stabilizes early training
            mu = tf.layers.dense(inputs=hidden2a, units=actions_num, kernel_initializer=normc_initializer(0.01), activation=None)
            var = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
            return mu, var, value
        else:
            logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
            return logits, value
def default_a2c_network_separated_logstd(name, inputs, actions_num, continuous=False, reuse=False):
    """Separated A2C network whose continuous head uses a state-independent log-std.

    Instead of predicting variance from the state, a single trainable
    'log_std' vector (initialized to 0) is shared across all states.
    Returns (mu, logstd_broadcast, value) for continuous mode, or
    (logits, value) for discrete mode.
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES0 = 256
        NUM_HIDDEN_NODES1 = 128
        NUM_HIDDEN_NODES2 = 64
        hidden_init = normc_initializer(1.0) # tf.random_normal_initializer(stddev= 1.0)
        hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu, kernel_initializer=hidden_init)
        hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu, kernel_initializer=hidden_init)
        hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, activation=tf.nn.elu, kernel_initializer=hidden_init)
        hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu, kernel_initializer=hidden_init)
        hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu, kernel_initializer=hidden_init)
        hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, activation=tf.nn.elu, kernel_initializer=hidden_init)
        value = tf.layers.dense(inputs=hidden2c, units=1, activation=None, kernel_initializer=hidden_init)
        if continuous:
            mu = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None,)
            #std = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
            #logstd = tf.layers.dense(inputs=hidden2a, units=actions_num)
            logstd = tf.get_variable(name='log_std', shape=(actions_num), initializer=tf.constant_initializer(0.0), trainable=True)
            # mu * 0 + logstd broadcasts the shared logstd to the batch shape of mu
            return mu, mu * 0 + logstd, value
        else:
            logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
            return logits, value
def default_a2c_network(name, inputs, actions_num, continuous=False, reuse=False):
    """Shared-trunk A2C network (256-128-64 ReLU MLP).

    Actor and critic heads branch from the same trunk. Continuous mode
    returns (mu, var, value); discrete mode returns (logits, value).
    """
    with tf.variable_scope(name, reuse=reuse):
        trunk = inputs
        for width in (256, 128, 64):
            trunk = tf.layers.dense(inputs=trunk, units=width, activation=tf.nn.relu)
        value = tf.layers.dense(inputs=trunk, units=1, activation=None)
        if not continuous:
            logits = tf.layers.dense(inputs=trunk, units=actions_num, activation=None)
            return logits, value
        mu = tf.layers.dense(inputs=trunk, units=actions_num, activation=tf.nn.tanh)
        var = tf.layers.dense(inputs=trunk, units=actions_num, activation=tf.nn.softplus)
        return mu, var, value
def default_a2c_lstm_network(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
    """Shared-trunk A2C network (128-64-64 ReLU MLP) followed by a 64-unit LSTM.

    Creates its own dones/states placeholders for the recurrence.
    Returns (heads..., states_ph, dones_ph, lstm_state, initial_state).
    """
    env_num = games_num
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES0 = 128
        NUM_HIDDEN_NODES1 = 64
        NUM_HIDDEN_NODES2 = 64
        LSTM_UNITS = 64
        hidden0 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.relu)
        hidden1 = tf.layers.dense(inputs=hidden0, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
        hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
        dones_ph = tf.placeholder(tf.float32, [batch_num])
        # state holds both c and h, hence 2*LSTM_UNITS per environment
        states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
        lstm_out, lstm_state, initial_state = openai_lstm('lstm_ac', hidden2, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
        value = tf.layers.dense(inputs=lstm_out, units=1, activation=None)
        if continuous:
            mu = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.tanh)
            var = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.softplus)
            return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
        else:
            logits = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=None)
            return logits, value, states_ph, dones_ph, lstm_state, initial_state
def default_a2c_lstm_network_separated(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
    """Separated actor/critic MLP towers that share a single LSTM.

    The actor and critic tower outputs are concatenated, passed through one
    LSTM, and the LSTM output is split back into actor and critic halves.
    Returns (heads..., states_ph, dones_ph, lstm_state, initial_state).
    """
    env_num = games_num
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES0 = 256
        NUM_HIDDEN_NODES1 = 128
        NUM_HIDDEN_NODES2 = 64
        LSTM_UNITS = 128
        hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu)
        hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu)
        hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu)
        hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu)
        dones_ph = tf.placeholder(tf.bool, [batch_num])
        states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
        # fuse the towers so one LSTM carries both actor and critic memory
        hidden = tf.concat((hidden1a, hidden1c), axis=1)
        lstm_out, lstm_state, initial_state = openai_lstm('lstm_a', hidden, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
        lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
        value = tf.layers.dense(inputs=lstm_outc, units=1, activation=None)
        if continuous:
            mu = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=None, kernel_initializer=tf.random_uniform_initializer(-0.01, 0.01))
            var = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=tf.nn.softplus)
            return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
        else:
            logits = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=None)
            return logits, value, states_ph, dones_ph, lstm_state, initial_state
def simple_a2c_lstm_network_separated(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
    """Small A2C network with fully separate actor and critic LSTMs.

    The joint states placeholder packs both LSTMs' (c, h) states and is split
    into actor/critic halves; the initial states and final states are
    re-concatenated for the caller.
    """
    env_num = games_num
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES1 = 32
        NUM_HIDDEN_NODES2 = 32
        #NUM_HIDDEN_NODES3 = 16
        LSTM_UNITS = 16
        hidden1c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
        hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
        hidden1a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
        hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
        dones_ph = tf.placeholder(tf.bool, [batch_num])
        # two LSTMs, each needing 2*LSTM_UNITS of (c, h) state per environment
        states_ph = tf.placeholder(tf.float32, [env_num, 2* 2*LSTM_UNITS])
        states_a, states_c = tf.split(states_ph, 2, axis=1)
        lstm_outa, lstm_statae, initial_statea = openai_lstm('lstm_actions', hidden2a, dones_ph=dones_ph, states_ph=states_a, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
        lstm_outc, lstm_statec, initial_statec = openai_lstm('lstm_critics', hidden2c, dones_ph=dones_ph, states_ph=states_c, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
        initial_state = np.concatenate((initial_statea, initial_statec), axis=1)
        lstm_state = tf.concat( values=(lstm_statae, lstm_statec), axis=1)
        #lstm_outa = tf.layers.dense(inputs=lstm_outa, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
        #lstm_outc = tf.layers.dense(inputs=lstm_outc, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
        value = tf.layers.dense(inputs=lstm_outc, units=1, activation=None)
        if continuous:
            mu = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=tf.nn.tanh)
            var = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=tf.nn.softplus)
            return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
        else:
            logits = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=None)
            return logits, value, states_ph, dones_ph, lstm_state, initial_state
def simple_a2c_lstm_network(name, inputs, actions_num, env_num, batch_num, continuous=False, reuse=False):
    """Small shared-trunk A2C network (32-32 ReLU MLP) with a 16-unit LSTM.

    Returns (heads..., states_ph, dones_ph, lstm_state, initial_state).
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES1 = 32
        NUM_HIDDEN_NODES2 = 32
        LSTM_UNITS = 16
        hidden1 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
        hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
        dones_ph = tf.placeholder(tf.bool, [batch_num])
        # (c, h) packed per environment, hence 2*LSTM_UNITS
        states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
        lstm_out, lstm_state, initial_state = openai_lstm('lstm_ac', hidden2, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
        value = tf.layers.dense(inputs=lstm_out, units=1, activation=None)
        if continuous:
            mu = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.tanh)
            var = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.softplus)
            return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
        else:
            logits = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=None)
            return logits, value, states_ph, dones_ph, lstm_state, initial_state
def simple_a2c_network_separated(name, inputs, actions_num, activation = tf.nn.elu, continuous=False, reuse=False):
    """Small A2C network with separate 64-64 actor and critic MLP towers.

    Continuous mode returns (mu, var, value); discrete mode returns
    (logits, value).
    """
    with tf.variable_scope(name, reuse=reuse):
        width1, width2 = 64, 64
        # critic tower
        critic = tf.layers.dense(inputs=inputs, units=width1, activation=activation)
        critic = tf.layers.dense(inputs=critic, units=width2, activation=activation)
        # actor tower
        actor = tf.layers.dense(inputs=inputs, units=width1, activation=activation)
        actor = tf.layers.dense(inputs=actor, units=width2, activation=activation)
        value = tf.layers.dense(inputs=critic, units=1, activation=None)
        if continuous:
            mu = tf.layers.dense(inputs=actor, units=actions_num, activation=tf.nn.tanh)
            var = tf.layers.dense(inputs=actor, units=actions_num, activation=tf.nn.softplus)
            return mu, var, value
        logits = tf.layers.dense(inputs=actor, units=actions_num, activation=None)
        return logits, value
def simple_a2c_network(name, inputs, actions_num, continuous=False, reuse=False):
    """Small shared-trunk A2C network (128-64 ReLU MLP).

    Continuous mode returns (mu, var, value); discrete mode returns
    (logits, value).
    """
    with tf.variable_scope(name, reuse=reuse):
        trunk = inputs
        for width in (128, 64):
            trunk = tf.layers.dense(inputs=trunk, units=width, activation=tf.nn.relu)
        value = tf.layers.dense(inputs=trunk, units=1, activation=None)
        if not continuous:
            logits = tf.layers.dense(inputs=trunk, units=actions_num, activation=None)
            return logits, value
        mu = tf.layers.dense(inputs=trunk, units=actions_num, activation=tf.nn.tanh)
        var = tf.layers.dense(inputs=trunk, units=actions_num, activation=tf.nn.softplus)
        return mu, var, value
def atari_a2c_network_separated(name, inputs, actions_num, continuous=False, reuse=False):
    """Atari A2C network with fully separate actor and critic conv trunks.

    Each stream gets its own atari_conv_net + 512-unit dense layer.
    Continuous mode returns (mu, var, value); discrete returns (logits, value).
    """
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES = 512
        conv3a = atari_conv_net(inputs)
        conv3c = atari_conv_net(inputs)
        flattena = tf.contrib.layers.flatten(inputs = conv3a)
        flattenc = tf.contrib.layers.flatten(inputs = conv3c)
        hiddena = tf.layers.dense(inputs=flattena, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
        hiddenc = tf.layers.dense(inputs=flattenc, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
        value = tf.layers.dense(inputs=hiddenc, units=1, activation=None)
        if continuous:
            mu = tf.layers.dense(inputs=hiddena, units=actions_num, activation=tf.nn.tanh)
            var = tf.layers.dense(inputs=hiddena, units=actions_num, activation=tf.nn.softplus)
            return mu, var, value
        else:
            logits = tf.layers.dense(inputs=hiddena, units=actions_num, activation=None)
            return logits, value
def atari_a2c_network(name, inputs, actions_num, continuous=False, reuse=False):
    """Shared-trunk Atari A2C network: conv trunk + 512-unit dense layer.

    Continuous mode returns (mu, var, value); discrete mode returns
    (logits, value).
    """
    with tf.variable_scope(name, reuse=reuse):
        features = atari_conv_net(inputs)
        features = tf.contrib.layers.flatten(inputs = features)
        features = tf.layers.dense(inputs=features, units=512, activation=tf.nn.relu)
        value = tf.layers.dense(inputs=features, units=1, activation=None)
        if not continuous:
            logits = tf.layers.dense(inputs=features, units=actions_num, activation=None)
            return logits, value
        mu = tf.layers.dense(inputs=features, units=actions_num, activation=tf.nn.tanh)
        var = tf.layers.dense(inputs=features, units=actions_num, activation=tf.nn.softplus)
        return mu, var, value
def atari_a2c_network_lstm(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
    """Atari A2C network: conv trunk + 512-unit dense + 256-unit LSTM.

    Returns (heads..., states_ph, dones_ph, lstm_state, initial_state).
    """
    env_num = games_num
    with tf.variable_scope(name, reuse=reuse):
        NUM_HIDDEN_NODES = 512
        LSTM_UNITS = 256
        conv3 = atari_conv_net(inputs)
        flatten = tf.contrib.layers.flatten(inputs = conv3)
        hidden = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
        dones_ph = tf.placeholder(tf.bool, [batch_num])
        # (c, h) packed per environment, hence 2*LSTM_UNITS
        states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
        lstm_out, lstm_state, initial_state = openai_lstm('lstm_ac', hidden, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
        value = tf.layers.dense(inputs=lstm_out, units=1, activation=None)
        if continuous:
            mu = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.tanh)
            var = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.softplus)
            return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
        else:
            logits = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=None)
            return logits, value, states_ph, dones_ph, lstm_state, initial_state
| 32,489 |
Python
| 50.984 | 181 | 0.645141 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/a2c_discrete.py
|
from rl_games.common import tr_helpers, vecenv
#from rl_games.algos_tf14 import networks
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque, OrderedDict
from tensorboardX import SummaryWriter
import gym
from datetime import datetime
def swap_and_flatten01(arr):
    """Transpose axes 0 and 1 of an array and merge them into one leading axis.

    Used to turn (time, env, ...) rollout arrays into flat (env*time, ...)
    minibatches. A None input passes through unchanged.
    """
    if arr is None:
        return None
    shape = arr.shape
    return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])
class A2CAgent:
    def __init__(self, sess, base_name, observation_space, action_space, config):
        """Build the discrete-action A2C/PPO TF1 graph and runtime state.

        Creates the vectorized environment, placeholders, optional input
        normalization and LR schedules, and instantiates the policy network
        twice: a 'train' copy (minibatch-sized) and a 'run' copy (actor-sized,
        reusing the same variables).
        """
        observation_shape = observation_space.shape
        self.use_action_masks = config.get('use_action_masks', False)
        self.is_train = config.get('is_train', True)
        self.self_play = config.get('self_play', False)
        self.name = base_name
        self.config = config
        self.env_name = config['env_name']
        self.ppo = config['ppo']  # True -> clipped PPO surrogate; False -> vanilla A2C loss
        # exactly one of these schedules may be active, selected by config
        self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
        self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
        self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
        self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
        # epoch counter lives in the graph so schedules can depend on it
        self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
        self.e_clip = config['e_clip']
        self.clip_value = config['clip_value']
        self.network = config['network']
        self.rewards_shaper = config['reward_shaper']
        self.num_actors = config['num_actors']
        self.env_config = self.config.get('env_config', {})
        self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
        self.num_agents = self.vec_env.get_number_of_agents()
        self.horizon_length = config['horizon_length']
        self.seq_len = self.config['seq_length']
        self.normalize_advantage = config['normalize_advantage']
        self.normalize_input = self.config['normalize_input']
        self.state_shape = observation_shape
        self.critic_coef = config['critic_coef']
        self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
        self.sess = sess
        self.grad_norm = config['grad_norm']
        self.gamma = self.config['gamma']
        self.tau = self.config['tau']
        self.ignore_dead_batches = self.config.get('ignore_dead_batches', False)
        # per-(actor, agent) episode bookkeeping
        # NOTE(review): np.bool is removed in modern numpy; fine for the
        # numpy versions contemporary with TF 1.x.
        self.dones = np.asarray([False]*self.num_actors *self.num_agents, dtype=np.bool)
        self.current_rewards = np.asarray([0]*self.num_actors *self.num_agents, dtype=np.float32)
        self.current_lengths = np.asarray([0]*self.num_actors *self.num_agents, dtype=np.float32)
        self.games_to_track = self.config.get('games_to_track', 100)
        self.game_rewards = deque([], maxlen=self.games_to_track)
        self.game_lengths = deque([], maxlen=self.games_to_track)
        self.game_scores = deque([], maxlen=self.games_to_track)
        # 'obs' feeds the train copy; 'target_obs' feeds the run (rollout) copy
        self.obs_ph = tf.placeholder(observation_space.dtype, (None, ) + observation_shape, name = 'obs')
        self.target_obs_ph = tf.placeholder(observation_space.dtype, (None, ) + observation_shape, name = 'target_obs')
        self.actions_num = action_space.n
        self.actions_ph = tf.placeholder('int32', (None,), name = 'actions')
        if self.use_action_masks:
            self.action_mask_ph = tf.placeholder('int32', (None, self.actions_num), name = 'actions_mask')
        else:
            self.action_mask_ph = None
        self.old_logp_actions_ph = tf.placeholder('float32', (None, ), name = 'old_logpactions')
        self.rewards_ph = tf.placeholder('float32', (None,), name = 'rewards')
        self.old_values_ph = tf.placeholder('float32', (None,), name = 'old_values')
        self.advantages_ph = tf.placeholder('float32', (None,), name = 'advantages')
        self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
        self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
        self.current_lr = self.learning_rate_ph
        self.input_obs = self.obs_ph
        self.input_target_obs = self.target_obs_ph
        if observation_space.dtype == np.uint8:
            # image observations: scale to [0, 1]
            self.input_obs = tf.to_float(self.input_obs) / 255.0
            self.input_target_obs = tf.to_float(self.input_target_obs) / 255.0
        if self.is_adaptive_lr:
            self.kl_threshold = config['kl_threshold']
        if self.is_polynom_decay_lr:
            self.lr_multiplier = tf.train.polynomial_decay(1.0, self.epoch_num, config['max_epochs'], end_learning_rate=0.001, power=config.get('decay_power', 1.0))
        if self.is_exp_decay_lr:
            self.lr_multiplier = tf.train.exponential_decay(1.0, self.epoch_num,config['max_epochs'], decay_rate = config['decay_rate'])
        if self.normalize_input:
            # running mean/std updates only on the training path
            self.moving_mean_std = MovingMeanStd(shape = observation_space.shape, epsilon = 1e-5, decay = 0.99)
            self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=True)
            self.input_target_obs = self.moving_mean_std.normalize(self.input_target_obs, train=False)
        games_num = self.config['minibatch_size'] // self.seq_len # it is used only for current rnn implementation
        self.train_dict = {
            'name' : 'agent',
            'inputs' : self.input_obs,
            'batch_num' : self.config['minibatch_size'],
            'games_num' : games_num,
            'actions_num' : self.actions_num,
            'prev_actions_ph' : self.actions_ph,
            'action_mask_ph' : None
        }
        self.run_dict = {
            'name' : 'agent',
            'inputs' : self.input_target_obs,
            'batch_num' : self.num_actors * self.num_agents,
            'games_num' : self.num_actors * self.num_agents,
            'actions_num' : self.actions_num,
            'prev_actions_ph' : None,
            'action_mask_ph' : self.action_mask_ph
        }
        self.states = None
        if self.network.is_rnn():
            # second call reuses variables; extra RNN plumbing is returned
            self.logp_actions ,self.state_values, self.action, self.entropy, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.train_dict, reuse=False)
            self.target_neglogp, self.target_state_values, self.target_action, _, self.target_states_ph, self.target_masks_ph, self.target_lstm_state, self.target_initial_state, self.logits = self.network(self.run_dict, reuse=True)
            self.states = self.target_initial_state
        else:
            self.logp_actions ,self.state_values, self.action, self.entropy = self.network(self.train_dict, reuse=False)
            self.target_neglogp, self.target_state_values, self.target_action, _, self.logits = self.network(self.run_dict, reuse=True)
        self.saver = tf.train.Saver()
        self.variables = TensorFlowVariables([self.target_action, self.target_state_values, self.target_neglogp], self.sess)
        if self.is_train:
            self.setup_losses()
        self.sess.run(tf.global_variables_initializer())
    def setup_losses(self):
        """Build the PPO/A2C loss, optional value clipping, KL-adaptive LR and train op.

        NOTE(review): despite the name, logp_actions appears to hold negative
        log-probs (cf. target_neglogp from the same network call), in which
        case exp(old - new) is the usual ratio pi_new/pi_old — confirm against
        the network implementation.
        """
        curr_e_clip = self.e_clip * self.lr_multiplier  # clip range decays with the LR schedule
        if (self.ppo):
            self.prob_ratio = tf.exp(self.old_logp_actions_ph - self.logp_actions)
            self.pg_loss_unclipped = -tf.multiply(self.advantages_ph, self.prob_ratio)
            self.pg_loss_clipped = -tf.multiply(self.advantages_ph, tf.clip_by_value(self.prob_ratio, 1.- curr_e_clip, 1.+ curr_e_clip))
            # pessimistic (max of losses) PPO surrogate
            self.actor_loss = tf.maximum(self.pg_loss_unclipped, self.pg_loss_clipped)
        else:
            self.actor_loss = self.logp_actions * self.advantages_ph
        self.actor_loss = tf.reduce_mean(self.actor_loss)
        self.c_loss = (tf.squeeze(self.state_values) - self.rewards_ph)**2
        if self.clip_value:
            # PPO-style value clipping around the old value prediction
            self.cliped_values = self.old_values_ph + tf.clip_by_value(tf.squeeze(self.state_values) - self.old_values_ph, - curr_e_clip, curr_e_clip)
            self.c_loss_clipped = tf.square(self.cliped_values - self.rewards_ph)
            self.critic_loss = tf.maximum(self.c_loss, self.c_loss_clipped)
        else:
            self.critic_loss = self.c_loss
        self.critic_loss = tf.reduce_mean(self.critic_loss)
        # quadratic approximation of the policy KL, used for adaptive LR
        self.kl_approx = 0.5 * tf.stop_gradient(tf.reduce_mean((self.old_logp_actions_ph - self.logp_actions)**2))
        if self.is_adaptive_lr:
            self.current_lr = tf.where(self.kl_approx > (2.0 * self.kl_threshold), tf.maximum(self.current_lr / 1.5, 1e-6), self.current_lr)
            self.current_lr = tf.where(self.kl_approx < (0.5 * self.kl_threshold), tf.minimum(self.current_lr * 1.5, 1e-2), self.current_lr)
        self.loss = self.actor_loss + 0.5 * self.critic_coef * self.critic_loss - self.config['entropy_coef'] * self.entropy
        self.reg_loss = tf.losses.get_regularization_loss()
        self.loss += self.reg_loss
        self.train_step = tf.train.AdamOptimizer(self.current_lr * self.lr_multiplier)
        self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
        grads = tf.gradients(self.loss, self.weights)
        if self.config['truncate_grads']:
            grads, _ = tf.clip_by_global_norm(grads, self.grad_norm)
        grads = list(zip(grads, self.weights))
        self.train_op = self.train_step.apply_gradients(grads)
    def update_epoch(self):
        # Increment the in-graph epoch counter and return its new value.
        return self.sess.run([self.update_epoch_op])[0]
    def get_action_values(self, obs):
        """Sample actions and evaluate values/neglogp for a batch of observations.

        Returns (action, value, neglogp, lstm_state) — lstm_state is None for
        non-recurrent networks so callers can always unpack four items.
        """
        run_ops = [self.target_action, self.target_state_values, self.target_neglogp]
        if self.network.is_rnn():
            run_ops.append(self.target_lstm_state)
            return self.sess.run(run_ops, {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
        else:
            return (*self.sess.run(run_ops, {self.target_obs_ph : obs}), None)
    def get_masked_action_values(self, obs, action_masks):
        """Like get_action_values but with per-step valid-action masks; also returns logits.

        Returns (action, value, neglogp, logits, lstm_state) — lstm_state is
        None for non-recurrent networks.
        """
        run_ops = [self.target_action, self.target_state_values, self.target_neglogp, self.logits]
        if self.network.is_rnn():
            run_ops.append(self.target_lstm_state)
            return self.sess.run(run_ops, {self.action_mask_ph: action_masks, self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
        else:
            return (*self.sess.run(run_ops, {self.action_mask_ph: action_masks, self.target_obs_ph : obs}), None)
def get_values(self, obs):
if self.network.is_rnn():
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs})
    def get_weights(self):
        # Flatten the tracked network variables into a single vector
        # (used for weight exchange, e.g. self-play).
        return self.variables.get_flat()
    def set_weights(self, weights):
        # Load a flat weight vector produced by get_weights back into the network.
        return self.variables.set_flat(weights)
    def play_steps(self):
        """Collect a horizon_length rollout and compute GAE returns.

        Steps the vectorized env, records per-step tensors, tracks episode
        statistics, then computes GAE(lambda=tau) advantages/returns.
        Returns flattened (obs, returns, dones, actions, values, neglogpacs,
        states-or-None, epinfos), each swapped from (time, env) to env-major
        order via swap_and_flatten01.
        """
        # here, we init the lists that will contain the mb of experiences
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
        mb_states = []
        epinfos = []
        # for n in range number of steps
        for _ in range(self.horizon_length):
            if self.network.is_rnn():
                mb_states.append(self.states)
            if self.use_action_masks:
                masks = self.vec_env.get_action_masks()
            if self.use_action_masks:
                actions, values, neglogpacs, _, self.states = self.get_masked_action_values(self.obs, masks)
            else:
                actions, values, neglogpacs, self.states = self.get_action_values(self.obs)
            actions = np.squeeze(actions)
            values = np.squeeze(values)
            neglogpacs = np.squeeze(neglogpacs)
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(self.dones.copy())
            self.obs[:], rewards, self.dones, infos = self.vec_env.step(actions)
            self.current_rewards += rewards
            self.current_lengths += 1
            # one stats entry per actor (stride num_agents skips co-agents)
            for reward, length, done, info in zip(self.current_rewards[::self.num_agents], self.current_lengths[::self.num_agents], self.dones[::self.num_agents], infos):
                if done:
                    self.game_rewards.append(reward)
                    self.game_lengths.append(length)
                    game_res = 1.0
                    if isinstance(info, dict):
                        # 0.5 default presumably means draw/unknown outcome — TODO confirm
                        game_res = info.get('battle_won', 0.5)
                    self.game_scores.append(game_res)
            # zero accumulators for environments that just finished
            self.current_rewards = self.current_rewards * (1.0 - self.dones)
            self.current_lengths = self.current_lengths * (1.0 - self.dones)
            shaped_rewards = self.rewards_shaper(rewards)
            epinfos.append(infos)
            mb_rewards.append(shaped_rewards)
        #using openai baseline approach
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions, dtype=np.float32)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        # NOTE(review): np.bool is removed in modern numpy (use bool/np.bool_)
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        mb_states = np.asarray(mb_states, dtype=np.float32)
        last_values = self.get_values(self.obs)
        last_values = np.squeeze(last_values)
        mb_returns = np.zeros_like(mb_rewards)
        mb_advs = np.zeros_like(mb_rewards)
        lastgaelam = 0
        # GAE: backward recursion over the horizon, bootstrapping with last_values
        for t in reversed(range(self.horizon_length)):
            if t == self.horizon_length - 1:
                nextnonterminal = 1.0 - self.dones
                nextvalues = last_values
            else:
                nextnonterminal = 1.0 - mb_dones[t+1]
                nextvalues = mb_values[t+1]
            delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
            mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * nextnonterminal * lastgaelam
        mb_returns = mb_advs + mb_values
        if self.network.is_rnn():
            result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states )), epinfos)
        else:
            result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)), None, epinfos)
        return result
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
    def train(self):
        """Main PPO-style training loop: alternate rollouts and minibatch SGD.

        Runs until the configured score is reached or max_epochs is exceeded;
        returns (last_mean_rewards, epoch_num).
        """
        self.obs = self.vec_env.reset()
        batch_size = self.horizon_length * self.num_actors * self.num_agents
        batch_size_envs = self.horizon_length * self.num_actors
        minibatch_size = self.config['minibatch_size']
        mini_epochs_num = self.config['mini_epochs']
        num_minibatches = batch_size // minibatch_size
        last_lr = self.config['learning_rate']
        frame = 0
        update_time = 0
        # sentinel "minus infinity" so the first mean reward always improves it
        self.last_mean_rewards = -100500
        play_time = 0
        epoch_num = 0
        max_epochs = self.config.get('max_epochs', 1e6)
        start_time = time.time()
        total_time = 0
        rep_count = 0
        while True:
            play_time_start = time.time()
            epoch_num = self.update_epoch()
            frame += batch_size_envs
            # collect one horizon of experience (GAE already applied)
            obses, returns, dones, actions, values, neglogpacs, lstm_states, _ = self.play_steps()
            advantages = returns - values
            if self.normalize_advantage:
                advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
            a_losses = []
            c_losses = []
            entropies = []
            kls = []
            play_time_end = time.time()
            play_time = play_time_end - play_time_start
            update_time_start = time.time()
            if self.network.is_rnn():
                # RNN path: shuffle whole sequences so hidden state stays aligned
                total_games = batch_size // self.seq_len
                num_games_batch = minibatch_size // self.seq_len
                game_indexes = np.arange(total_games)
                flat_indexes = np.arange(total_games * self.seq_len).reshape(total_games, self.seq_len)
                # one lstm state per sequence (states were stored per step)
                lstm_states = lstm_states[::self.seq_len]
                for _ in range(0, mini_epochs_num):
                    np.random.shuffle(game_indexes)
                    for i in range(0, num_minibatches):
                        batch = range(i * num_games_batch, (i + 1) * num_games_batch)
                        mb_indexes = game_indexes[batch]
                        mbatch = flat_indexes[mb_indexes].ravel()
                        # NOTE: `dict` shadows the builtin; kept as-is here
                        dict = {}
                        dict[self.old_values_ph] = values[mbatch]
                        dict[self.old_logp_actions_ph] = neglogpacs[mbatch]
                        dict[self.advantages_ph] = advantages[mbatch]
                        dict[self.rewards_ph] = returns[mbatch]
                        dict[self.actions_ph] = actions[mbatch]
                        dict[self.obs_ph] = obses[mbatch]
                        dict[self.masks_ph] = dones[mbatch]
                        dict[self.states_ph] = lstm_states[mb_indexes]
                        dict[self.learning_rate_ph] = last_lr
                        run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_approx, self.current_lr, self.lr_multiplier, self.train_op]
                        run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
                        a_loss, c_loss, entropy, kl, last_lr, lr_mul,_, _ = self.sess.run(run_ops, dict)
                        a_losses.append(a_loss)
                        c_losses.append(c_loss)
                        kls.append(kl)
                        entropies.append(entropy)
            else:
                # feed-forward path: permute individual transitions each mini-epoch
                for _ in range(0, mini_epochs_num):
                    permutation = np.random.permutation(batch_size)
                    obses = obses[permutation]
                    returns = returns[permutation]
                    actions = actions[permutation]
                    values = values[permutation]
                    neglogpacs = neglogpacs[permutation]
                    advantages = advantages[permutation]
                    for i in range(0, num_minibatches):
                        batch = range(i * minibatch_size, (i + 1) * minibatch_size)
                        dict = {self.obs_ph: obses[batch], self.actions_ph : actions[batch], self.rewards_ph : returns[batch],
                                self.advantages_ph : advantages[batch], self.old_logp_actions_ph : neglogpacs[batch], self.old_values_ph : values[batch]}
                        dict[self.learning_rate_ph] = last_lr
                        run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_approx, self.current_lr, self.lr_multiplier, self.train_op]
                        run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
                        a_loss, c_loss, entropy, kl, last_lr, lr_mul, _, _ = self.sess.run(run_ops, dict)
                        a_losses.append(a_loss)
                        c_losses.append(c_loss)
                        kls.append(kl)
                        entropies.append(entropy)
            update_time_end = time.time()
            update_time = update_time_end - update_time_start
            sum_time = update_time + play_time
            total_time = update_time_end - start_time
            if True:
                scaled_time = self.num_agents * sum_time
                print('frames per seconds: ', batch_size / scaled_time)
                self.writer.add_scalar('performance/fps', batch_size / scaled_time, frame)
                self.writer.add_scalar('performance/update_time', update_time, frame)
                self.writer.add_scalar('performance/play_time', play_time, frame)
                self.writer.add_scalar('losses/a_loss', np.mean(a_losses), frame)
                self.writer.add_scalar('losses/c_loss', np.mean(c_losses), frame)
                self.writer.add_scalar('losses/entropy', np.mean(entropies), frame)
                self.writer.add_scalar('info/last_lr', last_lr * lr_mul, frame)
                self.writer.add_scalar('info/lr_mul', lr_mul, frame)
                self.writer.add_scalar('info/e_clip', self.e_clip * lr_mul, frame)
                self.writer.add_scalar('info/kl', np.mean(kls), frame)
                self.writer.add_scalar('info/epochs', epoch_num, frame)
                if len(self.game_rewards) > 0:
                    mean_rewards = np.mean(self.game_rewards)
                    mean_lengths = np.mean(self.game_lengths)
                    mean_scores = np.mean(self.game_scores)
                    self.writer.add_scalar('rewards/mean', mean_rewards, frame)
                    self.writer.add_scalar('rewards/time', mean_rewards, total_time)
                    self.writer.add_scalar('episode_lengths/mean', mean_lengths, frame)
                    self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
                    self.writer.add_scalar('scores/mean', mean_scores, frame)
                    self.writer.add_scalar('scores/time', mean_scores, total_time)
                    if rep_count % 10 == 0:
                        self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
                        rep_count += 1
                    if mean_rewards > self.last_mean_rewards:
                        print('saving next best rewards: ', mean_rewards)
                        self.last_mean_rewards = mean_rewards
                        self.save("./nn/" + self.config['name'])
                        if self.last_mean_rewards > self.config['score_to_win']:
                            print('Network won!')
                            self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
                            return self.last_mean_rewards, epoch_num
                # NOTE(review): mean_rewards here is only defined once an episode
                # has finished — confirm epoch cap cannot be hit before that
                if epoch_num > max_epochs:
                    self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
                    print('MAX EPOCHS NUM!')
                    return self.last_mean_rewards, epoch_num
                update_time = 0
| 22,809 |
Python
| 49.688889 | 232 | 0.577404 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/dqnagent.py
|
from rl_games.common import tr_helpers, vecenv, experience, env_configurations
from rl_games.common.categorical import CategoricalQ
from rl_games.algos_tf14 import networks, models
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque
from tensorboardX import SummaryWriter
from datetime import datetime
class DQNAgent:
    def __init__(self, sess, base_name, observation_space, action_space, config):
        """Build the full DQN training graph inside the given TF1 session.

        Supports optional distributional (C51) heads, double-Q targets and a
        prioritized replay buffer, all selected via `config`.

        Args:
            sess: tf.Session used for every graph execution.
            base_name: name used for logging.
            observation_space, action_space: gym-style spaces.
            config: hyperparameter dict (lr schedule, buffer type, gamma, ...).
        """
        observation_shape = observation_space.shape
        actions_num = action_space.n
        self.config = config
        # learning-rate schedule selection
        self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
        self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
        self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
        self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
        self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
        self.games_to_track = config.get('games_to_track', 100)
        self.max_epochs = config.get('max_epochs', 1e6)
        self.game_rewards = deque([], maxlen=self.games_to_track)
        self.game_lengths = deque([], maxlen=self.games_to_track)
        # persistent epoch counter lives in the graph so it survives restores
        self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
        self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
        self.current_lr = self.learning_rate_ph
        if self.is_adaptive_lr:
            self.kl_threshold = config['kl_threshold']
        if self.is_polynom_decay_lr:
            # BUGFIX: was `config.get(config, 'decay_power', 1.0)` — dict.get
            # takes (key, default); passing the dict as key raised TypeError.
            self.lr_multiplier = tf.train.polynomial_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, end_learning_rate=0.001, power=config.get('decay_power', 1.0))
        if self.is_exp_decay_lr:
            self.lr_multiplier = tf.train.exponential_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, decay_rate = config['decay_rate'])
        self.env_name = config['env_name']
        self.network = config['network']
        self.state_shape = observation_shape
        self.actions_num = actions_num
        self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
        self.epsilon = self.config['epsilon']
        self.rewards_shaper = self.config['reward_shaper']
        self.epsilon_processor = tr_helpers.LinearValueProcessor(self.config['epsilon'], self.config['min_epsilon'], self.config['epsilon_decay_frames'])
        self.beta_processor = tr_helpers.LinearValueProcessor(self.config['priority_beta'], self.config['max_beta'], self.config['beta_decay_frames'])
        if self.env_name:
            self.env = env_configurations.configurations[self.env_name]['env_creator']()
        self.sess = sess
        self.horizon_length = self.config['horizon_length']
        # rolling window of the last `horizon_length` transitions (n-step returns)
        self.states = deque([], maxlen=self.horizon_length)
        self.is_prioritized = config['replay_buffer_type'] != 'normal'
        self.atoms_num = self.config['atoms_num']
        # atoms_num > 1 switches to the distributional (C51) formulation
        self.is_categorical = self.atoms_num > 1
        if self.is_categorical:
            self.v_min = self.config['v_min']
            self.v_max = self.config['v_max']
            self.delta_z = (self.v_max - self.v_min) / (self.atoms_num - 1)
            self.all_z = tf.range(self.v_min, self.v_max + self.delta_z, self.delta_z)
            self.categorical = CategoricalQ(self.atoms_num, self.v_min, self.v_max)
        if not self.is_prioritized:
            self.exp_buffer = experience.ReplayBuffer(config['replay_buffer_size'], observation_space)
        else:
            self.exp_buffer = experience.PrioritizedReplayBuffer(config['replay_buffer_size'], config['priority_alpha'], observation_space)
            # per-sample importance weights are only needed with prioritization
            self.sample_weights_ph = tf.placeholder(tf.float32, shape= [None,] , name='sample_weights')
        self.obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'obs_ph')
        self.actions_ph = tf.placeholder(tf.int32, shape=[None,], name = 'actions_ph')
        self.rewards_ph = tf.placeholder(tf.float32, shape=[None,], name = 'rewards_ph')
        self.next_obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'next_obs_ph')
        self.is_done_ph = tf.placeholder(tf.float32, shape=[None,], name = 'is_done_ph')
        self.is_not_done = 1 - self.is_done_ph
        self.name = base_name
        self.gamma = self.config['gamma']
        # effective discount over one n-step transition
        self.gamma_step = self.gamma**self.horizon_length
        self.input_obs = self.obs_ph
        self.input_next_obs = self.next_obs_ph
        if observation_space.dtype == np.uint8:
            print('scaling obs')
            self.input_obs = tf.to_float(self.input_obs) / 255.0
            self.input_next_obs = tf.to_float(self.input_next_obs) / 255.0
        if self.atoms_num == 1:
            self.setup_qvalues(actions_num)
        else:
            self.setup_cat_qvalues(actions_num)
        self.reg_loss = tf.losses.get_regularization_loss()
        self.td_loss_mean += self.reg_loss
        self.learning_rate = self.config['learning_rate']
        self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
        self.saver = tf.train.Saver()
        # hard-copy op: online weights -> target weights
        self.assigns_op = [tf.assign(w_target, w_self, validate_shape=True) for w_self, w_target in zip(self.weights, self.target_weights)]
        self.variables = TensorFlowVariables(self.qvalues, self.sess)
        if self.env_name:
            sess.run(tf.global_variables_initializer())
        self._reset()
def _get_q(self, probs):
res = probs * self.all_z
return tf.reduce_sum(res, axis=2)
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
return self.variables.set_flat(weights)
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
    def setup_cat_qvalues(self, actions_num):
        """Build the distributional (C51) agent/target graphs and the KL loss.

        Creates per-atom logits for the 'agent' and 'target' scopes, expected
        Q-values via _get_q, and the cross-entropy projection loss.
        """
        # online network over the current observation
        config = {
            'name' : 'agent',
            'inputs' : self.input_obs,
            'actions_num' : actions_num,
        }
        self.logits = self.network(config, reuse=False)
        self.qvalues_c = tf.nn.softmax(self.logits, axis = 2)
        self.qvalues = self._get_q(self.qvalues_c)
        # independent target network over the next observation
        config = {
            'name' : 'target',
            'inputs' : self.input_next_obs,
            'actions_num' : actions_num,
        }
        self.target_logits = self.network(config, reuse=False)
        self.target_qvalues_c = tf.nn.softmax(self.target_logits, axis = 2)
        self.target_qvalues = self._get_q(self.target_qvalues_c)
        if self.config['is_double'] == True:
            # double DQN: action selection uses the ONLINE net on next obs
            config = {
                'name' : 'agent',
                'inputs' : self.input_next_obs,
                'actions_num' : actions_num,
            }
            self.next_logits = tf.stop_gradient(self.network(config, reuse=True))
            self.next_qvalues_c = tf.nn.softmax(self.next_logits, axis = 2)
            self.next_qvalues = self._get_q(self.next_qvalues_c)
        self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
        self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
        # logits of the action actually taken (one-hot select over action axis)
        self.current_action_values = tf.reduce_sum(tf.expand_dims(tf.one_hot(self.actions_ph, actions_num), -1) * self.logits, reduction_indices = (1,))
        if self.config['is_double'] == True:
            self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
            self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
            self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
        else:
            self.next_selected_actions = tf.argmax(self.target_qvalues, axis = 1)
            self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
            self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
        # projected target distribution is computed on the host and fed in
        self.proj_dir_ph = tf.placeholder(tf.float32, shape=[None, self.atoms_num], name = 'best_proj_dir')
        log_probs = tf.nn.log_softmax( self.current_action_values, axis=1)
        if self.is_prioritized:
            # we need to return loss to update priority buffer
            self.abs_errors = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1) + 1e-5
            self.td_loss = self.abs_errors * self.sample_weights_ph
        else:
            self.td_loss = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1)
        self.td_loss_mean = tf.reduce_mean(self.td_loss)
    def setup_qvalues(self, actions_num):
        """Build the scalar-Q agent/target graphs and the Huber TD loss."""
        # online network over the current observation
        config = {
            'name' : 'agent',
            'inputs' : self.input_obs,
            'actions_num' : actions_num,
        }
        self.qvalues = self.network(config, reuse=False)
        # independent target network over the next observation (no gradients)
        config = {
            'name' : 'target',
            'inputs' : self.input_next_obs,
            'actions_num' : actions_num,
        }
        self.target_qvalues = tf.stop_gradient(self.network(config, reuse=False))
        if self.config['is_double'] == True:
            # double DQN: select next action with the ONLINE net
            config = {
                'name' : 'agent',
                'inputs' : self.input_next_obs,
                'actions_num' : actions_num,
            }
            self.next_qvalues = tf.stop_gradient(self.network(config, reuse=True))
        self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
        self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
        # Q of the action actually taken
        self.current_action_qvalues = tf.reduce_sum(tf.one_hot(self.actions_ph, actions_num) * self.qvalues, reduction_indices = 1)
        if self.config['is_double'] == True:
            self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
            self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
            self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( self.target_qvalues * self.next_selected_actions_onehot , reduction_indices=[1,] ))
        else:
            self.next_state_values_target = tf.stop_gradient(tf.reduce_max(self.target_qvalues, reduction_indices=1))
        # n-step Bellman target; is_not_done zeroes the bootstrap on terminals
        self.reference_qvalues = self.rewards_ph + self.gamma_step *self.is_not_done * self.next_state_values_target
        if self.is_prioritized:
            # we need to return l1 loss to update priority buffer
            self.abs_errors = tf.abs(self.current_action_qvalues - self.reference_qvalues) + 1e-5
            # the same as multiply gradients later (other way is used in different examples over internet)
            self.td_loss = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.NONE) * self.sample_weights_ph
            self.td_loss_mean = tf.reduce_mean(self.td_loss)
        else:
            self.td_loss_mean = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.MEAN)
        self.reg_loss = tf.losses.get_regularization_loss()
        self.td_loss_mean += self.reg_loss
        self.learning_rate = self.config['learning_rate']
        if self.env_name:
            self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def _reset(self):
self.states.clear()
if self.env_name:
self.state = self.env.reset()
self.total_reward = 0.0
self.total_shaped_reward = 0.0
self.step_count = 0
def get_qvalues(self, state):
return self.sess.run(self.qvalues, {self.obs_ph: state})
def get_action(self, state, epsilon=0.0):
if np.random.random() < epsilon:
action = self.env.action_space.sample()
else:
qvals = self.get_qvalues([state])
action = np.argmax(qvals)
return action
    def play_steps(self, steps, epsilon=0.0):
        """Advance the env one step and push an n-step transition when ready.

        Keeps a rolling window of `steps` transitions; once full, stores the
        discounted n-step reward in the replay buffer. Returns
        (done_reward, done_shaped_reward, done_steps) — all None unless the
        episode terminated on this call.
        """
        done_reward = None
        done_shaped_reward = None
        done_steps = None
        steps_rewards = 0
        cur_gamma = 1
        cur_states_len = len(self.states)
        # always break after one
        while True:
            # act from the most recent state in the window (or the episode start)
            if cur_states_len > 0:
                state = self.states[-1][0]
            else:
                state = self.state
            action = self.get_action(state, epsilon)
            new_state, reward, is_done, _ = self.env.step(action)
            #reward = reward * (1 - is_done)
            self.step_count += 1
            self.total_reward += reward
            shaped_reward = self.rewards_shaper(reward)
            self.total_shaped_reward += shaped_reward
            self.states.append([new_state, action, shaped_reward])
            if len(self.states) < steps:
                break
            # window full: fold the buffered rewards into one n-step return
            for i in range(steps):
                sreward = self.states[i][2]
                steps_rewards += sreward * cur_gamma
                cur_gamma = cur_gamma * self.gamma
            next_state, current_action, _ = self.states[0]
            self.exp_buffer.add(self.state, current_action, steps_rewards, new_state, is_done)
            self.state = next_state
            break
        if is_done:
            # capture episode stats before _reset clears them
            done_reward = self.total_reward
            done_steps = self.step_count
            done_shaped_reward = self.total_shaped_reward
            self._reset()
        return done_reward, done_shaped_reward, done_steps
def load_weigths_into_target_network(self):
self.sess.run(self.assigns_op)
def sample_batch(self, exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch
}
def sample_prioritized_batch(self, exp_replay, batch_size, beta):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch, sample_weights, sample_idxes = exp_replay.sample(batch_size, beta)
batch = { self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch, self.sample_weights_ph: sample_weights }
return [batch , sample_idxes]
    def train(self):
        """Main DQN training loop: fill buffer, then alternate play and SGD.

        Runs until score_to_win or max_epochs is reached; returns
        (last_mean_rewards, epoch_num).
        """
        mem_free_steps = 0
        # sentinel "minus infinity" so the first mean reward always improves it
        self.last_mean_rewards = -100500
        epoch_num = 0
        frame = 0
        update_time = 0
        play_time = 0
        start_time = time.time()
        total_time = 0
        self.load_weigths_into_target_network()
        # warm up the replay buffer before any gradient step
        for _ in range(0, self.config['num_steps_fill_buffer']):
            self.play_steps(self.horizon_length, self.epsilon)
        steps_per_epoch = self.config['steps_per_epoch']
        num_epochs_to_copy = self.config['num_epochs_to_copy']
        batch_size = self.config['batch_size']
        lives_reward = self.config['lives_reward']
        episodes_to_log = self.config['episodes_to_log']
        frame = 0
        play_time = 0
        update_time = 0
        rewards = []
        shaped_rewards = []
        steps = []
        losses = deque([], maxlen=100)
        while True:
            epoch_num = self.update_epoch()
            t_play_start = time.time()
            # anneal exploration epsilon and prioritization beta by frame count
            self.epsilon = self.epsilon_processor(frame)
            self.beta = self.beta_processor(frame)
            for _ in range(0, steps_per_epoch):
                reward, shaped_reward, step = self.play_steps(self.horizon_length, self.epsilon)
                if reward != None:
                    self.game_lengths.append(step)
                    self.game_rewards.append(reward)
                    #shaped_rewards.append(shaped_reward)
            t_play_end = time.time()
            play_time += t_play_end - t_play_start
            # train
            frame = frame + steps_per_epoch
            t_start = time.time()
            if self.is_categorical:
                if self.is_prioritized:
                    batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
                    # project the target distribution onto the fixed support
                    next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
                    projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.horizon_length)
                    batch[self.proj_dir_ph] = projected
                    _, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
                    self.exp_buffer.update_priorities(idxes, errors_update)
                else:
                    batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
                    next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
                    projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.horizon_length)
                    batch[self.proj_dir_ph] = projected
                    _, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
            else:
                if self.is_prioritized:
                    batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
                    _, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
                    # feed TD errors back so priorities track surprise
                    self.exp_buffer.update_priorities(idxes, errors_update)
                else:
                    batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
                    _, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
            losses.append(loss_t)
            t_end = time.time()
            update_time += t_end - t_start
            total_time += update_time
            if frame % 1000 == 0:
                mem_free_steps += 1
                if mem_free_steps == 10:
                    mem_free_steps = 0
                    tr_helpers.free_mem()
                sum_time = update_time + play_time
                print('frames per seconds: ', 1000 / (sum_time))
                self.writer.add_scalar('performance/fps', 1000 / sum_time, frame)
                self.writer.add_scalar('performance/upd_time', update_time, frame)
                self.writer.add_scalar('performance/play_time', play_time, frame)
                self.writer.add_scalar('losses/td_loss', np.mean(losses), frame)
                self.writer.add_scalar('info/lr_mul', lr_mul, frame)
                self.writer.add_scalar('info/lr', self.learning_rate*lr_mul, frame)
                self.writer.add_scalar('info/epochs', epoch_num, frame)
                self.writer.add_scalar('info/epsilon', self.epsilon, frame)
                if self.is_prioritized:
                    self.writer.add_scalar('beta', self.beta, frame)
                update_time = 0
                play_time = 0
                num_games = len(self.game_rewards)
                if num_games > 10:
                    # lives_reward rescales per-life entries to per-game means
                    d = num_games / lives_reward
                    mean_rewards = np.sum(self.game_rewards) / d
                    mean_lengths = np.sum(self.game_lengths) / d
                    self.writer.add_scalar('rewards/mean', mean_rewards, frame)
                    self.writer.add_scalar('rewards/time', mean_rewards, total_time)
                    self.writer.add_scalar('episode_lengths/mean', mean_lengths, frame)
                    self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
                    if mean_rewards > self.last_mean_rewards:
                        print('saving next best rewards: ', mean_rewards)
                        self.last_mean_rewards = mean_rewards
                        self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
                        if self.last_mean_rewards > self.config['score_to_win']:
                            print('network won!')
                            return self.last_mean_rewards, epoch_num
                #clear_output(True)
            # adjust agent parameters
            if frame % num_epochs_to_copy == 0:
                self.load_weigths_into_target_network()
            if epoch_num >= self.max_epochs:
                print('Max epochs reached')
                self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(np.sum(self.game_rewards) * lives_reward / len(self.game_rewards)))
                return self.last_mean_rewards, epoch_num
| 21,405 |
Python
| 48.322581 | 191 | 0.592245 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/models.py
|
import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
from rl_games.algos_tf14 import networks
tfd = tfp.distributions
def entry_stop_gradients(target, mask):
    """Pass gradients only through entries where mask == 1; block the rest."""
    blocked = tf.abs(mask - 1)
    # the blocked term contributes values but no gradient
    return tf.stop_gradient(blocked * target) + mask * target
class BaseModel(object):
    """Common base for TF1 policy/value model wrappers."""

    def is_rnn(self):
        # Plain feed-forward models carry no recurrent state.
        return False
class ModelA2C(BaseModel):
    # Discrete-action A2C model: Gumbel-softmax sampling with optional
    # invalid-action masking at inference time.
    def __init__(self, network):
        self.network = network
    def __call__(self, dict, reuse=False):
        """Build the policy/value graph from a config dict.

        Inference (prev_actions_ph is None) returns
        (neglogp, value, action, entropy, logits); training returns
        (prev_neglogp, value, None, entropy).
        """
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        action_mask_ph = dict.get('action_mask_ph', None)
        # training mode is signalled by the presence of previous actions
        is_train = prev_actions_ph is not None
        logits, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=False, is_train=is_train,reuse=reuse)
        #if action_mask_ph is not None:
            #masks = tf.layers.dense(tf.to_float(action_mask_ph), actions_num, activation=tf.nn.elu)
            #logits = masks + logits
        #logits = entry_stop_gradients(logits, tf.to_float(action_mask_ph))
        probs = tf.nn.softmax(logits)
        # Gumbel Softmax
        if not is_train:
            # sample via Gumbel-max trick: argmax(logits + Gumbel noise)
            u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
            rand_logits = logits - tf.log(-tf.log(u))
            if action_mask_ph is not None:
                # push masked-out actions to -inf before the argmax
                inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
                rand_logits = rand_logits + inf_mask
                logits = logits + inf_mask
            action = tf.argmax(rand_logits, axis=-1)
            one_hot_actions = tf.one_hot(action, actions_num)
        entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=probs)
        if not is_train:
            neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.stop_gradient(one_hot_actions))
            return neglogp, value, action, entropy, logits
        else:
            prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
            return prev_neglogp, value, None, entropy
class ModelA2CContinuous(BaseModel):
    """Continuous-action A2C model with a Normal(mu, sigma) policy head."""
    def __init__(self, network):
        self.network = network
    def __call__(self, dict, reuse=False):
        """Build the policy/value graph; returns
        (neglogp, value, action, entropy, mu, sigma)."""
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        # training mode is signalled by the presence of previous actions
        is_train = prev_actions_ph is not None
        mu, sigma, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train = is_train, reuse=reuse)
        norm_dist = tfd.Normal(mu, sigma)
        action = tf.squeeze(norm_dist.sample(1), axis=0)
        entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        # FIX: use `is None` — `== None` on a graph object is fragile (PEP 8)
        if prev_actions_ph is None:
            neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
            return neglogp, value, action, entropy, mu, sigma
        prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
        return prev_neglogp, value, action, entropy, mu, sigma
class ModelA2CContinuousLogStd(BaseModel):
    # Continuous A2C model parameterized by (mean, log-std); actions sampled
    # via the reparameterization trick.
    def __init__(self, network):
        self.network = network
    def __call__(self, dict, reuse=False):
        """Build the policy/value graph; returns
        (neglogp, value, action, entropy, mean, std)."""
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        is_train = prev_actions_ph is not None
        mean, logstd, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train=True, reuse=reuse)
        std = tf.exp(logstd)
        norm_dist = tfd.Normal(mean, std)
        # reparameterized sample: mean + std * eps
        action = mean + std * tf.random_normal(tf.shape(mean))
        #action = tf.squeeze(norm_dist.sample(1), axis=0)
        #action = tf.clip_by_value(action, -1.0, 1.0)
        entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        if prev_actions_ph is None:
            neglogp = self.neglogp(action, mean, std, logstd)
            return neglogp, value, action, entropy, mean, std
        prev_neglogp = self.neglogp(prev_actions_ph, mean, std, logstd)
        return prev_neglogp, value, action, entropy, mean, std
    def neglogp(self, x, mean, std, logstd):
        # closed-form negative log-likelihood of a diagonal Gaussian
        return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
            + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
            + tf.reduce_sum(logstd, axis=-1)
class LSTMModelA2CContinuousLogStd(BaseModel):
    """Recurrent (LSTM) continuous A2C model parameterized by (mu, log-std)."""
    def __init__(self, network):
        self.network = network
    def is_rnn(self):
        # carries LSTM hidden state between steps
        return True
    def is_single_batched(self):
        return False
    def neglogp(self, x, mean, std, logstd):
        # closed-form negative log-likelihood of a diagonal Gaussian
        return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
            + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
            + tf.reduce_sum(logstd, axis=-1)
    def __call__(self, dict, reuse=False):
        """Build the recurrent policy/value graph; returns (neglogp, value,
        action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state)."""
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        games_num = dict['games_num']
        batch_num = dict['batch_num']
        is_train = prev_actions_ph is not None
        mu, logstd, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
            games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
        std = tf.exp(logstd)
        # reparameterized sample: mu + std * eps
        action = mu + std * tf.random_normal(tf.shape(mu))
        norm_dist = tfd.Normal(mu, std)
        entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        # FIX: use `is None` — `== None` on a graph object is fragile (PEP 8)
        if prev_actions_ph is None:
            neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
            return neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state
        prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
        return prev_neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2CContinuous(BaseModel):
    """Recurrent (LSTM) continuous A2C model parameterized by (mu, variance)."""
    def __init__(self, network):
        self.network = network
    def is_rnn(self):
        # carries LSTM hidden state between steps
        return True
    def is_single_batched(self):
        return False
    def __call__(self, dict, reuse=False):
        """Build the recurrent policy/value graph; returns (neglogp, value,
        action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state)."""
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        games_num = dict['games_num']
        batch_num = dict['batch_num']
        is_train = prev_actions_ph is not None
        mu, var, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
            games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
        sigma = tf.sqrt(var)
        norm_dist = tfd.Normal(mu, sigma)
        action = tf.squeeze(norm_dist.sample(1), axis=0)
        #action = tf.clip_by_value(action, -1.0, 1.0)
        entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        # FIX: use `is None` — `== None` on a graph object is fragile (PEP 8)
        if prev_actions_ph is None:
            neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
            return neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state
        prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
        return prev_neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2C(BaseModel):
    # Recurrent (LSTM) discrete-action A2C model with Gumbel-softmax sampling
    # and optional invalid-action masking.
    def __init__(self, network):
        self.network = network
    def is_rnn(self):
        # carries LSTM hidden state between steps
        return True
    def __call__(self, dict, reuse=False):
        """Build the recurrent policy/value graph.

        Inference returns (neglogp, value, action, entropy, states_ph,
        masks_ph, lstm_state, initial_state, logits); training returns the
        same tuple without logits and with action == None.
        """
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        games_num = dict['games_num']
        batch_num = dict['batch_num']
        action_mask_ph = dict.get('action_mask_ph', None)
        is_train = prev_actions_ph is not None
        logits, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
            games_num=games_num, batch_num=batch_num, continuous=False, is_train=is_train, reuse=reuse)
        if not is_train:
            # sample via Gumbel-max trick: argmax(logits + Gumbel noise)
            u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
            rand_logits = logits - tf.log(-tf.log(u))
            if action_mask_ph is not None:
                # push masked-out actions to -inf before the argmax
                inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
                rand_logits = rand_logits + inf_mask
                logits = logits + inf_mask
            action = tf.argmax(rand_logits, axis=-1)
            one_hot_actions = tf.one_hot(action, actions_num)
        entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.nn.softmax(logits))
        if not is_train:
            # NOTE(review): unlike ModelA2C, one_hot_actions is not wrapped in
            # stop_gradient here — confirm whether that is intentional
            neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot_actions)
            return neglogp, value, action, entropy, states_ph, masks_ph, lstm_state, initial_state, logits
        prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
        return prev_neglogp, value, None, entropy, states_ph, masks_ph, lstm_state, initial_state
class AtariDQN(BaseModel):
    # Thin DQN model wrapper: pulls the relevant entries out of the input dict
    # and forwards them to the underlying network builder.
    def __init__(self, network):
        # network: callable that builds the Q-network graph.
        self.network = network

    def __call__(self, dict, reuse=False):
        # TODO: fix is_train — it is currently inferred from the 'name' entry,
        # which presumably works only because the online net is named 'agent'
        # and the target net something else; verify against the caller.
        net_kwargs = {
            'name': dict['name'],
            'inputs': dict['inputs'],
            'actions_num': dict['actions_num'],
            'is_train': dict['name'] == 'agent',
            'reuse': reuse,
        }
        return self.network(**net_kwargs)
| 10,090 |
Python
| 40.356557 | 167 | 0.599405 |
RoboticExplorationLab/CGAC/externals/rl_games/rl_games/algos_tf14/model_builder.py
|
from rl_games.common import object_factory
import rl_games.algos_tf14
from rl_games.algos_tf14 import network_builder
from rl_games.algos_tf14 import models
class ModelBuilder:
    # Facade that turns a params config into a model: picks the network builder
    # and the model wrapper by name from two object factories.
    def __init__(self):
        # Model wrappers keyed by config name. Class lookup is deferred to call
        # time (via getattr with a bound default) to mirror lazy lambda lookup.
        self.model_factory = object_factory.ObjectFactory()
        model_classes = {
            'discrete_a2c': 'ModelA2C',
            'discrete_a2c_lstm': 'LSTMModelA2C',
            'continuous_a2c': 'ModelA2CContinuous',
            'continuous_a2c_logstd': 'ModelA2CContinuousLogStd',
            'continuous_a2c_lstm': 'LSTMModelA2CContinuous',
            'continuous_a2c_lstm_logstd': 'LSTMModelA2CContinuousLogStd',
            'dqn': 'AtariDQN',
        }
        for key, cls_name in model_classes.items():
            # _cls binds per-iteration to avoid the late-binding closure trap.
            self.model_factory.register_builder(
                key, lambda network, _cls=cls_name, **kwargs: getattr(models, _cls)(network))

        # Backbone network builders keyed by config name.
        self.network_factory = object_factory.ObjectFactory()
        self.network_factory.register_builder('actor_critic', lambda **kwargs: network_builder.A2CBuilder())
        self.network_factory.register_builder('dqn', lambda **kwargs: network_builder.DQNBuilder())

    def load(self, params):
        # Builds a model from a params dict with params['model']['name'] and
        # params['network'] (whose 'name' selects the builder and whose body
        # configures it). Remembers both names on self for later inspection.
        self.model_name = params['model']['name']
        self.network_name = params['network']['name']
        net = self.network_factory.create(self.network_name)
        net.load(params['network'])
        return self.model_factory.create(self.model_name, network=net)
| 1,761 |
Python
| 49.342856 | 146 | 0.721181 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.