#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
fastproto = None
if not '__pypy__' in sys.builtin_module_names:
try:
from thrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'ExtendedBlockType', 'FeatSelectionConfig', 'DenseBlockType', 'EmbedBlockType', 'BlockType', 'MLPBlockConfig', 'CrossNetBlockConfig', 'FMBlockConfig', 'DotProcessorBlockConfig', 'CatBlockConfig', 'CINBlockConfig', 'AttentionBlockConfig', 'BlockConfig']
class ExtendedBlockType:
MLP_DENSE = 1
MLP_EMB = 2
CROSSNET = 3
FM_DENSE = 4
FM_EMB = 5
DOTPROCESSOR_DENSE = 6
DOTPROCESSOR_EMB = 7
CAT_DENSE = 8
CAT_EMB = 9
CIN = 10
ATTENTION = 11
_VALUES_TO_NAMES = {
1: "MLP_DENSE",
2: "MLP_EMB",
3: "CROSSNET",
4: "FM_DENSE",
5: "FM_EMB",
6: "DOTPROCESSOR_DENSE",
7: "DOTPROCESSOR_EMB",
8: "CAT_DENSE",
9: "CAT_EMB",
10: "CIN",
11: "ATTENTION",
}
_NAMES_TO_VALUES = {
"MLP_DENSE": 1,
"MLP_EMB": 2,
"CROSSNET": 3,
"FM_DENSE": 4,
"FM_EMB": 5,
"DOTPROCESSOR_DENSE": 6,
"DOTPROCESSOR_EMB": 7,
"CAT_DENSE": 8,
"CAT_EMB": 9,
"CIN": 10,
"ATTENTION": 11,
}
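# Usage sketch (editorial comment, not emitted by the Thrift generator): ExtendedBlockType
# is a plain i32 enum, so callers translate between wire values and names through the two
# lookup dicts above, e.g.
#
#   ExtendedBlockType._NAMES_TO_VALUES["CIN"]               # -> 10
#   ExtendedBlockType._VALUES_TO_NAMES[10]                  # -> "CIN"
#   ExtendedBlockType._VALUES_TO_NAMES.get(99, "UNKNOWN")   # unknown values need a fallback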
class FeatSelectionConfig:
"""
Attributes:
- block_id
- dense
- sparse
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.dense = []
(_etype3, _size0) = iprot.readListBegin()
if _size0 >= 0:
for _i4 in six.moves.range(_size0):
_elem5 = iprot.readI32()
self.dense.append(_elem5)
else:
while iprot.peekList():
_elem6 = iprot.readI32()
self.dense.append(_elem6)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.sparse = []
(_etype10, _size7) = iprot.readListBegin()
if _size7 >= 0:
for _i11 in six.moves.range(_size7):
_elem12 = iprot.readI32()
self.sparse.append(_elem12)
else:
while iprot.peekList():
_elem13 = iprot.readI32()
self.sparse.append(_elem13)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('FeatSelectionConfig')
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 1)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.dense != None:
oprot.writeFieldBegin('dense', TType.LIST, 2)
oprot.writeListBegin(TType.I32, len(self.dense))
for iter14 in self.dense:
oprot.writeI32(iter14)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.sparse != None:
oprot.writeFieldBegin('sparse', TType.LIST, 3)
oprot.writeListBegin(TType.I32, len(self.sparse))
for iter15 in self.sparse:
oprot.writeI32(iter15)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.dense is not None:
value = pprint.pformat(self.dense, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dense=%s' % (value))
if self.sparse is not None:
value = pprint.pformat(self.sparse, indent=0)
value = padding.join(value.splitlines(True))
L.append(' sparse=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
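# Usage sketch (editorial comment, not emitted by the Thrift generator): assuming the
# thrift_spec/__init__ fixups at the bottom of this module have run (they do on a normal
# import), a FeatSelectionConfig can be built with keyword arguments and round-tripped
# through the binary protocol roughly like this:
#
#   from thrift.transport import TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   cfg = FeatSelectionConfig(block_id=1, dense=[0, 1], sparse=[2])
#   buf = TTransport.TMemoryBuffer()
#   cfg.write(TBinaryProtocol.TBinaryProtocol(buf))            # serialize
#
#   decoded = FeatSelectionConfig()
#   decoded.read(TBinaryProtocol.TBinaryProtocol(
#       TTransport.TMemoryBuffer(buf.getvalue())))             # deserialize
#   assert decoded == cfg                                      # __eq__ compares __dict__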
class DenseBlockType:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DenseBlockType')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class EmbedBlockType:
"""
Attributes:
- comm_embed_dim
- dense_as_sparse
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.comm_embed_dim = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.dense_as_sparse = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('EmbedBlockType')
if self.comm_embed_dim != None:
oprot.writeFieldBegin('comm_embed_dim', TType.I32, 1)
oprot.writeI32(self.comm_embed_dim)
oprot.writeFieldEnd()
if self.dense_as_sparse != None:
oprot.writeFieldBegin('dense_as_sparse', TType.BOOL, 2)
oprot.writeBool(self.dense_as_sparse)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.comm_embed_dim is not None:
value = pprint.pformat(self.comm_embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' comm_embed_dim=%s' % (value))
if self.dense_as_sparse is not None:
value = pprint.pformat(self.dense_as_sparse, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dense_as_sparse=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class BlockType(object):
"""
Attributes:
- dense
- emb
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
DENSE = 1
EMB = 2
@staticmethod
def isUnion():
return True
def get_dense(self):
assert self.field == 1
return self.value
def get_emb(self):
assert self.field == 2
return self.value
def set_dense(self, value):
self.field = 1
self.value = value
def set_emb(self, value):
self.field = 2
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 6
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('dense', value)
if self.field == 2:
padding = ' ' * 4
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('emb', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
dense = DenseBlockType()
dense.read(iprot)
assert self.field == 0 and self.value is None
self.set_dense(dense)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
emb = EmbedBlockType()
emb.read(iprot)
assert self.field == 0 and self.value is None
self.set_emb(emb)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('BlockType')
if self.field == 1:
oprot.writeFieldBegin('dense', TType.STRUCT, 1)
dense = self.value
dense.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('emb', TType.STRUCT, 2)
emb = self.value
emb.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
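# Usage sketch (editorial comment, not emitted by the Thrift generator): BlockType is a
# Thrift union, so exactly one member is set at a time.  Using only the setters/getters
# defined above (and the EmbedBlockType keyword __init__ installed at the bottom of this
# module), typical usage looks like:
#
#   bt = BlockType()
#   bt.set_emb(EmbedBlockType(comm_embed_dim=32, dense_as_sparse=False))
#   if bt.getType() == BlockType.EMB:
#       emb = bt.get_emb()     # asserts that the EMB member is the one currently set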
class MLPBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- type
- arc
- ly_act
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype19, _size16) = iprot.readListBegin()
if _size16 >= 0:
for _i20 in six.moves.range(_size16):
_elem21 = FeatSelectionConfig()
_elem21.read(iprot)
self.input_feat_config.append(_elem21)
else:
while iprot.peekList():
_elem22 = FeatSelectionConfig()
_elem22.read(iprot)
self.input_feat_config.append(_elem22)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.type = BlockType()
self.type.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.arc = []
(_etype26, _size23) = iprot.readListBegin()
if _size23 >= 0:
for _i27 in six.moves.range(_size23):
_elem28 = iprot.readI32()
self.arc.append(_elem28)
else:
while iprot.peekList():
_elem29 = iprot.readI32()
self.arc.append(_elem29)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.ly_act = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MLPBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter30 in self.input_feat_config:
iter30.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.type != None:
oprot.writeFieldBegin('type', TType.STRUCT, 4)
self.type.write(oprot)
oprot.writeFieldEnd()
if self.arc != None:
oprot.writeFieldBegin('arc', TType.LIST, 5)
oprot.writeListBegin(TType.I32, len(self.arc))
for iter31 in self.arc:
oprot.writeI32(iter31)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ly_act != None:
oprot.writeFieldBegin('ly_act', TType.BOOL, 6)
oprot.writeBool(self.ly_act)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.type is not None:
value = pprint.pformat(self.type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' type=%s' % (value))
if self.arc is not None:
value = pprint.pformat(self.arc, indent=0)
value = padding.join(value.splitlines(True))
L.append(' arc=%s' % (value))
if self.ly_act is not None:
value = pprint.pformat(self.ly_act, indent=0)
value = padding.join(value.splitlines(True))
L.append(' ly_act=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
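# Usage sketch (editorial comment, not emitted by the Thrift generator; field values are
# made up for illustration): an MLP block selects input features per upstream block,
# carries a dense/embedding BlockType, and lists its layer widths in `arc`; the result is
# then wrapped in the BlockConfig union defined further down in this module, e.g.
#
#   bt = BlockType()
#   bt.set_dense(DenseBlockType())
#   mlp = MLPBlockConfig(
#       name="example_mlp",
#       block_id=2,
#       input_feat_config=[FeatSelectionConfig(block_id=1, dense=[0, 1, 2])],
#       type=bt,
#       arc=[128, 64, 1],
#       ly_act=True,
#   )
#   block = BlockConfig()
#   block.set_mlp_block(mlp)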
class CrossNetBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- num_of_layers
- cross_feat_config
- batchnorm
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype35, _size32) = iprot.readListBegin()
if _size32 >= 0:
for _i36 in six.moves.range(_size32):
_elem37 = FeatSelectionConfig()
_elem37.read(iprot)
self.input_feat_config.append(_elem37)
else:
while iprot.peekList():
_elem38 = FeatSelectionConfig()
_elem38.read(iprot)
self.input_feat_config.append(_elem38)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.num_of_layers = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.cross_feat_config = []
(_etype42, _size39) = iprot.readListBegin()
if _size39 >= 0:
for _i43 in six.moves.range(_size39):
_elem44 = FeatSelectionConfig()
_elem44.read(iprot)
self.cross_feat_config.append(_elem44)
else:
while iprot.peekList():
_elem45 = FeatSelectionConfig()
_elem45.read(iprot)
self.cross_feat_config.append(_elem45)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.batchnorm = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('CrossNetBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter46 in self.input_feat_config:
iter46.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.num_of_layers != None:
oprot.writeFieldBegin('num_of_layers', TType.I32, 4)
oprot.writeI32(self.num_of_layers)
oprot.writeFieldEnd()
if self.cross_feat_config != None:
oprot.writeFieldBegin('cross_feat_config', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.cross_feat_config))
for iter47 in self.cross_feat_config:
iter47.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.batchnorm != None:
oprot.writeFieldBegin('batchnorm', TType.BOOL, 6)
oprot.writeBool(self.batchnorm)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.num_of_layers is not None:
value = pprint.pformat(self.num_of_layers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_layers=%s' % (value))
if self.cross_feat_config is not None:
value = pprint.pformat(self.cross_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' cross_feat_config=%s' % (value))
if self.batchnorm is not None:
value = pprint.pformat(self.batchnorm, indent=0)
value = padding.join(value.splitlines(True))
L.append(' batchnorm=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class FMBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype51, _size48) = iprot.readListBegin()
if _size48 >= 0:
for _i52 in six.moves.range(_size48):
_elem53 = FeatSelectionConfig()
_elem53.read(iprot)
self.input_feat_config.append(_elem53)
else:
while iprot.peekList():
_elem54 = FeatSelectionConfig()
_elem54.read(iprot)
self.input_feat_config.append(_elem54)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.type = BlockType()
self.type.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('FMBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter55 in self.input_feat_config:
iter55.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.type != None:
oprot.writeFieldBegin('type', TType.STRUCT, 4)
self.type.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.type is not None:
value = pprint.pformat(self.type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class DotProcessorBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype59, _size56) = iprot.readListBegin()
if _size56 >= 0:
for _i60 in six.moves.range(_size56):
_elem61 = FeatSelectionConfig()
_elem61.read(iprot)
self.input_feat_config.append(_elem61)
else:
while iprot.peekList():
_elem62 = FeatSelectionConfig()
_elem62.read(iprot)
self.input_feat_config.append(_elem62)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.type = BlockType()
self.type.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('DotProcessorBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter63 in self.input_feat_config:
iter63.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.type != None:
oprot.writeFieldBegin('type', TType.STRUCT, 4)
self.type.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.type is not None:
value = pprint.pformat(self.type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class CatBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- type
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype67, _size64) = iprot.readListBegin()
if _size64 >= 0:
for _i68 in six.moves.range(_size64):
_elem69 = FeatSelectionConfig()
_elem69.read(iprot)
self.input_feat_config.append(_elem69)
else:
while iprot.peekList():
_elem70 = FeatSelectionConfig()
_elem70.read(iprot)
self.input_feat_config.append(_elem70)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.type = BlockType()
self.type.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('CatBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter71 in self.input_feat_config:
iter71.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.type != None:
oprot.writeFieldBegin('type', TType.STRUCT, 4)
self.type.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.type is not None:
value = pprint.pformat(self.type, indent=0)
value = padding.join(value.splitlines(True))
L.append(' type=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class CINBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- emb_config
- arc
- split_half
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype75, _size72) = iprot.readListBegin()
if _size72 >= 0:
for _i76 in six.moves.range(_size72):
_elem77 = FeatSelectionConfig()
_elem77.read(iprot)
self.input_feat_config.append(_elem77)
else:
while iprot.peekList():
_elem78 = FeatSelectionConfig()
_elem78.read(iprot)
self.input_feat_config.append(_elem78)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.emb_config = EmbedBlockType()
self.emb_config.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.arc = []
(_etype82, _size79) = iprot.readListBegin()
if _size79 >= 0:
for _i83 in six.moves.range(_size79):
_elem84 = iprot.readI32()
self.arc.append(_elem84)
else:
while iprot.peekList():
_elem85 = iprot.readI32()
self.arc.append(_elem85)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.split_half = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('CINBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter86 in self.input_feat_config:
iter86.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.emb_config != None:
oprot.writeFieldBegin('emb_config', TType.STRUCT, 4)
self.emb_config.write(oprot)
oprot.writeFieldEnd()
if self.arc != None:
oprot.writeFieldBegin('arc', TType.LIST, 5)
oprot.writeListBegin(TType.I32, len(self.arc))
for iter87 in self.arc:
oprot.writeI32(iter87)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.split_half != None:
oprot.writeFieldBegin('split_half', TType.BOOL, 6)
oprot.writeBool(self.split_half)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.emb_config is not None:
value = pprint.pformat(self.emb_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' emb_config=%s' % (value))
if self.arc is not None:
value = pprint.pformat(self.arc, indent=0)
value = padding.join(value.splitlines(True))
L.append(' arc=%s' % (value))
if self.split_half is not None:
value = pprint.pformat(self.split_half, indent=0)
value = padding.join(value.splitlines(True))
L.append(' split_half=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class AttentionBlockConfig:
"""
Attributes:
- name
- block_id
- input_feat_config
- emb_config
- att_embed_dim
- num_of_heads
- num_of_layers
- dropout_prob
- use_res
- batchnorm
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.block_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.input_feat_config = []
(_etype91, _size88) = iprot.readListBegin()
if _size88 >= 0:
for _i92 in six.moves.range(_size88):
_elem93 = FeatSelectionConfig()
_elem93.read(iprot)
self.input_feat_config.append(_elem93)
else:
while iprot.peekList():
_elem94 = FeatSelectionConfig()
_elem94.read(iprot)
self.input_feat_config.append(_elem94)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.emb_config = EmbedBlockType()
self.emb_config.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.att_embed_dim = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.num_of_heads = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.num_of_layers = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.FLOAT:
self.dropout_prob = iprot.readFloat()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.BOOL:
self.use_res = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.BOOL:
self.batchnorm = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.checkRequired()
def checkRequired(self):
return
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AttentionBlockConfig')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8')) if UTF8STRINGS and not isinstance(self.name, bytes) else oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.block_id != None:
oprot.writeFieldBegin('block_id', TType.I32, 2)
oprot.writeI32(self.block_id)
oprot.writeFieldEnd()
if self.input_feat_config != None:
oprot.writeFieldBegin('input_feat_config', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.input_feat_config))
for iter95 in self.input_feat_config:
iter95.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.emb_config != None:
oprot.writeFieldBegin('emb_config', TType.STRUCT, 4)
self.emb_config.write(oprot)
oprot.writeFieldEnd()
if self.att_embed_dim != None:
oprot.writeFieldBegin('att_embed_dim', TType.I32, 5)
oprot.writeI32(self.att_embed_dim)
oprot.writeFieldEnd()
if self.num_of_heads != None:
oprot.writeFieldBegin('num_of_heads', TType.I32, 6)
oprot.writeI32(self.num_of_heads)
oprot.writeFieldEnd()
if self.num_of_layers != None:
oprot.writeFieldBegin('num_of_layers', TType.I32, 7)
oprot.writeI32(self.num_of_layers)
oprot.writeFieldEnd()
if self.dropout_prob != None:
oprot.writeFieldBegin('dropout_prob', TType.FLOAT, 8)
oprot.writeFloat(self.dropout_prob)
oprot.writeFieldEnd()
if self.use_res != None:
oprot.writeFieldBegin('use_res', TType.BOOL, 9)
oprot.writeBool(self.use_res)
oprot.writeFieldEnd()
if self.batchnorm != None:
oprot.writeFieldBegin('batchnorm', TType.BOOL, 10)
oprot.writeBool(self.batchnorm)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.block_id is not None:
value = pprint.pformat(self.block_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' block_id=%s' % (value))
if self.input_feat_config is not None:
value = pprint.pformat(self.input_feat_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' input_feat_config=%s' % (value))
if self.emb_config is not None:
value = pprint.pformat(self.emb_config, indent=0)
value = padding.join(value.splitlines(True))
L.append(' emb_config=%s' % (value))
if self.att_embed_dim is not None:
value = pprint.pformat(self.att_embed_dim, indent=0)
value = padding.join(value.splitlines(True))
L.append(' att_embed_dim=%s' % (value))
if self.num_of_heads is not None:
value = pprint.pformat(self.num_of_heads, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_heads=%s' % (value))
if self.num_of_layers is not None:
value = pprint.pformat(self.num_of_layers, indent=0)
value = padding.join(value.splitlines(True))
L.append(' num_of_layers=%s' % (value))
if self.dropout_prob is not None:
value = pprint.pformat(self.dropout_prob, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dropout_prob=%s' % (value))
if self.use_res is not None:
value = pprint.pformat(self.use_res, indent=0)
value = padding.join(value.splitlines(True))
L.append(' use_res=%s' % (value))
if self.batchnorm is not None:
value = pprint.pformat(self.batchnorm, indent=0)
value = padding.join(value.splitlines(True))
L.append(' batchnorm=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class BlockConfig(object):
"""
Attributes:
- mlp_block
- crossnet_block
- fm_block
- dotprocessor_block
- cat_block
- cin_block
- attention_block
"""
thrift_spec = None
__init__ = None
__EMPTY__ = 0
MLP_BLOCK = 1
CROSSNET_BLOCK = 2
FM_BLOCK = 3
DOTPROCESSOR_BLOCK = 4
CAT_BLOCK = 5
CIN_BLOCK = 6
ATTENTION_BLOCK = 7
@staticmethod
def isUnion():
return True
def get_mlp_block(self):
assert self.field == 1
return self.value
def get_crossnet_block(self):
assert self.field == 2
return self.value
def get_fm_block(self):
assert self.field == 3
return self.value
def get_dotprocessor_block(self):
assert self.field == 4
return self.value
def get_cat_block(self):
assert self.field == 5
return self.value
def get_cin_block(self):
assert self.field == 6
return self.value
def get_attention_block(self):
assert self.field == 7
return self.value
def set_mlp_block(self, value):
self.field = 1
self.value = value
def set_crossnet_block(self, value):
self.field = 2
self.value = value
def set_fm_block(self, value):
self.field = 3
self.value = value
def set_dotprocessor_block(self, value):
self.field = 4
self.value = value
def set_cat_block(self, value):
self.field = 5
self.value = value
def set_cin_block(self, value):
self.field = 6
self.value = value
def set_attention_block(self, value):
self.field = 7
self.value = value
def getType(self):
return self.field
def __repr__(self):
value = pprint.pformat(self.value)
member = ''
if self.field == 1:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('mlp_block', value)
if self.field == 2:
padding = ' ' * 15
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('crossnet_block', value)
if self.field == 3:
padding = ' ' * 9
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('fm_block', value)
if self.field == 4:
padding = ' ' * 19
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('dotprocessor_block', value)
if self.field == 5:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('cat_block', value)
if self.field == 6:
padding = ' ' * 10
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('cin_block', value)
if self.field == 7:
padding = ' ' * 16
value = padding.join(value.splitlines(True))
member = '\n %s=%s' % ('attention_block', value)
return "%s(%s)" % (self.__class__.__name__, member)
def read(self, iprot):
self.field = 0
self.value = None
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
mlp_block = MLPBlockConfig()
mlp_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_mlp_block(mlp_block)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
crossnet_block = CrossNetBlockConfig()
crossnet_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_crossnet_block(crossnet_block)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
fm_block = FMBlockConfig()
fm_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_fm_block(fm_block)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
dotprocessor_block = DotProcessorBlockConfig()
dotprocessor_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_dotprocessor_block(dotprocessor_block)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
cat_block = CatBlockConfig()
cat_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_cat_block(cat_block)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
cin_block = CINBlockConfig()
cin_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_cin_block(cin_block)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRUCT:
attention_block = AttentionBlockConfig()
attention_block.read(iprot)
assert self.field == 0 and self.value is None
self.set_attention_block(attention_block)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, True], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeUnionBegin('BlockConfig')
if self.field == 1:
oprot.writeFieldBegin('mlp_block', TType.STRUCT, 1)
mlp_block = self.value
mlp_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 2:
oprot.writeFieldBegin('crossnet_block', TType.STRUCT, 2)
crossnet_block = self.value
crossnet_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 3:
oprot.writeFieldBegin('fm_block', TType.STRUCT, 3)
fm_block = self.value
fm_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 4:
oprot.writeFieldBegin('dotprocessor_block', TType.STRUCT, 4)
dotprocessor_block = self.value
dotprocessor_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 5:
oprot.writeFieldBegin('cat_block', TType.STRUCT, 5)
cat_block = self.value
cat_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 6:
oprot.writeFieldBegin('cin_block', TType.STRUCT, 6)
cin_block = self.value
cin_block.write(oprot)
oprot.writeFieldEnd()
if self.field == 7:
oprot.writeFieldBegin('attention_block', TType.STRUCT, 7)
attention_block = self.value
attention_block.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeUnionEnd()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(FeatSelectionConfig)
FeatSelectionConfig.thrift_spec = (
None, # 0
(1, TType.I32, 'block_id', None, None, 2, ), # 1
(2, TType.LIST, 'dense', (TType.I32,None), None, 2, ), # 2
(3, TType.LIST, 'sparse', (TType.I32,None), None, 2, ), # 3
)
FeatSelectionConfig.thrift_struct_annotations = {
}
FeatSelectionConfig.thrift_field_annotations = {
}
def FeatSelectionConfig__init__(self, block_id=None, dense=None, sparse=None,):
self.block_id = block_id
self.dense = dense
self.sparse = sparse
FeatSelectionConfig.__init__ = FeatSelectionConfig__init__
def FeatSelectionConfig__setstate__(self, state):
state.setdefault('block_id', None)
state.setdefault('dense', None)
state.setdefault('sparse', None)
self.__dict__ = state
FeatSelectionConfig.__getstate__ = lambda self: self.__dict__.copy()
FeatSelectionConfig.__setstate__ = FeatSelectionConfig__setstate__
all_structs.append(DenseBlockType)
DenseBlockType.thrift_spec = (
)
DenseBlockType.thrift_struct_annotations = {
}
DenseBlockType.thrift_field_annotations = {
}
all_structs.append(EmbedBlockType)
EmbedBlockType.thrift_spec = (
None, # 0
(1, TType.I32, 'comm_embed_dim', None, None, 2, ), # 1
(2, TType.BOOL, 'dense_as_sparse', None, False, 2, ), # 2
)
EmbedBlockType.thrift_struct_annotations = {
}
EmbedBlockType.thrift_field_annotations = {
}
def EmbedBlockType__init__(self, comm_embed_dim=None, dense_as_sparse=EmbedBlockType.thrift_spec[2][4],):
self.comm_embed_dim = comm_embed_dim
self.dense_as_sparse = dense_as_sparse
EmbedBlockType.__init__ = EmbedBlockType__init__
def EmbedBlockType__setstate__(self, state):
state.setdefault('comm_embed_dim', None)
state.setdefault('dense_as_sparse', False)
self.__dict__ = state
EmbedBlockType.__getstate__ = lambda self: self.__dict__.copy()
EmbedBlockType.__setstate__ = EmbedBlockType__setstate__
all_structs.append(BlockType)
BlockType.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'dense', [DenseBlockType, DenseBlockType.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'emb', [EmbedBlockType, EmbedBlockType.thrift_spec, False], None, 2, ), # 2
)
BlockType.thrift_struct_annotations = {
}
BlockType.thrift_field_annotations = {
}
def BlockType__init__(self, dense=None, emb=None,):
self.field = 0
self.value = None
if dense is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = dense
if emb is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = emb
BlockType.__init__ = BlockType__init__
all_structs.append(MLPBlockConfig)
MLPBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "MLPBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'type', [BlockType, BlockType.thrift_spec, True], None, 2, ), # 4
(5, TType.LIST, 'arc', (TType.I32,None), None, 2, ), # 5
(6, TType.BOOL, 'ly_act', None, True, 2, ), # 6
)
MLPBlockConfig.thrift_struct_annotations = {
}
MLPBlockConfig.thrift_field_annotations = {
}
def MLPBlockConfig__init__(self, name=MLPBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, type=None, arc=None, ly_act=MLPBlockConfig.thrift_spec[6][4],):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.type = type
self.arc = arc
self.ly_act = ly_act
MLPBlockConfig.__init__ = MLPBlockConfig__init__
def MLPBlockConfig__setstate__(self, state):
state.setdefault('name', "MLPBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('type', None)
state.setdefault('arc', None)
state.setdefault('ly_act', True)
self.__dict__ = state
MLPBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
MLPBlockConfig.__setstate__ = MLPBlockConfig__setstate__
all_structs.append(CrossNetBlockConfig)
CrossNetBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "CrossNetBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.I32, 'num_of_layers', None, 2, 2, ), # 4
(5, TType.LIST, 'cross_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 5
(6, TType.BOOL, 'batchnorm', None, False, 2, ), # 6
)
CrossNetBlockConfig.thrift_struct_annotations = {
}
CrossNetBlockConfig.thrift_field_annotations = {
}
def CrossNetBlockConfig__init__(self, name=CrossNetBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, num_of_layers=CrossNetBlockConfig.thrift_spec[4][4], cross_feat_config=None, batchnorm=CrossNetBlockConfig.thrift_spec[6][4],):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.num_of_layers = num_of_layers
self.cross_feat_config = cross_feat_config
self.batchnorm = batchnorm
CrossNetBlockConfig.__init__ = CrossNetBlockConfig__init__
def CrossNetBlockConfig__setstate__(self, state):
state.setdefault('name', "CrossNetBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('num_of_layers', 2)
state.setdefault('cross_feat_config', None)
state.setdefault('batchnorm', False)
self.__dict__ = state
CrossNetBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
CrossNetBlockConfig.__setstate__ = CrossNetBlockConfig__setstate__
all_structs.append(FMBlockConfig)
FMBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "FMBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'type', [BlockType, BlockType.thrift_spec, True], None, 2, ), # 4
)
FMBlockConfig.thrift_struct_annotations = {
}
FMBlockConfig.thrift_field_annotations = {
}
def FMBlockConfig__init__(self, name=FMBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, type=None,):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.type = type
FMBlockConfig.__init__ = FMBlockConfig__init__
def FMBlockConfig__setstate__(self, state):
state.setdefault('name', "FMBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('type', None)
self.__dict__ = state
FMBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
FMBlockConfig.__setstate__ = FMBlockConfig__setstate__
all_structs.append(DotProcessorBlockConfig)
DotProcessorBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "DotProcessorBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'type', [BlockType, BlockType.thrift_spec, True], None, 2, ), # 4
)
DotProcessorBlockConfig.thrift_struct_annotations = {
}
DotProcessorBlockConfig.thrift_field_annotations = {
}
def DotProcessorBlockConfig__init__(self, name=DotProcessorBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, type=None,):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.type = type
DotProcessorBlockConfig.__init__ = DotProcessorBlockConfig__init__
def DotProcessorBlockConfig__setstate__(self, state):
state.setdefault('name', "DotProcessorBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('type', None)
self.__dict__ = state
DotProcessorBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
DotProcessorBlockConfig.__setstate__ = DotProcessorBlockConfig__setstate__
all_structs.append(CatBlockConfig)
CatBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "CatBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'type', [BlockType, BlockType.thrift_spec, True], None, 2, ), # 4
)
CatBlockConfig.thrift_struct_annotations = {
}
CatBlockConfig.thrift_field_annotations = {
}
def CatBlockConfig__init__(self, name=CatBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, type=None,):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.type = type
CatBlockConfig.__init__ = CatBlockConfig__init__
def CatBlockConfig__setstate__(self, state):
state.setdefault('name', "CatBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('type', None)
self.__dict__ = state
CatBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
CatBlockConfig.__setstate__ = CatBlockConfig__setstate__
all_structs.append(CINBlockConfig)
CINBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "CINBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'emb_config', [EmbedBlockType, EmbedBlockType.thrift_spec, False], None, 2, ), # 4
(5, TType.LIST, 'arc', (TType.I32,None), None, 2, ), # 5
(6, TType.BOOL, 'split_half', None, True, 2, ), # 6
)
CINBlockConfig.thrift_struct_annotations = {
}
CINBlockConfig.thrift_field_annotations = {
}
def CINBlockConfig__init__(self, name=CINBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, emb_config=None, arc=None, split_half=CINBlockConfig.thrift_spec[6][4],):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.emb_config = emb_config
self.arc = arc
self.split_half = split_half
CINBlockConfig.__init__ = CINBlockConfig__init__
def CINBlockConfig__setstate__(self, state):
state.setdefault('name', "CINBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('emb_config', None)
state.setdefault('arc', None)
state.setdefault('split_half', True)
self.__dict__ = state
CINBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
CINBlockConfig.__setstate__ = CINBlockConfig__setstate__
all_structs.append(AttentionBlockConfig)
AttentionBlockConfig.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', True, "AttentionBlock", 2, ), # 1
(2, TType.I32, 'block_id', None, None, 2, ), # 2
(3, TType.LIST, 'input_feat_config', (TType.STRUCT,[FeatSelectionConfig, FeatSelectionConfig.thrift_spec, False]), None, 2, ), # 3
(4, TType.STRUCT, 'emb_config', [EmbedBlockType, EmbedBlockType.thrift_spec, False], None, 2, ), # 4
(5, TType.I32, 'att_embed_dim', None, 10, 2, ), # 5
(6, TType.I32, 'num_of_heads', None, 2, 2, ), # 6
(7, TType.I32, 'num_of_layers', None, 1, 2, ), # 7
(8, TType.FLOAT, 'dropout_prob', None, 0.00000, 2, ), # 8
(9, TType.BOOL, 'use_res', None, True, 2, ), # 9
(10, TType.BOOL, 'batchnorm', None, False, 2, ), # 10
)
AttentionBlockConfig.thrift_struct_annotations = {
}
AttentionBlockConfig.thrift_field_annotations = {
}
def AttentionBlockConfig__init__(self, name=AttentionBlockConfig.thrift_spec[1][4], block_id=None, input_feat_config=None, emb_config=None, att_embed_dim=AttentionBlockConfig.thrift_spec[5][4], num_of_heads=AttentionBlockConfig.thrift_spec[6][4], num_of_layers=AttentionBlockConfig.thrift_spec[7][4], dropout_prob=AttentionBlockConfig.thrift_spec[8][4], use_res=AttentionBlockConfig.thrift_spec[9][4], batchnorm=AttentionBlockConfig.thrift_spec[10][4],):
self.name = name
self.block_id = block_id
self.input_feat_config = input_feat_config
self.emb_config = emb_config
self.att_embed_dim = att_embed_dim
self.num_of_heads = num_of_heads
self.num_of_layers = num_of_layers
self.dropout_prob = dropout_prob
self.use_res = use_res
self.batchnorm = batchnorm
AttentionBlockConfig.__init__ = AttentionBlockConfig__init__
def AttentionBlockConfig__setstate__(self, state):
state.setdefault('name', "AttentionBlock")
state.setdefault('block_id', None)
state.setdefault('input_feat_config', None)
state.setdefault('emb_config', None)
state.setdefault('att_embed_dim', 10)
state.setdefault('num_of_heads', 2)
state.setdefault('num_of_layers', 1)
state.setdefault('dropout_prob', 0.00000)
state.setdefault('use_res', True)
state.setdefault('batchnorm', False)
self.__dict__ = state
AttentionBlockConfig.__getstate__ = lambda self: self.__dict__.copy()
AttentionBlockConfig.__setstate__ = AttentionBlockConfig__setstate__
all_structs.append(BlockConfig)
BlockConfig.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'mlp_block', [MLPBlockConfig, MLPBlockConfig.thrift_spec, False], None, 2, ), # 1
(2, TType.STRUCT, 'crossnet_block', [CrossNetBlockConfig, CrossNetBlockConfig.thrift_spec, False], None, 2, ), # 2
(3, TType.STRUCT, 'fm_block', [FMBlockConfig, FMBlockConfig.thrift_spec, False], None, 2, ), # 3
(4, TType.STRUCT, 'dotprocessor_block', [DotProcessorBlockConfig, DotProcessorBlockConfig.thrift_spec, False], None, 2, ), # 4
(5, TType.STRUCT, 'cat_block', [CatBlockConfig, CatBlockConfig.thrift_spec, False], None, 2, ), # 5
(6, TType.STRUCT, 'cin_block', [CINBlockConfig, CINBlockConfig.thrift_spec, False], None, 2, ), # 6
(7, TType.STRUCT, 'attention_block', [AttentionBlockConfig, AttentionBlockConfig.thrift_spec, False], None, 2, ), # 7
)
BlockConfig.thrift_struct_annotations = {
}
BlockConfig.thrift_field_annotations = {
}
def BlockConfig__init__(self, mlp_block=None, crossnet_block=None, fm_block=None, dotprocessor_block=None, cat_block=None, cin_block=None, attention_block=None,):
self.field = 0
self.value = None
if mlp_block is not None:
assert self.field == 0 and self.value is None
self.field = 1
self.value = mlp_block
if crossnet_block is not None:
assert self.field == 0 and self.value is None
self.field = 2
self.value = crossnet_block
if fm_block is not None:
assert self.field == 0 and self.value is None
self.field = 3
self.value = fm_block
if dotprocessor_block is not None:
assert self.field == 0 and self.value is None
self.field = 4
self.value = dotprocessor_block
if cat_block is not None:
assert self.field == 0 and self.value is None
self.field = 5
self.value = cat_block
if cin_block is not None:
assert self.field == 0 and self.value is None
self.field = 6
self.value = cin_block
if attention_block is not None:
assert self.field == 0 and self.value is None
self.field = 7
self.value = attention_block
BlockConfig.__init__ = BlockConfig__init__
fix_spec(all_structs)
del all_structs
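# --- Illustrative usage (not generated output; a minimal sketch added for clarity) ---
# Shows how the patched constructors above can be combined by hand: a
# FeatSelectionConfig feeds an MLPBlockConfig, which is wrapped in the
# BlockConfig union. All field values below are made up for illustration.
if __name__ == "__main__":
  feat = FeatSelectionConfig(block_id=0, dense=[0, 1, 2], sparse=[0, 1])
  mlp = MLPBlockConfig(
    block_id=1,
    input_feat_config=[feat],
    type=BlockType(emb=EmbedBlockType(comm_embed_dim=16)),
    arc=[128, 64],
  )
  block = BlockConfig(mlp_block=mlp)
  assert block.getType() == BlockConfig.MLP_BLOCK
  print(block)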
|
AutoCTR-main
|
gen-py/block_config/ttypes.py
|
AutoCTR-main
|
trainers/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import torch
from config import ttypes as config
logger = logging.getLogger(__name__)
def build_loss(model, loss_config):
if loss_config.getType() == config.LossConfig.BCEWITHLOGITS:
logger.warning(
"Creating BCEWithLogitsLoss: {}".format(loss_config.get_bcewithlogits())
)
return torch.nn.BCEWithLogitsLoss(reduction="none")
elif loss_config.getType() == config.LossConfig.MSE:
logger.warning("Creating MSELoss: {}".format(loss_config.get_mse()))
return torch.nn.MSELoss(reduction="none")
elif loss_config.getType() == config.LossConfig.BCE:
logger.warning("Creating BCELoss: {}".format(loss_config.get_bce()))
return torch.nn.BCELoss(reduction="none")
else:
raise ValueError("Unknown loss type.")
# TODO add equal weight training and calibration for ads data
def apply_loss(loss, pred, label, weight=None):
E = loss(pred, label)
return torch.mean(E) if weight is None else torch.mean(E * weight.view(-1))
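# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# Demonstrates how apply_loss reduces a per-example loss, optionally scaling each
# example by a weight; the tensors below are made-up toy values, and the loss
# module matches what build_loss returns for the BCEWITHLOGITS config.
if __name__ == "__main__":
    loss_fn = torch.nn.BCEWithLogitsLoss(reduction="none")
    pred = torch.tensor([0.2, -1.3, 2.1])
    label = torch.tensor([1.0, 0.0, 1.0])
    weight = torch.tensor([1.0, 0.5, 2.0])
    print(apply_loss(loss_fn, pred, label))          # plain mean of per-example losses
    print(apply_loss(loss_fn, pred, label, weight))  # mean of weighted per-example losses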
|
AutoCTR-main
|
trainers/loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import re
import time
import numpy as np
import torch
logger = logging.getLogger(__name__)
def log_train_info(
start_time,
i_batch="N/A",
i_epoch="N/A",
trainer_id="N/A",
num_batches=1,
total_loss=0,
batch_size=None,
num_samples=None,
sample_weight_sum=None,
ctr=None,
lock=None,
on_gpu=False,
trainer_logger=None,
):
"""
Args:
        total_loss: the sum of the per-batch average losses.
"""
if on_gpu:
torch.cuda.synchronize()
curr_time = time.time()
if trainer_logger is None:
trainer_logger = logger
if lock is not None:
lock.acquire()
try:
if num_samples is None:
assert (
batch_size is not None
), "batch_size and num_samples cannot both be None."
num_samples = num_batches * batch_size
if sample_weight_sum is None:
assert (
batch_size is not None
), "batch_size and sample_weight_sum cannot both be None."
sample_weight_sum = num_batches * batch_size
loss = total_loss / sample_weight_sum
ne = calculate_ne(loss, ctr) if ctr is not None else "N/A"
trainer_logger.warning(
"Trainer {} finished iteration {} of epoch {}, "
"{:.2f} qps, "
"window loss: {}, "
"window NE: {}".format(
trainer_id,
i_batch,
i_epoch,
num_samples / (curr_time - start_time),
loss,
ne,
)
)
finally:
if lock is not None:
lock.release()
return (loss, ne) if ctr is not None else loss
log_eval_info = log_train_info
def log_tb_info_batch(
writer,
model,
pred,
label,
optimizer, # not used
logging_options,
iter,
start_time,
trainer_id=None,
total_loss=0,
batch_size=-1, # not used
num_batches=-1, # not used
sample_weight_sum=None,
avg_loss=None,
ctr=None,
lock=None,
):
"""
    Note that the reported value is the mean of per-batch means,
    which differs from the mean over the whole history.
    Args:
        total_loss: the sum of the per-batch average losses.
"""
if writer is None:
return
if lock is not None:
lock.acquire()
try:
if avg_loss is None:
assert (
total_loss is not None and sample_weight_sum is not None
), "cannot compute avg_loss"
avg_loss = total_loss / sample_weight_sum
writer.add_scalar(
"{}batch/train_metric/loss".format(
"" if trainer_id is None else "trainer_{}/".format(trainer_id)
),
avg_loss,
iter,
)
if ctr is not None:
ne = calculate_ne(avg_loss, ctr)
writer.add_scalar(
"{}batch/train_metric/ne".format(
"" if trainer_id is None else "trainer_{}/".format(trainer_id)
),
ne,
iter,
)
if logging_options.tb_log_pr_curve_batch:
writer.add_pr_curve("PR Curve", label, pred, iter)
if logging_options.tb_log_model_weight_hist:
for name, param in model.named_parameters():
if any(
re.search(pattern, name)
for pattern in logging_options.tb_log_model_weight_filter_regex
):
continue
writer.add_histogram(name, param.clone().cpu().data.numpy(), iter)
finally:
if lock is not None:
lock.release()
def need_to_log_batch(counter, logging_options, batch_size):
return (
logging_options.log_freq > 0
and (counter + 1) % max(1, int(logging_options.log_freq / batch_size)) == 0
)
def need_to_log_tb(counter, logging_options, batch_size):
tb_log_freq = logging_options.tb_log_freq
return (
tb_log_freq > 0 and (counter + 1) % max(1, int(tb_log_freq / batch_size)) == 0
)
def is_checkpoint(counter, ckp_interval, ckp_path):
return ckp_interval > 0 and ckp_path and (counter + 1) % ckp_interval == 0
def calculate_ne(logloss, ctr):
if ctr <= 0.0 or ctr >= 1.0:
logger.error("CTR should be between 0.0 and 1.0")
return 0.0 if logloss == 0.0 else np.inf
return -logloss / (ctr * np.log(ctr) + (1.0 - ctr) * np.log(1 - ctr))
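# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# calculate_ne divides the observed logloss by the entropy of a constant predictor
# that always outputs the background CTR, so NE ~ 1.0 means the model matches that
# baseline and NE < 1.0 means it beats it. The numbers below are made up.
if __name__ == "__main__":
    ctr = 0.25
    baseline_logloss = -(ctr * np.log(ctr) + (1.0 - ctr) * np.log(1.0 - ctr))
    print(calculate_ne(baseline_logloss, ctr))  # ~1.0: matches the constant-CTR baseline
    print(calculate_ne(0.4, ctr))               # < 1.0: better than the baseline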
|
AutoCTR-main
|
trainers/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import time
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from .loss import apply_loss, build_loss
from .utils import log_tb_info_batch, log_train_info, need_to_log_batch, need_to_log_tb
from models.builder import save_model
try:
from fblearner.flow.util.visualization_utils import summary_writer
except ImportError:
pass
logger = logging.getLogger(__name__)
np.set_printoptions(precision=5)
torch.set_printoptions(precision=5)
THRESHOLD = -1 # -1 #7500
VAL_THRESHOLD = -1
def train(
model,
train_options,
train_dataloader=None,
batch_processor=None,
device=None,
val_dataloader=None,
trainer_id=0,
send_end=None,
train_dataloader_batches=None,
val_dataloader_batches=None,
batch_size=1024,
eval_dataloader=None,
eval_dataloader_batches=None,
save_model_name=None,
):
try:
writer = summary_writer()
except Exception:
logger.error("Failed to create the tensorboard summary writer.")
writer = None
prev_avg_val_loss, is_improving, is_local_optimal = None, True, False
optimizer = model.get_optimizers()
loss = build_loss(model, loss_config=train_options.loss)
output = []
logging_options = train_options.logging_config
batch_size = batch_size
if train_dataloader_batches is None:
train_dataloader_batches = train_dataloader
is_train_dataloader = True
else:
is_train_dataloader = False
if val_dataloader_batches is None:
val_dataloader_batches = val_dataloader
is_val_dataloader = True
else:
is_val_dataloader = False
if eval_dataloader_batches is None:
eval_dataloader_batches = eval_dataloader
is_eval_dataloader = True
else:
is_eval_dataloader = False
for i_epoch in range(0, train_options.nepochs):
start_time_epoch = time.time()
num_batches, avg_loss_epoch, q1, q2 = train_epoch(
model=model,
loss=loss,
optimizer=optimizer,
batch_processor=batch_processor,
trainer_id=trainer_id,
i_epoch=i_epoch,
device=device,
logging_options=logging_options,
writer=writer,
train_dataloader_batches=train_dataloader_batches,
batch_size=batch_size,
is_dataloader=is_train_dataloader,
)
logger.warning("Epoch:{}, Time for training: {}".format(i_epoch, time.time() - start_time_epoch))
avg_loss_epoch = log_train_info(
start_time=start_time_epoch,
i_batch=num_batches,
i_epoch=i_epoch,
trainer_id=trainer_id,
total_loss=avg_loss_epoch * num_batches * batch_size,
num_batches=num_batches,
batch_size=batch_size,
)
if writer is not None:
writer.add_scalar("train_metric/loss_epoch", avg_loss_epoch, i_epoch)
output.append({"i_epoch": i_epoch, "avg_train_loss": avg_loss_epoch})
if val_dataloader_batches is not None:
avg_val_loss, _, _, avg_auc = evaluate(
model=model,
loss=loss,
dataloader=val_dataloader_batches,
batch_processor=batch_processor,
device=device,
batch_size=batch_size,
is_dataloader=is_val_dataloader,
i_epoch=i_epoch,
)
output[-1]["avg_val_loss"] = avg_val_loss
output[-1]["roc_auc_score"] = avg_auc
if eval_dataloader_batches is not None:
avg_eval_loss, _, _, avg_eval_auc = evaluate(
model=model,
loss=loss,
dataloader=eval_dataloader_batches,
batch_processor=batch_processor,
device=device,
batch_size=batch_size,
is_dataloader=is_eval_dataloader,
i_epoch=i_epoch,
)
output[-1]["avg_eval_loss"] = avg_eval_loss
output[-1]["eval_roc_auc_score"] = avg_eval_auc
# check if local optimal
(
is_local_optimal,
is_improving,
prev_avg_val_loss,
) = _check_local_optimal(
i_epoch, is_improving, avg_val_loss, prev_avg_val_loss
)
# break if is local optimal
if is_local_optimal and train_options.early_stop_on_val_loss:
break
if save_model_name:
save_model(save_model_name, model)
if writer is not None:
writer.add_scalar("val_metric/loss_epoch", avg_val_loss, i_epoch)
            logger.warning(
                "Epoch:{}, validation loss: {}, roc_auc_score: {}, time: {}, q1: {}, q2: {}".format(
                    i_epoch,
                    avg_val_loss,
                    avg_auc,
                    time.time() - start_time_epoch,
                    np.sum(q1),
                    np.sum(q2),
                )
            )
if writer is not None:
writer.close()
if send_end:
send_end.send(output)
return output
def _check_local_optimal(i_epoch, is_improving, avg_val_loss, prev_avg_val_loss):
is_local_optimal = i_epoch > 0 and is_improving and avg_val_loss > prev_avg_val_loss
is_improving = i_epoch == 0 or prev_avg_val_loss > avg_val_loss
prev_avg_val_loss = avg_val_loss
return is_local_optimal, is_improving, prev_avg_val_loss
def train_epoch(
model,
loss,
optimizer,
batch_processor,
logging_options,
device,
trainer_id,
i_epoch,
lock=None,
writer=None,
train_dataloader_batches=None,
batch_size=1024,
is_dataloader=True,
):
model.train()
start_time, loss_val, num_batches, sample_weight_sum = time.time(), 0.0, 0, 0.0
start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
loss_val_epoch, total_num_batches, sample_weight_sum_epoch = (
0.0,
len(train_dataloader_batches),
0.0,
)
batch_size = batch_size
q1, q2 = [], []
qq3 = time.perf_counter()
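    # Timing instrumentation (descriptive note, not an original comment): q2 gathers
    # the time from the end of the previous optimizer.step() to the end of backward()
    # (batch handling + forward + backward), while q1 gathers the optimizer.step()
    # time; dd1 below is computed but never used.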
for i_batch, sample_batched in enumerate(train_dataloader_batches):
if not is_dataloader and i_batch <= THRESHOLD:
label, feats, weight = sample_batched
elif not is_dataloader and i_batch > THRESHOLD and i_epoch > 0:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
else:
try:
label, feats, weight = batch_processor(mini_batch=sample_batched)
            except Exception:  # fall back to reverse batch processing if the default path fails
i_epoch += 1
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
# forward pass
z_pred = model(feats=feats)
# backward pass
E = apply_loss(loss, z_pred, label, weight)
optimizer.zero_grad()
E.backward()
qq1 = time.perf_counter()
dd3 = qq1 - qq3
# torch.cuda.synchronize() # wait for mm to finish
qq2 = time.perf_counter()
optimizer.step()
# torch.cuda.synchronize() # wait for mm to finish
qq3 = time.perf_counter()
loss_val_batch = E.detach().cpu().numpy() * batch_size
sample_weight_sum_batch = (
batch_size if weight is None else torch.sum(weight).detach()
)
num_batches += 1
loss_val += loss_val_batch
loss_val_tb += loss_val_batch
loss_val_epoch += loss_val_batch
sample_weight_sum += sample_weight_sum_batch
sample_weight_sum_tb += sample_weight_sum_batch
sample_weight_sum_epoch += sample_weight_sum_batch
if need_to_log_batch(i_batch, logging_options, batch_size):
log_train_info(
i_batch=i_batch,
i_epoch=i_epoch,
trainer_id=trainer_id,
start_time=start_time,
total_loss=loss_val,
num_batches=num_batches,
sample_weight_sum=sample_weight_sum,
batch_size=batch_size,
lock=lock,
)
start_time, loss_val, num_batches, sample_weight_sum = (
time.time(),
0.0,
0,
0.0,
)
if writer is not None and need_to_log_tb(i_batch, logging_options, batch_size):
log_tb_info_batch(
writer=writer,
model=model,
pred=z_pred,
label=label,
optimizer=optimizer,
logging_options=logging_options,
iter=total_num_batches * i_epoch + i_batch,
start_time=start_time_tb,
trainer_id=trainer_id,
avg_loss=loss_val_tb / sample_weight_sum_tb,
lock=lock,
)
start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
dd1 = qq2 - qq1
dd2 = qq3 - qq2
q1.append(dd2)
q2.append(dd3)
if not is_dataloader and i_batch > THRESHOLD:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=2)
avg_loss = loss_val_epoch / sample_weight_sum_epoch
return i_batch, avg_loss, q1, q2
def evaluate(model, loss, dataloader, batch_processor, device, batch_size=1024, is_dataloader=True, i_epoch=0):
model.eval()
preds = []
labels = []
batch_size = batch_size
loss_val, sample_weight_sum = 0.0, 0.0
for i_batch, sample_batched in enumerate(dataloader):
if not is_dataloader and i_batch <= VAL_THRESHOLD:
label, feats, weight = sample_batched
elif not is_dataloader and i_batch > VAL_THRESHOLD and i_epoch > 0:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
else:
try:
label, feats, weight = batch_processor(mini_batch=sample_batched)
            except Exception:  # fall back to reverse batch processing if the default path fails
i_epoch += 1
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
# forward pass
z_pred = model(feats=feats)
# preds.append(z_pred.detach().cpu().numpy())
# labels.append(label.detach().cpu().numpy())
preds += z_pred.detach().cpu().numpy().tolist()
labels += label.detach().cpu().numpy().tolist()
E = apply_loss(loss, z_pred, label, weight)
loss_val += E.detach().cpu().numpy() * batch_size
sample_weight_sum += (
batch_size if weight is None else torch.sum(weight).detach().cpu().numpy()
)
if not is_dataloader and i_batch > VAL_THRESHOLD:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=2)
# logger.warning("loss_val: {}, weight_sum {}".format(dataloader, is_dataloader))
avg_loss = loss_val / sample_weight_sum
# labels = np.asarray(labels).flatten()
# preds = np.asarray(preds).flatten()
try:
avg_auc = roc_auc_score(labels, preds)
except Exception:
idx = np.isfinite(preds)
avg_auc = roc_auc_score(np.array(labels)[idx], np.array(preds)[idx])
return avg_loss, labels, preds, avg_auc
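# --- Illustrative check (a minimal sketch, not part of the original module) ---
# _check_local_optimal drives the early-stop logic in train(): it only fires when
# the validation loss had been improving and then goes back up, e.g. the made-up
# sequence 0.50 -> 0.45 -> 0.47 triggers it at the third epoch.
if __name__ == "__main__":
    prev_loss, improving = None, True
    for epoch, val_loss in enumerate([0.50, 0.45, 0.47]):
        stop, improving, prev_loss = _check_local_optimal(epoch, improving, val_loss, prev_loss)
        print(epoch, stop)  # prints True only for epoch 2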
|
AutoCTR-main
|
trainers/simple_final.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import time
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from .loss import apply_loss, build_loss
from .utils import log_tb_info_batch, log_train_info, need_to_log_batch, need_to_log_tb
try:
from fblearner.flow.util.visualization_utils import summary_writer
except ImportError:
pass
logger = logging.getLogger(__name__)
np.set_printoptions(precision=5)
torch.set_printoptions(precision=5)
THRESHOLD = 30000 # 7500 # -1 #7500
VAL_THRESHOLD = 10000
def train(
model,
train_options,
train_dataloader=None,
batch_processor=None,
device=None,
val_dataloader=None,
trainer_id=0,
send_end=None,
train_dataloader_batches=None,
val_dataloader_batches=None,
batch_size=1024,
):
try:
writer = summary_writer()
except Exception:
logger.error("Failed to create the tensorboard summary writer.")
writer = None
prev_avg_val_loss, is_improving, is_local_optimal = None, True, False
optimizer = model.get_optimizers()
loss = build_loss(model, loss_config=train_options.loss)
output = []
logging_options = train_options.logging_config
batch_size = batch_size
if train_dataloader_batches is None:
train_dataloader_batches = train_dataloader
is_train_dataloader = True
else:
is_train_dataloader = False
if val_dataloader_batches is None:
val_dataloader_batches = val_dataloader
is_val_dataloader = True
else:
is_val_dataloader = False
for i_epoch in range(0, train_options.nepochs):
start_time_epoch = time.time()
num_batches, avg_loss_epoch, q1, q2 = train_epoch(
model=model,
loss=loss,
optimizer=optimizer,
batch_processor=batch_processor,
trainer_id=trainer_id,
i_epoch=i_epoch,
device=device,
logging_options=logging_options,
writer=writer,
train_dataloader_batches=train_dataloader_batches,
batch_size=batch_size,
is_dataloader=is_train_dataloader,
)
logger.warning("Epoch:{}, Time for training: {}".format(i_epoch, time.time() - start_time_epoch))
avg_loss_epoch = log_train_info(
start_time=start_time_epoch,
i_batch=num_batches,
i_epoch=i_epoch,
trainer_id=trainer_id,
total_loss=avg_loss_epoch * num_batches * batch_size,
num_batches=num_batches,
batch_size=batch_size,
)
if writer is not None:
writer.add_scalar("train_metric/loss_epoch", avg_loss_epoch, i_epoch)
output.append({"i_epoch": i_epoch, "avg_train_loss": avg_loss_epoch})
if val_dataloader_batches is not None:
avg_val_loss, _, _, avg_auc = evaluate(
model=model,
loss=loss,
dataloader=val_dataloader_batches,
batch_processor=batch_processor,
device=device,
batch_size=batch_size,
is_dataloader=is_val_dataloader,
i_epoch=i_epoch,
)
output[-1]["avg_val_loss"] = avg_val_loss
output[-1]["roc_auc_score"] = avg_auc
# check if local optimal
(
is_local_optimal,
is_improving,
prev_avg_val_loss,
) = _check_local_optimal(
i_epoch, is_improving, avg_val_loss, prev_avg_val_loss
)
# break if is local optimal
if is_local_optimal and train_options.early_stop_on_val_loss:
break
if writer is not None:
writer.add_scalar("val_metric/loss_epoch", avg_val_loss, i_epoch)
logger.warning("Epoch:{}, validation loss: {}, roc_auc_score: {}, time: {}, q1: {}, q2: {}".format(i_epoch, avg_val_loss, avg_auc, time.time() - start_time_epoch, np.sum(q1), np.sum(q2)))
if writer is not None:
writer.close()
if send_end:
send_end.send(output)
return output
def _check_local_optimal(i_epoch, is_improving, avg_val_loss, prev_avg_val_loss):
is_local_optimal = i_epoch > 0 and is_improving and avg_val_loss > prev_avg_val_loss
is_improving = i_epoch == 0 or prev_avg_val_loss > avg_val_loss
prev_avg_val_loss = avg_val_loss
return is_local_optimal, is_improving, prev_avg_val_loss
def train_epoch(
model,
loss,
optimizer,
batch_processor,
logging_options,
device,
trainer_id,
i_epoch,
lock=None,
writer=None,
train_dataloader_batches=None,
batch_size=1024,
is_dataloader=True,
):
model.train()
start_time, loss_val, num_batches, sample_weight_sum = time.time(), 0.0, 0, 0.0
start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
loss_val_epoch, total_num_batches, sample_weight_sum_epoch = (
0.0,
len(train_dataloader_batches),
0.0,
)
batch_size = batch_size
q1, q2 = [], []
qq3 = time.perf_counter()
for i_batch, sample_batched in enumerate(train_dataloader_batches):
if not is_dataloader and i_batch <= THRESHOLD:
label, feats, weight = sample_batched
elif not is_dataloader and i_batch > THRESHOLD and i_epoch > 0:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
else:
label, feats, weight = batch_processor(mini_batch=sample_batched)
# forward pass
z_pred = model(feats=feats)
# backward pass
E = apply_loss(loss, z_pred, label, weight)
optimizer.zero_grad()
E.backward()
qq1 = time.perf_counter()
dd3 = qq1 - qq3
# torch.cuda.synchronize() # wait for mm to finish
qq2 = time.perf_counter()
optimizer.step()
# torch.cuda.synchronize() # wait for mm to finish
qq3 = time.perf_counter()
loss_val_batch = E.detach().cpu().numpy() * batch_size
sample_weight_sum_batch = (
batch_size if weight is None else torch.sum(weight).detach()
)
num_batches += 1
loss_val += loss_val_batch
loss_val_tb += loss_val_batch
loss_val_epoch += loss_val_batch
sample_weight_sum += sample_weight_sum_batch
sample_weight_sum_tb += sample_weight_sum_batch
sample_weight_sum_epoch += sample_weight_sum_batch
if need_to_log_batch(i_batch, logging_options, batch_size):
log_train_info(
i_batch=i_batch,
i_epoch=i_epoch,
trainer_id=trainer_id,
start_time=start_time,
total_loss=loss_val,
num_batches=num_batches,
sample_weight_sum=sample_weight_sum,
batch_size=batch_size,
lock=lock,
)
start_time, loss_val, num_batches, sample_weight_sum = (
time.time(),
0.0,
0,
0.0,
)
if writer is not None and need_to_log_tb(i_batch, logging_options, batch_size):
log_tb_info_batch(
writer=writer,
model=model,
pred=z_pred,
label=label,
optimizer=optimizer,
logging_options=logging_options,
iter=total_num_batches * i_epoch + i_batch,
start_time=start_time_tb,
trainer_id=trainer_id,
avg_loss=loss_val_tb / sample_weight_sum_tb,
lock=lock,
)
start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
dd1 = qq2-qq1
dd2 = qq3-qq2
q1.append(dd2)
q2.append(dd3)
if not is_dataloader and i_batch > THRESHOLD:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=2)
avg_loss = loss_val_epoch / sample_weight_sum_epoch
return i_batch, avg_loss, q1, q2
def evaluate(model, loss, dataloader, batch_processor, device, batch_size=1024, is_dataloader=True, i_epoch=0):
model.eval()
preds = []
labels = []
batch_size = batch_size
loss_val, sample_weight_sum = 0.0, 0.0
for i_batch, sample_batched in enumerate(dataloader):
if not is_dataloader and i_batch <= VAL_THRESHOLD:
label, feats, weight = sample_batched
elif not is_dataloader and i_batch > VAL_THRESHOLD and i_epoch > 0:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
else:
label, feats, weight = batch_processor(mini_batch=sample_batched)
# forward pass
z_pred = model(feats=feats)
# preds.append(z_pred.detach().cpu().numpy())
# labels.append(label.detach().cpu().numpy())
preds += z_pred.detach().cpu().numpy().tolist()
labels += label.detach().cpu().numpy().tolist()
E = apply_loss(loss, z_pred, label, weight)
loss_val += E.detach().cpu().numpy() * batch_size
sample_weight_sum += (
batch_size if weight is None else torch.sum(weight).detach().cpu().numpy()
)
if not is_dataloader and i_batch > VAL_THRESHOLD:
label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=2)
avg_loss = loss_val / sample_weight_sum
# labels = np.asarray(labels).flatten()
# preds = np.asarray(preds).flatten()
try:
avg_auc = roc_auc_score(labels, preds)
except Exception:
idx = np.isfinite(preds)
if len(np.array(labels)[idx]) > 1:
            logger.warning("Number of finite predictions used for AUC: {}".format(int(np.sum(idx))))
avg_auc = roc_auc_score(np.array(labels)[idx], np.array(preds)[idx])
else:
avg_auc = np.nan
return avg_loss, labels, preds, avg_auc
|
AutoCTR-main
|
trainers/simple.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.path.append('gen-py')
import json
import logging
import os
import pickle
import time
import numpy as np
import torch
# os.system(f"mount -o remount,size={60*1024*1024*1024} /dev/shm")
from thrift.protocol import TSimpleJSONProtocol
from thrift.util import Serializer
from config import ttypes as config
from models.nas_modules import NASRecNet
from trainers.simple_final import train as simple_train
from utils.data import prepare_data
from utils.search_utils import get_args, get_final_fit_trainer_config, get_phenotype
from torch.multiprocessing import Pipe, Process, set_start_method
set_start_method('spawn', force=True)
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (100000, rlimit[1]))
import GPUtil
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
jfactory = TSimpleJSONProtocol.TSimpleJSONProtocolFactory()
THRESHOLD = -1 # -1 # 7500
VAL_THRESHOLD = -1
if __name__ == "__main__":
# get arguments
args = get_args()
logger.warning("All Args: {}".format(args))
# set seeds
np.random.seed(args.numpy_seed)
torch.manual_seed(args.torch_seed)
excludeID = [int(id) for id in args.excludeID.split(",")] if args.excludeID else []
# get model
filenames, model_config_dicts = get_phenotype(args)
# get trainer config
input_summary, args = get_final_fit_trainer_config(args)
# change dataset to small dataset
input_summary["data_options"]["from_file"]["data_file"] = args.data_file
input_summary["data_options"]["from_file"]["batch_size"] = args.batch_size
# change train_options
input_summary["train_options"]["nepochs"] = args.nepochs
input_summary["train_options"]["logging_config"]["log_freq"] = 100000
input_summary["train_options"]["logging_config"]["tb_log_freq"] = 100000
# change performance_options
input_summary["performance_options"]["num_readers"] = args.num_workers
input_summary["performance_options"]["num_trainers"] = args.num_trainers
input_summary["performance_options"]["use_gpu"] = args.use_gpu
# change optimizer
input_summary["feature_options"]["dense"]["optim"]["adam"]["lr"] = args.learning_rate
input_summary["feature_options"]["sparse"]["optim"]["sparse_adam"]["lr"] = args.learning_rate
# # change feature hashing size
# for i, feature in enumerate(input_summary["feature_options"]["sparse"]["features"]):
# if feature["hash_size"] > args.hash_size:
# input_summary["feature_options"]["sparse"]["features"][i]["hash_size"] = args.hash_size
# data_options
splits = [float(p) for p in args.splits.split(":")]
input_summary["data_options"]["from_file"]["splits"] = splits
# extract feature config for searcher construction and trainer
train_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["train_options"]),
config.TrainConfig(),
)
# extract feature config for searcher construction and trainer
feature_config = Serializer.deserialize(
jfactory,
json.dumps(input_summary["feature_options"]),
config.FeatureConfig(),
)
data_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["data_options"]),
config.DataConfig(),
)
performance_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["performance_options"]),
config.PerformanceConfig(),
)
# for datasaving purpose
batch_processor, train_dataloader, val_dataloader, eval_dataloader, \
train_dataloader_batches, val_dataloader_batches, eval_dataloader_batches \
= {}, {}, {}, {}, {}, {}, {}
for id in range(args.total_gpus):
if id not in excludeID:
CUDA = 'cuda:' + str(id)
if len(batch_processor) == 0:
(
_, # datasets
batch_processor[CUDA],
train_dataloader,
val_dataloader,
eval_dataloader,
) = prepare_data(data_options, performance_options, CUDA, pin_memory=False)
else:
(
_, # datasets
batch_processor[CUDA],
_,
_,
_, # eval_dataloader
) = prepare_data(data_options, performance_options, CUDA, pin_memory=True)
train_dataloader = None
val_dataloader = None
eval_dataloader = None
train_dataloader_batches[CUDA] = None
val_dataloader_batches[CUDA] = None
eval_dataloader_batches[CUDA] = None
if args.save_batches:
train_dataloader_batches[CUDA] = []
if len(batch_processor) == 1:
for i_batch, sample_batched in enumerate(train_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
train_dataloader_batches[CUDA].append(sample_batched)
mark = CUDA
# if args.save_val_batches:
val_dataloader_batches[CUDA] = []
if len(batch_processor) == 1:
for i_batch, sample_batched in enumerate(val_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
val_dataloader_batches[CUDA].append(sample_batched)
mark = CUDA
# if args.save_val_batches:
eval_dataloader_batches[CUDA] = []
if len(batch_processor) == 1:
for i_batch, sample_batched in enumerate(eval_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
eval_dataloader_batches[CUDA].append(sample_batched)
mark = CUDA
if args.save_batches:
for i_batch, sample_batched in enumerate(train_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
if i_batch <= THRESHOLD:
train_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
if args.save_val_batches:
for i_batch, sample_batched in enumerate(val_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
if i_batch <= VAL_THRESHOLD:
val_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
for i_batch, sample_batched in enumerate(eval_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
if i_batch <= VAL_THRESHOLD:
eval_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
try:
deviceIDs = GPUtil.getAvailable(order='random',
limit=1,
maxLoad=args.maxLoad,
maxMemory=args.maxMemory,
excludeID=excludeID)
CUDA = 'cuda:' + str(deviceIDs[0])
except Exception:
logger.warning("No available device!")
for model_id, model_config_dict in enumerate(model_config_dicts):
nasrec_net = Serializer.deserialize(
jfactory,
json.dumps(model_config_dict),
config.ModelConfig(),
)
tmp_model = NASRecNet(nasrec_net, feature_config)
tmp_model.to(device=CUDA)
svfolder = os.path.join(args.save_model_path, "results", "final_fit")
svname = os.path.join(svfolder, filenames[model_id].split("/")[-1][:-5] + ".ckp")
if not os.path.exists(svfolder):
os.makedirs(svfolder)
output = simple_train(tmp_model,
train_options,
train_dataloader,
batch_processor[CUDA],
CUDA,
val_dataloader,
0,
None, # send_end,
train_dataloader_batches[CUDA],
val_dataloader_batches[CUDA],
args.batch_size,
eval_dataloader,
eval_dataloader_batches[CUDA],
save_model_name= svname if args.save_model else None,
)
logger.warning("Outputs of Model {} is: {}".format(filenames[model_id], output))
|
AutoCTR-main
|
scripts/final_fit.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import pandas as pd
import math
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
def preprocess_criteo(datafile):
train_path="train.txt"
# train_path="train.txt"
train_path = os.path.join(datafile, train_path)
f1 = open(train_path,'r')
dic= {}
# generate three fold.
# train_x: value
# train_i: index
# train_y: label
f_train_value = open(os.path.join(datafile, 'train_x.txt'),'w')
f_train_index = open(os.path.join(datafile, 'train_i.txt'),'w')
f_train_label = open(os.path.join(datafile, 'train_y.txt'),'w')
num_dense, num_sparse = 13, 26
num_feature = num_dense + num_sparse
for i in range(num_feature):
dic[i] = {}
cnt_train = 0
#for debug
#limits = 10000
index = [1] * num_sparse
for line in f1:
cnt_train +=1
if cnt_train % 100000 ==0:
print('now train cnt : %d\n' % cnt_train)
#if cnt_train > limits:
# break
split = line.strip('\n').split('\t')
# 0-label, 1-13 numerical, 14-39 category
for i in range(num_dense, num_feature):
#dic_len = len(dic[i])
if split[i+1] not in dic[i]:
                # [index, count]: index 1 is reserved for values appearing <= 10 times; count is how many times the value has appeared
dic[i][split[i+1]] = [1,0]
dic[i][split[i+1]][1] += 1
if dic[i][split[i+1]][0] == 1 and dic[i][split[i+1]][1] > 10:
index[i-num_dense] += 1
dic[i][split[i+1]][0] = index[i-num_dense]
f1.close()
print('total entries :%d\n' % (cnt_train - 1))
# calculate number of category features of every dimension
kinds = [num_dense]
for i in range(num_dense, num_feature):
kinds.append(index[i-num_dense])
print('number of dimensions : %d' % (len(kinds)-1))
print(kinds)
for i in range(1,len(kinds)):
kinds[i] += kinds[i-1]
print(kinds)
# make new data
f1 = open(train_path,'r')
cnt_train = 0
print('remake training data...\n')
for line in f1:
cnt_train +=1
if cnt_train % 100000 ==0:
print('now train cnt : %d\n' % cnt_train)
#if cnt_train > limits:
# break
entry = ['0'] * num_feature
index = [None] * num_feature
split = line.strip('\n').split('\t')
label = str(split[0])
for i in range(num_dense):
if split[i+1] != '':
entry[i] = (split[i+1])
index[i] = (i+1)
for i in range(num_dense, num_feature):
if split[i+1] != '':
entry[i] = '1'
index[i] = (dic[i][split[i+1]][0])
for j in range(num_sparse):
index[num_dense+j] += kinds[j]
index = [str(item) for item in index]
f_train_value.write(' '.join(entry)+'\n')
f_train_index.write(' '.join(index)+'\n')
f_train_label.write(label+'\n')
f1.close()
f_train_value.close()
f_train_index.close()
f_train_label.close()
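# --- Illustrative check (a minimal sketch, not part of the original script) ---
# The cumulative `kinds` list above turns each sparse field's local category index
# into a global feature index: field j is offset by the dense slot count plus the
# vocabulary sizes of all earlier sparse fields. The numbers below are made up to
# show the arithmetic.
if __name__ == "__main__":
    kinds = [13, 5, 3]  # 13 dense slots, then two sparse fields with 5 and 3 categories
    for i in range(1, len(kinds)):
        kinds[i] += kinds[i - 1]
    print(kinds)  # [13, 18, 21]
    local_index = [2, 1]  # local category indices for the two sparse fields
    print([local_index[j] + kinds[j] for j in range(2)])  # [15, 19] -> global indices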
def preprocess_avazu(datafile):
train_path = './train.csv'
f1 = open(train_path, 'r')
dic = {}
f_train_value = open('./train_x.txt', 'w')
f_train_index = open('./train_i.txt', 'w')
f_train_label = open('./train_y.txt', 'w')
debug = False
tune = False
Bound = [5] * 24
label_index = 1
Column = 24
numr_feat = []
numerical = [0] * Column
numerical[label_index] = -1
cate_feat = []
for i in range(Column):
if (numerical[i] == 0):
cate_feat.extend([i])
index_cnt = 0
index_others = [0] * Column
Max = [0] * Column
for i in numr_feat:
index_others[i] = index_cnt
index_cnt += 1
numerical[i] = 1
for i in cate_feat:
index_others[i] = index_cnt
index_cnt += 1
for i in range(Column):
dic[i] = dict()
cnt_line = 0
for line in f1:
cnt_line += 1
if (cnt_line == 1): continue # header
if (cnt_line % 1000000 == 0):
print ("cnt_line = %d, index_cnt = %d" % (cnt_line, index_cnt))
if (debug == True):
if (cnt_line >= 10000):
break
split = line.strip('\n').split(',')
for i in cate_feat:
if (split[i] != ''):
if split[i] not in dic[i]:
dic[i][split[i]] = [index_others[i], 0]
dic[i][split[i]][1] += 1
if (dic[i][split[i]][0] == index_others[i] and dic[i][split[i]][1] == Bound[i]):
dic[i][split[i]][0] = index_cnt
index_cnt += 1
if (tune == False):
label = split[label_index]
if (label != '0'): label = '1'
index = [0] * (Column - 1)
value = ['0'] * (Column - 1)
for i in range(Column):
cur = i
if (i == label_index): continue
if (i > label_index): cur = i - 1
if (numerical[i] == 1):
index[cur] = index_others[i]
if (split[i] != ''):
value[cur] = split[i]
# Max[i] = max(int(split[i]), Max[i])
else:
if (split[i] != ''):
index[cur] = dic[i][split[i]][0]
value[cur] = '1'
if (split[i] == ''):
value[cur] = '0'
f_train_index.write(' '.join(str(i) for i in index) + '\n')
f_train_value.write(' '.join(value) + '\n')
f_train_label.write(label + '\n')
f1.close()
f_train_index.close()
f_train_value.close()
f_train_label.close()
print ("Finished!")
print ("index_cnt = %d" % index_cnt)
# print ("max number for numerical features:")
# for i in numr_feat:
# print ("no.:%d max: %d" % (i, Max[i]))
def preprocess_kdd(datafile):
#coding=utf-8
#Email of the author: zjduan@pku.edu.cn
'''
0. Click:
1. Impression(numerical)
2. DisplayURL: (categorical)
3. AdID:(categorical)
4. AdvertiserID:(categorical)
5. Depth:(numerical)
6. Position:(numerical)
7. QueryID: (categorical) the key of the data file 'queryid_tokensid.txt'.
8. KeywordID: (categorical)the key of 'purchasedkeyword_tokensid.txt'.
9. TitleID: (categorical)the key of 'titleid_tokensid.txt'.
10. DescriptionID: (categorical)the key of 'descriptionid_tokensid.txt'.
11. UserID: (categorical)the key of 'userid_profile.txt'
12. User's Gender: (categorical)
13. User's Age: (categorical)
'''
train_path = './training.txt'
f1 = open(train_path, 'r')
f2 = open('./userid_profile.txt', 'r')
dic = {}
f_train_value = open('./train_x.txt', 'w')
f_train_index = open('./train_i.txt', 'w')
f_train_label = open('./train_y.txt', 'w')
debug = False
tune = False
Column = 12
Field = 13
numr_feat = [1,5,6]
numerical = [0] * Column
cate_feat = [2,3,4,7,8,9,10,11]
index_cnt = 0
index_others = [0] * (Field + 1)
Max = [0] * 12
numerical[0] = -1
for i in numr_feat:
index_others[i] = index_cnt
index_cnt += 1
numerical[i] = 1
for i in cate_feat:
index_others[i] = index_cnt
index_cnt += 1
for i in range(Field + 1):
dic[i] = dict()
###init user_dic
user_dic = dict()
cnt_line = 0
for line in f2:
cnt_line += 1
if (cnt_line % 1000000 == 0):
print ("cnt_line = %d, index_cnt = %d" % (cnt_line, index_cnt))
# if (debug == True):
# if (cnt_line >= 10000):
# break
split = line.strip('\n').split('\t')
user_dic[split[0]] = [split[1], split[2]]
if (split[1] not in dic[12]):
dic[12][split[1]] = [index_cnt, 0]
index_cnt += 1
if (split[2] not in dic[13]):
dic[13][split[2]] = [index_cnt, 0]
index_cnt += 1
cnt_line = 0
for line in f1:
cnt_line += 1
if (cnt_line % 1000000 == 0):
print ("cnt_line = %d, index_cnt = %d" % (cnt_line, index_cnt))
if (debug == True):
if (cnt_line >= 10000):
break
split = line.strip('\n').split('\t')
for i in cate_feat:
if (split[i] != ''):
if split[i] not in dic[i]:
dic[i][split[i]] = [index_others[i], 0]
dic[i][split[i]][1] += 1
if (dic[i][split[i]][0] == index_others[i] and dic[i][split[i]][1] == 10):
dic[i][split[i]][0] = index_cnt
index_cnt += 1
if (tune == False):
label = split[0]
if (label != '0'): label = '1'
index = [0] * Field
value = ['0'] * Field
for i in range(1, 12):
if (numerical[i] == 1):
index[i - 1] = index_others[i]
if (split[i] != ''):
value[i - 1] = split[i]
Max[i] = max(int(split[i]), Max[i])
else:
if (split[i] != ''):
index[i - 1] = dic[i][split[i]][0]
value[i - 1] = '1'
if (split[i] == ''):
value[i - 1] = '0'
if (i == 11 and split[i] == '0'):
value[i - 1] = '0'
### gender and age
if (split[11] == '' or (split[11] not in user_dic)):
index[12 - 1] = index_others[12]
value[12 - 1] = '0'
index[13 - 1] = index_others[13]
value[13 - 1] = '0'
else:
index[12 - 1] = dic[12][user_dic[split[11]][0]][0]
value[12 - 1] = '1'
index[13 - 1] = dic[13][user_dic[split[11]][1]][0]
value[13 - 1] = '1'
f_train_index.write(' '.join(str(i) for i in index) + '\n')
f_train_value.write(' '.join(value) + '\n')
f_train_label.write(label + '\n')
f1.close()
f_train_index.close()
f_train_value.close()
f_train_label.close()
print ("Finished!")
print ("index_cnt = %d" % index_cnt)
print ("max number for numerical features:")
for i in numr_feat:
print ("no.:%d max: %d" % (i, Max[i]))
def _load_data(_nrows=None, debug = False, datafile=""):
TRAIN_X = os.path.join(datafile, 'train_x.txt')
TRAIN_Y = os.path.join(datafile, 'train_y.txt')
print(TRAIN_X)
print(TRAIN_Y)
    train_x = pd.read_csv(TRAIN_X,header=None,sep=' ',nrows=_nrows, dtype=np.float64)
train_y = pd.read_csv(TRAIN_Y,header=None,sep=' ',nrows=_nrows, dtype=np.int32)
train_x = train_x.values
train_y = train_y.values.reshape([-1])
print('data loading done!')
print('training data : %d' % train_y.shape[0])
assert train_x.shape[0]==train_y.shape[0]
return train_x, train_y
def save_x_y(fold_index, train_x, train_y, datafile):
train_x_name = "train_x.npy"
train_y_name = "train_y.npy"
_get = lambda x, l: [x[i] for i in l]
for i in range(len(fold_index)):
print("now part %d" % (i+1))
part_index = fold_index[i]
Xv_train_, y_train_ = _get(train_x, part_index), _get(train_y, part_index)
save_dir_Xv = os.path.join(datafile, "part" + str(i+1))
save_dir_y = os.path.join(datafile, "part" + str(i+1))
if (os.path.exists(save_dir_Xv) == False):
os.makedirs(save_dir_Xv)
if (os.path.exists(save_dir_y) == False):
os.makedirs(save_dir_y)
save_path_Xv = os.path.join(save_dir_Xv, train_x_name)
save_path_y = os.path.join(save_dir_y, train_y_name)
np.save(save_path_Xv, Xv_train_)
np.save(save_path_y, y_train_)
def save_i(fold_index, datafile):
_get = lambda x, l: [x[i] for i in l]
TRAIN_I = os.path.join(datafile, 'train_i.txt')
train_i = pd.read_csv(TRAIN_I,header=None,sep=' ',nrows=None, dtype=np.int32)
train_i = train_i.values
feature_size = train_i.max() + 1
print ("feature_size = %d" % feature_size)
feature_size = [feature_size]
feature_size = np.array(feature_size)
np.save(os.path.join(datafile, "feature_size.npy"), feature_size)
# pivot = 40000000
# test_i = train_i[pivot:]
# train_i = train_i[:pivot]
# print("test_i size: %d" % len(test_i))
print("train_i size: %d" % len(train_i))
# np.save("../data/test/test_i.npy", test_i)
for i in range(len(fold_index)):
print("now part %d" % (i+1))
part_index = fold_index[i]
Xi_train_ = _get(train_i, part_index)
save_path_Xi = os.path.join(datafile, "part" + str(i+1), 'train_i.npy')
np.save(save_path_Xi, Xi_train_)
def stratifiedKfold(datafile):
train_x, train_y = _load_data(datafile=datafile)
print('loading data done!')
folds = list(StratifiedKFold(n_splits=10, shuffle=True,
random_state=2018).split(train_x, train_y))
fold_index = []
for i,(train_id, valid_id) in enumerate(folds):
fold_index.append(valid_id)
print("fold num: %d" % (len(fold_index)))
fold_index = np.array(fold_index)
np.save(os.path.join(datafile, "fold_index.npy"), fold_index)
save_x_y(fold_index, train_x, train_y, datafile=datafile)
print("save train_x_y done!")
fold_index = np.load(os.path.join(datafile, "fold_index.npy"), allow_pickle=True)
save_i(fold_index, datafile=datafile)
print("save index done!")
def scale(x):
if x > 2:
x = int(math.log(float(x))**2)
return x
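# Illustrative effect of the log-squared compression on dense counts (example values):
#   scale(2)     -> 2   (values <= 2 pass through unchanged)
#   scale(9)     -> 4   (int(ln(9)**2))
#   scale(10000) -> 84  (large counts are squashed onto a small integer range)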
def scale_dense_feat(datafile, dataset_name):
    # use the dataset_name argument (passed in from main) instead of the global args
    if dataset_name == "criteo":
        num_dense = 13
    elif dataset_name == "avazu":
        # avazu has no dense features, so there is nothing to scale
        return True
    elif dataset_name == "kdd":
        num_dense = 3
for i in range(1,11):
print('now part %d' % i)
data = np.load(os.path.join(datafile, 'part'+str(i), 'train_x.npy'), allow_pickle=True)
part = data[:,:num_dense]
for j in range(part.shape[0]):
if j % 100000 ==0:
print(j)
part[j] = list(map(scale, part[j]))
np.save(os.path.join(datafile, 'part' + str(i), 'train_x2.npy'), data)
def print_shape(name, var):
print("Shape of {}: {}".format(name, var.shape))
def check_existing_file(filename, force):
if os.path.isfile(filename):
print("file {} already exists!".format(filename))
if not force:
            raise ValueError("aborting, use --force if you want to proceed")
else:
print("Will override the file!")
def sample_data(args):
output_data_file = "{}{}.npz".format(args.data_file, args.save_filename)
check_existing_file(output_data_file, args.force)
data = np.load(args.sample_data_file, allow_pickle=True)
X_cat, X_int, y = data["X_cat"], data["X_int"], data["y"]
print_shape("X_cat", X_cat)
print_shape("X_int", X_int)
print_shape("y", y)
print("total number of data points: {}".format(len(y)))
print(
"saving first {} data points to {}{}.npz".format(
args.num_samples, args.data_file, args.save_filename
)
)
np.savez_compressed(
"{}{}.npz".format(args.data_file, args.save_filename),
X_int=X_int[0 : args.num_samples, :],
X_cat=X_cat[0 : args.num_samples, :],
y=y[0 : args.num_samples],
)
def compress_ids(feature, raw_to_new={}):
if raw_to_new is None:
start_idx = 1
raw_to_new = {}
else:
start_idx = 0
for i in range(len(feature)):
if feature[i] not in raw_to_new:
raw_to_new[feature[i]] = len(raw_to_new) + start_idx
feature[i] = raw_to_new[feature[i]]
return raw_to_new
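# Illustrative behaviour of compress_ids (hypothetical inputs); note it renumbers `feature`
# in place and returns the raw-id -> new-id mapping:
#   compress_ids([7, 7, 42], None)  -> feature becomes [1, 1, 2]  (id 0 stays reserved)
#   compress_ids([5, 9], {5: 0})    -> feature becomes [0, 1]     (pre-seeded "missing" id keeps 0)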
def final_preprocess(datafile):
X_int = []
X_cat = []
y = []
missing_sparse = []
if args.dataset_name == "criteo":
num_dense, num_sparse = 13, 26
TRAIN_X = "train_x2.npy"
elif args.dataset_name == "avazu":
num_dense, num_sparse = 0, 23
TRAIN_X = "train_x.npy"
elif args.dataset_name == "kdd":
num_dense, num_sparse = 3, 10
TRAIN_X = "train_x2.npy"
TRAIN_Y = "train_y.npy"
TRAIN_I = "train_i.npy"
for i in [3,4,5,6,7,8,9,10,2,1]:#range(1,11): # todo
f = np.load(os.path.join(datafile, "part" + str(i), TRAIN_I), "r", allow_pickle=True)
g = np.load(os.path.join(datafile, "part" + str(i), TRAIN_X), "r", allow_pickle=True)
h = np.load(os.path.join(datafile, "part" + str(i), TRAIN_Y), "r", allow_pickle=True)
X_int_split = np.array(g[:, 0:num_dense])
X_cat_split = np.array(f[:, num_dense:])
y_split = h
missing_sparse_split = np.array(g[:,0:])
indices = np.arange(len(y_split))
indices = np.random.permutation(indices)
# shuffle data
X_cat_split = X_cat_split[indices]
X_int_split = X_int_split[indices]
y_split = y_split[indices].astype(np.float32)
missing_sparse_split = missing_sparse_split[indices]
X_int.append(X_int_split)
X_cat.append(X_cat_split)
y.append(y_split)
missing_sparse.append(missing_sparse_split)
X_int = np.concatenate(X_int)
X_cat = np.concatenate(X_cat)
y = np.concatenate(y)
missing_sparse = np.concatenate(missing_sparse)
print("expected feature size", X_cat.max() + 1)
flat = X_cat.flatten()
fset = set(flat)
print("expected size", len(fset))
missing_sparse_maps = []
for i in range(num_sparse):
missing_slice = missing_sparse[:,i]
if 0 in missing_slice:
locs = np.where(missing_slice==0)[0]
missing_sparse_maps.append({X_cat[locs[0],i]:0})
else:
missing_sparse_maps.append(None)
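    # each entry is either None (feature i is never missing) or a one-element dict mapping the
    # id that marked missing values to 0, so that compress_ids keeps 0 reserved for "missing".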
raw_to_new_ids = []
for i in range(X_cat.shape[1]):
print("compressing the ids for the {}-th feature.".format(i))
raw_to_new_ids.append(compress_ids(X_cat[:, i], missing_sparse_maps[i]))
total = 0
hashsizes = []
for i in range(len(raw_to_new_ids)):
hashsize = max(raw_to_new_ids[i].values())+1 # 1 is for the zero
hashsizes.append(hashsize)
print("sparse_" + str(i),"\t", hashsize)
total += hashsize
if args.dataset_name == "criteo":
hashsize_filename = "criteo_hashsizes.npy"
finaldata_filename = "criteo_processed.npz"
elif args.dataset_name == "avazu":
hashsize_filename = "avazu_hashsizes.npy"
finaldata_filename = "avazu_processed.npz"
elif args.dataset_name == "kdd":
hashsize_filename = "kdd2012_hashsizes.npy"
finaldata_filename = "kdd2012_processed.npz"
np.save(os.path.join(datafile, hashsize_filename), np.array(hashsizes))
np.savez_compressed(os.path.join(datafile, finaldata_filename), X_int=X_int, X_cat=X_cat, y=y)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse Data")
parser.add_argument("--dataset-name", default="criteo", choices=["criteo", "avazu", "kdd"])
parser.add_argument("--data-file", type=str, default="")
parser.add_argument("--sample-data-file", type=str, default="")
parser.add_argument("--save-filename", type=str, default="")
parser.add_argument("--mode", type=str, default="raw")
parser.add_argument("--num-samples", type=int, default=1000)
parser.add_argument("--force", action="store_true", default=False)
args = parser.parse_args()
if args.mode == "raw":
print(
"Load raw data and parse (compress id to consecutive space, "
"shuffle within ds) and save it."
)
if args.dataset_name == "criteo":
preprocess_criteo(datafile=args.data_file)
elif args.dataset_name == "avazu":
preprocess_avazu(datafile=args.data_file)
elif args.dataset_name == "kdd":
preprocess_kdd(datafile=args.data_file)
print("Start stratifiedKfold!")
stratifiedKfold(datafile=args.data_file)
print("Start scaling!")
scale_dense_feat(datafile=args.data_file, dataset_name=args.dataset_name)
print("Final preprocessing stage!")
final_preprocess(datafile=args.data_file)
print("Finish data preprocessing!")
elif args.mode == "sample":
print("Load processed data and take the first K data points and save it.")
sample_data(args)
else:
raise ValueError("Unknown mode: {}".format(args.mode))
|
AutoCTR-main
|
scripts/preprocess.py
|
AutoCTR-main
|
scripts/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.path.append('gen-py')
import json
import logging
import os
import pickle
import time
import copy
import numpy as np
import torch
from thrift.protocol import TSimpleJSONProtocol
from thrift.util import Serializer
from config import ttypes as config
from models.nas_modules import NASRecNet
from models.builder import load_model
from nasrec.builder import build_searcher, load_searcher, save_searcher
from nasrec.utils import reward_normalization
from trainers.simple import train as simple_train
from utils.data import prepare_data
from utils.search_utils import get_args, get_trainer_config, get_searcher_config
from torch.multiprocessing import Pipe, Process, set_start_method
from thop import profile
set_start_method('spawn', force=True)
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (100000, rlimit[1]))
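# raise the soft limit on open file descriptors; the search below spawns many worker
# processes and dataloaders, which could otherwise exhaust the default per-process limit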
import GPUtil
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
jfactory = TSimpleJSONProtocol.TSimpleJSONProtocolFactory()
if __name__ == "__main__":
# get arguments
args = get_args()
logger.warning("All Args: {}".format(args))
# set seeds
np.random.seed(args.numpy_seed)
torch.manual_seed(args.torch_seed)
excludeID = [int(id) for id in args.excludeID.split(",")] if args.excludeID else []
deviceIDs = GPUtil.getAvailable(order='first', limit=1, maxLoad=0.9, maxMemory=0.8, excludeID=excludeID)
CUDA = 'cuda:' + str(deviceIDs[0])
device = torch.device("cpu")
if args.use_gpu:
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
device = torch.device(CUDA)
else:
print("WARNING: CUDA is not available on this machine, proceed with CPU")
# load warm start emb dict
if args.warm_start_emb:
if args.data_set_name == "criteo":
ckp_name = "warm_start_criteo.ckp"
elif args.data_set_name == "avazu":
ckp_name = "warm_start_avazu.ckp"
elif args.data_set_name == "kdd2012":
ckp_name = "warm_start_kdd2012.ckp"
warm_start_filename = os.path.join(args.save_model_path, "models", ckp_name)
warm_start_model = load_model(warm_start_filename)
warm_start_emb_dict = warm_start_model.emb_dict
# get trainer config
input_summary, args = get_trainer_config(args)
# change dataset to small dataset
input_summary["data_options"]["from_file"]["data_file"] = args.data_file
input_summary["data_options"]["from_file"]["batch_size"] = args.batch_size
# change train_options
input_summary["train_options"]["nepochs"] = args.nepochs
input_summary["train_options"]["logging_config"]["log_freq"] = 100000
input_summary["train_options"]["logging_config"]["tb_log_freq"] = 100000
# change performance_options
input_summary["performance_options"]["num_readers"] = args.num_workers
input_summary["performance_options"]["num_trainers"] = args.num_trainers
input_summary["performance_options"]["use_gpu"] = args.use_gpu
# change optimizer
input_summary["feature_options"]["dense"]["optim"]["adam"]["lr"] = args.learning_rate
input_summary["feature_options"]["sparse"]["optim"]["sparse_adam"]["lr"] = args.learning_rate
# change feature hashing size
for i, feature in enumerate(input_summary["feature_options"]["sparse"]["features"]):
if feature["hash_size"] > args.hash_size:
input_summary["feature_options"]["sparse"]["features"][i]["hash_size"] = args.hash_size
# extract feature config for searcher construction and trainer
train_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["train_options"]),
config.TrainConfig(),
)
# extract feature config for searcher construction and trainer
feature_config = Serializer.deserialize(
jfactory,
json.dumps(input_summary["feature_options"]),
config.FeatureConfig(),
)
data_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["data_options"]),
config.DataConfig(),
)
performance_options = Serializer.deserialize(
jfactory,
json.dumps(input_summary["performance_options"]),
config.PerformanceConfig(),
)
# construct temporal directory to save models
if args.resume_file:
temp_dir = os.path.join(
args.save_model_path,
args.searcher_type,
args.data_set_name,
args.resume_file,
)
rewards = np.load(os.path.join(temp_dir, "rewards.npy"), allow_pickle=True).tolist()
all_roc_aucs = np.load(os.path.join(temp_dir, "all_roc_aucs.npy"), allow_pickle=True).tolist()
all_arc_vecs = np.load(os.path.join(temp_dir, "all_arc_vecs.npy"), allow_pickle=True).tolist()
all_actions = np.load(os.path.join(temp_dir, "all_actions.npy"), allow_pickle=True).tolist()
all_params = np.load(os.path.join(temp_dir, "all_params.npy"), allow_pickle=True).tolist()
all_flops = np.load(os.path.join(temp_dir, "all_flops.npy"), allow_pickle=True).tolist()
finished_model = np.load(os.path.join(temp_dir, "finished_model.npy"), allow_pickle=True).tolist()
fbl_meta = np.load(os.path.join(temp_dir, "fbl_meta.npy"), allow_pickle=True).tolist()
# unpickling meta data
with open(os.path.join(temp_dir, "meta.txt"), "rb") as fp:
[
best_val_loss,
best_model,
best_name,
best_fbl_id,
total_model,
epoch,
] = pickle.load(fp)
fp.close()
searcher = load_searcher(os.path.join(temp_dir, "searcher.ckp"))
if args.searcher_type == "evo":
is_initial = np.load(os.path.join(temp_dir, "is_initial.npy"), allow_pickle=True).tolist()
if args.searcher_type == "evo":
searcher.all_arc_vecs = all_arc_vecs
searcher.all_actions = all_actions
searcher.all_params = all_params
searcher.all_flops = all_flops
searcher.all_rewards = rewards
searcher.all_roc_aucs = all_roc_aucs
if args.survival_type == "age":
searcher.population_arc_queue = all_actions[-searcher.population_size:]
searcher.population_val_queue = rewards[-searcher.population_size:]
elif args.survival_type == "comb":
searcher.comb()
else:
if args.survival_type == "fit":
idx = sorted(range(len(rewards)), key=lambda i: rewards[i], reverse=True)[
-searcher.population_size:]
elif args.survival_type == "mix":
division = int(0.5 * searcher.population_size)
tmp_rewards = rewards[:-division]
idx = sorted(range(len(tmp_rewards)), key=lambda i: tmp_rewards[i], reverse=True)[-division:]
searcher.population_arc_queue = np.array(all_actions)[idx].tolist()
searcher.population_val_queue = np.array(rewards)[idx].tolist()
if args.survival_type == "mix":
searcher.population_arc_queue += all_actions[-division:]
searcher.population_val_queue += rewards[-division:]
logger.warning("Total_resume_length: arc_{}, val_{}".format(
len(searcher.population_arc_queue),
len(searcher.population_val_queue)
))
searcher.sampler_type = args.sampler_type
searcher.update_GBDT()
else:
if args.save_model_path:
temp_dir = os.path.join(
args.save_model_path,
args.searcher_type,
args.data_set_name,
time.strftime("%Y%m%d-%H%M%S"),
)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
# construct searcher
searcher_config = get_searcher_config(args)
searcher = build_searcher(searcher_config, feature_config)
searcher.to(device=device)
best_val_loss = np.Inf
best_model = None
best_name = None
best_fbl_id = None
fbl_meta = []
rewards = []
all_roc_aucs = []
finished_model = []
total_model = -1
epoch = 0
        # for checking repeated architectures
all_arc_vecs = []
# mark all actions (block_configs)
all_actions = []
all_params = []
all_flops = []
if args.searcher_type == "evo":
is_initial = True
all_forward_node_ids = []
all_virtual_losses = []
logger.warning("The running history is save in {}".format(temp_dir))
fbl_run_queue = []
fbl_result_queue = []
fbl_device_queue = []
fbl_time_queue = []
fbl_name_queue = []
fbl_id_queue = []
nasrec_net_queue = []
nasrec_arc_vec_queue = []
action_queue = []
params_queue = []
flops_queue = []
    # for data-saving purposes
batch_processor, train_dataloader, val_dataloader, \
val_dataloader_batches, train_dataloader_batches = {}, {}, {}, {}, {}
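    # per-GPU batch cache: the loop below builds one batch_processor per visible GPU and, when
    # args.save_batches is set, materializes and preprocesses every train/val mini-batch on that
    # device up front so the training subprocesses can reuse them without re-reading the dataset.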
for id in range(args.total_gpus):
if id not in excludeID:
CUDA = 'cuda:' + str(id)
if len(batch_processor) == 0:
(
_, # datasets
batch_processor[CUDA],
train_dataloader,
val_dataloader,
_, # eval_dataloader
) = prepare_data(data_options, performance_options, CUDA)
else:
(
_, # datasets
batch_processor[CUDA],
_,
_,
_, # eval_dataloader
) = prepare_data(data_options, performance_options, CUDA)
if args.save_batches:
train_dataloader_batches[CUDA] = []
val_dataloader_batches[CUDA] = []
if len(batch_processor) == 1:
for i_batch, sample_batched in enumerate(train_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
train_dataloader_batches[CUDA].append(sample_batched)
for i_batch, sample_batched in enumerate(val_dataloader):
if i_batch % 100 == 0:
logger.warning("i_batch {}".format(i_batch))
val_dataloader_batches[CUDA].append(sample_batched)
mark = CUDA
else:
train_dataloader_batches[CUDA] = [[]] * len(train_dataloader_batches[mark])
val_dataloader_batches[CUDA] = [[]] * len(val_dataloader_batches[mark])
for i_batch, sample_batched in enumerate(train_dataloader_batches[mark]):
train_dataloader_batches[CUDA][i_batch] = {}
if i_batch % 100 == 0:
logger.warning("copy i_batch {}".format(i_batch))
for k, v in sample_batched.items():
train_dataloader_batches[CUDA][i_batch][k] = v.clone().detach()
# train_dataloader_batches[CUDA] = train_dataloader_batches[mark]
for i_batch, sample_batched in enumerate(train_dataloader_batches[CUDA]):
if i_batch % 100 == 0:
logger.warning("process_i_batch {}".format(i_batch))
train_dataloader_batches[CUDA][i_batch] = batch_processor[CUDA](
mini_batch=sample_batched)
for i_batch, sample_batched in enumerate(val_dataloader_batches[mark]):
val_dataloader_batches[CUDA][i_batch] = {}
if i_batch % 100 == 0:
logger.warning("copy i_batch {}".format(i_batch))
for k, v in sample_batched.items():
val_dataloader_batches[CUDA][i_batch][k] = v.clone().detach()
# val_dataloader_batches[CUDA] = val_dataloader_batches[mark]
for i_batch, sample_batched in enumerate(val_dataloader_batches[CUDA]):
if i_batch % 100 == 0:
logger.warning("process_i_batch {}".format(i_batch))
val_dataloader_batches[CUDA][i_batch] = batch_processor[CUDA](
mini_batch=sample_batched)
else:
train_dataloader_batches[CUDA] = None
val_dataloader_batches[CUDA] = None
if args.save_batches:
for i_batch, sample_batched in enumerate(train_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
train_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
for i_batch, sample_batched in enumerate(val_dataloader_batches[mark]):
if i_batch % 100 == 0:
logger.warning("process_first_cuda_i_batch {}".format(i_batch))
val_dataloader_batches[mark][i_batch] = batch_processor[mark](
mini_batch=sample_batched)
logger.warning("batch_processor {}".format(batch_processor))
# load historical samples (could from other searchers)
if args.historical_sample_path and args.historical_sample_num:
hist_dir = args.historical_sample_path
rewards = np.load(os.path.join(hist_dir, "rewards.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
all_actions = np.load(os.path.join(hist_dir, "all_actions.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
# TODO: all_params, all_flops
try:
all_params = np.load(os.path.join(hist_dir, "all_params.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
all_flops = np.load(os.path.join(hist_dir, "all_flops.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
        except Exception:
            # params/flops were not saved by the earlier run; recompute them from the finished models
finished_model = np.load(os.path.join(hist_dir, "finished_model.npy"), allow_pickle=True).tolist()
all_params, all_flops = [], []
# Get the flops and params of the model
for i_batch, sample_batched in enumerate(train_dataloader_batches[CUDA]):
_, feats, _ = sample_batched
break
for nasrec_net_fp in finished_model:
with open(nasrec_net_fp, "r") as fp:
nasrec_net_config = json.load(fp)
nasrec_net = Serializer.deserialize(
jfactory,
json.dumps(nasrec_net_config),
config.ModelConfig(),
)
tmp_model = NASRecNet(nasrec_net, feature_config)
tmp_model.to(device=CUDA)
flops, params = profile(tmp_model, inputs=(feats, ), verbose=False)
flops = flops * 1.0 / args.batch_size
all_params.append(params)
all_flops.append(flops)
np.save(os.path.join(hist_dir, "all_params.npy"), np.array(all_params))
np.save(os.path.join(hist_dir, "all_flops.npy"), np.array(all_flops))
all_params = np.load(os.path.join(hist_dir, "all_params.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
all_flops = np.load(os.path.join(hist_dir, "all_flops.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
logger.warning(
"resume_all_params: {} all_flops: {}".format(all_params, all_flops)
)
        # convert actions to vecs (we do not directly read the vecs
# since we may change the vectorized expression of an arc)
all_arc_vecs = [
np.concatenate(searcher.dicts_to_vecs(action)) for action in all_actions
]
finished_model = np.load(os.path.join(hist_dir, "finished_model.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
fbl_meta = np.load(os.path.join(hist_dir, "fbl_meta.npy"), allow_pickle=True).tolist()[
: args.historical_sample_num
]
for mp_old in finished_model:
with open(mp_old, "r") as fp:
nasrec_net_old = json.load(fp)
fp.close()
mp_new = os.path.join(temp_dir, mp_old.split("/")[-1])
with open(mp_new, "w") as fp:
json.dump(nasrec_net_old, fp)
fp.close()
# unpickling meta data
best_idx = np.argmin(rewards)
best_val_loss = rewards[best_idx]
best_name, best_fbl_id = fbl_meta[best_idx]
logger.warning(
"resume_best_val_loss: {} best_idx: {} best_name {}, best_fbl_id {}".format(
best_val_loss, best_idx, best_name, best_fbl_id
)
)
best_model_filename = os.path.join(
hist_dir, finished_model[best_idx].split("/")[-1]
)
with open(best_model_filename, "r") as fp:
best_model = json.load(fp)
fp.close()
total_model = args.historical_sample_num
epoch = args.historical_sample_num
if args.searcher_type == "evo":
searcher.all_arc_vecs = all_arc_vecs
searcher.all_actions = all_actions
searcher.all_params = all_params
searcher.all_flops = all_flops
searcher.all_rewards = rewards
# searcher.all_roc_aucs = all_roc_aucs
if args.survival_type == "age":
searcher.population_arc_queue = all_actions[-searcher.population_size:]
searcher.population_val_queue = rewards[-searcher.population_size:]
elif args.survival_type == "comb":
searcher.comb()
else:
if args.survival_type == "fit":
idx = sorted(range(len(rewards)), key=lambda i: rewards[i], reverse=True)[
-searcher.population_size:]
elif args.survival_type == "mix":
division = int(0.5 * searcher.population_size)
tmp_rewards = rewards[:-division]
idx = sorted(range(len(tmp_rewards)), key=lambda i: tmp_rewards[i], reverse=True)[-division:]
searcher.population_arc_queue = np.array(all_actions)[idx].tolist()
searcher.population_val_queue = np.array(rewards)[idx].tolist()
if args.survival_type == "mix":
searcher.population_arc_queue += all_actions[-division:]
searcher.population_val_queue += rewards[-division:]
logger.warning("Total_hist_length: arc_{}, val_{}".format(
len(searcher.population_arc_queue),
len(searcher.population_val_queue)
))
if len(searcher.population_arc_queue) == searcher.population_size:
is_initial = False
searcher.sampler_type = args.sampler_type
searcher.update_GBDT()
while epoch < args.search_nepochs:
while len(fbl_run_queue) < args.num_machines:
logger.info(
"Using fblearner training with {} trainers.".format(args.num_trainers)
)
# Three steps NAS
# 1. generate arcs
if args.searcher_type == "evo":
nasrec_net, _, actions, nasrec_arc_vecs = searcher.sample(
batch_size=1, return_config=True, is_initial=is_initial
)
else:
nasrec_net, log_prob, actions, nasrec_arc_vecs = searcher.sample(
batch_size=1, return_config=True
)
nasrec_net = nasrec_net[0]
action = actions[0]
nasrec_arc_vec = nasrec_arc_vecs[0]
total_model += 1
# check if an arch has already been searched before
repeat_idx = (
[]
if not all_arc_vecs or args.repeat_checker_off
else np.where(
np.sum(abs(np.array(all_arc_vecs) - nasrec_arc_vec), 1) == 0
)[0]
)
if len(repeat_idx) != 0:
logger.warning("The architecture is same with: {}.".format(repeat_idx))
continue
repeat_idx_1 = (
[]
if not nasrec_arc_vec_queue or args.repeat_checker_off
else np.where(
np.sum(abs(np.array(nasrec_arc_vec_queue) - nasrec_arc_vec), 1) == 0
)[0]
)
# TODO: check correctness
if len(repeat_idx_1) != 0:
logger.warning("The architecture is same with the current running: {}.".format(repeat_idx_1))
continue
# 2. put on fblearner to get performance
model_option = Serializer.serialize(jfactory, nasrec_net)
model_option = json.loads(model_option)
input_summary["model_option"] = model_option
basename = (
"[exp autoctr] nasnet_model_search_"
+ args.searcher_type
+ "_macro_space_type_"
+ str(args.macro_space_type)
+ "_"
+ str(total_model)
+ "_updated_model_"
+ str(epoch)
)
try:
if len(repeat_idx) != 0:
break
# TODO: device
deviceIDs = GPUtil.getAvailable(order='random',
limit=1,
maxLoad=args.maxLoad,
maxMemory=args.maxMemory,
excludeID=excludeID)
CUDA = 'cuda:' + str(deviceIDs[0])
except Exception:
logger.warning("No available device!")
try:
recv_end, send_end = Pipe(False)
tmp_model = NASRecNet(nasrec_net, feature_config)
if args.warm_start_emb:
tmp_model.emb_dict = copy.deepcopy(warm_start_emb_dict)
tmp_model.to(device=CUDA)
# Get the flops and params of the model
for i_batch, sample_batched in enumerate(train_dataloader_batches[CUDA]):
_, feats, _ = sample_batched
break
flops, params = profile(tmp_model, inputs=(feats, ), verbose=False)
flops = flops * 1.0 / args.batch_size
logger.warning("The current flops {}, params {}".format(flops, params))
# launch a subprocess for model training
new_fbl_run = Process(target=simple_train,
args=(tmp_model,
train_options,
None, # train_dataloader[CUDA],
batch_processor[CUDA],
CUDA,
None,
0,
send_end,
train_dataloader_batches[CUDA],
val_dataloader_batches[CUDA],
args.batch_size
# args.save_batches,
))
new_fbl_run.start()
fbl_id_queue.append(total_model)
fbl_run_queue.append(new_fbl_run)
fbl_result_queue.append(recv_end)
fbl_device_queue.append(CUDA)
fbl_time_queue.append(0)
fbl_name_queue.append(basename)
nasrec_net_queue.append(model_option)
nasrec_arc_vec_queue.append(nasrec_arc_vec)
action_queue.append(action)
params_queue.append(params)
flops_queue.append(flops)
except Exception:
logger.warning("Model are cannot be registered now!!")
if len(repeat_idx) != 0:
# has repeated arch
(fbl_name, fbl_id) = fbl_meta[repeat_idx[0]]
rewards.append(rewards[repeat_idx[0]])
model_filename = finished_model[repeat_idx[0]]
with open(model_filename, "r") as fp:
nasrec_net = json.load(fp)
fp.close()
nasrec_arc_vec = all_arc_vecs[repeat_idx[0]]
action = all_actions[repeat_idx[0]]
params = all_params[repeat_idx[0]]
flops = all_flops[repeat_idx[0]]
else:
# check the status of all the current models
mark = args.num_machines
while mark == args.num_machines:
fbl_time_queue = [t + args.waiting_time for t in fbl_time_queue]
mark = 0
for i, fbl_run in enumerate(fbl_run_queue):
if (
fbl_run.exitcode is None
and fbl_time_queue[i] <= args.fbl_kill_time
):
mark += 1
else:
break
logger.warning("All model are currently running!")
time.sleep(args.waiting_time)
# get the terminated workflow
fbl_run = fbl_run_queue.pop(mark)
fbl_result = fbl_result_queue.pop(mark)
fbl_device = fbl_device_queue.pop(mark)
fbl_time = fbl_time_queue.pop(mark)
fbl_name = fbl_name_queue.pop(mark).split("_")
fbl_id = fbl_id_queue.pop(mark)
nasrec_net = nasrec_net_queue.pop(mark)
nasrec_arc_vec = nasrec_arc_vec_queue.pop(mark)
action = action_queue.pop(mark)
params = params_queue.pop(mark)
flops = flops_queue.pop(mark)
if fbl_time > args.fbl_kill_time:
fbl_run.terminate()
logger.warning(
"Model #_{} training Failed. ID: {}".format(fbl_name[-4], fbl_id)
)
epoch -= 1
continue
            # a model in the queue has finished; collect its results
logger.warning("mark {}, len(fbl_run_queue) {}".format(mark, len(fbl_run_queue)))
try:
output = fbl_result.recv()
except Exception:
# Failed to extract results due to some transient issue.
logger.warning(
"The results of model #_{} are failed to be obtained. ID: {}. DeviceID: {}".format(
fbl_name[-4], fbl_id, fbl_device
)
)
epoch -= 1
continue
logger.warning(
"Outputs of Model f{}_M_{}_S_{}: {}".format(
fbl_id, fbl_name[-4], fbl_name[-1], output
)
)
if output[-2]["avg_val_loss"] is None or np.isnan(output[-2]["avg_val_loss"]) \
or output[-2]["roc_auc_score"] is None or np.isnan(output[-2]["roc_auc_score"]):
# Output is NaN sometimes.
logger.warning(
"Model #_{} validation output is Invalid (None)! ID: {}".format(
fbl_name[-4], fbl_id
)
)
epoch -= 1
continue
all_roc_aucs.append([output[-2]["avg_val_loss"], output[-2]["roc_auc_score"]])
if args.reward_type == "logloss":
rewards.append(output[-2]["avg_val_loss"])
elif args.reward_type == "auc":
rewards.append(1 - output[-2]["roc_auc_score"])
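        # either way the reward is a quantity to minimize: raw validation logloss, or
        # 1 - AUC so that a higher AUC corresponds to a smaller reward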
model_filename = os.path.join(
temp_dir, "M_" + str(fbl_name[-4]) + "_S_" + str(fbl_name[-1]) + ".json"
)
finished_model.append(model_filename)
fbl_meta.append((fbl_name, fbl_id))
all_arc_vecs.append(nasrec_arc_vec)
all_actions.append(action)
all_params.append(params)
all_flops.append(flops)
if args.save_model_path:
try:
logger.info("Saving model to {}".format(temp_dir))
with open(model_filename, "w") as fp:
json.dump(nasrec_net, fp)
fp.close()
np.save(os.path.join(temp_dir, "rewards.npy"), np.array(rewards))
np.save(os.path.join(temp_dir, "all_roc_aucs.npy"), np.array(all_roc_aucs))
np.save(
os.path.join(temp_dir, "all_arc_vecs.npy"), np.array(all_arc_vecs)
)
np.save(
os.path.join(temp_dir, "all_actions.npy"), np.array(all_actions)
)
np.save(
os.path.join(temp_dir, "all_params.npy"), np.array(all_params)
)
np.save(
os.path.join(temp_dir, "all_flops.npy"), np.array(all_flops)
)
np.save(
os.path.join(temp_dir, "finished_model.npy"),
np.array(finished_model),
)
np.save(os.path.join(temp_dir, "fbl_meta.npy"), np.array(fbl_meta))
if args.searcher_type == "evo":
np.save(os.path.join(temp_dir, "is_initial.npy"), np.array(is_initial))
except Exception:
logger.warning("Failed to save the model")
# update best arc
if rewards[-1] < best_val_loss:
best_fbl_id, best_model, best_val_loss, best_name = (
fbl_id,
nasrec_net,
rewards[-1],
fbl_name,
)
if args.save_model_path:
try:
logger.warning("Saving the best model to {}".format(temp_dir))
model_filename = os.path.join(temp_dir, "Best_Model" + ".json")
with open(model_filename, "w") as fp:
json.dump(best_model, fp)
fp.close()
with open(os.path.join(temp_dir, "best_model_id.txt"), "w") as fp:
fp.write(
"M_"
+ str(fbl_name[-4])
+ "_S_"
+ str(fbl_name[-1])
+ ".json"
+ "\n"
)
fp.close()
except Exception:
logger.warning("Failed to save the best model")
# pickling meta data for resume purpose
if args.save_model_path:
with open(os.path.join(temp_dir, "meta.txt"), "wb") as fp:
pickle.dump(
[
best_val_loss,
best_model,
best_name,
best_fbl_id,
total_model,
epoch,
],
fp,
)
fp.close()
logger.warning(
"{} model has been finished. The current best arc is: f{}_M_{}_S_{}. Its avg_val_loss is {}.".format(
len(rewards), best_fbl_id, best_name[-4], best_name[-1], best_val_loss
)
)
# 3. update searcher
epoch = len(rewards)
# epoch += 1
logger.warning("Searcher update epoch {}.".format(epoch))
if args.searcher_type == "evo":
searcher.all_arc_vecs = all_arc_vecs
searcher.all_actions = all_actions
searcher.all_params = all_params
            searcher.all_flops = all_flops
searcher.all_rewards = rewards
searcher.all_roc_aucs = all_roc_aucs
searcher.update([action], [rewards[-1]], survival_type=args.survival_type)
logger.warning("Total_length update: arc_{}, val_{}".format(
len(searcher.population_arc_queue),
len(searcher.population_val_queue)
))
if (
is_initial
and len(searcher.population_arc_queue) == args.population_size
):
is_initial = False
for proc in fbl_run_queue:
proc.terminate()
fbl_run_queue = []
fbl_time_queue = []
fbl_name_queue = []
fbl_id_queue = []
nasrec_net_queue = []
nasrec_arc_vec_queue = []
action_queue = []
params_queue = []
flops_queue = []
# save searcher
save_searcher(os.path.join(temp_dir, "searcher.ckp"), searcher)
# Kill all remaining workflows on fblearner
for proc in fbl_run_queue:
proc.terminate()
logger.warning(
"The best arc is: f{}_M_{}_S_{}. Its avg_val_loss is {}.".format(
best_fbl_id, best_name[-4], best_name[-1], best_val_loss
)
)
logger.warning("\nAll avg_val_loss are {}.".format(rewards))
if args.save_model_path:
try:
logger.warning("Saving the best model to {}".format(temp_dir))
model_filename = os.path.join(
temp_dir,
"Best_Model_M_"
+ str(fbl_name[-4])
+ "_S_"
+ str(fbl_name[-1])
+ ".json",
)
with open(model_filename, "w") as fp:
json.dump(best_model, fp)
fp.close()
except Exception:
logger.warning("Failed to save the best model")
|
AutoCTR-main
|
scripts/search.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE_CC-BY-NC4.0 file in the root directory of this source tree.
import argparse
import glob, json, os, re
import tarfile, zipfile
import urllib.request
import xml.etree.ElementTree as et
import numpy as np
import pandas as pd
from nltk.tokenize import sent_tokenize
from nltk_data.init import init_nltk_data
def download_files(directory, urls, unzipped_filename):
"""download files from the given URLs to a local directory"""
# Create a directory to store the downloaded files
download_directory = os.path.join(directory, "downloaded_files")
if not os.path.exists(download_directory):
os.mkdir(download_directory)
# Loop through the URLs and download each file
for dataset_name, url in urls.items():
filename = url.split("/")[-1]
filepath = os.path.join(download_directory, filename)
# Download the file
if os.path.exists(filepath):
print(f"Skipping downloading {dataset_name} as it already exists.")
else:
urllib.request.urlretrieve(url, filepath)
print(f"Successfully downloaded {dataset_name}")
if os.path.exists(
os.path.join(download_directory, unzipped_filename[dataset_name])
):
print(f"Skipping extracting {dataset_name} as it already has been done.")
else:
if dataset_name == "ReClor":
# Unzip the password-protected file
with zipfile.ZipFile(filepath, "r") as z:
z.extractall(
os.path.join(download_directory, "reclor"),
pwd=bytes("for_non-commercial_research_purpose_only", "utf-8"),
)
elif dataset_name == "MCScript2.0":
with zipfile.ZipFile(filepath, "r") as z:
z.extractall(os.path.join(download_directory, "mcscript"))
elif url[-3:] == "zip":
# Unzip the file
with zipfile.ZipFile(filepath, "r") as z:
z.extractall(download_directory)
elif url[-3:] == ".gz":
# Extract the archive to the same folder
with tarfile.open(filepath, "r") as t:
t.extractall(download_directory)
print(f"Successfully extracted {dataset_name}")
return download_directory
def process_sciq(download_directory):
"""process the SciQ json files and return Pandas df"""
train = pd.read_json(
os.path.join(download_directory, "SciQ dataset-2 3/train.json")
)
val = pd.read_json(os.path.join(download_directory, "SciQ dataset-2 3/valid.json"))
joined = pd.concat([train, val], keys=["train", "val"])
# remove fill-in-the-blank
sciQ = joined.loc[~joined.question.str.contains("_")]
# use NLTK sent tokenizer to count the number of sentences in the passage
sciQ["num_sentences"] = sciQ.support.apply(lambda x: sent_tokenize(x)).str.len()
sciQ["passage_id"] = sciQ.support.apply(hash)
# randomly shuffle answers
newcolnames = ["answer1", "answer2", "answer3", "answer4"]
np.random.seed(0)
sciQ[newcolnames] = sciQ.apply(
lambda x: pd.Series(
np.random.choice(
x[["distractor1", "distractor2", "distractor3", "correct_answer"]],
4,
replace=False,
),
index=newcolnames,
),
axis=1,
)
# retrieve correct answer
def get_correct_answer_num(row):
for i in [1, 2, 3, 4]:
if row["correct_answer"] == row["answer" + str(i)]:
return i
# finalize format and filter out long passages
sciQ["correct_answer_num"] = sciQ.apply(get_correct_answer_num, axis=1)
sciQ["passage_id"] = sciQ.groupby("support").ngroup()
sciQ_reset = (
sciQ.loc[sciQ.support.str.len() >= 1]
.reset_index()
.rename(columns={"support": "passage", "level_1": "question_id"})
)
sciQ_reset["split"] = sciQ_reset.level_0.apply(lambda x: "dev" if x == "val" else x)
sciQ_reset["dataset"] = "sciQ"
return sciQ_reset.loc[sciQ_reset.num_sentences <= 25][final_table_columns]
def process_multirc(download_directory):
"""process the MultiRC json files and return Pandas df"""
with open(os.path.join(download_directory, "splitv2/dev_83-fixedIds.json")) as f:
multirc_dev = json.load(f)["data"]
with open(os.path.join(download_directory, "splitv2/train_456-fixedIds.json")) as f:
multirc_train = json.load(f)["data"]
# unpack json format to pandas table
i = 0
multirc_dict = {}
reg_str = "</b>(.*?)<br>"
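    # MultiRC paragraphs embed sentence markers (e.g. "<b>Sent N: </b>...<br>"); the regex above
    # recovers the plain sentence texts so they can be re-joined into a passage and counted.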
for split, data in {"dev": multirc_dev, "train": multirc_train}.items():
for para in data:
res = re.findall(reg_str, para["paragraph"]["text"])
para_text = " ".join(res)
num_sents = len(res)
for q in para["paragraph"]["questions"]:
multirc_dict[i] = {
"split": split,
"passage_id": para["id"],
"passage": para_text,
"num_sentences": num_sents,
"question_dict": q,
}
i += 1
unpacked = pd.DataFrame.from_dict(multirc_dict, orient="index")
# get number of answers and correct answers
def get_num_correct(q):
return sum(a["isAnswer"] for a in q["answers"])
unpacked["num_correct_answers"] = unpacked.question_dict.apply(get_num_correct)
unpacked["num_answers"] = unpacked.apply(
lambda x: len(x["question_dict"]["answers"]), axis=1
)
# filter questions that match Belebele format and where passages aren't too long
one_answer = unpacked.loc[
(unpacked.num_correct_answers == 1)
& (unpacked.num_answers >= 4)
& (unpacked.num_sentences <= 25)
].copy()
# randomly shuffle answers and reformat
np.random.seed(0)
newcolnames = [
"question",
"question_id",
"answer1",
"answer2",
"answer3",
"answer4",
"correct_answer",
"correct_answer_num",
]
def process_question(question):
newcols = {"question": question["question"], "question_id": question["idx"]}
answers = question["answers"]
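        # a MultiRC question may list more than four candidate answers; resample four of them
        # until the single correct answer (guaranteed by the filter above) is among the sample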
while len(answers) != 4 or (not any(a["isAnswer"] for a in answers)):
answers = np.random.choice(question["answers"], 4, replace=False)
for i in [1, 2, 3, 4]:
newcols["answer" + str(i)] = answers[i - 1]["text"]
if answers[i - 1]["isAnswer"]:
newcols["correct_answer"] = answers[i - 1]["text"]
newcols["correct_answer_num"] = i
return pd.Series(newcols)
one_answer[newcolnames] = one_answer.question_dict.apply(process_question)
one_answer["dataset"] = "MultiRC"
return one_answer[final_table_columns]
def process_mcscript(download_directory):
"""process the MCScript xml files and return Pandas df"""
# unpack xml format to pandas table
mc_script_dict = {}
i = 0
# only using train data, not taking dev or test set.
    xtree = et.parse(os.path.join(download_directory, "mcscript/train-data.xml"))
xroot = xtree.getroot()
for node in xroot:
passage_id = node.attrib.get("id")
text = node.find("text").text
# use NLTK sent tokenizer to count the number of sentences in the passage
num_sentences = len(sent_tokenize(text))
for q in node.find("questions"):
mc_script_dict[i] = {
"split": "train",
"passage_id": passage_id,
"passage": text,
"question_id": q.attrib.get("id"),
"question": q.attrib.get("text"),
"num_sentences": num_sentences,
}
correct_answer = ""
correct_ans_id = -1
for ans in q:
ans_id = ans.attrib.get("id")
mc_script_dict[i]["answer_" + ans_id] = ans.attrib.get("text")
if ans.attrib.get("correct") == "True":
correct_answer = mc_script_dict[i]["answer_" + ans_id]
correct_ans_id = ans_id
if correct_ans_id == -1:
print(mc_script_dict[i])
mc_script_dict[i]["correct_answer"] = correct_answer
mc_script_dict[i]["correct_answer_id"] = "answer_" + correct_ans_id
i += 1
mc_script_unpacked = pd.DataFrame.from_dict(mc_script_dict, orient="index")
mc_script_unpacked = mc_script_unpacked.loc[mc_script_unpacked.num_sentences <= 25]
# shuffle and reformat questions
newcols = ["answer1", "answer2", "answer3", "answer4", "correct_answer_num"]
def process_mcscript_row(row):
new_dict = {}
similar_rows = mc_script_unpacked.loc[
(mc_script_unpacked.split == row.split)
& (mc_script_unpacked.passage_id == row.passage_id)
& (mc_script_unpacked.question_id != row.question_id)
]
similar_answers = similar_rows[["answer_0", "answer_1"]].to_numpy().flatten()
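        # distractor sampling: draw two candidate answers, preferably from other questions on the
        # same passage (falling back to correct answers from the whole table), retrying when a draw
        # collides with this question's own two answers, then shuffle all four options together.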
while len(new_dict.keys()) == 0:
if len(similar_rows) == 0:
two_ans = np.random.choice(
mc_script_unpacked.correct_answer, 2, replace=False
)
else:
two_ans = np.random.choice(similar_answers, 2, replace=False)
            # compare against the answer values (membership on a pandas Series checks the index)
            if (two_ans[0] in row[["answer_0", "answer_1"]].values) or (
                two_ans[1] in row[["answer_0", "answer_1"]].values
            ):
continue
new_ans = np.random.choice(
np.concatenate([two_ans, row[["answer_0", "answer_1"]]]),
4,
replace=False,
)
for i in [1, 2, 3, 4]:
new_dict["answer" + str(i)] = new_ans[i - 1]
if new_ans[i - 1] == row["correct_answer"]:
new_dict["correct_answer_num"] = i
return pd.Series(new_dict)
np.random.seed(0)
mc_script_unpacked[newcols] = mc_script_unpacked.apply(process_mcscript_row, axis=1)
mc_script_unpacked["dataset"] = "MCScript2.0"
return mc_script_unpacked[final_table_columns]
def process_mctest(download_directory):
"""process the MCTest tsv files and return Pandas df"""
mc500_raw = {}
# not using test split
for split in ["train", "dev"]:
raw_df = pd.read_csv(
os.path.join(download_directory, f"MCTest/mc500.{split}.tsv"),
sep="\t",
names=[
"mc500_id",
"metadata",
"passage",
"question1",
"MC_answer1.1",
"MC_answer1.2",
"MC_answer1.3",
"MC_answer1.4",
"question2",
"MC_answer2.1",
"MC_answer2.2",
"MC_answer2.3",
"MC_answer2.4",
"question3",
"MC_answer3.1",
"MC_answer3.2",
"MC_answer3.3",
"MC_answer3.4",
"question4",
"MC_answer4.1",
"MC_answer4.2",
"MC_answer4.3",
"MC_answer4.4",
],
)
ans_df = pd.read_csv(
os.path.join(download_directory, f"MCTest/mc500.{split}.ans"),
sep="\t",
names=[
"question1_answer",
"question2_answer",
"question3_answer",
"question4_answer",
],
)
joined_df = raw_df.merge(ans_df, left_index=True, right_index=True)
mc500_raw[split] = joined_df
mc500_all_raw = pd.concat(mc500_raw.values())
# extract answer values to correct format
def get_answer_values(row, num):
conversion = {"A": "1", "B": "2", "C": "3", "D": "4"}
answer_column = (
"MC_answer" + str(num) + "." + conversion[row[f"question{str(num)}_answer"]]
)
return row[answer_column]
for num in [1, 2, 3, 4]:
mc500_all_raw[f"question{str(num)}_answer"] = mc500_all_raw.apply(
get_answer_values, args=[num], axis=1
)
# melt to get question and answer columns in one dataframe
dfq = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["question1", "question2", "question3", "question4"],
value_name="question",
var_name="question_number",
)
dfa1 = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["MC_answer1.1", "MC_answer2.1", "MC_answer3.1", "MC_answer4.1"],
value_name="MC_answer1",
)
dfa2 = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["MC_answer1.2", "MC_answer2.2", "MC_answer3.2", "MC_answer4.2"],
value_name="MC_answer2",
)
dfa3 = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["MC_answer1.3", "MC_answer2.3", "MC_answer3.3", "MC_answer4.3"],
value_name="MC_answer3",
)
dfa4 = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=["MC_answer1.4", "MC_answer2.4", "MC_answer3.4", "MC_answer4.4"],
value_name="MC_answer4",
)
dfca = mc500_all_raw.melt(
id_vars=["mc500_id", "passage"],
value_vars=[
"question1_answer",
"question2_answer",
"question3_answer",
"question4_answer",
],
value_name="correct_answer",
)
mc500_all = pd.concat(
[
dfq,
dfa1.drop(["mc500_id", "passage", "variable"], axis=1),
dfa2.drop(["mc500_id", "passage", "variable"], axis=1),
dfa3.drop(["mc500_id", "passage", "variable"], axis=1),
dfa4.drop(["mc500_id", "passage", "variable"], axis=1),
dfca.drop(["mc500_id", "passage", "variable"], axis=1),
],
axis=1,
)
# extract the prefix to the questions which details the number of sentences required in the passage to answer
mc500_all["sent_required"] = mc500_all.question.str.split(":").str[0].str.strip()
mc500_all["question"] = mc500_all.question.str.split(":").str[1].str.strip()
# use NLTK sent tokenizer to count the number of sentences in the passage
mc500_all["num_sentences"] = mc500_all.passage.apply(
lambda x: sent_tokenize(x)
).str.len()
def get_correct_answer_num(row):
for i in [1, 2, 3, 4]:
if row["MC_answer" + str(i)] == row["correct_answer"]:
return i
mc500_all["correct_answer_num"] = mc500_all.apply(get_correct_answer_num, axis=1)
mc500_all["passage_id"] = mc500_all.mc500_id.apply(lambda x: x.split(".")[-1])
mc500_all["question_id"] = mc500_all.question_number.str.replace("question", "")
mc500_all["dataset"] = "MCTest_500"
mc500_all["split"] = [a[1] for a in mc500_all.mc500_id.str.split(".")]
return mc500_all.loc[mc500_all.num_sentences <= 25].rename(
mapper=(lambda x: x.replace("MC_", "")), axis=1
)[final_table_columns]
def process_race(download_directory):
"""process the RACE txt files and return Pandas df"""
# unpack all the .txt files of the dataset into a single pandas table
race_dict = {}
i = 0
for split in ["dev", "train"]:
for level in ["middle", "high"]:
for file in glob.glob(
os.path.join(download_directory, f"RACE/{split}/{level}/*.txt")
):
with open(file) as f:
file_str = f.read()
file_dict = json.loads(file_str)
num_sentences = len(sent_tokenize(file_dict["article"]))
num_qs = len(file_dict["answers"])
for q in range(num_qs):
race_dict[i] = {
"split": split,
"level": level,
"passage_id": file_dict["id"],
"passage": file_dict["article"],
"question_id": q,
"question": file_dict["questions"][q],
"num_sentences": num_sentences,
}
# rename answer columns
for j in range(len(file_dict["options"][q])):
race_dict[i]["answer" + str(j + 1)] = file_dict["options"][q][j]
race_dict[i]["correct_answer_num"] = (
ord(file_dict["answers"][q]) - 64
)
race_dict[i]["correct_answer"] = file_dict["options"][q][
race_dict[i]["correct_answer_num"] - 1
]
i += 1
race_unpacked = pd.DataFrame.from_dict(race_dict, orient="index")
# remove fill-in-the-blank questions
race_unpacked = race_unpacked.loc[~race_unpacked.question.str.contains("_")]
race_unpacked["dataset"] = "RACE"
return race_unpacked.loc[race_unpacked.num_sentences <= 25][final_table_columns]
def process_reclor(download_directory):
"""process the ReClor json files and return Pandas df"""
# unpack the json format to into a pandas table
reclor_dict = {}
i = 0
for split in ["train", "val"]: # did not include test
with open(os.path.join(download_directory, f"reclor/{split}.json")) as f:
file_str = f.read()
file_dict = json.loads(file_str)
if split == "val":
split = "dev"
for item in file_dict:
idx = item["id_string"].split("_")[-1]
reclor_dict[i] = {
"split": split,
"passage_id": idx,
"question_id": idx,
"passage": item["context"],
"question": item["question"],
}
for j in range(len(item["answers"])):
reclor_dict[i]["answer" + str(j + 1)] = item["answers"][j]
reclor_dict[i]["correct_answer_num"] = item["label"] + 1
reclor_dict[i]["correct_answer"] = item["answers"][item["label"]]
i += 1
reclor_unpacked = pd.DataFrame.from_dict(reclor_dict, orient="index")
reclor_unpacked["dataset"] = "ReClor"
return reclor_unpacked[final_table_columns]
if __name__ == "__main__":
os.environ["HTTPS_PROXY"] = "http://fwdproxy:8080"
parser = argparse.ArgumentParser(
description="Assemble samples from numerous datasets and generate a JSON to serve as the training set for Belebele"
)
parser.add_argument(
"--data_path",
help="Path to the json dataset",
)
parser.add_argument(
"--downloads_path",
help="Path to folder where all the files required to assemble the training set will be downloaded",
default=".",
)
parser.add_argument(
"--output_file",
help="Path to file with the final training set (in tsv format)",
default="belebele_training_set.tsv",
)
args = parser.parse_args()
# the URLs to download
urls = {
"MultiRC": "https://cogcomp.seas.upenn.edu/multirc/data/mutlirc-v2.zip",
"MCScript2.0": "https://fedora.clarin-d.uni-saarland.de/sfb1102/MCScript-2.0.zip",
"MCTest": "https://mattr1.github.io/mctest/data/MCTest.zip",
"RACE": "http://www.cs.cmu.edu/~glai1/data/race/RACE.tar.gz",
"SciQ": "https://ai2-public-datasets.s3.amazonaws.com/sciq/SciQ.zip",
"ReClor": "https://github.com/yuweihao/reclor/releases/download/v1/reclor_data.zip",
}
# the name of the files once unzipped
unzipped_filenames = {
"MultiRC": "splitv2",
"ReClor": "reclor",
"RACE": "RACE",
"SciQ": "SciQ dataset-2 3",
"MCScript2.0": "mcscript",
"MCTest": "MCTest",
}
downloads_repo = download_files(args.downloads_path, urls, unzipped_filenames)
final_table_columns = [
"dataset",
"split",
"passage_id",
"question_id",
"passage",
"question",
"answer1",
"answer2",
"answer3",
"answer4",
"correct_answer",
"correct_answer_num",
]
init_nltk_data()
multirc_ready = process_multirc(downloads_repo)
print("Finished processing MultiRC.")
print("Starting to process MCScript2.0... this may take around 5 minutes")
mcscript_ready = process_mcscript(downloads_repo)
print("Finished processing MCScript2.0.")
mctest_ready = process_mctest(downloads_repo)
print("Finished processing MCTest.")
sciq_ready = process_sciq(downloads_repo)
print("Finished processing SciQ.")
reclor_ready = process_reclor(downloads_repo)
print("Finished processing ReClor.")
race_ready = process_race(downloads_repo)
print("Finished processing RACE... now joining them altogether.")
combined = pd.concat(
[
sciq_ready,
mcscript_ready,
mctest_ready,
multirc_ready,
race_ready,
reclor_ready,
]
)
combined.to_csv(args.output_file, sep="\t")
print(f"Finished creating training set and dumped into {args.output_file}")
print(
"Beware when loading the data from the tsv, there are many newline characters, double quotes, single quotes, etc., especially in the RACE passages."
)
|
belebele-main
|
assemble_training_set.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from setuptools import find_packages, setup
REQUIRES = [
"matplotlib",
"torch",
"scipy",
"SQLAlchemy==1.4.46",
"dill",
"pandas",
"aepsych_client==0.3.0",
"statsmodels",
"ax-platform==0.3.1",
"botorch==0.8.3",
]
BENCHMARK_REQUIRES = ["tqdm", "pathos", "multiprocess"]
DEV_REQUIRES = BENCHMARK_REQUIRES + [
"coverage",
"flake8",
"black",
"numpy>=1.20",
"sqlalchemy-stubs", # for mypy stubs
"mypy",
"parameterized",
"scikit-learn", # used in unit tests
]
VISUALIZER_REQUIRES = [
"voila==0.3.6",
"ipywidgets==7.6.5",
]
with open("README.md", "r") as fh:
long_description = fh.read()
with open(os.path.join("aepsych", "version.py"), "r") as fh:
for line in fh.readlines():
if line.startswith("__version__"):
version = line.split('"')[1]
setup(
name="aepsych",
version=version,
python_requires=">=3.8",
packages=find_packages(),
description="Adaptive experimetation for psychophysics",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=REQUIRES,
extras_require={
"dev": DEV_REQUIRES,
"benchmark": BENCHMARK_REQUIRES,
"visualizer": VISUALIZER_REQUIRES,
},
entry_points={
"console_scripts": [
"aepsych_server = aepsych.server.server:main",
"aepsych_database = aepsych.server.utils:main",
],
},
)
|
aepsych-main
|
setup.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from os import path
from setuptools import find_packages, setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "Readme.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="aepsych_client",
version="0.3.0",
packages=find_packages(),
long_description=long_description,
long_description_content_type="text/markdown",
)
|
aepsych-main
|
clients/python/setup.py
|
aepsych-main
|
clients/python/tests/__init__.py
|
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import unittest
import uuid
from unittest.mock import MagicMock, patch
import torch
from aepsych.acquisition import MCPosteriorVariance
from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator
from aepsych.models import GPClassificationModel
from aepsych.server import AEPsychServer
from aepsych_client import AEPsychClient
from torch import tensor
class MockStrategy:
def gen(self, num_points):
self._count = self._count + num_points
return torch.tensor([[0.0]])
class RemoteServerTestCase(unittest.TestCase):
def setUp(self):
database_path = "./{}.db".format(str(uuid.uuid4().hex))
self.s = AEPsychServer(database_path=database_path)
self.client = AEPsychClient(connect=False)
self.client._send_recv = MagicMock(
wraps=lambda x: json.dumps(self.s.handle_request(x))
)
def tearDown(self):
self.s.cleanup()
# cleanup the db
if self.s.db is not None:
self.s.db.delete_db()
@patch(
"aepsych.strategy.Strategy.gen",
new=MockStrategy.gen,
)
def test_client(self):
config_str = """
[common]
lb = [0]
ub = [1]
parnames = [x]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
acqf = MCPosteriorVariance
model = GPClassificationModel
[init_strat]
min_asks = 1
generator = SobolGenerator
min_total_outcome_occurrences = 0
[opt_strat]
min_asks = 1
generator = OptimizeAcqfGenerator
min_total_outcome_occurrences = 0
"""
self.client.configure(config_str=config_str, config_name="first_config")
self.assertEqual(self.s.strat_id, 0)
self.assertEqual(self.s.strat.strat_list[0].min_asks, 1)
self.assertEqual(self.s.strat.strat_list[1].min_asks, 1)
self.assertIsInstance(self.s.strat.strat_list[0].generator, SobolGenerator)
self.assertIsInstance(
self.s.strat.strat_list[1].generator, OptimizeAcqfGenerator
)
self.assertIsInstance(self.s.strat.strat_list[1].model, GPClassificationModel)
self.assertEqual(self.s.strat.strat_list[1].generator.acqf, MCPosteriorVariance)
response = self.client.ask()
self.assertSetEqual(set(response["config"].keys()), {"x"})
self.assertEqual(len(response["config"]["x"]), 1)
self.assertTrue(0 <= response["config"]["x"][0] <= 1)
self.assertFalse(response["is_finished"])
self.assertEqual(self.s.strat._count, 1)
self.client.tell(config={"x": [0]}, outcome=1)
self.assertEqual(self.s._strats[0].x, tensor([[0.0]]))
self.assertEqual(self.s._strats[0].y, tensor([[1.0]]))
self.client.tell(config={"x": [0]}, outcome=1, model_data=False)
self.assertEqual(self.s._strats[0].x, tensor([[0.0]]))
self.assertEqual(self.s._strats[0].y, tensor([[1.0]]))
response = self.client.ask()
self.assertTrue(response["is_finished"])
self.client.configure(config_str=config_str, config_name="second_config")
self.assertEqual(self.s.strat._count, 0)
self.assertEqual(self.s.strat_id, 1)
self.client.resume(config_name="first_config")
self.assertEqual(self.s.strat_id, 0)
self.client.resume(config_name="second_config")
self.assertEqual(self.s.strat_id, 1)
self.client.finalize()
class LocalServerTestCase(RemoteServerTestCase):
def setUp(self):
database_path = "./{}.db".format(str(uuid.uuid4().hex))
self.s = AEPsychServer(database_path=database_path)
self.client = AEPsychClient(server=self.s)
def test_warns_ignored_args(self):
with self.assertWarns(UserWarning):
AEPsychClient(ip="0.0.0.0", port=5555, server=self.s)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
clients/python/tests/test_client.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import socket
import warnings
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from aepsych.server import AEPsychServer
class ServerError(RuntimeError):
pass
class AEPsychClient:
def __init__(
self,
ip: Optional[str] = None,
port: Optional[int] = None,
connect: bool = True,
server: "AEPsychServer" = None,
) -> None:
"""Python client for AEPsych using built-in python sockets. By default it connects
to a localhost server matching AEPsych defaults.
Args:
            ip (str, optional): IP to connect to (default: "0.0.0.0").
            port (int, optional): Port to connect on (default: 5555).
connect (bool): Connect as part of init? Defaults to True.
server (AEPsychServer, optional): An in-memory AEPsychServer object to connect to.
If this is not None, the other arguments will be ignored.
"""
self.configs = []
self.config_names = {}
self.server = server
if server is not None and (ip is not None or port is not None):
warnings.warn(
"AEPsychClient will ignore ip and port since it was given a server object!",
UserWarning,
)
if server is None:
ip = ip or "0.0.0.0"
port = port or 5555
self.socket = socket.socket()
if connect:
self.connect(ip, port)
def load_config_index(self) -> None:
"""Loads the config index when server is not None"""
self.configs = []
for i in range(self.server.n_strats):
self.configs.append(i)
def connect(self, ip: str, port: int) -> None:
"""Connect to the server.
Args:
ip (str): IP to connect to.
            port (int): Port to connect on.
"""
addr = (ip, port)
self.socket.connect(addr)
def finalize(self) -> None:
"""Let the server know experiment is complete."""
request = {"message": "", "type": "exit"}
self._send_recv(request)
def _send_recv(self, message) -> str:
if self.server is not None:
return self.server.handle_request(message)
message = bytes(json.dumps(message), encoding="utf-8")
self.socket.send(message)
response = self.socket.recv(4096).decode("utf-8")
        # TODO: this is hacky, but we don't consistently return JSON
        # from the server, so we can't check for a status field.
if response[:12] == "server_error":
error_message = response[13:]
raise ServerError(error_message)
return response
def ask(
self, num_points: int = 1
) -> Union[Dict[str, List[float]], Dict[int, Dict[str, Any]]]:
"""Get next configuration from server.
Args:
            num_points (int): Number of points to return.
Returns:
Dict[int, Dict[str, Any]]: Next configuration(s) to evaluate.
If using the legacy backend, this is formatted as a dictionary where keys are parameter names and values
are lists of parameter values.
If using the Ax backend, this is formatted as a dictionary of dictionaries where the outer keys are trial indices,
the inner keys are parameter names, and the values are parameter values.
"""
request = {"message": {"num_points": num_points}, "type": "ask"}
response = self._send_recv(request)
if isinstance(response, str):
response = json.loads(response)
return response
def tell_trial_by_index(
self,
trial_index: int,
outcome: int,
model_data: bool = True,
**metadata: Dict[str, Any],
) -> None:
"""Update the server on a trial that already has a trial index, as provided by `ask`.
Args:
outcome (int): Outcome that was obtained.
model_data (bool): If True, the data will be recorded in the db and included in the server's model. If False,
the data will be recorded in the db, but will not be used by the model. Defaults to True.
trial_index (int): The associated trial index of the config.
metadata (optional kwargs) is passed to the extra_info field on the server.
Raises:
AssertionError if server failed to acknowledge the tell.
"""
request = {
"type": "tell",
"message": {
"outcome": outcome,
"model_data": model_data,
"trial_index": trial_index,
},
"extra_info": metadata,
}
self._send_recv(request)
def tell(
self,
config: Dict[str, List[Any]],
outcome: int,
model_data: bool = True,
**metadata: Dict[str, Any],
) -> None:
"""Update the server on a configuration that was executed. Use this method when using the legacy backend or for
        manually generated trials without an associated trial_index when using the Ax backend.
Args:
config (Dict[str, str]): Config that was evaluated.
outcome (int): Outcome that was obtained.
metadata (optional kwargs) is passed to the extra_info field on the server.
model_data (bool): If True, the data will be recorded in the db and included in the server's model. If False,
the data will be recorded in the db, but will not be used by the model. Defaults to True.
Raises:
AssertionError if server failed to acknowledge the tell.
"""
request = {
"type": "tell",
"message": {
"config": config,
"outcome": outcome,
"model_data": model_data,
},
"extra_info": metadata,
}
self._send_recv(request)
def configure(
        self,
        config_path: Optional[str] = None,
        config_str: Optional[str] = None,
        config_name: Optional[str] = None,
) -> None:
"""Configure the server and prepare for data collection.
Note that either config_path or config_str must be passed.
Args:
config_path (str, optional): Path to a config.ini. Defaults to None.
config_str (str, optional): Config.ini encoded as a string. Defaults to None.
config_name (str, optional): A name to assign to this config internally for convenience.
Raises:
AssertionError if neither config path nor config_str is passed.
"""
if config_path is not None:
assert config_str is None, "if config_path is passed, don't pass config_str"
with open(config_path, "r") as f:
config_str = f.read()
elif config_str is not None:
assert (
config_path is None
), "if config_str is passed, don't pass config_path"
request = {
"type": "setup",
"message": {"config_str": config_str},
}
idx = int(self._send_recv(request))
self.configs.append(idx)
if config_name is not None:
self.config_names[config_name] = idx
    def resume(self, config_id: Optional[int] = None, config_name: Optional[str] = None):
"""Resume a previous config from this session. To access available configs,
use client.configs or client.config_names
Args:
config_id (int, optional): ID of config to resume.
            config_name (str, optional): Name of the config to resume.
Raises:
AssertionError if name or ID does not exist, or if both name and ID are passed.
"""
if config_id is not None:
assert config_name is None, "if config_id is passed, don't pass config_name"
assert (
config_id in self.configs
), f"No strat with index {config_id} was created!"
elif config_name is not None:
assert config_id is None, "if config_name is passed, don't pass config_id"
assert (
config_name in self.config_names.keys()
), f"{config_name} not known, know {self.config_names.keys()}!"
config_id = self.config_names[config_name]
request = {
"type": "resume",
"message": {"strat_id": config_id},
}
self._send_recv(request)
    def __del__(self):
self.finalize()
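# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the shipped client): a minimal ask/tell
# loop. It mirrors clients/python/tests/test_client.py, so the response fields
# ("config", "is_finished") come from that test; the config path and the
# run_trial helper are hypothetical placeholders.
#
#     from aepsych_client import AEPsychClient
#
#     client = AEPsychClient(ip="0.0.0.0", port=5555)    # or AEPsychClient(server=my_server)
#     client.configure(config_path="my_experiment.ini",  # hypothetical path
#                      config_name="main")
#     while True:
#         trial = client.ask()
#         if trial["is_finished"]:
#             break
#         outcome = run_trial(trial["config"])            # your experiment code (hypothetical)
#         client.tell(config=trial["config"], outcome=outcome)
#     client.finalize()
# ---------------------------------------------------------------------------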
|
aepsych-main
|
clients/python/aepsych_client/client.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .client import AEPsychClient
__all__ = ["AEPsychClient"]
|
aepsych-main
|
clients/python/aepsych_client/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import abc
import ast
import configparser
import json
import warnings
from types import ModuleType
from typing import Any, ClassVar, Dict, List, Mapping, Optional, Sequence, TypeVar
import botorch
import gpytorch
import numpy as np
import torch
from aepsych.version import __version__
_T = TypeVar("_T")
class Config(configparser.ConfigParser):
# names in these packages can be referred to by string name
registered_names: ClassVar[Dict[str, object]] = {}
def __init__(
self,
config_dict: Optional[Mapping[str, Any]] = None,
config_fnames: Optional[Sequence[str]] = None,
config_str: Optional[str] = None,
):
"""Initialize the AEPsych config object. This can be used to instantiate most
objects in AEPsych by calling object.from_config(config).
Args:
config_dict (Mapping[str, str], optional): Mapping to build configuration from.
Keys are section names, values are dictionaries with keys and values that
should be present in the section. Defaults to None.
config_fnames (Sequence[str], optional): List of INI filenames to load
configuration from. Defaults to None.
config_str (str, optional): String formatted as an INI file to load configuration
from. Defaults to None.
"""
super().__init__(
inline_comment_prefixes=("#"),
empty_lines_in_values=False,
default_section="common",
interpolation=configparser.ExtendedInterpolation(),
converters={
"list": self._str_to_list,
"tensor": self._str_to_tensor,
"obj": self._str_to_obj,
"array": self._str_to_array,
},
allow_no_value=True,
)
self.update(
config_dict=config_dict,
config_fnames=config_fnames,
config_str=config_str,
)
def _get(
self,
section,
conv,
option,
*,
raw=False,
vars=None,
fallback=configparser._UNSET,
**kwargs,
):
"""
Override configparser to:
1. Return from common if a section doesn't exist. This comes
up any time we have a module fully configured from the
common/default section.
2. Pass extra **kwargs to the converter.
"""
try:
return conv(
self.get(
section=section,
option=option,
raw=raw,
vars=vars,
fallback=fallback,
),
**kwargs,
)
except configparser.NoSectionError:
return conv(
self.get(
section="common",
option=option,
raw=raw,
vars=vars,
fallback=fallback,
),
**kwargs,
)
# Convert config into a dictionary (eliminate duplicates from defaulted 'common' section.)
def to_dict(self, deduplicate=True):
_dict = {}
for section in self:
_dict[section] = {}
for setting in self[section]:
if deduplicate and section != "common" and setting in self["common"]:
continue
_dict[section][setting] = self[section][setting]
return _dict
# Turn the metadata section into JSON.
def jsonifyMetadata(self) -> str:
configdict = self.to_dict()
return json.dumps(configdict["metadata"])
# Turn the entire config into JSON format.
def jsonifyAll(self) -> str:
configdict = self.to_dict()
return json.dumps(configdict)
def update(
self,
config_dict: Mapping[str, str] = None,
config_fnames: Sequence[str] = None,
config_str: str = None,
):
"""Update this object with a new configuration.
Args:
config_dict (Mapping[str, str], optional): Mapping to build configuration from.
Keys are section names, values are dictionaries with keys and values that
should be present in the section. Defaults to None.
config_fnames (Sequence[str], optional): List of INI filenames to load
configuration from. Defaults to None.
config_str (str, optional): String formatted as an INI file to load configuration
from. Defaults to None.
"""
if config_dict is not None:
self.read_dict(config_dict)
if config_fnames is not None:
read_ok = self.read(config_fnames)
if len(read_ok) < 1:
raise FileNotFoundError
if config_str is not None:
self.read_string(config_str)
        # Handle the deprecated "experiment" section by merging it into "common"
if "experiment" in self:
for i in self["experiment"]:
self["common"][i] = self["experiment"][i]
del self["experiment"]
def _str_to_list(self, v: str, element_type: _T = float) -> List[_T]:
if v[0] == "[" and v[-1] == "]":
if v == "[]": # empty list
return []
else:
return [element_type(i.strip()) for i in v[1:-1].split(",")]
else:
return [v.strip()]
def _str_to_array(self, v: str) -> np.ndarray:
v = ast.literal_eval(v)
return np.array(v, dtype=float)
def _str_to_tensor(self, v: str) -> torch.Tensor:
return torch.Tensor(self._str_to_list(v))
def _str_to_obj(self, v: str, fallback_type: _T = str, warn: bool = True) -> object:
try:
return self.registered_names[v]
except KeyError:
if warn:
warnings.warn(f'No known object "{v}"!')
return fallback_type(v)
def __repr__(self):
return f"Config at {hex(id(self))}: \n {str(self)}"
@classmethod
def register_module(cls: _T, module: ModuleType):
"""Register a module with Config so that objects in it can
be referred to by their string name in config files.
Args:
module (ModuleType): Module to register.
"""
cls.registered_names.update(
{
name: getattr(module, name)
for name in module.__all__
if not isinstance(getattr(module, name), ModuleType)
}
)
@classmethod
def register_object(cls: _T, obj: object):
"""Register an object with Config so that it can be
referred to by its string name in config files.
Args:
obj (object): Object to register.
"""
if obj.__name__ in cls.registered_names.keys():
warnings.warn(
f"Registering {obj.__name__} but already"
+ f"have {cls.registered_names[obj.__name__]}"
+ "registered under that name!"
)
cls.registered_names.update({obj.__name__: obj})
def get_section(self, section):
sec = {}
for setting in self[section]:
if section != "common" and setting in self["common"]:
continue
sec[setting] = self[section][setting]
return sec
def __str__(self):
_str = ""
for section in self:
sec = self.get_section(section)
_str += f"[{section}]\n"
for setting in sec:
_str += f"{setting} = {self[section][setting]}\n"
return _str
def convert_to_latest(self):
self.convert(self.version, __version__)
def convert(self, from_version: str, to_version: str) -> None:
"""Converts a config from an older version to a newer version.
Args:
from_version (str): The version of the config to be converted.
to_version (str): The version the config should be converted to.
"""
if from_version == "0.0":
self["common"]["strategy_names"] = "[init_strat, opt_strat]"
if "experiment" in self:
for i in self["experiment"]:
self["common"][i] = self["experiment"][i]
bridge = self["common"]["modelbridge_cls"]
n_sobol = self["SobolStrategy"]["n_trials"]
n_opt = self["ModelWrapperStrategy"]["n_trials"]
if bridge == "PairwiseProbitModelbridge":
self["init_strat"] = {
"generator": "PairwiseSobolGenerator",
"min_asks": n_sobol,
}
self["opt_strat"] = {
"generator": "PairwiseOptimizeAcqfGenerator",
"model": "PairwiseProbitModel",
"min_asks": n_opt,
}
if "PairwiseProbitModelbridge" in self:
self["PairwiseOptimizeAcqfGenerator"] = self[
"PairwiseProbitModelbridge"
]
if "PairwiseGP" in self:
self["PairwiseProbitModel"] = self["PairwiseGP"]
elif bridge == "MonotonicSingleProbitModelbridge":
self["init_strat"] = {
"generator": "SobolGenerator",
"min_asks": n_sobol,
}
self["opt_strat"] = {
"generator": "MonotonicRejectionGenerator",
"model": "MonotonicRejectionGP",
"min_asks": n_opt,
}
if "MonotonicSingleProbitModelbridge" in self:
self["MonotonicRejectionGenerator"] = self[
"MonotonicSingleProbitModelbridge"
]
elif bridge == "SingleProbitModelbridge":
self["init_strat"] = {
"generator": "SobolGenerator",
"min_asks": n_sobol,
}
self["opt_strat"] = {
"generator": "OptimizeAcqfGenerator",
"model": "GPClassificationModel",
"min_asks": n_opt,
}
if "SingleProbitModelbridge" in self:
self["OptimizeAcqfGenerator"] = self["SingleProbitModelbridge"]
else:
raise NotImplementedError(
f"Refactor for {bridge} has not been implemented!"
)
if "ModelWrapperStrategy" in self:
if "refit_every" in self["ModelWrapperStrategy"]:
self["opt_strat"]["refit_every"] = self["ModelWrapperStrategy"][
"refit_every"
]
del self["common"]["model"]
if to_version == __version__:
if self["common"]["outcome_type"] == "single_probit":
self["common"]["stimuli_per_trial"] = "1"
self["common"]["outcome_types"] = "[binary]"
if self["common"]["outcome_type"] == "single_continuous":
self["common"]["stimuli_per_trial"] = "1"
self["common"]["outcome_types"] = "[continuous]"
if self["common"]["outcome_type"] == "pairwise_probit":
self["common"]["stimuli_per_trial"] = "2"
self["common"]["outcome_types"] = "[binary]"
del self["common"]["outcome_type"]
@property
def version(self) -> str:
"""Returns the version number of the config."""
# TODO: implement an explicit versioning system
# Try to infer the version
if "stimuli_per_trial" in self["common"] and "outcome_types" in self["common"]:
return __version__
if "common" in self and "strategy_names" in self["common"]:
return "0.1"
elif (
"SobolStrategy" in self
or "ModelWrapperStrategy" in self
or "EpsilonGreedyModelWrapperStrategy" in self
):
return "0.0"
else:
raise RuntimeError("Unrecognized config format!")
class ConfigurableMixin(abc.ABC):
@abc.abstractclassmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]: # noqa
raise NotImplementedError(
f"get_config_options hasn't been defined for {cls.__name__}!"
)
@classmethod
def from_config(cls, config: Config, name: Optional[str] = None):
return cls(**cls.get_config_options(config, name))
Config.register_module(gpytorch.likelihoods)
Config.register_module(gpytorch.kernels)
Config.register_module(botorch.acquisition)
Config.registered_names["None"] = None
|
aepsych-main
|
aepsych/config.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
__version__ = "0.4.0"
|
aepsych-main
|
aepsych/version.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Callable, Iterable, List, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
from aepsych.strategy import Strategy
from aepsych.utils import get_lse_contour, get_lse_interval, make_scaled_sobol
from scipy.stats import norm
def plot_strat(
strat: Strategy,
ax: Optional[plt.Axes] = None,
true_testfun: Optional[Callable] = None,
cred_level: float = 0.95,
target_level: Optional[float] = 0.75,
xlabel: Optional[str] = None,
ylabel: Optional[str] = None,
yes_label: str = "Yes trial",
no_label: str = "No trial",
flipx: bool = False,
logx: bool = False,
gridsize: int = 30,
title: str = "",
save_path: Optional[str] = None,
show: bool = True,
include_legend: bool = True,
include_colorbar: bool = True,
) -> None:
"""Creates a plot of a strategy, showing participants responses on each trial, the estimated response function and
threshold, and optionally a ground truth response threshold.
Args:
strat (Strategy): Strategy object to be plotted. Must have a dimensionality of 2 or less.
ax (plt.Axes, optional): Matplotlib axis to plot on (if None, creates a new axis). Default: None.
true_testfun (Callable, optional): Ground truth response function. Should take a n_samples x n_parameters tensor
as input and produce the response probability at each sample as output. Default: None.
cred_level (float): Percentage of posterior mass around the mean to be shaded. Default: 0.95.
target_level (float): Response probability to estimate the threshold of. Default: 0.75.
xlabel (str): Label of the x-axis. Default: "Context (abstract)".
ylabel (str): Label of the y-axis (if None, defaults to "Response Probability" for 1-d plots or
"Intensity (Abstract)" for 2-d plots). Default: None.
yes_label (str): Label of trials with response of 1. Default: "Yes trial".
no_label (str): Label of trials with response of 0. Default: "No trial".
flipx (bool): Whether the values of the x-axis should be flipped such that the min becomes the max and vice
versa.
(Only valid for 2-d plots.) Default: False.
logx (bool): Whether the x-axis should be log-transformed. (Only valid for 2-d plots.) Default: False.
gridsize (int): The number of points to sample each dimension at. Default: 30.
title (str): Title of the plot. Default: ''.
save_path (str, optional): File name to save the plot to. Default: None.
show (bool): Whether the plot should be shown in an interactive window. Default: True.
include_legend (bool): Whether to include the legend in the figure. Default: True.
include_colorbar (bool): Whether to include the colorbar indicating the probability of "Yes" trials.
Default: True.
"""
assert (
"binary" in strat.outcome_types
), f"Plotting not supported for outcome_type {strat.outcome_types[0]}"
if target_level is not None and not hasattr(strat.model, "monotonic_idxs"):
warnings.warn(
"Threshold estimation may not be accurate for non-monotonic models."
)
if ax is None:
_, ax = plt.subplots()
if xlabel is None:
xlabel = "Context (abstract)"
dim = strat.dim
if dim == 1:
if ylabel is None:
ylabel = "Response Probability"
_plot_strat_1d(
strat,
ax,
true_testfun,
cred_level,
target_level,
xlabel,
ylabel,
yes_label,
no_label,
gridsize,
)
elif dim == 2:
if ylabel is None:
ylabel = "Intensity (abstract)"
_plot_strat_2d(
strat,
ax,
true_testfun,
cred_level,
target_level,
xlabel,
ylabel,
yes_label,
no_label,
flipx,
logx,
gridsize,
include_colorbar,
)
elif dim == 3:
raise RuntimeError("Use plot_strat_3d for 3d plots!")
else:
raise NotImplementedError("No plots for >3d!")
ax.set_title(title)
if include_legend:
anchor = (1.4, 0.5) if include_colorbar and dim > 1 else (1, 0.5)
plt.legend(loc="center left", bbox_to_anchor=anchor)
if save_path is not None:
plt.savefig(save_path, bbox_inches="tight")
if show:
plt.tight_layout()
if include_legend or (include_colorbar and dim > 1):
plt.subplots_adjust(left=0.1, bottom=0.25, top=0.75)
plt.show()
def _plot_strat_1d(
strat: Strategy,
ax: plt.Axes,
true_testfun: Optional[Callable],
cred_level: float,
target_level: Optional[float],
xlabel: str,
ylabel: str,
yes_label: str,
no_label: str,
gridsize: int,
):
"""Helper function for creating 1-d plots. See plot_strat for an explanation of the arguments."""
x, y = strat.x, strat.y
assert x is not None and y is not None, "No data to plot!"
grid = strat.model.dim_grid(gridsize=gridsize)
samps = norm.cdf(strat.model.sample(grid, num_samples=10000).detach())
phimean = samps.mean(0)
ax.plot(np.squeeze(grid), phimean)
if cred_level is not None:
upper = np.quantile(samps, cred_level, axis=0)
lower = np.quantile(samps, 1 - cred_level, axis=0)
ax.fill_between(
np.squeeze(grid),
lower,
upper,
alpha=0.3,
hatch="///",
edgecolor="gray",
label=f"{cred_level*100:.0f}% posterior mass",
)
if target_level is not None:
from aepsych.utils import interpolate_monotonic
threshold_samps = [
interpolate_monotonic(
grid.squeeze().numpy(), s, target_level, strat.lb[0], strat.ub[0]
)
for s in samps
]
thresh_med = np.mean(threshold_samps)
thresh_lower = np.quantile(threshold_samps, q=1 - cred_level)
thresh_upper = np.quantile(threshold_samps, q=cred_level)
ax.errorbar(
thresh_med,
target_level,
xerr=np.r_[thresh_med - thresh_lower, thresh_upper - thresh_med][:, None],
capsize=5,
elinewidth=1,
label=f"Est. {target_level*100:.0f}% threshold \n(with {cred_level*100:.0f}% posterior \nmass marked)",
)
if true_testfun is not None:
true_f = true_testfun(grid)
ax.plot(grid, true_f.squeeze(), label="True function")
if target_level is not None:
true_thresh = interpolate_monotonic(
grid.squeeze().numpy(),
true_f.squeeze(),
target_level,
strat.lb[0],
strat.ub[0],
)
ax.plot(
true_thresh,
target_level,
"o",
label=f"True {target_level*100:.0f}% threshold",
)
ax.scatter(
x[y == 0, 0],
np.zeros_like(x[y == 0, 0]),
marker=3,
color="r",
label=no_label,
)
ax.scatter(
x[y == 1, 0],
np.zeros_like(x[y == 1, 0]),
marker=3,
color="b",
label=yes_label,
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def _plot_strat_2d(
strat: Strategy,
ax: plt.Axes,
true_testfun: Optional[Callable],
cred_level: float,
target_level: Optional[float],
xlabel: str,
ylabel: str,
yes_label: str,
no_label: str,
flipx: bool,
logx: bool,
gridsize: int,
include_colorbar: bool,
):
"""Helper function for creating 2-d plots. See plot_strat for an explanation of the arguments."""
x, y = strat.x, strat.y
assert x is not None and y is not None, "No data to plot!"
# make sure the model is fit well if we've been limiting fit time
strat.model.fit(train_x=x, train_y=y, max_fit_time=None)
grid = strat.model.dim_grid(gridsize=gridsize)
fmean, _ = strat.model.predict(grid)
phimean = norm.cdf(fmean.reshape(gridsize, gridsize).detach().numpy()).T
if flipx:
extent = np.r_[strat.lb[0], strat.ub[0], strat.ub[1], strat.lb[1]]
colormap = ax.imshow(
phimean, aspect="auto", origin="upper", extent=extent, alpha=0.5
)
else:
extent = np.r_[strat.lb[0], strat.ub[0], strat.lb[1], strat.ub[1]]
colormap = ax.imshow(
phimean, aspect="auto", origin="lower", extent=extent, alpha=0.5
)
# hacky relabel to be in logspace
if logx:
locs = np.arange(strat.lb[0], strat.ub[0])
ax.set_xticks(ticks=locs)
ax.set_xticklabels(2.0**locs)
ax.plot(x[y == 0, 0], x[y == 0, 1], "ro", alpha=0.7, label=no_label)
ax.plot(x[y == 1, 0], x[y == 1, 1], "bo", alpha=0.7, label=yes_label)
if target_level is not None: # plot threshold
mono_grid = np.linspace(strat.lb[1], strat.ub[1], num=gridsize)
context_grid = np.linspace(strat.lb[0], strat.ub[0], num=gridsize)
thresh_75, lower, upper = get_lse_interval(
model=strat.model,
mono_grid=mono_grid,
target_level=target_level,
cred_level=cred_level,
mono_dim=1,
lb=mono_grid.min(),
ub=mono_grid.max(),
gridsize=gridsize,
)
ax.plot(
context_grid,
thresh_75,
label=f"Est. {target_level*100:.0f}% threshold \n(with {cred_level*100:.0f}% posterior \nmass shaded)",
)
ax.fill_between(
context_grid, lower, upper, alpha=0.3, hatch="///", edgecolor="gray"
)
if true_testfun is not None:
true_f = true_testfun(grid).reshape(gridsize, gridsize)
true_thresh = get_lse_contour(
true_f, mono_grid, level=target_level, lb=strat.lb[-1], ub=strat.ub[-1]
)
ax.plot(context_grid, true_thresh, label="Ground truth threshold")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if include_colorbar:
colorbar = plt.colorbar(colormap, ax=ax)
colorbar.set_label(f"Probability of {yes_label}")
def plot_strat_3d(
strat: Strategy,
parnames: Optional[List[str]] = None,
outcome_label: str = "Yes Trial",
slice_dim: int = 0,
slice_vals: Union[List[float], int] = 5,
contour_levels: Optional[Union[Iterable[float], bool]] = None,
probability_space: bool = False,
gridsize: int = 30,
extent_multiplier: Optional[List[float]] = None,
save_path: Optional[str] = None,
show: bool = True,
):
"""Creates a plot of a 2d slice of a 3D strategy, showing the estimated model or probability response and contours
Args:
strat (Strategy): Strategy object to be plotted. Must have a dimensionality of 3.
parnames (str list): list of the parameter names
outcome_label (str): The label of the outcome variable
slice_dim (int): dimension to slice on
        slice_vals (list of floats or int): values at which to take slices, OR the number of evenly spaced slices to take
        contour_levels (iterable of floats or bool, optional): list of contour values to plot, or True to plot contours at all integer levels. Default: None.
probability_space (bool): Whether to plot probability. Default: False
gridsize (int): The number of points to sample each dimension at. Default: 30.
extent_multiplier (list, optional): multipliers for each of the dimensions when plotting. Default:None
save_path (str, optional): File name to save the plot to. Default: None.
show (bool): Whether the plot should be shown in an interactive window. Default: True.
"""
assert strat.model is not None, "Cannot plot without a model!"
contour_levels_list = contour_levels or []
if parnames is None:
parnames = ["x1", "x2", "x3"]
# Get global min/max for all slices
if probability_space:
vmax = 1
vmin = 0
if contour_levels is True:
contour_levels_list = [0.75]
else:
d = make_scaled_sobol(strat.lb, strat.ub, 2000)
post = strat.model.posterior(d)
fmean = post.mean.squeeze().detach().numpy()
vmax = np.max(fmean)
vmin = np.min(fmean)
if contour_levels is True:
contour_levels_list = np.arange(np.ceil(vmin), vmax + 1)
# slice_vals is either a list of values or an integer number of values to slice on
if type(slice_vals) is int:
slices = np.linspace(strat.lb[slice_dim], strat.ub[slice_dim], slice_vals)
slices = np.around(slices, 4)
elif type(slice_vals) is not list:
raise TypeError("slice_vals must be either an integer or a list of values")
else:
slices = np.array(slice_vals)
_, axs = plt.subplots(1, len(slices), constrained_layout=True, figsize=(20, 3))
for _i, dim_val in enumerate(slices):
img = plot_slice(
axs[_i],
strat,
parnames,
slice_dim,
dim_val,
vmin,
vmax,
gridsize,
contour_levels_list,
probability_space,
extent_multiplier,
)
plt_parnames = np.delete(parnames, slice_dim)
axs[0].set_ylabel(plt_parnames[1])
cbar = plt.colorbar(img, ax=axs[-1])
if probability_space:
cbar.ax.set_ylabel(f"Probability of {outcome_label}")
else:
cbar.ax.set_ylabel(outcome_label)
for clevel in contour_levels_list: # type: ignore
cbar.ax.axhline(y=clevel, c="w")
if save_path is not None:
plt.savefig(save_path)
if show:
plt.show()
def plot_slice(
ax,
strat,
parnames,
slice_dim,
slice_val,
vmin,
vmax,
gridsize=30,
contour_levels=None,
lse=False,
extent_multiplier=None,
):
"""Creates a plot of a 2d slice of a 3D strategy, showing the estimated model or probability response and contours
Args:
strat (Strategy): Strategy object to be plotted. Must have a dimensionality of 3.
ax (plt.Axes): Matplotlib axis to plot on
parnames (str list): list of the parameter names
slice_dim (int): dimension to slice on
        slice_val (float): value at which to take the slice along that dimension
vmin (float): global model minimum to use for plotting
vmax (float): global model maximum to use for plotting
gridsize (int): The number of points to sample each dimension at. Default: 30.
contour_levels (int list): Contours to plot. Default: None
lse (bool): Whether to plot probability. Default: False
extent_multiplier (list, optional): multipliers for each of the dimensions when plotting. Default:None
"""
extent = np.c_[strat.lb, strat.ub].reshape(-1)
x = strat.model.dim_grid(gridsize=gridsize, slice_dims={slice_dim: slice_val})
if lse:
fmean, fvar = strat.predict(x)
fmean = fmean.detach().numpy().reshape(gridsize, gridsize)
fmean = norm.cdf(fmean)
else:
post = strat.model.posterior(x)
fmean = post.mean.squeeze().detach().numpy().reshape(gridsize, gridsize)
# optionally rescale extents to correct values
if extent_multiplier is not None:
extent_scaled = extent * np.repeat(extent_multiplier, 2)
dim_val_scaled = slice_val * extent_multiplier[slice_dim]
else:
extent_scaled = extent
dim_val_scaled = slice_val
plt_extents = np.delete(extent_scaled, [slice_dim * 2, slice_dim * 2 + 1])
plt_parnames = np.delete(parnames, slice_dim)
img = ax.imshow(
fmean.T, extent=plt_extents, origin="lower", aspect="auto", vmin=vmin, vmax=vmax
)
ax.set_title(parnames[slice_dim] + "=" + str(dim_val_scaled))
ax.set_xlabel(plt_parnames[0])
if len(contour_levels) > 0:
ax.contour(
fmean.T,
contour_levels,
colors="w",
extent=plt_extents,
origin="lower",
aspect="auto",
)
return img
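# ---------------------------------------------------------------------------
# Editor's usage sketch (comments only): plotting a finished strategy.
# Requires a 1-d or 2-d strategy with a binary outcome and a fitted model,
# e.g. the Strategy/SequentialStrategy objects built elsewhere in aepsych.
#
#     from aepsych.plotting import plot_strat, plot_strat_3d
#
#     plot_strat(strat, target_level=0.75, title="Estimated 75% threshold",
#                save_path="threshold.png", show=False)
#
#     # For 3-d experiments, plot 2-d slices along one dimension instead:
#     plot_strat_3d(strat, slice_dim=2, slice_vals=5, probability_space=True)
# ---------------------------------------------------------------------------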
|
aepsych-main
|
aepsych/plotting.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from gpytorch.likelihoods import BernoulliLikelihood, GaussianLikelihood
from . import acquisition, config, factory, generators, models, strategy, utils
from .config import Config
from .likelihoods import BernoulliObjectiveLikelihood
from .models import GPClassificationModel
from .strategy import SequentialStrategy, Strategy
__all__ = [
# modules
"acquisition",
"config",
"factory",
"models",
"strategy",
"utils",
"generators",
# classes
"GPClassificationModel",
"Strategy",
"SequentialStrategy",
"BernoulliObjectiveLikelihood",
"BernoulliLikelihood",
"GaussianLikelihood",
]
try:
from . import benchmark
__all__ += ["benchmark"]
except ImportError:
pass
Config.register_module(sys.modules[__name__])
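# Editor's note: registering this module with Config means any name exported in
# __all__ above can be referenced by its string name in an experiment config;
# e.g. `model = GPClassificationModel` in an INI file resolves through
# Config.registered_names via Config.getobj.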
|
aepsych-main
|
aepsych/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import time
import warnings
from copy import copy
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
import torch
from aepsych.config import Config, ConfigurableMixin
from aepsych.generators.base import AEPsychGenerationStep, AEPsychGenerator
from aepsych.generators.sobol_generator import AxSobolGenerator, SobolGenerator
from aepsych.models.base import ModelProtocol
from aepsych.utils import (
_process_bounds,
get_objectives,
get_parameters,
make_scaled_sobol,
)
from aepsych.utils_logging import getLogger
from ax.core.base_trial import TrialStatus
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.plot.contour import interact_contour
from ax.plot.slice import plot_slice
from ax.service.ax_client import AxClient
from ax.utils.notebook.plotting import render
from botorch.exceptions.errors import ModelFittingError
logger = getLogger()
def ensure_model_is_fresh(f):
def wrapper(self, *args, **kwargs):
if self.can_fit and not self._model_is_fresh:
starttime = time.time()
if self._count % self.refit_every == 0 or self.refit_every == 1:
logger.info("Starting fitting (no warm start)...")
# don't warm start
self.fit()
else:
logger.info("Starting fitting (warm start)...")
# warm start
self.update()
logger.info(f"Fitting done, took {time.time()-starttime}")
self._model_is_fresh = True
return f(self, *args, **kwargs)
return wrapper
class Strategy(object):
"""Object that combines models and generators to generate points to sample."""
_n_eval_points: int = 1000
def __init__(
self,
generator: AEPsychGenerator,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
stimuli_per_trial: int,
outcome_types: Sequence[Type[str]],
dim: Optional[int] = None,
min_total_tells: int = 0,
min_asks: int = 0,
model: Optional[ModelProtocol] = None,
refit_every: int = 1,
min_total_outcome_occurrences: int = 1,
max_asks: Optional[int] = None,
keep_most_recent: Optional[int] = None,
min_post_range: Optional[float] = None,
name: str = "",
run_indefinitely: bool = False,
):
"""Initialize the strategy object.
Args:
generator (AEPsychGenerator): The generator object that determines how points are sampled.
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
min_total_tells (int): The minimum number of total observations needed to complete this strategy.
min_asks (int): The minimum number of points that should be generated from this strategy.
model (ModelProtocol, optional): The AEPsych model of the data.
refit_every (int): How often to refit the model from scratch.
min_total_outcome_occurrences (int): The minimum number of total observations needed for each outcome before the strategy will finish.
Defaults to 1 (i.e., for binary outcomes, there must be at least one "yes" trial and one "no" trial).
max_asks (int, optional): The maximum number of trials to generate using this strategy.
If None, there is no upper bound (default).
keep_most_recent (int, optional): Experimental. The number of most recent data points that the model will be fitted on.
This may be useful for discarding noisy data from trials early in the experiment that are not as informative
as data collected from later trials. When None, the model is fitted on all data.
min_post_range (float, optional): Experimental. The required difference between the posterior's minimum and maximum value in
            probability space before the strategy will finish. Ignored if None (default).
name (str): The name of the strategy. Defaults to the empty string.
run_indefinitely (bool): If true, the strategy will run indefinitely until finish() is explicitly called. Other stopping criteria will
be ignored. Defaults to False.
"""
self.is_finished = False
if run_indefinitely:
warnings.warn(
f"Strategy {name} will run indefinitely until finish() is explicitly called. Other stopping criteria will be ignored."
)
elif min_total_tells > 0 and min_asks > 0:
warnings.warn(
"Specifying both min_total_tells and min_asks > 0 may lead to unintended behavior."
)
if model is not None:
assert (
len(outcome_types) == model._num_outputs
), f"Strategy has {len(outcome_types)} outcomes, but model {type(model).__name__} supports {model._num_outputs}!"
assert (
stimuli_per_trial == model.stimuli_per_trial
), f"Strategy has {stimuli_per_trial} stimuli_per_trial, but model {type(model).__name__} supports {model.stimuli_per_trial}!"
if isinstance(model.outcome_type, str):
assert (
len(outcome_types) == 1 and outcome_types[0] == model.outcome_type
), f"Strategy outcome types is {outcome_types} but model outcome type is {model.outcome_type}!"
else:
assert set(outcome_types) == set(
model.outcome_type
), f"Strategy outcome types is {outcome_types} but model outcome type is {model.outcome_type}!"
self.run_indefinitely = run_indefinitely
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.min_total_outcome_occurrences = min_total_outcome_occurrences
self.max_asks = max_asks
self.keep_most_recent = keep_most_recent
self.min_post_range = min_post_range
if self.min_post_range is not None:
assert model is not None, "min_post_range must be None if model is None!"
self.eval_grid = make_scaled_sobol(
lb=self.lb, ub=self.ub, size=self._n_eval_points
)
self.x = None
self.y = None
self.n = 0
self.min_asks = min_asks
self._count = 0
self.min_total_tells = min_total_tells
self.stimuli_per_trial = stimuli_per_trial
self.outcome_types = outcome_types
if self.stimuli_per_trial == 1:
self.event_shape: Tuple[int, ...] = (self.dim,)
if self.stimuli_per_trial == 2:
self.event_shape = (self.dim, self.stimuli_per_trial)
self.model = model
self.refit_every = refit_every
self._model_is_fresh = False
self.generator = generator
self.has_model = self.model is not None
if self.generator._requires_model:
assert self.model is not None, f"{self.generator} requires a model!"
if self.min_asks == self.min_total_tells == 0:
warnings.warn(
"strategy.min_asks == strategy.min_total_tells == 0. This strategy will not generate any points!",
UserWarning,
)
self.name = name
def normalize_inputs(self, x, y):
"""converts inputs into normalized format for this strategy
Args:
x (np.ndarray): training inputs
y (np.ndarray): training outputs
Returns:
x (np.ndarray): training inputs, normalized
y (np.ndarray): training outputs, normalized
n (int): number of observations
"""
assert (
x.shape == self.event_shape or x.shape[1:] == self.event_shape
), f"x shape should be {self.event_shape} or batch x {self.event_shape}, instead got {x.shape}"
if x.shape == self.event_shape:
x = x[None, :]
if self.x is None:
x = np.r_[x]
else:
x = np.r_[self.x, x]
if self.y is None:
y = np.r_[y]
else:
y = np.r_[self.y, y]
n = y.shape[0]
return torch.Tensor(x), torch.Tensor(y), n
# TODO: allow user to pass in generator options
@ensure_model_is_fresh
def gen(self, num_points: int = 1):
"""Query next point(s) to run by optimizing the acquisition function.
Args:
num_points (int, optional): Number of points to query. Defaults to 1.
            Other arguments are forwarded to the underlying model.
Returns:
np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
"""
self._count = self._count + num_points
return self.generator.gen(num_points, self.model)
@ensure_model_is_fresh
def get_max(self, constraints=None):
constraints = constraints or {}
return self.model.get_max(constraints)
@ensure_model_is_fresh
def get_min(self, constraints=None):
constraints = constraints or {}
return self.model.get_min(constraints)
@ensure_model_is_fresh
def inv_query(self, y, constraints=None, probability_space=False):
constraints = constraints or {}
return self.model.inv_query(y, constraints, probability_space)
@ensure_model_is_fresh
def predict(self, x, probability_space=False):
return self.model.predict(x=x, probability_space=probability_space)
@ensure_model_is_fresh
def get_jnd(self, *args, **kwargs):
return self.model.get_jnd(*args, **kwargs)
@ensure_model_is_fresh
def sample(self, x, num_samples=None):
return self.model.sample(x, num_samples=num_samples)
def finish(self):
self.is_finished = True
@property
def finished(self):
if self.is_finished:
return True
if self.run_indefinitely:
return False
if hasattr(self.generator, "finished"): # defer to generator if possible
return self.generator.finished
if self.y is None: # always need some data before switching strats
return False
if self.max_asks is not None and self._count >= self.max_asks:
return True
if "binary" in self.outcome_types:
n_yes_trials = (self.y == 1).sum()
n_no_trials = (self.y == 0).sum()
sufficient_outcomes = (
n_yes_trials >= self.min_total_outcome_occurrences
and n_no_trials >= self.min_total_outcome_occurrences
)
else:
sufficient_outcomes = True
if self.min_post_range is not None:
fmean, _ = self.model.predict(self.eval_grid, probability_space=True)
meets_post_range = (fmean.max() - fmean.min()) >= self.min_post_range
else:
meets_post_range = True
finished = (
self._count >= self.min_asks
and self.n >= self.min_total_tells
and sufficient_outcomes
and meets_post_range
)
return finished
@property
def can_fit(self):
return self.has_model and self.x is not None and self.y is not None
@property
def n_trials(self):
warnings.warn(
"'n_trials' is deprecated and will be removed in a future release. Specify 'min_asks' instead.",
DeprecationWarning,
)
return self.min_asks
def add_data(self, x, y):
self.x, self.y, self.n = self.normalize_inputs(x, y)
self._model_is_fresh = False
def fit(self):
if self.can_fit:
if self.keep_most_recent is not None:
try:
self.model.fit(
self.x[-self.keep_most_recent :],
self.y[-self.keep_most_recent :],
)
except (ModelFittingError):
logger.warning(
"Failed to fit model! Predictions may not be accurate!"
)
else:
try:
self.model.fit(self.x, self.y)
except (ModelFittingError):
logger.warning(
"Failed to fit model! Predictions may not be accurate!"
)
else:
warnings.warn("Cannot fit: no model has been initialized!", RuntimeWarning)
def update(self):
if self.can_fit:
if self.keep_most_recent is not None:
try:
self.model.update(
self.x[-self.keep_most_recent :],
self.y[-self.keep_most_recent :],
)
except (ModelFittingError):
logger.warning(
"Failed to fit model! Predictions may not be accurate!"
)
else:
try:
self.model.update(self.x, self.y)
except (ModelFittingError):
logger.warning(
"Failed to fit model! Predictions may not be accurate!"
)
else:
warnings.warn("Cannot fit: no model has been initialized!", RuntimeWarning)
@classmethod
def from_config(cls, config: Config, name: str):
lb = config.gettensor(name, "lb")
ub = config.gettensor(name, "ub")
dim = config.getint(name, "dim", fallback=None)
stimuli_per_trial = config.getint(name, "stimuli_per_trial", fallback=1)
outcome_types = config.getlist(name, "outcome_types", element_type=str)
gen_cls = config.getobj(name, "generator", fallback=SobolGenerator)
generator = gen_cls.from_config(config)
model_cls = config.getobj(name, "model", fallback=None)
if model_cls is not None:
model = model_cls.from_config(config)
else:
model = None
acqf_cls = config.getobj(name, "acqf", fallback=None)
if acqf_cls is not None and hasattr(generator, "acqf"):
if generator.acqf is None:
generator.acqf = acqf_cls
generator.acqf_kwargs = generator._get_acqf_options(acqf_cls, config)
min_asks = config.getint(name, "min_asks", fallback=0)
min_total_tells = config.getint(name, "min_total_tells", fallback=0)
refit_every = config.getint(name, "refit_every", fallback=1)
if model is not None and not generator._requires_model:
if refit_every < min_asks:
warnings.warn(
f"Strategy '{name}' has refit_every < min_asks even though its generator does not require a model. Consider making refit_every = min_asks to speed up point generation.",
UserWarning,
)
keep_most_recent = config.getint(name, "keep_most_recent", fallback=None)
min_total_outcome_occurrences = config.getint(
name,
"min_total_outcome_occurrences",
fallback=1 if "binary" in outcome_types else 0,
)
min_post_range = config.getfloat(name, "min_post_range", fallback=None)
keep_most_recent = config.getint(name, "keep_most_recent", fallback=None)
n_trials = config.getint(name, "n_trials", fallback=None)
if n_trials is not None:
warnings.warn(
"'n_trials' is deprecated and will be removed in a future release. Specify 'min_asks' instead.",
DeprecationWarning,
)
min_asks = n_trials
return cls(
lb=lb,
ub=ub,
stimuli_per_trial=stimuli_per_trial,
outcome_types=outcome_types,
dim=dim,
model=model,
generator=generator,
min_asks=min_asks,
refit_every=refit_every,
min_total_outcome_occurrences=min_total_outcome_occurrences,
min_post_range=min_post_range,
keep_most_recent=keep_most_recent,
min_total_tells=min_total_tells,
name=name,
)
class SequentialStrategy(object):
"""Runs a sequence of strategies defined by its config
All getter methods defer to the current strat
Args:
strat_list (list[Strategy]): TODO make this nicely typed / doc'd
"""
def __init__(self, strat_list: List[Strategy]):
self.strat_list = strat_list
self._strat_idx = 0
self._suggest_count = 0
@property
def _strat(self):
return self.strat_list[self._strat_idx]
def __getattr__(self, name: str):
# return current strategy's attr if it's not a container attr
if "strat_list" not in vars(self):
raise AttributeError("Have no strategies in container, what happened?")
return getattr(self._strat, name)
def _make_next_strat(self):
if (self._strat_idx + 1) >= len(self.strat_list):
warnings.warn(
"Ran out of generators, staying on final generator!", RuntimeWarning
)
return
# populate new model with final data from last model
assert (
self.x is not None and self.y is not None
), "Cannot initialize next strategy; no data has been given!"
self.strat_list[self._strat_idx + 1].add_data(self.x, self.y)
self._suggest_count = 0
self._strat_idx = self._strat_idx + 1
def gen(self, num_points: int = 1, **kwargs):
if self._strat.finished:
self._make_next_strat()
self._suggest_count = self._suggest_count + num_points
return self._strat.gen(num_points=num_points, **kwargs)
def finish(self):
self._strat.finish()
@property
def finished(self):
return self._strat_idx == (len(self.strat_list) - 1) and self._strat.finished
def add_data(self, x, y):
self._strat.add_data(x, y)
@classmethod
def from_config(cls, config: Config):
strat_names = config.getlist("common", "strategy_names", element_type=str)
# ensure strat_names are unique
assert len(strat_names) == len(
set(strat_names)
), f"Strategy names {strat_names} are not all unique!"
strats = []
for name in strat_names:
strat = Strategy.from_config(config, str(name))
strats.append(strat)
return cls(strat_list=strats)
class AEPsychStrategy(ConfigurableMixin):
is_finished = False
def __init__(self, ax_client: AxClient):
self.ax_client = ax_client
self.ax_client.experiment.num_asks = 0
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None) -> Dict:
# TODO: Fix the mypy errors
strat_names: List[str] = config.getlist("common", "strategy_names", element_type=str) # type: ignore
steps = []
for name in strat_names:
generator = config.getobj(name, "generator", fallback=AxSobolGenerator) # type: ignore
opts = generator.get_config_options(config, name)
step = AEPsychGenerationStep(**opts)
steps.append(step)
# Add an extra step at the end that we can `ask` endlessly.
final_step = copy(step)
final_step.completion_criteria = []
steps.append(final_step)
parameters = get_parameters(config)
parameter_constraints = config.getlist(
"common", "par_constraints", element_type=str, fallback=None
)
objectives = get_objectives(config)
seed = config.getint("common", "random_seed", fallback=None)
strat = GenerationStrategy(steps=steps)
ax_client = AxClient(strat, random_seed=seed)
ax_client.create_experiment(
name="experiment",
parameters=parameters,
parameter_constraints=parameter_constraints,
objectives=objectives,
)
return {"ax_client": ax_client}
@property
def finished(self) -> bool:
if self.is_finished:
return True
self.strat._maybe_move_to_next_step()
return len(self.strat._steps) == (self.strat.current_step.index + 1)
def finish(self):
self.is_finished = True
def gen(self, num_points: int = 1):
x, _ = self.ax_client.get_next_trials(max_trials=num_points)
self.strat.experiment.num_asks += num_points
return x
def complete_new_trial(self, config, outcome):
_, trial_index = self.ax_client.attach_trial(config)
self.complete_existing_trial(trial_index, outcome)
def complete_existing_trial(self, trial_index, outcome):
self.ax_client.complete_trial(trial_index, outcome)
@property
def experiment(self):
return self.ax_client.experiment
@property
def strat(self):
return self.ax_client.generation_strategy
@property
def can_fit(self):
return (
self.strat.model is not None
and len(self.experiment.trial_indices_by_status[TrialStatus.COMPLETED]) > 0
)
def _warn_on_outcome_mismatch(self):
ax_model = self.ax_client.generation_strategy.model
aepsych_model = ax_model.model.surrogate.model
if (
hasattr(aepsych_model, "outcome_type")
and aepsych_model.outcome_type != "continuous"
):
warnings.warn(
"Cannot directly plot non-continuous outcomes. Plotting the latent function instead."
)
def plot_contours(
self, density: int = 50, slice_values: Optional[Dict[str, Any]] = None
):
"""Plot predictions for a 2-d slice of the parameter space.
Args:
density: Number of points along each parameter to evaluate predictions.
slice_values: A dictionary {name: val} for the fixed values of the
other parameters. If not provided, then the mean of numeric
parameters or the mode of choice parameters will be used.
"""
assert (
len(self.experiment.parameters) > 1
), "plot_contours requires at least 2 parameters! Use 'plot_slice' instead."
ax_model = self.ax_client.generation_strategy.model
self._warn_on_outcome_mismatch()
render(
interact_contour(
model=ax_model,
metric_name="objective",
density=density,
slice_values=slice_values,
)
)
def plot_slice(
self,
param_name: str,
density: int = 50,
slice_values: Optional[Dict[str, Any]] = None,
):
"""Plot predictions for a 1-d slice of the parameter space.
Args:
param_name: Name of parameter that will be sliced
density: Number of points along slice to evaluate predictions.
slice_values: A dictionary {name: val} for the fixed values of the
other parameters. If not provided, then the mean of numeric
parameters or the mode of choice parameters will be used.
"""
self._warn_on_outcome_mismatch()
ax_model = self.ax_client.generation_strategy.model
render(
plot_slice(
model=ax_model,
param_name=param_name,
metric_name="objective",
density=density,
slice_values=slice_values,
)
)
def get_pareto_optimal_parameters(self):
return self.ax_client.get_pareto_optimal_parameters()
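# ---------------------------------------------------------------------------
# Editor's usage sketch (comments only): driving a SequentialStrategy directly
# from a config, without going through the server. The INI contents can mirror
# the one used in clients/python/tests/test_client.py; the file path and
# run_trial helper below are hypothetical stand-ins.
#
#     from aepsych.config import Config
#     from aepsych.strategy import SequentialStrategy
#
#     cfg = Config(config_fnames=["my_experiment.ini"])   # hypothetical path
#     strat = SequentialStrategy.from_config(cfg)
#     while not strat.finished:
#         x = strat.gen()          # next point(s) to present, [num_points x dim]
#         y = run_trial(x)         # 0/1 outcome(s) from the participant
#         strat.add_data(x, y)
# ---------------------------------------------------------------------------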
|
aepsych-main
|
aepsych/strategy.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import logging.config
import os
logger = logging.getLogger()
def getLogger(level=logging.INFO, log_path="logs") -> logging.Logger:
my_format = "%(asctime)-15s [%(levelname)-7s] %(message)s"
os.makedirs(log_path, exist_ok=True)
logging_config = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {"standard": {"format": my_format}},
"handlers": {
"default": {
"level": level,
"class": "logging.StreamHandler",
"formatter": "standard",
},
"file": {
"class": "logging.FileHandler",
"level": logging.DEBUG,
"filename": f"{log_path}/bayes_opt_server.log",
"formatter": "standard",
},
},
"loggers": {
"": {"handlers": ["default", "file"], "level": level, "propagate": False},
},
}
logging.config.dictConfig(logging_config)
return logger
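# Editor's note: typical usage is simply
#
#     logger = getLogger()
#     logger.info("starting experiment")
#
# which, per the dictConfig above, logs to the console at `level` and to
# logs/bayes_opt_server.log at DEBUG.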
|
aepsych-main
|
aepsych/utils_logging.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Iterable
from configparser import NoOptionError
from typing import Dict, List, Mapping, Optional, Tuple
import numpy as np
import torch
from ax.service.utils.instantiation import ObjectiveProperties
from scipy.stats import norm
from torch.quasirandom import SobolEngine
def make_scaled_sobol(lb, ub, size, seed=None):
lb, ub, ndim = _process_bounds(lb, ub, None)
grid = SobolEngine(dimension=ndim, scramble=True, seed=seed).draw(size)
# rescale from [0,1] to [lb, ub]
grid = lb + (ub - lb) * grid
return grid
def promote_0d(x):
if not isinstance(x, Iterable):
return [x]
return x
def dim_grid(
lower: torch.Tensor,
upper: torch.Tensor,
dim: int,
gridsize: int = 30,
slice_dims: Optional[Mapping[int, float]] = None,
) -> torch.Tensor:
"""Create a grid
Create a grid based on lower, upper, and dim.
Parameters
----------
    - lower (torch.Tensor) - lower bounds
    - upper (torch.Tensor) - upper bounds
    - dim (int) - dimension of the grid
    - gridsize (int) - number of grid points per dimension
    - slice_dims (Optional, dict) - values to use for slicing axes, as an {index: value} dict
Returns
----------
grid : torch.FloatTensor
Tensor
"""
slice_dims = slice_dims or {}
lower, upper, _ = _process_bounds(lower, upper, None)
mesh_vals = []
for i in range(dim):
if i in slice_dims.keys():
mesh_vals.append(slice(slice_dims[i] - 1e-10, slice_dims[i] + 1e-10, 1))
else:
mesh_vals.append(slice(lower[i].item(), upper[i].item(), gridsize * 1j))
return torch.Tensor(np.mgrid[mesh_vals].reshape(dim, -1).T)
def _process_bounds(lb, ub, dim) -> Tuple[torch.Tensor, torch.Tensor, int]:
"""Helper function for ensuring bounds are correct shape and type."""
lb = promote_0d(lb)
ub = promote_0d(ub)
if not isinstance(lb, torch.Tensor):
lb = torch.tensor(lb)
if not isinstance(ub, torch.Tensor):
ub = torch.tensor(ub)
lb = lb.float()
ub = ub.float()
assert lb.shape[0] == ub.shape[0], "bounds should be of equal shape!"
if dim is not None:
if lb.shape[0] == 1:
lb = lb.repeat(dim)
ub = ub.repeat(dim)
else:
assert lb.shape[0] == dim, "dim does not match shape of bounds!"
else:
dim = lb.shape[0]
for i, (l, u) in enumerate(zip(lb, ub)):
assert (
l <= u
), f"Lower bound {l} is not less than or equal to upper bound {u} on dimension {i}!"
return lb, ub, dim
def interpolate_monotonic(x, y, z, min_x=-np.inf, max_x=np.inf):
# Ben Letham's 1d interpolation code, assuming monotonicity.
# basic idea is find the nearest two points to the LSE and
# linearly interpolate between them (I think this is bisection
# root-finding)
idx = np.searchsorted(y, z)
if idx == len(y):
return float(max_x)
elif idx == 0:
return float(min_x)
x0 = x[idx - 1]
x1 = x[idx]
y0 = y[idx - 1]
y1 = y[idx]
x_star = x0 + (x1 - x0) * (z - y0) / (y1 - y0)
return x_star
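# Editor's worked example for interpolate_monotonic: with x = [0, 1, 2],
# y = [0.2, 0.5, 0.9] and target z = 0.7, searchsorted gives idx = 2, so the
# interpolation uses (x0, y0) = (1, 0.5) and (x1, y1) = (2, 0.9):
#     x* = 1 + (2 - 1) * (0.7 - 0.5) / (0.9 - 0.5) = 1.5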
def get_lse_interval(
model,
mono_grid,
target_level,
cred_level=None,
mono_dim=-1,
n_samps=500,
lb=-np.inf,
ub=np.inf,
gridsize=30,
**kwargs,
):
xgrid = torch.Tensor(
np.mgrid[
[
slice(model.lb[i].item(), model.ub[i].item(), gridsize * 1j)
for i in range(model.dim)
]
]
.reshape(model.dim, -1)
.T
)
samps = model.sample(xgrid, num_samples=n_samps, **kwargs)
samps = [s.reshape((gridsize,) * model.dim) for s in samps.detach().numpy()]
contours = np.stack(
[
get_lse_contour(norm.cdf(s), mono_grid, target_level, mono_dim, lb, ub)
for s in samps
]
)
if cred_level is None:
        return np.mean(contours, axis=0)
else:
alpha = 1 - cred_level
qlower = alpha / 2
qupper = 1 - alpha / 2
upper = np.quantile(contours, qupper, axis=0)
lower = np.quantile(contours, qlower, axis=0)
median = np.quantile(contours, 0.5, axis=0)
return median, lower, upper
def get_lse_contour(post_mean, mono_grid, level, mono_dim=-1, lb=-np.inf, ub=np.inf):
return np.apply_along_axis(
lambda p: interpolate_monotonic(mono_grid, p, level, lb, ub),
mono_dim,
post_mean,
)
def get_jnd_1d(post_mean, mono_grid, df=1, mono_dim=-1, lb=-np.inf, ub=np.inf):
interpolate_to = post_mean + df
return (
np.array(
[interpolate_monotonic(mono_grid, post_mean, ito) for ito in interpolate_to]
)
- mono_grid
)
def get_jnd_multid(post_mean, mono_grid, df=1, mono_dim=-1, lb=-np.inf, ub=np.inf):
return np.apply_along_axis(
lambda p: get_jnd_1d(p, mono_grid, df=df, mono_dim=mono_dim, lb=lb, ub=ub),
mono_dim,
post_mean,
)
def _get_ax_parameters(config):
range_parnames = config.getlist("common", "parnames", element_type=str, fallback=[])
lb = config.getlist("common", "lb", element_type=float, fallback=[])
ub = config.getlist("common", "ub", element_type=float, fallback=[])
assert (
len(range_parnames) == len(lb) == len(ub)
), f"Length of parnames ({range_parnames}), lb ({lb}), and ub ({ub}) don't match!"
range_params = [
{
"name": parname,
"type": "range",
"value_type": config.get(parname, "value_type", fallback="float"),
"log_scale": config.getboolean(parname, "log_scale", fallback=False),
"bounds": [l, u],
}
for parname, l, u in zip(range_parnames, lb, ub)
]
choice_parnames = config.getlist(
"common", "choice_parnames", element_type=str, fallback=[]
)
choices = [
config.getlist(parname, "choices", element_type=str, fallback=["True", "False"])
for parname in choice_parnames
]
choice_params = [
{
"name": parname,
"type": "choice",
"value_type": config.get(parname, "value_type", fallback="str"),
"is_ordered": config.getboolean(parname, "is_ordered", fallback=False),
"values": choice,
}
for parname, choice in zip(choice_parnames, choices)
]
fixed_parnames = config.getlist(
"common", "fixed_parnames", element_type=str, fallback=[]
)
values = []
for parname in fixed_parnames:
try:
try:
value = config.getfloat(parname, "value")
except ValueError:
value = config.get(parname, "value")
values.append(value)
except NoOptionError:
raise RuntimeError(f"Missing value for fixed parameter {parname}!")
fixed_params = [
{
"name": parname,
"type": "fixed",
"value": value,
}
for parname, value in zip(fixed_parnames, values)
]
return range_params, choice_params, fixed_params
def get_parameters(config) -> List[Dict]:
range_params, choice_params, fixed_params = _get_ax_parameters(config)
return range_params + choice_params + fixed_params
def get_dim(config) -> int:
range_params, choice_params, _ = _get_ax_parameters(config)
# Need to sum dimensions added by both range and choice parameters
dim = len(range_params) # 1 dim per range parameter
for par in choice_params:
if par["is_ordered"]:
dim += 1 # Ordered choice params are encoded like continuous parameters
elif len(par["values"]) > 2:
dim += len(
par["values"]
            )  # Unordered choice parameters with more than 2 values are one-hot encoded, adding 1 dim per value
else:
dim += (
len(par["values"]) - 1
) # Choice parameters with n_choices < 3 add n_choices - 1 dims
return dim
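# Worked example of the dimension bookkeeping above (illustrative): two range
# parameters, one ordered choice parameter, one unordered choice parameter
# with 3 values, and one binary choice parameter give
# dim = 2 + 1 + 3 + 1 = 7.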
def get_objectives(config) -> Dict:
outcome_types: List[str] = config.getlist(
"common", "outcome_types", element_type=str
)
if len(outcome_types) > 1:
for out_type in outcome_types:
assert (
out_type == "continuous"
), "Multiple outcomes is only currently supported for continuous outcomes!"
outcome_names: List[str] = config.getlist(
"common", "outcome_names", element_type=str, fallback=None
)
if outcome_names is None:
outcome_names = [f"outcome_{i+1}" for i in range(len(outcome_types))]
objectives = {}
for out_name in outcome_names:
minimize = config.getboolean(out_name, "minimize", fallback=False)
threshold = config.getfloat(out_name, "threshold", fallback=None)
objectives[out_name] = ObjectiveProperties(
minimize=minimize, threshold=threshold
)
return objectives
|
aepsych-main
|
aepsych/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import itertools
import time
from random import shuffle
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
import numpy as np
import pandas as pd
import torch
from aepsych.config import Config
from aepsych.strategy import ensure_model_is_fresh, SequentialStrategy
from tqdm.contrib.itertools import product as tproduct
from .problem import Problem
class Benchmark:
"""
Benchmark base class.
This class wraps standard functionality for benchmarking models including
generating cartesian products of run configurations, running the simulated
experiment loop, and logging results.
TODO make a benchmarking tutorial and link/refer to it here.
"""
def __init__(
self,
problems: List[Problem],
configs: Mapping[str, Union[str, list]],
seed: Optional[int] = None,
n_reps: int = 1,
log_every: Optional[int] = 10,
) -> None:
"""Initialize benchmark.
Args:
problems (List[Problem]): Problem objects containing the test function to evaluate.
configs (Mapping[str, Union[str, list]]): Dictionary of configs to run.
Lists at leaves are used to construct a cartesian product of configurations.
seed (int, optional): Random seed to use for reproducible benchmarks.
Defaults to randomized seeds.
n_reps (int, optional): Number of repetitions to run of each configuration. Defaults to 1.
log_every (int, optional): Logging interval during an experiment. Defaults to logging every 10 trials.
"""
self.problems = problems
self.n_reps = n_reps
self.combinations = self.make_benchmark_list(**configs)
self._log: List[Dict[str, object]] = []
self.log_every = log_every
# shuffle combinations so that intermediate results have a bit of everything
shuffle(self.combinations)
if seed is None:
# explicit cast because int and np.int_ are different types
self.seed = int(np.random.randint(0, 200))
else:
self.seed = seed
def make_benchmark_list(self, **bench_config) -> List[Dict[str, float]]:
"""Generate a list of benchmarks to run from configuration.
This constructs a cartesian product of config dicts using lists at
the leaves of the base config
Returns:
List[dict[str, float]]: List of dictionaries, each of which can be passed
to aepsych.config.Config.
"""
# This could be a generator but then we couldn't
# know how many params we have, tqdm wouldn't work, etc,
# so we materialize the full list.
def gen_combinations(d):
keys, values = d.keys(), d.values()
# only go cartesian on list leaves
values = [v if type(v) == list else [v] for v in values]
combinations = itertools.product(*values)
return [dict(zip(keys, c)) for c in combinations]
keys, values = bench_config.keys(), bench_config.values()
return [
dict(zip(keys, c))
for c in itertools.product(*(gen_combinations(v) for v in values))
]
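    # Illustrative sketch (not part of the class): a config such as
    # {"init_strat": {"min_asks": [10, 20]}, "common": {"model": ["A", "B"]}}
    # expands to 2 x 2 = 4 benchmark dicts, one per combination of list-leaf
    # values, while scalar leaves are shared across all of them.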
def materialize_config(self, config_dict):
materialized_config = {}
for key, value in config_dict.items():
materialized_config[key] = {
k: v._evaluate(config_dict) if isinstance(v, DerivedValue) else v
for k, v in value.items()
}
return materialized_config
@property
def num_benchmarks(self) -> int:
"""Return the total number of runs in this benchmark.
Returns:
int: Total number of runs in this benchmark.
"""
return len(self.problems) * len(self.combinations) * self.n_reps
def make_strat_and_flatconfig(
self, config_dict: Mapping[str, str]
) -> Tuple[SequentialStrategy, Dict[str, str]]:
"""From a config dict, generate a strategy (for running) and
flattened config (for logging)
Args:
config_dict (Mapping[str, str]): A run configuration dictionary.
Returns:
Tuple[SequentialStrategy, Dict[str,str]]: A tuple containing a strategy
object and a flat config.
"""
config = Config()
config.update(config_dict=config_dict)
strat = SequentialStrategy.from_config(config)
flatconfig = self.flatten_config(config)
return strat, flatconfig
def run_experiment(
self,
problem: Problem,
config_dict: Dict[str, Any],
seed: int,
rep: int,
) -> Tuple[List[Dict[str, Any]], Union[SequentialStrategy, None]]:
"""Run one simulated experiment.
Args:
            problem (Problem): Problem to run the simulated experiment on.
            config_dict (Dict[str, Any]): AEPsych configuration to use.
seed (int): Random seed for this run.
rep (int): Index of this repetition.
Returns:
Tuple[List[Dict[str, object]], SequentialStrategy]: A tuple containing a log of the results and the strategy as
of the end of the simulated experiment. This is ignored in large-scale benchmarks but useful for
one-off visualization.
"""
torch.manual_seed(seed)
np.random.seed(seed)
config_dict["common"]["lb"] = str(problem.lb.tolist())
config_dict["common"]["ub"] = str(problem.ub.tolist())
config_dict["problem"] = problem.metadata
materialized_config = self.materialize_config(config_dict)
# no-op config
is_invalid = materialized_config["common"].get("invalid_config", False)
if is_invalid:
return [{}], None
strat, flatconfig = self.make_strat_and_flatconfig(materialized_config)
problem_metadata = {
f"problem_{key}": value for key, value in problem.metadata.items()
}
total_gentime = 0.0
total_fittime = 0.0
i = 0
results = []
while not strat.finished:
starttime = time.time()
next_x = strat.gen()
gentime = time.time() - starttime
total_gentime += gentime
next_y = [problem.sample_y(next_x)]
strat.add_data(next_x, next_y)
# strat usually defers model fitting until it is needed
# (e.g. for gen or predict) so that we don't refit
# unnecessarily. But for benchmarking we want to time
# fit and gen separately, so we force a strat update
# so we can time fit vs gen. TODO make this less awkward
starttime = time.time()
ensure_model_is_fresh(lambda x: None)(strat._strat)
fittime = time.time() - starttime
total_fittime += fittime
if (self.log_at(i) or strat.finished) and strat.has_model:
metrics = problem.evaluate(strat)
result = {
"fit_time": fittime,
"cum_fit_time": total_fittime,
"gen_time": gentime,
"cum_gen_time": total_gentime,
"trial_id": i,
"rep": rep,
"seed": seed,
"final": strat.finished,
"strat_idx": strat._strat_idx,
}
result.update(problem_metadata)
result.update(flatconfig)
result.update(metrics)
results.append(result)
i = i + 1
return results, strat
def run_benchmarks(self):
"""Run all the benchmarks, sequentially."""
for i, (rep, config, problem) in enumerate(
tproduct(range(self.n_reps), self.combinations, self.problems)
):
local_seed = i + self.seed
results, _ = self.run_experiment(problem, config, seed=local_seed, rep=rep)
if results != [{}]:
self._log.extend(results)
def flatten_config(self, config: Config) -> Dict[str, str]:
"""Flatten a config object for logging.
Args:
config (Config): AEPsych config object.
Returns:
Dict[str,str]: A flat dictionary (that can be used to build a flat pandas data frame).
"""
flatconfig = {}
for s in config.sections():
flatconfig.update({f"{s}_{k}": v for k, v in config[s].items()})
return flatconfig
def log_at(self, i: int) -> bool:
"""Check if we should log on this trial index.
Args:
i (int): Trial index to (maybe) log at.
Returns:
bool: True if this trial should be logged.
"""
if self.log_every is not None:
return i % self.log_every == 0
else:
return False
def pandas(self) -> pd.DataFrame:
return pd.DataFrame(self._log)
class DerivedValue(object):
"""
A class for dynamically generating config values from other config values during benchmarking.
"""
def __init__(self, args: List[Tuple[str, str]], func: Callable) -> None:
"""Initialize DerivedValue.
Args:
args (List[Tuple[str]]): Each tuple in this list is a pair of strings that refer to keys in a nested dictionary.
func (Callable): A function that accepts args as input.
For example, consider the following:
benchmark_config = {
"common": {
"model": ["GPClassificationModel", "FancyNewModelToBenchmark"],
"acqf": "MCLevelSetEstimation"
},
"init_strat": {
"min_asks": [10, 20],
"generator": "SobolGenerator"
},
"opt_strat": {
"generator": "OptimizeAcqfGenerator",
"min_asks":
DerivedValue(
[("init_strat", "min_asks"), ("common", "model")],
lambda x,y : 100 - x if y == "GPClassificationModel" else 50 - x)
}
}
Four separate benchmarks would be generated from benchmark_config:
1. model = GPClassificationModel; init trials = 10; opt trials = 90
2. model = GPClassificationModel; init trials = 20; opt trials = 80
3. model = FancyNewModelToBenchmark; init trials = 10; opt trials = 40
4. model = FancyNewModelToBenchmark; init trials = 20; opt trials = 30
        Note that you can also pass the problem name into func by including ("problem", "name") in args.
"""
self.args = args
self.func = func
def _evaluate(self, benchmark_config: Dict) -> Any:
"""Fetches values of self.args from benchmark_config and evaluates self.func on them."""
_args = [benchmark_config[outer][inner] for outer, inner in self.args]
return self.func(*_args)
|
aepsych-main
|
aepsych/benchmark/benchmark.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import time
import traceback
from copy import deepcopy
from pathlib import Path
from random import shuffle
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import aepsych.utils_logging as utils_logging
import multiprocess.context as ctx
import numpy as np
import pathos
import torch
from aepsych.benchmark import Benchmark
from aepsych.benchmark.problem import Problem
from aepsych.strategy import SequentialStrategy
ctx._force_start_method("spawn") # fixes problems with CUDA and fork
logger = utils_logging.getLogger(logging.INFO)
class PathosBenchmark(Benchmark):
"""Benchmarking class for parallelized benchmarks using pathos"""
def __init__(self, nproc: int = 1, *args, **kwargs):
"""Initialize pathos benchmark.
Args:
nproc (int, optional): Number of cores to use. Defaults to 1.
"""
super().__init__(*args, **kwargs)
# parallelize over jobs, so each job should be 1 thread only
num_threads = torch.get_num_threads()
num_interopt_threads = torch.get_num_interop_threads()
if num_threads > 1 or num_interopt_threads > 1:
raise RuntimeError(
"PathosBenchmark parallelizes over threads,"
+ "and as such is incompatible with torch being threaded. "
+ "Please call `torch.set_num_threads(1)` and "
+ "`torch.set_num_interop_threads(1)` before using PathosBenchmark!"
)
cores_available = pathos.multiprocessing.cpu_count()
if nproc >= cores_available:
raise RuntimeError(
f"Requesting a benchmark with {nproc} cores but "
+ f"machine has {cores_available} cores! It is highly "
"recommended to leave at least 1-2 cores open for OS tasks."
)
self.pool = pathos.pools.ProcessPool(nodes=nproc)
def __del__(self):
# destroy the pool (for when we're testing or running
# multiple benchmarks in one script) but if the GC already
# cleared the underlying multiprocessing object (usually on
# the final call), don't do anything.
if hasattr(self, "pool") and self.pool is not None:
try:
self.pool.close()
self.pool.join()
self.pool.clear()
except TypeError:
pass
def run_experiment(
self,
problem: Problem,
config_dict: Dict[str, Any],
seed: int,
rep: int,
) -> Tuple[List[Dict[str, Any]], Union[SequentialStrategy, None]]:
"""Run one simulated experiment.
Args:
            problem (Problem): Problem to run the simulated experiment on.
            config_dict (Dict[str, Any]): AEPsych configuration to use.
seed (int): Random seed for this run.
rep (int): Index of this repetition.
Returns:
Tuple[List[Dict[str, Any]], SequentialStrategy]: A tuple containing a log of the results and the strategy as
of the end of the simulated experiment. This is ignored in large-scale benchmarks but useful for
one-off visualization.
"""
# copy things that we mutate
local_config = deepcopy(config_dict)
try:
return super().run_experiment(problem, local_config, seed, rep)
except Exception as e:
logging.error(
f"Error on config {config_dict}: {e}!"
+ f"Traceback follows:\n{traceback.format_exc()}"
)
return [], SequentialStrategy([])
def __getstate__(self):
self_dict = self.__dict__.copy()
if "pool" in self_dict.keys():
del self_dict["pool"]
if "futures" in self_dict.keys():
del self_dict["futures"]
return self_dict
def run_benchmarks(self):
"""Run all the benchmarks,
Note that this blocks while waiting for benchmarks to complete. If you
would like to start benchmarks and periodically collect partial results,
use start_benchmarks and then call collate_benchmarks(wait=False) on some
interval.
"""
self.start_benchmarks()
self.collate_benchmarks(wait=True)
def start_benchmarks(self):
"""Start benchmark run.
This does not block: after running it, self.futures holds the
status of benchmarks running in parallel.
"""
def run_discard_strat(*conf):
logger, _ = self.run_experiment(*conf)
return logger
self.all_sim_configs = [
(problem, config_dict, self.seed + seed, rep)
for seed, (problem, config_dict, rep) in enumerate(
itertools.product(self.problems, self.combinations, range(self.n_reps))
)
]
shuffle(self.all_sim_configs)
self.futures = [
self.pool.apipe(run_discard_strat, *conf) for conf in self.all_sim_configs
]
@property
def is_done(self) -> bool:
"""Check if the benchmark is done.
Returns:
bool: True if all futures are cleared and benchmark is done.
"""
return len(self.futures) == 0
def collate_benchmarks(self, wait: bool = False) -> None:
"""Collect benchmark results from completed futures.
Args:
wait (bool, optional): If true, this method blocks and waits
on all futures to complete. Defaults to False.
"""
newfutures = []
while self.futures:
item = self.futures.pop()
if wait or item.ready():
results = item.get()
# filter out empty results from invalid configs
results = [r for r in results if r != {}]
if isinstance(results, list):
self._log.extend(results)
else:
newfutures.append(item)
self.futures = newfutures
def run_benchmarks_with_checkpoints(
out_path: str,
benchmark_name: str,
problems: List[Problem],
configs: Mapping[str, Union[str, list]],
global_seed: Optional[int] = None,
n_chunks: int = 1,
n_reps_per_chunk: int = 1,
log_every: Optional[int] = None,
checkpoint_every: int = 60,
n_proc: int = 1,
serial_debug: bool = False,
) -> None:
"""Runs a series of benchmarks, saving both final and intermediate results to .csv files. Benchmarks are run in
sequential chunks, each of which runs all combinations of problems/configs/reps in parallel. This function should
    always be invoked under the "if __name__ == '__main__': ..." idiom.
Args:
out_path (str): The path to save the results to.
        benchmark_name (str): A name given to this set of benchmarks. Results will be saved in files named like
"out_path/benchmark_name_chunk{chunk_number}_out.csv"
problems (List[Problem]): Problem objects containing the test function to evaluate.
configs (Mapping[str, Union[str, list]]): Dictionary of configs to run.
Lists at leaves are used to construct a cartesian product of configurations.
global_seed (int, optional): Global seed to use for reproducible benchmarks.
Defaults to randomized seeds.
n_chunks (int): The number of chunks to break the results into. Each chunk will contain at least 1 run of every
combination of problem and config.
n_reps_per_chunk (int, optional): Number of repetitions to run each problem/config in each chunk.
log_every (int, optional): Logging interval during an experiment. Defaults to only logging at the end.
checkpoint_every (int): Save intermediate results every checkpoint_every seconds.
n_proc (int): Number of processors to use.
        serial_debug (bool): If True, run everything serially in a single process (useful for debugging). Defaults to False.
"""
Path(out_path).mkdir(
parents=True, exist_ok=True
) # make an output folder if not exist
if serial_debug:
out_fname = Path(f"{out_path}/{benchmark_name}_out.csv")
print(f"Starting {benchmark_name} benchmark (serial debug mode)...")
bench = Benchmark(
problems=problems,
configs=configs,
seed=global_seed,
n_reps=n_reps_per_chunk * n_chunks,
log_every=log_every,
)
bench.run_benchmarks()
final_results = bench.pandas()
final_results.to_csv(out_fname)
else:
for chunk in range(n_chunks):
out_fname = Path(f"{out_path}/{benchmark_name}_chunk{chunk}_out.csv")
intermediate_fname = Path(
f"{out_path}/{benchmark_name}_chunk{chunk}_checkpoint.csv"
)
print(f"Starting {benchmark_name} benchmark... chunk {chunk} ")
bench = PathosBenchmark(
nproc=n_proc,
problems=problems,
configs=configs,
seed=None,
n_reps=n_reps_per_chunk,
log_every=log_every,
)
if global_seed is None:
global_seed = int(np.random.randint(0, 200))
bench.seed = (
global_seed + chunk * bench.num_benchmarks
) # HACK. TODO: make num_benchmarks a property of bench configs
bench.start_benchmarks()
while not bench.is_done:
time.sleep(checkpoint_every)
collate_start = time.time()
print(
f"Checkpointing {benchmark_name} chunk {chunk}..., {len(bench.futures)}/{bench.num_benchmarks} alive"
)
bench.collate_benchmarks(wait=False)
temp_results = bench.pandas()
if len(temp_results) > 0:
temp_results["rep"] = temp_results["rep"] + n_reps_per_chunk * chunk
temp_results.to_csv(intermediate_fname)
print(
f"Collate done in {time.time()-collate_start} seconds, {len(bench.futures)}/{bench.num_benchmarks} left"
)
print(f"{benchmark_name} chunk {chunk} fully done!")
final_results = bench.pandas()
final_results["rep"] = final_results["rep"] + n_reps_per_chunk * chunk
final_results.to_csv(out_fname)
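# Usage sketch (illustrative; `my_problems` and `my_benchmark_config` are
# placeholders, not part of the library). As noted in the docstring, call this
# under the `if __name__ == "__main__":` guard so spawned worker processes can
# re-import the module safely:
#
#     if __name__ == "__main__":
#         run_benchmarks_with_checkpoints(
#             out_path="bench_out",
#             benchmark_name="demo",
#             problems=my_problems,
#             configs=my_benchmark_config,
#             n_chunks=2,
#             n_reps_per_chunk=5,
#             n_proc=4,
#         )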
|
aepsych-main
|
aepsych/benchmark/pathos_benchmark.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .benchmark import Benchmark, DerivedValue
from .pathos_benchmark import PathosBenchmark, run_benchmarks_with_checkpoints
from .problem import LSEProblem, Problem
from .test_functions import (
discrim_highdim,
make_songetal_testfun,
modified_hartmann6,
novel_detection_testfun,
novel_discrimination_testfun,
)
__all__ = [
"Benchmark",
"DerivedValue",
"PathosBenchmark",
"PathosBenchmark",
"Problem",
"LSEProblem",
"make_songetal_testfun",
"novel_detection_testfun",
"novel_discrimination_testfun",
"modified_hartmann6",
"discrim_highdim",
"run_benchmarks_with_checkpoints",
]
|
aepsych-main
|
aepsych/benchmark/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import cached_property
from typing import Any, Dict, Union
import aepsych
import numpy as np
import torch
from aepsych.strategy import SequentialStrategy, Strategy
from aepsych.utils import make_scaled_sobol
from scipy.stats import bernoulli, norm, pearsonr
class Problem:
"""Wrapper for a problem or test function. Subclass from this
and override f() to define your test function.
"""
n_eval_points = 1000
@cached_property
def eval_grid(self):
return make_scaled_sobol(lb=self.lb, ub=self.ub, size=self.n_eval_points)
@property
def name(self) -> str:
raise NotImplementedError
def f(self, x):
raise NotImplementedError
@cached_property
def lb(self):
return self.bounds[0]
@cached_property
def ub(self):
return self.bounds[1]
@property
def bounds(self):
raise NotImplementedError
@property
def metadata(self) -> Dict[str, Any]:
"""A dictionary of metadata passed to the Benchmark to be logged. Each key will become a column in the
Benchmark's output dataframe, with its associated value stored in each row."""
return {"name": self.name}
def p(self, x: np.ndarray) -> np.ndarray:
"""Evaluate response probability from test function.
Args:
x (np.ndarray): Points at which to evaluate.
Returns:
            np.ndarray: Response probability at query points.
"""
return norm.cdf(self.f(x))
def sample_y(self, x: np.ndarray) -> np.ndarray:
"""Sample a response from test function.
Args:
x (np.ndarray): Points at which to sample.
Returns:
np.ndarray: A single (bernoulli) sample at points.
"""
return bernoulli.rvs(self.p(x))
def f_hat(self, model: aepsych.models.base.ModelProtocol) -> torch.Tensor:
"""Generate mean predictions from the model over the evaluation grid.
Args:
model (aepsych.models.base.ModelProtocol): Model to evaluate.
Returns:
torch.Tensor: Posterior mean from underlying model over the evaluation grid.
"""
f_hat, _ = model.predict(self.eval_grid)
return f_hat
@cached_property
def f_true(self) -> np.ndarray:
"""Evaluate true test function over evaluation grid.
Returns:
torch.Tensor: Values of true test function over evaluation grid.
"""
return self.f(self.eval_grid).detach().numpy()
@cached_property
def p_true(self) -> torch.Tensor:
"""Evaluate true response probability over evaluation grid.
Returns:
torch.Tensor: Values of true response probability over evaluation grid.
"""
return norm.cdf(self.f_true)
def p_hat(self, model: aepsych.models.base.ModelProtocol) -> torch.Tensor:
"""Generate mean predictions from the model over the evaluation grid.
Args:
model (aepsych.models.base.ModelProtocol): Model to evaluate.
Returns:
torch.Tensor: Posterior mean from underlying model over the evaluation grid.
"""
p_hat, _ = model.predict(self.eval_grid, probability_space=True)
return p_hat
def evaluate(
self,
strat: Union[Strategy, SequentialStrategy],
) -> Dict[str, float]:
"""Evaluate the strategy with respect to this problem.
Extend this in subclasses to add additional metrics.
Metrics include:
        - mae (mean absolute error), mse (mean squared error), max_abs_err (max absolute error),
            and pearson correlation. All of these are computed over the latent variable f and the
            outcome probability p, w.r.t. the posterior mean. Absolute and squared errors (miae, mise) are
            also computed in expectation over the posterior, by sampling.
- Brier score, which measures how well-calibrated the outcome probability is, both at the posterior
mean (plain brier) and in expectation over the posterior (expected_brier).
Args:
strat (aepsych.strategy.Strategy): Strategy to evaluate.
Returns:
Dict[str, float]: A dictionary containing metrics and their values.
"""
# we just use model here but eval gets called on strat in case we need it in downstream evals
# for example to separate out sobol vs opt trials
model = strat.model
assert model is not None, "Cannot evaluate strategy without a model!"
# always eval f
f_hat = self.f_hat(model).detach().numpy()
p_hat = self.p_hat(model).detach().numpy()
assert (
self.f_true.shape == f_hat.shape
), f"self.f_true.shape=={self.f_true.shape} != f_hat.shape=={f_hat.shape}"
mae_f = np.mean(np.abs(self.f_true - f_hat))
mse_f = np.mean((self.f_true - f_hat) ** 2)
max_abs_err_f = np.max(np.abs(self.f_true - f_hat))
corr_f = pearsonr(self.f_true.flatten(), f_hat.flatten())[0]
mae_p = np.mean(np.abs(self.p_true - p_hat))
mse_p = np.mean((self.p_true - p_hat) ** 2)
max_abs_err_p = np.max(np.abs(self.p_true - p_hat))
corr_p = pearsonr(self.p_true.flatten(), p_hat.flatten())[0]
brier = np.mean(2 * np.square(self.p_true - p_hat))
# eval in samp-based expectation over posterior instead of just mean
fsamps = model.sample(self.eval_grid, num_samples=1000).detach().numpy()
try:
psamps = (
model.sample(self.eval_grid, num_samples=1000, probability_space=True) # type: ignore
.detach()
.numpy()
)
except TypeError: # vanilla models don't have proba_space samps, TODO maybe we should add them
psamps = norm.cdf(fsamps)
ferrs = fsamps - self.f_true[None, :]
miae_f = np.mean(np.abs(ferrs))
mise_f = np.mean(ferrs**2)
perrs = psamps - self.p_true[None, :]
miae_p = np.mean(np.abs(perrs))
mise_p = np.mean(perrs**2)
expected_brier = (2 * np.square(self.p_true[None, :] - psamps)).mean()
metrics = {
"mean_abs_err_f": mae_f,
"mean_integrated_abs_err_f": miae_f,
"mean_square_err_f": mse_f,
"mean_integrated_square_err_f": mise_f,
"max_abs_err_f": max_abs_err_f,
"pearson_corr_f": corr_f,
"mean_abs_err_p": mae_p,
"mean_integrated_abs_err_p": miae_p,
"mean_square_err_p": mse_p,
"mean_integrated_square_err_p": mise_p,
"max_abs_err_p": max_abs_err_p,
"pearson_corr_p": corr_p,
"brier": brier,
"expected_brier": expected_brier,
}
return metrics
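# Minimal subclass sketch (illustrative only; `ToyProblem` is hypothetical): a
# concrete Problem supplies a name, bounds, and the latent function f, and
# inherits p, sample_y, and the evaluation metrics from the base class.
#
#     class ToyProblem(Problem):
#         name = "toy"
#         bounds = torch.stack((torch.zeros(1), torch.ones(1)))
#
#         def f(self, x):
#             return 2 * x[..., 0] - 1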
class LSEProblem(Problem):
"""Level set estimation problem.
This extends the base problem class to evaluate the LSE/threshold estimate
in addition to the function estimate.
"""
threshold = 0.75
@property
def metadata(self) -> Dict[str, Any]:
"""A dictionary of metadata passed to the Benchmark to be logged. Each key will become a column in the
Benchmark's output dataframe, with its associated value stored in each row."""
md = super().metadata
md["threshold"] = self.threshold
return md
def f_threshold(self, model=None):
try:
inverse_torch = model.likelihood.objective.inverse
def inverse_link(x):
return inverse_torch(torch.tensor(x)).numpy()
except AttributeError:
inverse_link = norm.ppf
return float(inverse_link(self.threshold))
@cached_property
def true_below_threshold(self) -> np.ndarray:
"""
Evaluate whether the true function is below threshold over the eval grid
        (used for proper scoring and the threshold misclassification metric).
"""
return (self.p(self.eval_grid) <= self.threshold).astype(float)
def evaluate(self, strat: Union[Strategy, SequentialStrategy]) -> Dict[str, float]:
"""Evaluate the model with respect to this problem.
For level set estimation, we add metrics w.r.t. the true threshold:
        - brier_p_below_{thresh}, the brier score w.r.t. p(f(x)<thresh), in contrast to
regular brier, which is the brier score for p(phi(f(x))=1), and the same
for misclassification error.
Args:
strat (aepsych.strategy.Strategy): Strategy to evaluate.
Returns:
Dict[str, float]: A dictionary containing metrics and their values,
including parent class metrics.
"""
metrics = super().evaluate(strat)
# we just use model here but eval gets called on strat in case we need it in downstream evals
# for example to separate out sobol vs opt trials
model = strat.model
assert model is not None, "Cannot make predictions without a model!"
# TODO bring back more threshold error metrics when we more clearly
# define what "threshold" means in high-dim.
# Predict p(below threshold) at test points
p_l = model.p_below_threshold(self.eval_grid, self.f_threshold(model))
# Brier score on level-set probabilities
thresh = self.threshold
brier_name = f"brier_p_below_{thresh}"
metrics[brier_name] = np.mean(2 * np.square(self.true_below_threshold - p_l))
# Classification error
classerr_name = f"missclass_on_thresh_{thresh}"
metrics[classerr_name] = np.mean(
p_l * (1 - self.true_below_threshold)
+ (1 - p_l) * self.true_below_threshold
)
return metrics
|
aepsych-main
|
aepsych/benchmark/problem.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import io
import math
from typing import Callable
import numpy as np
import pandas as pd
from scipy.interpolate import CubicSpline, interp1d
from scipy.stats import norm
# manually scraped data from doi:10.1007/s10162-013-0396-x fig 2
raw = """\
freq,thresh,phenotype
0.25,6.816404934,Older-normal
0.5,5.488517768,Older-normal
1,3.512856308,Older-normal
2,5.909671334,Older-normal
3,6.700337017,Older-normal
4,10.08761498,Older-normal
6,13.46962853,Older-normal
8,12.97026073,Older-normal
0.25,5.520856346,Sensory
0.5,4.19296918,Sensory
1,5.618122764,Sensory
2,19.83681866,Sensory
3,42.00403606,Sensory
4,53.32679981,Sensory
6,62.0527006,Sensory
8,66.08775286,Sensory
0.25,21.2291323,Metabolic
0.5,22.00676227,Metabolic
1,24.24163372,Metabolic
2,33.92590956,Metabolic
3,41.35626176,Metabolic
4,47.17294402,Metabolic
6,54.1174655,Metabolic
8,58.31446133,Metabolic
0.25,20.25772154,Metabolic+Sensory
0.5,20.71121368,Metabolic+Sensory
1,21.97442369,Metabolic+Sensory
2,37.48866818,Metabolic+Sensory
3,53.17814263,Metabolic+Sensory
4,64.01507567,Metabolic+Sensory
6,75.00818649,Metabolic+Sensory
8,76.61433583,Metabolic+Sensory"""
dubno_data = pd.read_csv(io.StringIO(raw))
def make_songetal_threshfun(x: np.ndarray, y: np.ndarray) -> Callable[[float], float]:
"""Generate a synthetic threshold function by interpolation of real data.
Real data is from Dubno et al. 2013, and procedure follows Song et al. 2017, 2018.
See make_songetal_testfun for more detail.
Args:
x (np.ndarray): Frequency
y (np.ndarray): Threshold
Returns:
Callable[[float], float]: Function that interpolates the given
frequencies and thresholds and returns threshold as a function
of frequency.
"""
f_interp = CubicSpline(x, y, extrapolate=False)
f_extrap = interp1d(x, y, fill_value="extrapolate")
def f_combo(x):
# interpolate first
interpolated = f_interp(x)
# whatever is nan needs extrapolating
interpolated[np.isnan(interpolated)] = f_extrap(x[np.isnan(interpolated)])
return interpolated
return f_combo
def make_songetal_testfun(
phenotype: str = "Metabolic", beta: float = 1
) -> Callable[[np.ndarray, bool], np.ndarray]:
"""Make an audiometric test function following Song et al. 2017.
    To do so, we first compute a threshold by interpolation/extrapolation
from real data, then assume a linear psychometric function in intensity
with slope beta.
Args:
phenotype (str, optional): Audiometric phenotype from Dubno et al. 2013.
Specifically, one of "Metabolic", "Sensory", "Metabolic+Sensory",
or "Older-normal". Defaults to "Metabolic".
beta (float, optional): Psychometric function slope. Defaults to 1.
Returns:
Callable[[np.ndarray, bool], np.ndarray]: A test function taking a [b x 2] array of points and returning the psychometric function value at those points.
Raises:
AssertionError: if an invalid phenotype is passed.
References:
Song, X. D., Garnett, R., & Barbour, D. L. (2017).
Psychometric function estimation by probabilistic classification.
The Journal of the Acoustical Society of America, 141(4), 2513–2525.
https://doi.org/10.1121/1.4979594
"""
valid_phenotypes = ["Metabolic", "Sensory", "Metabolic+Sensory", "Older-normal"]
assert phenotype in valid_phenotypes, f"Phenotype must be one of {valid_phenotypes}"
x = dubno_data[dubno_data.phenotype == phenotype].freq.values
y = dubno_data[dubno_data.phenotype == phenotype].thresh.values
# first, make the threshold fun
threshfun = make_songetal_threshfun(x, y)
# now make it into a test function
def song_testfun(x, cdf=False):
logfreq = x[..., 0]
intensity = x[..., 1]
thresh = threshfun(2**logfreq)
return (
norm.cdf((intensity - thresh) / beta)
if cdf
else (intensity - thresh) / beta
)
return song_testfun
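# Usage sketch (illustrative): the returned callable expects points whose first
# column is log2 frequency and whose second column is intensity, and returns
# the latent value, or the response probability when cdf=True.
#
#     testfun = make_songetal_testfun(phenotype="Metabolic", beta=2)
#     x = np.array([[1.0, 30.0]])  # log2(freq) = 1, intensity = 30
#     p = testfun(x, cdf=True)     # response probability at that point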
def novel_discrimination_testfun(x: np.ndarray) -> np.ndarray:
"""Evaluate novel discrimination test function from Owen et al.
The threshold is roughly parabolic with context, and the slope
varies with the threshold. Adding to the difficulty is the fact
that the function is minimized at f=0 (or p=0.5), corresponding
to discrimination being at chance at zero stimulus intensity.
Args:
x (np.ndarray): Points at which to evaluate.
Returns:
np.ndarray: Value of function at these points.
"""
freq = x[..., 0]
amp = x[..., 1]
context = 2 * (0.05 + 0.4 * (-1 + 0.2 * freq) ** 2 * freq**2)
return 2 * (amp + 1) / context
def novel_detection_testfun(x: np.ndarray) -> np.ndarray:
"""Evaluate novel detection test function from Owen et al.
The threshold is roughly parabolic with context, and the slope
varies with the threshold.
Args:
x (np.ndarray): Points at which to evaluate.
Returns:
np.ndarray: Value of function at these points.
"""
freq = x[..., 0]
amp = x[..., 1]
context = 2 * (0.05 + 0.4 * (-1 + 0.2 * freq) ** 2 * freq**2)
return 4 * (amp + 1) / context - 4
def discrim_highdim(x: np.ndarray) -> np.ndarray:
amp = x[..., 0]
freq = x[..., 1]
vscale = x[..., 2]
vshift = x[..., 3]
variance = x[..., 4]
asym = x[..., 5]
phase = x[..., 6]
period = x[..., 7]
context = (
-0.5 * vscale * np.cos(period * 0.6 * math.pi * freq + phase)
+ vscale / 2
+ vshift
) * (
-1 * asym * np.sin(period * 0.6 * math.pi * 0.5 * freq + phase) + (2 - asym)
) - 1
z = (amp - context) / (variance + variance * (1 + context))
p = norm.cdf(z)
p = (1 - 0.5) * p + 0.5 # Floor at p=0.5
p = np.clip(p, 0.5, 1 - 1e-5) # clip so that norm.ppf doesn't go to inf
return norm.ppf(p)
def modified_hartmann6(X):
"""
The modified Hartmann6 function used in Lyu et al.
"""
C = np.r_[0.2, 0.22, 0.28, 0.3]
a_t = np.c_[
[8, 3, 10, 3.5, 1.7, 6],
[0.5, 8, 10, 1.0, 6, 9],
[3, 3.5, 1.7, 8, 10, 6],
[10, 6, 0.5, 8, 1.0, 9],
].T
p_t = (
10 ** (-4)
* np.c_[
[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381],
].T
)
y = 0.0
for i, C_i in enumerate(C):
t = 0
for j in range(6):
t += a_t[i, j] * ((X[j] - p_t[i, j]) ** 2)
y += C_i * np.exp(-t)
return -10 * (float(y) - 0.1)
|
aepsych-main
|
aepsych/benchmark/test_functions.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import logging
import os
import uuid
from contextlib import contextmanager
from pathlib import Path
from typing import Dict
import aepsych.database.tables as tables
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import close_all_sessions
logger = logging.getLogger()
class Database:
def __init__(self, db_path=None):
if db_path is None:
db_path = "./databases/default.db"
db_dir, db_name = os.path.split(db_path)
self._db_name = db_name
self._db_dir = db_dir
if os.path.exists(db_path):
logger.info(f"Found DB at {db_path}, appending!")
else:
logger.info(f"No DB found at {db_path}, creating a new DB!")
self._engine = self.get_engine()
def get_engine(self):
if not hasattr(self, "_engine") or self._engine is None:
self._full_db_path = Path(self._db_dir)
self._full_db_path.mkdir(parents=True, exist_ok=True)
self._full_db_path = self._full_db_path.joinpath(self._db_name)
self._engine = create_engine(f"sqlite:///{self._full_db_path.as_posix()}")
# create the table metadata and tables
tables.Base.metadata.create_all(self._engine)
# create an ongoing session to be used. Provides a conduit
# to the db so the instantiated objects work properly.
Session = sessionmaker(bind=self.get_engine())
self._session = Session()
return self._engine
def delete_db(self):
if self._engine is not None and self._full_db_path.exists():
close_all_sessions()
self._full_db_path.unlink()
self._engine = None
def is_update_required(self):
return (
tables.DBMasterTable.requires_update(self._engine)
or tables.DbReplayTable.requires_update(self._engine)
or tables.DbStratTable.requires_update(self._engine)
or tables.DbConfigTable.requires_update(self._engine)
or tables.DbRawTable.requires_update(self._engine)
or tables.DbParamTable.requires_update(self._engine)
or tables.DbOutcomeTable.requires_update(self._engine)
)
def perform_updates(self):
"""Perform updates on known tables. SQLAlchemy doesn't do alters so they're done the old fashioned way."""
tables.DBMasterTable.update(self._engine)
tables.DbReplayTable.update(self._engine)
tables.DbStratTable.update(self._engine)
tables.DbConfigTable.update(self._engine)
tables.DbRawTable.update(self, self._engine)
tables.DbParamTable.update(self._engine)
tables.DbOutcomeTable.update(self._engine)
@contextmanager
def session_scope(self):
"""Provide a transactional scope around a series of operations."""
Session = sessionmaker(bind=self.get_engine())
session = Session()
try:
yield session
session.commit()
except Exception as err:
logger.error(f"db session use failed: {err}")
session.rollback()
raise
finally:
session.close()
# @retry(stop_max_attempt_number=8, wait_exponential_multiplier=1.8)
def execute_sql_query(self, query: str, vals: Dict[str, str]):
"""Execute an arbitrary query written in sql."""
with self.session_scope() as session:
return session.execute(query, vals).fetchall()
def get_master_records(self):
"""Grab the list of master records."""
records = self._session.query(tables.DBMasterTable).all()
return records
def get_master_record(self, experiment_id):
"""Grab the list of master record for a specific experiment (master) id."""
records = (
self._session.query(tables.DBMasterTable)
.filter(tables.DBMasterTable.experiment_id == experiment_id)
.all()
)
if 0 < len(records):
return records[0]
return None
def get_replay_for(self, master_id):
"""Get the replay records for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None:
return master_record.children_replay
return None
def get_strats_for(self, master_id=0):
"""Get the strat records for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None and len(master_record.children_strat) > 0:
return [c.strat for c in master_record.children_strat]
return None
def get_strat_for(self, master_id, strat_id=-1):
"""Get a specific strat record for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None and len(master_record.children_strat) > 0:
return master_record.children_strat[strat_id].strat
return None
def get_config_for(self, master_id):
"""Get the strat records for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None:
return master_record.children_config[0].config
return None
def get_raw_for(self, master_id):
"""Get the raw data for a specific master row."""
master_record = self.get_master_record(master_id)
if master_record is not None:
return master_record.children_raw
return None
def get_all_params_for(self, master_id):
"""Get the parameters for all the iterations of a specific experiment."""
raw_record = self.get_raw_for(master_id)
params = []
if raw_record is not None:
for raw in raw_record:
for param in raw.children_param:
params.append(param)
return params
return None
def get_param_for(self, master_id, iteration_id):
"""Get the parameters for a specific iteration of a specific experiment."""
raw_record = self.get_raw_for(master_id)
if raw_record is not None:
for raw in raw_record:
if raw.unique_id == iteration_id:
return raw.children_param
return None
def get_all_outcomes_for(self, master_id):
"""Get the outcomes for all the iterations of a specific experiment."""
raw_record = self.get_raw_for(master_id)
outcomes = []
if raw_record is not None:
for raw in raw_record:
for outcome in raw.children_outcome:
outcomes.append(outcome)
return outcomes
return None
def get_outcome_for(self, master_id, iteration_id):
"""Get the outcomes for a specific iteration of a specific experiment."""
raw_record = self.get_raw_for(master_id)
if raw_record is not None:
for raw in raw_record:
if raw.unique_id == iteration_id:
return raw.children_outcome
return None
def record_setup(
self,
description,
name,
extra_metadata=None,
id=None,
request=None,
participant_id=None,
) -> str:
self.get_engine()
if id is None:
master_table = tables.DBMasterTable()
master_table.experiment_description = description
master_table.experiment_name = name
master_table.experiment_id = str(uuid.uuid4())
if participant_id is not None:
master_table.participant_id = participant_id
else:
master_table.participant_id = str(
uuid.uuid4()
) # no p_id specified will result in a generated UUID
master_table.extra_metadata = extra_metadata
self._session.add(master_table)
logger.debug(f"record_setup = [{master_table}]")
else:
master_table = self.get_master_record(id)
if master_table is None:
raise RuntimeError(f"experiment id {id} doesn't exist in the db.")
record = tables.DbReplayTable()
record.message_type = "setup"
record.message_contents = request
if "extra_info" in request:
record.extra_info = request["extra_info"]
record.timestamp = datetime.datetime.now()
record.parent = master_table
logger.debug(f"record_setup = [{record}]")
self._session.add(record)
self._session.commit()
        # return the master table; it holds the links to the list of child rows and
        # needs to be passed into all future record_* calls to link properly
return master_table
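    # Illustrative flow (hedged sketch, not part of the class): record_setup
    # returns the master row, which is then passed to subsequent record_*
    # calls so that child rows link to the right experiment.
    #
    #     db = Database(db_path="./databases/example.db")  # hypothetical path
    #     master = db.record_setup(description="demo", name="demo", request={})
    #     db.record_message(master, "tell", request={})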
def record_message(self, master_table, type, request) -> None:
# create a linked setup table
record = tables.DbReplayTable()
record.message_type = type
record.message_contents = request
if "extra_info" in request:
record.extra_info = request["extra_info"]
record.timestamp = datetime.datetime.now()
record.parent = master_table
self._session.add(record)
self._session.commit()
def record_raw(self, master_table, model_data, timestamp=None):
raw_entry = tables.DbRawTable()
raw_entry.model_data = model_data
if timestamp is None:
raw_entry.timestamp = datetime.datetime.now()
else:
raw_entry.timestamp = timestamp
raw_entry.parent = master_table
self._session.add(raw_entry)
self._session.commit()
return raw_entry
def record_param(self, raw_table, param_name, param_value) -> None:
param_entry = tables.DbParamTable()
param_entry.param_name = param_name
param_entry.param_value = param_value
param_entry.parent = raw_table
self._session.add(param_entry)
self._session.commit()
def record_outcome(self, raw_table, outcome_name, outcome_value) -> None:
outcome_entry = tables.DbOutcomeTable()
outcome_entry.outcome_name = outcome_name
outcome_entry.outcome_value = outcome_value
outcome_entry.parent = raw_table
self._session.add(outcome_entry)
self._session.commit()
def record_strat(self, master_table, strat):
strat_entry = tables.DbStratTable()
strat_entry.strat = strat
strat_entry.timestamp = datetime.datetime.now()
strat_entry.parent = master_table
self._session.add(strat_entry)
self._session.commit()
def record_config(self, master_table, config):
config_entry = tables.DbConfigTable()
config_entry.config = config
config_entry.timestamp = datetime.datetime.now()
config_entry.parent = master_table
self._session.add(config_entry)
self._session.commit()
def list_master_records(self):
master_records = self.get_master_records()
print("Listing master records:")
for record in master_records:
print(
f'\t{record.unique_id} - name: "{record.experiment_name}" experiment id: {record.experiment_id}'
)
|
aepsych-main
|
aepsych/database/db.py
|
aepsych-main
|
aepsych/database/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pickle
from collections.abc import Iterable
from aepsych.config import Config
from aepsych.version import __version__
from sqlalchemy import (
Boolean,
Column,
DateTime,
Float,
ForeignKey,
Integer,
PickleType,
String,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
logger = logging.getLogger()
Base = declarative_base()
"""
Original Schema
CREATE TABLE master (
unique_id INTEGER NOT NULL,
experiment_name VARCHAR(256),
experiment_description VARCHAR(2048),
experiment_id VARCHAR(10),
PRIMARY KEY (unique_id),
UNIQUE (experiment_id)
);
CREATE TABLE replay_data (
unique_id INTEGER NOT NULL,
timestamp DATETIME,
message_type VARCHAR(64),
message_contents BLOB,
master_table_id INTEGER,
PRIMARY KEY (unique_id),
FOREIGN KEY(master_table_id) REFERENCES master (unique_id)
);
"""
class DBMasterTable(Base):
"""
Master table to keep track of all experiments and unique keys associated with the experiment
"""
__tablename__ = "master"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
experiment_name = Column(String(256))
experiment_description = Column(String(2048))
experiment_id = Column(String(10), unique=True)
participant_id = Column(String(50), unique=True)
extra_metadata = Column(String(4096)) # JSON-formatted metadata
children_replay = relationship("DbReplayTable", back_populates="parent")
children_strat = relationship("DbStratTable", back_populates="parent")
children_config = relationship("DbConfigTable", back_populates="parent")
children_raw = relationship("DbRawTable", back_populates="parent")
@classmethod
def from_sqlite(cls, row):
this = DBMasterTable()
this.unique_id = row["unique_id"]
this.experiment_name = row["experiment_name"]
this.experiment_description = row["experiment_description"]
this.experiment_id = row["experiment_id"]
return this
def __repr__(self):
return (
f"<DBMasterTable(unique_id={self.unique_id})"
f", experiment_name={self.experiment_name}, "
f"experiment_description={self.experiment_description}, "
f"experiment_id={self.experiment_id})>"
)
@staticmethod
def update(engine):
logger.info("DBMasterTable : update called")
if not DBMasterTable._has_column(engine, "extra_metadata"):
DBMasterTable._add_column(engine, "extra_metadata")
if not DBMasterTable._has_column(engine, "participant_id"):
DBMasterTable._add_column(engine, "participant_id")
@staticmethod
def requires_update(engine):
return not DBMasterTable._has_column(
engine, "extra_metadata"
) or not DBMasterTable._has_column(engine, "participant_id")
@staticmethod
def _has_column(engine, column: str):
result = engine.execute(
"SELECT COUNT(*) FROM pragma_table_info('master') WHERE name='{0}'".format(
column
)
)
rows = result.fetchall()
count = rows[0][0]
return count != 0
@staticmethod
def _add_column(engine, column: str):
try:
result = engine.execute(
"SELECT COUNT(*) FROM pragma_table_info('master') WHERE name='{0}'".format(
column
)
)
rows = result.fetchall()
count = rows[0][0]
if 0 == count:
logger.debug(
"Altering the master table to add the {0} column".format(column)
)
engine.execute(
"ALTER TABLE master ADD COLUMN {0} VARCHAR".format(column)
)
engine.commit()
except Exception as e:
logger.debug(f"Column already exists, no need to alter. [{e}]")
class DbReplayTable(Base):
__tablename__ = "replay_data"
use_extra_info = False
unique_id = Column(Integer, primary_key=True, autoincrement=True)
timestamp = Column(DateTime)
message_type = Column(String(64))
# specify the pickler to allow backwards compatibility between 3.7 and 3.8
message_contents = Column(PickleType(pickler=pickle))
extra_info = Column(PickleType(pickler=pickle))
master_table_id = Column(Integer, ForeignKey("master.unique_id"))
parent = relationship("DBMasterTable", back_populates="children_replay")
__mapper_args__ = {}
@classmethod
def from_sqlite(cls, row):
this = DbReplayTable()
this.unique_id = row["unique_id"]
this.timestamp = row["timestamp"]
this.message_type = row["message_type"]
this.message_contents = row["message_contents"]
this.master_table_id = row["master_table_id"]
if "extra_info" in row:
this.extra_info = row["extra_info"]
else:
this.extra_info = None
this.strat = row["strat"]
return this
def __repr__(self):
return (
f"<DbReplayTable(unique_id={self.unique_id})"
f", timestamp={self.timestamp}, "
f"message_type={self.message_type}"
f", master_table_id={self.master_table_id})>"
)
@staticmethod
def _has_extra_info(engine):
result = engine.execute(
"SELECT COUNT(*) FROM pragma_table_info('replay_data') WHERE name='extra_info'"
)
rows = result.fetchall()
count = rows[0][0]
return count != 0
@staticmethod
def _configs_require_conversion(engine):
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
results = session.query(DbReplayTable).all()
for result in results:
if result.message_contents["type"] == "setup":
config_str = result.message_contents["message"]["config_str"]
config = Config(config_str=config_str)
if config.version < __version__:
return True # assume that if any config needs to be refactored, all of them do
return False
@staticmethod
def update(engine):
logger.info("DbReplayTable : update called")
if not DbReplayTable._has_extra_info(engine):
DbReplayTable._add_extra_info(engine)
if DbReplayTable._configs_require_conversion(engine):
DbReplayTable._convert_configs(engine)
@staticmethod
def requires_update(engine):
return not DbReplayTable._has_extra_info(
engine
) or DbReplayTable._configs_require_conversion(engine)
@staticmethod
def _add_extra_info(engine):
try:
result = engine.execute(
"SELECT COUNT(*) FROM pragma_table_info('replay_data') WHERE name='extra_info'"
)
rows = result.fetchall()
count = rows[0][0]
if 0 == count:
logger.debug(
"Altering the replay_data table to add the extra_info column"
)
engine.execute("ALTER TABLE replay_data ADD COLUMN extra_info BLOB")
engine.commit()
except Exception as e:
logger.debug(f"Column already exists, no need to alter. [{e}]")
@staticmethod
def _convert_configs(engine):
Session = sessionmaker(bind=engine)
session = Session()
results = session.query(DbReplayTable).all()
for result in results:
if result.message_contents["type"] == "setup":
config_str = result.message_contents["message"]["config_str"]
config = Config(config_str=config_str)
if config.version < __version__:
config.convert_to_latest()
new_str = str(config)
new_message = {"type": "setup", "message": {"config_str": new_str}}
if "version" in result.message_contents:
new_message["version"] = result.message_contents["version"]
result.message_contents = new_message
session.commit()
logger.info("DbReplayTable : updated old configs.")
class DbStratTable(Base):
__tablename__ = "strat_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
timestamp = Column(DateTime)
strat = Column(PickleType(pickler=pickle))
master_table_id = Column(Integer, ForeignKey("master.unique_id"))
parent = relationship("DBMasterTable", back_populates="children_strat")
@classmethod
def from_sqlite(cls, row):
this = DbStratTable()
this.unique_id = row["unique_id"]
this.timestamp = row["timestamp"]
this.strat = row["strat"]
this.master_table_id = row["master_table_id"]
return this
def __repr__(self):
return (
f"<DbStratTable(unique_id={self.unique_id})"
f", timestamp={self.timestamp} "
f", master_table_id={self.master_table_id})>"
)
@staticmethod
def update(engine):
logger.info("DbStratTable : update called")
@staticmethod
def requires_update(engine):
return False
class DbConfigTable(Base):
__tablename__ = "config_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
timestamp = Column(DateTime)
config = Column(PickleType(pickler=pickle))
master_table_id = Column(Integer, ForeignKey("master.unique_id"))
parent = relationship("DBMasterTable", back_populates="children_config")
@classmethod
def from_sqlite(cls, row):
this = DbConfigTable()
this.unique_id = row["unique_id"]
this.timestamp = row["timestamp"]
this.strat = row["config"]
this.master_table_id = row["master_table_id"]
return this
def __repr__(self):
return (
f"<DbStratTable(unique_id={self.unique_id})"
f", timestamp={self.timestamp} "
f", master_table_id={self.master_table_id})>"
)
@staticmethod
def update(engine):
logger.info("DbConfigTable : update called")
@staticmethod
def requires_update(engine):
return False
class DbRawTable(Base):
"""
Fact table to store the raw data of each iteration of an experiment.
"""
__tablename__ = "raw_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
timestamp = Column(DateTime)
model_data = Column(Boolean)
master_table_id = Column(Integer, ForeignKey("master.unique_id"))
parent = relationship("DBMasterTable", back_populates="children_raw")
children_param = relationship("DbParamTable", back_populates="parent")
children_outcome = relationship("DbOutcomeTable", back_populates="parent")
@classmethod
def from_sqlite(cls, row):
this = DbRawTable()
this.unique_id = row["unique_id"]
this.timestamp = row["timestamp"]
this.model_data = row["model_data"]
this.master_table_id = row["master_table_id"]
return this
def __repr__(self):
return (
f"<DbRawTable(unique_id={self.unique_id})"
f", timestamp={self.timestamp} "
f", master_table_id={self.master_table_id})>"
)
@staticmethod
def update(db, engine):
logger.info("DbRawTable : update called")
# Get every master table
for master_table in db.get_master_records():
            # Build raw-table rows from this master record's 'tell' replay messages
for message in master_table.children_replay:
if message.message_type != "tell":
continue
timestamp = message.timestamp
# Deserialize pickle message
message_contents = message.message_contents
# Get outcome
outcomes = message_contents["message"]["outcome"]
# Get parameters
params = message_contents["message"]["config"]
# Get model_data
model_data = message_contents["message"].get("model_data", True)
db_raw_record = db.record_raw(
master_table=master_table,
model_data=bool(model_data),
timestamp=timestamp,
)
for param_name, param_value in params.items():
if isinstance(param_value, Iterable) and type(param_value) != str:
if len(param_value) == 1:
db.record_param(
raw_table=db_raw_record,
param_name=str(param_name),
param_value=float(param_value[0]),
)
else:
for j, v in enumerate(param_value):
db.record_param(
raw_table=db_raw_record,
param_name=str(param_name) + "_stimuli" + str(j),
param_value=float(v),
)
else:
db.record_param(
raw_table=db_raw_record,
param_name=str(param_name),
param_value=float(param_value),
)
if isinstance(outcomes, Iterable) and type(outcomes) != str:
for j, outcome_value in enumerate(outcomes):
if (
isinstance(outcome_value, Iterable)
and type(outcome_value) != str
):
if len(outcome_value) == 1:
outcome_value = outcome_value[0]
else:
raise ValueError(
"Multi-outcome values must be a list of lists of length 1!"
)
db.record_outcome(
raw_table=db_raw_record,
outcome_name="outcome_" + str(j),
outcome_value=float(outcome_value),
)
else:
db.record_outcome(
raw_table=db_raw_record,
outcome_name="outcome",
outcome_value=float(outcomes),
)
@staticmethod
def requires_update(engine):
"""Check if the raw table is empty, and data already exists."""
n_raws = engine.execute("SELECT COUNT (*) FROM raw_data").fetchone()[0]
n_tells = engine.execute(
"SELECT COUNT (*) FROM replay_data \
WHERE message_type = 'tell'"
).fetchone()[0]
if n_raws == 0 and n_tells != 0:
return True
return False
class DbParamTable(Base):
"""
Dimension table to store the parameters of each iteration of an experiment.
Supports multiple parameters per iteration, and multiple stimuli per parameter.
"""
__tablename__ = "param_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
param_name = Column(String(50))
param_value = Column(String(50))
iteration_id = Column(Integer, ForeignKey("raw_data.unique_id"))
parent = relationship("DbRawTable", back_populates="children_param")
@classmethod
def from_sqlite(cls, row):
this = DbParamTable()
this.unique_id = row["unique_id"]
this.param_name = row["param_name"]
this.param_value = row["param_value"]
this.iteration_id = row["iteration_id"]
return this
def __repr__(self):
return (
f"<DbParamTable(unique_id={self.unique_id})"
f", iteration_id={self.iteration_id}>"
)
@staticmethod
def update(engine):
logger.info("DbParamTable : update called")
@staticmethod
def requires_update(engine):
return False
class DbOutcomeTable(Base):
"""
Dimension table to store the outcomes of each iteration of an experiment.
Supports multiple outcomes per iteration.
"""
__tablename__ = "outcome_data"
unique_id = Column(Integer, primary_key=True, autoincrement=True)
outcome_name = Column(String(50))
outcome_value = Column(Float)
iteration_id = Column(Integer, ForeignKey("raw_data.unique_id"))
parent = relationship("DbRawTable", back_populates="children_outcome")
@classmethod
def from_sqlite(cls, row):
this = DbOutcomeTable()
this.unique_id = row["unique_id"]
this.outcome_name = row["outcome_name"]
this.outcome_value = row["outcome_value"]
this.iteration_id = row["iteration_id"]
return this
def __repr__(self):
return (
f"<DbOutcomeTable(unique_id={self.unique_id})"
f", iteration_id={self.iteration_id}>"
)
@staticmethod
def update(engine):
logger.info("DbOutcomeTable : update called")
@staticmethod
def requires_update(engine):
return False
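if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: wire the ORM classes above to an
    # in-memory SQLite database and show how raw rows relate to their param/outcome children.
    # Assumes `Base` is the declarative base defined earlier in this file; the toy data and
    # the in-memory engine are purely illustrative.
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    raw = DbRawTable(model_data=True)
    raw.children_param = [DbParamTable(param_name="x", param_value="0.5")]
    raw.children_outcome = [DbOutcomeTable(outcome_name="outcome", outcome_value=1.0)]
    session.add(raw)
    session.commit()

    for r in session.query(DbRawTable).all():
        print(r, {p.param_name: p.param_value for p in r.children_param})
    session.close()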
|
aepsych-main
|
aepsych/database/tables.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Any
import torch
from gpytorch.kernels.rbf_kernel_grad import RBFKernelGrad
class RBFKernelPartialObsGrad(RBFKernelGrad):
"""An RBF kernel over observations of f, and partial/non-overlapping
observations of the gradient of f.
gpytorch.kernels.rbf_kernel_grad assumes a block structure where every
partial derivative is observed at the same set of points at which x is
observed. This generalizes that by allowing f and any subset of the
derivatives of f to be observed at different sets of points.
The final column of x1 and x2 needs to be an index that identifies what is
observed at that point. It should be 0 if this observation is of f, and i
if it is of df/dxi.
"""
def forward(
self, x1: torch.Tensor, x2: torch.Tensor, diag: bool = False, **params: Any
) -> torch.Tensor:
# Extract grad index from each
grad_idx1 = x1[..., -1].to(dtype=torch.long)
grad_idx2 = x2[..., -1].to(dtype=torch.long)
K = super().forward(x1[..., :-1], x2[..., :-1], diag=diag, **params)
# Compute which elements to return
n1 = x1.shape[-2]
n2 = x2.shape[-2]
d = x1.shape[-1] - 1
p1 = [(i * (d + 1)) + int(grad_idx1[i]) for i in range(n1)]
p2 = [(i * (d + 1)) + int(grad_idx2[i]) for i in range(n2)]
if not diag:
return K[..., p1, :][..., p2]
else:
return K[..., p1]
def num_outputs_per_input(self, x1: torch.Tensor, x2: torch.Tensor) -> int:
return 1
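if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the final input column indexes
    # what is observed at each row (0 for f, i for df/dx_i). Shapes and values below are
    # arbitrary illustrative assumptions.
    x = torch.rand(5, 2)  # 5 points in 2 dimensions
    obs_index = torch.tensor([[0.0], [0.0], [1.0], [2.0], [0.0]])  # f, f, df/dx1, df/dx2, f
    x_aug = torch.cat([x, obs_index], dim=-1)
    kernel = RBFKernelPartialObsGrad()
    K = kernel.forward(x_aug, x_aug)
    print(K.shape)  # torch.Size([5, 5]): one entry per (observation, observation) pair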
|
aepsych-main
|
aepsych/kernels/rbf_partial_grad.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
aepsych/kernels/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
r"""
"""
from __future__ import annotations
from typing import Optional
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCAcquisition
from botorch.acquisition.input_constructors import acqf_input_constructor
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.acquisition.objective import MCAcquisitionObjective
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import t_batch_mode_transform
from torch import Tensor
from torch.distributions.bernoulli import Bernoulli
def bald_acq(obj_samples: torch.Tensor) -> torch.Tensor:
"""Evaluate Mutual Information acquisition function.
With latent function F and X a hypothetical observation at a new point,
    I(F; X) = I(X; F) = H(X) - H(X|F),
    H(X|F) = E_f[ H(X|F = f) ],
i.e., we take the posterior entropy of the (Bernoulli) observation X given the
current model posterior and subtract the conditional entropy on F, that being
the mean entropy over the posterior for F. This is equivalent to the BALD
acquisition function in Houlsby et al. NeurIPS 2012.
Args:
obj_samples (torch.Tensor): Objective samples from the GP, of
shape num_samples x batch_shape x d_out
Returns:
torch.Tensor: Value of acquisition at samples.
"""
mean_p = obj_samples.mean(dim=0)
posterior_entropies = Bernoulli(mean_p).entropy().squeeze(-1)
sample_entropies = Bernoulli(obj_samples).entropy()
conditional_entropies = sample_entropies.mean(dim=0).squeeze(-1)
return posterior_entropies - conditional_entropies
class BernoulliMCMutualInformation(MCAcquisitionFunction):
"""Mutual Information acquisition function for a bernoulli outcome.
Given a model and an objective link function, calculate the mutual
information of a trial at a new point and the distribution on the
latent function.
Objective here should give values in (0, 1) (e.g. logit or probit).
"""
def __init__(
self,
model: Model,
objective: MCAcquisitionObjective,
sampler: Optional[MCSampler] = None,
) -> None:
r"""Single Bernoulli mutual information for active learning
Args:
model (Model): A fitted model.
objective (MCAcquisitionObjective): An MCAcquisitionObjective representing the link function
(e.g., logistic or probit)
sampler (MCSampler, optional): The sampler used for drawing MC samples.
"""
if sampler is None:
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([1024]))
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=None
)
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate mutual information on the candidate set `X`.
Args:
X: A `batch_size x q x d`-dim Tensor.
Returns:
Tensor of shape `batch_size x q` representing the mutual
information of a hypothetical trial at X that active
learning hopes to maximize.
"""
post = self.model.posterior(X)
samples = self.sampler(post)
return self.acquisition(self.objective(samples, X))
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
"""Evaluate the acquisition function value based on samples.
Args:
obj_samples (torch.Tensor): Samples from the model, transformed through the objective.
Returns:
torch.Tensor: value of the acquisition function (BALD) at the input samples.
"""
# RejectionSampler drops the final dim so we reaugment it
# here for compatibility with non-Monotonic MCAcquisition
if len(obj_samples.shape) == 2:
obj_samples = obj_samples[..., None]
return bald_acq(obj_samples)
@acqf_input_constructor(BernoulliMCMutualInformation)
def construct_inputs_mi(
model,
training_data,
objective=None,
sampler=None,
**kwargs,
):
return {
"model": model,
"objective": objective,
"sampler": sampler,
}
class MonotonicBernoulliMCMutualInformation(MonotonicMCAcquisition):
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
"""Evaluate the acquisition function value based on samples.
Args:
obj_samples (torch.Tensor): Samples from the model, transformed through the objective.
Returns:
torch.Tensor: value of the acquisition function (BALD) at the input samples.
"""
        # TODO this is identical to the non-monotonic BALV acquisition with a different
# base class mixin, consider redesigning?
# RejectionSampler drops the final dim so we reaugment it
# here for compatibility with non-Monotonic MCAcquisition
if len(obj_samples.shape) == 2:
obj_samples = obj_samples[..., None]
return bald_acq(obj_samples)
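if __name__ == "__main__":
    # Hedged sketch, not part of the original module: evaluate bald_acq on toy probability
    # samples of shape num_samples x batch_shape x d_out. The numbers are illustrative only.
    torch.manual_seed(0)
    obj_samples = 0.02 + 0.96 * torch.rand(128, 4, 1)  # keep probabilities away from 0 and 1
    print(bald_acq(obj_samples))  # shape (4,): mutual information per batch element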
|
aepsych-main
|
aepsych/acquisition/mutual_information.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Union
import torch
from aepsych.acquisition.objective import ProbitObjective
from botorch.acquisition.input_constructors import acqf_input_constructor
from botorch.acquisition.monte_carlo import (
MCAcquisitionFunction,
MCAcquisitionObjective,
MCSampler,
)
from botorch.models.model import Model
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import t_batch_mode_transform
from torch import Tensor
class MCLevelSetEstimation(MCAcquisitionFunction):
def __init__(
self,
model: Model,
target: Union[float, Tensor] = 0.75,
beta: Union[float, Tensor] = 3.84,
objective: Optional[MCAcquisitionObjective] = None,
sampler: Optional[MCSampler] = None,
) -> None:
r"""Monte-carlo level set estimation.
Args:
model: A fitted model.
target: the level set (after objective transform) to be estimated
beta: a parameter that governs explore-exploit tradeoff
objective: An MCAcquisitionObjective representing the link function
(e.g., logistic or probit.) applied on the samples.
Can be implemented via GenericMCObjective.
sampler: The sampler used for drawing MC samples.
"""
if sampler is None:
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([512]))
if objective is None:
objective = ProbitObjective()
super().__init__(model=model, sampler=sampler, objective=None, X_pending=None)
self.objective = objective
self.beta = beta
self.target = target
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
"""Evaluate the acquisition based on objective samples.
Usually you should not call this directly unless you are
subclassing this class and modifying how objective samples
are generated.
Args:
obj_samples (torch.Tensor): Samples from the model, transformed
by the objective. Should be samples x batch_shape.
Returns:
torch.Tensor: Acquisition function at the sampled values.
"""
mean = obj_samples.mean(dim=0)
variance = obj_samples.var(dim=0)
# prevent numerical issues if probit makes all the values 1 or 0
variance = torch.clamp(variance, min=1e-5)
delta = torch.sqrt(self.beta * variance)
return delta - torch.abs(mean - self.target)
@t_batch_mode_transform()
def forward(self, X: torch.Tensor) -> torch.Tensor:
"""Evaluate the acquisition function
Args:
X (torch.Tensor): Points at which to evaluate.
Returns:
            torch.Tensor: Value of the acquisition function at these points.
"""
post = self.model.posterior(X)
samples = self.sampler(post) # num_samples x batch_shape x q x d_out
return self.acquisition(self.objective(samples, X)).squeeze(-1)
@acqf_input_constructor(MCLevelSetEstimation)
def construct_inputs_lse(
model,
training_data,
objective=None,
target=0.75,
beta=3.84,
sampler=None,
**kwargs,
):
return {
"model": model,
"objective": objective,
"target": target,
"beta": beta,
"sampler": sampler,
}
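if __name__ == "__main__":
    # Hedged sketch, not part of the original module: the LSE acquisition value computed
    # directly from toy objective samples, mirroring MCLevelSetEstimation.acquisition
    # without needing a fitted model. Numbers are illustrative assumptions.
    torch.manual_seed(0)
    obj_samples = torch.rand(256, 8)  # samples x batch_shape, already in probability space
    mean = obj_samples.mean(dim=0)
    variance = obj_samples.var(dim=0).clamp(min=1e-5)
    print(torch.sqrt(3.84 * variance) - torch.abs(mean - 0.75))  # delta - |mean - target|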
|
aepsych-main
|
aepsych/acquisition/lse.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional, Tuple
import torch
from botorch.acquisition.objective import PosteriorTransform
from gpytorch.models import GP
from gpytorch.utils.quadrature import GaussHermiteQuadrature1D
from torch import Tensor
from torch.distributions import Normal
from .bvn import bvn_cdf
def posterior_at_xstar_xq(
model: GP,
Xstar: Tensor,
Xq: Tensor,
posterior_transform: Optional[PosteriorTransform] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Evaluate the posteriors of f at single point Xstar and set of points Xq.
Args:
model: The model to evaluate.
Xstar: (b x 1 x d) tensor.
Xq: (b x m x d) tensor.
Returns:
Mu_s: (b x 1) mean at Xstar.
Sigma2_s: (b x 1) variance at Xstar.
Mu_q: (b x m) mean at Xq.
Sigma2_q: (b x m) variance at Xq.
Sigma_sq: (b x m) covariance between Xstar and each point in Xq.
"""
# Evaluate posterior and extract needed components
Xext = torch.cat((Xstar, Xq), dim=-2)
posterior = model.posterior(Xext, posterior_transform=posterior_transform)
mu = posterior.mean[..., :, 0]
Mu_s = mu[..., 0].unsqueeze(-1)
Mu_q = mu[..., 1:]
Cov = posterior.distribution.covariance_matrix
Sigma2_s = Cov[..., 0, 0].unsqueeze(-1)
Sigma2_q = torch.diagonal(Cov[..., 1:, 1:], dim1=-1, dim2=-2)
Sigma_sq = Cov[..., 0, 1:]
return Mu_s, Sigma2_s, Mu_q, Sigma2_q, Sigma_sq
def lookahead_levelset_at_xstar(
model: GP,
Xstar: Tensor,
Xq: Tensor,
posterior_transform: Optional[PosteriorTransform] = None,
**kwargs: Dict[str, Any],
):
"""
Evaluate the look-ahead level-set posterior at Xq given observation at xstar.
Args:
model: The model to evaluate.
Xstar: (b x 1 x d) observation point.
Xq: (b x m x d) reference points.
gamma: Threshold in f-space.
Returns:
Px: (b x m) Level-set posterior at Xq, before observation at xstar.
P1: (b x m) Level-set posterior at Xq, given observation of 1 at xstar.
P0: (b x m) Level-set posterior at Xq, given observation of 0 at xstar.
py1: (b x 1) Probability of observing 1 at xstar.
"""
Mu_s, Sigma2_s, Mu_q, Sigma2_q, Sigma_sq = posterior_at_xstar_xq(
model=model, Xstar=Xstar, Xq=Xq, posterior_transform=posterior_transform
)
    gamma = kwargs.get("gamma")
    if gamma is None:
        raise RuntimeError("lookahead_levelset_at_xstar requires passing gamma!")
# Compute look-ahead components
Norm = torch.distributions.Normal(0, 1)
Sigma_q = torch.sqrt(Sigma2_q)
b_q = (gamma - Mu_q) / Sigma_q
Phi_bq = Norm.cdf(b_q)
denom = torch.sqrt(1 + Sigma2_s)
a_s = Mu_s / denom
Phi_as = Norm.cdf(a_s)
Z_rho = -Sigma_sq / (Sigma_q * denom)
Z_qs = bvn_cdf(a_s, b_q, Z_rho)
Px = Phi_bq
py1 = Phi_as
P1 = Z_qs / py1
P0 = (Phi_bq - Z_qs) / (1 - py1)
return Px, P1, P0, py1
def lookahead_p_at_xstar(
model: GP,
Xstar: Tensor,
Xq: Tensor,
posterior_transform: Optional[PosteriorTransform] = None,
**kwargs: Dict[str, Any],
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
Evaluate the look-ahead response probability posterior at Xq given observation at xstar.
Uses the approximation given in expr. 9 in:
Zhao, Guang, et al. "Efficient active learning for Gaussian process classification by
error reduction." Advances in Neural Information Processing Systems 34 (2021): 9734-9746.
Args:
model: The model to evaluate.
Xstar: (b x 1 x d) observation point.
Xq: (b x m x d) reference points.
kwargs: ignored (here for compatibility with other kinds of lookahead)
Returns:
Px: (b x m) Response posterior at Xq, before observation at xstar.
P1: (b x m) Response posterior at Xq, given observation of 1 at xstar.
P0: (b x m) Response posterior at Xq, given observation of 0 at xstar.
py1: (b x 1) Probability of observing 1 at xstar.
"""
Mu_s, Sigma2_s, Mu_q, Sigma2_q, Sigma_sq = posterior_at_xstar_xq(
model=model, Xstar=Xstar, Xq=Xq, posterior_transform=posterior_transform
)
probit = Normal(0, 1).cdf
def lookahead_inner(f_q):
mu_tilde_star = Mu_s + (f_q - Mu_q) * Sigma_sq / Sigma2_q
sigma_tilde_star = Sigma2_s - (Sigma_sq**2) / Sigma2_q
return probit(mu_tilde_star / torch.sqrt(sigma_tilde_star + 1)) * probit(f_q)
pstar_marginal_1 = probit(Mu_s / torch.sqrt(1 + Sigma2_s))
pstar_marginal_0 = 1 - pstar_marginal_1
pq_marginal_1 = probit(Mu_q / torch.sqrt(1 + Sigma2_q))
quad = GaussHermiteQuadrature1D()
fq_mvn = Normal(Mu_q, torch.sqrt(Sigma2_q))
joint_ystar1_yq1 = quad(lookahead_inner, fq_mvn)
joint_ystar0_yq1 = pq_marginal_1 - joint_ystar1_yq1
    # now we need to go from the joint to the marginal on xq
lookahead_pq1 = joint_ystar1_yq1 / pstar_marginal_1
lookahead_pq0 = joint_ystar0_yq1 / pstar_marginal_0
return pq_marginal_1, lookahead_pq1, lookahead_pq0, pstar_marginal_1
def approximate_lookahead_levelset_at_xstar(
model: GP,
Xstar: Tensor,
Xq: Tensor,
gamma: float,
posterior_transform: Optional[PosteriorTransform] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
The look-ahead posterior approximation of Lyu et al.
Args:
model: The model to evaluate.
Xstar: (b x 1 x d) observation point.
Xq: (b x m x d) reference points.
gamma: Threshold in f-space.
Returns:
Px: (b x m) Level-set posterior at Xq, before observation at xstar.
P1: (b x m) Level-set posterior at Xq, given observation of 1 at xstar.
P0: (b x m) Level-set posterior at Xq, given observation of 0 at xstar.
py1: (b x 1) Probability of observing 1 at xstar.
"""
Mu_s, Sigma2_s, Mu_q, Sigma2_q, Sigma_sq = posterior_at_xstar_xq(
model=model, Xstar=Xstar, Xq=Xq, posterior_transform=posterior_transform
)
Norm = torch.distributions.Normal(0, 1)
Mu_s_pdf = torch.exp(Norm.log_prob(Mu_s))
Mu_s_cdf = Norm.cdf(Mu_s)
# Formulae from the supplement of the paper (Result 2)
vnp1_p = Mu_s_pdf**2 / Mu_s_cdf**2 + Mu_s * Mu_s_pdf / Mu_s_cdf # (C.4)
p_p = Norm.cdf(Mu_s / torch.sqrt(1 + Sigma2_s)) # (C.5)
vnp1_n = Mu_s_pdf**2 / (1 - Mu_s_cdf) ** 2 - Mu_s * Mu_s_pdf / (
1 - Mu_s_cdf
) # (C.6)
p_n = 1 - p_p # (C.7)
vtild = vnp1_p * p_p + vnp1_n * p_n
Sigma2_q_np1 = Sigma2_q - Sigma_sq**2 / ((1 / vtild) + Sigma2_s) # (C.8)
Px = Norm.cdf((gamma - Mu_q) / torch.sqrt(Sigma2_q))
P1 = Norm.cdf((gamma - Mu_q) / torch.sqrt(Sigma2_q_np1))
P0 = P1 # Same because we ignore value of y in this approximation
py1 = 0.5 * torch.ones(*Px.shape[:-1], 1) # Value doesn't matter because P1 = P0
return Px, P1, P0, py1
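if __name__ == "__main__":
    # Hedged sketch, not part of the original module: shapes returned by posterior_at_xstar_xq
    # for a toy (unfitted) botorch SingleTaskGP. The model and data are illustrative assumptions.
    from botorch.models import SingleTaskGP

    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = torch.rand(10, 1, dtype=torch.double)
    model = SingleTaskGP(train_X, train_Y)
    Xstar = torch.rand(1, 1, 2, dtype=torch.double)  # b x 1 x d
    Xq = torch.rand(1, 5, 2, dtype=torch.double)  # b x m x d
    Mu_s, Sigma2_s, Mu_q, Sigma2_q, Sigma_sq = posterior_at_xstar_xq(model, Xstar, Xq)
    print(Mu_s.shape, Mu_q.shape, Sigma_sq.shape)  # (1, 1), (1, 5), (1, 5)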
|
aepsych-main
|
aepsych/acquisition/lookahead_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.posteriors import Posterior
from botorch.sampling.base import MCSampler
from torch import Tensor
class RejectionSampler(MCSampler):
"""
Samples from a posterior subject to the constraint that samples in constrained_idx
should be >= 0.
If not enough feasible samples are generated, will return the least violating
samples.
"""
def __init__(
self, num_samples: int, num_rejection_samples: int, constrained_idx: Tensor
):
"""Initialize RejectionSampler
Args:
num_samples (int): Number of samples to return. Note that if fewer samples
than this number are positive in the required dimension, the remaining
samples returned will be the "least violating", i.e. closest to 0.
num_rejection_samples (int): Number of samples to draw before rejecting.
constrained_idx (Tensor): Indices of input dimensions that should be
constrained positive.
"""
self.num_samples = num_samples
self.num_rejection_samples = num_rejection_samples
self.constrained_idx = constrained_idx
super().__init__(sample_shape=torch.Size([num_samples]))
def forward(self, posterior: Posterior) -> Tensor:
"""Run the rejection sampler.
Args:
posterior (Posterior): The unconstrained GP posterior object
to perform rejection samples on.
Returns:
Tensor: Kept samples.
"""
samples = posterior.rsample(
sample_shape=torch.Size([self.num_rejection_samples])
)
assert (
samples.shape[-1] == 1
), "Batches not supported" # TODO T68656582 handle batches later
constrained_samps = samples[:, self.constrained_idx, 0]
valid = (constrained_samps >= 0).all(dim=1)
if valid.sum() < self.num_samples:
worst_violation = constrained_samps.min(dim=1)[0]
keep = torch.argsort(worst_violation, descending=True)[: self.num_samples]
else:
keep = torch.where(valid)[0][: self.num_samples]
return samples[keep, :, :]
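if __name__ == "__main__":
    # Hedged sketch, not part of the original module: draw samples from an (unfitted) botorch
    # SingleTaskGP posterior subject to the positivity constraint on the last three points.
    # The model, data, and constrained indices are illustrative assumptions.
    from botorch.models import SingleTaskGP

    train_X = torch.rand(8, 1, dtype=torch.double)
    train_Y = torch.rand(8, 1, dtype=torch.double)
    posterior = SingleTaskGP(train_X, train_Y).posterior(torch.rand(6, 1, dtype=torch.double))
    sampler = RejectionSampler(
        num_samples=4, num_rejection_samples=64, constrained_idx=torch.tensor([3, 4, 5])
    )
    print(sampler(posterior).shape)  # torch.Size([4, 6, 1])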
|
aepsych-main
|
aepsych/acquisition/rejection_sampler.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .lookahead import ApproxGlobalSUR, EAVC, GlobalMI, GlobalSUR, LocalMI, LocalSUR
from .lse import MCLevelSetEstimation
from .mc_posterior_variance import MCPosteriorVariance, MonotonicMCPosteriorVariance
from .monotonic_rejection import MonotonicMCLSE
from .mutual_information import (
BernoulliMCMutualInformation,
MonotonicBernoulliMCMutualInformation,
)
from .objective import (
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
ProbitObjective,
semi_p,
)
lse_acqfs = [
MonotonicMCLSE,
GlobalMI,
GlobalSUR,
ApproxGlobalSUR,
EAVC,
LocalMI,
LocalSUR,
]
__all__ = [
"BernoulliMCMutualInformation",
"MonotonicBernoulliMCMutualInformation",
"MonotonicMCLSE",
"MCPosteriorVariance",
"MonotonicMCPosteriorVariance",
"MCPosteriorVariance",
"MCLevelSetEstimation",
"ProbitObjective",
"FloorProbitObjective",
"FloorLogitObjective",
"FloorGumbelObjective",
"GlobalMI",
"GlobalSUR",
"ApproxGlobalSUR",
"EAVC",
"LocalMI",
"LocalSUR",
"semi_p",
]
Config.register_module(sys.modules[__name__])
|
aepsych-main
|
aepsych/acquisition/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCAcquisition
from aepsych.acquisition.objective import ProbitObjective
from botorch.acquisition.input_constructors import acqf_input_constructor
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.acquisition.objective import MCAcquisitionObjective
from botorch.models.model import Model
from botorch.sampling.base import MCSampler
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.transforms import t_batch_mode_transform
from torch import Tensor
def balv_acq(obj_samps: torch.Tensor) -> torch.Tensor:
"""Evaluate BALV (posterior variance) on a set of objective samples.
Args:
obj_samps (torch.Tensor): Samples from the GP, transformed by the objective.
Should be samples x batch_shape.
Returns:
torch.Tensor: Acquisition function value.
"""
# the output of objective is of shape num_samples x batch_shape x d_out
# objective should project the last dimension to 1d,
# so incoming should be samples x batch_shape, we take var in samp dim
return obj_samps.var(dim=0).squeeze(-1)
class MCPosteriorVariance(MCAcquisitionFunction):
r"""Posterior variance, computed using samples so we can use objective/transform"""
def __init__(
self,
model: Model,
objective: Optional[MCAcquisitionObjective] = None,
sampler: Optional[MCSampler] = None,
) -> None:
r"""Posterior Variance of Link Function
Args:
model: A fitted model.
objective: An MCAcquisitionObjective representing the link function
(e.g., logistic or probit.) applied on the difference of (usually 1-d)
two samples. Can be implemented via GenericMCObjective.
sampler: The sampler used for drawing MC samples.
"""
if sampler is None:
sampler = SobolQMCNormalSampler(sample_shape=torch.Size([512]))
if objective is None:
objective = ProbitObjective()
super().__init__(model=model, sampler=sampler, objective=None, X_pending=None)
self.objective = objective
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate MCPosteriorVariance on the candidate set `X`.
Args:
X: A `batch_size x q x d`-dim Tensor
Returns:
Posterior variance of link function at X that active learning
hopes to maximize
"""
# the output is of shape batch_shape x q x d_out
post = self.model.posterior(X)
samples = self.sampler(post) # num_samples x batch_shape x q x d_out
return self.acquisition(self.objective(samples, X))
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
# RejectionSampler drops the final dim so we reaugment it
# here for compatibility with non-Monotonic MCAcquisition
if len(obj_samples.shape) == 2:
obj_samples = obj_samples[..., None]
return balv_acq(obj_samples)
@acqf_input_constructor(MCPosteriorVariance)
def construct_inputs(
model,
training_data,
objective=None,
sampler=None,
**kwargs,
):
return {
"model": model,
"objective": objective,
"sampler": sampler,
}
class MonotonicMCPosteriorVariance(MonotonicMCAcquisition):
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
return balv_acq(obj_samples)
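if __name__ == "__main__":
    # Hedged sketch, not part of the original module: BALV on toy objective samples of shape
    # samples x batch_shape x 1. The numbers are illustrative only.
    torch.manual_seed(0)
    obj_samps = torch.rand(256, 5, 1)
    print(balv_acq(obj_samps))  # shape (5,): posterior variance per batch element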
|
aepsych-main
|
aepsych/acquisition/mc_posterior_variance.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from math import pi as _pi
import torch
inv_2pi = 1 / (2 * _pi)
_neg_inv_sqrt2 = -1 / (2**0.5)
def _gauss_legendre20(dtype):
_abscissae = torch.tensor(
[
0.9931285991850949,
0.9639719272779138,
0.9122344282513259,
0.8391169718222188,
0.7463319064601508,
0.6360536807265150,
0.5108670019508271,
0.3737060887154196,
0.2277858511416451,
0.07652652113349733,
],
dtype=dtype,
)
_weights = torch.tensor(
[
0.01761400713915212,
0.04060142980038694,
0.06267204833410906,
0.08327674157670475,
0.1019301198172404,
0.1181945319615184,
0.1316886384491766,
0.1420961093183821,
0.1491729864726037,
0.1527533871307259,
],
dtype=dtype,
)
abscissae = torch.cat([1.0 - _abscissae, 1.0 + _abscissae], dim=0)
weights = torch.cat([_weights, _weights], dim=0)
return abscissae, weights
def _ndtr(x: torch.Tensor) -> torch.Tensor:
"""
Standard normal CDF. Called <phid> in Genz's original code.
"""
return 0.5 * torch.erfc(_neg_inv_sqrt2 * x)
def _bvnu(
dh: torch.Tensor,
dk: torch.Tensor,
r: torch.Tensor,
) -> torch.Tensor:
"""
Primary subroutine for bvnu()
"""
# Precompute some terms
h = dh
k = dk
hk = h * k
x, w = _gauss_legendre20(dtype=dh.dtype)
asr = 0.5 * torch.asin(r)
sn = torch.sin(asr[..., None] * x)
res = (sn * hk[..., None] - 0.5 * (h**2 + k**2)[..., None]) / (1 - sn**2)
res = torch.sum(w * torch.exp(res), dim=-1)
res = res * inv_2pi * asr + _ndtr(-h) * _ndtr(-k)
return torch.clip(res, 0, 1)
def bvn_cdf(
xu: torch.Tensor,
yu: torch.Tensor,
r: torch.Tensor,
) -> torch.Tensor:
"""
Evaluate the bivariate normal CDF.
WARNING: Implements only the routine for moderate levels of correlation. Will be
inaccurate and should not be used for correlations larger than 0.925.
Standard (mean 0, var 1) bivariate normal distribution with correlation r.
Evaluated from -inf to xu, and -inf to yu.
Based on function developed by Alan Genz:
http://www.math.wsu.edu/faculty/genz/software/matlab/bvn.m
based in turn on
Drezner, Z and G.O. Wesolowsky, (1989),
    On the computation of the bivariate normal integral,
Journal of Statist. Comput. Simul. 35, pp. 101-107.
Args:
xu: Upper limits for cdf evaluation in x
yu: Upper limits for cdf evaluation in y
r: BVN correlation
Returns: Tensor of cdf evaluations of same size as xu, yu, and r.
"""
p = 1 - _ndtr(-xu) - _ndtr(-yu) + _bvnu(xu, yu, r)
return torch.clip(p, 0, 1)
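if __name__ == "__main__":
    # Hedged sanity check, not part of the original module: with r = 0 the bivariate CDF
    # factorizes into the product of the two univariate normal CDFs.
    xu = torch.tensor([0.0, 1.0])
    yu = torch.tensor([0.5, -0.3])
    r = torch.zeros(2)
    print(bvn_cdf(xu, yu, r))
    print(_ndtr(xu) * _ndtr(yu))  # should match the line above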
|
aepsych-main
|
aepsych/acquisition/bvn.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional, Tuple
from ax.models.torch.botorch_modular.acquisition import Acquisition
from botorch.acquisition.objective import MCAcquisitionObjective, PosteriorTransform
class AEPsychAcquisition(Acquisition):
def get_botorch_objective_and_transform(
self, **kwargs
) -> Tuple[Optional[MCAcquisitionObjective], Optional[PosteriorTransform]]:
objective, transform = super().get_botorch_objective_and_transform(**kwargs)
if "objective" in self.options:
objective = self.options.pop("objective")
return objective, transform
|
aepsych-main
|
aepsych/acquisition/acquisition.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple, cast
import numpy as np
import torch
from aepsych.utils import make_scaled_sobol
from botorch.acquisition import AcquisitionFunction
from botorch.acquisition.input_constructors import acqf_input_constructor
from botorch.acquisition.objective import PosteriorTransform
from botorch.models.gpytorch import GPyTorchModel
from botorch.utils.transforms import t_batch_mode_transform
from scipy.stats import norm
from torch import Tensor
from .lookahead_utils import (
approximate_lookahead_levelset_at_xstar,
lookahead_levelset_at_xstar,
lookahead_p_at_xstar,
)
def Hb(p: Tensor):
"""
Binary entropy.
Args:
p: Tensor of probabilities.
Returns: Binary entropy for each probability.
"""
epsilon = torch.tensor(np.finfo(float).eps)
p = torch.clamp(p, min=epsilon, max=1 - epsilon)
return -torch.nan_to_num(p * torch.log2(p) + (1 - p) * torch.log2(1 - p))
def MI_fn(Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
"""
Average mutual information.
H(p) - E_y*[H(p | y*)]
Args:
Px: (b x m) Level-set posterior before observation
P1: (b x m) Level-set posterior given observation of 1
P0: (b x m) Level-set posterior given observation of 0
py1: (b x 1) Probability of observing 1
Returns: (b) tensor of mutual information averaged over Xq.
"""
mi = Hb(Px) - py1 * Hb(P1) - (1 - py1) * Hb(P0)
return mi.sum(dim=-1)
def ClassErr(p: Tensor) -> Tensor:
"""
Expected classification error, min(p, 1-p).
"""
return torch.min(p, 1 - p)
def SUR_fn(Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
"""
Stepwise uncertainty reduction.
Expected reduction in expected classification error given observation at Xstar,
averaged over Xq.
Args:
Px: (b x m) Level-set posterior before observation
P1: (b x m) Level-set posterior given observation of 1
P0: (b x m) Level-set posterior given observation of 0
py1: (b x 1) Probability of observing 1
Returns: (b) tensor of SUR values.
"""
sur = ClassErr(Px) - py1 * ClassErr(P1) - (1 - py1) * ClassErr(P0)
return sur.sum(dim=-1)
def EAVC_fn(Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
"""
Expected absolute value change.
Expected absolute change in expected level-set volume given observation at Xstar.
Args:
Px: (b x m) Level-set posterior before observation
P1: (b x m) Level-set posterior given observation of 1
P0: (b x m) Level-set posterior given observation of 0
py1: (b x 1) Probability of observing 1
Returns: (b) tensor of EAVC values.
"""
avc1 = torch.abs((Px - P1).sum(dim=-1))
avc0 = torch.abs((Px - P0).sum(dim=-1))
return py1.squeeze(-1) * avc1 + (1 - py1).squeeze(-1) * avc0
class LookaheadAcquisitionFunction(AcquisitionFunction):
def __init__(
self,
model: GPyTorchModel,
target: Optional[float],
lookahead_type: str = "levelset",
) -> None:
"""
A localized look-ahead acquisition function.
Args:
model: The gpytorch model.
target: Threshold value to target in p-space.
"""
super().__init__(model=model)
if lookahead_type == "levelset":
self.lookahead_fn = lookahead_levelset_at_xstar
assert target is not None, "Need a target for levelset lookahead!"
self.gamma = norm.ppf(target)
elif lookahead_type == "posterior":
self.lookahead_fn = lookahead_p_at_xstar
self.gamma = None
else:
raise RuntimeError(f"Got unknown lookahead type {lookahead_type}!")
## Local look-ahead acquisitions
class LocalLookaheadAcquisitionFunction(LookaheadAcquisitionFunction):
def __init__(
self,
model: GPyTorchModel,
lookahead_type: str = "levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
) -> None:
"""
A localized look-ahead acquisition function.
Args:
model: The gpytorch model.
target: Threshold value to target in p-space.
"""
super().__init__(model=model, target=target, lookahead_type=lookahead_type)
self.posterior_transform = posterior_transform
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
"""
Evaluate acquisition function at X.
Args:
            X: (b x 1 x d) point at which to evaluate the acquisition function.
Returns: (b) tensor of acquisition values.
"""
Px, P1, P0, py1 = self.lookahead_fn(
model=self.model,
Xstar=X,
Xq=X,
gamma=self.gamma,
posterior_transform=self.posterior_transform,
) # Return shape here has m=1.
return self._compute_acqf(Px, P1, P0, py1)
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
raise NotImplementedError
class LocalMI(LocalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return MI_fn(Px, P1, P0, py1)
class LocalSUR(LocalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return SUR_fn(Px, P1, P0, py1)
@acqf_input_constructor(LocalMI, LocalSUR)
def construct_inputs_local_lookahead(
model: GPyTorchModel,
training_data,
lookahead_type="levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
**kwargs,
):
return {
"model": model,
"lookahead_type": lookahead_type,
"target": target,
"posterior_transform": posterior_transform,
}
## Global look-ahead acquisitions
class GlobalLookaheadAcquisitionFunction(LookaheadAcquisitionFunction):
def __init__(
self,
model: GPyTorchModel,
lookahead_type: str = "levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
query_set_size: Optional[int] = 256,
Xq: Optional[Tensor] = None,
) -> None:
"""
A global look-ahead acquisition function.
Args:
model: The gpytorch model.
target: Threshold value to target in p-space.
Xq: (m x d) global reference set.
"""
super().__init__(model=model, target=target, lookahead_type=lookahead_type)
self.posterior_transform = posterior_transform
assert (
Xq is not None or query_set_size is not None
), "Must pass either query set size or a query set!"
if Xq is not None and query_set_size is not None:
assert Xq.shape[0] == query_set_size, (
"If passing both Xq and query_set_size,"
+ "first dim of Xq should be query_set_size, got {Xq.shape[0]} != {query_set_size}"
)
if Xq is None:
# cast to an int in case we got a float from Config, which
# would raise on make_scaled_sobol
query_set_size = cast(int, query_set_size) # make mypy happy
assert int(query_set_size) == query_set_size # make sure casting is safe
# if the asserts above pass and Xq is None, query_set_size is not None so this is safe
query_set_size = int(query_set_size) # cast
Xq = make_scaled_sobol(model.lb, model.ub, query_set_size)
self.register_buffer("Xq", Xq)
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
"""
Evaluate acquisition function at X.
Args:
            X: (b x 1 x d) point at which to evaluate the acquisition function.
Returns: (b) tensor of acquisition values.
"""
Px, P1, P0, py1 = self._get_lookahead_posterior(X)
return self._compute_acqf(Px, P1, P0, py1)
def _get_lookahead_posterior(
self, X: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
Xq_batch = self.Xq.expand(X.shape[0], *self.Xq.shape)
return self.lookahead_fn(
model=self.model,
Xstar=X,
Xq=Xq_batch,
gamma=self.gamma,
posterior_transform=self.posterior_transform,
)
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
raise NotImplementedError
class GlobalMI(GlobalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return MI_fn(Px, P1, P0, py1)
class GlobalSUR(GlobalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return SUR_fn(Px, P1, P0, py1)
class ApproxGlobalSUR(GlobalSUR):
def __init__(
self,
model: GPyTorchModel,
lookahead_type="levelset",
target: Optional[float] = None,
query_set_size: Optional[int] = 256,
Xq: Optional[Tensor] = None,
) -> None:
assert (
lookahead_type == "levelset"
), f"ApproxGlobalSUR only supports lookahead on level set, got {lookahead_type}!"
super().__init__(
model=model,
target=target,
lookahead_type=lookahead_type,
query_set_size=query_set_size,
Xq=Xq,
)
def _get_lookahead_posterior(
self, X: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
Xq_batch = self.Xq.expand(X.shape[0], *self.Xq.shape)
return approximate_lookahead_levelset_at_xstar(
model=self.model,
Xstar=X,
Xq=Xq_batch,
gamma=self.gamma,
posterior_transform=self.posterior_transform,
)
class EAVC(GlobalLookaheadAcquisitionFunction):
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
return EAVC_fn(Px, P1, P0, py1)
class MOCU(GlobalLookaheadAcquisitionFunction):
"""
MOCU acquisition function given in expr. 4 of:
Zhao, Guang, et al. "Uncertainty-aware active learning for optimal Bayesian classifier."
International Conference on Learning Representations (ICLR) 2021.
"""
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
current_max_query = torch.maximum(Px, 1 - Px)
# expectation w.r.t. y* of the max of pq
lookahead_pq1_max = torch.maximum(P1, 1 - P1)
lookahead_pq0_max = torch.maximum(P0, 1 - P0)
lookahead_max_query = lookahead_pq1_max * py1 + lookahead_pq0_max * (1 - py1)
return (lookahead_max_query - current_max_query).mean(-1)
class SMOCU(GlobalLookaheadAcquisitionFunction):
"""
SMOCU acquisition function given in expr. 11 of:
Zhao, Guang, et al. "Bayesian active learning by soft mean objective cost of uncertainty."
International Conference on Artificial Intelligence and Statistics (AISTATS) 2021.
"""
def __init__(self, k, *args, **kwargs):
super().__init__(*args, **kwargs)
self.k = k
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
stacked = torch.stack((Px, 1 - Px), dim=-1)
current_softmax_query = torch.logsumexp(self.k * stacked, dim=-1) / self.k
# expectation w.r.t. y* of the max of pq
lookahead_pq1_max = torch.maximum(P1, 1 - P1)
lookahead_pq0_max = torch.maximum(P0, 1 - P0)
lookahead_max_query = lookahead_pq1_max * py1 + lookahead_pq0_max * (1 - py1)
return (lookahead_max_query - current_softmax_query).mean(-1)
class BEMPS(GlobalLookaheadAcquisitionFunction):
"""
BEMPS acquisition function given in:
Tan, Wei, et al. "Diversity Enhanced Active Learning with Strictly Proper Scoring Rules."
Advances in Neural Information Processing Systems 34 (2021).
"""
def __init__(self, scorefun, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scorefun = scorefun
def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
current_score = self.scorefun(Px)
lookahead_pq1_score = self.scorefun(P1)
lookahead_pq0_score = self.scorefun(P0)
lookahead_expected_score = lookahead_pq1_score * py1 + lookahead_pq0_score * (
1 - py1
)
return (lookahead_expected_score - current_score).mean(-1)
@acqf_input_constructor(GlobalMI, GlobalSUR, ApproxGlobalSUR, EAVC, MOCU, SMOCU, BEMPS)
def construct_inputs_global_lookahead(
model: GPyTorchModel,
training_data,
lookahead_type="levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
query_set_size: Optional[int] = 256,
Xq: Optional[Tensor] = None,
**kwargs,
):
lb = [bounds[0] for bounds in kwargs["bounds"]]
ub = [bounds[1] for bounds in kwargs["bounds"]]
Xq = Xq if Xq is not None else make_scaled_sobol(lb, ub, query_set_size)
return {
"model": model,
"lookahead_type": lookahead_type,
"target": target,
"posterior_transform": posterior_transform,
"query_set_size": query_set_size,
"Xq": Xq,
}
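if __name__ == "__main__":
    # Hedged sketch, not part of the original module: the look-ahead summary statistics on toy
    # (b x m) level-set posteriors. All numbers are illustrative assumptions.
    torch.manual_seed(0)
    Px = torch.rand(2, 10)  # level-set posterior before the observation
    P1 = torch.rand(2, 10)  # ... given an observation of 1 at Xstar
    P0 = torch.rand(2, 10)  # ... given an observation of 0 at Xstar
    py1 = torch.rand(2, 1)  # probability of observing 1 at Xstar
    print(MI_fn(Px, P1, P0, py1))  # global mutual information, shape (2,)
    print(SUR_fn(Px, P1, P0, py1))  # stepwise uncertainty reduction, shape (2,)
    print(EAVC_fn(Px, P1, P0, py1))  # expected absolute volume change, shape (2,)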
|
aepsych-main
|
aepsych/acquisition/lookahead.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.objective import IdentityMCObjective, MCAcquisitionObjective
from botorch.models.model import Model
from torch import Tensor
from .rejection_sampler import RejectionSampler
class MonotonicMCAcquisition(AcquisitionFunction):
"""
Acquisition function base class for use with the rejection sampling
monotonic GP. This handles the bookkeeping of the derivative
constraint points -- implement specific monotonic MC acquisition
in subclasses.
"""
def __init__(
self,
model: Model,
deriv_constraint_points: torch.Tensor,
num_samples: int = 32,
num_rejection_samples: int = 1024,
objective: Optional[MCAcquisitionObjective] = None,
) -> None:
"""Initialize MonotonicMCAcquisition
Args:
model (Model): Model to use, usually a MonotonicRejectionGP.
            num_samples (int, optional): Number of samples to keep from the rejection sampler. Defaults to 32.
num_rejection_samples (int, optional): Number of rejection samples to draw. Defaults to 1024.
objective (Optional[MCAcquisitionObjective], optional): Objective transform of the GP output
before evaluating the acquisition. Defaults to identity transform.
"""
super().__init__(model=model)
self.deriv_constraint_points = deriv_constraint_points
self.num_samples = num_samples
self.num_rejection_samples = num_rejection_samples
self.sampler_shape = torch.Size([])
if objective is None:
assert model.num_outputs == 1
objective = IdentityMCObjective()
else:
assert isinstance(objective, MCAcquisitionObjective)
self.add_module("objective", objective)
def forward(self, X: Tensor) -> Tensor:
"""Evaluate the acquisition function at a set of points.
Args:
X (Tensor): Points at which to evaluate the acquisition function.
Should be (b) x q x d, and q should be 1.
Returns:
Tensor: Acquisition function value at these points.
"""
# This is currently doing joint samples over (b), and requiring q=1
# TODO T68656582 support batches properly.
if len(X.shape) == 3:
assert X.shape[1] == 1, "q must be 1"
Xfull = torch.cat((X[:, 0, :], self.deriv_constraint_points), dim=0)
else:
Xfull = torch.cat((X, self.deriv_constraint_points), dim=0)
if not hasattr(self, "sampler") or Xfull.shape != self.sampler_shape:
self._set_sampler(X.shape)
self.sampler_shape = Xfull.shape
posterior = self.model.posterior(Xfull)
samples = self.sampler(posterior)
assert len(samples.shape) == 3
# Drop derivative samples
samples = samples[:, : X.shape[0], :]
# NOTE: Squeeze below makes sure that we pass in the same `X` that was used
# to generate the `samples`. This is necessitated by `MCAcquisitionObjective`,
# which verifies that `samples` and `X` have the same q-batch size.
obj_samples = self.objective(samples, X=X.squeeze(-2) if X.ndim == 3 else X)
return self.acquisition(obj_samples)
def _set_sampler(self, Xshape: torch.Size) -> None:
sampler = RejectionSampler(
num_samples=self.num_samples,
num_rejection_samples=self.num_rejection_samples,
constrained_idx=torch.arange(
Xshape[0], Xshape[0] + self.deriv_constraint_points.shape[0]
),
)
self.add_module("sampler", sampler)
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
class MonotonicMCLSE(MonotonicMCAcquisition):
def __init__(
self,
model: Model,
deriv_constraint_points: torch.Tensor,
target: float,
num_samples: int = 32,
num_rejection_samples: int = 1024,
beta: float = 3.84,
objective: Optional[MCAcquisitionObjective] = None,
) -> None:
"""Level set estimation acquisition function for use with monotonic models.
Args:
model (Model): Underlying model object, usually should be MonotonicRejectionGP.
target (float): Level set value to target (after the objective).
num_samples (int, optional): Number of MC samples to draw in MC acquisition. Defaults to 32.
num_rejection_samples (int, optional): Number of rejection samples from which to subsample monotonic ones. Defaults to 1024.
beta (float, optional): Parameter of the LSE acquisition function that governs exploration vs
exploitation (similarly to the same parameter in UCB). Defaults to 3.84 (1.96 ** 2), which maps to the straddle
heuristic of Bryan et al. 2005.
objective (Optional[MCAcquisitionObjective], optional): Objective transform. Defaults to identity transform.
"""
self.beta = beta
self.target = target
super().__init__(
model=model,
deriv_constraint_points=deriv_constraint_points,
num_samples=num_samples,
num_rejection_samples=num_rejection_samples,
objective=objective,
)
def acquisition(self, obj_samples: torch.Tensor) -> torch.Tensor:
mean = obj_samples.mean(dim=0)
variance = obj_samples.var(dim=0)
# prevent numerical issues if probit makes all the values 1 or 0
variance = torch.clamp(variance, min=1e-5)
delta = torch.sqrt(self.beta * variance)
return delta - torch.abs(mean - self.target)
|
aepsych-main
|
aepsych/acquisition/monotonic_rejection.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional
import torch
from botorch.acquisition.objective import MCAcquisitionObjective
from torch import Tensor
from torch.distributions.normal import Normal
class AEPsychObjective(MCAcquisitionObjective):
def inverse(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
raise NotImplementedError
class ProbitObjective(AEPsychObjective):
"""Probit objective
Transforms the input through the normal CDF (probit).
"""
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the objective (normal CDF).
Args:
samples (Tensor): GP samples.
X (Optional[Tensor], optional): ignored, here for compatibility
with MCAcquisitionObjective.
Returns:
            Tensor: samples transformed through the normal CDF (probabilities in (0, 1)).
"""
return Normal(loc=0, scale=1).cdf(samples.squeeze(-1))
def inverse(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the inverse of the objective (normal PPF).
Args:
samples (Tensor): GP samples.
X (Optional[Tensor], optional): ignored, here for compatibility
with MCAcquisitionObjective.
Returns:
            Tensor: probabilities mapped back to latent (f-space) values via the normal quantile function.
"""
return Normal(loc=0, scale=1).icdf(samples.squeeze(-1))
class FloorLinkObjective(AEPsychObjective):
"""
Wrapper for objectives to add a floor, when
the probability is known not to go below it.
"""
def __init__(self, floor=0.5):
self.floor = floor
super().__init__()
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the objective for input x and floor f
Args:
samples (Tensor): GP samples.
X (Optional[Tensor], optional): ignored, here for compatibility
with MCAcquisitionObjective.
Returns:
Tensor: outcome probability.
"""
return self.link(samples.squeeze(-1)) * (1 - self.floor) + self.floor
def inverse(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the inverse of the objective.
Args:
samples (Tensor): GP samples.
X (Optional[Tensor], optional): ignored, here for compatibility
with MCAcquisitionObjective.
Returns:
            Tensor: latent (f-space) values recovered from the floored probabilities.
"""
return self.inverse_link((samples - self.floor) / (1 - self.floor))
def link(self, samples):
raise NotImplementedError
def inverse_link(self, samples):
raise NotImplementedError
@classmethod
def from_config(cls, config):
floor = config.getfloat(cls.__name__, "floor")
return cls(floor=floor)
class FloorLogitObjective(FloorLinkObjective):
"""
Logistic sigmoid (aka expit, aka logistic CDF),
but with a floor so that its output is
between floor and 1.0.
"""
def link(self, samples):
return torch.special.expit(samples)
def inverse_link(self, samples):
return torch.special.logit(samples)
class FloorGumbelObjective(FloorLinkObjective):
"""
Gumbel CDF but with a floor so that its output
is between floor and 1.0. Note that this is not
the standard Gumbel distribution, but rather the
left-skewed Gumbel that arises as the log of the Weibull
distribution, e.g. Treutwein 1995, doi:10.1016/0042-6989(95)00016-X.
"""
def link(self, samples):
return torch.nan_to_num(
-torch.special.expm1(-torch.exp(samples)), posinf=1.0, neginf=0.0
)
def inverse_link(self, samples):
return torch.log(-torch.special.log1p(-samples))
class FloorProbitObjective(FloorLinkObjective):
"""
Probit (aka Gaussian CDF), but with a floor
so that its output is between floor and 1.0.
"""
def link(self, samples):
return Normal(0, 1).cdf(samples)
def inverse_link(self, samples):
return Normal(0, 1).icdf(samples)
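if __name__ == "__main__":
    # Hedged sketch, not part of the original module: floor objectives map latent GP samples
    # into [floor, 1) and invert back. The sample values are illustrative assumptions.
    torch.manual_seed(0)
    f = torch.randn(4, 1)  # toy GP samples with a trailing output dimension
    objective = FloorLogitObjective(floor=0.5)
    p = objective(f)  # probabilities in (0.5, 1.0)
    print(p)
    print(objective.inverse(p))  # approximately recovers f.squeeze(-1)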
|
aepsych-main
|
aepsych/acquisition/objective/objective.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional
import torch
from aepsych.config import Config
from aepsych.likelihoods import LinearBernoulliLikelihood
from botorch.acquisition.objective import MCAcquisitionObjective
from gpytorch.likelihoods import Likelihood
from torch import Tensor
class SemiPObjectiveBase(MCAcquisitionObjective):
"""Wraps the semi-parametric transform into an objective
that correctly extracts various things
"""
# because we have an extra dim for the SemiP batch dimension,
# all the q-batch output shape checks fail, disable them here
_verify_output_shape: bool = False
def __init__(self, stim_dim: int = 0):
super().__init__()
self.stim_dim = stim_dim
class SemiPProbabilityObjective(SemiPObjectiveBase):
"""Wraps the semi-parametric transform into an objective
that gives outcome probabilities
"""
    def __init__(self, likelihood: Optional[Likelihood] = None, *args, **kwargs):
"""Evaluates the probability objective.
Args:
            likelihood (Likelihood): Underlying SemiP likelihood (which we use for its objective/link).
other arguments are passed to the base class (notably, stim_dim).
"""
super().__init__(*args, **kwargs)
self.likelihood = likelihood or LinearBernoulliLikelihood()
def forward(self, samples: Tensor, X: Tensor) -> Tensor:
"""Evaluates the probability objective.
Args:
samples (Tensor): GP samples.
X (Tensor): Inputs at which to evaluate objective. Unlike most AEPsych objectives,
we need X here to split out the intensity dimension.
Returns:
Tensor: Response probabilities at the specific X values and function samples.
"""
Xi = X[..., self.stim_dim]
# the output of LinearBernoulliLikelihood is (nsamp x b x n x 1)
# but the output of MCAcquisitionObjective should be `nsamp x *batch_shape x q`
# so we remove the final dim
return self.likelihood.p(function_samples=samples, Xi=Xi).squeeze(-1)
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
return cls(likelihood=likelihood)
class SemiPThresholdObjective(SemiPObjectiveBase):
"""Wraps the semi-parametric transform into an objective
that gives the threshold distribution.
"""
def __init__(self, target: float, likelihood=None, *args, **kwargs):
"""Evaluates the probability objective.
Args:
target (float): the threshold to evaluate.
likelihood (Likelihood): Underlying SemiP likelihood (which we use for its inverse link)
other arguments are passed to the base class (notably, stim_dim).
"""
super().__init__(*args, **kwargs)
self.likelihood = likelihood or LinearBernoulliLikelihood()
self.fspace_target = self.likelihood.objective.inverse(torch.tensor(target))
def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
"""Evaluates the probability objective.
Args:
samples (Tensor): GP samples.
X (Tensor, optional): Ignored, here for compatibility with the objective API.
Returns:
Tensor: Threshold probabilities at the specific GP sample values.
"""
offset = samples[..., 0, :]
slope = samples[..., 1, :]
return (self.fspace_target + slope * offset) / slope
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
target = config.getfloat(classname, "target", fallback=0.75)
return cls(likelihood=likelihood, target=target)
|
aepsych-main
|
aepsych/acquisition/objective/semi_p.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ...config import Config
from .objective import (
AEPsychObjective,
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
ProbitObjective,
)
from .semi_p import SemiPProbabilityObjective, SemiPThresholdObjective
__all__ = [
"AEPsychObjective",
"FloorGumbelObjective",
"FloorLogitObjective",
"FloorProbitObjective",
"ProbitObjective",
"SemiPProbabilityObjective",
"SemiPThresholdObjective",
]
Config.register_module(sys.modules[__name__])
|
aepsych-main
|
aepsych/acquisition/objective/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
aepsych/means/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from gpytorch.means.constant_mean import ConstantMean
class ConstantMeanPartialObsGrad(ConstantMean):
"""A mean function for use with partial gradient observations.
This follows gpytorch.means.constant_mean_grad and sets the prior mean for
derivative observations to 0, though unlike that function it allows for
partial observation of derivatives.
The final column of input should be an index that is 0 if the observation
is of f, or i if it is of df/dxi.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
idx = input[..., -1].to(dtype=torch.long) > 0
mean_fit = super(ConstantMeanPartialObsGrad, self).forward(input[..., ~idx, :])
sz = mean_fit.shape[:-1] + torch.Size([input.shape[-2]])
mean = torch.zeros(sz)
mean[~idx] = mean_fit
return mean
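if __name__ == "__main__":
    # Hedged sketch, not part of the original module: the final input column flags derivative
    # observations, which receive a zero prior mean. Values are illustrative assumptions.
    mean_fn = ConstantMeanPartialObsGrad()
    x = torch.tensor([[0.1, 0.0], [0.2, 1.0], [0.3, 0.0]])  # last column: 0 = f, 1 = df/dx1
    print(mean_fn(x))  # shape (3,); the middle (derivative) entry is forced to zero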
|
aepsych-main
|
aepsych/means/constant_partial_grad.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import io
import logging
import os
import sys
import threading
import traceback
import warnings
from typing import Optional
import aepsych.database.db as db
import aepsych.utils_logging as utils_logging
import dill
import numpy as np
import pandas as pd
import torch
from aepsych.server.message_handlers import MESSAGE_MAP
from aepsych.server.message_handlers.handle_ask import ask
from aepsych.server.message_handlers.handle_setup import configure
from aepsych.server.replay import (
get_dataframe_from_replay,
get_strat_from_replay,
get_strats_from_replay,
replay,
)
from aepsych.server.sockets import BAD_REQUEST, DummySocket, PySocket
logger = utils_logging.getLogger(logging.INFO)
DEFAULT_DESC = "default description"
DEFAULT_NAME = "default name"
def get_next_filename(folder, fname, ext):
"""Generates appropriate filename for logging purposes."""
n = sum(1 for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f)))
return f"{folder}/{fname}_{n+1}.{ext}"
class AEPsychServer(object):
def __init__(self, socket=None, database_path=None):
"""Server for doing black box optimization using gaussian processes.
Keyword Arguments:
socket -- socket object that implements `send` and `receive` for json
messages (default: DummySocket()).
TODO actually make an abstract interface to subclass from here
"""
if socket is None:
self.socket = DummySocket()
else:
self.socket = socket
self.db = None
self.is_performing_replay = False
self.exit_server_loop = False
self._db_master_record = None
self._db_raw_record = None
self.db = db.Database(database_path)
self.skip_computations = False
self.strat_names = None
if self.db.is_update_required():
self.db.perform_updates()
self._strats = []
self._parnames = []
self._configs = []
self.strat_id = -1
self._pregen_asks = []
self.enable_pregen = False
self.debug = False
self.receive_thread = threading.Thread(
target=self._receive_send, args=(self.exit_server_loop,), daemon=True
)
self.queue = []
def cleanup(self):
"""Close the socket and terminate connection to the server.
Returns:
None
"""
self.socket.close()
def _receive_send(self, is_exiting: bool) -> None:
"""Receive messages from the client.
Args:
is_exiting (bool): True to terminate reception of new messages from the client, False otherwise.
Returns:
None
"""
while True:
request = self.socket.receive(is_exiting)
if request != BAD_REQUEST:
self.queue.append(request)
if self.exit_server_loop:
break
logger.info("Terminated input thread")
def _handle_queue(self) -> None:
"""Handles the queue of messages received by the server.
Returns:
None
"""
if self.queue:
request = self.queue.pop(0)
try:
result = self.handle_request(request)
except Exception as e:
error_message = f"Request '{request}' raised error '{e}'!"
result = f"server_error, {error_message}"
logger.error(f"{error_message}! Full traceback follows:")
logger.error(traceback.format_exc())
self.socket.send(result)
else:
if self.can_pregen_ask and (len(self._pregen_asks) == 0):
self._pregen_asks.append(ask(self))
def serve(self) -> None:
"""Run the server. Note that all configuration outside of socket type and port
happens via messages from the client. The server simply forwards messages from
the client to its `setup`, `ask` and `tell` methods, and responds with either
acknowledgment or other response as needed. To understand the server API, see
the docs on the methods in this class.
Returns:
None
Raises:
RuntimeError: if a request from a client has no request type
RuntimeError: if a request from a client has no known request type
TODO make things a little more robust to bad messages from client; this
requires resetting the req/rep queue status.
"""
logger.info("Server up, waiting for connections!")
logger.info("Ctrl-C to quit!")
# yeah we're not sanitizing input at all
# Start the method to accept a client connection
self.socket.accept_client()
self.receive_thread.start()
while True:
self._handle_queue()
if self.exit_server_loop:
break
# Close the socket and terminate with code 0
self.cleanup()
sys.exit(0)
def _unpack_strat_buffer(self, strat_buffer):
if isinstance(strat_buffer, io.BytesIO):
strat = torch.load(strat_buffer, pickle_module=dill)
strat_buffer.seek(0)
elif isinstance(strat_buffer, bytes):
warnings.warn(
"Strat buffer is not in bytes format!"
+ " This is a deprecated format, loading using dill.loads.",
DeprecationWarning,
)
strat = dill.loads(strat_buffer)
else:
raise RuntimeError("Trying to load strat in unknown format!")
return strat
def generate_experiment_table(
self,
experiment_id: str,
table_name: str = "experiment_table",
return_df: bool = False,
) -> Optional[pd.DataFrame]:
"""Generate a table of a given experiment with all the raw data.
This table is generated from the database, and is added to the
experiment's database.
Args:
experiment_id (str): The experiment ID to generate the table for.
table_name (str): The name of the table. Defaults to
"experiment_table".
return_df (bool): If True, also return the dataframe.
Returns:
pd.DataFrame: The dataframe of the experiment table, if
return_df is True.
"""
param_space = self.db.get_param_for(experiment_id, 1)
outcome_space = self.db.get_outcome_for(experiment_id, 1)
columns = []
columns.append("iteration_id")
for param in param_space:
columns.append(param.param_name)
for outcome in outcome_space:
columns.append(outcome.outcome_name)
columns.append("timestamp")
# Create dataframe
df = pd.DataFrame(columns=columns)
# Fill dataframe
for raw in self.db.get_raw_for(experiment_id):
row = {}
row["iteration_id"] = raw.unique_id
for param in raw.children_param:
row[param.param_name] = param.param_value
for outcome in raw.children_outcome:
row[outcome.outcome_name] = outcome.outcome_value
row["timestamp"] = raw.timestamp
# concat to dataframe
df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
# Make iteration_id the index
df.set_index("iteration_id", inplace=True)
# Save to .db file
df.to_sql(table_name, self.db.get_engine(), if_exists="replace")
if return_df:
return df
else:
return None
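    # Editor's note (usage sketch, not original code): a typical call, assuming a
    # hypothetical experiment id taken from the DB's master records, might look like
    #   df = server.generate_experiment_table(experiment_id, return_df=True)
    # which writes the table into the experiment's .db file and also returns it.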
### Properties that are set on a per-strat basis
@property
def strat(self):
if self.strat_id == -1:
return None
else:
return self._strats[self.strat_id]
@strat.setter
def strat(self, s):
self._strats.append(s)
@property
def config(self):
if self.strat_id == -1:
return None
else:
return self._configs[self.strat_id]
@config.setter
def config(self, s):
self._configs.append(s)
@property
def parnames(self):
if self.strat_id == -1:
return []
else:
return self._parnames[self.strat_id]
@parnames.setter
def parnames(self, s):
self._parnames.append(s)
@property
def n_strats(self):
return len(self._strats)
@property
def can_pregen_ask(self):
return self.strat is not None and self.enable_pregen
def _tensor_to_config(self, next_x):
config = {}
for name, val in zip(self.parnames, next_x):
if val.dim() == 0:
config[name] = [float(val)]
else:
config[name] = np.array(val)
return config
def _config_to_tensor(self, config):
unpacked = [config[name] for name in self.parnames]
# handle config elements being either scalars or length-1 lists
if isinstance(unpacked[0], list):
x = torch.tensor(np.stack(unpacked, axis=0)).squeeze(-1)
else:
x = torch.tensor(np.stack(unpacked))
return x
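    # Editor's note (illustrative, not original code): _tensor_to_config and
    # _config_to_tensor are approximate inverses. For example, with
    # parnames == ["par1", "par2"], a generated point tensor([0.1, 0.2]) maps to
    # {"par1": [0.1], "par2": [0.2]}, and that dict maps back to a length-2 tensor.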
def __getstate__(self):
# nuke the socket since it's not pickleble
state = self.__dict__.copy()
del state["socket"]
del state["db"]
return state
def write_strats(self, termination_type):
if self._db_master_record is not None and self.strat is not None:
logger.info(f"Dumping strats to DB due to {termination_type}.")
for strat in self._strats:
buffer = io.BytesIO()
torch.save(strat, buffer, pickle_module=dill)
buffer.seek(0)
self.db.record_strat(master_table=self._db_master_record, strat=buffer)
def generate_debug_info(self, exception_type, dumptype):
fname = get_next_filename(".", dumptype, "pkl")
logger.exception(f"Got {exception_type}, exiting! Server dump in {fname}")
dill.dump(self, open(fname, "wb"))
def handle_request(self, request):
if "type" not in request.keys():
raise RuntimeError(f"Request {request} contains no request type!")
else:
type = request["type"]
if type in MESSAGE_MAP.keys():
logger.info(f"Received msg [{type}]")
ret_val = MESSAGE_MAP[type](self, request)
return ret_val
else:
exception_message = (
f"unknown type: {type}. Allowed types [{MESSAGE_MAP.keys()}]"
)
raise RuntimeError(exception_message)
def replay(self, uuid_to_replay, skip_computations=False):
return replay(self, uuid_to_replay, skip_computations)
def get_strats_from_replay(self, uuid_of_replay=None, force_replay=False):
return get_strats_from_replay(self, uuid_of_replay, force_replay)
def get_strat_from_replay(self, uuid_of_replay=None, strat_id=-1):
return get_strat_from_replay(self, uuid_of_replay, strat_id)
def get_dataframe_from_replay(self, uuid_of_replay=None, force_replay=False):
return get_dataframe_from_replay(self, uuid_of_replay, force_replay)
#! THIS IS WHAT STARTS THE SERVER
def startServerAndRun(
server_class, socket=None, database_path=None, config_path=None, uuid_of_replay=None
):
server = server_class(socket=socket, database_path=database_path)
try:
if config_path is not None:
with open(config_path) as f:
config_str = f.read()
configure(server, config_str=config_str)
if socket is not None:
if uuid_of_replay is not None:
server.replay(uuid_of_replay, skip_computations=True)
server._db_master_record = server.db.get_master_record(uuid_of_replay)
server.serve()
else:
if config_path is not None:
logger.info(
"You have passed in a config path but this is a replay. If there's a config in the database it will be used instead of the passed in config path."
)
server.replay(uuid_of_replay)
except KeyboardInterrupt:
exception_type = "CTRL+C"
dump_type = "dump"
server.write_strats(exception_type)
server.generate_debug_info(exception_type, dump_type)
except RuntimeError as e:
exception_type = "RuntimeError"
dump_type = "crashdump"
server.write_strats(exception_type)
server.generate_debug_info(exception_type, dump_type)
raise RuntimeError(e)
def parse_argument():
parser = argparse.ArgumentParser(description="AEPsych Server!")
parser.add_argument(
"--port", metavar="N", type=int, default=5555, help="port to serve on"
)
parser.add_argument(
"--ip",
metavar="M",
type=str,
default="0.0.0.0",
help="ip to bind",
)
parser.add_argument(
"-s",
"--stratconfig",
help="Location of ini config file for strat",
type=str,
)
parser.add_argument(
"--logs",
type=str,
help="The logs path to use if not the default (./logs).",
default="logs",
)
parser.add_argument(
"-d",
"--db",
type=str,
help="The database to use if not the default (./databases/default.db).",
default=None,
)
parser.add_argument(
"-r", "--replay", type=str, help="UUID of the experiment to replay."
)
parser.add_argument(
"-m", "--resume", action="store_true", help="Resume server after replay."
)
args = parser.parse_args()
return args
def start_server(server_class, args):
logger.info("Starting the AEPsychServer")
try:
if "db" in args and args.db is not None:
database_path = args.db
if "replay" in args and args.replay is not None:
logger.info(f"Attempting to replay {args.replay}")
if args.resume is True:
sock = PySocket(port=args.port)
logger.info(f"Will resume {args.replay}")
else:
sock = None
startServerAndRun(
server_class,
socket=sock,
database_path=database_path,
uuid_of_replay=args.replay,
config_path=args.stratconfig,
)
else:
logger.info(f"Setting the database path {database_path}")
sock = PySocket(port=args.port)
startServerAndRun(
server_class,
database_path=database_path,
socket=sock,
config_path=args.stratconfig,
)
else:
sock = PySocket(port=args.port)
startServerAndRun(server_class, socket=sock, config_path=args.stratconfig)
except (KeyboardInterrupt, SystemExit):
logger.exception("Got Ctrl+C, exiting!")
sys.exit()
except RuntimeError as e:
fname = get_next_filename(".", "dump", "pkl")
logger.exception(f"CRASHING!! dump in {fname}")
raise RuntimeError(e)
def main(server_class=AEPsychServer):
args = parse_argument()
if args.logs:
        # override logger path
log_path = args.logs
logger = utils_logging.getLogger(logging.DEBUG, log_path)
logger.info(f"Saving logs to path: {log_path}")
start_server(server_class, args)
if __name__ == "__main__":
main(AEPsychServer)
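# Editor's usage sketch (assumptions, not original code): started as a script, the
# server listens on a PySocket and is driven entirely by client messages, e.g.
#   python server.py --port 5555 --db ./databases/default.db
# or programmatically:
#   s = AEPsychServer(socket=PySocket(port=5555))
#   s.serve()
# The exact script invocation path is an assumption; see parse_argument() above
# for the supported command-line flags.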
|
aepsych-main
|
aepsych/server/server.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .server import AEPsychServer
__all__ = ["AEPsychServer"]
|
aepsych-main
|
aepsych/server/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import warnings
import aepsych.utils_logging as utils_logging
import pandas as pd
from aepsych.server.message_handlers.handle_tell import flatten_tell_record
logger = utils_logging.getLogger(logging.INFO)
def replay(server, uuid_to_replay, skip_computations=False):
"""
Run a replay against the server. The UUID will be looked up in the database.
    If skip_computations is True, skip all the asks and queries, which should make the replay much faster.
"""
if uuid_to_replay is None:
raise RuntimeError("UUID is a required parameter to perform a replay")
if server.db is None:
raise RuntimeError("A database is required to perform a replay")
if skip_computations is True:
logger.info(
"skip_computations=True, make sure to refit the final strat before doing anything!"
)
master_record = server.db.get_master_record(uuid_to_replay)
if master_record is None:
raise RuntimeError(
f"The UUID {uuid_to_replay} isn't in the database. Unable to perform replay."
)
# this prevents writing back to the DB and creating a circular firing squad
server.is_performing_replay = True
server.skip_computations = skip_computations
for result in master_record.children_replay:
request = result.message_contents
logger.debug(f"replay - type = {result.message_type} request = {request}")
server.handle_request(request)
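# Editor's usage sketch (not original code): a replay is driven entirely by the
# messages recorded in the database, e.g.
#   replay(server, "some-experiment-uuid", skip_computations=True)
# where "some-experiment-uuid" is a placeholder for an experiment_id stored in the
# DB's master records. With skip_computations=True, refit the final strat afterwards
# before querying it (see the warning logged above).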
def get_strats_from_replay(server, uuid_of_replay=None, force_replay=False):
if uuid_of_replay is None:
records = server.db.get_master_records()
if len(records) > 0:
uuid_of_replay = records[-1].experiment_id
else:
raise RuntimeError("Server has no experiment records!")
if force_replay:
warnings.warn(
"Force-replaying to get non-final strats is deprecated after the ability"
+ " to save all strats was added, and will eventually be removed.",
DeprecationWarning,
)
replay(server, uuid_of_replay, skip_computations=True)
for strat in server._strats:
if strat.has_model:
strat.model.fit(strat.x, strat.y)
return server._strats
else:
strat_buffers = server.db.get_strats_for(uuid_of_replay)
return [server._unpack_strat_buffer(sb) for sb in strat_buffers]
def get_strat_from_replay(server, uuid_of_replay=None, strat_id=-1):
if uuid_of_replay is None:
records = server.db.get_master_records()
if len(records) > 0:
uuid_of_replay = records[-1].experiment_id
else:
raise RuntimeError("Server has no experiment records!")
strat_buffer = server.db.get_strat_for(uuid_of_replay, strat_id)
if strat_buffer is not None:
return server._unpack_strat_buffer(strat_buffer)
else:
warnings.warn(
"No final strat found (likely due to old DB,"
+ " trying to replay tells to generate a final strat. Note"
+ " that this fallback will eventually be removed!",
DeprecationWarning,
)
# sometimes there's no final strat, e.g. if it's a very old database
# (we dump strats on crash) in this case, replay the setup and tells
replay(server, uuid_of_replay, skip_computations=True)
# then if the final strat is model-based, refit
strat = server._strats[strat_id]
if strat.has_model:
strat.model.fit(strat.x, strat.y)
return strat
def get_dataframe_from_replay(server, uuid_of_replay=None, force_replay=False):
warnings.warn(
"get_dataframe_from_replay is deprecated."
+ " Use generate_experiment_table with return_df = True instead.",
DeprecationWarning,
stacklevel=2,
)
if uuid_of_replay is None:
records = server.db.get_master_records()
if len(records) > 0:
uuid_of_replay = records[-1].experiment_id
else:
raise RuntimeError("Server has no experiment records!")
recs = server.db.get_replay_for(uuid_of_replay)
strats = get_strats_from_replay(server, uuid_of_replay, force_replay=force_replay)
out = pd.DataFrame(
[flatten_tell_record(server, rec) for rec in recs if rec.message_type == "tell"]
)
# flatten any final nested lists
def _flatten(x):
return x[0] if len(x) == 1 else x
for col in out.columns:
if out[col].dtype == object:
out.loc[:, col] = out[col].apply(_flatten)
n_tell_records = len(out)
n_strat_datapoints = 0
post_means = []
post_vars = []
# collect posterior means and vars
for strat in strats:
if strat.has_model:
post_mean, post_var = strat.predict(strat.x)
n_tell_records = len(out)
n_strat_datapoints += len(post_mean)
post_means.extend(post_mean.detach().numpy())
post_vars.extend(post_var.detach().numpy())
if n_tell_records == n_strat_datapoints:
out["post_mean"] = post_means
out["post_var"] = post_vars
else:
        logger.warning(
            f"Number of tell records ({n_tell_records}) does not match "
            + f"number of datapoints in strat ({n_strat_datapoints}); "
            + "cowardly refusing to populate GP mean and var to dataframe!"
)
return out
|
aepsych-main
|
aepsych/server/replay.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import sys
import aepsych.database.db as db
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def get_next_filename(folder, fname, ext):
n = sum(1 for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f)))
return f"{folder}/{fname}_{n+1}.{ext}"
def parse_argument():
parser = argparse.ArgumentParser(description="AEPsych Database!")
parser.add_argument(
"-l",
"--list",
help="Lists available experiments in the database.",
action="store_true",
)
parser.add_argument(
"-d",
"--db",
type=str,
help="The database to use if not the default (./databases/default.db).",
default=None,
)
parser.add_argument(
"-u",
"--update",
action="store_true",
help="Update the database tables with the most recent columns and tables.",
)
args = parser.parse_args()
return args
def run_database(args):
logger.info("Starting AEPsych Database!")
try:
database_path = args.db
database = db.Database(database_path)
if args.list is True:
database.list_master_records()
elif "update" in args and args.update:
logger.info(f"Updating the database {database_path}")
if database.is_update_required():
database.perform_updates()
logger.info(f"- updated database {database_path}")
else:
logger.info(f"- update not needed for database {database_path}")
except (KeyboardInterrupt, SystemExit):
logger.exception("Got Ctrl+C, exiting!")
sys.exit()
except RuntimeError as e:
fname = get_next_filename(".", "dump", "pkl")
logger.exception(f"CRASHING!! dump in {fname}")
raise RuntimeError(e)
def main():
args = parse_argument()
run_database(args)
if __name__ == "__main__":
main()
|
aepsych-main
|
aepsych/server/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import select
import socket
import sys
import aepsych.utils_logging as utils_logging
import numpy as np
logger = utils_logging.getLogger(logging.INFO)
BAD_REQUEST = "bad request"
def SimplifyArrays(message):
return {
k: v.tolist()
if type(v) == np.ndarray
else SimplifyArrays(v)
if type(v) is dict
else v
for k, v in message.items()
}
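# Editor's illustration (not part of the original module): SimplifyArrays converts
# numpy arrays inside an outgoing message to plain lists (recursing into nested
# dicts) so the payload can be JSON-serialized. The message below is an assumed
# example, not a real client payload.
def _example_simplify_arrays() -> dict:
    msg = {"config": {"par1": np.array([0.5])}, "is_finished": False}
    # Returns {"config": {"par1": [0.5]}, "is_finished": False}
    return SimplifyArrays(msg)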
class DummySocket(object):
def close(self):
pass
class PySocket(object):
def __init__(self, port, ip=""):
addr = (ip, port) # all interfaces
if socket.has_dualstack_ipv6():
self.socket = socket.create_server(
addr, family=socket.AF_INET6, dualstack_ipv6=True
)
else:
self.socket = socket.create_server(addr)
self.conn, self.addr = None, None
def close(self):
self.socket.close()
def return_socket(self):
return self.socket
def accept_client(self):
client_not_connected = True
logger.info("Waiting for connection...")
while client_not_connected:
rlist, wlist, xlist = select.select([self.socket], [], [], 0)
if rlist:
for sock in rlist:
try:
self.conn, self.addr = sock.accept()
logger.info(
f"Connected by {self.addr}, waiting for messages..."
)
client_not_connected = False
except Exception as e:
logger.info(f"Connection to client failed with error {e}")
                        raise
def receive(self, server_exiting):
while not server_exiting:
rlist, wlist, xlist = select.select(
[self.conn], [], [], 0
            )  # 0 is the timeout: the server polls constantly for input. A nonzero timeout could be added to save CPU usage.
            # rlist, wlist, and xlist are the sockets to check: rlist for reading, wlist for writing, xlist for errors.
for sock in rlist:
try:
if rlist:
recv_result = sock.recv(
1024 * 512
) # 1024 * 512 is the max size of the message
msg = json.loads(recv_result)
logger.debug(f"receive : result = {recv_result}")
logger.info(f"Got: {msg}")
return msg
except Exception:
return BAD_REQUEST
def send(self, message):
if self.conn is None:
logger.error("No connection to send to!")
return
if type(message) == str:
pass # keep it as-is
elif type(message) == int:
message = str(message)
else:
message = json.dumps(SimplifyArrays(message))
logger.info(f"Sending: {message}")
sys.stdout.flush()
self.conn.sendall(bytes(message, "utf-8"))
|
aepsych-main
|
aepsych/server/sockets.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections.abc import Mapping
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_ask(server, request):
"""Returns dictionary with two entries:
"config" -- dictionary with config (keys are strings, values are floats)
"is_finished" -- bool, true if the strat is finished
"""
logger.debug("got ask message!")
if server._pregen_asks:
params = server._pregen_asks.pop()
else:
        # Some clients may still send "message" as an empty string, so we need to check whether it's a dict or not.
msg = request["message"]
if isinstance(msg, Mapping):
params = ask(server, **msg)
else:
params = ask(server)
new_config = {"config": params, "is_finished": server.strat.finished}
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="ask", request=request
)
return new_config
def ask(server, num_points=1):
"""get the next point to query from the model
Returns:
dict -- new config dict (keys are strings, values are floats)
"""
if server.skip_computations:
        # HACK to make sure strategies finish correctly
server.strat._strat._count += 1
if server.strat._strat.finished:
server.strat._make_next_strat()
return None
if not server.use_ax:
# index by [0] is temporary HACK while serverside
# doesn't handle batched ask
next_x = server.strat.gen()[0]
return server._tensor_to_config(next_x)
next_x = server.strat.gen(num_points)
return next_x
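# Editor's note (illustrative shapes, not original code): a client "ask" request looks
# roughly like {"type": "ask", "message": ""} (or a dict of keyword arguments for ask()),
# and handle_ask responds with
#   {"config": {"par1": [0.1], "par2": [0.2]}, "is_finished": False}
# where parameter names and values are placeholders for the configured experiment.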
|
aepsych-main
|
aepsych/server/message_handlers/handle_ask.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .handle_ask import handle_ask
from .handle_can_model import handle_can_model
from .handle_exit import handle_exit
from .handle_finish_strategy import handle_finish_strategy
from .handle_get_config import handle_get_config
from .handle_info import handle_info
from .handle_params import handle_params
from .handle_query import handle_query
from .handle_resume import handle_resume
from .handle_setup import handle_setup
from .handle_tell import handle_tell
MESSAGE_MAP = {
"setup": handle_setup,
"ask": handle_ask,
"tell": handle_tell,
"query": handle_query,
"parameters": handle_params,
"can_model": handle_can_model,
"exit": handle_exit,
"get_config": handle_get_config,
"finish_strategy": handle_finish_strategy,
"info": handle_info,
"resume": handle_resume,
}
|
aepsych-main
|
aepsych/server/message_handlers/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def handle_finish_strategy(self, request):
self.strat.finish()
return f"finished strategy {self.strat.name}"
|
aepsych-main
|
aepsych/server/message_handlers/handle_finish_strategy.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
import numpy as np
logger = utils_logging.getLogger(logging.INFO)
def handle_query(server, request):
logger.debug("got query message!")
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="query", request=request
)
response = query(server, **request["message"])
return response
def query(
server,
query_type="max",
probability_space=False,
x=None,
y=None,
constraints=None,
):
if server.skip_computations:
return None
constraints = constraints or {}
response = {
"query_type": query_type,
"probability_space": probability_space,
"constraints": constraints,
}
if query_type == "max":
fmax, fmax_loc = server.strat.get_max(constraints)
response["y"] = fmax.item()
response["x"] = server._tensor_to_config(fmax_loc)
elif query_type == "min":
fmin, fmin_loc = server.strat.get_min(constraints)
response["y"] = fmin.item()
response["x"] = server._tensor_to_config(fmin_loc)
elif query_type == "prediction":
# returns the model value at x
        if x is None:  # TODO: check that x is between lb and ub
raise RuntimeError("Cannot query model at location = None!")
mean, _var = server.strat.predict(
server._config_to_tensor(x).unsqueeze(axis=0),
probability_space=probability_space,
)
response["x"] = x
response["y"] = mean.item()
elif query_type == "inverse":
# expect constraints to be a dictionary; values are float arrays size 1 (exact) or 2 (upper/lower bnd)
constraints = {server.parnames.index(k): v for k, v in constraints.items()}
nearest_y, nearest_loc = server.strat.inv_query(
y, constraints, probability_space=probability_space
)
response["y"] = nearest_y
response["x"] = server._tensor_to_config(nearest_loc)
else:
raise RuntimeError("unknown query type!")
# ensure all x values are arrays
response["x"] = {
k: np.array([v]) if np.array(v).ndim == 0 else v
for k, v in response["x"].items()
}
return response
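# Editor's note (illustrative, not original code): example query messages, with
# parameter names and values as placeholders:
#   {"type": "query", "message": {"query_type": "max"}}
#   {"type": "query", "message": {"query_type": "prediction", "x": {"par1": [0.5]}}}
#   {"type": "query", "message": {"query_type": "inverse", "y": 0.75,
#                                 "constraints": {"par1": [0.5]}}}
# The response echoes query_type, probability_space, and constraints, and adds the
# queried "x" (as a dict of arrays) and "y" values as computed above.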
|
aepsych-main
|
aepsych/server/message_handlers/handle_query.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_get_config(server, request):
msg = request["message"]
section = msg.get("section", None)
prop = msg.get("property", None)
# If section and property are not specified, return the whole config
if section is None and prop is None:
return server.config.to_dict(deduplicate=False)
# If section and property are not both specified, raise an error
if section is None and prop is not None:
raise RuntimeError("Message contains a property but not a section!")
if section is not None and prop is None:
raise RuntimeError("Message contains a section but not a property!")
# If both section and property are specified, return only the relevant value from the config
return server.config.to_dict(deduplicate=False)[section][prop]
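# Editor's note (illustrative, not original code): requesting the full config uses
#   {"type": "get_config", "message": {}}
# while a single value needs both a section and a property, e.g. (section and
# property names here are placeholders):
#   {"type": "get_config", "message": {"section": "common", "property": "parnames"}}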
|
aepsych-main
|
aepsych/server/message_handlers/handle_get_config.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_can_model(server, request):
# Check if the strategy has finished initialization; i.e.,
# if it has a model and data to fit (strat.can_fit)
logger.debug("got can_model message!")
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="can_model", request=request
)
return {"can_model": server.strat.can_fit}
|
aepsych-main
|
aepsych/server/message_handlers/handle_can_model.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import io
import logging
from collections.abc import Iterable
import aepsych.utils_logging as utils_logging
import dill
import pandas as pd
import torch
logger = utils_logging.getLogger(logging.INFO)
DEFAULT_DESC = "default description"
DEFAULT_NAME = "default name"
def handle_tell(server, request):
logger.debug("got tell message!")
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="tell", request=request
)
# Batch update mode
if type(request["message"]) == list:
for msg in request["message"]:
tell(server, **msg)
else:
tell(server, **request["message"])
if server.strat is not None and server.strat.finished is True:
logger.info("Recording strat because the experiment is complete.")
buffer = io.BytesIO()
torch.save(server.strat, buffer, pickle_module=dill)
buffer.seek(0)
server.db.record_strat(master_table=server._db_master_record, strat=buffer)
return "acq"
def flatten_tell_record(server, rec):
out = {}
out["response"] = int(rec.message_contents["message"]["outcome"])
out.update(
pd.json_normalize(rec.message_contents["message"]["config"], sep="_").to_dict(
orient="records"
)[0]
)
if rec.extra_info is not None:
out.update(rec.extra_info)
return out
def tell(server, outcome, config=None, model_data=True, trial_index=-1):
"""tell the model which input was run and what the outcome was
Arguments:
server: The AEPsych server object.
outcome: The outcome of the trial. If using the legacy backend, this must be an int or a float. If using the Ax
backend, this may be an int or float if using a single outcome, or if using multiple outcomes, it must be a
dictionary mapping outcome names to values.
config: A dictionary mapping parameter names to values. This must be provided if using the legacy backend. If
using the Ax backend, this should be provided only for trials that do not already have a trial_index.
model_data: If True, the data from this trial will be added to the model. If False, the trial will be recorded in
the db, but will not be modeled.
trial_index: The trial_index for the trial as provided by the ask response when using the Ax backend. Ignored by
the legacy backend.
"""
if config is None:
config = {}
if not server.is_performing_replay:
_record_tell(server, outcome, config, model_data)
if model_data:
if not server.use_ax:
x = server._config_to_tensor(config)
server.strat.add_data(x, outcome)
else:
assert (
config or trial_index >= 0
), "Must supply a trial parameterization or a trial index!"
if trial_index >= 0:
server.strat.complete_existing_trial(trial_index, outcome)
else:
server.strat.complete_new_trial(config, outcome)
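# Editor's note (illustrative, not original code): with the legacy backend a tell
# message looks roughly like
#   {"type": "tell", "message": {"config": {"par1": [0.5]}, "outcome": 1}}
# and batch updates send a list of such dicts under "message". Parameter names and
# values are placeholders.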
def _record_tell(server, outcome, config, model_data):
server._db_raw_record = server.db.record_raw(
master_table=server._db_master_record,
model_data=bool(model_data),
)
for param_name, param_value in config.items():
if isinstance(param_value, Iterable) and type(param_value) != str:
if len(param_value) == 1:
server.db.record_param(
raw_table=server._db_raw_record,
param_name=str(param_name),
param_value=str(param_value[0]),
)
else:
for i, v in enumerate(param_value):
server.db.record_param(
raw_table=server._db_raw_record,
param_name=str(param_name) + "_stimuli" + str(i),
param_value=str(v),
)
else:
server.db.record_param(
raw_table=server._db_raw_record,
param_name=str(param_name),
param_value=str(param_value),
)
if isinstance(outcome, dict):
for key in outcome.keys():
server.db.record_outcome(
raw_table=server._db_raw_record,
outcome_name=key,
outcome_value=float(outcome[key]),
)
# Check if we get single or multiple outcomes
# Multiple outcomes come in the form of iterables that aren't strings or single-element tensors
elif isinstance(outcome, Iterable) and type(outcome) != str:
for i, outcome_value in enumerate(outcome):
if isinstance(outcome_value, Iterable) and type(outcome_value) != str:
if isinstance(outcome_value, torch.Tensor) and outcome_value.dim() < 2:
outcome_value = outcome_value.item()
elif len(outcome_value) == 1:
outcome_value = outcome_value[0]
else:
raise ValueError(
"Multi-outcome values must be a list of lists of length 1!"
)
server.db.record_outcome(
raw_table=server._db_raw_record,
outcome_name="outcome_" + str(i),
outcome_value=float(outcome_value),
)
else:
server.db.record_outcome(
raw_table=server._db_raw_record,
outcome_name="outcome",
outcome_value=float(outcome),
)
|
aepsych-main
|
aepsych/server/message_handlers/handle_tell.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_exit(server, request):
# Make local server write strats into DB and close the connection
termination_type = "Normal termination"
logger.info("Got termination message!")
server.write_strats(termination_type)
server.exit_server_loop = True
return "Terminate"
|
aepsych-main
|
aepsych/server/message_handlers/handle_exit.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_info(server, request: Dict[str, Any]) -> Dict[str, Any]:
"""Handles info message from the client.
Args:
request (Dict[str, Any]): The info message from the client
Returns:
Dict[str, Any]: Returns dictionary containing the current state of the experiment
"""
logger.debug("got info message!")
ret_val = info(server)
return ret_val
def info(server) -> Dict[str, Any]:
"""Returns details about the current state of the server and experiments
Returns:
Dict: Dict containing server and experiment details
"""
current_strat_model = (
server.config.get(server.strat.name, "model", fallback="model not set")
if server.config and ("model" in server.config.get_section(server.strat.name))
else "model not set"
)
current_strat_acqf = (
server.config.get(server.strat.name, "acqf", fallback="acqf not set")
if server.config and ("acqf" in server.config.get_section(server.strat.name))
else "acqf not set"
)
response = {
"db_name": server.db._db_name,
"exp_id": server._db_master_record.experiment_id,
"strat_count": server.n_strats,
"all_strat_names": server.strat_names,
"current_strat_index": server.strat_id,
"current_strat_name": server.strat.name,
"current_strat_data_pts": server.strat.x.shape[0]
if server.strat.x is not None
else 0,
"current_strat_model": current_strat_model,
"current_strat_acqf": current_strat_acqf,
"current_strat_finished": server.strat.finished,
}
logger.debug(f"Current state of server: {response}")
return response
|
aepsych-main
|
aepsych/server/message_handlers/handle_info.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_resume(server, request):
logger.debug("got resume message!")
strat_id = int(request["message"]["strat_id"])
server.strat_id = strat_id
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="resume", request=request
)
return server.strat_id
|
aepsych-main
|
aepsych/server/message_handlers/handle_resume.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
from aepsych.config import Config
from aepsych.strategy import AEPsychStrategy, SequentialStrategy
from aepsych.version import __version__
logger = utils_logging.getLogger(logging.INFO)
DEFAULT_DESC = "default description"
DEFAULT_NAME = "default name"
def _configure(server, config):
server._pregen_asks = (
[]
) # TODO: Allow each strategy to have its own stack of pre-generated asks
parnames = config._str_to_list(config.get("common", "parnames"), element_type=str)
server.parnames = parnames
server.config = config
server.use_ax = config.getboolean("common", "use_ax", fallback=False)
server.enable_pregen = config.getboolean("common", "pregen_asks", fallback=False)
if server.use_ax:
server.trial_index = -1
server.strat = AEPsychStrategy.from_config(config)
server.strat_id = server.n_strats - 1
else:
server.strat = SequentialStrategy.from_config(config)
server.strat_id = server.n_strats - 1 # 0-index strats
return server.strat_id
def configure(server, config=None, **config_args):
# To preserve backwards compatibility, config_args is still usable for unittests and old functions.
# But if config is specified, the server will use that rather than create a new config object.
if config is None:
usedconfig = Config(**config_args)
else:
usedconfig = config
if "experiment" in usedconfig:
logger.warning(
'The "experiment" section is being deprecated from configs. Please put everything in the "experiment" section in the "common" section instead.'
)
for i in usedconfig["experiment"]:
usedconfig["common"][i] = usedconfig["experiment"][i]
del usedconfig["experiment"]
version = usedconfig.version
if version < __version__:
try:
usedconfig.convert_to_latest()
server.db.perform_updates()
logger.warning(
f"Config version {version} is less than AEPsych version {__version__}. The config was automatically modified to be compatible. Check the config table in the db to see the changes."
)
except RuntimeError:
logger.warning(
f"Config version {version} is less than AEPsych version {__version__}, but couldn't automatically update the config! Trying to configure the server anyway..."
)
server.db.record_config(master_table=server._db_master_record, config=usedconfig)
return _configure(server, usedconfig)
def handle_setup(server, request):
logger.debug("got setup message!")
    ### Make a temporary config object so experiment metadata can be extracted and recorded before the server is configured
if (
"config_str" in request["message"].keys()
or "config_dict" in request["message"].keys()
):
tempconfig = Config(**request["message"])
if not server.is_performing_replay:
experiment_id = None
if server._db_master_record is not None:
experiment_id = server._db_master_record.experiment_id
if "metadata" in tempconfig.keys():
cdesc = (
tempconfig["metadata"]["experiment_description"]
if ("experiment_description" in tempconfig["metadata"].keys())
else DEFAULT_DESC
)
cname = (
tempconfig["metadata"]["experiment_name"]
if ("experiment_name" in tempconfig["metadata"].keys())
else DEFAULT_NAME
)
cid = (
tempconfig["metadata"]["experiment_id"]
if ("experiment_id" in tempconfig["metadata"].keys())
else None
)
server._db_master_record = server.db.record_setup(
description=cdesc,
name=cname,
request=request,
id=cid,
extra_metadata=tempconfig.jsonifyMetadata(),
)
            ### if there is no metadata section, record the setup with the default name and description
else:
server._db_master_record = server.db.record_setup(
description=DEFAULT_DESC,
name=DEFAULT_NAME,
request=request,
id=experiment_id,
)
strat_id = configure(server, config=tempconfig)
else:
raise RuntimeError("Missing a configure message!")
return strat_id
|
aepsych-main
|
aepsych/server/message_handlers/handle_setup.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_params(server, request):
logger.debug("got parameters message!")
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="parameters", request=request
)
config_setup = {
server.parnames[i]: [server.strat.lb[i].item(), server.strat.ub[i].item()]
for i in range(len(server.parnames))
}
return config_setup
|
aepsych-main
|
aepsych/server/message_handlers/handle_params.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from typing import Any, Optional, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.acquisition.objective import FloorLogitObjective
from aepsych.acquisition.objective.semi_p import SemiPThresholdObjective
from aepsych.config import Config
from aepsych.likelihoods import BernoulliObjectiveLikelihood, LinearBernoulliLikelihood
from aepsych.models import GPClassificationModel
from aepsych.utils import _process_bounds, promote_0d
from aepsych.utils_logging import getLogger
from botorch.optim.fit import fit_gpytorch_mll_scipy
from botorch.posteriors import GPyTorchPosterior
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import BernoulliLikelihood, Likelihood
from gpytorch.means import ConstantMean, ZeroMean
from gpytorch.priors import GammaPrior
from torch import Tensor
from torch.distributions import Normal
# TODO: Implement a covar factory and analytic method for getting the lse
logger = getLogger()
def _hadamard_mvn_approx(x_intensity, slope_mean, slope_cov, offset_mean, offset_cov):
"""
MVN approximation to the hadamard product of GPs (from the SemiP paper, extending the
zero-mean results in https://mathoverflow.net/questions/293955/normal-approximation-to-the-pointwise-hadamard-schur-product-of-two-multivariat)
"""
offset_mean = offset_mean + x_intensity
mean_x = offset_mean * slope_mean
# Same as torch.diag_embed(slope_mean) @ offset_cov @ torch.diag_embed(slope_mean), but more efficient
term1 = slope_mean.unsqueeze(-1) * offset_cov * slope_mean.unsqueeze(-2)
# Same as torch.diag_embed(offset_mean) @ slope_cov @ torch.diag_embed(offset_mean), but more efficient
term2 = offset_mean.unsqueeze(-1) * slope_cov * offset_mean.unsqueeze(-2)
term3 = slope_cov * offset_cov
cov_x = term1 + term2 + term3
return mean_x, cov_x
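# Editor's note (derived from the code above, for reference): with independent GPs
# k ~ N(mu_k, S_k) (slope) and c ~ N(mu_c, S_c) (offset), and m = mu_c + x_i, the
# elementwise product k * (c + x_i) is moment-matched by
#   mean = mu_k * m
#   cov  = diag(mu_k) S_c diag(mu_k) + diag(m) S_k diag(m) + S_k * S_c
# (elementwise * in the last term); term1/term2/term3 implement these pieces without
# materializing the diagonal matrices.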
def semi_p_posterior_transform(posterior):
batch_mean = posterior.mvn.mean
batch_cov = posterior.mvn.covariance_matrix
offset_mean = batch_mean[..., 0, :]
slope_mean = batch_mean[..., 1, :]
offset_cov = batch_cov[..., 0, :, :]
slope_cov = batch_cov[..., 1, :, :]
Xi = posterior.Xi
approx_mean, approx_cov = _hadamard_mvn_approx(
x_intensity=Xi,
slope_mean=slope_mean,
slope_cov=slope_cov,
offset_mean=offset_mean,
offset_cov=offset_cov,
)
approx_mvn = MultivariateNormal(mean=approx_mean, covariance_matrix=approx_cov)
return GPyTorchPosterior(mvn=approx_mvn)
class SemiPPosterior(GPyTorchPosterior):
def __init__(
self,
mvn: MultivariateNormal,
likelihood: LinearBernoulliLikelihood,
Xi: torch.Tensor,
):
super().__init__(distribution=mvn)
self.likelihood = likelihood
self.Xi = Xi
def rsample_from_base_samples(
self,
sample_shape: torch.Size,
base_samples: Tensor,
) -> Tensor:
r"""Sample from the posterior (with gradients) using base samples.
This is intended to be used with a sampler that produces the corresponding base
samples, and enables acquisition optimization via Sample Average Approximation.
"""
return (
super()
.rsample_from_base_samples(
sample_shape=sample_shape, base_samples=base_samples
)
.squeeze(-1)
)
def rsample(
self,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[torch.Tensor] = None,
):
kcsamps = (
super()
.rsample(sample_shape=sample_shape, base_samples=base_samples)
.squeeze(-1)
)
# fsamps is of shape nsamp x 2 x n, or nsamp x b x 2 x n
return kcsamps
def sample_p(
self,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[torch.Tensor] = None,
):
kcsamps = self.rsample(sample_shape=sample_shape, base_samples=base_samples)
return self.likelihood.p(function_samples=kcsamps, Xi=self.Xi).squeeze(-1)
def sample_f(
self,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[torch.Tensor] = None,
):
kcsamps = self.rsample(sample_shape=sample_shape, base_samples=base_samples)
return self.likelihood.f(function_samples=kcsamps, Xi=self.Xi).squeeze(-1)
def sample_thresholds(
self,
threshold_level: float,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[torch.Tensor] = None,
):
fsamps = self.rsample(sample_shape=sample_shape, base_samples=base_samples)
return SemiPThresholdObjective(
likelihood=self.likelihood, target=threshold_level
)(samples=fsamps, X=self.Xi)
class SemiParametricGPModel(GPClassificationModel):
"""
Semiparametric GP model for psychophysics.
    Implements a semi-parametric model with a functional form like :math:`k(x_c)(x_i + c(x_c))`,
for scalar intensity dimension :math:`x_i` and vector-valued context dimensions :math:`x_c`,
with k and c having a GP prior. In contrast to HadamardSemiPModel, this version uses a batched GP
directly, which is about 2-3x slower but does not use the MVN approximation.
Intended for use with a BernoulliObjectiveLikelihood with flexible link function such as
Logistic or Gumbel nonlinearity with a floor.
"""
_num_outputs = 1
_batch_shape = 2
stimuli_per_trial = 1
outcome_type = "binary"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
stim_dim: int = 0,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Any] = None,
slope_mean: float = 2,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
"""
Initialize SemiParametricGP.
        Args:
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
stim_dim (int): Index of the intensity (monotonic) dimension. Defaults to 0.
mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior.
covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a
gamma prior.
likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to
                linear-Bernoulli likelihood with probit link.
inducing_size (int): Number of inducing points. Defaults to 100.
max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
there is no limit to the fitting time.
inducing_point_method (string): The method to use to select the inducing points. Defaults to "auto".
If "sobol", a number of Sobol points equal to inducing_size will be selected.
If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
If "kmeans++", selects points by performing kmeans++ clustering on the training data.
If "auto", tries to determine the best method automatically.
"""
lb, ub, dim = _process_bounds(lb, ub, dim)
self.stim_dim = stim_dim
self.context_dims = list(range(dim))
self.context_dims.pop(stim_dim)
if mean_module is None:
mean_module = ConstantMean(batch_shape=torch.Size([2]))
mean_module.requires_grad_(False)
mean_module.constant.copy_(
torch.tensor([0.0, slope_mean]) # offset mean is 0, slope mean is 2
)
if covar_module is None:
covar_module = ScaleKernel(
RBFKernel(
ard_num_dims=dim - 1,
lengthscale_prior=GammaPrior(3, 6),
active_dims=self.context_dims, # Operate only on x_s
batch_shape=torch.Size([2]),
),
outputscale_prior=GammaPrior(1.5, 1.0),
)
likelihood = likelihood or LinearBernoulliLikelihood()
assert isinstance(
likelihood, LinearBernoulliLikelihood
), "SemiP model only supports linear Bernoulli likelihoods!"
super().__init__(
lb=lb,
ub=ub,
dim=dim,
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
@classmethod
def from_config(cls, config: Config) -> SemiParametricGPModel:
"""Alternate constructor for SemiParametricGPModel model.
This is used when we recursively build a full sampling strategy
from a configuration.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
SemiParametricGPModel: Configured class instance.
"""
classname = cls.__name__
inducing_size = config.getint(classname, "inducing_size", fallback=100)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
stim_dim = config.getint(classname, "stim_dim", fallback=0)
slope_mean = config.getfloat(classname, "slope_mean", fallback=2)
return cls(
lb=lb,
ub=ub,
stim_dim=stim_dim,
dim=dim,
likelihood=likelihood,
slope_mean=slope_mean,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
def fit(
self,
train_x: torch.Tensor,
train_y: torch.Tensor,
warmstart_hyperparams: bool = False,
warmstart_induc: bool = False,
**kwargs,
) -> None:
"""Fit underlying model.
Args:
train_x (torch.Tensor): Inputs.
train_y (torch.LongTensor): Responses.
warmstart_hyperparams (bool): Whether to reuse the previous hyperparameters (True) or fit from scratch
(False). Defaults to False.
warmstart_induc (bool): Whether to reuse the previous inducing points or fit from scratch (False).
Defaults to False.
kwargs: Keyword arguments passed to `optimizer=fit_gpytorch_mll_scipy`.
"""
super().fit(
train_x=train_x,
train_y=train_y,
optimizer=fit_gpytorch_mll_scipy,
warmstart_hyperparams=warmstart_hyperparams,
warmstart_induc=warmstart_induc,
closure_kwargs={"Xi": train_x[..., self.stim_dim]},
**kwargs,
)
def sample(
self,
x: Union[torch.Tensor, np.ndarray],
num_samples: int,
probability_space=False,
) -> torch.Tensor:
"""Sample from underlying model.
Args:
x ((n x d) torch.Tensor): Points at which to sample.
            num_samples (int): Number of samples to return.
            probability_space (bool): If True, return samples of response probability rather than the latent function. Defaults to False.
Returns:
(num_samples x n) torch.Tensor: Posterior samples
"""
post = self.posterior(x)
if probability_space is True:
samps = post.sample_p(torch.Size([num_samples])).detach()
else:
samps = post.sample_f(torch.Size([num_samples])).detach()
assert samps.shape == (num_samples, 1, x.shape[0])
return samps.squeeze(1)
def predict(
self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
probability_space (bool, optional): Return outputs in units of
response probability instead of latent function value. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Posterior mean and variance at query points.
"""
with torch.no_grad():
samps = self.sample(
x, num_samples=10000, probability_space=probability_space
)
m, v = samps.mean(0), samps.var(0)
return promote_0d(m), promote_0d(v)
def posterior(self, X, posterior_transform=None):
# Assume x is (b) x n x d
if X.ndim > 3:
raise ValueError
# Add in the extra 2 batch for the 2 GPs in this model
Xnew = X.unsqueeze(-3).expand(
X.shape[:-2] # (b)
+ torch.Size([2]) # For the two GPs
+ X.shape[-2:] # n x d
)
# The shape of Xnew is: (b) x 2 x n x d
posterior = SemiPPosterior(
mvn=self(Xnew),
likelihood=self.likelihood,
Xi=X[..., self.stim_dim],
)
if posterior_transform is not None:
return posterior_transform(posterior)
else:
return posterior
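# Editor's usage sketch (hypothetical config snippet, not original documentation):
# SemiParametricGPModel.from_config reads its options from a section named after the
# class, e.g. an ini config might contain
#   [SemiParametricGPModel]
#   lb = [0, 0]
#   ub = [1, 1]
#   stim_dim = 0
#   likelihood = LinearBernoulliLikelihood
# Option names follow from_config above; the values shown are placeholders.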
class HadamardSemiPModel(GPClassificationModel):
"""
Semiparametric GP model for psychophysics, with a MVN approximation to the elementwise
product of GPs.
    Implements a semi-parametric model with a functional form like :math:`k(x_c)(x_i + c(x_c))`,
for scalar intensity dimension :math:`x_i` and vector-valued context dimensions :math:`x_c`,
with k and c having a GP prior. In contrast to SemiParametricGPModel, this version approximates
the product as a single multivariate normal, which should be faster (the approximation is exact
if one of the GP's variance goes to zero).
Intended for use with a BernoulliObjectiveLikelihood with flexible link function such as
Logistic or Gumbel nonlinearity with a floor.
"""
_num_outputs = 1
stimuli_per_trial = 1
outcome_type = "binary"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
stim_dim: int = 0,
slope_mean_module: Optional[gpytorch.means.Mean] = None,
slope_covar_module: Optional[gpytorch.kernels.Kernel] = None,
offset_mean_module: Optional[gpytorch.means.Mean] = None,
offset_covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
slope_mean: float = 2,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
"""
Initialize HadamardSemiPModel.
Args:
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
stim_dim (int): Index of the intensity (monotonic) dimension. Defaults to 0.
slope_mean_module (gpytorch.means.Mean, optional): Mean module to use (default: constant mean) for slope.
slope_covar_module (gpytorch.kernels.Kernel, optional): Covariance kernel to use (default: scaled RBF) for slope.
offset_mean_module (gpytorch.means.Mean, optional): Mean module to use (default: constant mean) for offset.
offset_covar_module (gpytorch.kernels.Kernel, optional): Covariance kernel to use (default: scaled RBF) for offset.
            likelihood (gpytorch.likelihood.Likelihood, optional): Defaults to Bernoulli with logistic input and a floor of .5.
inducing_size (int): Number of inducing points. Defaults to 100.
max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
there is no limit to the fitting time.
inducing_point_method (string): The method to use to select the inducing points. Defaults to "auto".
If "sobol", a number of Sobol points equal to inducing_size will be selected.
If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
If "kmeans++", selects points by performing kmeans++ clustering on the training data.
If "auto", tries to determine the best method automatically.
"""
super().__init__(
lb=lb,
ub=ub,
dim=dim,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
self.stim_dim = stim_dim
if slope_mean_module is None:
self.slope_mean_module = ConstantMean()
self.slope_mean_module.requires_grad_(False)
self.slope_mean_module.constant.copy_(
torch.tensor(slope_mean)
) # magic number to shift the slope prior to be generally positive.
else:
self.slope_mean_module = slope_mean_module
        self.offset_mean_module = offset_mean_module or ZeroMean()
context_dims = list(range(self.dim))
context_dims.pop(stim_dim)
self.slope_covar_module = slope_covar_module or ScaleKernel(
RBFKernel(
ard_num_dims=self.dim - 1,
lengthscale_prior=GammaPrior(3, 6),
active_dims=context_dims, # Operate only on x_s
),
outputscale_prior=GammaPrior(1.5, 1.0),
)
self.offset_covar_module = offset_covar_module or ScaleKernel(
RBFKernel(
ard_num_dims=self.dim - 1,
lengthscale_prior=GammaPrior(3, 6),
active_dims=context_dims, # Operate only on x_s
),
outputscale_prior=GammaPrior(1.5, 1.0),
)
self.likelihood = likelihood or BernoulliObjectiveLikelihood(
objective=FloorLogitObjective()
)
self._fresh_state_dict = deepcopy(self.state_dict())
self._fresh_likelihood_dict = deepcopy(self.likelihood.state_dict())
def forward(self, x: torch.Tensor) -> MultivariateNormal:
"""Forward pass for semip GP.
        Generates the MVN for k(c + x[:, stim_dim]) = kc + k*x[:, stim_dim], where k and c are the
        slope and offset GPs and x[:, stim_dim] are the intensity (stimulus) locations,
        which act as a constant offset inside the product.
Args:
x (torch.Tensor): Points at which to sample.
Returns:
MVN object evaluated at samples
"""
transformed_x = self.normalize_inputs(x)
# TODO: make slope prop to intensity width.
slope_mean = self.slope_mean_module(transformed_x)
# kc mvn
offset_mean = self.offset_mean_module(transformed_x)
slope_cov = self.slope_covar_module(transformed_x)
offset_cov = self.offset_covar_module(transformed_x)
mean_x, cov_x = _hadamard_mvn_approx(
x_intensity=transformed_x[..., self.stim_dim],
slope_mean=slope_mean,
slope_cov=slope_cov,
offset_mean=offset_mean,
offset_cov=offset_cov,
)
return MultivariateNormal(mean_x, cov_x)
@classmethod
def from_config(cls, config: Config) -> HadamardSemiPModel:
"""Alternate constructor for HadamardSemiPModel model.
This is used when we recursively build a full sampling strategy
from a configuration.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
HadamardSemiPModel: Configured class instance.
"""
classname = cls.__name__
inducing_size = config.getint(classname, "inducing_size", fallback=100)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
slope_mean_module = config.getobj(classname, "slope_mean_module", fallback=None)
slope_covar_module = config.getobj(
classname, "slope_covar_module", fallback=None
)
offset_mean_module = config.getobj(
classname, "offset_mean_module", fallback=None
)
offset_covar_module = config.getobj(
classname, "offset_covar_module", fallback=None
)
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
    if hasattr(likelihood_cls, "from_config"):
        likelihood = likelihood_cls.from_config(config)
    else:
        likelihood = likelihood_cls()
else:
    likelihood = None  # fall back to __init__ default
slope_mean = config.getfloat(classname, "slope_mean", fallback=2)
stim_dim = config.getint(classname, "stim_dim", fallback=0)
return cls(
lb=lb,
ub=ub,
stim_dim=stim_dim,
dim=dim,
slope_mean_module=slope_mean_module,
slope_covar_module=slope_covar_module,
offset_mean_module=offset_mean_module,
offset_covar_module=offset_covar_module,
likelihood=likelihood,
slope_mean=slope_mean,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
def predict(
self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
probability_space (bool, optional): Return outputs in units of
response probability instead of latent function value. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at the queried points.
"""
if probability_space:
if hasattr(self.likelihood, "objective"):
fsamps = self.sample(x, 1000)
psamps = self.likelihood.objective(fsamps)
return psamps.mean(0).squeeze(), psamps.var(0).squeeze()
elif isinstance(self.likelihood, BernoulliLikelihood): # default to probit
fsamps = self.sample(x, 1000)
psamps = Normal(0, 1).cdf(fsamps)
return psamps.mean(0).squeeze(), psamps.var(0).squeeze()
else:
raise NotImplementedError(
f"p-space sampling not defined if likelihood ({self.likelihood}) does not have a link!"
)
else:
with torch.no_grad():
post = self.posterior(x)
fmean = post.mean.squeeze()
fvar = post.variance.squeeze()
return fmean, fvar
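# --- Hedged usage sketch (not part of the original file) ---
# A minimal, self-contained illustration of the probability-space prediction
# path above: sample the latent f, push samples through a link/objective, then
# take the Monte Carlo mean and variance. The `floor_logistic` link below is a
# hypothetical stand-in for FloorLogitObjective, assumed only for this sketch.
import torch

def floor_logistic(f: torch.Tensor, floor: float = 0.5) -> torch.Tensor:
    """Map latent values to response probabilities with a lower floor."""
    return floor + (1 - floor) * torch.sigmoid(f)

fsamps_demo = torch.randn(1000, 5)         # stand-in posterior samples, [n_samp x n_points]
psamps_demo = floor_logistic(fsamps_demo)  # probability-space samples
pmean_demo, pvar_demo = psamps_demo.mean(0), psamps_demo.var(0)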
|
aepsych-main
|
aepsych/models/semi_p.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import time
from typing import Any, Dict, Optional, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory import default_mean_covar_factory
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds, promote_0d
from aepsych.utils_logging import getLogger
from botorch.fit import fit_gpytorch_mll
from botorch.models import PairwiseGP, PairwiseLaplaceMarginalLogLikelihood
from botorch.models.transforms.input import Normalize
from scipy.stats import norm
logger = getLogger()
class PairwiseProbitModel(PairwiseGP, AEPsychMixin):
_num_outputs = 1
stimuli_per_trial = 2
outcome_type = "binary"
def _pairs_to_comparisons(self, x, y):
"""
Takes x, y structured as pairs and judgments and
returns pairs and comparisons as PairwiseGP requires
"""
# This needs to take a unique over the feature dim by flattening
# over pairs but not instances/batches. This is actually tensor
# matricization over the feature dimension but awkward in numpy
unique_coords = torch.unique(
torch.transpose(x, 1, 0).reshape(self.dim, -1), dim=1
)
def _get_index_of_equal_row(arr, x, dim=0):
return torch.all(torch.eq(arr, x[:, None]), dim=dim).nonzero().item()
comparisons = []
for pair, judgement in zip(x, y):
comparison = (
_get_index_of_equal_row(unique_coords, pair[..., 0]),
_get_index_of_equal_row(unique_coords, pair[..., 1]),
)
if judgement == 0:
comparisons.append(comparison)
else:
comparisons.append(comparison[::-1])
return unique_coords.T, torch.LongTensor(comparisons)
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
max_fit_time: Optional[float] = None,
):
self.lb, self.ub, dim = _process_bounds(lb, ub, dim)
self.max_fit_time = max_fit_time
bounds = torch.stack((self.lb, self.ub))
input_transform = Normalize(d=dim, bounds=bounds)
if covar_module is None:
config = Config(
config_dict={
"default_mean_covar_factory": {
"lb": str(self.lb.tolist()),
"ub": str(self.ub.tolist()),
}
}
) # type: ignore
_, covar_module = default_mean_covar_factory(config)
super().__init__(
datapoints=None,
comparisons=None,
covar_module=covar_module,
jitter=1e-3,
input_transform=input_transform,
)
self.dim = dim # The Pairwise constructor sets self.dim = None.
def fit(
self,
train_x: torch.Tensor,
train_y: torch.Tensor,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
):
self.train()
mll = PairwiseLaplaceMarginalLogLikelihood(self.likelihood, self)
datapoints, comparisons = self._pairs_to_comparisons(train_x, train_y)
self.set_train_data(datapoints, comparisons)
optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs.copy()
max_fit_time = kwargs.pop("max_fit_time", self.max_fit_time)
if max_fit_time is not None:
# figure out how long a single evaluation of the MLL takes
starttime = time.time()
_ = mll(self(datapoints), comparisons)
single_eval_time = time.time() - starttime
n_eval = int(max_fit_time / single_eval_time)
optimizer_kwargs["maxfun"] = n_eval
logger.info(f"fit maxfun is {n_eval}")
logger.info("Starting fit...")
starttime = time.time()
fit_gpytorch_mll(mll, **kwargs, **optimizer_kwargs)
logger.info(f"Fit done, time={time.time()-starttime}")
def update(
self, train_x: torch.Tensor, train_y: torch.Tensor, warmstart: bool = True
):
"""Perform a warm-start update of the model from previous fit."""
self.fit(train_x, train_y)
def predict(
self, x, probability_space=False, num_samples=1000, rereference="x_min"
):
if rereference is not None:
samps = self.sample(x, num_samples, rereference)
fmean, fvar = samps.mean(0).squeeze(), samps.var(0).squeeze()
else:
post = self.posterior(x)
fmean, fvar = post.mean.squeeze(), post.variance.squeeze()
if probability_space:
return (
promote_0d(norm.cdf(fmean)),
promote_0d(norm.cdf(fvar)),
)
else:
return fmean, fvar
def predict_probability(
self, x, probability_space=False, num_samples=1000, rereference="x_min"
):
return self.predict(
x, probability_space=True, num_samples=num_samples, rereference=rereference
)
def sample(self, x, num_samples, rereference="x_min"):
if len(x.shape) < 2:
x = x.reshape(-1, 1)
if rereference is None:
return self.posterior(x).rsample(torch.Size([num_samples]))
if rereference == "x_min":
x_ref = self.lb
elif rereference == "x_max":
x_ref = self.ub
elif rereference == "f_max":
x_ref = torch.Tensor(self.get_max()[1])
elif rereference == "f_min":
x_ref = torch.Tensor(self.get_min()[1])
else:
raise RuntimeError(
f"Unknown rereference type {rereference}! Options: x_min, x_max, f_min, f_max."
)
x_stack = torch.vstack([x, x_ref])
samps = self.posterior(x_stack).rsample(torch.Size([num_samples]))
samps, samps_ref = torch.split(samps, [samps.shape[1] - 1, 1], dim=1)
if rereference == "x_min" or rereference == "f_min":
return samps - samps_ref
else:
return -samps + samps_ref
@classmethod
def from_config(cls, config):
classname = cls.__name__
mean_covar_factory = config.getobj(
"PairwiseProbitModel",
"mean_covar_factory",
fallback=default_mean_covar_factory,
)
# no way of passing mean into PairwiseGP right now
_, covar = mean_covar_factory(config)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = lb.shape[0]
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
return cls(lb=lb, ub=ub, dim=dim, covar_module=covar, max_fit_time=max_fit_time)
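# --- Hedged illustration (not part of the original file) ---
# A tiny standalone sketch of the pair-to-comparison encoding performed by
# _pairs_to_comparisons above: each trial supplies two stimuli and a binary
# judgment, and PairwiseGP wants a deduplicated datapoint matrix plus index
# pairs whose order encodes the judgment. Shapes and values are illustrative.
import torch

x_demo = torch.tensor(  # [n_trials x dim x 2]: two 2-d stimuli per trial
    [[[0.1, 0.9], [0.2, 0.8]],
     [[0.1, 0.9], [0.3, 0.7]]]
)
y_demo = torch.tensor([0, 1])  # binary judgments, one per trial

dim_demo = x_demo.shape[1]
unique_coords = torch.unique(torch.transpose(x_demo, 1, 0).reshape(dim_demo, -1), dim=1)

def _row_index(arr, v):
    # index of the column of arr equal to v
    return torch.all(torch.eq(arr, v[:, None]), dim=0).nonzero().item()

comparisons_demo = []
for pair, judgement in zip(x_demo, y_demo):
    comp = (_row_index(unique_coords, pair[..., 0]), _row_index(unique_coords, pair[..., 1]))
    comparisons_demo.append(comp if judgement == 0 else comp[::-1])

datapoints_demo = unique_coords.T                      # [n_unique x dim]
comparisons_demo = torch.LongTensor(comparisons_demo)  # [n_trials x 2]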
|
aepsych-main
|
aepsych/models/pairwise_probit.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gpytorch
import torch
from aepsych.likelihoods import OrdinalLikelihood
from aepsych.models import GPClassificationModel
class OrdinalGPModel(GPClassificationModel):
"""
Convenience wrapper for GPClassificationModel that hardcodes
an ordinal likelihood, better priors for this setting, and
adds a convenience method for computing outcome probabilities.
TODO: at some point we should refactor posteriors so that things like
OrdinalPosterior and MonotonicPosterior don't have to have their own
model classes.
"""
outcome_type = "ordinal"
def __init__(self, likelihood=None, *args, **kwargs):
covar_module = kwargs.pop("covar_module", None)
dim = kwargs.get("dim")
if covar_module is None:
ls_prior = gpytorch.priors.GammaPrior(concentration=1.5, rate=3.0)
ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate
ls_constraint = gpytorch.constraints.Positive(
transform=None, initial_value=ls_prior_mode
)
# no outputscale due to shift identifiability in d.
covar_module = gpytorch.kernels.RBFKernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
)
if likelihood is None:
likelihood = OrdinalLikelihood(n_levels=5)
super().__init__(
*args,
covar_module=covar_module,
likelihood=likelihood,
**kwargs,
)
def predict_probs(self, xgrid):
fmean, fvar = self.predict(xgrid)
return self.calculate_probs(fmean, fvar)
def calculate_probs(self, fmean, fvar):
fsd = torch.sqrt(1 + fvar)
probs = torch.zeros(*fmean.size(), self.likelihood.n_levels)
probs[..., 0] = self.likelihood.link(
(self.likelihood.cutpoints[0] - fmean) / fsd
)
for i in range(1, self.likelihood.n_levels - 1):
probs[..., i] = self.likelihood.link(
(self.likelihood.cutpoints[i] - fmean) / fsd
) - self.likelihood.link((self.likelihood.cutpoints[i - 1] - fmean) / fsd)
probs[..., -1] = 1 - self.likelihood.link(
(self.likelihood.cutpoints[-1] - fmean) / fsd
)
return probs
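# --- Hedged illustration (not part of the original file) ---
# A standalone sketch of the cutpoint arithmetic in calculate_probs above,
# assuming a probit-style link (the actual OrdinalLikelihood link may differ).
# Given a latent mean/variance and n_levels - 1 ordered cutpoints, the level
# probabilities are differences of the link evaluated at standardized cutpoints.
import torch

link = torch.distributions.Normal(0.0, 1.0).cdf
cutpoints_demo = torch.tensor([-1.0, 0.0, 1.0, 2.0])        # 4 cutpoints -> 5 levels
fmean_demo, fvar_demo = torch.tensor(0.3), torch.tensor(0.5)
fsd_demo = torch.sqrt(1 + fvar_demo)

z = (cutpoints_demo - fmean_demo) / fsd_demo
probs_demo = torch.cat([link(z[:1]), link(z[1:]) - link(z[:-1]), 1 - link(z[-1:])])
assert torch.isclose(probs_demo.sum(), torch.tensor(1.0))   # probabilities over levels sum to 1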
|
aepsych-main
|
aepsych/models/ordinal_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import warnings
from typing import Dict, List, Optional, Sequence, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.acquisition.rejection_sampler import RejectionSampler
from aepsych.config import Config
from aepsych.factory.factory import monotonic_mean_covar_factory
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from aepsych.models.base import AEPsychMixin
from aepsych.models.utils import select_inducing_points
from aepsych.utils import _process_bounds, promote_0d
from botorch.fit import fit_gpytorch_mll
from gpytorch.kernels import Kernel
from gpytorch.likelihoods import BernoulliLikelihood, Likelihood
from gpytorch.means import Mean
from gpytorch.mlls.variational_elbo import VariationalELBO
from gpytorch.models import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy
from scipy.stats import norm
from torch import Tensor
class MonotonicRejectionGP(AEPsychMixin, ApproximateGP):
"""A monotonic GP using rejection sampling.
This takes the same insight as in e.g. Riihimäki & Vehtari 2010 (that the derivative of a GP
is likewise a GP) but instead of approximately optimizing the likelihood of the model
using EP, we optimize an unconstrained model by VI and then draw monotonic samples
by rejection sampling.
References:
Riihimäki, J., & Vehtari, A. (2010). Gaussian processes with monotonicity information.
Journal of Machine Learning Research, 9, 645–652.
"""
_num_outputs = 1
stimuli_per_trial = 1
outcome_type = "binary"
def __init__(
self,
monotonic_idxs: Sequence[int],
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
mean_module: Optional[Mean] = None,
covar_module: Optional[Kernel] = None,
likelihood: Optional[Likelihood] = None,
fixed_prior_mean: Optional[float] = None,
num_induc: int = 25,
num_samples: int = 250,
num_rejection_samples: int = 5000,
inducing_point_method: str = "auto",
) -> None:
"""Initialize MonotonicRejectionGP.
Args:
monotonic_idxs (Sequence[int]): Columns of x that should be given monotonicity
constraints.
lb (Union[np.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[np.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is
inferred from the size of lb and ub.
mean_module (Mean, optional): Mean module to use (default: constant mean).
covar_module (Kernel, optional): Covariance kernel to use (default: scaled RBF).
likelihood (Likelihood, optional): The likelihood function to use. Defaults to a
Bernoulli likelihood.
fixed_prior_mean (float, optional): Fixed prior mean. If classification, should be the prior
classification probability (not the latent function value). Defaults to None.
num_induc (int, optional): Number of inducing points for the variational GP. Defaults to 25.
num_samples (int, optional): Number of samples for estimating the posterior on predict or
acquisition function evaluation. Defaults to 250.
num_rejection_samples (int, optional): Number of samples used for rejection sampling. Defaults to 5000.
inducing_point_method (str, optional): Method used to select the inducing points. Defaults to "auto".
"""
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
if likelihood is None:
likelihood = BernoulliLikelihood()
self.inducing_size = num_induc
self.inducing_point_method = inducing_point_method
inducing_points = select_inducing_points(
inducing_size=self.inducing_size,
bounds=self.bounds,
method="sobol",
)
inducing_points_aug = self._augment_with_deriv_index(inducing_points, 0)
variational_distribution = CholeskyVariationalDistribution(
inducing_points_aug.size(0)
)
variational_strategy = VariationalStrategy(
model=self,
inducing_points=inducing_points_aug,
variational_distribution=variational_distribution,
learn_inducing_locations=False,
)
if mean_module is None:
mean_module = ConstantMeanPartialObsGrad()
if fixed_prior_mean is not None:
if isinstance(likelihood, BernoulliLikelihood):
fixed_prior_mean = norm.ppf(fixed_prior_mean)
mean_module.constant.requires_grad_(False)
mean_module.constant.copy_(torch.tensor(fixed_prior_mean))
if covar_module is None:
ls_prior = gpytorch.priors.GammaPrior(
concentration=4.6, rate=1.0, transform=lambda x: 1 / x
)
ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
ls_constraint = gpytorch.constraints.GreaterThan(
lower_bound=1e-4, transform=None, initial_value=ls_prior_mode
)
covar_module = gpytorch.kernels.ScaleKernel(
RBFKernelPartialObsGrad(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
super().__init__(variational_strategy)
self.bounds_ = torch.stack([self.lb, self.ub])
self.mean_module = mean_module
self.covar_module = covar_module
self.likelihood = likelihood
self.num_induc = num_induc
self.monotonic_idxs = monotonic_idxs
self.num_samples = num_samples
self.num_rejection_samples = num_rejection_samples
self.fixed_prior_mean = fixed_prior_mean
self.inducing_points = inducing_points
def fit(self, train_x: Tensor, train_y: Tensor, **kwargs) -> None:
"""Fit the model
Args:
train_x (Tensor): Training x points
train_y (Tensor): Training y points. Should be (n x 1).
"""
self.set_train_data(train_x, train_y)
self.inducing_points = select_inducing_points(
inducing_size=self.inducing_size,
covar_module=self.covar_module,
X=self.train_inputs[0],
bounds=self.bounds,
method=self.inducing_point_method,
)
self._set_model(train_x, train_y)
def _set_model(
self,
train_x: Tensor,
train_y: Tensor,
model_state_dict: Optional[Dict[str, Tensor]] = None,
likelihood_state_dict: Optional[Dict[str, Tensor]] = None,
) -> None:
train_x_aug = self._augment_with_deriv_index(train_x, 0)
self.set_train_data(train_x_aug, train_y)
# Set model parameters
if model_state_dict is not None:
self.load_state_dict(model_state_dict)
if likelihood_state_dict is not None:
self.likelihood.load_state_dict(likelihood_state_dict)
# Fit!
mll = VariationalELBO(
likelihood=self.likelihood, model=self, num_data=train_y.numel()
)
mll = fit_gpytorch_mll(mll)
def update(self, train_x: Tensor, train_y: Tensor, warmstart: bool = True) -> None:
"""
Update the model with new data.
Expects the full set of data, not the incremental new data.
Args:
train_x (Tensor): Train X.
train_y (Tensor): Train Y. Should be (n x 1).
warmstart (bool): If True, warm-start model fitting with current parameters.
"""
if warmstart:
model_state_dict = self.state_dict()
likelihood_state_dict = self.likelihood.state_dict()
else:
model_state_dict = None
likelihood_state_dict = None
self._set_model(
train_x=train_x,
train_y=train_y,
model_state_dict=model_state_dict,
likelihood_state_dict=likelihood_state_dict,
)
def sample(
self,
x: Tensor,
num_samples: Optional[int] = None,
num_rejection_samples: Optional[int] = None,
) -> torch.Tensor:
"""Sample from monotonic GP
Args:
x (Tensor): tensor of n points at which to sample
num_samples (int, optional): how many points to sample (default: self.num_samples)
Returns: a Tensor of shape [n_samp, n]
"""
if num_samples is None:
num_samples = self.num_samples
if num_rejection_samples is None:
num_rejection_samples = self.num_rejection_samples
rejection_ratio = 20
if num_samples * rejection_ratio > num_rejection_samples:
warnings.warn(
f"num_rejection_samples should be at least {rejection_ratio} times greater than num_samples."
)
n = x.shape[0]
# Augment with derivative index
x_aug = self._augment_with_deriv_index(x, 0)
# Add in monotonicity constraint points
deriv_cp = self._get_deriv_constraint_points()
x_aug = torch.cat((x_aug, deriv_cp), dim=0)
assert x_aug.shape[0] == x.shape[0] + len(self.monotonic_idxs) * self.inducing_points.shape[0]
constrained_idx = torch.arange(n, x_aug.shape[0])
with torch.no_grad():
posterior = self.posterior(x_aug)
sampler = RejectionSampler(
num_samples=num_samples,
num_rejection_samples=num_rejection_samples,
constrained_idx=constrained_idx,
)
samples = sampler(posterior)
samples_f = samples[:, :n, 0].detach().cpu()
return samples_f
def predict(
self, x: Tensor, probability_space: bool = False
) -> Tuple[Tensor, Tensor]:
"""Predict
Args:
x: tensor of n points at which to predict.
Returns: tuple (f, var) where f is (n,) and var is (n,)
"""
samples_f = self.sample(x)
mean = torch.mean(samples_f, dim=0).squeeze()
variance = torch.var(samples_f, dim=0).clamp_min(0).squeeze()
if probability_space:
return (
torch.Tensor(promote_0d(norm.cdf(mean))),
torch.Tensor(promote_0d(norm.cdf(variance))),
)
return mean, variance
def predict_probability(
self, x: Union[torch.Tensor, np.ndarray]
) -> Tuple[torch.Tensor, torch.Tensor]:
return self.predict(x, probability_space=True)
def _augment_with_deriv_index(self, x: Tensor, indx):
return torch.cat(
(x, indx * torch.ones(x.shape[0], 1)),
dim=1,
)
def _get_deriv_constraint_points(self):
deriv_cp = torch.tensor([])
for i in self.monotonic_idxs:
induc_i = self._augment_with_deriv_index(self.inducing_points, i + 1)
deriv_cp = torch.cat((deriv_cp, induc_i), dim=0)
return deriv_cp
@classmethod
def from_config(cls, config: Config) -> MonotonicRejectionGP:
classname = cls.__name__
num_induc = config.gettensor(classname, "num_induc", fallback=25)
num_samples = config.gettensor(classname, "num_samples", fallback=250)
num_rejection_samples = config.getint(
classname, "num_rejection_samples", fallback=5000
)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
mean_covar_factory = config.getobj(
classname, "mean_covar_factory", fallback=monotonic_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
monotonic_idxs: List[int] = config.getlist(
classname, "monotonic_idxs", fallback=[-1]
)
return cls(
monotonic_idxs=monotonic_idxs,
lb=lb,
ub=ub,
dim=dim,
num_induc=num_induc,
num_samples=num_samples,
num_rejection_samples=num_rejection_samples,
mean_module=mean,
covar_module=covar,
)
def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultivariateNormal:
"""Evaluate GP
Args:
x (torch.Tensor): Tensor of points at which GP should be evaluated.
Returns:
gpytorch.distributions.MultivariateNormal: Distribution object
holding mean and covariance at x.
"""
# final dim is deriv index, we only normalize the "real" dims
transformed_x = x.clone()
transformed_x[..., :-1] = self.normalize_inputs(transformed_x[..., :-1])
mean_x = self.mean_module(transformed_x)
covar_x = self.covar_module(transformed_x)
latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
return latent_pred
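# --- Hedged illustration (not part of the original file) ---
# A toy cartoon of the rejection idea above: draw unconstrained smooth function
# samples on a 1D grid and keep only those that are nondecreasing. This is not
# the actual RejectionSampler (which works on derivative values at constraint
# points); it only illustrates "sample unconstrained, reject non-monotone".
import torch

torch.manual_seed(0)
grid = torch.linspace(0, 1, 20)
# Fake "posterior": smooth random functions from an RBF covariance on the grid.
K = torch.exp(-0.5 * (grid[:, None] - grid[None, :]) ** 2 / 0.5**2) + 1e-4 * torch.eye(20)
L_chol = torch.linalg.cholesky(K)
samples = (L_chol @ torch.randn(20, 5000)).T  # [n_samp x n_grid]

monotone = (samples.diff(dim=1) >= 0).all(dim=1)
accepted = samples[monotone]
print(f"accepted {accepted.shape[0]} of {samples.shape[0]} samples")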
|
aepsych-main
|
aepsych/models/monotonic_rejection_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Any, List, Optional, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.models.gp_classification import GPClassificationModel
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch.likelihoods import Likelihood
from statsmodels.stats.moment_helpers import corr2cov, cov2corr
class MonotonicProjectionGP(GPClassificationModel):
"""A monotonic GP based on posterior projection
NOTE: This model does not currently support backprop and so cannot be used
with gradient optimization for active learning.
This model produces predictions that are monotonic in any number of
specified monotonic dimensions. It follows the intuition of the paper
Lin L, Dunson DB (2014) Bayesian monotone regression using Gaussian process
projection, Biometrika 101(2): 303-317.
but makes significant departures by using heuristics for a lot of what is
done in a more principled way in the paper. The reason for the move to
heuristics is to improve scaling, especially with multiple monotonic
dimensions.
The method in the paper applies PAVA projection at the sample level,
which requires a significant amount of costly GP posterior sampling. The
approach taken here applies rolling-max projection to quantiles of the
distribution, and so requires only marginal posterior evaluation. There is
also a significant departure in the way multiple monotonic dimensions are
handled, since in the paper computation scales exponentially with the
number of monotonic dimensions and the heuristic approach taken here scales
linearly in the number of dimensions.
The cost of these changes is that the convergence guarantees proven in the
paper no longer hold. The method implemented here is a heuristic, and it
may be useful in some problems.
The principle behind the method given here is that sample-level
monotonicity implies monotonicity in the quantiles. We enforce monotonicity
in several quantiles, and use that as an approximation for the true
projected posterior distribution.
The approach here also supports specifying a minimum value of f. That
minimum will be enforced on mu, but not necessarily on the lower bound
of the projected posterior since we keep the projected posterior normal.
The min f value will also be enforced on samples drawn from the model,
while monotonicity will not be enforced at the sample level.
The procedure for computing the monotonic projected posterior at x is:
1. Separately for each monotonic dimension, create a grid of s points that
differ only in that dimension, and sweep from the lower bound up to x.
2. Evaluate the marginal distribution, mu and sigma, on the full set of
points (x and the s grid points for each monotonic dimension).
3. Compute the mu +/- 2 * sigma quantiles.
4. Enforce monotonicity in the quantiles by taking mu_proj as the maximum
mu across the set, and lb_proj as the maximum of mu - 2 * sigma across the
set. ub_proj is left as mu(x) + 2 * sigma(x), but is clamped to mu_proj in
case that project put it above the original ub.
5. Clamp mu and lb to the minimum value for f, if one was set.
6. Construct a new normal posterior given the projected quantiles by taking
mu_proj as the mean, and (ub - lb) / 4 as the standard deviation. Adjust
the covariance matrix to account for the change in the marginal variances.
The process above requires only marginal posterior evaluation on the grid
of points used for the posterior projection, and the size of that grid
scales linearly with the number of monotonic dimensions, not exponentially.
The args here are the same as for GPClassificationModel with the addition
of:
Args:
monotonic_dims: A list of the dimensions on which monotonicity should
be enforced.
monotonic_grid_size: The size of the grid, s, in 1. above.
min_f_val: If provided, maintains this minimum in the projection in 5.
"""
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
monotonic_dims: List[int],
monotonic_grid_size: int = 20,
min_f_val: Optional[float] = None,
dim: Optional[int] = None,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
assert len(monotonic_dims) > 0
self.monotonic_dims = monotonic_dims
self.mon_grid_size = monotonic_grid_size
self.min_f_val = min_f_val
super().__init__(
lb=lb,
ub=ub,
dim=dim,
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
def posterior(
self,
X: torch.Tensor,
observation_noise: Union[bool, torch.Tensor] = False,
**kwargs: Any,
) -> GPyTorchPosterior:
# Augment X with monotonicity grid points, for each monotonic dim
n, d = X.shape # Require no batch dimensions
m = len(self.monotonic_dims)
s = self.mon_grid_size
X_aug = X.repeat(s * m + 1, 1, 1)
for i, dim in enumerate(self.monotonic_dims):
# using numpy because torch doesn't support vectorized linspace,
# pytorch/issues/61292
grid: Union[np.ndarray, torch.Tensor] = np.linspace(
self.lb[dim],
X[:, dim].numpy(),
s + 1,
) # (s+1 x n)
grid = torch.tensor(grid[:-1, :], dtype=X.dtype) # Drop x; (s x n)
X_aug[(1 + i * s) : (1 + (i + 1) * s), :, dim] = grid
# X_aug[0, :, :] is X, and then subsequent indices are points in the grids
# Predict marginal distributions on X_aug
with torch.no_grad():
post_aug = super().posterior(X=X_aug)
mu_aug = post_aug.mean.squeeze() # (m*s+1 x n)
var_aug = post_aug.variance.squeeze() # (m*s+1 x n)
mu_proj = mu_aug.max(dim=0).values
lb_proj = (mu_aug - 2 * torch.sqrt(var_aug)).max(dim=0).values
if self.min_f_val is not None:
mu_proj = mu_proj.clamp(min=self.min_f_val)
lb_proj = lb_proj.clamp(min=self.min_f_val)
ub_proj = (mu_aug[0, :] + 2 * torch.sqrt(var_aug[0, :])).clamp(min=mu_proj)
sigma_proj = ((ub_proj - lb_proj) / 4).clamp(min=1e-4)
# Adjust the whole covariance matrix to accommodate the projected marginals
with torch.no_grad():
post = super().posterior(X=X)
R = cov2corr(post.distribution.covariance_matrix.squeeze().numpy())
S_proj = torch.tensor(corr2cov(R, sigma_proj.numpy()), dtype=X.dtype)
mvn_proj = gpytorch.distributions.MultivariateNormal(
mu_proj.unsqueeze(0),
S_proj.unsqueeze(0),
)
return GPyTorchPosterior(mvn_proj)
def sample(
self, x: Union[torch.Tensor, np.ndarray], num_samples: int
) -> torch.Tensor:
samps = super().sample(x=x, num_samples=num_samples)
if self.min_f_val is not None:
samps = samps.clamp(min=self.min_f_val)
return samps
@classmethod
def from_config(cls, config: Config) -> MonotonicProjectionGP:
"""Alternate constructor for MonotonicProjectionGP model.
This is used when we recursively build a full sampling strategy
from a configuration. TODO: document how this works in some tutorial.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
MonotonicProjectionGP: Configured class instance.
"""
classname = cls.__name__
inducing_size = config.getint(classname, "inducing_size", fallback=10)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
mean_covar_factory = config.getobj(
classname, "mean_covar_factory", fallback=default_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
monotonic_dims: List[int] = config.getlist(
classname, "monotonic_dims", fallback=[-1]
)
monotonic_grid_size = config.getint(
classname, "monotonic_grid_size", fallback=20
)
min_f_val = config.getfloat(classname, "min_f_val", fallback=None)
return cls(
lb=lb,
ub=ub,
dim=dim,
inducing_size=inducing_size,
mean_module=mean,
covar_module=covar,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
likelihood=likelihood,
monotonic_dims=monotonic_dims,
monotonic_grid_size=monotonic_grid_size,
min_f_val=min_f_val,
)
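# --- Hedged illustration (not part of the original file) ---
# A standalone numeric sketch of the quantile-projection heuristic documented
# in the class docstring above (steps 3-6): given marginal means/variances at x
# (index 0) and at grid points sweeping up to x along a monotonic dimension,
# project mu and the lower quantile by taking maxima, clamp the upper quantile,
# and rebuild sigma from the projected quantiles. Numbers are arbitrary.
import torch

mu_aug_demo = torch.tensor([0.2, 0.5, 0.1, 0.3])       # mu at x, then at grid points
var_aug_demo = torch.tensor([0.04, 0.09, 0.04, 0.01])  # matching marginal variances

mu_proj = mu_aug_demo.max()
lb_proj = (mu_aug_demo - 2 * var_aug_demo.sqrt()).max()
ub_proj = (mu_aug_demo[0] + 2 * var_aug_demo[0].sqrt()).clamp(min=mu_proj)
sigma_proj = ((ub_proj - lb_proj) / 4).clamp(min=1e-4)
print(mu_proj.item(), sigma_proj.item())  # 0.5 and (0.6 - 0.1) / 4 = 0.125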
|
aepsych-main
|
aepsych/models/monotonic_projection_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .exact_gp import ContinuousRegressionGP, ExactGP
from .gp_classification import GPBetaRegressionModel, GPClassificationModel
from .gp_regression import GPRegressionModel
from .monotonic_projection_gp import MonotonicProjectionGP
from .monotonic_rejection_gp import MonotonicRejectionGP
from .multitask_regression import IndependentMultitaskGPRModel, MultitaskGPRModel
from .ordinal_gp import OrdinalGPModel
from .pairwise_probit import PairwiseProbitModel
from .semi_p import (
HadamardSemiPModel,
semi_p_posterior_transform,
SemiParametricGPModel,
)
from .variational_gp import BetaRegressionGP, BinaryClassificationGP, OrdinalGP, VariationalGP
__all__ = [
"GPClassificationModel",
"MonotonicRejectionGP",
"GPRegressionModel",
"PairwiseProbitModel",
"OrdinalGPModel",
"MonotonicProjectionGP",
"VariationalGP",
"BinaryClassificationGP",
"BetaRegressionGP",
"ExactGP",
"ContinuousRegressionGP",
"MultitaskGPRModel",
"IndependentMultitaskGPRModel",
"HadamardSemiPModel",
"SemiParametricGPModel",
"semi_p_posterior_transform",
"OrdinalGP",
"GPBetaRegressionModel",
]
Config.register_module(sys.modules[__name__])
|
aepsych-main
|
aepsych/models/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory.factory import ordinal_mean_covar_factory
from aepsych.likelihoods.ordinal import OrdinalLikelihood
from aepsych.models.base import AEPsychModel
from aepsych.models.ordinal_gp import OrdinalGPModel
from aepsych.models.utils import get_probability_space, select_inducing_points
from aepsych.utils import get_dim
from botorch.models import SingleTaskVariationalGP
from gpytorch.likelihoods import BernoulliLikelihood, BetaLikelihood
from gpytorch.mlls import VariationalELBO
# TODO: Find a better way to do this on the Ax/Botorch side
class MyHackyVariationalELBO(VariationalELBO):
def __init__(self, likelihood, model, beta=1.0, combine_terms=True):
num_data = model.model.train_targets.shape[0]
super().__init__(likelihood, model.model, num_data, beta, combine_terms)
class VariationalGP(AEPsychModel, SingleTaskVariationalGP):
@classmethod
def get_mll_class(cls):
return MyHackyVariationalELBO
@classmethod
def construct_inputs(cls, training_data, **kwargs):
inputs = super().construct_inputs(training_data=training_data, **kwargs)
inducing_size = kwargs.get("inducing_size")
inducing_point_method = kwargs.get("inducing_point_method")
bounds = kwargs.get("bounds")
inducing_points = select_inducing_points(
inducing_size,
inputs["covar_module"],
inputs["train_X"],
bounds,
inducing_point_method,
)
inputs.update(
{
"inducing_points": inducing_points,
}
)
return inputs
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None) -> Dict:
classname = cls.__name__
options = super().get_config_options(config, classname)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
inducing_size = config.getint(classname, "inducing_size", fallback=100)
learn_inducing_points = config.getboolean(
classname, "learn_inducing_points", fallback=False
)
options.update(
{
"inducing_size": inducing_size,
"inducing_point_method": inducing_point_method,
"learn_inducing_points": learn_inducing_points,
}
)
return options
class BinaryClassificationGP(VariationalGP):
stimuli_per_trial = 1
outcome_type = "binary"
def predict_probability(
self, x: Union[torch.Tensor, np.ndarray]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance, in probability space, at the queried points.
"""
with torch.no_grad():
post = self.posterior(x)
fmean, fvar = get_probability_space(
likelihood=self.likelihood, posterior=post
)
return fmean, fvar
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None):
options = super().get_config_options(config)
if options["likelihood"] is None:
options["likelihood"] = BernoulliLikelihood()
return options
class BetaRegressionGP(VariationalGP):
outcome_type = "percentage"
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None):
options = super().get_config_options(config)
if options["likelihood"] is None:
options["likelihood"] = BetaLikelihood()
return options
class OrdinalGP(VariationalGP):
"""
Convenience class for using a VariationalGP with an OrdinalLikelihood.
"""
outcome_type = "ordinal"
def predict_probability(self, x: Union[torch.Tensor, np.ndarray]):
fmean, fvar = super().predict(x)
return OrdinalGPModel.calculate_probs(self, fmean, fvar)
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None):
options = super().get_config_options(config)
if options["likelihood"] is None:
options["likelihood"] = OrdinalLikelihood(n_levels=5)
dim = get_dim(config)
if config.getobj(cls.__name__, "mean_covar_factory", fallback=None) is None:
mean, covar = ordinal_mean_covar_factory(config)
options["mean_covar_factory"] = (mean, covar)
ls_prior = gpytorch.priors.GammaPrior(concentration=1.5, rate=3.0)
ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate
ls_constraint = gpytorch.constraints.Positive(
transform=None, initial_value=ls_prior_mode
)
# no outputscale due to shift identifiability in d.
covar_module = gpytorch.kernels.RBFKernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
)
options["covar_module"] = covar_module
assert options["inducing_size"] >= 1, "Inducing size must be non-zero."
return options
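# --- Hedged illustration (not part of the original file) ---
# A standalone sketch of what a "sobol" inducing-point choice could look like:
# draw inducing_size quasi-random points in the unit cube and rescale them to
# the parameter bounds. This is an assumption about the spirit of that option,
# not the actual select_inducing_points implementation.
import torch

lb_demo, ub_demo = torch.tensor([0.0, 0.0]), torch.tensor([1.0, 5.0])
inducing_size_demo = 100
engine = torch.quasirandom.SobolEngine(dimension=2, scramble=True)
inducing_points_demo = lb_demo + (ub_demo - lb_demo) * engine.draw(inducing_size_demo)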
|
aepsych-main
|
aepsych/models/variational_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from typing import Optional, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.models.base import AEPsychMixin
from aepsych.models.utils import select_inducing_points
from aepsych.utils import _process_bounds, promote_0d
from aepsych.utils_logging import getLogger
from gpytorch.likelihoods import BernoulliLikelihood, BetaLikelihood, Likelihood
from gpytorch.models import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy
from scipy.special import owens_t
from scipy.stats import norm
from torch.distributions import Normal
logger = getLogger()
class GPClassificationModel(AEPsychMixin, ApproximateGP):
"""Probit-GP model with variational inference.
From a conventional ML perspective this is a GP Classification model,
though in the psychophysics context it can also be thought of as a
nonlinear generalization of the standard linear model for 1AFC or
yes/no trials.
For more on variational inference, see e.g.
https://docs.gpytorch.ai/en/v1.1.1/examples/04_Variational_and_Approximate_GPs/
"""
_batch_size = 1
_num_outputs = 1
stimuli_per_trial = 1
outcome_type = "binary"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
"""Initialize the GP Classification model
Args:
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior.
covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a
gamma prior.
likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to
Bernoulli likelihood.
inducing_size (int): Number of inducing points. Defaults to 100.
max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
there is no limit to the fitting time.
inducing_point_method (string): The method to use to select the inducing points. Defaults to "auto".
If "sobol", a number of Sobol points equal to inducing_size will be selected.
If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
If "kmeans++", selects points by performing kmeans++ clustering on the training data.
If "auto", tries to determine the best method automatically.
"""
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.max_fit_time = max_fit_time
self.inducing_size = inducing_size
if likelihood is None:
likelihood = BernoulliLikelihood()
self.inducing_point_method = inducing_point_method
# initialize to sobol before we have data
inducing_points = select_inducing_points(
inducing_size=self.inducing_size, bounds=self.bounds, method="sobol"
)
variational_distribution = CholeskyVariationalDistribution(
inducing_points.size(0), batch_shape=torch.Size([self._batch_size])
)
variational_strategy = VariationalStrategy(
self,
inducing_points,
variational_distribution,
learn_inducing_locations=False,
)
super().__init__(variational_strategy)
if mean_module is None or covar_module is None:
default_mean, default_covar = default_mean_covar_factory(dim=self.dim)
self.mean_module = mean_module or default_mean
self.covar_module = covar_module or default_covar
self.likelihood = likelihood
self._fresh_state_dict = deepcopy(self.state_dict())
self._fresh_likelihood_dict = deepcopy(self.likelihood.state_dict())
@classmethod
def from_config(cls, config: Config) -> GPClassificationModel:
"""Alternate constructor for GPClassification model.
This is used when we recursively build a full sampling strategy
from a configuration. TODO: document how this works in some tutorial.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
GPClassificationModel: Configured class instance.
"""
classname = cls.__name__
inducing_size = config.getint(classname, "inducing_size", fallback=10)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
mean_covar_factory = config.getobj(
classname, "mean_covar_factory", fallback=default_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
return cls(
lb=lb,
ub=ub,
dim=dim,
inducing_size=inducing_size,
mean_module=mean,
covar_module=covar,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
likelihood=likelihood,
)
def _reset_hyperparameters(self):
# warmstart_hyperparams affects hyperparams but not the variational strat,
# so we keep the old variational strat (which is only refreshed
# if warmstart_induc=False).
vsd = self.variational_strategy.state_dict() # type: ignore
vsd_hack = {f"variational_strategy.{k}": v for k, v in vsd.items()}
state_dict = deepcopy(self._fresh_state_dict)
state_dict.update(vsd_hack)
self.load_state_dict(state_dict)
self.likelihood.load_state_dict(self._fresh_likelihood_dict)
def _reset_variational_strategy(self):
inducing_points = select_inducing_points(
inducing_size=self.inducing_size,
covar_module=self.covar_module,
X=self.train_inputs[0],
bounds=self.bounds,
method=self.inducing_point_method,
)
variational_distribution = CholeskyVariationalDistribution(
inducing_points.size(0), batch_shape=torch.Size([self._batch_size])
)
self.variational_strategy = VariationalStrategy(
self,
inducing_points,
variational_distribution,
learn_inducing_locations=False,
)
def fit(
self,
train_x: torch.Tensor,
train_y: torch.Tensor,
warmstart_hyperparams: bool = False,
warmstart_induc: bool = False,
**kwargs,
) -> None:
"""Fit underlying model.
Args:
train_x (torch.Tensor): Inputs.
train_y (torch.LongTensor): Responses.
warmstart_hyperparams (bool): Whether to reuse the previous hyperparameters (True) or fit from scratch
(False). Defaults to False.
warmstart_induc (bool): Whether to reuse the previous inducing points or fit from scratch (False).
Defaults to False.
"""
self.set_train_data(train_x, train_y)
# by default we reuse the model state and likelihood. If we
# want a fresh fit (no warm start), copy the state from class initialization.
if not warmstart_hyperparams:
self._reset_hyperparameters()
if not warmstart_induc:
self._reset_variational_strategy()
n = train_y.shape[0]
mll = gpytorch.mlls.VariationalELBO(self.likelihood, self, n)
self._fit_mll(mll, **kwargs)
def sample(
self, x: Union[torch.Tensor, np.ndarray], num_samples: int
) -> torch.Tensor:
"""Sample from underlying model.
Args:
x (torch.Tensor): Points at which to sample.
num_samples (int): Number of samples to return.
Returns:
torch.Tensor: Posterior samples [num_samples x n], where n is the number of query points.
"""
return self.posterior(x).rsample(torch.Size([num_samples])).detach().squeeze()
def predict(
self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
probability_space (bool, optional): Return outputs in units of
response probability instead of latent function value. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at the queried points.
"""
with torch.no_grad():
post = self.posterior(x)
fmean = post.mean.squeeze()
fvar = post.variance.squeeze()
if probability_space:
if isinstance(self.likelihood, BernoulliLikelihood):
# Probability-space mean and variance for Bernoulli-probit models is
# available in closed form, Proposition 1 in Letham et al. 2022 (AISTATS).
a_star = fmean / torch.sqrt(1 + fvar)
pmean = Normal(0, 1).cdf(a_star)
t_term = torch.tensor(
owens_t(a_star.numpy(), 1 / np.sqrt(1 + 2 * fvar.numpy())),
dtype=a_star.dtype,
)
pvar = pmean - 2 * t_term - pmean.square()
return promote_0d(pmean), promote_0d(pvar)
else:
fsamps = post.sample(torch.Size([10000]))
if hasattr(self.likelihood, "objective"):
psamps = self.likelihood.objective(fsamps)
else:
psamps = norm.cdf(fsamps)
pmean, pvar = psamps.mean(0), psamps.var(0)
return promote_0d(pmean), promote_0d(pvar)
else:
return promote_0d(fmean), promote_0d(fvar)
def predict_probability(
self, x: Union[torch.Tensor, np.ndarray]
) -> Tuple[torch.Tensor, torch.Tensor]:
return self.predict(x, probability_space=True)
def update(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs):
"""Perform a warm-start update of the model from previous fit."""
return self.fit(
train_x, train_y, warmstart_hyperparams=True, warmstart_induc=True, **kwargs
)
class GPBetaRegressionModel(GPClassificationModel):
outcome_type = "percentage"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
if likelihood is None:
likelihood = BetaLikelihood()
super().__init__(
lb=lb,
ub=ub,
dim=dim,
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
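# --- Hedged illustration (not part of the original file) ---
# A quick standalone check of the closed-form probability-space moments used in
# GPClassificationModel.predict above: for f ~ N(fmean, fvar) pushed through the
# standard normal CDF, the mean is Phi(a*) and the variance involves Owen's T
# function. A Monte Carlo estimate should agree to a couple of decimal places.
import numpy as np
import torch
from scipy.special import owens_t
from torch.distributions import Normal

fmean_demo, fvar_demo = torch.tensor(0.4), torch.tensor(0.6)
a_star = fmean_demo / torch.sqrt(1 + fvar_demo)
pmean_cf = Normal(0, 1).cdf(a_star)
t_term = torch.tensor(owens_t(a_star.numpy(), 1 / np.sqrt(1 + 2 * fvar_demo.numpy())))
pvar_cf = pmean_cf - 2 * t_term - pmean_cf.square()

fsamps_mc = fmean_demo + torch.sqrt(fvar_demo) * torch.randn(200_000)
psamps_mc = Normal(0, 1).cdf(fsamps_mc)
print(float(pmean_cf), float(psamps_mc.mean()))  # closed form vs. Monte Carlo mean
print(float(pvar_cf), float(psamps_mc.var()))    # closed form vs. Monte Carlo variance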
|
aepsych-main
|
aepsych/models/gp_classification.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from typing import Dict, Optional, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds, promote_0d
from aepsych.utils_logging import getLogger
from gpytorch.likelihoods import GaussianLikelihood, Likelihood
from gpytorch.models import ExactGP
logger = getLogger()
class GPRegressionModel(AEPsychMixin, ExactGP):
"""GP Regression model for continuous outcomes, using exact inference."""
_num_outputs = 1
_batch_size = 1
stimuli_per_trial = 1
outcome_type = "continuous"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
max_fit_time: Optional[float] = None,
):
"""Initialize the GP regression model
Args:
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior.
covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a
gamma prior.
likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to
Gaussian likelihood.
max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
there is no limit to the fitting time.
"""
if likelihood is None:
likelihood = GaussianLikelihood()
super().__init__(None, None, likelihood)
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.max_fit_time = max_fit_time
if mean_module is None or covar_module is None:
default_mean, default_covar = default_mean_covar_factory(dim=self.dim)
self.mean_module = mean_module or default_mean
self.covar_module = covar_module or default_covar
self._fresh_state_dict = deepcopy(self.state_dict())
self._fresh_likelihood_dict = deepcopy(self.likelihood.state_dict())
@classmethod
def construct_inputs(cls, config: Config) -> Dict:
classname = cls.__name__
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
mean_covar_factory = config.getobj(
classname, "mean_covar_factory", fallback=default_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
return {
"lb": lb,
"ub": ub,
"dim": dim,
"mean_module": mean,
"covar_module": covar,
"likelihood": likelihood,
"max_fit_time": max_fit_time,
}
@classmethod
def from_config(cls, config: Config) -> GPRegressionModel:
"""Alternate constructor for GP regression model.
This is used when we recursively build a full sampling strategy
from a configuration. TODO: document how this works in some tutorial.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
GPRegressionModel: Configured class instance.
"""
args = cls.construct_inputs(config)
return cls(**args)
def fit(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs) -> None:
"""Fit underlying model.
Args:
train_x (torch.Tensor): Inputs.
train_y (torch.Tensor): Responses.
"""
self.set_train_data(train_x, train_y)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
return self._fit_mll(mll, **kwargs)
def sample(
self, x: Union[torch.Tensor, np.ndarray], num_samples: int
) -> torch.Tensor:
"""Sample from underlying model.
Args:
x (torch.Tensor): Points at which to sample.
num_samples (int): Number of samples to return.
Returns:
torch.Tensor: Posterior samples [num_samples x n], where n is the number of query points.
"""
return self.posterior(x).rsample(torch.Size([num_samples])).detach().squeeze()
def update(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs):
"""Perform a warm-start update of the model from previous fit."""
return self.fit(train_x, train_y, **kwargs)
def predict(
self, x: Union[torch.Tensor, np.ndarray], **kwargs
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at the queried points.
"""
with torch.no_grad():
post = self.posterior(x)
fmean = post.mean.squeeze()
fvar = post.variance.squeeze()
return promote_0d(fmean), promote_0d(fvar)
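# --- Hedged usage sketch (not part of the original file) ---
# A minimal end-to-end use of the exact-GP regression model above on a toy 1D
# problem, assuming aepsych and its dependencies are installed; exact fitting
# behavior depends on the installed aepsych/gpytorch versions. Data and sizes
# are arbitrary.
if __name__ == "__main__":
    demo_model = GPRegressionModel(lb=torch.tensor([0.0]), ub=torch.tensor([1.0]))
    demo_x = torch.rand(30, 1)
    demo_y = torch.sin(6 * demo_x).squeeze() + 0.1 * torch.randn(30)
    demo_model.fit(demo_x, demo_y)
    fmean_demo, fvar_demo = demo_model.predict(torch.linspace(0, 1, 50).unsqueeze(-1))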
|
aepsych-main
|
aepsych/models/gp_regression.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional, Union
import gpytorch
import torch
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from botorch.models.gpytorch import GPyTorchModel
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import Kernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.means import Mean
from gpytorch.priors.torch_priors import GammaPrior
from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy
class MixedDerivativeVariationalGP(gpytorch.models.ApproximateGP, GPyTorchModel):
"""A variational GP with mixed derivative observations.
For more on GPs with derivative observations, see e.g. Riihimaki & Vehtari 2010.
References:
Riihimäki, J., & Vehtari, A. (2010). Gaussian processes with
monotonicity information. Journal of Machine Learning Research, 9, 645–652.
"""
def __init__(
self,
train_x: torch.Tensor,
train_y: torch.Tensor,
inducing_points: torch.Tensor,
scales: Union[torch.Tensor, float] = 1.0,
mean_module: Optional[Mean] = None,
covar_module: Optional[Kernel] = None,
fixed_prior_mean: Optional[float] = None,
) -> None:
"""Initialize MixedDerivativeVariationalGP
Args:
train_x (torch.Tensor): Training x points. The last column of x is the derivative
indicator: 0 if it is an observation of f(x), and i if it
is an observation of df/dx_i.
train_y (torch.Tensor): Training y points
inducing_points (torch.Tensor): Inducing points to use
scales (Union[torch.Tensor, float], optional): Typical scale of each dimension
of input space (this is used to set the lengthscale prior).
Defaults to 1.0.
mean_module (Mean, optional): A mean class that supports derivative
indexes as the final dim. Defaults to a constant mean.
covar_module (Kernel, optional): A covariance kernel class that
supports derivative indexes as the final dim. Defaults to RBF kernel.
fixed_prior_mean (float, optional): A prior mean value to use with the
constant mean. Often setting this to the target threshold speeds
up experiments. Defaults to None, in which case the mean will be inferred.
"""
variational_distribution = CholeskyVariationalDistribution(
inducing_points.size(0)
)
variational_distribution.to(train_x)
variational_strategy = VariationalStrategy(
model=self,
inducing_points=inducing_points,
variational_distribution=variational_distribution,
learn_inducing_locations=False,
)
super(MixedDerivativeVariationalGP, self).__init__(variational_strategy)
# Set the mean module if specified, otherwise default to a constant mean with derivative support
if mean_module is None:
self.mean_module = ConstantMeanPartialObsGrad()
else:
self.mean_module = mean_module
if fixed_prior_mean is not None:
self.mean_module.constant.requires_grad_(False)
self.mean_module.constant.copy_(
torch.tensor(fixed_prior_mean, dtype=train_x.dtype)
)
if covar_module is None:
self.base_kernel = RBFKernelPartialObsGrad(
ard_num_dims=train_x.shape[-1] - 1,
lengthscale_prior=GammaPrior(3.0, 6.0 / scales),
)
self.covar_module = ScaleKernel(
self.base_kernel, outputscale_prior=GammaPrior(2.0, 0.15)
)
else:
self.covar_module = covar_module
self._num_outputs = 1
self.train_inputs = (train_x,)
self.train_targets = train_y
self(train_x) # Necessary for CholeskyVariationalDistribution
def forward(self, x: torch.Tensor) -> MultivariateNormal:
"""Evaluate the model
Args:
x (torch.Tensor): Points at which to evaluate.
Returns:
MultivariateNormal: Object containing mean and covariance
of GP at these points.
"""
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
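# --- Hedged illustration (not part of the original file) ---
# A small sketch of the input convention documented above: each row of train_x
# carries its coordinates plus a final derivative-index column (0 = observation
# of f, i = observation of df/dx_i). Values below are illustrative only.
import torch

coords_demo = torch.tensor([[0.2, 0.7], [0.5, 0.1], [0.9, 0.9]])
deriv_idx_demo = torch.tensor([[0.0], [0.0], [1.0]])            # last row observes df/dx_1
train_x_demo = torch.cat([coords_demo, deriv_idx_demo], dim=1)  # [3 x (dim + 1)]
train_y_demo = torch.tensor([0.0, 1.0, 0.3])                    # matching (illustrative) targets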
|
aepsych-main
|
aepsych/models/derivative_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional
import gpytorch
import torch
from aepsych.models import GPRegressionModel
class MultitaskGPRModel(GPRegressionModel):
"""
Multitask (multi-output) GP regression, using a kronecker-separable model
where [a] each output is observed at each input, and [b] the kernel between
two outputs at two points is given by k_x(x, x') * k_t[i, j] where k(x, x')
is the usual GP kernel and k_t[i, j] is indexing into a freeform covariance
of potentially low rank.
This essentially implements / wraps the GPyTorch multitask GPR tutorial
in https://docs.gpytorch.ai/en/stable/examples/03_Multitask_Exact_GPs/Multitask_GP_Regression.html
with AEPsych API and convenience fitting / prediction methods.
"""
_num_outputs = 1
_batch_size = 1
stimuli_per_trial = 1
outcome_type = "continuous"
def __init__(
self,
num_outputs: int = 2,
rank: int = 1,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[gpytorch.likelihoods.Likelihood] = None,
*args,
**kwargs,
):
"""Initialize multitask GPR model.
Args:
num_outputs (int, optional): Number of tasks (outputs). Defaults to 2.
rank (int, optional): Rank of cross-task covariance. Lower rank is a simpler model.
Should be less than or equal to num_outputs. Defaults to 1.
mean_module (Optional[gpytorch.means.Mean], optional): GP mean. Defaults to a constant mean.
covar_module (Optional[gpytorch.kernels.Kernel], optional): GP kernel module.
Defaults to scaled RBF kernel.
likelihood (Optional[gpytorch.likelihoods.Likelihood], optional): Likelihood
(should be a multitask-compatible likelihood). Defaults to multitask Gaussian likelihood.
"""
self._num_outputs = num_outputs
self.rank = rank
likelihood = likelihood or gpytorch.likelihoods.MultitaskGaussianLikelihood(
num_tasks=self._num_outputs
)
super().__init__(
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
*args,
**kwargs,
) # type: ignore # mypy issue 4335
self.mean_module = gpytorch.means.MultitaskMean(
self.mean_module, num_tasks=num_outputs
)
self.covar_module = gpytorch.kernels.MultitaskKernel(
self.covar_module, num_tasks=num_outputs, rank=rank
)
def forward(self, x):
transformed_x = self.normalize_inputs(x)
mean_x = self.mean_module(transformed_x)
covar_x = self.covar_module(transformed_x)
return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)
@classmethod
def construct_inputs(cls, config):
classname = cls.__name__
args = super().construct_inputs(config)
args["num_outputs"] = config.getint(classname, "num_outputs", 2)
args["rank"] = config.getint(classname, "rank", 1)
return args
class IndependentMultitaskGPRModel(GPRegressionModel):
"""Independent multitask GP regression. This is a convenience wrapper for
fitting a batch of independent GPRegression models. It wraps the GPyTorch tutorial here
https://docs.gpytorch.ai/en/stable/examples/03_Multitask_Exact_GPs/Batch_Independent_Multioutput_GP.html
with AEPsych API and convenience fitting / prediction methods.
"""
_num_outputs = 1
_batch_size = 1
stimuli_per_trial = 1
outcome_type = "continuous"
def __init__(
self,
num_outputs: int = 2,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[gpytorch.likelihoods.Likelihood] = None,
*args,
**kwargs,
):
"""Initialize independent multitask GPR model.
Args:
num_outputs (int, optional): Number of tasks (outputs). Defaults to 2.
mean_module (Optional[gpytorch.means.Mean], optional): GP mean. Defaults to a constant mean.
covar_module (Optional[gpytorch.kernels.Kernel], optional): GP kernel module.
Defaults to scaled RBF kernel.
likelihood (Optional[gpytorch.likelihoods.Likelihood], optional): Likelihood
(should be a multitask-compatible likelihood). Defaults to multitask Gaussian likelihood.
"""
self._num_outputs = num_outputs
self._batch_size = num_outputs
self._batch_shape = torch.Size([num_outputs])
mean_module = mean_module or gpytorch.means.ConstantMean(
batch_shape=self._batch_shape
)
covar_module = covar_module or gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(batch_shape=self._batch_shape),
batch_shape=self._batch_shape,
)
likelihood = likelihood or gpytorch.likelihoods.MultitaskGaussianLikelihood(
num_tasks=self._batch_shape[0]
)
super().__init__(
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
*args,
**kwargs,
) # type: ignore # mypy issue 4335
def forward(self, x):
base_mvn = super().forward(x) # do transforms
return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(
base_mvn
)
@classmethod
def get_config_args(cls, config):
classname = cls.__name__
args = super().get_config_args(config)
args["num_outputs"] = config.getint(classname, "num_outputs", 2)
return args
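
# Hedged sketch (added for illustration; not part of the upstream aepsych file).
# It builds the Kronecker-separable multitask structure that MultitaskGPRModel wraps
# directly from gpytorch pieces (the AEPsych classes above add config parsing and
# fitting conveniences on top). Values and shapes here are made up.
if __name__ == "__main__":
    mean = gpytorch.means.MultitaskMean(gpytorch.means.ConstantMean(), num_tasks=2)
    covar = gpytorch.kernels.MultitaskKernel(
        gpytorch.kernels.RBFKernel(), num_tasks=2, rank=1
    )
    x = torch.linspace(0, 1, 5).unsqueeze(-1)
    mvn = gpytorch.distributions.MultitaskMultivariateNormal(mean(x), covar(x))
    print(mvn.mean.shape)  # expected: torch.Size([5, 2]) -- one mean per input per task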
|
aepsych-main
|
aepsych/models/multitask_regression.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import List, Mapping, Optional, Tuple, Union
import numpy as np
import torch
from botorch.acquisition import PosteriorMean
from botorch.models.model import Model
from botorch.models.utils.inducing_point_allocators import GreedyVarianceReduction
from botorch.optim import optimize_acqf
from botorch.utils.sampling import draw_sobol_samples
from gpytorch.kernels import Kernel
from gpytorch.likelihoods import BernoulliLikelihood
from scipy.cluster.vq import kmeans2
from scipy.special import owens_t
from scipy.stats import norm
from torch.distributions import Normal
def compute_p_quantile(
f_mean: torch.Tensor, f_std: torch.Tensor, alpha: Union[torch.Tensor, float]
) -> torch.Tensor:
"""Compute quantile of p in probit model
For f ~ N(mu_f, sigma_f^2) and p = Phi(f), computes the alpha quantile of p
using the formula
x = Phi(mu_f + Phi^-1(alpha) * sigma_f),
which solves for x such that P(p <= x) = alpha.
A 95% CI for p can be computed as
p_l = compute_p_quantile(f_mean, f_std, 0.025)
p_u = compute_p_quantile(f_mean, f_std, 0.975)
"""
norm = torch.distributions.Normal(0, 1)
alpha = torch.tensor(alpha, dtype=f_mean.dtype)
return norm.cdf(f_mean + norm.icdf(alpha) * f_std)
def select_inducing_points(
inducing_size: int,
    covar_module: Optional[Kernel] = None,
X: Optional[torch.Tensor] = None,
bounds: Optional[Union[torch.Tensor, np.ndarray]] = None,
method: str = "auto",
) -> torch.Tensor:
    """Select inducing points for a variational GP, using the specified method."""
with torch.no_grad():
assert method in (
"pivoted_chol",
"kmeans++",
"auto",
"sobol",
), f"Inducing point method should be one of pivoted_chol, kmeans++, sobol, or auto; got {method}"
if method == "sobol":
assert bounds is not None, "Must pass bounds for sobol inducing points!"
inducing_points = draw_sobol_samples(
bounds=bounds, n=inducing_size, q=1
).squeeze()
if len(inducing_points.shape) == 1:
inducing_points = inducing_points.reshape(-1, 1)
return inducing_points
assert X is not None, "Must pass X for non-sobol inducing point selection!"
# remove dupes from X, which is both wasteful for inducing points
# and would break kmeans++
unique_X = torch.unique(X, dim=0)
if method == "auto":
if unique_X.shape[0] <= inducing_size:
return unique_X
else:
method = "kmeans++"
if method == "pivoted_chol":
inducing_point_allocator = GreedyVarianceReduction()
inducing_points = inducing_point_allocator.allocate_inducing_points(
inputs=X,
covar_module=covar_module,
num_inducing=inducing_size,
input_batch_shape=torch.Size([]),
)
elif method == "kmeans++":
# initialize using kmeans
inducing_points = torch.tensor(
kmeans2(unique_X.numpy(), inducing_size, minit="++")[0],
dtype=X.dtype,
)
return inducing_points
def get_probability_space(likelihood, posterior):
    """Compute the posterior mean and variance in probability space for the given likelihood."""
fmean = posterior.mean.squeeze()
fvar = posterior.variance.squeeze()
if isinstance(likelihood, BernoulliLikelihood):
# Probability-space mean and variance for Bernoulli-probit models is
# available in closed form, Proposition 1 in Letham et al. 2022 (AISTATS).
a_star = fmean / torch.sqrt(1 + fvar)
pmean = Normal(0, 1).cdf(a_star)
t_term = torch.tensor(
owens_t(a_star.numpy(), 1 / np.sqrt(1 + 2 * fvar.numpy())),
dtype=a_star.dtype,
)
pvar = pmean - 2 * t_term - pmean.square()
else:
fsamps = posterior.sample(torch.Size([10000]))
if hasattr(likelihood, "objective"):
psamps = likelihood.objective(fsamps)
else:
psamps = norm.cdf(fsamps)
pmean, pvar = psamps.mean(0), psamps.var(0)
return pmean, pvar
def get_extremum(
model: Model,
extremum_type: str,
bounds: torch.Tensor,
locked_dims: Optional[Mapping[int, List[float]]],
n_samples: int,
) -> Tuple[float, np.ndarray]:
"""Return the extremum (min or max) of the modeled function
Args:
extremum_type (str): type of extremum (currently 'min' or 'max'
n_samples int: number of coarse grid points to sample for optimization estimate.
Returns:
Tuple[float, np.ndarray]: Tuple containing the min and its location (argmin).
"""
locked_dims = locked_dims or {}
acqf = PosteriorMean(model=model, maximize=(extremum_type == "max"))
best_point, best_val = optimize_acqf(
acq_function=acqf,
bounds=bounds,
q=1,
num_restarts=10,
raw_samples=n_samples,
fixed_features=locked_dims,
)
# PosteriorMean flips the sign on minimize, we flip it back
if extremum_type == "min":
best_val = -best_val
return best_val, best_point.squeeze(0)
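
# Hedged usage sketch (added for illustration; not part of the upstream aepsych file).
# It exercises compute_p_quantile to get a 95% credible interval on response probability
# from a latent probit-GP mean and standard deviation, per the docstring above. The
# latent means and standard deviations here are made up.
if __name__ == "__main__":
    f_mean = torch.tensor([0.0, 1.0])
    f_std = torch.tensor([0.5, 0.5])
    p_lower = compute_p_quantile(f_mean, f_std, 0.025)
    p_upper = compute_p_quantile(f_mean, f_std, 0.975)
    print(p_lower, p_upper)  # elementwise 95% CI bounds on p = Phi(f)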
|
aepsych-main
|
aepsych/models/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import dataclasses
import time
from typing import Dict, List, Optional
from aepsych.utils_logging import getLogger
from ax.core.search_space import SearchSpaceDigest
from ax.core.types import TCandidateMetadata
from ax.models.torch.botorch_modular.surrogate import Surrogate
from botorch.fit import fit_gpytorch_mll
from botorch.utils.datasets import SupervisedDataset
from torch import Tensor
logger = getLogger()
class AEPsychSurrogate(Surrogate):
def __init__(self, max_fit_time: Optional[float] = None, **kwargs) -> None:
self.max_fit_time = max_fit_time
super().__init__(**kwargs)
def fit(
self,
datasets: List[SupervisedDataset],
metric_names: List[str],
search_space_digest: SearchSpaceDigest,
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
state_dict: Optional[Dict[str, Tensor]] = None,
refit: bool = True,
**kwargs,
) -> None:
self.construct(
datasets=datasets,
metric_names=metric_names,
**dataclasses.asdict(search_space_digest),
)
self._outcomes = metric_names
if state_dict:
self.model.load_state_dict(state_dict)
if state_dict is None or refit:
mll = self.mll_class(self.model.likelihood, self.model, **self.mll_options)
optimizer_kwargs = {}
if self.max_fit_time is not None:
                # figure out how long a single mll evaluation takes
starttime = time.time()
_ = mll(self.model(datasets[0].X()), datasets[0].Y().squeeze())
single_eval_time = time.time() - starttime
n_eval = int(self.max_fit_time / single_eval_time)
logger.info(f"fit maxfun is {n_eval}")
optimizer_kwargs["options"] = {"maxfun": n_eval}
logger.info("Starting fit...")
starttime = time.time()
fit_gpytorch_mll(
mll, optimizer_kwargs=optimizer_kwargs
) # TODO: Support flexible optimizers
logger.info(f"Fit done, time={time.time()-starttime}")
|
aepsych-main
|
aepsych/models/surrogate.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from aepsych.models.base import AEPsychModel
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood
class ExactGP(AEPsychModel, SingleTaskGP):
@classmethod
def get_mll_class(cls):
return ExactMarginalLogLikelihood
class ContinuousRegressionGP(ExactGP):
"""GP Regression model for single continuous outcomes, using exact inference."""
stimuli_per_trial = 1
outcome_type = "continuous"
|
aepsych-main
|
aepsych/models/exact_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import time
from typing import Any, Dict, List, Mapping, Optional, Protocol, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config, ConfigurableMixin
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.models.utils import get_extremum
from aepsych.utils import dim_grid, get_jnd_multid, make_scaled_sobol, promote_0d
from aepsych.utils_logging import getLogger
from botorch.fit import fit_gpytorch_mll, fit_gpytorch_mll_scipy
from botorch.models.gpytorch import GPyTorchModel
from botorch.posteriors import GPyTorchPosterior
from gpytorch.likelihoods import Likelihood
from gpytorch.mlls import MarginalLogLikelihood
from scipy.optimize import minimize
from scipy.stats import norm
logger = getLogger()
torch.set_default_dtype(torch.double) # TODO: find a better way to prevent type errors
class ModelProtocol(Protocol):
@property
def _num_outputs(self) -> int:
pass
@property
def outcome_type(self) -> str:
pass
@property
def extremum_solver(self) -> str:
pass
@property
def train_inputs(self) -> torch.Tensor:
pass
@property
def lb(self) -> torch.Tensor:
pass
@property
def ub(self) -> torch.Tensor:
pass
@property
def bounds(self) -> torch.Tensor:
pass
@property
def dim(self) -> int:
pass
def posterior(self, x: torch.Tensor) -> GPyTorchPosterior:
pass
def predict(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
pass
@property
def stimuli_per_trial(self) -> int:
pass
@property
def likelihood(self) -> Likelihood:
pass
def sample(self, x: torch.Tensor, num_samples: int) -> torch.Tensor:
pass
def _get_extremum(
self,
extremum_type: str,
locked_dims: Optional[Mapping[int, List[float]]],
n_samples=1000,
) -> Tuple[float, np.ndarray]:
pass
def dim_grid(self, gridsize: int = 30) -> torch.Tensor:
pass
def fit(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs: Any) -> None:
pass
def update(
self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs: Any
) -> None:
pass
def p_below_threshold(self, x, f_thresh) -> np.ndarray:
pass
class AEPsychMixin(GPyTorchModel):
"""Mixin class that provides AEPsych-specific utility methods."""
extremum_solver = "Nelder-Mead"
outcome_types: List[str] = []
@property
def bounds(self):
return torch.stack((self.lb, self.ub))
def get_max(
self: ModelProtocol,
locked_dims: Optional[Mapping[int, List[float]]] = None,
n_samples: int = 1000,
) -> Tuple[float, np.ndarray]:
"""Return the maximum of the modeled function, subject to constraints
        Args:
            locked_dims (Mapping[int, List[float]], optional): Dimensions to fix, so that the
                max is along a slice of the full surface.
            n_samples (int): Number of coarse grid points to sample for the optimization estimate.
        Returns:
            Tuple[float, np.ndarray]: Tuple containing the max and its location (argmax).
"""
locked_dims = locked_dims or {}
return get_extremum(self, "max", self.bounds, locked_dims, n_samples)
def get_min(
self: ModelProtocol,
locked_dims: Optional[Mapping[int, List[float]]] = None,
n_samples: int = 1000,
) -> Tuple[float, np.ndarray]:
"""Return the minimum of the modeled function, subject to constraints
        Args:
            locked_dims (Mapping[int, List[float]], optional): Dimensions to fix, so that the
                min is along a slice of the full surface.
            n_samples (int): Number of coarse grid points to sample for the optimization estimate.
        Returns:
            Tuple[float, np.ndarray]: Tuple containing the min and its location (argmin).
"""
locked_dims = locked_dims or {}
return get_extremum(self, "min", self.bounds, locked_dims, n_samples)
def inv_query(
self: ModelProtocol,
y: float,
locked_dims: Optional[Mapping[int, List[float]]] = None,
probability_space: bool = False,
n_samples: int = 1000,
) -> Tuple[float, torch.Tensor]:
"""Query the model inverse.
Return nearest x such that f(x) = queried y, and also return the
value of f at that point.
Args:
y (float): Points at which to find the inverse.
locked_dims (Mapping[int, List[float]]): Dimensions to fix, so that the
inverse is along a slice of the full surface.
probability_space (bool, optional): Is y (and therefore the
returned nearest_y) in probability space instead of latent
function space? Defaults to False.
Returns:
Tuple[float, np.ndarray]: Tuple containing the value of f
nearest to queried y and the x position of this value.
"""
if probability_space:
assert (
self.outcome_type == "binary"
), f"Cannot get probability space for outcome_type '{self.outcome_type}'"
locked_dims = locked_dims or {}
def model_distance(x, pt, probability_space):
return np.abs(
self.predict(torch.tensor([x]), probability_space=probability_space)[0]
.detach()
.numpy()
- pt
)
        # Look for point with value closest to y, subject to the dict of locked dims
query_lb = self.lb.clone()
query_ub = self.ub.clone()
for locked_dim in locked_dims.keys():
dim_values = locked_dims[locked_dim]
if len(dim_values) == 1:
query_lb[locked_dim] = dim_values[0]
query_ub[locked_dim] = dim_values[0]
else:
query_lb[locked_dim] = dim_values[0]
query_ub[locked_dim] = dim_values[1]
d = make_scaled_sobol(query_lb, query_ub, n_samples, seed=0)
bounds = zip(query_lb.numpy(), query_ub.numpy())
fmean, _ = self.predict(d, probability_space=probability_space)
f = torch.abs(fmean - y)
estimate = d[torch.where(f == torch.min(f))[0][0]].numpy()
a = minimize(
model_distance,
estimate,
args=(y, probability_space),
method=self.extremum_solver,
bounds=bounds,
)
val = self.predict(torch.tensor([a.x]), probability_space=probability_space)[
0
].item()
return val, torch.Tensor(a.x)
def get_jnd(
self: ModelProtocol,
grid: Optional[Union[np.ndarray, torch.Tensor]] = None,
cred_level: Optional[float] = None,
intensity_dim: int = -1,
confsamps: int = 500,
method: str = "step",
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""Calculate the JND.
Note that JND can have multiple plausible definitions
outside of the linear case, so we provide options for how to compute it.
For method="step", we report how far one needs to go over in stimulus
space to move 1 unit up in latent space (this is a lot of people's
conventional understanding of the JND).
For method="taylor", we report the local derivative, which also maps to a
1st-order Taylor expansion of the latent function. This is a formal
generalization of JND as defined in Weber's law.
Both definitions are equivalent for linear psychometric functions.
Args:
grid (Optional[np.ndarray], optional): Mesh grid over which to find the JND.
Defaults to a square grid of size as determined by aepsych.utils.dim_grid
cred_level (float, optional): Credible level for computing an interval.
Defaults to None, computing no interval.
intensity_dim (int, optional): Dimension over which to compute the JND.
Defaults to -1.
confsamps (int, optional): Number of posterior samples to use for
computing the credible interval. Defaults to 500.
method (str, optional): "taylor" or "step" method (see docstring).
Defaults to "step".
Raises:
RuntimeError: for passing an unknown method.
Returns:
Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: either the
mean JND, or a median, lower, upper tuple of the JND posterior.
"""
if grid is None:
grid = self.dim_grid()
else:
grid = torch.tensor(grid)
        # this is super awkward; back out the intensity-dim grid size, assuming a square grid
gridsize = int(grid.shape[0] ** (1 / grid.shape[1]))
coords = torch.linspace(
self.lb[intensity_dim].item(), self.ub[intensity_dim].item(), gridsize
)
if cred_level is None:
fmean, _ = self.predict(grid)
fmean = fmean.reshape(*[gridsize for i in range(self.dim)])
if method == "taylor":
return torch.tensor(1 / np.gradient(fmean, coords, axis=intensity_dim))
elif method == "step":
return torch.clip(
torch.tensor(
get_jnd_multid(
fmean.detach().numpy(),
coords.detach().numpy(),
mono_dim=intensity_dim,
)
),
0,
np.inf,
)
alpha = 1 - cred_level # type: ignore
qlower = alpha / 2
qupper = 1 - alpha / 2
fsamps = self.sample(grid, confsamps)
if method == "taylor":
jnds = torch.tensor(
1
/ np.gradient(
fsamps.reshape(confsamps, *[gridsize for i in range(self.dim)]),
coords,
axis=intensity_dim,
)
)
elif method == "step":
samps = [s.reshape((gridsize,) * self.dim) for s in fsamps]
jnds = torch.stack(
[get_jnd_multid(s, coords, mono_dim=intensity_dim) for s in samps]
)
else:
raise RuntimeError(f"Unknown method {method}!")
upper = torch.clip(torch.quantile(jnds, qupper, axis=0), 0, np.inf) # type: ignore
lower = torch.clip(torch.quantile(jnds, qlower, axis=0), 0, np.inf) # type: ignore
median = torch.clip(torch.quantile(jnds, 0.5, axis=0), 0, np.inf) # type: ignore
return median, lower, upper
def dim_grid(
self: ModelProtocol,
gridsize: int = 30,
slice_dims: Optional[Mapping[int, float]] = None,
) -> torch.Tensor:
return dim_grid(self.lb, self.ub, self.dim, gridsize, slice_dims)
def set_train_data(self, inputs=None, targets=None, strict=False):
"""
:param torch.Tensor inputs: The new training inputs.
:param torch.Tensor targets: The new training targets.
:param bool strict: (default False, ignored). Here for compatibility with
input transformers. TODO: actually use this arg or change input transforms
to not require it.
"""
if inputs is not None:
self.train_inputs = (inputs,)
if targets is not None:
self.train_targets = targets
def normalize_inputs(self, x):
scale = self.ub - self.lb
return (x - self.lb) / scale
def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultivariateNormal:
"""Evaluate GP
Args:
x (torch.Tensor): Tensor of points at which GP should be evaluated.
Returns:
gpytorch.distributions.MultivariateNormal: Distribution object
holding mean and covariance at x.
"""
transformed_x = self.normalize_inputs(x)
mean_x = self.mean_module(transformed_x)
covar_x = self.covar_module(transformed_x)
pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
return pred
def _fit_mll(
self,
mll: MarginalLogLikelihood,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
optimizer=fit_gpytorch_mll_scipy,
**kwargs,
) -> None:
self.train()
train_x, train_y = mll.model.train_inputs[0], mll.model.train_targets
optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs.copy()
max_fit_time = kwargs.pop("max_fit_time", self.max_fit_time)
if max_fit_time is not None:
            # figure out how long a single mll evaluation takes
starttime = time.time()
_ = mll(self(train_x), train_y)
single_eval_time = time.time() - starttime
n_eval = int(max_fit_time / single_eval_time)
optimizer_kwargs["options"] = {"maxfun": n_eval}
logger.info(f"fit maxfun is {n_eval}")
starttime = time.time()
res = fit_gpytorch_mll(
mll, optimizer=optimizer, optimizer_kwargs=optimizer_kwargs, **kwargs
)
return res
def p_below_threshold(self, x, f_thresh) -> np.ndarray:
f, var = self.predict(x)
return norm.cdf((f_thresh - f.detach().numpy()) / var.sqrt().detach().numpy())
class AEPsychModel(ConfigurableMixin, abc.ABC):
extremum_solver = "Nelder-Mead"
outcome_type: Optional[str] = None
def predict(
self: GPyTorchModel, x: Union[torch.Tensor, np.ndarray]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (Union[torch.Tensor, np.ndarray]): Points at which to predict from the model.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at queried points.
"""
with torch.no_grad():
post = self.posterior(x)
fmean = post.mean.squeeze()
fvar = post.variance.squeeze()
return promote_0d(fmean), promote_0d(fvar)
def predict_probability(self: GPyTorchModel, x: Union[torch.Tensor, np.ndarray]):
raise NotImplementedError
def sample(
self: GPyTorchModel, x: Union[torch.Tensor, np.ndarray], n: int
) -> torch.Tensor:
"""Sample the model posterior at the given points.
Args:
x (Union[torch.Tensor, np.ndarray]): Points at which to sample from the model.
n (int): Number of samples to take at each point.
Returns:
torch.Tensor: Posterior samples at queried points. Shape is n x len(x) x number of outcomes.
"""
return self.posterior(x).sample(torch.Size([n]))
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None) -> Dict:
if name is None:
name = cls.__name__
mean_covar_factory = config.getobj(
name, "mean_covar_factory", fallback=default_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
likelihood_cls = config.getobj(name, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
max_fit_time = config.getfloat(name, "max_fit_time", fallback=None)
options = {
"likelihood": likelihood,
"covar_module": covar,
"mean_module": mean,
"max_fit_time": max_fit_time,
}
return options
@classmethod
def construct_inputs(cls, training_data, **kwargs):
train_X = training_data.X()
train_Y = training_data.Y()
likelihood = kwargs.get("likelihood")
covar_module = kwargs.get("covar_module")
mean_module = kwargs.get("mean_module")
inputs = {
"train_X": train_X,
"train_Y": train_Y,
"likelihood": likelihood,
"covar_module": covar_module,
"mean_module": mean_module,
}
return inputs
def get_max(
self,
bounds: torch.Tensor,
locked_dims: Optional[Mapping[int, List[float]]] = None,
n_samples: int = 1000,
) -> Tuple[float, np.ndarray]:
"""Return the maximum of the modeled function, subject to constraints
Args:
            bounds (torch.Tensor): The lower and upper bounds in the parameter space to search for the maximum,
                formatted as a 2 x d tensor, where d is the number of parameters.
            locked_dims (Mapping[int, List[float]], optional): Dimensions to fix, so that the
                max is along a slice of the full surface.
            n_samples (int): Number of coarse grid points to sample for the optimization estimate.
        Returns:
            Tuple[float, np.ndarray]: Tuple containing the max and its location (argmax).
"""
locked_dims = locked_dims or {}
return get_extremum(self, "max", bounds, locked_dims, n_samples)
def get_min(
self,
bounds: torch.Tensor,
locked_dims: Optional[Mapping[int, List[float]]] = None,
n_samples: int = 1000,
) -> Tuple[float, np.ndarray]:
"""Return the minimum of the modeled function, subject to constraints
Args:
            bounds (torch.Tensor): The lower and upper bounds in the parameter space to search for the minimum,
                formatted as a 2 x d tensor, where d is the number of parameters.
            locked_dims (Mapping[int, List[float]], optional): Dimensions to fix, so that the
                min is along a slice of the full surface.
            n_samples (int): Number of coarse grid points to sample for the optimization estimate.
        Returns:
            Tuple[float, np.ndarray]: Tuple containing the min and its location (argmin).
"""
locked_dims = locked_dims or {}
return get_extremum(self, "min", bounds, locked_dims, n_samples)
def inv_query(
self,
y: float,
bounds: torch.Tensor,
locked_dims: Optional[Mapping[int, List[float]]] = None,
probability_space: bool = False,
n_samples: int = 1000,
) -> Tuple[float, torch.Tensor]:
"""Query the model inverse.
Return nearest x such that f(x) = queried y, and also return the
value of f at that point.
Args:
y (float): Points at which to find the inverse.
locked_dims (Mapping[int, List[float]]): Dimensions to fix, so that the
inverse is along a slice of the full surface.
probability_space (bool): Is y (and therefore the
returned nearest_y) in probability space instead of latent
function space? Defaults to False.
Returns:
Tuple[float, np.ndarray]: Tuple containing the value of f
nearest to queried y and the x position of this value.
"""
if probability_space:
assert (
self.outcome_type == "binary" or self.outcome_type is None
), f"Cannot get probability space for outcome_type '{self.outcome_type}'"
pred_function = self.predict_probability
else:
pred_function = self.predict
locked_dims = locked_dims or {}
def model_distance(x, pt, probability_space):
return np.abs(pred_function(torch.tensor([x]))[0].detach().numpy() - pt)
        # Look for point with value closest to y, subject to the dict of locked dims
query_lb = bounds[0]
query_ub = bounds[-1]
for locked_dim in locked_dims.keys():
dim_values = locked_dims[locked_dim]
if len(dim_values) == 1:
query_lb[locked_dim] = dim_values[0]
query_ub[locked_dim] = dim_values[0]
else:
query_lb[locked_dim] = dim_values[0]
query_ub[locked_dim] = dim_values[1]
d = make_scaled_sobol(query_lb, query_ub, n_samples, seed=0)
opt_bounds = zip(query_lb.numpy(), query_ub.numpy())
fmean, _ = pred_function(d)
f = torch.abs(fmean - y)
estimate = d[torch.where(f == torch.min(f))[0][0]].numpy()
a = minimize(
model_distance,
estimate,
args=(y, probability_space),
method=self.extremum_solver,
bounds=opt_bounds,
)
val = pred_function(torch.tensor([a.x]))[0].item()
return val, torch.Tensor(a.x)
@abc.abstractmethod
def get_mll_class(self):
raise NotImplementedError
def fit(self):
mll_class = self.get_mll_class()
mll = mll_class(self.likelihood, self)
fit_gpytorch_mll(mll)
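
# Hedged numeric sketch (added for illustration; not part of the upstream aepsych file).
# It shows the quantity AEPsychMixin.p_below_threshold computes, using plain numpy/scipy:
# for a posterior with mean 0 and variance 1, P(f < 1) = Phi((1 - 0) / 1) ~= 0.8413.
# The numbers are made up.
if __name__ == "__main__":
    f_mean, f_var, f_thresh = np.array([0.0]), np.array([1.0]), 1.0
    print(norm.cdf((f_thresh - f_mean) / np.sqrt(f_var)))  # expected: ~[0.8413]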
|
aepsych-main
|
aepsych/models/base.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from aepsych.acquisition.objective import AEPsychObjective, FloorProbitObjective
from aepsych.config import Config
from gpytorch.likelihoods import _OneDimensionalLikelihood
class LinearBernoulliLikelihood(_OneDimensionalLikelihood):
"""
    A likelihood of the form Bernoulli(sigma(k(x - c))), where k (slope) and c (offset) are
    GPs and sigma is a flexible link function.
"""
def __init__(self, objective: Optional[AEPsychObjective] = None):
"""Initializes the linear bernoulli likelihood.
Args:
objective (Callable, optional): Link function to use (sigma in the notation above).
Defaults to probit with no floor.
"""
super().__init__()
self.objective = objective or FloorProbitObjective(floor=0.0)
def f(self, function_samples: torch.Tensor, Xi: torch.Tensor) -> torch.Tensor:
"""Return the latent function value, k(x-c).
Args:
function_samples (torch.Tensor): Samples from a batched GP
Xi (torch.Tensor): Intensity values.
Returns:
torch.Tensor: latent function value.
"""
# function_samples is of shape nsamp x (b) x 2 x n
        # If the batch dim (b) is present:
if function_samples.ndim > 3:
assert function_samples.ndim == 4
assert function_samples.shape[2] == 2
# In this case, Xi will be of size b x n
# Offset and slope should be num_samps x b x n
offset = function_samples[:, :, 0, :]
slope = function_samples[:, :, 1, :]
fsamps = slope * (Xi - offset)
# Expand from (nsamp x b x n) to (nsamp x b x n x 1)
fsamps = fsamps.unsqueeze(-1)
else:
assert function_samples.ndim == 3
assert function_samples.shape[1] == 2
# Shape is num_samps x 2 x n
# Offset and slope should be num_samps x n
# Xi will be of size n
offset = function_samples[:, 0, :]
slope = function_samples[:, 1, :]
fsamps = slope * (Xi - offset)
# Expand from (nsamp x n) to (nsamp x 1 x n x 1)
fsamps = fsamps.unsqueeze(1).unsqueeze(-1)
return fsamps
def p(self, function_samples: torch.Tensor, Xi: torch.Tensor) -> torch.Tensor:
"""Returns the response probability sigma(k(x+c)).
Args:
function_samples (torch.Tensor): Samples from the batched GP (see documentation for self.f)
Xi (torch.Tensor): Intensity Values.
Returns:
torch.Tensor: Response probabilities.
"""
fsamps = self.f(function_samples, Xi)
return self.objective(fsamps)
def forward(
self, function_samples: torch.Tensor, Xi: torch.Tensor, **kwargs
) -> torch.distributions.Bernoulli:
"""Forward pass for the likelihood
Args:
function_samples (torch.Tensor): Samples from a batched GP of batch size 2.
Xi (torch.Tensor): Intensity values.
Returns:
torch.distributions.Bernoulli: Outcome likelihood.
"""
output_probs = self.p(function_samples, Xi)
return torch.distributions.Bernoulli(probs=output_probs)
def expected_log_prob(self, observations, function_dist, *args, **kwargs):
"""This has to be overridden to fix a bug in gpytorch where the kwargs
aren't being passed along to self.forward.
"""
# modified, TODO fixme upstream (cc @bletham)
def log_prob_lambda(function_samples):
return self.forward(function_samples, **kwargs).log_prob(observations)
log_prob = self.quadrature(log_prob_lambda, function_dist)
return log_prob
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
objective = config.getobj(classname, "objective")
        if hasattr(objective, "from_config"):
            objective = objective.from_config(config)
return cls(objective=objective)
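
# Hedged shape sketch (added for illustration; not part of the upstream aepsych file).
# It shows how f() broadcasts offset/slope samples against intensity values in the
# unbatched case described in the comments above. The sample values are made up.
if __name__ == "__main__":
    likelihood = LinearBernoulliLikelihood()
    function_samples = torch.randn(10, 2, 5)  # nsamp x 2 (offset, slope) x n
    Xi = torch.linspace(0, 1, 5)  # n intensity values
    print(likelihood.f(function_samples, Xi).shape)  # expected: torch.Size([10, 1, 5, 1])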
|
aepsych-main
|
aepsych/likelihoods/semi_p.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import gpytorch
import torch
from gpytorch.likelihoods import Likelihood
from torch.distributions import Categorical, Normal
class OrdinalLikelihood(Likelihood):
"""
Ordinal likelihood, suitable for rating models (e.g. likert scales). Formally,
.. math:: z_k(x\\mid f) := p(d_k < f(x) \\le d_{k+1}) = \\sigma(d_{k+1}-f(x)) - \\sigma(d_{k}-f(x)),
where :math:`\\sigma()` is the link function (equivalent to the perceptual noise
distribution in psychophysics terms), :math:`f(x)` is the latent GP evaluated at x,
and :math:`d_k` is a learned cutpoint parameter for each level.
"""
def __init__(self, n_levels: int, link: Optional[Callable] = None):
super().__init__()
self.n_levels = n_levels
self.register_parameter(
name="raw_cutpoint_deltas",
parameter=torch.nn.Parameter(torch.abs(torch.randn(n_levels - 2))),
)
self.register_constraint("raw_cutpoint_deltas", gpytorch.constraints.Positive())
self.link = link or Normal(0, 1).cdf
@property
def cutpoints(self):
cutpoint_deltas = self.raw_cutpoint_deltas_constraint.transform(
self.raw_cutpoint_deltas
)
# for identification, the first cutpoint is 0
return torch.cat((torch.tensor([0]), torch.cumsum(cutpoint_deltas, 0)))
def forward(self, function_samples, *params, **kwargs):
# this whole thing can probably be some clever batched thing, meh
probs = torch.zeros(*function_samples.size(), self.n_levels)
probs[..., 0] = self.link(self.cutpoints[0] - function_samples)
for i in range(1, self.n_levels - 1):
probs[..., i] = self.link(self.cutpoints[i] - function_samples) - self.link(
self.cutpoints[i - 1] - function_samples
)
probs[..., -1] = 1 - self.link(self.cutpoints[-1] - function_samples)
res = Categorical(probs=probs)
return res
@classmethod
def from_config(cls, config):
classname = cls.__name__
n_levels = config.getint(classname, "n_levels")
link = config.getobj(classname, "link", fallback=None)
return cls(n_levels=n_levels, link=link)
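
# Hedged usage sketch (added for illustration; not part of the upstream aepsych file).
# It applies an ordinal likelihood over 5 rating levels to random latent samples;
# forward() returns a Categorical over the levels. The sample values are made up.
if __name__ == "__main__":
    likelihood = OrdinalLikelihood(n_levels=5)
    f_samples = torch.randn(100)
    dist = likelihood.forward(f_samples)
    print(dist.probs.shape)  # expected: torch.Size([100, 5])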
|
aepsych-main
|
aepsych/likelihoods/ordinal.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .bernoulli import BernoulliObjectiveLikelihood
from .ordinal import OrdinalLikelihood
from .semi_p import LinearBernoulliLikelihood
__all__ = [
"BernoulliObjectiveLikelihood",
"OrdinalLikelihood",
"LinearBernoulliLikelihood",
]
Config.register_module(sys.modules[__name__])
|
aepsych-main
|
aepsych/likelihoods/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable
import torch
from aepsych.config import Config
from gpytorch.likelihoods import _OneDimensionalLikelihood
class BernoulliObjectiveLikelihood(_OneDimensionalLikelihood):
"""
Bernoulli likelihood with a flexible link (objective) defined
by a callable (which can be a botorch objective)
"""
def __init__(self, objective: Callable):
super().__init__()
self.objective = objective
def forward(self, function_samples, **kwargs):
output_probs = self.objective(function_samples)
return torch.distributions.Bernoulli(probs=output_probs)
@classmethod
def from_config(cls, config: Config):
objective_cls = config.getobj(cls.__name__, "objective")
objective = objective_cls.from_config(config)
return cls(objective=objective)
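
# Hedged usage sketch (added for illustration; not part of the upstream aepsych file).
# Any callable mapping latent samples to probabilities can serve as the objective;
# torch.sigmoid is used here purely for demonstration.
if __name__ == "__main__":
    likelihood = BernoulliObjectiveLikelihood(objective=torch.sigmoid)
    dist = likelihood.forward(torch.zeros(3))
    print(dist.probs)  # expected: tensor([0.5000, 0.5000, 0.5000])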
|
aepsych-main
|
aepsych/likelihoods/bernoulli.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Optional, Union
import numpy as np
import torch
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerator
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds
class ManualGenerator(AEPsychGenerator):
"""Generator that generates points from the Sobol Sequence."""
_requires_model = False
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
points: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
shuffle: bool = True,
):
"""Iniatialize SobolGenerator.
Args:
lb (Union[np.ndarray, torch.Tensor]): Lower bounds of each parameter.
ub (Union[np.ndarray, torch.Tensor]): Upper bounds of each parameter.
points (Union[np.ndarray, torch.Tensor]): The points that will be generated.
dim (int, optional): Dimensionality of the parameter space. If None, it is inferred from lb and ub.
shuffle (bool): Whether or not to shuffle the order of the points. True by default.
"""
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.points = points
if shuffle:
np.random.shuffle(points)
self.finished = False
self._idx = 0
def gen(
self,
num_points: int = 1,
model: Optional[AEPsychMixin] = None, # included for API compatibility
):
"""Query next point(s) to run by quasi-randomly sampling the parameter space.
Args:
num_points (int): Number of points to query.
Returns:
np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
"""
if num_points > (len(self.points) - self._idx):
warnings.warn(
"Asked for more points than are left in the generator! Giving everthing it has!",
RuntimeWarning,
)
points = self.points[self._idx : self._idx + num_points]
self._idx += num_points
if self._idx >= len(self.points):
self.finished = True
return points
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
points = config.getarray(classname, "points")
shuffle = config.getboolean(classname, "shuffle", fallback=True)
return cls(lb=lb, ub=ub, dim=dim, points=points, shuffle=shuffle)
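
# Hedged usage sketch (added for illustration; not part of the upstream aepsych file).
# It serves a fixed list of points in order (shuffle disabled so the output order is
# predictable). The bounds and points are made up.
if __name__ == "__main__":
    points = np.array([[0.1], [0.5], [0.9]])
    gen = ManualGenerator(
        lb=np.array([0.0]), ub=np.array([1.0]), points=points, shuffle=False
    )
    print(gen.gen(2))  # expected: the first two points
    print(gen.gen(1), gen.finished)  # expected: the last point, then True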
|
aepsych-main
|
aepsych/generators/manual_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from aepsych.config import Config
from aepsych.generators import OptimizeAcqfGenerator
class PairwiseOptimizeAcqfGenerator(OptimizeAcqfGenerator):
"""Deprecated. Use OptimizeAcqfGenerator instead."""
stimuli_per_trial = 2
@classmethod
def from_config(cls, config: Config):
warnings.warn(
"PairwiseOptimizeAcqfGenerator is deprecated. Use OptimizeAcqfGenerator instead.",
DeprecationWarning,
)
return super().from_config(config)
|
aepsych-main
|
aepsych/generators/pairwise_optimize_acqf_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Dict, Optional, Union
import numpy as np
import torch
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerationStep, AEPsychGenerator
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds
from ax.modelbridge import Models
from torch.quasirandom import SobolEngine
class SobolGenerator(AEPsychGenerator):
"""Generator that generates points from the Sobol Sequence."""
_requires_model = False
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
seed: Optional[int] = None,
stimuli_per_trial: int = 1,
):
"""Iniatialize SobolGenerator.
Args:
lb (Union[np.ndarray, torch.Tensor]): Lower bounds of each parameter.
ub (Union[np.ndarray, torch.Tensor]): Upper bounds of each parameter.
dim (int, optional): Dimensionality of the parameter space. If None, it is inferred from lb and ub.
seed (int, optional): Random seed.
"""
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.lb = self.lb.repeat(stimuli_per_trial)
self.ub = self.ub.repeat(stimuli_per_trial)
self.stimuli_per_trial = stimuli_per_trial
self.seed = seed
self.engine = SobolEngine(
dimension=self.dim * stimuli_per_trial, scramble=True, seed=self.seed
)
def gen(
self,
num_points: int = 1,
model: Optional[AEPsychMixin] = None, # included for API compatibility
):
"""Query next point(s) to run by quasi-randomly sampling the parameter space.
Args:
num_points (int, optional): Number of points to query.
Returns:
np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
"""
grid = self.engine.draw(num_points)
grid = self.lb + (self.ub - self.lb) * grid
if self.stimuli_per_trial == 1:
return grid
return torch.tensor(
np.moveaxis(
grid.reshape(num_points, self.stimuli_per_trial, -1).numpy(),
-1,
-self.stimuli_per_trial,
)
)
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
seed = config.getint(classname, "seed", fallback=None)
stimuli_per_trial = config.getint(classname, "stimuli_per_trial")
return cls(
lb=lb, ub=ub, dim=dim, seed=seed, stimuli_per_trial=stimuli_per_trial
)
@classmethod
def get_config_options(cls, config: Config, name: str):
return AxSobolGenerator.get_config_options(config, name)
class AxSobolGenerator(AEPsychGenerationStep):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
classname = "SobolGenerator"
seed = config.getint(classname, "seed", fallback=None)
scramble = config.getboolean(classname, "scramble", fallback=True)
opts = {
"model": Models.SOBOL,
"model_kwargs": {"seed": seed, "scramble": scramble},
}
opts.update(super().get_config_options(config, name))
return opts
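
# Hedged usage sketch (added for illustration; not part of the upstream aepsych file).
# It draws scrambled Sobol points scaled into the parameter bounds; with the default
# stimuli_per_trial=1 the output is simply [num_points x dim]. Bounds and seed are made up.
if __name__ == "__main__":
    gen = SobolGenerator(lb=np.array([0.0, 0.0]), ub=np.array([1.0, 2.0]), seed=12345)
    print(gen.gen(4).shape)  # expected: torch.Size([4, 2])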
|
aepsych-main
|
aepsych/generators/sobol_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from aepsych.config import Config
from .sobol_generator import SobolGenerator
class PairwiseSobolGenerator(SobolGenerator):
"""Deprecated. Use SobolGenerator instead."""
stimuli_per_trial = 2
@classmethod
def from_config(cls, config: Config):
warnings.warn(
"PairwiseSobolGenerator is deprecated. Use SobolGenerator instead.",
DeprecationWarning,
)
return super().from_config(config)
|
aepsych-main
|
aepsych/generators/pairwise_sobol_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Type
import torch
from aepsych.acquisition.objective.semi_p import SemiPThresholdObjective
from aepsych.generators import OptimizeAcqfGenerator
from aepsych.models.semi_p import SemiParametricGPModel
class IntensityAwareSemiPGenerator(OptimizeAcqfGenerator):
"""Generator for SemiP. With botorch machinery, in order to optimize acquisition
separately over context and intensity, we need two ingredients.
1. An objective that samples from some posterior w.r.t. the context. From the
paper, this is ThresholdBALV and needs the threshold posterior.
`SemiPThresholdObjective` implements this for ThresholdBALV but theoretically
this can be any subclass of `SemiPObjectiveBase`.
2. A way to do acquisition over context and intensity separately, which is
provided by this class. We optimize the acquisition function over context
dimensions, then conditioned on the optimum we evaluate the intensity
at the objective to obtain the intensity value.
    We have only developed ThresholdBALV, which is specific to SemiP and is what we tested
    with this generator, but it should work with other similar acquisition functions.
"""
def gen( # type: ignore[override]
self,
num_points: int,
model: SemiParametricGPModel, # type: ignore[override]
context_objective: Type = SemiPThresholdObjective,
) -> torch.Tensor:
fixed_features = {model.stim_dim: 0}
next_x = super().gen(
num_points=num_points, model=model, fixed_features=fixed_features
)
# to compute intensity, we need the point where f is at the
# threshold as a function of context. self.acqf_kwargs should contain
# remaining objective args (like threshold target value)
thresh_objective = context_objective(
likelihood=model.likelihood, stim_dim=model.stim_dim, **self.acqf_kwargs
)
kc_mean_at_best_context = model(torch.Tensor(next_x)).mean
thresh_at_best_context = thresh_objective(kc_mean_at_best_context)
thresh_at_best_context = torch.clamp(
thresh_at_best_context,
min=model.lb[model.stim_dim],
max=model.ub[model.stim_dim],
)
next_x[..., model.stim_dim] = thresh_at_best_context.detach()
return next_x
|
aepsych-main
|
aepsych/generators/semi_p.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional, Sequence
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCAcquisition
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerator
from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP
from botorch.logging import logger
from botorch.optim.initializers import gen_batch_initial_conditions
from botorch.optim.utils import columnwise_clamp, fix_features
def default_loss_constraint_fun(
loss: torch.Tensor, candidates: torch.Tensor
) -> torch.Tensor:
"""Identity transform for constrained optimization.
This simply returns loss as-is. Write your own versions of this
for constrained optimization by e.g. interior point method.
Args:
loss (torch.Tensor): Value of loss at candidate points.
candidates (torch.Tensor): Location of candidate points.
Returns:
torch.Tensor: New loss (unchanged)
"""
return loss
class MonotonicRejectionGenerator(AEPsychGenerator[MonotonicRejectionGP]):
"""Generator specifically to be used with MonotonicRejectionGP, which generates new points to sample by minimizing
an acquisition function through stochastic gradient descent."""
def __init__(
self,
acqf: MonotonicMCAcquisition,
acqf_kwargs: Optional[Dict[str, Any]] = None,
model_gen_options: Optional[Dict[str, Any]] = None,
explore_features: Optional[Sequence[int]] = None,
) -> None:
"""Initialize MonotonicRejectionGenerator.
Args:
acqf (AcquisitionFunction): Acquisition function to use.
acqf_kwargs (Dict[str, object], optional): Extra arguments to
pass to acquisition function. Defaults to no arguments.
model_gen_options: Dictionary with options for generating candidate, such as
SGD parameters. See code for all options and their defaults.
explore_features: List of features that will be selected randomly and then
fixed for acquisition fn optimization.
"""
if acqf_kwargs is None:
acqf_kwargs = {}
self.acqf = acqf
self.acqf_kwargs = acqf_kwargs
self.model_gen_options = model_gen_options
self.explore_features = explore_features
def _instantiate_acquisition_fn(self, model: MonotonicRejectionGP):
return self.acqf(
model=model,
deriv_constraint_points=model._get_deriv_constraint_points(),
**self.acqf_kwargs,
)
def gen(
self,
num_points: int, # Current implementation only generates 1 point at a time
model: MonotonicRejectionGP,
):
"""Query next point(s) to run by optimizing the acquisition function.
Args:
num_points (int, optional): Number of points to query.
model (AEPsychMixin): Fitted model of the data.
Returns:
np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
"""
options = self.model_gen_options or {}
num_restarts = options.get("num_restarts", 10)
raw_samples = options.get("raw_samples", 1000)
verbosity_freq = options.get("verbosity_freq", -1)
lr = options.get("lr", 0.01)
momentum = options.get("momentum", 0.9)
nesterov = options.get("nesterov", True)
epochs = options.get("epochs", 50)
milestones = options.get("milestones", [25, 40])
gamma = options.get("gamma", 0.1)
loss_constraint_fun = options.get(
"loss_constraint_fun", default_loss_constraint_fun
)
# Augment bounds with deriv indicator
bounds = torch.cat((model.bounds_, torch.zeros(2, 1)), dim=1)
# Fix deriv indicator to 0 during optimization
fixed_features = {(bounds.shape[1] - 1): 0.0}
# Fix explore features to random values
if self.explore_features is not None:
for idx in self.explore_features:
val = (
bounds[0, idx]
+ torch.rand(1, dtype=bounds.dtype)
* (bounds[1, idx] - bounds[0, idx])
).item()
fixed_features[idx] = val
bounds[0, idx] = val
bounds[1, idx] = val
acqf = self._instantiate_acquisition_fn(model)
# Initialize
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=acqf,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
)
clamped_candidates = columnwise_clamp(
X=batch_initial_conditions, lower=bounds[0], upper=bounds[1]
).requires_grad_(True)
candidates = fix_features(clamped_candidates, fixed_features)
optimizer = torch.optim.SGD(
params=[clamped_candidates], lr=lr, momentum=momentum, nesterov=nesterov
)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=milestones, gamma=gamma
)
# Optimize
for epoch in range(epochs):
loss = -acqf(candidates).sum()
# adjust loss based on constraints on candidates
loss = loss_constraint_fun(loss, candidates)
if verbosity_freq > 0 and epoch % verbosity_freq == 0:
logger.info("Iter: {} - Value: {:.3f}".format(epoch, -(loss.item())))
def closure():
optimizer.zero_grad()
loss.backward(
retain_graph=True
) # Variational model requires retain_graph
return loss
optimizer.step(closure)
clamped_candidates.data = columnwise_clamp(
X=clamped_candidates, lower=bounds[0], upper=bounds[1]
)
candidates = fix_features(clamped_candidates, fixed_features)
lr_scheduler.step()
# Extract best point
with torch.no_grad():
batch_acquisition = acqf(candidates)
best = torch.argmax(batch_acquisition.view(-1), dim=0)
Xopt = candidates[best][:, :-1].detach()
return Xopt
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
acqf = config.getobj("common", "acqf", fallback=None)
extra_acqf_args = cls._get_acqf_options(acqf, config)
options = {}
options["num_restarts"] = config.getint(classname, "restarts", fallback=10)
options["raw_samples"] = config.getint(classname, "samps", fallback=1000)
options["verbosity_freq"] = config.getint(
classname, "verbosity_freq", fallback=-1
)
options["lr"] = config.getfloat(classname, "lr", fallback=0.01) # type: ignore
options["momentum"] = config.getfloat(classname, "momentum", fallback=0.9) # type: ignore
options["nesterov"] = config.getboolean(classname, "nesterov", fallback=True)
options["epochs"] = config.getint(classname, "epochs", fallback=50)
options["milestones"] = config.getlist(
classname, "milestones", fallback=[25, 40] # type: ignore
)
options["gamma"] = config.getfloat(classname, "gamma", fallback=0.1) # type: ignore
options["loss_constraint_fun"] = config.getobj(
classname, "loss_constraint_fun", fallback=default_loss_constraint_fun
)
explore_features = config.getlist(classname, "explore_idxs", fallback=None) # type: ignore
return cls(
acqf=acqf,
acqf_kwargs=extra_acqf_args,
model_gen_options=options,
explore_features=explore_features,
)
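
# Hedged sketch (added for illustration; not part of the upstream aepsych file).
# It shows the optimization pattern used in gen() above -- SGD with momentum plus a
# MultiStepLR schedule, maximizing by minimizing the negative objective -- applied to a
# toy quadratic instead of an acquisition function. All values are made up.
if __name__ == "__main__":
    def toy_acqf(x):
        return -((x - 0.3) ** 2).sum()

    candidate = torch.zeros(1, 2, requires_grad=True)
    optimizer = torch.optim.SGD([candidate], lr=0.01, momentum=0.9, nesterov=True)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[25, 40], gamma=0.1
    )
    for _ in range(50):
        optimizer.zero_grad()
        loss = -toy_acqf(candidate)
        loss.backward()
        optimizer.step()
        scheduler.step()
    print(candidate.detach())  # expected: values moving toward 0.3 in both coordinates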
|
aepsych-main
|
aepsych/generators/monotonic_rejection_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .epsilon_greedy_generator import EpsilonGreedyGenerator
from .manual_generator import ManualGenerator
from .monotonic_rejection_generator import MonotonicRejectionGenerator
from .monotonic_thompson_sampler_generator import MonotonicThompsonSamplerGenerator
from .multi_outcome_generator import MultiOutcomeOptimizationGenerator
from .optimize_acqf_generator import AxOptimizeAcqfGenerator, OptimizeAcqfGenerator
from .pairwise_optimize_acqf_generator import PairwiseOptimizeAcqfGenerator
from .pairwise_sobol_generator import PairwiseSobolGenerator
from .random_generator import AxRandomGenerator, RandomGenerator
from .semi_p import IntensityAwareSemiPGenerator
from .sobol_generator import AxSobolGenerator, SobolGenerator
__all__ = [
"OptimizeAcqfGenerator",
"MonotonicRejectionGenerator",
"MonotonicThompsonSamplerGenerator",
"RandomGenerator",
"SobolGenerator",
"EpsilonGreedyGenerator",
"ManualGenerator",
"PairwiseOptimizeAcqfGenerator",
"PairwiseSobolGenerator",
"AxOptimizeAcqfGenerator",
"AxSobolGenerator",
"IntensityAwareSemiPGenerator",
"MultiOutcomeOptimizationGenerator",
"AxRandomGenerator",
]
Config.register_module(sys.modules[__name__])
|
aepsych-main
|
aepsych/generators/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import time
from inspect import signature
from typing import Any, cast, Dict, Optional
import numpy as np
import torch
from aepsych.acquisition.acquisition import AEPsychAcquisition
from aepsych.config import Config, ConfigurableMixin
from aepsych.generators.base import AEPsychGenerationStep, AEPsychGenerator
from aepsych.models.base import ModelProtocol
from aepsych.models.surrogate import AEPsychSurrogate
from aepsych.utils_logging import getLogger
from ax.modelbridge import Models
from ax.modelbridge.registry import Cont_X_trans
from botorch.acquisition import AcquisitionFunction
from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
from botorch.optim import optimize_acqf
from botorch.utils import draw_sobol_samples
logger = getLogger()
class OptimizeAcqfGenerator(AEPsychGenerator):
"""Generator that chooses points by minimizing an acquisition function."""
def __init__(
self,
acqf: AcquisitionFunction,
acqf_kwargs: Optional[Dict[str, Any]] = None,
restarts: int = 10,
samps: int = 1000,
max_gen_time: Optional[float] = None,
stimuli_per_trial: int = 1,
) -> None:
"""Initialize OptimizeAcqfGenerator.
Args:
acqf (AcquisitionFunction): Acquisition function to use.
acqf_kwargs (Dict[str, object], optional): Extra arguments to
pass to acquisition function. Defaults to no arguments.
restarts (int): Number of restarts for acquisition function optimization.
samps (int): Number of samples for quasi-random initialization of the acquisition function optimizer.
            max_gen_time (float, optional): Maximum time (in seconds) to optimize the acquisition function.
                This is only loosely followed by scipy's optimizer, so consider using a number about 1/3 or
                less of what your true upper bound is.
            stimuli_per_trial (int): Number of stimuli per trial. Defaults to 1.
"""
if acqf_kwargs is None:
acqf_kwargs = {}
self.acqf = acqf
self.acqf_kwargs = acqf_kwargs
self.restarts = restarts
self.samps = samps
self.max_gen_time = max_gen_time
self.stimuli_per_trial = stimuli_per_trial
def _instantiate_acquisition_fn(self, model: ModelProtocol):
if self.acqf == AnalyticExpectedUtilityOfBestOption:
return self.acqf(pref_model=model)
if self.acqf in self.baseline_requiring_acqfs:
return self.acqf(
model=model, X_baseline=model.train_inputs[0], **self.acqf_kwargs
)
else:
return self.acqf(model=model, **self.acqf_kwargs)
def gen(self, num_points: int, model: ModelProtocol, **gen_options) -> torch.Tensor:
"""Query next point(s) to run by optimizing the acquisition function.
Args:
num_points (int, optional): Number of points to query.
model (ModelProtocol): Fitted model of the data.
Returns:
np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
"""
if self.stimuli_per_trial == 2:
qbatch_points = self._gen(
num_points=num_points * 2, model=model, **gen_options
)
            # output of _gen is (q, dim), but the contract is (num_points, dim, 2),
            # so we split q into (num_points, pairs) and then move the pair dim to the end
return qbatch_points.reshape(num_points, 2, -1).swapaxes(-1, -2)
else:
return self._gen(num_points=num_points, model=model, **gen_options)
def _gen(
self, num_points: int, model: ModelProtocol, **gen_options
) -> torch.Tensor:
# eval should be inherited from superclass
model.eval() # type: ignore
train_x = model.train_inputs[0]
acqf = self._instantiate_acquisition_fn(model)
logger.info("Starting gen...")
starttime = time.time()
if self.max_gen_time is None:
new_candidate, _ = optimize_acqf(
acq_function=acqf,
bounds=torch.tensor(np.c_[model.lb, model.ub]).T.to(train_x),
q=num_points,
num_restarts=self.restarts,
raw_samples=self.samps,
**gen_options,
)
else:
            # figure out how long a single acquisition evaluation takes
starttime = time.time()
_ = acqf(train_x[0:num_points, :])
single_eval_time = time.time() - starttime
            # This is only a heuristic for the total number of evals since everything is stochastic,
            # but the reasoning is: we initialize with self.samps samples, subsample
            # self.restarts of them in proportion to the acqf value, and run that many
            # optimizations. So:
            # total_time = single_eval_time * n_eval * restarts + single_eval_time * samps,
            # and we solve for n_eval
n_eval = int(
(self.max_gen_time - single_eval_time * self.samps)
/ (single_eval_time * self.restarts)
)
if n_eval > 10:
                # heuristic: only run the optimizer if we can afford at least 10 evals
                # per restart; otherwise fall back to quasi-random search below
options = {"maxfun": n_eval}
logger.info(f"gen maxfun is {n_eval}")
new_candidate, _ = optimize_acqf(
acq_function=acqf,
bounds=torch.tensor(np.c_[model.lb, model.ub]).T.to(train_x),
q=num_points,
num_restarts=self.restarts,
raw_samples=self.samps,
options=options,
)
else:
logger.info(f"gen maxfun is {n_eval}, falling back to random search...")
nsamp = max(int(self.max_gen_time / single_eval_time), 10)
# Generate the points at which to sample
bounds = torch.stack((model.lb, model.ub))
X = draw_sobol_samples(bounds=bounds, n=nsamp, q=num_points)
acqvals = acqf(X)
best_indx = torch.argmax(acqvals, dim=0)
new_candidate = X[best_indx]
logger.info(f"Gen done, time={time.time()-starttime}")
return new_candidate
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
acqf = config.getobj(classname, "acqf", fallback=None)
extra_acqf_args = cls._get_acqf_options(acqf, config)
stimuli_per_trial = config.getint(classname, "stimuli_per_trial")
restarts = config.getint(classname, "restarts", fallback=10)
samps = config.getint(classname, "samps", fallback=1000)
max_gen_time = config.getfloat(classname, "max_gen_time", fallback=None)
return cls(
acqf=acqf,
acqf_kwargs=extra_acqf_args,
restarts=restarts,
samps=samps,
max_gen_time=max_gen_time,
stimuli_per_trial=stimuli_per_trial,
)
@classmethod
def get_config_options(cls, config: Config, name: str):
return AxOptimizeAcqfGenerator.get_config_options(config, name)
class AxOptimizeAcqfGenerator(AEPsychGenerationStep, ConfigurableMixin):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
classname = "OptimizeAcqfGenerator"
model_class = config.getobj(name, "model", fallback=None)
model_options = model_class.get_config_options(config)
acqf_cls = config.getobj(name, "acqf", fallback=None)
if acqf_cls is None:
acqf_cls = config.getobj(classname, "acqf")
acqf_options = cls._get_acqf_options(acqf_cls, config)
gen_options = cls._get_gen_options(config)
max_fit_time = model_options["max_fit_time"]
model_kwargs = {
"surrogate": AEPsychSurrogate(
botorch_model_class=model_class,
mll_class=model_class.get_mll_class(),
model_options=model_options,
max_fit_time=max_fit_time,
),
"acquisition_class": AEPsychAcquisition,
"botorch_acqf_class": acqf_cls,
"acquisition_options": acqf_options,
            # The Y transforms are removed because they are incompatible with our threshold-finding acqfs:
            # the target value doesn't get transformed, so the acqf would search for the target in the wrong space.
"transforms": Cont_X_trans, # TODO: Make LSE acqfs compatible with Y transforms
}
opts = {
"model": Models.BOTORCH_MODULAR,
"model_kwargs": model_kwargs,
"model_gen_kwargs": gen_options,
}
opts.update(super().get_config_options(config, name))
return opts
@classmethod
def _get_acqf_options(cls, acqf: AcquisitionFunction, config: Config):
class MissingValue:
pass
if acqf is not None:
acqf_name = acqf.__name__
acqf_args_expected = signature(acqf).parameters.keys()
acqf_args = {
k: config.getobj(
acqf_name,
k,
fallback_type=float,
fallback=MissingValue(),
warn=False,
)
for k in acqf_args_expected
}
acqf_args = {
k: v for k, v in acqf_args.items() if not isinstance(v, MissingValue)
}
for k, v in acqf_args.items():
if hasattr(v, "from_config"): # configure if needed
acqf_args[k] = cast(Any, v).from_config(config)
                elif isinstance(v, type):  # instantiate a class if needed
acqf_args[k] = v()
else:
acqf_args = {}
return acqf_args
@classmethod
def _get_gen_options(cls, config: Config):
classname = "OptimizeAcqfGenerator"
restarts = config.getint(classname, "restarts", fallback=10)
samps = config.getint(classname, "samps", fallback=1000)
return {"restarts": restarts, "samps": samps}
|
aepsych-main
|
aepsych/generators/optimize_acqf_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional
import numpy as np
import torch
from aepsych.acquisition.objective import ProbitObjective
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerator
from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP
from botorch.acquisition.objective import MCAcquisitionObjective
from botorch.utils.sampling import draw_sobol_samples
class MonotonicThompsonSamplerGenerator(AEPsychGenerator[MonotonicRejectionGP]):
"""A generator specifically to be used with MonotonicRejectionGP that uses a Thompson-sampling-style
approach for gen, rather than using an acquisition function. We draw a posterior sample at a large number
of points, and then choose the point that is closest to the target value.
"""
def __init__(
self,
n_samples: int,
n_rejection_samples: int,
num_ts_points: int,
target_value: float,
objective: MCAcquisitionObjective,
        explore_features: Optional[List[int]] = None,
) -> None:
"""Initialize MonotonicMCAcquisition
Args:
n_samples (int): Number of samples to select point from.
            n_rejection_samples (int): Number of rejection samples to draw.
            num_ts_points (int): Number of points at which to sample.
            target_value (float): Target value that is being looked for.
            objective (MCAcquisitionObjective): Objective transform of the GP output
                before evaluating the acquisition; passed as a class and instantiated here.
            explore_features (List[int], optional): Indices of features to explore by fixing
                them to a single uniformly random value for each gen call. Defaults to None.
"""
self.n_samples = n_samples
self.n_rejection_samples = n_rejection_samples
self.num_ts_points = num_ts_points
self.target_value = target_value
self.objective = objective()
self.explore_features = explore_features
def gen(
self,
num_points: int, # Current implementation only generates 1 point at a time
model: MonotonicRejectionGP,
) -> torch.Tensor:
"""Query next point(s) to run by optimizing the acquisition function.
Args:
num_points (int, optional): Number of points to query.
model (AEPsychMixin): Fitted model of the data.
Returns:
np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
"""
# Generate the points at which to sample
X = draw_sobol_samples(bounds=model.bounds_, n=self.num_ts_points, q=1).squeeze(
1
)
# Fix any explore features
if self.explore_features is not None:
for idx in self.explore_features:
val = (
model.bounds_[0, idx]
+ torch.rand(1) * (model.bounds_[1, idx] - model.bounds_[0, idx])
).item()
X[:, idx] = val
# Draw n samples
f_samp = model.sample(
X,
num_samples=self.n_samples,
num_rejection_samples=self.n_rejection_samples,
)
# Find the point closest to target
dist = torch.abs(self.objective(f_samp) - self.target_value)
best_indx = torch.argmin(dist, dim=1)
return torch.Tensor(X[best_indx])
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
n_samples = config.getint(classname, "num_samples", fallback=1)
n_rejection_samples = config.getint(
classname, "num_rejection_samples", fallback=500
)
num_ts_points = config.getint(classname, "num_ts_points", fallback=1000)
target = config.getfloat(classname, "target", fallback=0.75)
objective = config.getobj(classname, "objective", fallback=ProbitObjective)
explore_features = config.getlist(classname, "explore_idxs", element_type=int, fallback=None) # type: ignore
return cls(
n_samples=n_samples,
n_rejection_samples=n_rejection_samples,
num_ts_points=num_ts_points,
target_value=target,
objective=objective,
explore_features=explore_features, # type: ignore
)
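# Illustrative construction sketch (an editorial addition, not part of the
# original module). Calling gen() additionally requires a fitted
# MonotonicRejectionGP exposing bounds_ and sample(), which is not shown here.
if __name__ == "__main__":
    generator = MonotonicThompsonSamplerGenerator(
        n_samples=32,
        n_rejection_samples=500,
        num_ts_points=1000,
        target_value=0.75,
        objective=ProbitObjective,  # passed as a class; instantiated in __init__
    )
    # generator.gen(num_points=1, model=fitted_monotonic_gp) would then pick,
    # out of num_ts_points Sobol points, the one whose (probit-transformed)
    # posterior sample is closest to target_value.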
|
aepsych-main
|
aepsych/generators/monotonic_thompson_sampler_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from aepsych.config import Config
from ..models.base import ModelProtocol
from .base import AEPsychGenerator
from .optimize_acqf_generator import OptimizeAcqfGenerator
class EpsilonGreedyGenerator(AEPsychGenerator):
def __init__(self, subgenerator: AEPsychGenerator, epsilon: float = 0.1):
self.subgenerator = subgenerator
self.epsilon = epsilon
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
subgen_cls = config.getobj(
classname, "subgenerator", fallback=OptimizeAcqfGenerator
)
subgen = subgen_cls.from_config(config)
epsilon = config.getfloat(classname, "epsilon", fallback=0.1)
return cls(subgenerator=subgen, epsilon=epsilon)
def gen(self, num_points: int, model: ModelProtocol):
if num_points > 1:
raise NotImplementedError("Epsilon-greedy batched gen is not implemented!")
if np.random.uniform() < self.epsilon:
sample = np.random.uniform(low=model.lb, high=model.ub)
return torch.tensor(sample).reshape(1, -1)
else:
return self.subgenerator.gen(num_points, model)
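# Minimal behavioral sketch (an editorial addition, not part of the original
# module), using stub classes to show the epsilon split: roughly epsilon of
# the calls return a uniform-random point, the rest defer to the subgenerator.
if __name__ == "__main__":
    class _MidpointSubgenerator(AEPsychGenerator):
        """Toy subgenerator that always proposes the midpoint of the bounds."""

        def gen(self, num_points, model):
            return torch.tensor((model.lb + model.ub) / 2.0).reshape(1, -1)

        @classmethod
        def from_config(cls, config):
            return cls()

    class _StubModel:
        lb = np.zeros(2)
        ub = np.ones(2)

    generator = EpsilonGreedyGenerator(subgenerator=_MidpointSubgenerator(), epsilon=0.25)
    midpoint = torch.tensor([[0.5, 0.5]], dtype=torch.double)
    picks = [generator.gen(1, _StubModel()) for _ in range(2000)]
    frac_random = np.mean([not torch.equal(p, midpoint) for p in picks])
    print(f"fraction of random picks ~ {frac_random:.2f}")  # should be near 0.25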
|
aepsych-main
|
aepsych/generators/epsilon_greedy_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Dict
from ax.modelbridge import Models
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerationStep
class MultiOutcomeOptimizationGenerator(AEPsychGenerationStep):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
# classname = cls.__name__
opts = {
"model": Models.MOO,
}
opts.update(super().get_config_options(config, name))
return opts
|
aepsych-main
|
aepsych/generators/multi_outcome_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Union
import numpy as np
import torch
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerationStep, AEPsychGenerator
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds
from ax.modelbridge import Models
class RandomGenerator(AEPsychGenerator):
"""Generator that generates points randomly without an acquisition function."""
_requires_model = False
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
):
"""Iniatialize RandomGenerator.
Args:
lb (Union[np.ndarray, torch.Tensor]): Lower bounds of each parameter.
ub (Union[np.ndarray, torch.Tensor]): Upper bounds of each parameter.
dim (int, optional): Dimensionality of the parameter space. If None, it is inferred from lb and ub.
"""
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.bounds_ = torch.stack([self.lb, self.ub])
def gen(
self,
num_points: int = 1,
model: Optional[AEPsychMixin] = None, # included for API compatibility.
) -> torch.Tensor:
"""Query next point(s) to run by randomly sampling the parameter space.
Args:
            num_points (int, optional): Number of points to query. Defaults to 1.
        Returns:
            torch.Tensor: Next set of point(s) to evaluate, [num_points x dim].
"""
X = self.bounds_[0] + torch.rand((num_points, self.bounds_.shape[1])) * (
self.bounds_[1] - self.bounds_[0]
)
return X
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
return cls(lb=lb, ub=ub, dim=dim)
class AxRandomGenerator(AEPsychGenerationStep):
classname = "RandomGenerator"
model = Models.UNIFORM
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
seed = config.getint(cls.classname, "seed", fallback=None)
deduplicate = config.getboolean(cls.classname, "deduplicate", fallback=True)
opts = {
"model": cls.model,
"model_kwargs": {"seed": seed, "deduplicate": deduplicate},
}
opts.update(super().get_config_options(config, name))
return opts
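# Illustrative usage sketch (an editorial addition, not part of the original
# module); assumes _process_bounds accepts tensors, as documented above.
if __name__ == "__main__":
    generator = RandomGenerator(lb=torch.tensor([0.0, 10.0]), ub=torch.tensor([1.0, 20.0]))
    X = generator.gen(num_points=5)  # shape [5, 2]; column i stays within [lb[i], ub[i]]
    print(X)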
|
aepsych-main
|
aepsych/generators/random_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import abc
from inspect import signature
from typing import Any, Dict, Generic, Protocol, runtime_checkable, TypeVar
import torch
from aepsych.config import Config, ConfigurableMixin
from aepsych.models.base import AEPsychMixin
from ax.core.experiment import Experiment
from ax.modelbridge.generation_node import GenerationStep
from botorch.acquisition import (
AcquisitionFunction,
NoisyExpectedImprovement,
qNoisyExpectedImprovement,
)
from .completion_criterion import completion_criteria
AEPsychModelType = TypeVar("AEPsychModelType", bound=AEPsychMixin)
@runtime_checkable
class AcqArgProtocol(Protocol):
@classmethod
def from_config(cls, config: Config) -> Any:
pass
class AEPsychGenerator(abc.ABC, Generic[AEPsychModelType]):
"""Abstract base class for generators, which are responsible for generating new points to sample."""
_requires_model = True
baseline_requiring_acqfs = [qNoisyExpectedImprovement, NoisyExpectedImprovement]
stimuli_per_trial = 1
def __init__(
self,
) -> None:
pass
@abc.abstractmethod
def gen(self, num_points: int, model: AEPsychModelType) -> torch.Tensor:
pass
@classmethod
@abc.abstractmethod
def from_config(cls, config: Config):
pass
@classmethod
def _get_acqf_options(cls, acqf: AcquisitionFunction, config: Config):
if acqf is not None:
acqf_name = acqf.__name__
# model is not an extra arg, it's a default arg
acqf_args_expected = [
i for i in list(signature(acqf).parameters.keys()) if i != "model"
]
# this is still very ugly
extra_acqf_args = {}
if acqf_name in config:
full_section = config[acqf_name]
for k in acqf_args_expected:
# if this thing is configured
if k in full_section.keys():
# if it's an object make it an object
if full_section[k] in Config.registered_names.keys():
extra_acqf_args[k] = config.getobj(acqf_name, k)
else:
# otherwise try a float
try:
extra_acqf_args[k] = config.getfloat(acqf_name, k)
# finally just return a string
except ValueError:
extra_acqf_args[k] = config.get(acqf_name, k)
# next, do more processing
for k, v in extra_acqf_args.items():
if hasattr(v, "from_config"): # configure if needed
assert isinstance(v, AcqArgProtocol) # make mypy happy
extra_acqf_args[k] = v.from_config(config)
                elif isinstance(v, type):  # instantiate a class if needed
extra_acqf_args[k] = v()
else:
extra_acqf_args = {}
return extra_acqf_args
class AEPsychGenerationStep(GenerationStep, ConfigurableMixin, abc.ABC):
def __init__(self, name, **kwargs):
super().__init__(num_trials=-1, **kwargs)
self.name = name
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
criteria = []
for crit in completion_criteria:
# TODO: Figure out how to convince mypy that CompletionCriterion have `from_config`
criterion = crit.from_config(config, name) # type: ignore
criteria.append(criterion)
options = {"completion_criteria": criteria, "name": name}
return options
def finished(self, experiment: Experiment):
finished = all(
[criterion.is_met(experiment) for criterion in self.completion_criteria]
)
return finished
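# Illustrative sketch of _get_acqf_options (an editorial addition, not part of
# the original module). The Config(config_str=...) constructor is assumed here
# rather than shown in this file; build the Config however you normally do.
# Numeric entries in a section named after the acqf are parsed as floats,
# registered class names become objects, and anything else stays a string.
if __name__ == "__main__":
    from botorch.acquisition import qUpperConfidenceBound

    config_str = "\n".join(
        [
            "[qUpperConfidenceBound]",
            "beta = 1.96",
        ]
    )
    config = Config(config_str=config_str)  # assumed constructor kwarg
    extra_args = AEPsychGenerator._get_acqf_options(qUpperConfidenceBound, config)
    print(extra_args)  # expected: {'beta': 1.96}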
|
aepsych-main
|
aepsych/generators/base.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from ax.core.experiment import Experiment
from ax.modelbridge.completion_criterion import CompletionCriterion
from aepsych.config import Config, ConfigurableMixin
class MinAsks(CompletionCriterion, ConfigurableMixin):
def __init__(self, threshold: int) -> None:
self.threshold = threshold
def is_met(self, experiment: Experiment) -> bool:
return experiment.num_asks >= self.threshold
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]:
min_asks = config.getint(name, "min_asks", fallback=1)
options = {"threshold": min_asks}
return options
|
aepsych-main
|
aepsych/generators/completion_criterion/min_asks.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from aepsych.config import Config, ConfigurableMixin
from ax.core import Experiment
from ax.modelbridge.completion_criterion import CompletionCriterion
class RunIndefinitely(CompletionCriterion, ConfigurableMixin):
def __init__(self, run_indefinitely: bool) -> None:
self.run_indefinitely = run_indefinitely
def is_met(self, experiment: Experiment) -> bool:
return not self.run_indefinitely
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]:
run_indefinitely = config.getboolean(name, "run_indefinitely", fallback=False)
options = {"run_indefinitely": run_indefinitely}
return options
|
aepsych-main
|
aepsych/generators/completion_criterion/run_indefinitely.py
|