python_code | repo_name | file_path
---|---|---|
"""
Django settings for elementary project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
from elementary import hacks
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+so6mgy57y$sk#i9#-m-*lax8n#+7h@*fb$*&l3kw+rfs!%sh0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
PREREQ_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
'rest_framework',
'rest_framework.authtoken',
'django_extensions',
]
PROJECT_APPS = [
'resources',
]
INSTALLED_APPS = PREREQ_APPS + PROJECT_APPS
MIDDLEWARE_CLASSES = (
'elementary.middleware.exception_log.ExceptionLoggingMiddleware',
'elementary.middleware.request_log.RequestLogMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'elementary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
PROJECT_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = [os.path.join(PROJECT_DIR, 'templates')]
WSGI_APPLICATION = 'elementary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'elem',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Pacific'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_ROOT = os.path.join(BASE_DIR, "collected_static")
LOGIN_REDIRECT_URL = '/'
LOGGING = {
'version': 1,
'root': {'level': 'INFO'},
}
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'PAGE_SIZE': 100,
'VIEW_NAME_FUNCTION': 'rest_framework.views.get_view_name',
'VIEW_DESCRIPTION_FUNCTION': 'rest_framework.views.get_view_description'
}
# default to localhost:27017
MONGODB_CONNECTION_PARAMS = {
'host': None,
'port': None,
'tz_aware': True,
}
BROKER_URL = 'mongodb://localhost:27017/celery?tz_aware=true'
BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 3600}
CELERY_ACCEPT_CONTENT = ['json', 'msgpack', 'yaml']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_IGNORE_RESULT = True
CELERY_ALWAYS_EAGER = False
CELERY_MONGODB_BACKEND_SETTINGS = {
"host": "localhost",
"port": 27017,
'database': 'celery',
'taskmeta_collection': 'taskmeta',
}
ELEMENTARY_DATA_DIR = os.path.join(BASE_DIR, "data")
# we may not trust the user to select the right pipeline.
# override in settings.prod_local if necessary.
ELEMENTARY_ALLOW_REPO_CREATION = False
ELEMENTARY_PARSER_ENDPOINT = 'http://localhost:9000'
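# A minimal sketch of a hypothetical settings override module, e.g.
# elementary/settings/prod_local.py as suggested in the comment above
# (the values shown are examples, not the project's actual production
# configuration):
#
#     from elementary.settings.base import *  # noqa: F401,F403
#
#     ELEMENTARY_ALLOW_REPO_CREATION = True
#     ELEMENTARY_PARSER_ENDPOINT = 'http://parser.internal:9000'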
|
elementary-master
|
django/elementary/settings/base.py
|
from setuptools import setup
_REQUIRED = [
"tqdm",
"openai",
"manifest-ml",
]
setup(
name="evaporate",
version="0.0.1",
description="evaporating data lakes with foundation models",
author="simran brandon sabri avanika andrew immanuel chris",
packages=["evaporate"],
install_requires=_REQUIRED,
)
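# Usage note (an assumption based on the standard setuptools workflow): from
# the repository root, `pip install -e .` installs the package in editable
# mode, after which `import evaporate` resolves to this local package.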
|
evaporate-main
|
setup.py
|
import re
import argparse
import random
from bs4 import BeautifulSoup
from collections import Counter, defaultdict
def set_profiler_args(profiler_args):
parser = argparse.ArgumentParser(
"LLM profiler.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--chunk_size",
type=int,
default=5000
)
parser.add_argument(
"--train_size",
type=int,
default=15,
)
parser.add_argument(
"--eval_size",
type=int,
default=15,
)
parser.add_argument(
"--max_chunks_per_file",
type=int,
default=-1,
)
parser.add_argument(
"--num_top_k_scripts",
type=int,
default=1,
help="of all the scripts we generate for the metadata fields, number to retain after scoring their qualities",
)
parser.add_argument(
"--extraction_fraction_thresh",
        type=float,
        default=0.9,
        help="threshold for the abstentions approach",
)
parser.add_argument(
"--remove_tables",
type=bool,
default=False,
help="Remove tables from the html files?",
)
parser.add_argument(
"--body_only",
type=bool,
default=False,
help="Only use HTML body",
)
parser.add_argument(
"--max_metadata_fields",
type=int,
default=15,
)
parser.add_argument(
"--use_dynamic_backoff",
type=bool,
default=True,
help="Whether to do the function generation workflow or directly extract from chunks",
)
parser.add_argument(
"--use_qa_model",
type=bool,
default=False,
help="Whether to apply the span-extractor QA model.",
)
parser.add_argument(
"--overwrite_cache",
type=int,
default=0,
help="overwrite the manifest cache"
)
# models to use in the pipeline
parser.add_argument(
"--MODELS",
type=list,
help="models to use in the pipeline"
)
parser.add_argument(
"--KEYS",
type=list,
help="keys for openai models"
)
parser.add_argument(
"--GOLDKEY",
type=str,
help="models to use in the pipeline"
)
parser.add_argument(
"--MODEL2URL",
type=dict,
default={},
help="models to use in the pipeline"
)
parser.add_argument(
"--swde_plus",
type=bool,
default=False,
help="Whether to use the extended SWDE dataset to measure OpenIE performance",
)
parser.add_argument(
"--schema_id_sizes",
type=int,
default=0,
help="Number of documents to use for schema identification stage, if it differs from extraction",
)
parser.add_argument(
"--slice_results",
type=bool,
default=False,
help="Whether to measure the results by attribute-slice",
)
parser.add_argument(
"--fn_generation_prompt_num",
type=int,
default=-1,
help="For ablations on function generation with diversity, control which prompt we use. Default is all.",
)
parser.add_argument(
"--upper_bound_fns",
type=bool,
default=False,
help="For ablations that select functions using ground truth instead of the FM.",
)
parser.add_argument(
"--combiner_mode",
type=str,
default='mv',
help="For ablations that select functions using ground truth instead of the FM.",
)
parser.add_argument(
"--use_alg_filtering",
        type=bool,
default=True,
help="Whether to filter functions based on quality.",
)
parser.add_argument(
"--use_abstension",
        type=bool,
        default=True,
        help="Whether to use the abstentions approach.",
)
args = parser.parse_args(args=[])
for arg, val in profiler_args.items():
setattr(args, arg, val)
return args
#################### GET SOME SAMPLE FILES TO SEED THE METADATA SEARCH #########################
def sample_scripts(files, train_size=5):
# "Train" split
random.seed(0)
if train_size <= len(files):
sample_files = random.sample(files, train_size)
else:
sample_files = files
    # only the sampled paths are needed here; file contents are read later by chunk_file
    return sample_files
#################### BOILERPLATE CHUNKING CODE, CRITICAL FOR LONG SEQUENCES ####################
def chunk_file(
parser, file, chunk_size=5000, mode="train", remove_tables=False, body_only=False
):
content = get_file_contents(file)
if "html" in parser:
content, chunks = get_html_parse(
content,
chunk_size=chunk_size,
mode=mode,
remove_tables=remove_tables,
body_only=body_only
)
else:
content, chunks = get_txt_parse(content, chunk_size=chunk_size, mode=mode)
return content, chunks
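# A minimal usage sketch for chunk_file (the path is hypothetical; the helpers
# it relies on are defined below in this module):
#
#     content, chunks = chunk_file("html", "data/sample_page.html", chunk_size=5000)
#     print(f"{len(chunks)} chunks produced")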
# HTML --> CHUNKS
def clean_html(content):
for tag in ['script', 'style', 'svg']:
content = content.split("\n")
clean_content = []
in_script = 0
for c in content:
if c.strip().strip("\t").startswith(f"<{tag}"):
in_script = 1
endstr = "</" + tag # + ">"
if endstr in c or "/>" in c:
in_script = 0
if not in_script:
clean_content.append(c)
content = "\n".join(clean_content)
return content
def get_flattened_items(content, chunk_size=500):
flattened_divs = str(content).split("\n")
flattened_divs = [ch for ch in flattened_divs if ch.strip() and ch.strip("\n").strip()]
clean_flattened_divs = []
for div in flattened_divs:
if len(str(div)) > chunk_size:
sub_divs = div.split("><")
if len(sub_divs) == 1:
clean_flattened_divs.append(div)
else:
clean_flattened_divs.append(sub_divs[0] + ">")
for sd in sub_divs[1:-1]:
clean_flattened_divs.append("<" + sd + ">")
clean_flattened_divs.append("<" + sub_divs[-1])
else:
clean_flattened_divs.append(div)
return clean_flattened_divs
def get_html_parse(content, chunk_size=5000, mode="train", remove_tables=False, body_only=False, use_raw_text=False):
if remove_tables:
soup = BeautifulSoup(content)
tables = soup.find_all("table")
for table in tables:
if "infobox" not in str(table):
content = str(soup)
content = content.replace(str(table), "")
soup = BeautifulSoup(content)
if body_only:
soup = BeautifulSoup(content)
content = str(soup.find("body"))
soup = BeautifulSoup(content)
else:
content = clean_html(content)
clean_flattened_divs = []
flattened_divs = get_flattened_items(content, chunk_size=chunk_size)
for i, div in enumerate(flattened_divs):
new_div = re.sub(r'style="[^"]*"', '', str(div))
new_div = re.sub(r'<style>.*?</style>', '', str(new_div))
new_div = re.sub(r'<style.*?/style>', '', str(new_div))
new_div = re.sub(r'<meta.*?/>', '', str(new_div))
new_div = "\n".join([l for l in new_div.split("\n") if l.strip() and l.strip("\n").strip()])
# new_div = BeautifulSoup(new_div) #.fsind_all("div")[0]
if new_div:
clean_flattened_divs.append(new_div)
if mode == "eval":
        return content, []
grouped_divs = []
current_div = []
current_length = 0
max_length = chunk_size
join_str = " " if use_raw_text else "\n"
    for div in clean_flattened_divs:
        str_div = str(div)
        len_div = len(str_div)
        if current_div and (current_length + len_div > max_length):
            grouped_divs.append(join_str.join(current_div))
            current_div = []
            current_length = 0
        elif not current_div and len_div > max_length:
            # a single div longer than the chunk size becomes its own chunk
            grouped_divs.append(str_div)
            continue
        current_div.append(str_div)
        current_length += len_div
    if current_div:
        # flush the final, partially filled chunk
        grouped_divs.append(join_str.join(current_div))
    return content, grouped_divs
# GENERIC TXT --> CHUNKS
def get_txt_parse(content, chunk_size=5000, mode="train"):
# convert to chunks
if mode == "train":
chunks = content.split("\n")
clean_chunks = []
for chunk in chunks:
if len(chunk) > chunk_size:
sub_chunks = chunk.split(". ")
clean_chunks.extend(sub_chunks)
else:
clean_chunks.append(chunk)
chunks = clean_chunks.copy()
clean_chunks = []
for chunk in chunks:
if len(chunk) > chunk_size:
sub_chunks = chunk.split(", ")
clean_chunks.extend(sub_chunks)
else:
clean_chunks.append(chunk)
final_chunks = []
cur_chunk = []
cur_chunk_size = 0
for chunk in clean_chunks:
if cur_chunk_size + len(chunk) > chunk_size:
final_chunks.append("\n".join(cur_chunk))
cur_chunk = []
cur_chunk_size = 0
cur_chunk.append(chunk)
cur_chunk_size += len(chunk)
if cur_chunk:
final_chunks.append("\n".join(cur_chunk))
else:
final_chunks = []
return content, final_chunks
def get_file_contents(file):
text = ''
if file.endswith(".swp"):
return text
try:
with open(file) as f:
text = f.read()
    except UnicodeDecodeError:
with open(file, "rb") as f:
text = f.read().decode("utf-8", "ignore")
return text
def clean_metadata(field):
return field.replace("\t", " ").replace("\n", " ").strip().lower()
def filter_file2chunks(file2chunks, sample_files, attribute):
def get_attribute_parts(attribute):
for char in ["/", "-", "(", ")", "[", "]", "{", "}", ":"]:
attribute = attribute.replace(char, " ")
attribute_parts = attribute.lower().split()
return attribute_parts
# filter chunks with simple keyword search
attribute_chunks = defaultdict(list)
starting_num_chunks = 0
ending_num_chunks = 0
ending_in_sample_chunks = 0
starting_in_sample_chunks = 0
for file, chunks in file2chunks.items():
starting_num_chunks += len(chunks)
if file in sample_files:
starting_in_sample_chunks += len(chunks)
cleaned_chunks = []
for chunk in chunks:
if attribute.lower() in chunk.lower():
cleaned_chunks.append(chunk)
if len(cleaned_chunks) == 0:
for chunk in chunks:
if attribute.lower().replace(" ", "") in chunk.lower().replace(" ", ""):
cleaned_chunks.append(chunk)
if len(cleaned_chunks) == 0:
chunk2num_word_match = Counter()
for chunk_num, chunk in enumerate(chunks):
attribute_parts = get_attribute_parts(attribute.lower())
for wd in attribute_parts:
if wd.lower() in chunk.lower():
chunk2num_word_match[chunk_num] += 1
# sort chunks by number of words that match
sorted_chunks = sorted(chunk2num_word_match.items(), key=lambda x: x[1], reverse=True)
if len(sorted_chunks) > 0:
cleaned_chunks.append(chunks[sorted_chunks[0][0]])
if len(sorted_chunks) > 1:
cleaned_chunks.append(chunks[sorted_chunks[1][0]])
ending_num_chunks += len(cleaned_chunks)
num_chunks = len(cleaned_chunks)
num_chunks = min(num_chunks, 2)
cleaned_chunks = cleaned_chunks[:num_chunks]
attribute_chunks[file] = cleaned_chunks
if file in sample_files:
ending_in_sample_chunks += len(attribute_chunks[file])
file2chunks = attribute_chunks
if ending_num_chunks == 0 or ending_in_sample_chunks == 0:
print(f"Removing because no chunks for attribute {attribute} in any file")
return None
print(f"For attribute {attribute}\n-- Starting with {starting_num_chunks} chunks\n-- Ending with {ending_num_chunks} chunks")
print(f"-- {starting_in_sample_chunks} starting chunks in sample files\n-- {ending_in_sample_chunks} chunks in sample files")
return file2chunks
def clean_function_predictions(extraction, attribute=None):
if extraction is None:
return ''
if type(extraction) == list:
if extraction and type(extraction[0]) == list:
full_answer = []
for answer in extraction:
if type(answer) == list:
dedup_list = []
for a in answer:
if a not in dedup_list:
dedup_list.append(a)
answer = dedup_list
answer = [str(a).strip().strip("\n") for a in answer]
full_answer.append(", ".join(answer))
else:
full_answer.append(answer.strip().strip("\n"))
full_answer = [a.strip() for a in full_answer]
extraction = ", ".join(full_answer)
elif extraction and len(extraction) == 1 and extraction[0] is None:
extraction = ''
else:
dedup_list = []
for a in extraction:
if a not in dedup_list:
dedup_list.append(a)
extraction = dedup_list
extraction = [(str(e)).strip().strip("\n") for e in extraction]
extraction = ", ".join(extraction)
if type(extraction) == "str" and extraction.lower() == "none":
extraction = ""
extraction = extraction.strip().replace(" ", " ")
if extraction.lower().startswith(attribute.lower()):
idx = extraction.lower().find(attribute.lower())
extraction = extraction[idx+len(attribute):].strip()
for char in [':', ","]:
extraction = extraction.strip(char).strip()
    extraction = extraction.replace(",", ", ").replace("  ", " ")
return extraction
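# A small illustration of clean_function_predictions on toy values, following
# the behavior above: nested lists are deduplicated and joined into a
# comma-separated string, and a leading attribute name is stripped off.
#
#     clean_function_predictions([["USA", "USA"], ["Canada"]], attribute="country")
#     # -> 'USA, Canada'
#     clean_function_predictions("Country: USA", attribute="country")
#     # -> 'USA'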
def check_vs_train_extractions(train_extractions, final_extractions, gold_key):
clean_final_extractions = {}
gold_values = train_extractions[gold_key]
modes = []
start_toks = []
end_toks = []
for file, gold in gold_values.items():
if type(gold) == list:
if gold and type(gold[0]) == list:
gold = [g[0] for g in gold]
gold = ", ".join(gold)
else:
gold = ", ".join(gold)
gold = gold.lower()
pred = final_extractions[file].lower()
if not pred or not gold:
continue
if ("<" in pred and "<" not in gold) or (">" in pred and ">" not in gold):
check_pred = BeautifulSoup(pred).text
if check_pred in gold or gold in check_pred:
modes.append("soup")
elif gold in pred and len(pred) > len(gold):
modes.append("longer")
idx = pred.index(gold)
if idx > 0:
start_toks.append(pred[:idx-1])
end_idx = idx + len(gold)
if end_idx < len(pred):
end_toks.append(pred[end_idx:])
def long_substr(data):
substr = ''
if len(data) > 1 and len(data[0]) > 0:
for i in range(len(data[0])):
for j in range(len(data[0])-i+1):
if j > len(substr) and is_substr(data[0][i:i+j], data):
substr = data[0][i:i+j]
return substr
def is_substr(find, data):
if len(data) < 1 and len(find) < 1:
return False
for i in range(len(data)):
if find not in data[i]:
return False
return True
longest_end_tok = long_substr(end_toks)
longest_start_tok = long_substr(start_toks)
if len(set(modes)) == 1:
num_golds = len(gold_values)
for file, extraction in final_extractions.items():
if "longer" in modes:
# gold longer than pred
if len(end_toks) == num_golds and longest_end_tok in extraction and extraction.count(longest_end_tok) == 1:
idx = extraction.index(longest_end_tok)
extraction = extraction[:idx]
if len(start_toks) == num_golds and longest_start_tok in extraction and extraction.count(longest_start_tok) == 1:
idx = extraction.index(longest_start_tok)
extraction = extraction[idx:]
elif "soup" in modes:
extraction = BeautifulSoup(extraction).text
clean_final_extractions[file] = extraction
else:
clean_final_extractions = final_extractions
return clean_final_extractions
|
evaporate-main
|
evaporate/profiler_utils.py
|
import argparse
import os
def get_args(database_name, BASE_DATA_DIR = "/data/evaporate/"):
parser = argparse.ArgumentParser(
"LLM explorer.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--overwrite_cache",
type=bool,
        default=False,
help="Whether to overwrite the caching for prompts."
)
parser.add_argument(
"--data_lake",
type=str,
default="fda_510ks",
help="Name of the data lake"
)
parser.add_argument(
"--data_dir",
type=str,
help="Path to raw data-lake documents",
)
parser.add_argument(
"--generative_index_path",
type=str,
help="Path to store the generated structured view of the data lake",
)
parser.add_argument(
"--cache_dir",
type=str,
default=".cache/",
help="Path to cache intermediate files during system execution",
)
parser.add_argument(
"--set_dicts",
type=str,
default='',
help="Alternate valid names for the SWDE attributes as provided in the benchmark.",
)
parser.add_argument(
"--topic",
type=list,
default=[],
help="Topic of the data lake",
)
CONSTANTS = {
"fda_510ks": {
"data_dir": os.path.join(BASE_DATA_DIR, "fda-ai-pmas/510k/"),
"database_name": "fda_510ks",
"cache_dir": ".cache/fda_510ks/",
"generative_index_path": os.path.join(BASE_DATA_DIR, "generative_indexes/fda_510ks/"),
"gold_extractions_file": os.path.join(BASE_DATA_DIR, "ground_truth/fda_510ks_gold_extractions.json"),
"topic": "fda 510k device premarket notifications",
},
}
args = parser.parse_args(args=[])
args_fill = CONSTANTS[database_name]
args.data_dir = args_fill["data_dir"]
args.cache_dir = args_fill["cache_dir"]
args.generative_index_path = args_fill["generative_index_path"]
args.topic = args_fill['topic']
args.gold_extractions_file = args_fill['gold_extractions_file']
args.data_lake = database_name
if 'set_dicts' in args_fill:
args.set_dicts = args_fill['set_dicts']
return args
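# A hedged usage sketch: the data-lake name must be a key of the CONSTANTS
# table above, and BASE_DATA_DIR is a deployment-specific assumption.
#
#     args = get_args("fda_510ks", BASE_DATA_DIR="/data/evaporate/")
#     print(args.data_dir, args.topic)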
|
evaporate-main
|
evaporate/configs.py
|
import numpy as np
from collections import Counter, defaultdict
def text_f1(preds=[], golds=[], attribute=''):
"""Compute average F1 of text spans.
Taken from Squad without prob threshold for no answer.
"""
total_f1 = 0
total_recall = 0
total_prec = 0
f1s = []
for pred, gold in zip(preds, golds):
pred_toks = pred.split()
gold_toks = gold.split()
common = Counter(pred_toks) & Counter(gold_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
total_f1 += int(gold_toks == pred_toks)
f1s.append(int(gold_toks == pred_toks))
elif num_same == 0:
total_f1 += 0
f1s.append(0)
else:
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
total_f1 += f1
total_recall += recall
total_prec += precision
f1s.append(f1)
    f1_avg = total_f1 / len(golds) if golds else 0.0
    f1_median = np.percentile(f1s, 50) if f1s else 0.0
    return f1_avg, f1_median
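# A toy, self-contained check of text_f1 (values chosen for illustration): the
# first pred/gold pair shares 2 of 3 tokens, giving F1 = 2/3; the second pair
# matches exactly, so the average is (2/3 + 1) / 2.
def _demo_text_f1():
    preds = ["the quick fox", "42"]
    golds = ["the quick dog", "42"]
    f1_avg, f1_median = text_f1(preds=preds, golds=golds)
    assert abs(f1_avg - (2 / 3 + 1.0) / 2) < 1e-9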
|
evaporate-main
|
evaporate/evaluate_synthetic_utils.py
|
import os
import math
import json
import pickle
import html
from bs4 import BeautifulSoup
from collections import Counter, defaultdict
from utils import get_file_attribute
from evaluate_synthetic_utils import text_f1
# Compute recall from two sets
def set_recall(pred, gt):
return len(set(pred) & set(gt)) / len(set(gt))
# Compute precision from two sets
def set_precision(pred, gt):
return len(set(pred) & set(gt)) / len(set(pred))
# Compute F1 from precision and recall
def compute_f1(precision, recall):
if recall > 0. or precision > 0.:
return 2. * (precision * recall) / (precision + recall)
else:
return 0.
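# A toy example of the set-based metrics above (illustrative triples, not real
# extractions): pred covers two of the three gold items, so recall = 2/3,
# precision = 1, and F1 = 0.8.
def _demo_set_metrics():
    pred = {("f1.html", "monarch", "charles iii"), ("f1.html", "capital", "ottawa")}
    gt = pred | {("f1.html", "governor general", "mary simon")}
    precision, recall = set_precision(pred, gt), set_recall(pred, gt)
    assert (precision, recall) == (1.0, 2 / 3)
    assert abs(compute_f1(precision, recall) - 0.8) < 1e-9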
def evaluate_schema_identification(run_string, args, group_name, train_size=-1):
with open(f"{args.generative_index_path}/{run_string}_identified_schema.json") as f:
most_common_fields = json.load(f)
try:
with open(args.gold_extractions_file) as f:
gold_file2extractions = json.load(f)
except:
with open(args.gold_extractions_file, "rb") as f:
gold_file2extractions = pickle.load(f)
for file, dic in gold_file2extractions.items():
gold_metadata = list(dic.keys())
gold_metadata = [m for m in gold_metadata if m not in ['topic_entity_name']]
break
ctr = Counter(most_common_fields)
results = {}
for k in [len(gold_metadata), 1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 100, len(most_common_fields)]:
if not most_common_fields:
results[k] = {
"recall": 0,
"precision": 0,
"f1": 0,
"num_gold_attributes": k,
}
continue
gold_metadata = [item.lower() for item in gold_metadata]
pred_metadata = ctr
limit = k
pred_metadata = sorted(pred_metadata.most_common(limit), key=lambda x: (x[1], x[0]), reverse=True)
pred_metadata = [item[0].lower() for item in pred_metadata]
cleaned_pred_metadata = set()
for pred in pred_metadata:
if not pred:
continue
cleaned_pred_metadata.add(pred)
cleaned_gold_metadata = set()
for gold in gold_metadata:
cleaned_gold_metadata.add(gold)
recall = [x for x in cleaned_gold_metadata if x in cleaned_pred_metadata]
precision = [x for x in cleaned_pred_metadata if x in cleaned_gold_metadata]
recall = len(recall) / len(cleaned_gold_metadata)
precision = len(precision) / len(cleaned_pred_metadata)
f1 = compute_f1(precision, recall)
results[k] = {
"recall": recall,
"precision": precision,
"f1": f1,
"num_gold_attributes": limit,
}
print(f"@k = %d --- Recall: %.3f, Precision: %.3f, F1: %.3f" % (k, recall, precision, f1))
print()
return results
def clean_comparison(extraction, attribute='', exact_match=False):
# formatting transformations
if type(extraction) == list:
if extraction and type(extraction[0]) == list:
full_answer = []
for answer in extraction:
if type(answer) == list:
dedup_list = []
for a in answer:
if a not in dedup_list:
dedup_list.append(a)
answer = dedup_list
answer = [str(a).strip().strip("\n") for a in answer]
full_answer.append(", ".join(answer))
else:
full_answer.append(answer.strip().strip("\n"))
full_answer = [a.strip() for a in full_answer]
extraction = ", ".join(full_answer)
else:
dedup_list = []
for a in extraction:
if a not in dedup_list:
dedup_list.append(a)
extraction = dedup_list
extraction = [(str(e)).strip().strip("\n") for e in extraction if e]
extraction = ", ".join(extraction)
elif type(extraction) == "str" and (
extraction.lower() == "none" or any(
phrase in extraction.lower() for phrase in ["not reported", "none available", "n/a"]
)
):
extraction = ""
if type(extraction) == float and math.isnan(extraction):
extraction = ''
if type(extraction) != str:
extraction = str(extraction)
if type(extraction) == str:
if ("<" in extraction) and (">" in extraction):
extraction = BeautifulSoup(extraction).text
        extraction = extraction.strip().replace("  ", " ").lower()
attribute_variations = [f"{attribute}(s)".lower(), f"{attribute.strip()}(s)".lower(), attribute.lower(), attribute]
for a in attribute_variations:
extraction = extraction.replace(a, "").strip()
for char in ["'", '"', "(", ")", ",", "/", "]", "[", ":"]:
extraction = extraction.replace(char, "").strip()
extraction = html.unescape(extraction)
for char in ['&', '&', "-", "_", "\n", "\t", "http:", "<", ">"]:
extraction = extraction.replace(char, " ").strip()
if exact_match:
extraction = extraction.replace(" ", "")
if extraction == " ":
extraction = ""
extraction = extraction.strip()
return extraction
def evaluate_extraction_quality(run_string, args, gold_extractions_file, gold_attributes=None):
all_attribute_f1 = 0
all_attribute_total = 0
total_runtime_overall_attributes = 0
attribute2f1 = {}
attribute2scripts = {}
# load gold extractions file
try:
with open(gold_extractions_file) as f:
gold_extractions = json.load(f)
except:
with open(gold_extractions_file, "rb") as f:
gold_extractions = pickle.load(f)
for attribute in gold_attributes:
attribute = attribute.lower()
# load predicted extractions
fileattribute = get_file_attribute(attribute)
if not os.path.exists(f"{args.generative_index_path}/{run_string}_{fileattribute}_file2metadata.json"):
print(f"Missing file for {attribute}")
continue
with open(f"{args.generative_index_path}/{run_string}_{fileattribute}_file2metadata.json") as f:
file2metadata = json.load(f)
try:
with open(f"{args.generative_index_path}/{run_string}_{fileattribute}_functions.json") as f:
function_dictionary = json.load(f)
with open(f"{args.generative_index_path}/{run_string}_{fileattribute}_top_k_keys.json") as f:
selected_keys = json.load(f)
attribute2scripts[attribute] = selected_keys
except:
function_dictionary = {}
selected_keys = []
pass
total_runtime = 0
for key in selected_keys:
if key in function_dictionary:
runtime = function_dictionary[key]['runtime']
total_runtime += runtime
preds = []
golds = []
for file, gold_entry in gold_extractions.items():
for attr, gold_value in gold_entry.items():
attr = clean_comparison(attr)
attribute = clean_comparison(attribute)
if attr.lower() != attribute.lower():
continue
if file not in file2metadata:
continue
pred_value = file2metadata[file]
value_check = ''
pred_value_check = ''
            if type(pred_value) == list and pred_value and type(pred_value[0]) == str:
pred_value_check = sorted([p.strip() for p in pred_value])
elif type(pred_value) == str and "," in pred_value:
pred_value_check = sorted([p.strip() for p in pred_value.split(",")])
            if type(gold_value) == list:
                value_check = gold_value[0]
            elif "," in gold_value:
                value_check = sorted([p.strip() for p in gold_value.split(",")])
if value_check and pred_value_check and value_check == pred_value_check:
gold_value = pred_value
# SWDE doesn't include the full passage in many cases (e.g. "IMDB synopsis")
pred_value = clean_comparison(pred_value, attribute=attribute)
gold_value = clean_comparison(gold_value, attribute=attribute)
if pred_value.lower().strip(".").startswith(gold_value.lower().strip(".")):
pred_value = " ".join(pred_value.split()[:len(gold_value.split())])
preds.append(pred_value)
golds.append(gold_value)
if not preds:
total_f1, total_f1_median = 0, 0
if golds and preds:
total_f1, total_f1_median = text_f1(preds, golds, attribute=attribute)
else:
print(f"Skipping eval of attribute: {attribute}")
continue
if preds:
all_attribute_f1 += (total_f1)
all_attribute_total += 1
attribute2f1[attribute] = total_f1
total_runtime_overall_attributes += total_runtime
num_function_scripts = 0
for k, v in attribute2f1.items():
scripts = []
if k in attribute2scripts:
scripts = attribute2scripts[k]
if any(s for s in scripts if "function" in s):
num_function_scripts += 1
print(f"{k}, text-f1 = {v} --- {scripts}")
try:
overall_f1 = all_attribute_f1 / all_attribute_total
print(f"\nOverall f1 across %d attributes: %.3f" % (all_attribute_total, overall_f1))
print(f"Used functions for {num_function_scripts} out of {len(attribute2f1)} attributes")
print(f"Average time: {total_runtime_overall_attributes/all_attribute_total} seconds, {all_attribute_total} fns.\n\n")
results = {
"f1": all_attribute_f1 / all_attribute_total,
"total_attributes": all_attribute_total,
"attribute2f1": attribute2f1,
}
except:
results = {
"f1": 0,
"total_attributes": all_attribute_total,
"attribute2f1": attribute2f1,
}
return results
def determine_attribute_slices(gold_extractions, slice_results):
num_occurences, num_characters = defaultdict(int), defaultdict(int)
num_documents = len(gold_extractions)
for file, extraction_dict in gold_extractions.items():
for key, value in extraction_dict.items():
if type(value) == str and value:
num_occurences[key] += 1
num_characters[key] += len(value)
            elif type(value) == list and value and value[0]:
num_occurences[key] += 1
num_characters[key] += len(value[0])
# calculate the average length of the attribute
for attr, total_len in num_characters.items():
num_characters[attr] = total_len / num_occurences[attr]
# split into the "head", "tail", and "unstructured"
attribute_slices = defaultdict(set)
for attr, num_occur in num_occurences.items():
attribute_slices["all"].add(attr)
# skip the rest if not slicing results
if not slice_results:
continue
num_char = num_characters[attr]
if int(num_documents * 0.5) <= num_occur:
attribute_slices["head"].add(attr)
else:
attribute_slices["tail"].add(attr)
if num_char >= 20:
attribute_slices["unstructured"].add(attr)
else:
attribute_slices["structured"].add(attr)
return attribute_slices
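# An illustration of the slicing rule above (toy gold extractions): an
# attribute present in at least half the documents lands in "head", otherwise
# "tail"; attributes whose average value length is >= 20 characters are
# additionally marked "unstructured", else "structured".
#
#     gold = {"a.html": {"title": "short"}, "b.html": {"title": "also short"}}
#     determine_attribute_slices(gold, slice_results=True)
#     # -> {"all": {"title"}, "head": {"title"}, "structured": {"title"}}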
def evaluate_openie_quality(
run_string,
args,
gold_extractions_file,
sample_files=None,
slice_results=False,
mappings_names={}
):
# load pred extractions file
with open(f"{args.generative_index_path}/{run_string}_file2extractions.json") as f:
pred_extractions = json.load(f)
# alternate gold attribute naming
if args.set_dicts:
with open(args.set_dicts) as f:
set_dicts = json.load(f)
else:
set_dicts = {}
# load gold extractions file
try:
with open(gold_extractions_file) as f:
gold_extractions = json.load(f)
except:
with open(gold_extractions_file, "rb") as f:
gold_extractions = pickle.load(f)
pred_attributes = set()
for file, extraction_dict in pred_extractions.items():
for key, value in extraction_dict.items():
pred_attributes.add(key)
# split the attribute into slices -> "head", "tail", and "unstructured"
attribute_slices = determine_attribute_slices(gold_extractions, slice_results)
results = {}
for attribute_slice, gold_attributes in attribute_slices.items():
# lenient attribute scoring method: https://arxiv.org/pdf/2201.10608.pdf
gold_attribute_mapping = {}
for gold_attribute in gold_attributes:
if gold_attribute in pred_attributes or not set_dicts:
gold_attribute_mapping[gold_attribute] = gold_attribute
continue
if gold_attribute in set_dicts:
alternate_golds = set_dicts[gold_attribute]
else:
alternate_golds = [gold_attribute]
found = 0
for alternate_gold in alternate_golds:
if alternate_gold in pred_attributes:
gold_attribute_mapping[gold_attribute] = alternate_gold
found = 1
if not found:
if gold_attribute.strip('s') in pred_attributes:
gold_attribute_mapping[gold_attribute] = gold_attribute.strip('s')
elif gold_attribute+"s" in pred_attributes:
gold_attribute_mapping[gold_attribute] = gold_attribute+"s"
elif gold_attribute.strip('(s)') in pred_attributes:
gold_attribute_mapping[gold_attribute] = gold_attribute.strip('(s)')
elif gold_attribute+"(s)" in pred_attributes:
gold_attribute_mapping[gold_attribute] = gold_attribute+"(s)"
elif gold_attribute.replace(" ", "") in pred_attributes:
gold_attribute_mapping[gold_attribute] = gold_attribute.replace(" ", "")
elif any(pred_attribute.replace(" ", "") in gold_attributes for pred_attribute in pred_attributes):
for pred_attribute in pred_attributes:
if pred_attribute.replace(" ", "") in gold_attributes:
gold_attribute_mapping[gold_attribute] = pred_attribute
elif gold_attribute in mappings_names and mappings_names[gold_attribute] in pred_attributes:
gold_attribute_mapping[gold_attribute] = mappings_names[gold_attribute]
else:
gold_attribute_mapping[gold_attribute] = gold_attribute
pred_set = set()
skipped = set()
all_measurements = defaultdict(dict)
for file, extraction_dict in pred_extractions.items():
if sample_files and file not in sample_files:
continue
for key, value in extraction_dict.items():
if key not in attribute_slices["all"]:
if key.replace(" ", "") in attribute_slices["all"]:
key = key.replace(" ", "")
# skip predicted attributes that are in a different slice
if key in attribute_slices["all"] and key not in gold_attributes:
skipped.add(key)
continue
clean_key = clean_comparison(key, exact_match=True)
clean_value = clean_comparison(value, attribute=key, exact_match=True)
if clean_value:
pred_set.add((file, clean_key, clean_value))
if file not in all_measurements[clean_key]:
all_measurements[clean_key][file] = {
"pred": "",
"gold": "",
}
all_measurements[clean_key][file]['pred'] = clean_value
clean_pred_attributes = set([x[1] for x in pred_set])
# resolve mapping between gold and pred attributes
gold_attribute_mapping = {}
for gold_attribute in gold_attributes:
if gold_attribute in clean_pred_attributes:
gold_attribute_mapping[gold_attribute] = gold_attribute
continue
found = False
if set_dicts and gold_attribute in set_dicts:
alternate_golds = set_dicts[gold_attribute]
for alternate_gold in alternate_golds:
if alternate_gold in clean_pred_attributes:
gold_attribute_mapping[gold_attribute] = alternate_gold
found = True
if not found:
if gold_attribute.strip('s') in clean_pred_attributes:
gold_attribute_mapping[gold_attribute] = gold_attribute.strip('s')
elif gold_attribute+"s" in clean_pred_attributes:
gold_attribute_mapping[gold_attribute] = gold_attribute+"s"
else:
gold_attribute_mapping[gold_attribute] = gold_attribute
num_attributes = len(clean_pred_attributes)
gold_set = set()
for file, extraction_dict in gold_extractions.items():
if sample_files and file not in sample_files:
continue
for key, value in extraction_dict.items():
# ignore attributes in a different slice
if key not in gold_attributes:
continue
if key == "topic_entity_name":
if "name" in pred_attributes:
gold_attribute_mapping[key] = "name"
key = gold_attribute_mapping[key]
if key not in pred_attributes:
if key.replace(" ", "") in pred_attributes:
key = key.replace(" ", "")
# sort list-based attribute values for consistency.
if file in pred_extractions and key in pred_extractions[file]:
pred_value = pred_extractions[file][key]
value_check = ''
pred_value_check = ''
                if type(pred_value) == list and pred_value and type(pred_value[0]) == str:
pred_value_check = sorted([p.strip() for p in pred_value])
elif type(pred_value) == str and "," in pred_value:
pred_value_check = sorted([p.strip() for p in pred_value.split(",")])
                if type(value) == list:
                    value_check = value[0]
                elif "," in value:
                    value_check = sorted([p.strip() for p in value.split(",")])
if value_check and pred_value_check and value_check == pred_value_check:
value = pred_value
clean_key = clean_comparison(key, exact_match=True)
clean_value = clean_comparison(value, attribute=key, exact_match=True)
if clean_value:
gold_set.add((file, clean_key, clean_value))
if file not in all_measurements[clean_key]:
all_measurements[clean_key][file] = {
"pred": "",
"gold": "",
}
all_measurements[clean_key][file]['gold'] = clean_value
if not pred_set or not gold_set:
results[attribute_slice] = {
"precision": 0,
"recall": 0,
"f1": 0,
"num_files_evaluated": len(pred_extractions),
}
else:
# exact match over all fields
precision = set_precision(pred_set, gold_set)
recall = set_recall(pred_set, gold_set)
f1 = compute_f1(precision, recall)
results[attribute_slice] = {
"precision": precision,
"recall": recall,
"f1": f1,
"num_files_evaluated": len(pred_extractions),
}
print(f"[%s] OpenIE Precision (%d attributes): Precision: %.3f Recall: %.3f F1: %.3f" % (attribute_slice, num_attributes, precision, recall, f1))
return results if slice_results else results["all"]
def main(
run_string,
args,
profiler_args,
data_lake = "wiki_nba_players",
sample_files=None,
stage='',
gold_attributes=[],
mappings_names={}
):
gold_extractions_file = args.gold_extractions_file
train_size = profiler_args.train_size
overall_results = {}
if stage and stage != 'schema_id':
pass
else:
schema_id_results = evaluate_schema_identification(
run_string,
args,
data_lake,
train_size=train_size,
)
overall_results["schema_id"] = schema_id_results
if stage and stage != 'extract':
pass
else:
extraction_results = evaluate_extraction_quality(
run_string,
args,
gold_extractions_file,
gold_attributes=gold_attributes
)
overall_results["extraction"] = extraction_results
if stage and stage != 'openie':
pass
else:
openie_results = evaluate_openie_quality(
run_string,
args,
gold_extractions_file,
sample_files=sample_files,
slice_results = profiler_args.slice_results,
mappings_names = mappings_names
)
overall_results["openie"] = openie_results
return overall_results
if __name__ == "__main__":
main()
|
evaporate-main
|
evaporate/evaluate_synthetic.py
|
import json
import math
import statistics
import random
from tqdm import tqdm
from collections import Counter, defaultdict
from typing import List, Dict, Tuple, Set
from prompts import Step, SCHEMA_ID_PROMPTS
from utils import apply_prompt
from profiler_utils import clean_metadata
def directly_extract_from_chunks_w_value(
file2chunks,
sample_files,
manifest_session,
overwrite_cache=False,
topic=None,
use_dynamic_backoff=True,
):
total_tokens_prompted = 0
field2value = defaultdict(list)
field2count = Counter()
    file2results = {}
num_chunks_per_file = [len(file2chunks[file]) for file in file2chunks]
avg_num_chunks_per_file = statistics.mean(num_chunks_per_file)
stdev_num_chunks_per_file = statistics.stdev(num_chunks_per_file)
for i, file in enumerate(sample_files):
chunks = file2chunks[file]
print(f"Chunks in sample file {file}: {len(chunks)}")
for i, file in tqdm(
enumerate(sample_files),
total=len(sample_files),
desc="Directly extracting metadata from chunks",
):
chunks = file2chunks[file]
extractionset = set()
file_results = {}
for chunk_num, chunk in enumerate(chunks):
if (chunk_num > avg_num_chunks_per_file + stdev_num_chunks_per_file) and use_dynamic_backoff:
break
prompt_template = SCHEMA_ID_PROMPTS[0]
prompt = prompt_template.format(chunk=chunk, topic=topic)
try:
result, num_toks = apply_prompt(
Step(prompt),
max_toks=500,
manifest=manifest_session,
overwrite_cache=overwrite_cache
)
except:
print("Failed to apply prompt to chunk.")
continue
total_tokens_prompted += num_toks
result = result.split("---")[0].strip("\n")
results = result.split("\n")
results = [r.strip("-").strip() for r in results]
results = [r[2:].strip() if len(r) > 2 and r[1] == "." else r for r in results ]
for result in results:
try:
field = result.split(": ")[0].strip(":")
value = ": ".join(result.split(": ")[1:])
except:
print(f"Skipped: {result}")
continue
field_versions = [
field,
field.replace(" ", ""),
field.replace("-", ""),
field.replace("_", ""),
]
if not any([f.lower() in chunk.lower() for f in field_versions]) and use_dynamic_backoff:
continue
if not value and use_dynamic_backoff:
continue
field = field.lower().strip("-").strip("_").strip(" ").strip(":")
if field in extractionset and use_dynamic_backoff:
continue
field2value[field].append(value)
extractionset.add(field)
field2count[field] += 1
file_results[field] = value
file2results[file] = file_results
return field2value, field2count, total_tokens_prompted
def get_metadata_string_w_value(field2value, exclude=[], key=0):
field2num_extractions = Counter()
for field, values in field2value.items():
field2num_extractions[field] += len(values)
reranked_metadata = {}
try:
max_count = field2num_extractions.most_common(1)[0][1]
except:
return ''
fields = []
sort_field2num_extractions = sorted(
field2num_extractions.most_common(),
key=lambda x: (x[1], x[0]),
reverse=True
)
for item in sort_field2num_extractions:
field, count = item[0], item[1]
if field.lower() in exclude:
continue
if count == 1 and max_count > 1:
continue
idx = min(key, len(field2value[field]) - 1)
values = [field2value[field][idx]]
if idx < len(field2value[field]) - 1:
values.append(field2value[field][idx + 1])
reranked_metadata[field] = values
if len(reranked_metadata) > 200:
break
fields.append(field)
    random.seed(key)
    keys = list(reranked_metadata.keys())
    random.shuffle(keys)
reordered_dict = {}
for key in keys:
reordered_dict[key] = reranked_metadata[key]
reranked_metadata_str = str(reordered_dict)
return reranked_metadata_str
def rerank(
field2value, exclude, cleaned_counter, order_of_addition, base_extraction_count,
most_in_context_example, topic, manifest_session, overwrite_cache=False
):
total_tokens_prompted = 0
votes_round1 = Counter()
for i in range(3):
reranked_metadata_str = get_metadata_string_w_value(field2value, exclude=exclude, key=i)
if not reranked_metadata_str:
continue
prompt = \
f"""{most_in_context_example}Attributes:
{reranked_metadata_str}
List the most useful keys to include in a SQL database about "{topic}", if any.
Answer:"""
try:
result, num_toks = apply_prompt(
Step(prompt),
max_toks=500,
manifest=manifest_session,
overwrite_cache=overwrite_cache,
)
except:
print("Failed to apply prompt")
continue
total_tokens_prompted += num_toks
result = result.split("---")[0].strip("\n")
results = result.split("\n")
result = results[0].replace("[", "").replace("]", "").replace("'", "").replace('"', '')
result = result.split(", ")
result = [r.lower() for r in result]
indices = [idx for idx, r in enumerate(result) if not r]
if result and indices:
result = result[:indices[0]]
# Deduplicate but preserve order
result = list(dict.fromkeys(result))
for r in result:
r = r.strip("_").strip("-")
r = r.strip("'").strip('"').strip()
if not r or r in exclude or r not in base_extraction_count:
continue
votes_round1[r] += 2
fields = sorted(list(votes_round1.keys()))
for r in fields:
r = r.strip("_").strip("-")
r = r.strip("'").strip('"').strip()
if not r or r in exclude or r not in base_extraction_count:
continue
if votes_round1[r] > 1:
cleaned_counter[r] = votes_round1[r] * base_extraction_count[r]
order_of_addition.append(r)
else:
cleaned_counter[r] = base_extraction_count[r]
order_of_addition.append(r)
exclude.append(r)
return cleaned_counter, order_of_addition, exclude, total_tokens_prompted
def rerank_metadata(
base_extraction_count, field2value, topic, manifest_session, overwrite_cache
):
most_in_context_example = \
"""Attributes:
{'name': 'Jessica', 'student major': 'Computer Science', 'license': 'accredited', 'college name': 'University of Michigan', 'GPA': '3.9', 'student email': 'jess@umich.edu', 'rating': '42', 'title': 'details'}
List the most useful keys to include in a SQL database for "students", if any.
Answer: ['name', 'student major', 'college name', 'GPA', 'student email']
----
"""
total_tokens_prompted = 0
cleaned_counter = Counter()
exclude = []
order_of_addition = []
    cleaned_counter, order_of_addition, exclude, num_toks = rerank(
        field2value, exclude, cleaned_counter, order_of_addition, base_extraction_count,
        most_in_context_example, topic, manifest_session, overwrite_cache=overwrite_cache
    )
    total_tokens_prompted += num_toks
    cleaned_counter, order_of_addition, exclude, num_toks = rerank(
        field2value, exclude, cleaned_counter, order_of_addition, base_extraction_count,
        most_in_context_example, topic, manifest_session, overwrite_cache=overwrite_cache
    )
    total_tokens_prompted += num_toks
fields = sorted(list(base_extraction_count.keys()))
for field in fields:
if field not in cleaned_counter:
cleaned_counter[field] = base_extraction_count[field] / 2
order_of_addition.append(field)
return cleaned_counter, total_tokens_prompted, order_of_addition
#################### SAVE GENERATIVE INDEX OF FILE BASED METADATA #########################
def identify_schema(run_string, args, file2chunks: Dict, file2contents: Dict, sample_files: List, manifest_sessions: Dict, group_name: str, profiler_args):
# get sample and eval files, convert the sample scripts to chunks
random.seed(0)
total_tokens_prompted = 0
field2value, extract_w_value, num_toks = directly_extract_from_chunks_w_value(
file2chunks,
sample_files,
manifest_sessions[profiler_args.GOLD_KEY],
overwrite_cache=profiler_args.overwrite_cache,
topic=args.topic,
use_dynamic_backoff=profiler_args.use_dynamic_backoff,
)
total_tokens_prompted += num_toks
base_extraction_count, num_toks, order_of_addition = rerank_metadata(
extract_w_value,
field2value,
args.topic,
manifest_sessions[profiler_args.GOLD_KEY],
profiler_args.overwrite_cache,
)
total_tokens_prompted += num_toks
with open(f"{args.generative_index_path}/{run_string}_identified_schema.json", "w") as f:
json.dump(base_extraction_count, f)
with open(f"{args.generative_index_path}/{run_string}_order_of_addition.json", "w") as f:
json.dump(order_of_addition, f)
return total_tokens_prompted
|
evaporate-main
|
evaporate/schema_identification.py
|
############################ SCHEMA ID PROMPTS ############################
SCHEMA_ID_PROMPTS = [
f"""Sample text:
<tr class="mergedrow"><th scope="row" class="infobox-label"><div style="text-indent:-0.9em;margin-left:1.2em;font-weight:normal;">β’ <a href="/wiki/Monarchy_of_Canada" title="Monarchy of Canada">Monarch</a> </div></th><td class="infobox-data"><a href="/wiki/Charles_III" title="Charles III">Charles III</a></td></tr>
<tr class="mergedrow"><th scope="row" class="infobox-label"><div style="text-indent:-0.9em;margin-left:1.2em;font-weight:normal;">β’ <span class="nowrap"><a href="/wiki/Governor_General_of_Canada" title="Governor General of Canada">Governor General</a></span> </div></th><td class="infobox-data"><a href="/wiki/Mary_Simon" title="Mary Simon">Mary Simon</a></td></tr>
<b>Provinces and Territories</b class='navlinking countries'>
<ul>
<li>Saskatchewan</li>
<li>Manitoba</li>
<li>Ontario</li>
<li>Quebec</li>
<li>New Brunswick</li>
<li>Prince Edward Island</li>
<li>Nova Scotia</li>
<li>Newfoundland and Labrador</li>
<li>Yukon</li>
<li>Nunavut</li>
<li>Northwest Territories</li>
</ul>
Question: List all relevant attributes about 'Canada' that are exactly mentioned in this sample text if any.
Answer:
- Monarch: Charles III
- Governor General: Mary Simon
- Provinces and Territories: Saskatchewan, Manitoba, Ontario, Quebec, New Brunswick, Prince Edward Island, Nova Scotia, Newfoundland and Labrador, Yukon, Nunavut, Northwest Territories
----
Sample text:
Patient birth date: 1990-01-01
Prescribed medication: aspirin, ibuprofen, acetaminophen
Prescribed dosage: 1 tablet, 2 tablets, 3 tablets
Doctor's name: Dr. Burns
Date of discharge: 2020-01-01
Hospital address: 123 Main Street, New York, NY 10001
Question: List all relevant attributes about 'medications' that are exactly mentioned in this sample text if any.
Answer:
- Prescribed medication: aspirin, ibuprofen, acetaminophen
- Prescribed dosage: 1 tablet, 2 tablets, 3 tablets
----
Sample text:
{{chunk:}}
Question: List all relevant attributes about '{{topic:}}' that are exactly mentioned in this sample text if any.
Answer:"""
]
############################ PROMPTS FOR EXTRACTING A SPECIFIC FIELD BY DIRECTLY GIVING THE MODEL THE CONTEXT ############################
METADATA_EXTRACTION_WITH_LM = [
f"""Here is a file sample:
<th>Location</th>
<td><a href="/wiki/Cupertino">Cupertino</a>, <a href="/wiki/California">California</a>Since 1987</td>
Question: Return the full "location" span of this sample if it exists, otherwise output [].
Answer: ['Cupertino, California Since 1987']
----
Here is a file sample:
{{chunk:}}
Question: Return the full "{{attribute:}}" span of this sample if it exists, otherwise output [].
Answer:""",
]
METADATA_EXTRACTION_WITH_LM_ZERO_SHOT = [
f"""Sample text:
{{chunk:}}
Question: What is the "{{attribute:}}" value in the text?
Answer:"""
]
EXTRA_PROMPT = [
f"""Here is a file sample:
<a href="/year/2012;price=$550;url=http%www.myname.com;?" target="_blank"></a>
Question: Return the full "price" from this sample if it exists, otherwise output [].
Answer: ['$550']
----
Here is a file sample:
{{chunk:}}
Question: Return the full "{{attribute:}}" from this sample if it exists, otherwise output [].
Answer:""",
]
IS_VALID_ATTRIBUTE = [
f"""Question: Could "2014" be a "year" value in a "students" database?
Answer: Yes
----
Question: Could "cupcake" be a "occupation" value in a "employee" database?
Answer: No
----
Question: Could "''" be a "animal" value in a "zoo" database?
Answer: No
----
Question: Could "police officer" be a "occupation" value in a "employee" database?
Answer: Yes
----
Question: Could "{{value:}}" be a "{{attr_str:}}" value in a {{topic:}} database?
Answer:"""
]
PICK_VALUE = [
f"""Examples:
- 32
- 2014
- 99.4
- 2012
Question: Which example is a "year"?
Answer: 2012, 2014
----
Examples:
- police officer
- occupation
Question: Which example is a "occupation"?
Answer: police officer
----
Examples:
{{pred_str:}}
Question: Which example is a "{{attribute:}}"?
Answer:"""
]
############################## PROMPTS TO GENERATE FUNCTIONS THAT PARSE FOR A SPECIFIC FIELD ##############################
METADATA_GENERATION_FOR_FIELDS = [
# base prompt
f"""Here is a sample of text:
{{chunk:}}
Question: Write a python function to extract the entire "{{attribute:}}" field from text, but not any other metadata. Return the result as a list.
import re
def get_{{function_field:}}_field(text: str):
\"""
Function to extract the "{{attribute:}} field".
\"""
""",
# prompt with flexible library imports
f"""Here is a file sample:
DESCRIPTION: This file answers the question, "How do I sort a dictionary by value?"
DATES MODIFIED: The file was modified on the following dates:
2009-03-05T00:49:05
2019-04-07T00:22:14
2011-11-20T04:21:49
USERS: The users who modified the file are:
Jeff Jacobs
Richard Smith
Julia D'Angelo
Rebecca Matthews
FILE TYPE: This is a text file.
Question: Write a python function called "get_dates_modified_field" to extract the "DATES MODIFIED" field from the text. Include any imports.
import re
def get_dates_modified_field(text: str):
\"""
Function to extract the dates modified.
\"""
    parts = text.split("USERS")[0].split("DATES MODIFIED")[-1]
    pattern = r'\d{{4}}-\d{{2}}-\d{{2}}T\d{{2}}:\d{{2}}:\d{{2}}'
    return re.findall(pattern, parts)
----
Here is a file sample:
<title>U.S. GDP Rose 2.9% in the Fourth Quarter After a Year of High Inflation - WSJ</title>
<meta property="og:url" content="https://www.wsj.com/articles/us-gdp-economic-growth-fourth-quarter-2022-11674683034"/>
<meta name="article.published" content="2023-01-26T10:30:00Z"/><meta itemProp="datePublished" content="2023-01-26T10:30:00Z"/>
<meta name="article.created" content="2023-01-26T10:30:00Z"/><meta itemProp="dateCreated" content="2023-01-26T10:30:00Z"/>
<meta name="dateLastPubbed" content="2023-01-31T19:17:00Z"/><meta name="author" content="Sarah Chaney Cambon"/>
Question: Write a python function called "get_date_published_field" to extract the "datePublished" field from the text. Include any imports.
from bs4 import BeautifulSoup
def get_date_published_field(text: str):
\"""
Function to extract the date published.
\"""
soup = BeautifulSoup(text, parser="html.parser")
date_published_field = soup.find('meta', itemprop="datePublished")
date_published_field = date_published_field['content']
return date_published_field
----
Here is a sample of text:
{{chunk:}}
Question: Write a python function called "get_{{function_field:}}_field" to extract the "{{attribute:}}" field from the text. Include any imports."""
]
class Step:
def __init__(self, prompt) -> None:
self.prompt = prompt
def execute(self):
pass
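# A minimal sketch of how these templates are consumed (see
# schema_identification.py): the doubled braces above render as single-brace
# placeholders that .format() fills, and the finished prompt is wrapped in a
# Step before being sent to the model.
#
#     prompt = SCHEMA_ID_PROMPTS[0].format(chunk="Price: $5", topic="products")
#     step = Step(prompt)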
|
evaporate-main
|
evaporate/prompts.py
|
import os
import json
from collections import Counter, defaultdict
from manifest import Manifest
from configs import get_args
from prompts import Step
cur_idx = 0
def apply_prompt(step : Step, max_toks = 50, do_print=False, manifest=None, overwrite_cache=False):
global cur_idx
manifest_lst = manifest.copy()
if len(manifest) == 1:
manifest = manifest_lst[0]
else:
manifest = manifest_lst[cur_idx]
# sometimes we want to rotate keys
cur_idx = cur_idx + 1
    if cur_idx >= len(manifest_lst):
cur_idx = 0
prompt = step.prompt
response, num_tokens = get_response(
prompt,
manifest,
max_toks = max_toks,
overwrite=overwrite_cache,
stop_token="---"
)
step.response = response
if do_print:
print(response)
return response, num_tokens
def get_file_attribute(attribute):
attribute = attribute.lower()
attribute = attribute.replace("/", "_").replace(")", "").replace("-", "_")
attribute = attribute.replace("(", "").replace(" ", "_")
if len(attribute) > 30:
attribute = attribute[:30]
return attribute
def get_all_files(data_dir):
files = []
for file in os.listdir(data_dir):
if os.path.isfile(os.path.join(data_dir, file)):
files.append(os.path.join(data_dir, file))
else:
files.extend(get_all_files(os.path.join(data_dir, file)))
return files
def get_directory_hierarchy(data_dir):
if not data_dir.endswith("/") and os.path.isdir(data_dir):
data_dir = data_dir + "/"
directories2subdirs = defaultdict(list)
for file in os.listdir(data_dir):
new_dir = os.path.join(data_dir, file)
if not new_dir.endswith("/") and os.path.isdir(new_dir):
new_dir = new_dir + "/"
if os.path.isdir(new_dir):
directories2subdirs[data_dir].append(new_dir)
if os.listdir(new_dir):
more_subdirs = get_directory_hierarchy(new_dir)
for k, v in more_subdirs.items():
directories2subdirs[k].extend(v)
else:
directories2subdirs[new_dir] = []
else:
directories2subdirs[data_dir].append(new_dir)
return directories2subdirs
def get_unique_file_types(files):
suffix2file = {}
suffix2count = Counter()
for file in files:
suffix = file.split(".")[-1]
if not suffix:
suffix = "txt"
suffix2count[suffix] += 1
if suffix not in suffix2file:
suffix2file[suffix] = file
return suffix2file, suffix2count
def get_structure(dataset_name):
args = get_args(dataset_name)
if not os.path.exists(args.cache_dir):
os.makedirs(args.cache_dir)
if not os.path.exists(args.generative_index_path):
os.makedirs(args.generative_index_path)
# all files
cache_path = f"{args.cache_dir}/all_files.json"
if not os.path.exists(cache_path) or args.overwrite_cache:
files = get_all_files(args.data_dir)
with open(cache_path, "w") as f:
json.dump(files, f)
else:
with open(cache_path) as f:
files = json.load(f)
# all directories
cache_path = f"{args.cache_dir}/all_dirs.json"
if not os.path.exists(cache_path) or args.overwrite_cache:
directory_hierarchy = get_directory_hierarchy(args.data_dir)
with open(cache_path, "w") as f:
json.dump(directory_hierarchy, f)
else:
with open(cache_path) as f:
directory_hierarchy = json.load(f)
suffix2file, suffix2count = get_unique_file_types(files)
file_examples = "\n".join(list(suffix2file.values()))
file_types = ", ".join((suffix2file.keys()))
return directory_hierarchy, files, file_examples, file_types, args
def get_files_in_group(dir_path):
file_group = []
for i, (root,dirs,files) in enumerate(os.walk(dir_path, topdown=True)):
files = [f"{root}/{f}" for f in files]
file_group.extend(files)
print(f"Working with a sample size of : {len(file_group)} files.")
return file_group
# MANIFEST
def get_manifest_sessions(MODELS, MODEL2URL=None, KEYS=[]):
manifest_sessions = defaultdict(list)
for model in MODELS:
if any(kwd in model for kwd in ["davinci", "curie", "babbage", "ada", "cushman"]):
if not KEYS:
raise ValueError("You must provide a list of keys to use these models.")
for key in KEYS:
manifest, model_name = get_manifest_session(
client_name="openai",
client_engine=model,
client_connection=key,
)
manifest_sessions[model].append(manifest)
elif any(kwd in model for kwd in ["gpt-4"]):
if not KEYS:
raise ValueError("You must provide a list of keys to use these models.")
for key in KEYS:
manifest, model_name = get_manifest_session(
client_name="openaichat",
client_engine=model,
client_connection=key,
)
manifest_sessions[model].append(manifest)
else:
manifest, model_name = get_manifest_session(
client_name="huggingface",
client_engine=model,
client_connection=MODEL2URL[model],
)
manifest_sessions[model].append(manifest)
return manifest_sessions
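# A hedged usage sketch (the model name and API key below are placeholders,
# not values from this repository):
#
#     sessions = get_manifest_sessions(["text-davinci-003"], KEYS=["sk-..."])
#     davinci_sessions = sessions["text-davinci-003"]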
def get_manifest_session(
client_name="huggingface",
client_engine=None,
client_connection="http://127.0.0.1:5000",
cache_connection=None,
temperature=0,
top_p=1.0,
):
if client_name == "huggingface" and temperature == 0:
params = {
"temperature": 0.001,
"do_sample": False,
"top_p": top_p,
}
elif client_name in {"openai", "ai21", "openaichat"}:
params = {
"temperature": temperature,
"top_p": top_p,
"engine": client_engine,
}
else:
raise ValueError(f"{client_name} is not a valid client name")
cache_params = {
"cache_name": "sqlite",
"cache_connection": cache_connection,
}
manifest = Manifest(
client_name=client_name,
client_connection=client_connection,
**params,
**cache_params,
)
params = manifest.client_pool.get_client().get_model_params()
model_name = params["model_name"]
if "engine" in params:
model_name += f"_{params['engine']}"
return manifest, model_name
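# Example (illustrative): constructing a session against a locally hosted
# model served through Manifest. The URL and cache path are hypothetical.
def _demo_manifest_session():
    manifest, model_name = get_manifest_session(
        client_name="huggingface",
        client_connection="http://127.0.0.1:5000",
        cache_connection="./manifest_cache.sqlite",
    )
    print(f"Connected to {model_name}")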
def get_response(
prompt,
manifest,
overwrite=False,
max_toks=10,
stop_token=None,
gold_choices=[],
verbose=False,
):
prompt = prompt.strip()
if gold_choices:
gold_choices = [" " + g.strip() for g in gold_choices]
response_obj = manifest.run(
prompt,
gold_choices=gold_choices,
overwrite_cache=overwrite,
return_response=True,
)
response_obj = response_obj.get_json_response()["choices"][0]
log_prob = response_obj["text_logprob"]
response = response_obj["text"]
num_tokens = response_obj['usage']['total_tokens']
else:
response_obj = manifest.run(
prompt,
max_tokens=max_toks,
stop_token=stop_token,
overwrite_cache=overwrite,
return_response=True
)
response_obj = response_obj.get_json_response()
response = response_obj["choices"][0]["text"]
        # Regardless of the stop_token sent to the model, post-process on the
        # "---" delimiter used throughout the prompts.
        stop_token = "---"
response = response.strip().split(stop_token)[0].strip() if stop_token else response.strip()
log_prob = None
num_tokens = -1
if 'usage' in response_obj:
num_tokens = response_obj['usage'][0]['total_tokens']
if verbose:
print("\n***Prompt***\n", prompt)
print("\n***Response***\n", response)
if log_prob:
return response, log_prob
return response, num_tokens
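# Example (illustrative): the two modes of get_response. With gold_choices the
# model is scored against a fixed label set; without, it generates freely up
# to max_toks. A live manifest session from get_manifest_session is assumed.
def _demo_get_response(manifest):
    # Constrained: returns the chosen label and its log-probability.
    label, log_prob = get_response(
        "Is this document about a medical device? Answer yes or no.",
        manifest,
        gold_choices=["yes", "no"],
    )
    # Free-form: returns the generated text and a token count.
    text, num_tokens = get_response(
        "Extract the device name:",
        manifest,
        max_toks=20,
    )
    return label, text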
|
evaporate-main
|
evaporate/utils.py
|
import os
import random
import pickle
from tqdm import tqdm
from functools import partial
from multiprocessing import Pool
from collections import Counter, defaultdict
import signal
from contextlib import contextmanager
import re
import json
import math
import time
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import warnings
from bs4 import GuessedAtParserWarning
warnings.filterwarnings('ignore', category=GuessedAtParserWarning)
warnings.filterwarnings("ignore", category=UserWarning, module="bs4")
warnings.filterwarnings("ignore", category=UserWarning, module="BeautifulSoup")
warnings.filterwarnings("ignore", category=UserWarning, module="lxml")
from prompts import (
    METADATA_GENERATION_FOR_FIELDS,
    EXTRA_PROMPT,
    METADATA_EXTRACTION_WITH_LM,
    METADATA_EXTRACTION_WITH_LM_ZERO_SHOT,
    IS_VALID_ATTRIBUTE,
    Step,
)
from utils import apply_prompt, get_file_attribute
from evaluate_profiler import get_topk_scripts_per_field, evaluate
from profiler_utils import filter_file2chunks, check_vs_train_extractions, clean_function_predictions
import sys
sys.path.append(f"./weak_supervision/")
from run_ws import run_ws
class TimeoutException(Exception): pass
@contextmanager
def time_limit(seconds):
def signal_handler(signum, frame):
raise TimeoutException("Timed out!")
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
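# Example (illustrative): time_limit is used below to bound exec() of
# synthesized functions; the same pattern guards any runaway call. Note that
# signal.alarm only works in the main thread on Unix.
def _demo_time_limit():
    try:
        with time_limit(1):
            while True:  # stand-in for a non-terminating synthesized function
                pass
    except TimeoutException:
        print("call timed out after 1 second")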
def check_remove_attribute(
all_extractions,
attribute,
topic,
train_extractions={},
manifest_session=None,
overwrite_cache=False,
all_metrics={},
):
    extraction_fraction = 1.0
    # All entries share the same extraction_fraction; take it from the first.
    for key, info in all_metrics.items():
        extraction_fraction = info['extraction_fraction']
        break
values = []
num_toks = 0
has_non_none = False
for i, (file, metadata) in enumerate(all_extractions.items()):
if metadata and (metadata.lower() not in ["none"]) and metadata != '':
has_non_none = True
if len(values) < 3 and metadata and metadata.lower() != "none" and metadata != '':
values.append(metadata)
if not has_non_none and extraction_fraction > 0.5:
return False, num_toks
elif not has_non_none and extraction_fraction <= 0.5:
return True, num_toks
extractions = [m for f, m in all_extractions.items()]
if len(set(extractions)) == 1 or (len(set(extractions)) == 2 and "" in set(extractions)):
keys = list(train_extractions.keys())
gold_extractions = train_extractions[keys[0]]
if Counter(gold_extractions).most_common(1)[0][0].lower() != Counter(extractions).most_common(1)[0][0].lower():
return False, num_toks
else:
return True, num_toks
attr_str = f"{attribute}"
prompt_template = IS_VALID_ATTRIBUTE[0]
votes = Counter()
for value in values:
prompt = prompt_template.format(value=value, attr_str=attr_str, topic=topic)
try:
check, num_toks = apply_prompt(
Step(prompt),
max_toks=10,
manifest=manifest_session,
overwrite_cache=overwrite_cache
)
check = check.split("----")[0]
if "yes" in check.lower():
votes["yes"] += 1
elif 'no' in check.lower():
votes["no"] += 1
        except Exception:
            print("Rate limited...")
keep = False
if votes['yes']:
keep = True
return keep, num_toks
def combine_extractions(
args,
all_extractions,
all_metrics,
combiner_mode = "mv",
attribute=None,
train_extractions = None,
gold_key = None,
extraction_fraction_thresh=0.8,
):
final_extractions = {}
extraction_fraction = 0.0
for key, info in all_metrics.items():
extraction_fraction = info['extraction_fraction']
break
# collect all values by file
all_file2extractions = defaultdict(list)
total_tokens_prompted = 0
for key, file2extractions in all_extractions.items():
for i, (file, extraction) in tqdm(enumerate(
file2extractions.items()),
total=len(file2extractions),
desc=f"Applying key {key}"
):
extraction = clean_function_predictions(
extraction,
attribute=attribute
)
all_file2extractions[file].append(extraction)
if combiner_mode == "mv" or combiner_mode == "top_k":
for file, extractions in all_file2extractions.items():
if extraction_fraction >= extraction_fraction_thresh:
extractions = [e for e in extractions if e]
if not extractions:
extractions = ['']
final_extractions[file] = str(Counter(extractions).most_common(1)[0][0])
elif combiner_mode == "ws":
preds, used_deps, missing_files = run_ws(
all_file2extractions,
args.gold_extractions_file,
attribute=attribute,
has_abstains=extraction_fraction,
extraction_fraction_thresh=extraction_fraction_thresh,
)
for i, (file, extractions) in enumerate(all_file2extractions.items()):
if file in missing_files:
continue
            if len(extractions) == 1:
if type(extractions) == list:
extractions = extractions[0]
pred = extractions
final_extractions[file] = pred
elif len(Counter(extractions)) == 1:
pred = str(Counter(extractions).most_common(1)[0][0])
final_extractions[file] = pred
else:
pred = preds[len(final_extractions)]
if not pred:
final_extractions[file] = str(Counter(extractions).most_common(1)[0][0])
else:
final_extractions[file] = pred
if train_extractions:
final_extractions = check_vs_train_extractions(train_extractions, final_extractions, gold_key)
return final_extractions, total_tokens_prompted
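# Example (illustrative): the "mv" combiner above reduces to a per-file
# majority vote over candidate extractions.
def _demo_majority_vote():
    candidates = ["510(k)", "510(k)", "", "510k"]
    winner = str(Counter(candidates).most_common(1)[0][0])
    print(winner)  # -> "510(k)"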
def apply_final_ensemble(
group_files,
file2chunks,
file2contents,
selected_keys,
all_metrics,
attribute,
function_dictionary,
data_lake='',
function_cache=False,
manifest_sessions=[],
MODELS=[],
overwrite_cache=False,
do_end_to_end=False,
):
all_extractions = {}
total_tokens_prompted = 0
for key in selected_keys:
if "function" in key:
t0 = time.time()
print(f"Applying function {key}...")
extractions, num_function_errors = apply_final_profiling_functions(
file2contents,
group_files,
function_dictionary[key]['function'],
attribute,
data_lake=data_lake,
function_cache=function_cache,
)
t1 = time.time()
total_time = t1 - t0
all_extractions[key] = extractions
function_dictionary[key]['runtime'] = total_time
elif key in MODELS:
manifest_session = manifest_sessions[key]
extractions, num_toks, errored_out = get_model_extractions(
file2chunks,
group_files,
attribute,
manifest_session,
key,
overwrite_cache=overwrite_cache,
)
total_tokens_prompted += num_toks
if not errored_out:
all_extractions[key] = extractions
else:
raise ValueError(f"Key {key} not supported.")
if not do_end_to_end and not all_extractions:
default = {}
for file, _ in file2contents.items():
default[file] = ['']
all_extractions['default'] = default
return all_extractions, total_tokens_prompted
def apply_final_profiling_functions(
files2contents,
sample_files,
fn,
attribute,
data_lake='',
function_cache=False,
):
if function_cache:
original_fn = fn
file_attribute = attribute.replace(" ", "_").replace("/", "_").lower()
cache_dir = "./function_cache/"
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_path = f"{cache_dir}function_cache_{file_attribute}_{data_lake}.pkl"
if os.path.exists(cache_path):
try:
with open(cache_path, "rb") as f:
function_cache_dict = pickle.load(f)
            except Exception:
function_cache_dict = defaultdict(dict)
else:
function_cache_dict = defaultdict(dict)
all_extractions = {}
num_function_errors = 0
num_timeouts = 0
    for file in sample_files:
content = files2contents[file]
extractions = []
        # The synthesized functions are exec()'d into globals(), so the inputs
        # and output they reference must live there too.
        global result
        global text
        global preprocessed_text
        text = content
        preprocessed_text = text.replace(">\n", ">")
if num_timeouts > 1:
all_extractions[file] = deduplicate_extractions(extractions)
continue
if function_cache and file in function_cache_dict and original_fn in function_cache_dict[file]:
extractions = function_cache_dict[file][original_fn]
else:
if type(fn) != str:
                # The function is defined in code rather than prompt-generated
                # (e.g., for QA), so no special parsing or error handling is needed.
                result = fn(text)
extractions.append(result)
else:
fn = "\n".join([l for l in fn.split("\n") if "print(" not in l])
fn = "\n".join([l for l in fn.split("\n") if not l.startswith("#")])
function_field = get_function_field_from_attribute(attribute)
err = 0
try:
try:
with time_limit(1):
exec(fn, globals())
exec(f"result = get_{function_field}_field(text)", globals())
except TimeoutException as e:
print(f"Timeout {num_timeouts}")
num_timeouts += 1
raise e
extractions.append(result)
except Exception as e:
# This error is due to compilation and execution errors in the synthesized functions
err = 1
pass
if err:
# applied to preprocessed text
try:
try:
with time_limit(1):
exec(fn, globals())
exec(f"result = get_{function_field}_field(preprocessed_text)", globals())
except TimeoutException as e:
print("Timeout")
raise e
extractions.append(result)
err = 0
except Exception as e:
# This error is due to compilation and execution errors in the synthesized functions
pass
if err:
num_function_errors = 1
if function_cache:
function_cache_dict[file][original_fn] = extractions
all_extractions[file] = deduplicate_extractions(extractions)
if function_cache:
try:
with open(cache_path, "wb") as f:
pickle.dump(function_cache_dict, f)
except Exception as e:
pass
return all_extractions, num_function_errors
def get_function_field_from_attribute(attribute):
return re.sub(r"[^A-Za-z0-9]", "_", attribute)
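# Example (illustrative): attributes are normalized into valid identifiers
# before being spliced into generated function names, so an attribute like
# "device class" yields a synthesized get_device_class_field(text).
def _demo_function_field():
    assert get_function_field_from_attribute("device class") == "device_class"
    assert get_function_field_from_attribute("510(k) number") == "510_k__number"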
def get_functions(
file2chunks,
sample_files,
all_extractions,
attribute,
manifest_session,
overwrite_cache=False,
):
total_tokens_prompted = 0
functions = {}
function_promptsource = {}
    for file in tqdm(
        sample_files,
        total=len(sample_files),
        desc=f"Generating functions for attribute {attribute}",
    ):
chunks = file2chunks[file]
for chunk in chunks:
function_field = get_function_field_from_attribute(attribute)
for prompt_num, prompt_template in enumerate(METADATA_GENERATION_FOR_FIELDS):
prompt = prompt_template.format(
attribute=attribute,
function_field=function_field,
chunk=chunk,
)
try:
script, num_toks = apply_prompt(
Step(prompt),
max_toks=500,
manifest=manifest_session,
overwrite_cache=overwrite_cache
)
total_tokens_prompted += num_toks
except Exception as e:
print(e)
print(f"Failed to generate function for {attribute}")
continue
if "def" not in script:
                    script = \
f"""def get_{function_field}_field(text: str):
    \"""
    Function to extract {attribute}.
    \"""
    {script}
"""
return_idx = [i for i, s in enumerate(script.split("\n")) if "return" in s]
if not return_idx:
continue
return_idx = return_idx[0]
script = "\n".join(script.split("\n")[: return_idx + 1])
script = "\n".join([s for s in script.split("\n") if "print(" not in s])
script = "\n".join([s for s in script.split("\n") if s.startswith(" ") or s.startswith("\t") or s.startswith("def")])
fn_num = len(functions)
functions[f"function_{fn_num}"] = script
function_promptsource[f"function_{fn_num}"] = prompt_num
return functions, function_promptsource, total_tokens_prompted
def trim_chunks(chunk, attribute, window=20):
# Handling context length issues.
tokenized_chunk = chunk.lower().split()
indices = [i for i, s in enumerate(tokenized_chunk) if attribute.lower() in s]
if indices:
index = indices[0]
        lb = max(0, index - window)
        # Bound by the token count (not character count) and keep the window
        # after the match as well as before it.
        ub = min(len(tokenized_chunk), index + window)
trimmed_chunk = " ".join(tokenized_chunk[lb:ub])
else:
# split tokenized_chunk into groups of 50 tokens
mini_chunks = []
for i in range(0, len(tokenized_chunk), 50):
mini_chunks.append(" ".join(tokenized_chunk[i:i+50]))
# find the mini chunk with the most attribute tokens
max_num_attr_tokens = 0
max_num_attr_tokens_idx = 0
for i, mini_chunk in enumerate(mini_chunks):
num_attr_tokens = len([s for s in attribute.lower().split() if s in mini_chunk])
if num_attr_tokens > max_num_attr_tokens:
max_num_attr_tokens = num_attr_tokens
max_num_attr_tokens_idx = i
trimmed_chunk = mini_chunks[max_num_attr_tokens_idx]
return trimmed_chunk
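# Example (illustrative): when the attribute appears in the chunk, trim_chunks
# keeps the token window around its first mention; otherwise it falls back to
# the 50-token mini-chunk sharing the most tokens with the attribute.
def _demo_trim_chunks():
    chunk = "filler " * 40 + "manufacturer: Acme Corp " + "filler " * 40
    print(trim_chunks(chunk, "manufacturer", window=5))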
def deduplicate_extractions(extractions):
    # Order-preserving dedup; extractions may be unhashable (lists), so avoid set().
    deduplicated_extractions = []
    for extraction in extractions:
        if extraction not in deduplicated_extractions:
            deduplicated_extractions.append(extraction)
    return deduplicated_extractions
def get_model_extractions(
file2chunks,
sample_files,
attribute,
manifest_session,
model_name,
overwrite_cache=False,
collecting_preds=False,
):
num_errors = 0
total_prompts = 0
total_tokens_prompted = 0
has_context_length_error = False
file2results = {}
errored_out = False
    for file in tqdm(
        sample_files,
        total=len(sample_files),
        desc=f"Extracting attribute {attribute} using LM",
    ):
        if num_errors > 10 and num_errors == total_prompts:
            print("All erroring out... moving on.")
errored_out = True
continue
chunks = file2chunks[file]
extractions = []
for chunk_num, chunk in enumerate(chunks):
if "flan" in model_name:
PROMPTS = METADATA_EXTRACTION_WITH_LM_ZERO_SHOT
else:
PROMPTS = METADATA_EXTRACTION_WITH_LM
if has_context_length_error:
chunk = trim_chunks(chunk, attribute)
for prompt_template in PROMPTS:
prompt = prompt_template.format(attribute=attribute, chunk=chunk)
err = 0
total_prompts += 1
try:
extraction, num_toks = apply_prompt(
Step(prompt),
max_toks=100,
manifest=manifest_session,
overwrite_cache=overwrite_cache
)
total_tokens_prompted += num_toks
                except Exception:
err = 1
num_errors += err
print(f"Failed to extract {attribute} for {file}")
has_context_length_error = True
continue
extraction = extraction.split("---")[0].strip("\n")
extraction = extraction.split("\n")[-1].replace("[", "").replace("]", "").replace("'", "").replace('"', '')
extraction = extraction.split(", ")
extractions.append(extraction)
if collecting_preds and (not any(e for e in extractions) or not any(e[0] for e in extractions)):
for prompt_template in EXTRA_PROMPT:
prompt = prompt_template.format(attribute=attribute, chunk=chunk)
err = 0
total_prompts += 1
try:
extraction, num_toks = apply_prompt(
Step(prompt),
max_toks=100,
manifest=manifest_session,
overwrite_cache=overwrite_cache
)
total_tokens_prompted += num_toks
                    except Exception:
err = 1
num_errors += err
print(f"Failed to extract {attribute} for {file}")
has_context_length_error = True
continue
extraction = extraction.split("---")[0].strip("\n")
extraction = extraction.split("\n")[-1].replace("[", "").replace("]", "").replace("'", "").replace('"', '')
extraction = extraction.split(", ")
extractions.append(extraction)
file2results[file] = deduplicate_extractions(extractions)
return file2results, total_tokens_prompted, errored_out
def get_all_extractions(
file2chunks,
file2contents,
sample_files,
attribute,
manifest_sessions,
MODELS,
GOLD_KEY,
args,
use_qa_model=False,
overwrite_cache=False,
):
total_tokens_prompted = 0
all_extractions = {}
for model in MODELS:
manifest_session = manifest_sessions[model]
extractions, num_toks, errored_out = get_model_extractions(
file2chunks,
sample_files,
attribute,
manifest_session,
model,
overwrite_cache=overwrite_cache,
collecting_preds=True,
)
total_tokens_prompted += num_toks
if not errored_out:
all_extractions[model] = extractions
else:
print(f"Not applying {model} extractions")
return 0, 0, total_tokens_prompted
manifest_session = manifest_sessions[GOLD_KEY]
functions, function_promptsource, num_toks = get_functions(
file2chunks,
sample_files,
all_extractions[GOLD_KEY],
attribute,
manifest_session,
overwrite_cache=overwrite_cache,
)
total_tokens_prompted += num_toks
function_dictionary = defaultdict(dict)
for fn_key, fn in functions.items():
all_extractions[fn_key], num_function_errors = apply_final_profiling_functions(
file2contents,
sample_files,
fn,
attribute,
)
function_dictionary[fn_key]['function'] = fn
function_dictionary[fn_key]['promptsource'] = function_promptsource[fn_key]
return all_extractions, function_dictionary, total_tokens_prompted
def run_profiler(run_string, args, file2chunks, file2contents, sample_files, group_files, manifest_sessions, attribute, profiler_args):
total_tokens_prompted = 0
attribute = attribute.lower()
file_attribute = get_file_attribute(attribute)
save_path = f"{args.generative_index_path}/{run_string}_{file_attribute}_file2metadata.json"
file2chunks = filter_file2chunks(file2chunks, sample_files, attribute)
if file2chunks is None:
return total_tokens_prompted, 0
# PREDICT: get extractions from the synthesized functions and the LM on the sample documents
all_extractions, function_dictionary, num_toks = get_all_extractions(
file2chunks,
file2contents,
sample_files,
attribute,
manifest_sessions,
profiler_args.EXTRACTION_MODELS,
profiler_args.GOLD_KEY,
args,
use_qa_model=profiler_args.use_qa_model,
overwrite_cache=profiler_args.overwrite_cache,
)
total_tokens_prompted += num_toks
if not all_extractions:
return total_tokens_prompted, 0
# SCORE: Determine a set of functions to utilize for the full data lake.
all_metrics, key2golds, num_toks = evaluate(
all_extractions,
profiler_args.GOLD_KEY,
field=attribute,
manifest_session=manifest_sessions[profiler_args.GOLD_KEY],
overwrite_cache=profiler_args.overwrite_cache,
combiner_mode=profiler_args.combiner_mode,
extraction_fraction_thresh=profiler_args.extraction_fraction_thresh,
use_abstension=profiler_args.use_abstension,
)
total_tokens_prompted += num_toks
selected_keys = get_topk_scripts_per_field(
all_metrics,
function_dictionary,
all_extractions,
gold_key = profiler_args.GOLD_KEY,
k=profiler_args.num_top_k_scripts,
do_end_to_end=profiler_args.do_end_to_end,
combiner_mode=profiler_args.combiner_mode,
)
if not selected_keys and profiler_args.do_end_to_end:
print(f"Removing {file_attribute}")
if os.path.exists(save_path):
os.remove(save_path)
return total_tokens_prompted, 0
# APPLY: Run the best performing functions on the data lake.
print(f"Apply the scripts to the data lake and save the metadata. Taking the top {profiler_args.num_top_k_scripts} scripts per field.")
top_k_extractions, num_toks = apply_final_ensemble(
group_files,
file2chunks,
file2contents,
selected_keys,
all_metrics,
attribute,
function_dictionary,
data_lake=args.data_lake,
manifest_sessions=manifest_sessions,
function_cache=True,
MODELS=profiler_args.EXTRACTION_MODELS,
overwrite_cache=profiler_args.overwrite_cache,
do_end_to_end=profiler_args.do_end_to_end,
)
total_tokens_prompted += num_toks
file2metadata, num_toks = combine_extractions(
args,
top_k_extractions,
all_metrics,
combiner_mode=profiler_args.combiner_mode,
train_extractions=all_extractions,
attribute=attribute,
gold_key = profiler_args.GOLD_KEY,
extraction_fraction_thresh=profiler_args.extraction_fraction_thresh,
)
total_tokens_prompted += num_toks
# FINAL CHECK: Ensure that the metadata is valid (Skip this for ClosedIE).
if profiler_args.do_end_to_end:
keep_attribute, num_toks = check_remove_attribute(
file2metadata,
attribute,
args.topic,
train_extractions=key2golds,
manifest_session=manifest_sessions[profiler_args.GOLD_KEY],
overwrite_cache=profiler_args.overwrite_cache,
all_metrics=all_metrics,
)
total_tokens_prompted += num_toks
if not keep_attribute:
print(f"Removing {file_attribute}")
if os.path.exists(save_path):
os.remove(save_path)
return total_tokens_prompted, 0
try:
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_all_extractions.json", "w") as f:
json.dump(all_extractions, f)
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_functions.json", "w") as f:
json.dump(function_dictionary, f)
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_all_metrics.json", "w") as f:
json.dump(all_metrics, f)
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_top_k_keys.json", "w") as f:
json.dump(selected_keys, f)
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_file2metadata.json", "w") as f:
json.dump(file2metadata, f)
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_top_k_extractions.json", "w") as f:
json.dump(top_k_extractions, f)
print(f"Save path: {args.generative_index_path}/{run_string}_{file_attribute}_all_extractions.json")
return total_tokens_prompted, 1
    except Exception as e:
        # json.dump can fail on non-serializable values; retry below after
        # stringifying the metadata. Capture the error since `e` is unbound
        # once this except block exits.
        first_error = e
    try:
        clean_file2metadata = {}
        for file, metadata in file2metadata.items():
            clean_file2metadata[file] = str(metadata)
        with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_file2metadata.json", "w") as f:
            json.dump(clean_file2metadata, f)
        with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_all_metrics.json", "w") as f:
            json.dump(all_metrics, f)
        with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_top_k_keys.json", "w") as f:
            json.dump(selected_keys, f)
        print(f"Saved after stringifying metadata. Original error: {first_error}")
        return total_tokens_prompted, 1
    except Exception:
        pass
return total_tokens_prompted, 0
|
evaporate-main
|
evaporate/profiler.py
|
from collections import defaultdict, Counter
import numpy as np
from prompts import (PICK_VALUE, Step,)
from utils import apply_prompt
def clean_comparison(responses, field):
clean_responses = []
if type(responses) == str:
responses = [responses]
for response in responses:
response = response.lower()
field = field.lower()
field_reformat = field.replace("_", "-")
for char in ["'", field, field_reformat, ":", "<", ">", '"', "none"]:
response = response.replace(char, " ")
for char in [",", ".", "?", "!", ";", "(", ")", "[", "]", "{", "}", "-", "none", "\n", "\t", "\r"]:
response = response.replace(char, " ")
        response = response.replace("  ", " ")
response = response.split()
response = [r.strip() for r in response]
response = [r for r in response if r]
response = ' '.join(response)
clean_responses.append(response)
clean_responses = ", ".join(clean_responses)
return clean_responses
def normalize_value_type(metadata, attribute):
# make everything a list of strings since functions can return diverse types
cleaned_items = []
if type(metadata) == str:
metadata = [metadata]
for item in metadata:
if type(item) == list:
item = [str(i) for i in item]
item = ", ".join(item)
elif type(item) == tuple:
item = list(item)
item = [str(i) for i in item]
item = ", ".join(item)
elif item is None:
item = ''
elif type(item) != str:
item = [str(item)]
item = ", ".join(item)
if item:
cleaned_items.append(item)
return cleaned_items
def pick_a_gold_label(golds, attribute="", manifest_session=None, overwrite_cache=False):
"""
To counteract the large model hallucinating on various chunks affecting the evaluation of good functions.
"""
pred_str = "- " + "\n- ".join(golds)
prompt_template = PICK_VALUE[0]
prompt = prompt_template.format(pred_str=pred_str, attribute=attribute)
try:
check, num_toks = apply_prompt(
Step(prompt),
max_toks=100,
manifest=manifest_session,
overwrite_cache=overwrite_cache
)
    except Exception:
return golds, 0
check = check.split("\n")
check = [c for c in check if c]
if check:
if "none" in check[0].lower():
check = golds
else:
check = check[0]
return check, num_toks
def text_f1(
preds=[],
golds=[],
extraction_fraction=1.0,
attribute=None,
extraction_fraction_thresh=0.8,
use_abstension=True,
):
"""Compute average F1 of text spans.
Taken from Squad without prob threshold for no answer.
"""
total_f1 = 0
total_recall = 0
total_prec = 0
f1s = []
total = 0
if extraction_fraction >= extraction_fraction_thresh and use_abstension:
new_preds = []
new_golds = []
for pred, gold in zip(preds, golds):
if pred:
new_preds.append(pred)
new_golds.append(gold)
preds = new_preds
golds = new_golds
if not preds:
return 0.0, 0.0
for pred, gold in zip(preds, golds):
if type(pred) == str:
pred_toks = pred.split()
else:
pred_toks = pred
        if type(gold) == str:
            gold_toks_list = [gold.split()]
        else:
            # gold may already be a list of token lists
            gold_toks_list = gold
if type(gold_toks_list) == list and gold_toks_list:
for gold_toks in gold_toks_list:
                # If both lists are length 1, split to account for examples like:
                # ["a b"], ["a"] -> ["a", "b"], ["a"]
if len(gold_toks) == 1 and len(pred_toks) == 1:
gold_toks = gold_toks[0].split()
pred_toks = pred_toks[0].split()
common = Counter(pred_toks) & Counter(gold_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
total_f1 += int(gold_toks == pred_toks)
f1s.append(int(gold_toks == pred_toks))
total_recall += int(gold_toks == pred_toks)
elif num_same == 0:
total_f1 += 0
f1s.append(0)
else:
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
total_f1 += f1
total_recall += recall
total_prec += precision
f1s.append(f1)
total += 1
if not total:
return 0.0, 0.0
f1_avg = total_f1 / total
f1_median = np.percentile(f1s, 50)
return f1_avg, f1_median
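# Example (illustrative): token-level F1 as computed above. "acme corp" vs a
# gold of "acme" shares one token, so precision = 1/2, recall = 1, F1 = 2/3.
def _demo_text_f1():
    avg_f1, median_f1 = text_f1(preds=["acme corp"], golds=["acme"])
    print(round(avg_f1, 3))  # -> 0.667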
def evaluate(
    all_extractions: list,
    gold_key: str,
    field: str,
manifest_session=None,
overwrite_cache=False,
combiner_mode='mv',
extraction_fraction_thresh=0.8,
use_abstension=True,
):
normalized_field_name = field
for char in ["'", ":", "<", ">", '"', "_", "-", " ", "none"]:
normalized_field_name = normalized_field_name.replace(char, "")
key2golds = defaultdict(list)
key2preds = defaultdict(list)
total_tokens_prompted = 0
# handle FM golds on D_eval
gold_file2metadata = all_extractions[gold_key]
cleaned_gold_metadata = {}
for filepath, gold_metadata in gold_file2metadata.items():
gold_metadata = normalize_value_type(gold_metadata, field)
if len(gold_metadata) > 1:
gold_metadata, num_toks = pick_a_gold_label(
gold_metadata,
attribute=field,
manifest_session=manifest_session,
overwrite_cache=overwrite_cache
)
total_tokens_prompted += num_toks
gold_metadata = clean_comparison(gold_metadata, field)
cleaned_gold_metadata[filepath] = gold_metadata
# handle function preds on D_eval
for i, (key, file2metadata) in enumerate(all_extractions.items()):
if key == gold_key:
continue
for filepath, metadata in file2metadata.items():
gold_metadata = cleaned_gold_metadata[filepath]
pred_metadata = normalize_value_type(metadata, field)
pred_metadata = clean_comparison(pred_metadata, field)
key2golds[key].append(gold_metadata)
key2preds[key].append(pred_metadata)
        # Handling abstentions: count how often a value (vs. "none") was produced
        num_extractions = 0
        for golds in key2golds[key]:
            if golds and golds.lower() != 'none':
                num_extractions += 1
extraction_fraction = float(num_extractions) / float(len(key2golds[key]))
if combiner_mode == "top_k":
# Don't use the extraction fraction in the naive setting for scoring
extraction_fraction = 0.0
print(f"Extraction fraction: {extraction_fraction}")
metrics = {}
for key, golds in key2golds.items():
preds = key2preds[key]
f1, f1_med = text_f1(
preds, golds,
extraction_fraction=extraction_fraction,
attribute=field,
extraction_fraction_thresh=extraction_fraction_thresh,
use_abstension=use_abstension,
)
priorf1, priorf1_med = text_f1(preds, golds, extraction_fraction=0.0, attribute=field)
metrics[key] = {
"average_f1": f1,
"median_f1": f1_med,
"extraction_fraction": extraction_fraction,
"prior_average_f1": priorf1,
"prior_median_f1": priorf1_med,
}
return metrics, key2golds, total_tokens_prompted
def get_topk_scripts_per_field(
script2metrics,
function_dictionary,
all_extractions,
gold_key='',
k=3,
do_end_to_end=False,
    keep_thresh=0.5,
    cost_thresh=1,
combiner_mode='mv',
):
script2avg = dict(
sorted(script2metrics.items(),
reverse=True,
key=lambda x: (x[1]['average_f1'], x[1]['median_f1']))
)
top_k_scripts = [k for k, v in script2avg.items() if k != gold_key]
top_k_values = [
max(v['average_f1'], v['median_f1']) for k, v in script2avg.items() if k != gold_key
]
if not top_k_values:
return []
best_value = top_k_values[0]
best_script = top_k_scripts[0]
if best_value < keep_thresh and do_end_to_end:
return []
filtered_fn_scripts = {
k:v for k, v in script2metrics.items() if (
v['average_f1'] >= keep_thresh or v['median_f1'] >= keep_thresh
) and "function" in k
}
top_k_fns = []
num_fns = 0
if filtered_fn_scripts:
script2avg = dict(
sorted(filtered_fn_scripts.items(),
reverse=True,
key=lambda x: (x[1]['average_f1'], x[1]['median_f1']))
)
top_k_fns = [
k for k, v in script2avg.items() if k != gold_key and abs(
max(v['average_f1'], v['median_f1'])-best_value
) < cost_thresh
]
num_fns = len(top_k_fns)
if num_fns:
top_k_scripts = top_k_scripts[0:min(k, num_fns)]
else:
return []
# construct final set of functions
final_set = []
for key in top_k_scripts:
if key in top_k_fns:
final_set.append(key)
if len(final_set) > k:
final_set = final_set[:k]
if not final_set and not do_end_to_end:
return [top_k_scripts[0]]
# print results
print(f"Top {k} scripts:")
for script in final_set:
print(f"- {script}; Score: {script2metrics[script]}")
print(f"Best script overall: {best_script}; Score: {script2metrics[best_script]}")
return final_set
|
evaporate-main
|
evaporate/evaluate_profiler.py
|
import os
import time
import random
import json
import datetime
from tqdm import tqdm
import pickle
import argparse
from collections import defaultdict, Counter
from utils import get_structure, get_manifest_sessions, get_file_attribute
from profiler_utils import chunk_file, sample_scripts, set_profiler_args
from schema_identification import identify_schema
from profiler import run_profiler
from evaluate_synthetic import main as evaluate_synthetic_main
random.seed(0)
def get_data_lake_info(args, data_lake, DATA_DIR="/data/evaporate"):
extractions_file = None
if data_lake == "fda_510ks":
DATA_DIR = args.data_dir
file_groups = os.listdir(DATA_DIR)
if not DATA_DIR.endswith("/"):
DATA_DIR += "/"
file_groups = [f"{DATA_DIR}{file_group}" for file_group in file_groups if not file_group.startswith(".")]
full_file_groups = file_groups.copy()
extractions_file = args.gold_extractions_file
parser = "txt"
return file_groups, extractions_file, parser, full_file_groups
def chunk_files(file_group, parser, chunk_size, remove_tables, max_chunks_per_file, body_only):
file2chunks = {}
file2contents = {}
for file in tqdm(file_group, total=len(file_group), desc="Chunking files"):
content, chunks = chunk_file(
parser,
file,
chunk_size=chunk_size,
mode="train",
remove_tables=remove_tables,
body_only=body_only
)
if max_chunks_per_file > 0:
chunks = chunks[:max_chunks_per_file]
file2chunks[file] = chunks
file2contents[file] = content
return file2chunks, file2contents
# chunking & preparing data
def prepare_data(profiler_args, file_group, parser="html"):
data_lake = profiler_args.data_lake
if profiler_args.body_only:
body_only = profiler_args.body_only
suffix = f"_bodyOnly{body_only}"
else:
suffix = ""
# prepare the datalake: chunk all files
manifest_sessions = get_manifest_sessions(profiler_args.MODELS, MODEL2URL=profiler_args.MODEL2URL, KEYS=profiler_args.KEYS)
if os.path.exists(f".cache/{data_lake}_size{len(file_group)}_chunkSize{profiler_args.chunk_size}_{suffix}_file2chunks.pkl"):
with open(f".cache/{data_lake}_size{len(file_group)}_chunkSize{profiler_args.chunk_size}_{suffix}_file2chunks.pkl", "rb") as f:
file2chunks = pickle.load(f)
with open(f".cache/{data_lake}_size{len(file_group)}_chunkSize{profiler_args.chunk_size}_{suffix}_file2contents.pkl", "rb") as f:
file2contents = pickle.load(f)
else:
file2chunks, file2contents = chunk_files(
file_group,
parser,
profiler_args.chunk_size,
profiler_args.remove_tables,
profiler_args.max_chunks_per_file,
profiler_args.body_only
)
with open(f".cache/{data_lake}_size{len(file_group)}_chunkSize{profiler_args.chunk_size}_removeTables{profiler_args.remove_tables}{suffix}_file2chunks.pkl", "wb") as f:
pickle.dump(file2chunks, f)
with open(f".cache/{data_lake}_size{len(file_group)}_chunkSize{profiler_args.chunk_size}_removeTables{profiler_args.remove_tables}{suffix}_file2contents.pkl", "wb") as f:
pickle.dump(file2contents, f)
return file2chunks, file2contents, manifest_sessions
def get_run_string(
data_lake, today, file_groups, profiler_args, do_end_to_end,
train_size, dynamicbackoff, models
):
body = profiler_args.body_only # Baseline systems only operate on the HTML body
model_ct = len(models)
if profiler_args.use_qa_model:
model_ct += 1
run_string = f"dataLake{data_lake}_date{today}_fileSize{len(file_groups)}_trainSize{train_size}_numAggregate{profiler_args.num_top_k_scripts}_chunkSize{profiler_args.chunk_size}_removeTables{profiler_args.remove_tables}_body{body}_cascading{do_end_to_end}_useBackoff{dynamicbackoff}_MODELS{model_ct}"
return run_string
def get_gold_metadata(args):
# get the list of gold metadata for closed-IE runs
try:
with open(args.gold_extractions_file) as f:
gold_file2extractions = json.load(f)
    except Exception:
with open(args.gold_extractions_file, "rb") as f:
gold_file2extractions = pickle.load(f)
frequency = Counter()
for file, dic in gold_file2extractions.items():
for k, v in dic.items():
if k != "topic_entity_name":
if type(v) == str and v:
frequency[k] += 1
elif type(v) == list and v and v[0]:
frequency[k] += 1
sorted_frequency = sorted(frequency.items(), key=lambda x: x[1], reverse=True)
gold_metadata = [x[0] for x in sorted_frequency]
gold_attributes = [m.lower() for m in gold_metadata if m not in ['topic_entity_name']]
return gold_attributes
def determine_attributes_to_remove(attributes, args, run_string, num_attr_to_cascade):
attributes_reordered = {}
attributes_to_remove = []
attributes_to_metrics = {}
attribute_to_first_extractions = {}
mappings_names = {}
for num, attribute in enumerate(attributes):
attribute = attribute.lower()
file_attribute = get_file_attribute(attribute)
if not os.path.exists(f"{args.generative_index_path}/{run_string}_{file_attribute}_all_metrics.json"):
continue
if not os.path.exists(f"{args.generative_index_path}/{run_string}_{file_attribute}_file2metadata.json"):
continue
if num >= num_attr_to_cascade:
os.remove(f"{args.generative_index_path}/{run_string}_{file_attribute}_all_metrics.json")
os.remove(f"{args.generative_index_path}/{run_string}_{file_attribute}_file2metadata.json")
continue
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_all_metrics.json") as f:
metrics = json.load(f)
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_top_k_keys.json") as f:
selected_keys = json.load(f)
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_file2metadata.json") as f:
file2metadata = json.load(f)
        # Guard before indexing selected_keys[0].
        if selected_keys and metrics:
            attributes_reordered[attribute] = metrics[selected_keys[0]]
for a, m in attributes_to_metrics.items():
if attribute.lower() in a.lower() or a.lower() in attribute.lower():
if m == metrics[selected_keys[0]]['average_f1']:
attributes_to_remove.append(attribute)
mappings_names[a] = attribute
mappings_names[attribute] = a
break
first_extractions = [m for i, (f, m) in enumerate(file2metadata.items()) if i < 5]
if any(f != "" for f in first_extractions):
first_extractions = " ".join(first_extractions)
for a, m in attribute_to_first_extractions.items():
if m == first_extractions:
attributes_to_remove.append(attribute)
mappings_names[a] = attribute
mappings_names[attribute] = a
break
if attribute in attributes_to_remove:
continue
if selected_keys:
attributes_to_metrics[attribute] = metrics[selected_keys[0]]['average_f1']
attribute_to_first_extractions[attribute] = first_extractions
return attributes_to_remove, mappings_names, attributes
def measure_openie_results(
attributes,
args,
profiler_args,
run_string,
gold_attributes,
attributes_to_remove,
file_groups,
mappings_names
):
file2extractions = defaultdict(dict)
unique_attributes = set()
num_extractions2results = {}
data_lake = profiler_args.data_lake
for attr_num, attribute in enumerate(attributes):
attribute = attribute.lower()
file_attribute = get_file_attribute(attribute)
if os.path.exists(f"{args.generative_index_path}/{run_string}_{file_attribute}_file2metadata.json"):
if attribute in attributes_to_remove:
print(f"Removing: {attribute}")
os.remove(f"{args.generative_index_path}/{run_string}_{file_attribute}_file2metadata.json")
continue
with open(f"{args.generative_index_path}/{run_string}_{file_attribute}_file2metadata.json") as f:
file2metadata = json.load(f)
for file, extraction in file2metadata.items():
file2extractions[file][attribute] = extraction
unique_attributes.add(attribute)
if file2extractions:
num_extractions = len(unique_attributes)
nums = [1, 2, 3, 4, len(attributes) - 1, len(gold_attributes)]
        if (file2extractions and (num_extractions % 5 == 0 or num_extractions in nums)) or attr_num == len(attributes) - 1:
if num_extractions in num_extractions2results:
continue
with open(f"{args.generative_index_path}/{run_string}_file2extractions.json", "w") as f:
json.dump(file2extractions, f)
results = evaluate_synthetic_main(
run_string,
args,
profiler_args,
data_lake,
sample_files=file_groups,
stage='openie',
mappings_names=mappings_names
)
num_extractions2results[num_extractions] = results
return num_extractions2results
def run_experiment(profiler_args):
do_end_to_end = profiler_args.do_end_to_end
num_attr_to_cascade = profiler_args.num_attr_to_cascade
train_size = profiler_args.train_size
data_lake = profiler_args.data_lake
print(f"Data lake")
today = datetime.datetime.today().strftime("%m%d%Y")
setattr(profiler_args, 'chunk_size', 3000)
_, _, _, _, args = get_structure(data_lake)
file_groups, extractions_file, parser, full_file_groups = get_data_lake_info(args, data_lake)
file2chunks, file2contents, manifest_sessions = prepare_data(
profiler_args, full_file_groups, parser
)
extraction_manifest_sessions = {
k: v for k, v in manifest_sessions.items() if k in profiler_args.EXTRACTION_MODELS
}
gold_attributes = get_gold_metadata(args)
results_by_train_size = defaultdict(dict)
total_time_dict = defaultdict(dict)
    # Always-true guard left over from sweeping multiple train sizes.
    if 1:
total_tokens_prompted = 0
print(f"\n\nData-lake: {data_lake}, Train size: {train_size}")
setattr(profiler_args, 'train_size', train_size)
run_string = get_run_string(
data_lake, today, full_file_groups, profiler_args,
do_end_to_end, train_size,
profiler_args.use_dynamic_backoff,
profiler_args.EXTRACTION_MODELS,
)
sample_files = sample_scripts(
file_groups,
train_size=profiler_args.train_size,
)
# top-level schema identification
if do_end_to_end:
t0 = time.time()
num_toks = identify_schema(
run_string,
args,
file2chunks,
file2contents,
sample_files,
extraction_manifest_sessions,
data_lake,
profiler_args
)
t1 = time.time()
total_time = t1-t0
total_tokens_prompted += num_toks
total_time_dict[f'schemaId'][f'totalTime_trainSize{train_size}'] = int(total_time)
results = evaluate_synthetic_main(
run_string,
args,
profiler_args,
data_lake,
stage='schema_id'
)
results_by_train_size[train_size]['schema_id'] = results
if 1:
if do_end_to_end:
with open(f"{args.generative_index_path}/{run_string}_identified_schema.json") as f:
most_common_fields = json.load(f)
with open(f"{args.generative_index_path}/{run_string}_order_of_addition.json") as f:
order_of_addition = json.load(f)
order = {item: (len(order_of_addition) - i) for i, item in enumerate(order_of_addition)}
ctr = Counter(most_common_fields)
pred_metadata = sorted(
ctr.most_common(num_attr_to_cascade),
key=lambda x: (x[1], order[x[0]]),
reverse=True
)
attributes = [item[0].lower() for item in pred_metadata]
else:
attributes = gold_attributes
# top-level information extraction
num_collected = 0
for i, attribute in enumerate(attributes):
print(f"\n\nExtracting {attribute} ({i+1} / {len(attributes)})")
t0 = time.time()
num_toks, success = run_profiler(
run_string,
args,
file2chunks,
file2contents,
sample_files,
full_file_groups,
extraction_manifest_sessions,
attribute,
profiler_args
)
t1 = time.time()
total_time = t1-t0
total_tokens_prompted += num_toks
total_time_dict[f'extract'][f'totalTime_trainSize{train_size}'] = int(total_time)
if success:
num_collected += 1
if num_collected >= num_attr_to_cascade:
break
# run closed ie eval
results = evaluate_synthetic_main(
run_string,
args,
profiler_args,
data_lake,
gold_attributes=gold_attributes,
stage='extract'
)
results_by_train_size[train_size]['extract'] = results
# Determine whether to remove any attributes based on the extractions
# Potentially can rerank the attributes based on the metric comparison to big model
if do_end_to_end:
attributes_to_remove, mappings_names, attributes = determine_attributes_to_remove(
attributes,
args,
run_string,
num_attr_to_cascade,
)
numextractions2results = measure_openie_results(
attributes,
args,
profiler_args,
run_string,
gold_attributes,
attributes_to_remove,
full_file_groups,
mappings_names
)
if 'openie' not in results_by_train_size[train_size]:
results_by_train_size[train_size]['openie'] = {}
results_by_train_size[train_size]['openie'] = numextractions2results
results_by_train_size[train_size]['total_tokens_prompted'] = total_tokens_prompted
results_by_train_size[train_size]['num_total_files'] = len(full_file_groups)
results_by_train_size[train_size]['num_sample_files'] = len(sample_files)
if not os.path.exists("results_dumps"):
os.mkdir("results_dumps")
print(run_string)
with open(f"results_dumps/{run_string}_results_by_train_size.pkl", "wb") as f:
pickle.dump(results_by_train_size, f)
print(f"Saved!")
print(f"Total tokens prompted: {total_tokens_prompted}")
def str2bool(v):
    # argparse's type=bool treats any non-empty string (including "False") as
    # True, so parse boolean CLI flags explicitly.
    return str(v).lower() in {"1", "true", "t", "yes", "y"}
def get_experiment_args():
    parser = argparse.ArgumentParser()
parser.add_argument(
"--data_lake",
type=str,
help="Name of the data lake to operate over. Must be in configs.py"
)
parser.add_argument(
"--do_end_to_end",
        type=str2bool,
default=True,
help="True for OpenIE, False for ClosedIE"
)
parser.add_argument(
"--num_attr_to_cascade",
type=int,
default=35,
help="Number of attributes to generate functions for"
)
parser.add_argument(
"--num_top_k_scripts",
type=int,
default=10,
help="Number of generated functions to combine over for each attribute"
)
parser.add_argument(
"--train_size",
type=int,
default=10,
help="Number of files to prompt on"
)
parser.add_argument(
"--combiner_mode",
type=str,
default='ws',
help="Combiner mode for combining the outputs of the generated functions",
choices=['ws', 'mv', 'top_k']
)
parser.add_argument(
"--use_dynamic_backoff",
        type=str2bool,
default=True,
help="Whether to generate functions or do Evaporate-Direct",
)
parser.add_argument(
"--KEYS",
type=str,
default=[],
help="List of keys to use the model api",
nargs='*'
)
experiment = parser.parse_args()
return experiment
def main():
experiment_args = get_experiment_args()
profiler_args = {}
profiler_args = set_profiler_args(profiler_args)
model_dict = {
'MODELS': ["text-davinci-003"],
'EXTRACTION_MODELS': ["text-davinci-003"],
'GOLD_KEY': "text-davinci-003",
}
# Example of how to use a locally-hosted FM
# model_dict = {
# 'MODELS': [" EleutherAI/gpt-j-6B"],
# 'EXTRACTION_MODELS': [" EleutherAI/gpt-j-6B"],
# 'GOLD_KEY': " EleutherAI/gpt-j-6B",
# 'MODEL2URL': {
# " EleutherAI/gpt-j-6B": "http://127.0.0.1:5000"
# },
# }
for k, v in model_dict.items():
setattr(profiler_args, k, v)
for k in vars(experiment_args):
setattr(profiler_args, k, getattr(experiment_args, k))
run_experiment(profiler_args)
if __name__ == "__main__":
main()
|
evaporate-main
|
evaporate/run_profiler.py
|
import numpy as np
import itertools
class Ising():
    def __init__(self, m, potentials, thetas=None, vals=[-1, 1]) -> None:
self.m = m
self.v = m + 1 # total number of vertices
self.potentials = potentials
self.vals = vals
        # TODO: support values in {0, 1}
if thetas is not None:
assert len(thetas) >= len(potentials), f"Need to specify at least {len(potentials)} theta parameters."
self.thetas = thetas
else:
self.thetas = np.random.rand(len(potentials))
self.support = np.array(list(map(list, itertools.product(vals, repeat=self.v))))
self._make_pdf()
self._make_cdf()
self._get_means()
self._get_balance()
self._get_accs()
def _exponential_family(self, labels):
x = 0.0
for i in range(len(self.potentials)):
x += self.thetas[i] * labels[self.potentials[i]].prod()
return np.exp(x)
def _make_pdf(self):
p = np.zeros(len(self.support))
for i, labels in enumerate(self.support):
p[i] = self._exponential_family(labels)
self.z = sum(p)
self.pdf = p/self.z
def _make_cdf(self):
self.cdf = np.cumsum(self.pdf)
def joint_p(self, C, values):
p = 0.0
for k, labels in enumerate(self.support):
flag = True
for i in range(len(C)):
prod = labels[C[i]].prod()
if prod != values[i]:
flag = False
if flag == True:
p += self.pdf[k]
return p
def expectation(self, C):
return self.vals[0] * self.joint_p(C, self.vals[0] * np.ones(len(C))) + self.vals[1] * self.joint_p(C, self.vals[1] * np.ones(len(C)))
def _get_means(self):
self.means = np.zeros(self.m)
for k in range(self.m):
self.means[k] = self.expectation([[k]])
def _get_balance(self):
self.balance = self.joint_p([[self.m]], [1])
# def _get_covariance(self):
def _get_accs(self):
"""
self.accs[k, i, j] = Pr(lf_k = j | y = i) (i, j scaled to -1, 1 if needed)
"""
self.accs = np.zeros((self.m, 2, 2))
for k in range(self.m):
self.accs[k, 1, 1] = self.joint_p([[k], [self.m]], [self.vals[1], self.vals[1]]) / self.balance
self.accs[k, 0, 0] = self.joint_p([[k], [self.m]], [self.vals[0], self.vals[0]]) / (1 - self.balance)
self.accs[k, 1, 0] = 1 - self.accs[k, 1, 1]
self.accs[k, 0, 1] = 1 - self.accs[k, 0, 0]
def sample(self):
r = np.random.random_sample()
smaller = np.where(self.cdf < r)[0]
if len(smaller) == 0:
i = 0
else:
i = smaller.max() + 1
return self.support[i]
def make_data(self, n, has_label = True):
L = np.zeros((n, self.m))
gold = np.zeros(n)
for i in range(n):
l = self.sample()
L[i, :] = l[:self.m]
if has_label:
gold[i] = l[self.m]
return L.astype(int), gold.astype(int)
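# Example (illustrative): a 2-LF Ising model. potentials lists cliques over
# vertex indices, where index m (= 2 here) is the label y; a clique [i, m]
# couples LF i to y, and a larger theta makes that LF more accurate.
def _demo_ising():
    model = Ising(m=2, potentials=[[2], [0, 2], [1, 2]], thetas=np.array([0.1, 1.0, 1.0]))
    votes, gold = model.make_data(n=5)
    print(votes.shape, gold.shape)  # -> (5, 2) (5,)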
def est_accs(m, vote, gold):
    # Compute Pr(lf | y) accuracies on the train/dev set; each prompt has
    # 4 values (2x2).
    classes = [0, 1]
    gold_idxs = [np.where(gold == -1)[0], np.where(gold == 1)[0]]
    accs = np.zeros((m, 2, 2))  # accs[p, i, j] = Pr(prompt_p = 2j-1 | y = 2i-1)
for p in range(m):
for i in classes:
for j in classes:
accs[p, i, j] = len(np.where(vote[gold_idxs[i], p] == 2*j-1)[0]) / len(gold_idxs[i])
return accs
def est_balance(gold, n):
return len(np.where(gold == 1)[0]) / n
# Pr(lf votes, y)
def get_cond_probs(m, votes, y, accs, balance):
pr_y = balance if y == 1 else 1 - balance
prod = pr_y
for i in range(m):
prod *= accs[i, y, int(0.5*(votes[i] + 1))] # this assumes everything is independent
return prod
# Pr(y = 1 | lf votes)
def get_probs(m, votes, accs, balance):
pos = get_cond_probs(m, votes, 1, accs, balance)
neg = get_cond_probs(m, votes, 0, accs, balance)
if pos == 0:
return 0
else:
return pos / (pos + neg)
def pick_best_prompt(m, vote, gold, n):
# overall accuracies Pr(lf_p = y) on test (we don't know these)
overall_train_acc = np.zeros(m)
for i in range(m):
overall_train_acc[i] = len(np.where((vote[:, i] == gold) == True)[0])/n
return overall_train_acc.argmax()
def main():
# number of weak labels
m = 5
# total number of vertices
v = m + 1
# randomly parametrize exponential family to determine accuracies and correlations
#theta = np.random.rand()
#theta_cliques = (np.random.randint(0, 2, 5)*2 - 1)*theta
#theta = np.random.rand()
#theta_cliques = [1, 1, 1, 1, 1, 1, 1]
thetas = np.random.rand(30)
# all conditionally independent
potentials = [[5], [0], [1], [4], [0, 5], [1, 5], [2, 5], [3, 5], [4, 5]]
pgm = Ising(m, potentials, thetas)
n_train = 10000
vote_train, gold_train = pgm.make_data(n_train)
n_test = 1000
vote_test, gold_test = pgm.make_data(n_test)
accs = est_accs(m, vote_train, gold_train)
balance = est_balance(gold_train, n_train)
nb_output = np.zeros(n_test) # naive bayes
mv_output = np.zeros(n_test)
nb_err = 0
mv_err = 0
for i in range(n_test):
nb_output[i] = 2*np.round(get_probs(m, vote_test[i], accs, balance))-1
if nb_output[i] != gold_test[i]:
nb_err += 1
# note: play around with MV tie breaking strategy
        num_pos = len(np.where(vote_test[i] == 1)[0])
        if num_pos > m / 2:
            mv_output[i] = 1
        elif num_pos < m / 2:
            mv_output[i] = -1
        else:
            # exact tie (only possible for even m): break randomly
            mv_output[i] = 2*np.random.randint(0, 2)-1
if mv_output[i] != gold_test[i]:
mv_err += 1
nb_acc = 1 - (nb_err / n_test)
mv_acc = 1 - (mv_err / n_test)
#fs_acc = 1 - (fs_err / n_test)
best_prompt = pick_best_prompt(m, vote_train, gold_train, n_train)
best_prompt_acc = len(np.where((vote_test[:, best_prompt] == gold_test) == True)[0]) / n_test
print(f"Naive bayes: {nb_acc}")
print(f"Best prompt: {best_prompt_acc}")
print(f"Majority vote: {mv_acc}")
if __name__ == "__main__":
main()
|
evaporate-main
|
evaporate/weak_supervision/pgm.py
|
import numpy as np
import itertools
def get_probabilties(num_lfs, num_examples, predictions, label_name_to_int):
lf_array = np.zeros((num_lfs, num_examples))
golds = []
# Collect golds and preds
for i, (k, item) in enumerate(predictions.items()):
preds = item['chosen_answers_lst']
preds_mapped = []
for p in preds:
if p in label_name_to_int:
preds_mapped.append(label_name_to_int[p])
else:
preds_mapped.append(0)
preds = preds_mapped.copy()
for lf_num, p in zip(range(num_lfs), preds):
lf_array[lf_num][i] = p
gold = label_name_to_int[item['gold']]
golds.append(gold)
golds = np.array(golds)
neg_indices, pos_indices = [np.where(golds == -1)[0], np.where(golds == 1)[0]]
indices = {
-1: neg_indices,
1: pos_indices
}
    # Overall accuracies Pr(lf_i = y)
    lf_accuracies = []
for i in range(num_lfs):
lf_accuracies.append(np.sum(golds == np.array(lf_array[i]))/num_examples)
print(f"LF Accs: {lf_accuracies}")
# [i, j, k] = Pr(prompt_i = j| y = k)
classes = label_name_to_int.values()
accs = np.zeros((num_lfs, len(classes), len(classes)))
for p in range(num_lfs):
for i in classes:
for j in classes:
j_idx = j
if j == -1:
j_idx = 0
i_idx = i
if i == -1:
i_idx = 0
accs[p, i_idx, j_idx] = len(np.where(lf_array[p, indices[i]] == j)[0]) / len(indices[i])
# Compute probabilities
pos_probs = []
for i in range(num_lfs):
sub_preds = lf_array[i][pos_indices]
sub_golds = golds[pos_indices]
pos_probs.append(np.sum(sub_golds == np.array(sub_preds))/len(pos_indices))
print(f"Pos Probs: {pos_probs}")
neg_probs = []
for i in range(num_lfs):
sub_preds = lf_array[i][neg_indices]
sub_golds = golds[neg_indices]
neg_probs.append(np.sum(sub_golds == np.array(sub_preds))/len(neg_indices))
print(f"Neg Probs: {neg_probs}\n\n")
return lf_accuracies, accs, pos_probs, neg_probs, golds, indices
""" Independence Assumption: take the product of probabilities as p(L1, L2, ..., LK | y) """
# Pr(lf votes, y): joint probability under the independence assumption
def get_cond_probs(votes, y, indices_train, golds_train, accs_train, num_lfs_test):
prop_pos = len(indices_train[1])/len(golds_train)
pr_y = prop_pos if y == 1 else 1 - prop_pos
prod = pr_y
for i in range(num_lfs_test):
if y == -1:
y = 0
prod *= accs_train[i, y, votes[i]]
return prod
# Pr(y = 1 | lf votes)
def get_probs(votes, indices_train, golds_train, acc_train, num_lfs_test):
votes = [max(v, 0) for v in votes]
numerator = get_cond_probs(votes, 1, indices_train, golds_train, acc_train, num_lfs_test)
denominator = numerator + get_cond_probs(votes, -1, indices_train, golds_train, acc_train, num_lfs_test)
return numerator / denominator
def get_nb_accuracy(num_examples_test, num_lfs_test, predictions_test, label_name_to_int, golds_test, indices_train, golds_train, accs_train):
output = np.zeros(num_examples_test)
errors = 0
for i, (k, item) in enumerate(predictions_test.items()):
votes = item['chosen_answers_lst']
votes_mapped = []
for v in votes:
if v in label_name_to_int:
votes_mapped.append(label_name_to_int[v])
else:
votes_mapped.append(0)
votes = votes_mapped.copy()
probs = np.round(get_probs(votes, indices_train, golds_train, accs_train, num_lfs_test))
output[i] = probs
        # Squared error; with rounded {0, 1} outputs this equals 0/1 loss
g = golds_test[i]
if golds_test[i] == -1:
g = 0
error = np.abs(output[i] - g)**2
errors += error
accuracy = 1 - (errors / num_examples_test)
return accuracy, output
def estimate_matrix(m, n, L):
E_prod = np.zeros((m, m))
l_avg = np.zeros(m)
for i in range(n):
l = L[i, :]
l_avg += l
E_prod += np.outer(l, l)
l_avg = l_avg/n
E_prod = E_prod/n
cov = E_prod - np.outer(l_avg, l_avg)
return (E_prod, cov, l_avg)
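# Example (illustrative): estimate_matrix returns empirical second moments,
# the covariance, and the mean of the vote matrix; with votes in {-1, 1} the
# diagonal of E_prod is all ones.
def _demo_estimate_matrix():
    L = np.array([[1, 1], [1, -1], [-1, 1], [1, 1]])
    E_prod, cov, l_avg = estimate_matrix(m=2, n=4, L=L)
    print(np.diag(E_prod))  # -> [1. 1.]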
def get_vote_vectors(num_samples, num_lfs, predictions, label_name_to_int):
vectors = np.zeros((num_samples, num_lfs+1), float)
vectors_no_y = np.zeros((num_samples, num_lfs), float)
labels_vector = np.zeros((num_samples, 1), float)
for i, p in enumerate(predictions.values()):
votes = p['chosen_answers_lst']
votes_mapped = []
for v in votes:
if v in label_name_to_int:
votes_mapped.append(label_name_to_int[v])
else:
votes_mapped.append(0)
votes = votes_mapped.copy()
# votes = [max(v, 0) for v in votes]
gold = p['gold']
gold = label_name_to_int[gold]
vectors_no_y[i] = np.array(votes)
vectors[i] = np.array(votes + [gold]) #- lf_accuracies_train
labels_vector[i] = np.array([gold])
print(f"Shape: {vectors.shape}")
print(f"Sample: {vectors[0]}")
return vectors, vectors_no_y, labels_vector
def get_feature_vector(vote_vectors, include_pairwise=False, include_singletons=True):
feature_vectors = []
for votes in vote_vectors:
if include_singletons:
feature_vector = list(votes[:])
else:
feature_vector = []
if include_pairwise:
for subset in itertools.combinations(votes[:], 2):
feature_vector.append(subset[0] * subset[1])
feature_vectors.append(feature_vector)
    # np.matrix is deprecated; a 2-D array behaves the same for downstream use
    X = np.array(feature_vectors)
return X
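# Example (illustrative): with include_pairwise=True the singleton votes are
# augmented with all pairwise products, exposing agreement patterns to a
# downstream linear model.
def _demo_feature_vector():
    votes = np.array([[1, -1, 1]])
    X = get_feature_vector(votes, include_pairwise=True)
    print(X)  # -> [[ 1 -1  1 -1  1 -1]]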
|
evaporate-main
|
evaporate/weak_supervision/ws_utils.py
|
import networkx as nx
import numpy as np
from itertools import chain, product, combinations
from scipy.sparse import issparse
import more_itertools
import torch
class DependentPGM:
"""
This class describes a PGM learned from labeled data with specified edge structure.
Args:
edges: list of edges that are dependent
train_votes: n x m array of votes in {0, 1}
train_gold: n array of true labels in {0, 1}
"""
def __init__(
        self, edges, train_votes, train_gold, abstains=False, classes=[0, 1], abstain_value=-1) -> None:
"""
Initialize the PGM by computing its junction tree factorization (c_tree and c_data)
and by computing individual LF accuracy and class balance.
"""
self.edges = edges
self.train_votes = train_votes
self.train_gold = train_gold
self.classes = classes
self.k = len(classes)
assert len(np.unique(self.train_gold)) == self.k
self.abstains = abstains
assert len(np.unique(self.train_votes)) == int(abstains) + self.k
self.abstain_value = abstain_value
self.n, self.m = self.train_votes.shape
self.nodes = np.arange(self.m)
self.higher_order = len(edges) != 0
# construct data structures containing dependency graph information (maximal cliques and separator sets)
self._set_clique_tree()
self._set_clique_data()
# compute LF accuracies and class balance
self._get_accs_and_cb()
def _get_scaled(self):
if self.classes == [0, 1]:
self.train_votes_scaled = 2*self.train_votes - 1
self.train_gold_scaled = 2*self.train_gold - 1
if self.abstains:
self.train_votes_scaled[self.train_votes == self.abstain_value] = 0
else:
self.train_votes_scaled = self.train_votes
self.train_gold_scaled = self.train_gold
def _set_clique_tree(self):
G1 = nx.Graph()
G1.add_nodes_from(self.nodes)
G1.add_edges_from(self.edges)
# Check if graph is chordal
# TODO: Add step to triangulate graph if not
if not nx.is_chordal(G1):
raise nx.NetworkXError("Graph triangulation not implemented.")
# Create maximal clique graph G2
# Each node is a maximal clique C_i
# Let w = |C_i \cap C_j|; C_i, C_j have an edge with weight w if w > 0
G2 = nx.Graph()
for i, c in enumerate(nx.chordal_graph_cliques(G1)):
G2.add_node(i, members=c)
for i in G2.nodes():
for j in G2.nodes():
S = G2.nodes[i]["members"].intersection(G2.nodes[j]["members"])
w = len(S)
if w > 0:
G2.add_edge(i, j, weight=w, members=S)
        # Use a maximum spanning tree of G2 so that separator sets are maximal
        # (the junction tree property).
        self.c_tree = nx.maximum_spanning_tree(G2)
def _set_clique_data(self):
# Create a helper data structure which maps cliques (as tuples of member
# sources) --> {start_index, end_index, maximal_cliques}, where
# the last value is a set of indices in this data structure
self.c_data = dict()
for i in range(self.m):
self.c_data[i] = {
"vertices": [i],
"max_cliques": set( # which max clique i belongs to
[
j
for j in self.c_tree.nodes()
if i in self.c_tree.nodes[j]["members"]
]
),
}
# Get the higher-order clique statistics based on the clique tree
# First, iterate over the maximal cliques (nodes of c_tree) and
# separator sets (edges of c_tree)
if self.higher_order:
counter = 0
for item in chain(self.c_tree.nodes(), self.c_tree.edges()):
if isinstance(item, int):
C = self.c_tree.nodes[item]
C_type = "node"
elif isinstance(item, tuple):
C = self.c_tree[item[0]][item[1]]
C_type = "edge"
else:
raise ValueError(item)
members = list(C["members"])
nc = len(members)
# For non-singleton cliques, record the member set and its parent maximal cliques
if nc != 1:
# Add to self.c_data as well
#idx = counter + m
self.c_data[tuple(members)] = {
"vertices": members,
"max_cliques": set([item]) if C_type == "node" else set(item),
}
counter += 1
def _get_accs_and_cb(self):
classes = [0, 1]
self.gold_idxs = [np.where(self.train_gold == c)[0] for c in classes]
self.accs = np.zeros((self.m, 2)) # accs[p, y] = Pr(prompt_p = 1 | y)
for p in range(self.m):
for i in classes:
self.accs[p, i] = len(np.where(self.train_votes[self.gold_idxs[i], p] == 1)[0]) / len(self.gold_idxs[i])
self.accs = np.clip(self.accs, 0.0001, 0.9999)
self.balance = len(self.gold_idxs[1]) / self.n
def get_clique_probs(self, idxs, vals, y):
"""
Computes marginal probability over voters indexed by idx, Pr(votes_idxs = vals | y).
"""
truth_matrix = np.ones(len(self.gold_idxs[y])).astype(bool)
for i, lf in enumerate(idxs):
truth_matrix = np.logical_and(truth_matrix, self.train_votes[self.gold_idxs[y], lf] == vals[i])
if len(np.where(truth_matrix == True)[0]) == 0:
return 0.00001
return len(np.where(truth_matrix == True)[0]) / len(self.gold_idxs[y])
def get_cond_probs(self, votes, y):
"""
Computes the joint probability Pr(votes, y).
"""
pr_y = self.balance if y == 1 else 1 - self.balance
prod = pr_y
for i in self.c_tree.nodes():
node = self.c_tree.nodes[i]
members = list(node['members'])
if len(members) == 1:
v = members[0]
print(f"multiplying by {votes[v] * self.accs[v, y]}")
prod *= votes[v] * self.accs[v, y] + (1 - votes[v]) * (1 - self.accs[v, y])
else:
# print(members)
# print(f"multiplying by {self.get_clique_probs(members, votes[members], y)}")
prod *= self.get_clique_probs(members, votes[members], y)
for i in self.c_tree.edges():
edge = self.c_tree.edges[i]
members = list(edge['members'])
if len(members) == 1:
v = members[0]
deg = len(self.c_data[v]['max_cliques'])
prod /= (votes[v] * self.accs[v, y] + (1 - votes[v]) * (1 - self.accs[v, y]))**(deg-1)
# print(members)
# print(f"Dividing by {votes[v] * self.accs[v, y] + (1 - votes[v]) * (1 - self.accs[v, y])} to the {deg - 1} power")
else:
deg = len(self.c_data[tuple(members)]['max_cliques'])
prod /= (self.get_clique_probs(members, votes[members], y))**(deg-1)
# print(members)
# print(f"Dividing by {self.get_clique_probs(members, votes[members], y)} to the {deg - 1} power")
# print(prod)
return prod
def get_probs(self, votes):
"""
Computes the probability Pr(y = 1 | votes).
"""
pos = self.get_cond_probs(votes, 1)
neg = self.get_cond_probs(votes, 0)
if pos == 0:
return 0
else:
return pos / (pos + neg)
def evaluate(self, test_votes, test_gold):
"""
Using our learned PGM, outputs rounded estimates of Pr(y = 1 | votes) and computes their accuracy.
Args:
test_votes: vote array to perform inference on in {0, 1}
test_gold: true labels to compare to in {0, 1}
"""
n_test = len(test_votes)
output_rounded = np.zeros(n_test)
output_probs = np.zeros(n_test)
err = 0
for i in range(n_test):
output_probs[i] = self.get_probs(test_votes[i])
output_rounded[i] = np.round(output_probs[i])
err += output_rounded[i] != test_gold[i]
accuracy = 1 - err / n_test
return output_probs, output_rounded, accuracy
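# Illustrative usage sketch, added for exposition (tiny synthetic data). With an
# empty edgeset the PGM reduces to naive Bayes over three conditionally
# independent voters; wrapped in a function so importing stays side-effect free.
def _demo_dependent_pgm():
    votes = np.array([[1, 1, 0], [0, 1, 1], [1, 0, 1], [0, 0, 0], [1, 1, 1], [0, 1, 0]])
    gold = np.array([1, 1, 1, 0, 1, 0])
    model = DependentPGM(edges=[], train_votes=votes, train_gold=gold)
    probs, preds, acc = model.evaluate(votes, gold)
    return acc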
def is_triangulated(nodes, edges):
"""
If a graph is triangulated (e.g. if a junction tree factorization exists).
"""
G1 = nx.Graph()
G1.add_nodes_from(nodes)
G1.add_edges_from(edges)
return nx.is_chordal(G1)
def structure_learning(m, votes, gold, acc_theta, classes = [0, 1], l1_lambda=0.2):
"""
Structure learning algorithm (Ising model selection) from Ravikumar (2010).
Args:
- votes: n_train x m array of training votes
- gold: n_train array of gold labels on the training data
- acc_theta: E[vote_i * y] (votes and y scaled to [-1, 1]). This scaled accuracy initializes the vote-y parameters of the PGM, since we do not optimize over edges between votes and y; we only learn edges among the votes.
- classes: the list of classes the data can take on.
- l1_lambda: l1 regularization strength
"""
# scale the data
classes = np.sort(np.unique(gold))
vote_classes = np.sort(np.unique(votes))
if 0 in classes and 1 in classes:
votes_scaled = 2*votes - 1
gold_scaled = 2*gold - 1
if len(vote_classes) == len(classes) + 1:
votes_scaled[votes == -1] = 0
else:
votes_scaled = votes
gold_scaled = gold
acc_theta = torch.from_numpy(acc_theta).type(torch.FloatTensor)
all_thetas = np.zeros((m, m)) # learned thetas from alg
# For each prompt, we fit a logistic regression model with prompt_i's output as the response variable and all other prompt outputs as the covariates.
# big_theta is a vector of weights denoting dependence on each prompt (0 means independence).
for v in range(m):
print(f"Learning neighborhood of vertex {v}.")
if len(classes) == 2:
big_theta = learn_neighborhood(m, v, votes_scaled, gold_scaled, acc_theta, l1_lambda)
else:
big_theta = learn_neighborhood_multi(m, v, votes_scaled, gold_scaled, acc_theta, l1_lambda, classes)
all_thetas[v] = big_theta
return all_thetas
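# Hypothetical post-processing sketch (not part of the original pipeline): turn
# the learned theta matrix into an edge set by symmetrizing and thresholding;
# the 0.1 threshold is an arbitrary illustration, not a tuned value.
def edges_from_thetas(all_thetas, thresh=0.1):
    sym = (np.abs(all_thetas) + np.abs(all_thetas.T)) / 2
    m = len(sym)
    return [(i, j) for i in range(m) for j in range(i + 1, m) if sym[i, j] > thresh]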
# v is the vertex whose neighborhood graph we are estimating
def learn_neighborhood(m, vertex, votes, gold, accs, l1_lambda, epochs = 50000):
"""
Learn the neighborhood graph for a vertex.
Args:
- m: number of prompts
- vertex: the index of the prompt we are selecting as the response variable
- votes: votes on training data
- gold: gold label of training data
- accs: training accuracies of each prompt we use to initialize the PGM parameters with
- l1_lambda: regularization strength
- epochs: number of iterations
"""
n = len(gold)
vote_y = np.concatenate((votes, gold.reshape(n, 1)), axis=1)
xr = vote_y[:, vertex]
x_notr = np.delete(vote_y, vertex, axis=1)
xr = torch.from_numpy(xr).type(torch.FloatTensor)
x_notr = torch.from_numpy(x_notr).type(torch.FloatTensor)
theta = torch.zeros(m) # last index is for accuracy between vertex and y
theta[m - 1] = accs[vertex] # initialize this to be the train accuracy. We do want this to be an optimizable variable still though.
theta.requires_grad_()
optimizer = torch.optim.SGD([theta], lr=0.0001)
for t in range(epochs):
optimizer.zero_grad()
# logistic regression from Ravikumar et al
fx = (torch.log(torch.exp(torch.matmul(x_notr, theta))
+ torch.exp(-torch.matmul(x_notr, theta))).mean())
loss = fx - torch.multiply(xr, x_notr.T).mean(dim=1).dot(theta) + l1_lambda * torch.linalg.vector_norm(theta[:m], ord=1)
loss.backward()
optimizer.step()
#if t % 1000 == 0:
# print(f"Loss: {loss}")
big_theta = np.concatenate([theta.detach().numpy()[:vertex], [0], theta.detach().numpy()[vertex:m - 1]])
return big_theta
# v is the vertex whose neighborhood graph we are estimating
def learn_neighborhood_multi(m, vertex, votes, gold, accs, l1_lambda, classes, epochs = 50000):
# votes: in range {0, ... k}
n = len(gold)
vote_y = np.concatenate((votes, gold.reshape(n, 1)), axis=1)
xr = vote_y[:, vertex]
x_notr = np.delete(vote_y, vertex, axis=1)
xr = torch.from_numpy(xr).type(torch.FloatTensor)
x_notr = torch.from_numpy(x_notr).type(torch.FloatTensor)
theta = torch.zeros(m) # last index is for accuracy between vertex and y
theta[m - 1] = accs[vertex] # initialize this
theta.requires_grad_()
optimizer = torch.optim.SGD([theta], lr=0.0001)
for t in range(epochs):
optimizer.zero_grad()
# logistic regression from Ravikumar et al
mu = 0
for i in range(x_notr.shape[1]):
# mu = \sum_i theta_i * \sum_data sign{x_r = x_i}
mu += (2*(xr == x_notr[:, i])-1).type(torch.FloatTensor).mean() * theta[i]
fx = 0
for k in classes:
# \sum_y exp( \sum_i theta_i sign(x_i = y)) "normalization"
fx += torch.exp(torch.matmul((2*(x_notr == k)-1).type(torch.FloatTensor), theta)).mean()
loss = fx - mu + l1_lambda * torch.linalg.vector_norm(theta[:m], ord=1)
loss.backward()
optimizer.step()
#if t % 1000 == 0:
# print(f"Loss: {loss}")
big_theta = np.concatenate([theta.detach().numpy()[:vertex], [0], theta.detach().numpy()[vertex:m - 1]])
return big_theta
def main():
# load data
vote_arr_train = np.load('./data/youtube-spam/train_votes.npy').T
vote_arr_test = np.load('./data/youtube-spam/test_votes.npy').T
gold_arr_train = np.load('./data/youtube-spam/train_gold.npy').T
gold_arr_test = np.load('./data/youtube-spam/test_gold.npy').T
# vote_arr_train = np.concatenate((vote_arr_train[:, 0: 2], vote_arr_train[:, 4:]), axis=1)
# vote_arr_test = np.concatenate((vote_arr_test[:, 0: 2], vote_arr_test[:, 4:]), axis=1)
n_train, num_prompts = vote_arr_train.shape
# make validation set
np.random.seed(4)
val_idxs = np.random.choice(np.arange(n_train), size= 28, replace=False)
vote_arr_val = vote_arr_train[val_idxs, :]
vote_arr_train = np.delete(vote_arr_train, val_idxs, axis=0)
gold_arr_val = gold_arr_train[val_idxs]
gold_arr_train = np.delete(gold_arr_train, val_idxs)
nodes = np.arange(num_prompts)
# specify edgeset
# edges =[(0, 1)]
#model = DependentPGM(edges, vote_arr_train, gold_arr_train)
#probs, output, acc = model.evaluate(vote_arr_test, gold_arr_test)
#print(acc)
# Brute-force iteration through a bunch of edges
all_edges = list(combinations(nodes, 2))
small_edgesets = list(more_itertools.powerset(all_edges))
#small_edgesets = list(combinations(all_edges, 0)) + list(combinations(all_edges, 1)) + list(combinations(all_edges, 2)) + list(combinations(all_edges, 3))
scores = np.zeros(len(small_edgesets))
for i, edgeset in enumerate(small_edgesets):
if len(edgeset) > 4:
break
if not is_triangulated(nodes, edgeset):
continue
model = DependentPGM(edgeset, vote_arr_train, gold_arr_train)
probs, output, scores[i] = model.evaluate(vote_arr_val, gold_arr_val)
if i % 100 == 0:
print(f"Edgeset: {edgeset} \n score: {scores[i]}")
print(f"Best edgeset score: {scores.max()}")
print(f"Best edgeset: {small_edgesets[scores.argmax()]}")
edges = small_edgesets[scores.argmax()]
vote_arr_train = np.concatenate((vote_arr_train, vote_arr_val))
gold_arr_train = np.concatenate((gold_arr_train, gold_arr_val))
model = DependentPGM(edges, vote_arr_train, gold_arr_train)
probs, output, acc = model.evaluate(vote_arr_test, gold_arr_test)
print(f"Final model accuracy: {acc}")
if __name__ == "__main__":
main()
|
evaporate-main
|
evaporate/weak_supervision/binary_deps.py
|
"""This script contains code to execute different methods"""
from sklearn.metrics import accuracy_score
import numpy as np
from snorkel.labeling.model import LabelModel
from snorkel.utils import probs_to_preds
import itertools
import math
import torch
import collections
from sklearn.linear_model import LogisticRegression
import networkx as nx
class Aggregator():
def __init__(self, train_votes, train_gold, test_votes, test_gold, abstains = False, classes=[0, 1], abstain_value = -1) -> None:
# set votes and golds
self.train_votes = train_votes
self.train_gold = train_gold
self.test_votes = test_votes
self.test_gold = test_gold
self.n_train, self.m = train_votes.shape
self.n_test = len(test_gold)
# in some cases, we need a validation set split from the training data
np.random.seed(0)
indices = np.random.permutation(self.n_train)
n_val = int(self.n_train / 5) # 20% of the training dataset
val_idx, train_idx = indices[:n_val], indices[n_val:]
self.train_no_val_votes = self.train_votes[train_idx, :]
self.val_votes = self.train_votes[val_idx, :]
self.train_no_val_gold = self.train_gold[train_idx]
self.val_gold = self.train_gold[val_idx]
# check classes
self.classes = classes
self.k = len(classes)
# print(np.unique(self.train_gold))
# print(np.unique(classes))
assert len(np.unique(self.train_gold)) == self.k
assert len(np.unique(self.test_gold)) == self.k
# check if abstains
self.abstains = abstains
#assert len(np.unique(self.train_votes)) == int(abstains) + self.k
#assert len(np.unique(self.test_votes)) == int(abstains) + self.k
self.abstain_value = abstain_value
self.vote_classes = self.classes.copy()
if abstains:
assert self.abstain_value in self.train_votes
assert self.abstain_value in self.test_votes
self.vote_classes.append(self.abstain_value)
self.nodes = np.arange(self.m)
# construct scaled arrays (for binary)
self._get_scaled()
# get true accuracies on train and test
self._get_train_acc()
self._get_test_acc()
# estimate some parameters
self._estimate_balance()
self._estimate_coverage()
self._estimate_accs()
self._estimate_test_accs()
self._estimate_symmetric_accs()
self._estimate_fs_accs()
def _get_scaled(self):
"""
For binary tasks defined with classes [0, 1] and abstain -1, we construct scaled versions with classes [-1, 1] and abstain 0.
Scaled versions of the data are used as input to certain methods that assume an Ising model (such as FlyingSquid).
"""
if self.classes == [0, 1]:
self.train_votes_scaled = 2*self.train_votes - 1
self.test_votes_scaled = 2*self.test_votes - 1
self.train_no_val_votes_scaled = 2*self.train_no_val_votes - 1
self.val_votes_scaled = 2*self.val_votes - 1
self.train_gold_scaled = 2*self.train_gold - 1
self.test_gold_scaled = 2*self.test_gold - 1
self.train_no_val_gold_scaled = 2*self.train_no_val_gold - 1
self.val_gold_scaled = 2*self.val_gold - 1
if self.abstains:
self.train_votes_scaled[self.train_votes == self.abstain_value] = 0
self.test_votes_scaled[self.test_votes == self.abstain_value] = 0
self.train_no_val_votes_scaled[self.train_no_val_votes == self.abstain_value] = 0
self.val_votes_scaled[self.val_votes == self.abstain_value] = 0
else:
self.train_votes_scaled = self.train_votes
self.test_votes_scaled = self.test_votes
self.train_no_val_votes_scaled = self.train_no_val_votes
self.val_votes_scaled = self.val_votes
self.train_gold_scaled = self.train_gold
self.test_gold_scaled = self.test_gold
self.train_no_val_gold_scaled = self.train_no_val_gold
self.val_gold_scaled = self.val_gold
def _set_clique_tree(self, edgeset):
"""
Constructs a data structure c_tree that contains nodes and edges of the junction tree.
Args:
edgeset: List of tuples (i, j) for i, j = {0, ..., m}
"""
G1 = nx.Graph()
G1.add_nodes_from(self.nodes)
G1.add_edges_from(edgeset)
self.higher_order = len(edgeset) != 0
# Check if graph is chordal
# TODO: Add step to triangulate graph if not
if not nx.is_chordal(G1):
raise nx.NetworkXError("Graph triangulation not implemented.")
# Create maximal clique graph G2
# Each node is a maximal clique C_i
# Let w = |C_i \cap C_j|; C_i, C_j have an edge with weight w if w > 0
G2 = nx.Graph()
for i, c in enumerate(nx.chordal_graph_cliques(G1)):
G2.add_node(i, members=c)
for i in G2.nodes():
for j in G2.nodes():
S = G2.nodes[i]["members"].intersection(G2.nodes[j]["members"])
w = len(S)
if w > 0:
G2.add_edge(i, j, weight=w, members=S)
self.c_tree = nx.maximum_spanning_tree(G2) # maximum spanning tree, so that separator sets are maximal
def _set_clique_data(self):
"""
Creates a data structure c_data which maps cliques and separator sets to their maximal clique.
"""
self.c_data = dict()
for i in range(self.m):
self.c_data[i] = {
"vertices": [i],
"max_cliques": set( # which max clique i belongs to
[
j
for j in self.c_tree.nodes()
if i in self.c_tree.nodes[j]["members"]
]
),
}
# Get the higher-order clique statistics based on the clique tree
# First, iterate over the maximal cliques (nodes of c_tree) and
# separator sets (edges of c_tree)
if self.higher_order:
counter = 0
for item in itertools.chain(self.c_tree.nodes(), self.c_tree.edges()):
if isinstance(item, int):
C = self.c_tree.nodes[item]
C_type = "node"
elif isinstance(item, tuple):
C = self.c_tree[item[0]][item[1]]
C_type = "edge"
else:
raise ValueError(item)
members = list(C["members"])
nc = len(members)
# For non-singleton cliques, record the member set and its parent maximal cliques
if nc != 1:
# Add to self.c_data as well
#idx = counter + m
self.c_data[tuple(members)] = {
"vertices": members,
"max_cliques": set([item]) if C_type == "node" else set(item),
}
counter += 1
def _estimate_balance(self):
""" Estimates the class balance Pr(y) on training data. Sets self.balance to be an array of length k.
"""
self.gold_idxs = [np.where(self.train_gold == c)[0] for c in self.classes]
self.balance = np.array([len(self.gold_idxs[c])/self.n_train for c in range(self.k)])
def _estimate_accs(self):
""" Computes Pr(vote_i | y) on training data. Each prompt has k x k values.
We ignore the abstaining case Pr(vote_i = 0 | y), since this is handled by self.coverage.
"""
k_votes = self.k
vote_classes = self.classes
self.nb_accs = np.zeros((self.m, self.k, k_votes)) # [i, j, k] = Pr(prompt_i = j| y = k)
for p in range(self.m):
for i in range(self.k):
for j in range(k_votes):
vc = vote_classes[j]
self.nb_accs[p, i, j] = len(np.where(self.train_votes[self.gold_idxs[i], p] == vc)[0]) / len(self.gold_idxs[i])
# clip values to 0.0001, 0.9999
self.nb_accs[self.nb_accs > 1] = 0.9999
self.nb_accs[self.nb_accs == 0] = 0.0001
def _estimate_test_accs(self):
self.gold_test_idxs = [np.where(self.test_gold == c)[0] for c in self.classes]
self.nb_test_accs = np.zeros((self.m, self.k, self.k)) # [i, j, k] = Pr(prompt_i = j| y = k)
for p in range(self.m):
for i in range(self.k):
for j in range(self.k):
vc = self.classes[j]
self.nb_test_accs[p, i, j] = len(np.where(self.test_votes[self.gold_test_idxs[i], p] == vc)[0]) / len(self.gold_test_idxs[i])
# clip values to 0.0001, 0.9999
self.nb_test_accs[self.nb_test_accs > 1] = 0.9999
self.nb_test_accs[self.nb_test_accs == 0] = 0.0001
def _estimate_symmetric_accs(self):
""" Computes Pr(vote_i | y) on training data similarly to above, but assumes Pr(vote_i = c | y = c) = Pr(vote_i = y),
independent of what the value of y is. Then, Pr(vote_i = c' | y = c) = (1 - Pr(vote_i = y)) / (k - 1) (uniform assumption)
"""
self.sym_accs = np.zeros((self.m, self.k, self.k))
for i in range(self.m):
for j in range(self.k):
for k in range(self.k):
if j == k:
self.sym_accs[i, j, k] = self.train_acc[i] # Pr(lf_i = c | y = c) = Pr(lf = y)
else:
self.sym_accs[i, j, k] = (self.coverage[i] - self.train_acc[i])/(self.k - 1) # divide uniformly among other classes
def _estimate_coverage(self):
""" Computes Pr(vote_i != 0) (coverage) and Pr(vote_i = 0 | y) for each y (abstain_rate).
"""
# Pr(vote_i != 0)
self.coverage = np.array([len(np.where(self.train_votes[:, p] != self.abstain_value)[0]) / self.n_train for p in range(self.m)])
# Pr(vote_i = 0 | y)
self.abstain_rate = np.zeros((self.m, self.k))
for i in range(self.m):
for j in range(self.k):
self.abstain_rate[i, j] = len(np.where(self.train_votes[self.gold_idxs[j], i] == self.abstain_value)[0]) / len(self.gold_idxs[j])
def _estimate_fs_accs(self, on_all_data = True):
""" Estimates Pr(vote_i | y = 0, 1) using FlyingSquid algorithm.
Args:
- on_all_data: if True, estimate second moments from both train and test votes; otherwise use only the test votes. Default is True.
This version of FlyingSquid only handles the binary case (and is called on one-vs-all for multiclass) and works with scaled data.
"""
if self.k > 2:
return
if on_all_data:
votes = np.concatenate((self.train_votes_scaled, self.test_votes_scaled))
n = self.n_train + self.n_test
else:
votes = self.test_votes_scaled
n = self.n_test
if self.abstains:
# compute M[i, j] = E[vote_i * vote_j | vote_i, vote_j not abstaining]
M = np.zeros((self.m, self.m))
for i in range(self.m):
for j in range(self.m):
no_abstains = np.where(np.logical_and(votes[:, i] != 0, votes[:, j] != 0))[0]
M[i, j] = votes[no_abstains, i].dot(votes[no_abstains, j]) / len(no_abstains)
else:
# M[i, j] = E[vote_i * vote_j]
M = votes.T.dot(votes)/n
triplets = list(itertools.combinations(np.arange(self.m), 3)) # all possible combinations of triplets
self.fs_accs = np.zeros((self.m, 2, 2))
total = math.comb(self.m-1, 2)
# average over every combination of triplets
for (i, j, k) in triplets:
a = np.zeros(3)
a[0] = 0.5*(np.sqrt(np.abs(M[i, j] * M[i, k] / M[j, k]))+1)
a[1] = 0.5*(np.sqrt(np.abs(M[j, k] * M[i, j] / M[i, k]))+1)
a[2] = 0.5*(np.sqrt(np.abs(M[i, k] * M[j, k] / M[i, j]))+1)
# edge cases
a[np.where(np.isnan(a))[0]] = 0.5
a[np.where(np.isinf(a))[0]] = 1
self.fs_accs[i, 1, 1] += a[0]
self.fs_accs[j, 1, 1] += a[1]
self.fs_accs[k, 1, 1] += a[2]
self.fs_accs /= total
self.fs_accs[self.fs_accs > 1] = 0.9999
# Flying Squid assumes symmetry, Pr(vote_i = 1 | y = 1) = Pr(vote_i = -1 | y = -1)
self.fs_accs[:, 0, 0] = self.fs_accs[:, 1, 1]
self.fs_accs[:, 1, 0] = 1 - self.fs_accs[:, 1, 1]
self.fs_accs[:, 0, 1] = 1 - self.fs_accs[:, 1, 1]
def _get_train_acc(self):
""" Compute Pr(vote_i = y) on the training data.
"""
self.train_acc = (self.train_votes.T == self.train_gold).mean(axis=1)
self.train_no_val_acc = (self.train_no_val_votes.T == self.train_no_val_gold).mean(axis=1)
def _get_test_acc(self):
""" Compute Pr(vote_i = y) on the test data.
"""
self.test_acc = (self.test_votes.T == self.test_gold).mean(axis=1)
def pick_best(self):
"""Use the predictor with the best performance on the train set.
"""
self.best_prompt = np.argmax(self.train_acc)
test_preds = self.test_votes[:, self.best_prompt]
return accuracy_score(self.test_gold, test_preds)
def majority_vote(self):
"""Take a majority vote over predictors. Current implementation ignores abstains.
When there is a tie, we pick the prompt with the lowest index.
When all prompts abstain, we just return the most common label argmax_y Pr(y).
"""
test_preds = np.zeros(self.n_test)
for i in range(self.n_test):
# Majority vote discards abstains if any
if self.abstains:
voters = self.test_votes[i, self.test_votes[i] != self.abstain_value]
else:
voters = self.test_votes[i]
counts = collections.Counter(voters)
if len(counts) == 0:
# e.g. all prompts abstain --> return most common class label
test_preds[i] = self.balance.argmax()
else:
test_preds[i] = counts.most_common(1)[0][0]
return test_preds.astype(int), accuracy_score(self.test_gold, test_preds)
def get_clique_probs(self, idxs, vals, y, symmetric = False):
"""
Computes marginal probability over votes indexed by idx, Pr(votes_idxs = vals | y), using training data.
"""
if symmetric:
truth_matrix = np.ones(self.n_train).astype(bool)
agree = np.where(vals == y)[0]
disagree = np.where((np.logical_and(vals != self.abstain_value, vals != y)) == True)[0]
for i, lf in enumerate(idxs):
if i in agree:
truth_matrix = np.logical_and(truth_matrix, self.train_votes[:, lf] == self.train_gold)
elif i in disagree:
truth_matrix = np.logical_and(truth_matrix, self.train_votes[:, lf] != self.train_gold)
else:
truth_matrix = np.logical_and(truth_matrix, self.train_votes[:, lf] == self.abstain_value)
else:
truth_matrix = np.ones(len(self.gold_idxs[y])).astype(bool)
for i, lf in enumerate(idxs):
truth_matrix = np.logical_and(truth_matrix, self.train_votes[self.gold_idxs[y], lf] == vals[i])
if len(np.where(truth_matrix == True)[0]) == 0:
return 0.00001
if symmetric:
return len(np.where(truth_matrix == True)[0]) / self.n_train
else:
return len(np.where(truth_matrix == True)[0]) / len(self.gold_idxs[y])
def get_clique_probs_unlabeled(self, idxs, on_all_data=True):
if on_all_data:
votes = np.concatenate((self.train_votes_scaled, self.test_votes_scaled))
n = self.n_train + self.n_test
else:
votes = self.test_votes_scaled
n = self.n_test
l = len(idxs)
e_y = 2*self.balance[1] - 1
vote_moment = votes[:, idxs].prod(axis=1).mean()
if l % 2 == 0:
# E[Y] * E[lfs] = E[lfs Y]
acc = vote_moment * e_y
else:
acc = vote_moment / e_y
return acc
def get_cond_probs(self, votes, y, accs, edgeset = None, symmetric=False, abstains_symmetric = True):
""" Computes the probability Pr(votes, y) assuming conditional independence.
Args:
- votes: m element array of votes in {-1, 0, ..., k-1}
- y: the value of the label, in {0, ..., k - 1}
- accs: the accuracy array, e.g. directly learned from data or from FlyingSquid
- edgeset: set of edges to factorize probability with
- abstains_symmetric: do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0) or not?
"""
pr_y = self.balance[y]
prod = pr_y
if edgeset is None:
# in this case, do not need junction tree factorization. Just multiply accs together
for p in range(len(votes)):
if self.abstains and votes[p] == self.abstain_value:
if abstains_symmetric:
# we can drop Pr(lf_i = 0 | y) since it appears the same amount of times in numerator and denominator
prod *= (1 - self.coverage[p])
# print(f"multiplying by abstain on {p}: {1 - self.coverage[p]}")
else:
prod *= self.abstain_rate[p, y]
else:
# print(f"multiplying by accuracy Pr(vote_{p} = {votes[p]} | y = {y}): {accs[p, y, votes[p]]}")
prod *= accs[p, y, votes[p]] # this assumes everything is independent
else:
# multiply over maximal cliques
for i in self.c_tree.nodes():
node = self.c_tree.nodes[i]
members = list(node['members'])
if len(members) == 1:
v = members[0]
if self.abstains and votes[v] == self.abstain_value:
if abstains_symmetric:
prod *= (1 - self.coverage[v])
# print(f"multiplying by abstain on {v}: {1 - self.coverage[v]}")
else:
#print("multiplying by abstains")
prod *= self.abstain_rate[v, y]
else:
#print(f"multiplying by accuracy of {v}: {accs[v, y, votes[v]] }")
prod *= accs[v, y, votes[v]]
# print(f"multiplying by Pr(vote_{v} = {votes[v]} | y = {y}): {accs[v, y, votes[v]]}")
else:
#print(f"multiplying by prob over clique {members}: {self.get_clique_probs(members, votes[members], y, symmetric)}")
prod *= self.get_clique_probs(members, votes[members], y, symmetric)
# divide over separator sets
for i in self.c_tree.edges():
edge = self.c_tree.edges[i]
members = list(edge['members'])
if len(members) == 1:
v = members[0]
deg = len(self.c_data[v]['max_cliques'])
if self.abstains and votes[v] == self.abstain_value:
if abstains_symmetric:
prod /= (1 - self.coverage[v])**(deg - 1)
else:
if self.abstain_rate[v, y] == 0:
prod /= 0.000001**(deg - 1) # edge case: avoid division by zero
else:
prod /= self.abstain_rate[v, y]**(deg - 1)
else:
#print(f"Dividing by symmetric accuracy of {v}")
prod /= accs[v, y, votes[v]]**(deg - 1)
else:
#print(f"Dividing by prob over clique {members}: {self.get_clique_probs(members, votes[members], y, symmetric)}")
deg = len(self.c_data[tuple(members)]['max_cliques'])
prod /= (self.get_clique_probs(members, votes[members], y, symmetric))**(deg-1)
return prod
def get_probs(self, votes, accs, edgeset = None, symmetric=False, abstains_symmetric = True):
""" Computes the probability Pr(y | votes) using Bayes Rule over Pr(votes, y).
Args:
- votes: m element array of votes in {-1, 0, ..., k-1}
- accs: the accuracy array, e.g. directly learned from data or from FlyingSquid
- edgeset: set of edges to factorize probability with
- abstains_symmetric: do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0) or not?
"""
p = np.zeros(self.k)
for i in range(self.k):
p[i] = self.get_cond_probs(votes, self.classes[i], accs, edgeset, symmetric, abstains_symmetric)
p /= p.sum() # normalization
return p
def naive_bayes(self, accs = None, symmetric = False, abstains_symmetric=True):
""" Naive bayes estimation.
Estimate Pr(vote_i | y) from training data and use that to compute Pr(y = 1 | votes).
Assumes conditional independence.
Args:
- accs: the accuracies [m x k x k] we estimate with
- symmetric: Do we assume Pr(vote_i = c | y = c) = Pr(vote_i = y) for all c?
- abstains_symmetric: Do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0)? This is
reasonable when an abstain is due to a systematic error in the prompt that doesn't depend on the label of the data.
"""
test_preds = []
test_probs = []
if symmetric:
accs = self.sym_accs
else:
if accs is None:
accs = self.nb_accs
for votes in self.test_votes:
prob = self.get_probs(votes, accs, symmetric=symmetric, abstains_symmetric=abstains_symmetric)
test_probs.append(prob)
test_preds.append(np.argmax(prob))
return test_probs, accuracy_score(self.test_gold, test_preds)
def junction_tree(self, edgeset, symmetric=False, abstains_symmetric=True, data='test'):
""" Junction tree estimation.
Estimate Pr(vote_i | y) from training data and use that to compute Pr(y = 1 | votes).
Assumes edgeset structure.
Args:
- edgeset: List of tuples (i, j) for i, j in {0, ..., m} denoting edges to factorize distribution with.
- symmetric: Do we assume Pr(vote_i = c | y = c) = Pr(vote_i = y) for all c?
- abstains_symmetric: Do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0)? This is
reasonable when an abstain is due to a systematic error in the prompt that doesn't depend on the label of the data.
"""
# construct auxiliary data structures
self._set_clique_tree(edgeset)
self._set_clique_data()
# Get preds
preds = []
probs = []
if data=='val':
votes = self.val_votes
gold = self.val_gold
elif data=='test':
votes = self.test_votes
gold = self.test_gold
else:
votes = self.train_votes
gold = self.train_gold
if symmetric:
accs = self.sym_accs
else:
accs = self.nb_accs
for v in votes:
prob = self.get_probs(v, accs, edgeset, symmetric=False, abstains_symmetric= abstains_symmetric)
probs.append(prob)
preds.append(np.argmax(prob))
return probs, accuracy_score(gold, preds)
def conditional_entropy(self, votes, edgeset=None):
"""
Computes H(Y | votes) ~= -1/n sum_i sum_y' Pr(y = y' | votes_i) log Pr(y = y' | votes_i).
Uses learned distribution as true one over Pr(y | votes)
This computation is independent of aggregation approach.
It uses direct estimation on the training dataset to learn the PGM.
"""
ce = 0
if edgeset is not None:
self._set_clique_tree(edgeset)
self._set_clique_data()
#print("Votes 1")
#print(votes)
for i, vote in enumerate(votes):
# compute Pr(y | lf) for all y. We are treating this estimated probability as the true distribution.
prob_vector = self.get_probs(vote, self.nb_test_accs, edgeset, symmetric=False, abstains_symmetric=True)
# print(prob_vector, vote, i)
# print(prob_vector, vote)
for j in range(self.k):
if prob_vector[j] == 0:
continue
# print(vote, j, prob_vector[j])
ce += prob_vector[j] * np.log(prob_vector[j])
return -ce/len(votes)
def conditional_entropy_singleton(self, probs, gold, edgeset=None):
"""
Computes H(Y | WS output) = -1/n sum_i sum_j Pr(y-hat = y_j | lfs(x_i)) * sum_k Pr(y = y_k | y-hat = y_j) log Pr(y = y_k | y-hat = y_j)
"""
# First, compute WS estimate y-hat over dataset
preds = np.argmax(probs, axis=1) # WS point estimates y-hat
# Now estimate Pr(y | y-hat) (k by k) matrix
y_accs = np.zeros((self.k, self.k))
ws_idxs = [np.where(preds == c)[0] for c in self.classes]
for i in range(self.k):
for j in range(self.k):
y_accs[i, j] = len(np.where(gold[ws_idxs[i]] == self.classes[j])[0]) / len(ws_idxs[i])
# print(y_accs)
# finally, compute entropy: 1/n sum_i sum_j Pr(y-hat = y_j | lfs(x_i)) * sum_k Pr(y = y_k | y-hat = y_j) log Pr(y = y_k | y-hat = y_j)
ce = 0
for i in range(len(probs)):
for j in range(self.k):
for c in range(self.k):
y_prob = y_accs[c, j]
if y_prob == 0:
continue
ce += probs[i, j] * y_prob * np.log(y_prob)
return -ce/len(probs)
def conditional_entropy_mv(self, edgeset=None):
"""
Computes H(Y | MV output) = -1/n sum_i sum_k Pr(y = y_k | y-hat_i) log Pr(y = y_k | y-hat_i)
"""
# First, compute MV estimate y-hat over dataset
preds, _ = self.majority_vote()
# Now estimate Pr(y | y-hat) (k by k) matrix
y_accs = np.zeros((self.k, self.k))
ws_idxs = [np.where(preds == c)[0] for c in self.classes]
for i in range(self.k):
for j in range(self.k):
y_accs[i, j] = len(np.where(self.test_gold[ws_idxs[i]] == self.classes[j])[0]) / len(ws_idxs[i])
ce = 0
for i, vote in enumerate(self.test_votes):
v_pred = preds[i]
for j in range(self.k):
y_prob = y_accs[v_pred, j]
if y_prob == 0:
continue
ce += y_prob * np.log(y_prob)
return -ce/len(self.test_votes)
def cross_entropy_conditional(self, votes, golds, edgeset):
"""
Computes -1/n sum_i log Pr(y = gold_i | votes_i). This is the standard notion of CE loss.
"""
ce = 0
self._set_clique_tree(edgeset)
self._set_clique_data()
for i, vote in enumerate(votes):
# compute Pr(y | lf)
prob = self.get_probs(vote, self.nb_accs, edgeset, symmetric=False, abstains_symmetric=True)
ce += np.log(prob[golds[i]])
return -ce/len(votes)
def cross_entropy(self, votes, golds, edgeset):
"""
Computes -1/n sum_i log Pr(y = gold_i, votes_i), minimizing cross entropy over the joint distribution of Y, votes.
"""
ce = 0
self._set_clique_tree(edgeset)
self._set_clique_data()
for i, vote in enumerate(votes):
# compute Pr(votes, y)
prob = self.get_cond_probs(vote, golds[i], self.nb_accs, edgeset, symmetric=False, abstains_symmetric=True)
ce += np.log(prob)
return -ce/len(votes)
def cross_entropy_no_label(self, votes, edgeset):
"""
Computes -1/n sum_j log Pr(votes_j), minimizing cross entropy over the distribution of votes
"""
ce = 0
self._set_clique_tree(edgeset)
self._set_clique_data()
for i, vote in enumerate(votes):
# compute Pr(votes, y)
prob = 0
for c in self.classes:
prob+= self.get_cond_probs(vote, c, self.nb_accs, edgeset, symmetric=False, abstains_symmetric=True)
ce += np.log(prob)
return -ce/len(votes)
def flying_squid(self, abstains_symmetric=True):
""" FlyingSquid algorithm requires no labeled data (except for estimating class balance).
Assumes conditional independence (for now) and symmetric accuracies.
That is, Pr(vote_i = 1 | y = 1) = Pr(vote_i = 0 | y = 0).
Args:
- abstains_symmetric: Do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0)? This is
reasonable when an abstain is due to a systematic error in the prompt that doesn't depend on the label of the data.
"""
assert self.k == 2, "not implemented for more than 2 classes!"
assert 0 in self.classes
assert 1 in self.classes
# Get preds
test_preds = []
test_probs = []
for votes in self.test_votes:
prob = self.get_probs(votes, self.fs_accs, abstains_symmetric=abstains_symmetric)
test_probs.append(prob)
test_preds.append(np.argmax(prob))
return test_probs, accuracy_score(self.test_gold, test_preds)
def snorkel_lm(self, on_all_data=True):
""" Use Snorkel AI's label model. Under the hood: Metal "forward" algorithm.
"""
#assert self.k == 2, "not implemented for more than 2 classes!"
#assert 0 in self.classes
#assert 1 in self.classes
if on_all_data:
votes = np.concatenate((self.train_votes, self.test_votes))
n = self.n_train + self.n_test
else:
votes = self.test_votes
n = self.n_test
label_model = LabelModel(cardinality=self.k)
label_model.fit(L_train=votes, n_epochs=500, log_freq=100, seed=0)
probs_test = label_model.predict_proba(self.test_votes)
test_preds = np.argmax(probs_test, axis=1)
params = label_model.get_conditional_probs()
return params, accuracy_score(self.test_gold, test_preds)
def dp_learn_params(self, with_label=False, seed=0, lr=0.0001, epochs = 1000):
""" Learn the data programming parameters alpha and beta.
Args:
- with_label: Do we use y or not? If using label, use the train set and do MLE on Pr(y, votes);
else use the test set and do MLE on Pr(votes).
- seed: random seed for Pytorch.
- lr: learning rate
- epochs: number of epochs
Returns:
- alpha: parameter corresponding to accuracy Pr(vote_i = y) (symmetric)!
- beta: parameter corresponding to coverage Pr(vote_i != 0) (symmetric)!
"""
if with_label:
votes = self.train_votes_scaled
gold = self.train_gold_scaled
else:
votes = self.test_votes_scaled
gold = self.test_gold_scaled
torch.manual_seed(seed)
x = torch.from_numpy(votes).type(torch.FloatTensor)
# Initialize Parameters
alpha = torch.rand(self.m, requires_grad=True)
beta = torch.tensor(self.coverage, requires_grad = False).type(torch.FloatTensor) # we do not optimize beta for now
optimizer = torch.optim.SGD([alpha], lr=lr)
for t in range(epochs):
optimizer.zero_grad()
mu_1 = torch.prod((x == 1) * beta.multiply(alpha) + (x == -1) * beta.multiply(1 - alpha) + (x == 0) * (1 - beta), dim = 1)
mu_neg1 = torch.prod((x == -1) * beta.multiply(alpha) + (x == 1) * beta.multiply(1 - alpha) + (x == 0) * (1 - beta), dim=1)
if with_label:
# use the label information in MLE
snorkel_loss = -torch.log(mu_1[np.where(gold == 1)[0]]).sum() - torch.log(mu_neg1[np.where(gold == -1)[0]]).sum()
else:
# 50-50 for y = 1 vs -1
snorkel_loss = -torch.log(0.5*mu_1 + 0.5*mu_neg1).sum()
snorkel_loss.backward()
optimizer.step()
#if t % 100 == 0:
# print('Loss', snorkel_loss, 'alpha', alpha, 'beta', beta)
with torch.no_grad():
alpha.clamp_(0.5, 1) # assume that accuracy is better than random
beta.clamp_(0, 1) # coverage should be between 0 and 1
return alpha, beta
def data_programming(self, with_label=False, seed=0, lr = 0.0001, epochs=1000):
""" Data programming algorithm.
Args:
- with_label: Do we use y or not? If using label, use the train set and do MLE on Pr(y, votes);
else use the test set and do MLE on Pr(votes).
- seed: random seed for Pytorch.
- lr: learning rate
- epochs: number of epochs
"""
assert self.k == 2, "not implemented for more than 2 classes!"
assert 0 in self.classes
assert 1 in self.classes
# we don't need betas, will just cancel out when doing inference
alpha, beta = self.dp_learn_params(with_label, seed, lr, epochs)
alpha = alpha.detach().numpy()
if np.any(np.isnan(alpha)):
raise ValueError("SGD failed to converge.")
dp_accs = np.zeros((self.m, 2, 2))
dp_accs[:, 1, 1] = dp_accs[:, 0, 0] = alpha
dp_accs[:, 1, 0] = dp_accs[:, 0, 1] = 1 - alpha
if with_label:
self.dp_label_accs = dp_accs
else:
self.dp_nolabel_accs = dp_accs
# Get preds
test_preds = []
test_probs = []
for votes in self.test_votes:
prob = self.get_probs(votes, dp_accs)
test_probs.append(prob[1])
test_preds.append(np.argmax(prob))
return test_probs, accuracy_score(self.test_gold, test_preds)
def logistic_regression(self, pairwise=True, singleton=False, scaling=True, max_iter=100):
"""
Logistic regression baseline.
Args:
- pairwise: if true, we scale everything to [-1, 1] and look at vote_i * vote_j as (m choose 2) new features that
explicitly model their agreement and disagreement.
- singleton: do we include the original votes as features
- scaling: do logistic regression over [-1, 1] or [0, 1]
- max_iter: maximum number of iterations for sklearn LR algorithm.
"""
if scaling:
train_votes = self.train_no_val_votes_scaled
val_votes = self.val_votes_scaled
test_votes = self.test_votes_scaled
train_gold = self.train_no_val_gold_scaled
val_gold = self.val_gold_scaled
test_gold = self.test_gold_scaled
else:
train_votes = self.train_no_val_votes
val_votes = self.val_votes
test_votes = self.test_votes
train_gold = self.train_no_val_gold
val_gold = self.val_gold
test_gold = self.test_gold
if pairwise:
# get pairwise data
pair_idxs = list(itertools.combinations(np.arange(self.m), 2))
pairs_train = np.zeros((len(train_gold), len(pair_idxs)))
pairs_val = np.zeros((len(val_gold), len(pair_idxs)))
pairs_test = np.zeros((self.n_test, len(pair_idxs)))
for count, (i, j) in enumerate(pair_idxs):
pairs_train[:, count] = train_votes[:, i] * train_votes[:, j]
pairs_val[:, count] = val_votes[:, i] * val_votes[:, j]
pairs_test[:, count] = test_votes[:, i] * test_votes[:, j]
if not singleton:
train_votes = pairs_train
val_votes = pairs_val
test_votes = pairs_test
else:
train_votes = np.concatenate((train_votes, pairs_train), axis=1)
val_votes = np.concatenate((val_votes, pairs_val), axis=1)
test_votes = np.concatenate((test_votes, pairs_test), axis=1)
best_val = -1
best_test = -1
best_reg = -1
# grid search over regularization parameter using validation set
for c in [0.001, 0.01, 0.1, 0.25, 0.5, 5, 10, 100, 1000, 2000]:
clf = LogisticRegression(random_state=0, penalty='l1', solver='liblinear', fit_intercept=False, multi_class='ovr', C=c, max_iter=max_iter).fit(train_votes, train_gold)
test_score = clf.score(test_votes, test_gold)
val_score = clf.score(val_votes, val_gold)
if val_score >= best_val:
best_val = val_score
best_test = test_score
best_reg = c
clf = LogisticRegression(random_state=0, penalty='l1', solver='liblinear', fit_intercept=False, multi_class='ovr', C=best_reg, max_iter=max_iter).fit(train_votes, train_gold)
return clf.coef_, clf.predict_proba(test_votes), best_test
def exp_weight(self, option=1, etas=[0.25, 0.5, 1, 2, 4, 8, 16, 32]):
"""
Weighting rule 1: Pr(y | votes) ~ sum_i 1{vote_i = y} * exp(-eta*loss_i)
Weighting rule 2: Pr(y | votes) ~ exp(sum_i eta * accuracy * vote_i * y) (scaled to -1, 1)
Args:
- option: which weighting rule to use
- etas: list of temperature hyperparameters
"""
test_preds = []
test_probs = []
# grid search
best_eta = -1
best_acc = 0
for eta in etas:
val_preds = []
if option == 1:
weights = np.exp(-eta * (1 - self.train_no_val_acc))
else:
weights = eta*self.train_no_val_acc
for votes in self.val_votes:
if option == 1:
scores = np.array([weights[votes == y].sum() for y in self.classes])
else:
scores = np.array([np.exp((2*((votes == y).astype(int))-1).dot(weights)) for y in self.classes] )
if scores.sum() ==0:
# return prior
val_preds.append(np.argmax(self.balance))
else:
val_preds.append(np.argmax(scores))
val_acc = accuracy_score(self.val_gold, val_preds)
if val_acc > best_acc:
best_acc = val_acc
best_eta = eta
if option == 1:
weights = np.exp(-best_eta * (1 - self.train_no_val_acc))
else:
weights = best_eta*self.train_no_val_acc
for votes in self.test_votes:
if option == 1:
scores = np.array([weights[votes == y].sum() for y in self.classes])
else:
scores = np.array([np.exp((2*((votes == y).astype(int))-1).dot(weights)) for y in self.classes] )
if scores.sum() ==0:
# return prior
test_preds.append(np.argmax(self.balance))
else:
scores /= scores.sum()
test_probs.append(scores[1])
test_preds.append(np.argmax(scores))
return test_probs, accuracy_score(self.test_gold, test_preds)
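# Illustrative usage sketch, added for exposition (synthetic binary votes): three
# voters that are each independently correct 80% of the time. Majority vote and
# naive Bayes should both beat any single voter on average.
def _demo_aggregator():
    rng = np.random.RandomState(0)
    gold_tr = np.tile([0, 1], 20)
    gold_te = np.tile([0, 1], 20)
    def noisy(y):
        return np.where(rng.rand(len(y), 3) < 0.8, y[:, None], 1 - y[:, None])
    agg = Aggregator(noisy(gold_tr), gold_tr, noisy(gold_te), gold_te)
    _, mv_acc = agg.majority_vote()
    _, nb_acc = agg.naive_bayes()
    return mv_acc, nb_acc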
class MultiAggregator(Aggregator):
def __init__(self, train_votes, train_gold, test_votes, test_gold, classes, abstains=False, abstain_value=-1) -> None:
super().__init__(train_votes, train_gold, test_votes, test_gold, abstains, classes, abstain_value)
def flying_squid(self, abstains_symmetric=True):
"""
For multi-class, FlyingSquid reduces into one-vs-all subproblems and picking the highest Pr(y | votes) from each of those.
"""
probs = np.zeros((self.n_test, self.k))
for i, c in enumerate(self.classes):
train_votes_c = np.where(self.train_votes == c, 1, 0)
train_votes_c[self.train_votes == self.abstain_value] = -1 # keep the abstains
train_gold_c = np.where(self.train_gold == c, 1, 0)
test_votes_c = np.where(self.test_votes == c, 1, 0)
test_votes_c[self.test_votes == self.abstain_value] = -1
test_gold_c = np.where(self.test_gold == c, 1, 0)
agg = Aggregator(train_votes_c, train_gold_c, test_votes_c, test_gold_c, self.abstains, classes=[0, 1])
fs_probs, fs_acc = agg.flying_squid(abstains_symmetric)
probs[:, i] = np.array(fs_probs)[:, 1]
test_preds = np.argmax(probs, axis=1)
return probs, accuracy_score(self.test_gold, test_preds)
def data_programming(self, epochs=1000, with_label=False, seed=0, lr=0.0001):
"""
For multi-class, data programming reduces into one-vs-all subproblems and picking the highest Pr(y | votes) from each of those.
"""
probs = np.zeros((self.n_test, self.k))
# one versus all
for i, c in enumerate(self.classes):
train_votes_c = np.where(self.train_votes == c, 1, 0)
train_votes_c[self.train_votes == self.abstain_value] = -1
train_gold_c = np.where(self.train_gold == c, 1, 0)
test_votes_c = np.where(self.test_votes == c, 1, 0)
test_votes_c[self.test_votes == self.abstain_value] = -1
test_gold_c = np.where(self.test_gold == c, 1, 0)
agg = Aggregator(train_votes_c, train_gold_c, test_votes_c, test_gold_c, self.abstains, classes=[0, 1])
probs[:, i], _ = agg.data_programming(with_label, seed, lr, epochs)
test_preds = np.argmax(probs, axis=1)
return accuracy_score(self.test_gold, test_preds)
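# Illustrative sketch of the one-vs-all reduction, added for exposition (synthetic
# 3-class votes; evaluated on the training data purely for brevity).
def _demo_multi_aggregator():
    rng = np.random.RandomState(1)
    gold = np.tile([0, 1, 2], 10)
    noise = rng.randint(0, 3, (30, 3))
    votes = np.where(rng.rand(30, 3) < 0.75, gold[:, None], noise)
    magg = MultiAggregator(votes, gold, votes, gold, classes=[0, 1, 2])
    probs, acc = magg.flying_squid()
    return acc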
|
evaporate-main
|
evaporate/weak_supervision/methods.py
|
import argparse
import numpy as np
import json
import sys
import pickle
import random
import cvxpy as cp
import scipy as sp
from tqdm import tqdm
from methods import Aggregator
from metal.label_model import LabelModel
from collections import defaultdict, Counter
sys.path.append("../")
from evaluate_synthetic import clean_comparison
def get_data(
all_votes,
gold_extractions_file,
attribute='',
has_abstains=1.0,
num_elts = 5,
extraction_fraction_thresh=0.9,
):
"""
Load in the votes and gold extractions (from gold_extractions_file) and map them to integer labels.
- num_elts = number of ``choices'' to use in the multiple-choice setup.
"""
label_name_to_ints = []
has_abstains = has_abstains >= extraction_fraction_thresh # treat empty extractions as abstains only when the extraction fraction is high enough
try:
with open(gold_extractions_file) as f:
gold_extractions = json.load(f)
except Exception:
with open(gold_extractions_file, "rb") as f:
gold_extractions = pickle.load(f)
test_votes = []
test_golds = []
total_abstains = []
average_unique_votes = []
missing_files = []
random.seed(0)
for file, extractions in tqdm(all_votes.items()):
if file not in gold_extractions:
missing_files.append(file)
continue
extractions = [clean_comparison(e) for e in extractions]
if has_abstains:
extractions = [e if e else 'abstain' for e in extractions]
unique_votes = Counter(extractions).most_common(num_elts)
unique_votes = [i for i, _ in unique_votes if i != 'abstain']
average_unique_votes.append(len(unique_votes))
if len(unique_votes) < num_elts:
missing_elts = num_elts - len(unique_votes)
for elt_num in range(missing_elts):
unique_votes.append(f"dummy{elt_num}")
random.shuffle(unique_votes)
label_name_to_int = {elt: j for j, elt in enumerate(unique_votes)}
label_name_to_ints.append(label_name_to_int)
test_votes.append(np.array(
[label_name_to_int[ans] if ans in label_name_to_int else -1 for ans in extractions]
))
num_abstains = len([a for a in extractions if a not in label_name_to_int])
total_abstains.append(num_abstains)
# golds are just for class balance purposes
if attribute in gold_extractions[file]:
gold = gold_extractions[file][attribute]
elif clean_comparison(attribute) in gold_extractions[file]:
gold = gold_extractions[file][clean_comparison(attribute)]
else:
gold = ""
gold = clean_comparison(gold)
if gold in label_name_to_int:
test_golds.append(label_name_to_int[gold])
else:
gold = random.sample(range(len(label_name_to_int)), 1)
test_golds.append(gold[0])
test_votes = np.array(test_votes)
test_gold = np.array(test_golds)
test_votes = test_votes.astype(int)
test_gold = test_gold.astype(int)
print(f"Average abstains across documents: {np.mean(total_abstains)}")
print(f"Average unique votes per document: {np.mean(average_unique_votes)}")
return test_votes, test_gold, label_name_to_ints, missing_files
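# Illustrative sketch of the vote-mapping step above, added for exposition
# (hypothetical extractions): frequent answers get integer labels and everything
# else, including 'abstain', maps to -1.
def _demo_vote_mapping():
    extractions = ["red", "red", "blue", "", "red"]
    extractions = [e if e else "abstain" for e in extractions]
    unique_votes = [v for v, _ in Counter(extractions).most_common(5) if v != "abstain"]
    label_name_to_int = {elt: j for j, elt in enumerate(unique_votes)}
    return np.array([label_name_to_int.get(e, -1) for e in extractions])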
def get_top_deps_from_inverse_sig(J, k):
m = J.shape[0]
deps = []
sorted_idxs = np.argsort(np.abs(J), axis=None)
n = m*m
idxs = sorted_idxs[-k:]
for idx in idxs:
i = int(np.floor(idx / m))
j = idx % m
if (j, i) in deps:
continue
deps.append((i, j))
return deps
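# Small worked example, added for exposition: with one dominant symmetric entry,
# the helper above keeps the largest |J| entries and deduplicates the mirrored
# (1, 0) pair, so this should return [(0, 1)].
def _demo_top_deps():
    J = np.array([[0.0, 2.0, 0.1],
                  [2.0, 0.0, 0.3],
                  [0.1, 0.3, 0.0]])
    return get_top_deps_from_inverse_sig(J, 2)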
def learn_structure(L):
m = L.shape[1]
n = float(np.shape(L)[0])
sigma_O = (np.dot(L.T,L))/(n-1) - np.outer(np.mean(L,axis=0), np.mean(L,axis=0))
#bad code
O = 1/2*(sigma_O+sigma_O.T)
O_root = np.real(sp.linalg.sqrtm(O))
# low-rank matrix
L_cvx = cp.Variable([m,m], PSD=True)
# sparse matrix
S = cp.Variable([m,m], PSD=True)
# S-L matrix
R = cp.Variable([m,m], PSD=True)
#reg params
lam = 1/np.sqrt(m)
gamma = 1e-8
objective = cp.Minimize(0.5*(cp.norm(R @ O_root, 'fro')**2) - cp.trace(R) + lam*(gamma*cp.pnorm(S,1) + cp.norm(L_cvx, "nuc")))
constraints = [R == S - L_cvx, L_cvx>>0]
prob = cp.Problem(objective, constraints)
result = prob.solve(verbose=False, solver=cp.SCS)
opt_error = prob.value
#extract dependencies
J_hat = S.value
if J_hat is None:
raise ValueError("CVXPY failed to solve the structured learning problem, use result without dependencies.")
for i in range(m):
J_hat[i, i] = 0
return J_hat
def learn_structure_multiclass(L, k):
m = L.shape[1]
J_hats = np.zeros((k, m, m))
for c in range(k):
all_votes_c = np.where(L == c, 1, 0)
J_hats[c] = learn_structure(all_votes_c)
return J_hats
def get_min_off_diagonal(J_hat):
J_hat_copy = J_hat.copy()
for i in range(len(J_hat_copy)):
J_hat_copy[i, i] = np.inf
return np.abs(J_hat_copy).min()
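# End-to-end sketch, added for exposition (synthetic votes, illustrative only):
# voters 1 and 2 are strongly correlated, so structure learning should flag that
# pair whenever the off-diagonal scale check passes.
def _demo_structure_pipeline():
    rng = np.random.RandomState(0)
    base = rng.randint(0, 2, 200)
    dep = np.where(rng.rand(200) < 0.9, base, 1 - base)
    L = np.stack([rng.randint(0, 2, 200), base, dep], axis=1)
    J_hat = learn_structure(L)
    if get_min_off_diagonal(J_hat) < 1:
        return get_top_deps_from_inverse_sig(J_hat, 1)
    return []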
def run_ws(
all_votes,
gold_extractions_file,
symmetric=True,
attribute='',
has_abstains=1.0,
extraction_fraction_thresh=0.9,
):
test_votes, test_gold, label_name_to_ints, missing_files = get_data(
all_votes,
gold_extractions_file,
attribute=attribute,
has_abstains=has_abstains,
extraction_fraction_thresh=extraction_fraction_thresh,
)
classes = np.sort(np.unique(test_gold))
vote_classes = np.sort(np.unique(test_votes))
n_test, m = test_votes.shape
k = len(classes)
abstains = len(vote_classes) == len(classes) + 1
print(f"Abstains: {abstains}")
m = test_votes.shape[1]
all_votes = test_votes
label_model = LabelModel(k=k, seed=123)
# scale to 0, 1, 2 (0 is abstain)
test_votes_scaled = (test_votes + np.ones((n_test, m))).astype(int)
test_gold_scaled = (test_gold + np.ones(n_test)).astype(int)
all_votes_scaled = test_votes_scaled
label_model.train_model(
all_votes_scaled,
Y_dev=test_gold_scaled,
abstains=abstains,
symmetric=symmetric,
n_epochs=10000,
log_train_every=1000,
lr=0.00001
)
print('Trained Label Model Metrics (No deps):')
scores, preds = label_model.score(
(test_votes_scaled, test_gold_scaled),
metric=['accuracy','precision', 'recall', 'f1']
)
print(scores)
all_votes_no_abstains = np.where(all_votes == -1, 0, all_votes)
used_deps = False
try:
if len(classes) == 2:
J_hat = learn_structure(all_votes_no_abstains)
else:
J_hats = learn_structure_multiclass(all_votes_no_abstains, len(classes))
J_hat = J_hats.mean(axis=0)
# if all off-diagonal values of J are large, everything looks connected and structure learning is not finding meaningful sparsity; skip modeling dependencies in that case
min_entry = get_min_off_diagonal(J_hat)
if min_entry < 1:
deps = get_top_deps_from_inverse_sig(J_hat, 1)
print("Recovered dependencies: ", deps)
label_model.train_model(
all_votes_scaled,
Y_dev=test_gold_scaled,
abstains=abstains,
symmetric=symmetric,
n_epochs=80000,
log_train_every=1000,
lr=0.000001,
deps=deps
)
print('Trained Label Model Metrics (with deps):')
scores, preds = label_model.score(
(test_votes_scaled, test_gold_scaled),
metric=['accuracy', 'precision', 'recall', 'f1']
)
print(scores)
used_deps = True
except Exception:
print("Not modeling dependencies.")
# convert the preds back
mapped_preds = []
for label_name_to_int, pred in tqdm(zip(label_name_to_ints, preds)):
int_to_label_name = {v:k for k, v in label_name_to_int.items()}
try:
pred = int_to_label_name[pred-1]
except Exception:
pred = ''
mapped_preds.append(pred)
return mapped_preds, used_deps, missing_files
if __name__ == "__main__":
run_ws() # NOTE: run_ws requires all_votes and gold_extractions_file arguments; calling it bare like this will raise a TypeError.
|
evaporate-main
|
evaporate/weak_supervision/run_ws.py
|
import numpy as np
import itertools
import scipy.stats
import math
import networkx as nx
from itertools import chain
from methods import Aggregator
from binary_deps import structure_learning
from binary_deps import DependentPGM
from sklearn.metrics import log_loss, accuracy_score
class Ising():
def __init__(self, m, potentials, thetas=None, vals=[-1, 1]) -> None:
self.m = m
self.v = m + 1 # total number of vertices
self.potentials = potentials
self.vals = vals
#TODO support values in 0, 1
if thetas is not None:
assert len(thetas) >= len(potentials), f"Need to specify at least {len(potentials)} theta parameters."
self.thetas = thetas
else:
self.thetas = np.random.rand(len(potentials))
# 2^v size support over y and all prompts
self.support = np.array(list(map(list, itertools.product(vals, repeat=self.v))))
# 2^m size support over all prompts
self.support_no_y = np.array(list(map(list, itertools.product(vals, repeat=self.m))))
self.n_vals = len(self.support)
self._make_pdf()
self._make_cdf()
self._get_means()
self._get_balance()
self._get_accs()
# set graph true graph structure
self._get_edges_nodes()
self.c_tree = self._set_clique_tree(self.edges)
self.c_data = self._set_clique_data(self.c_tree)
def _get_edges_nodes(self):
self.nodes = np.arange(self.m)
self.edges = [p for p in self.potentials if len(p) == 2 and self.m not in p]
if self.edges != []:
self.higher_order = True
else:
self.higher_order = False
def _exponential_family(self, labels):
x = 0.0
for i in range(len(self.potentials)):
x += self.thetas[i] * labels[self.potentials[i]].prod()
return np.exp(x)
def _make_pdf(self):
p = np.zeros(len(self.support))
for i, labels in enumerate(self.support):
p[i] = self._exponential_family(labels)
self.z = sum(p)
self.pdf = p/self.z
def _make_cdf(self):
self.cdf = np.cumsum(self.pdf)
def joint_p(self, C, values):
p = 0.0
for k, labels in enumerate(self.support):
flag = True
for i in range(len(C)):
prod = labels[C[i]].prod()
if prod != values[i]:
flag = False
if flag == True:
p += self.pdf[k]
return p
def expectation(self, C):
return self.vals[0] * self.joint_p(C, self.vals[0] * np.ones(len(C))) + self.vals[1] * self.joint_p(C, self.vals[1] * np.ones(len(C)))
def _get_means(self):
self.means = np.zeros(self.m)
for k in range(self.m):
self.means[k] = self.expectation([[k]])
def _get_balance(self):
self.balance = self.joint_p([[self.m]], [1])
def _get_covariance_y(self):
# note: left as a stub; (co)variances involving y are computed on demand via aug_covariance_y below
self.cov = np.zeros((self.m + 1, self.m + 1))
def aug_covariance(self, rvs):
l = len(rvs)
M = np.zeros((l, l))
for i in range(l):
for j in range(i + 1, l):
M[i, j] = self.joint_p([rvs[i], rvs[j]], [1, 1]) + self.joint_p([rvs[i], rvs[j]], [-1, -1])
for i in range(l):
for j in range(i + 1):
if i != j:
M[i, j] = M[j, i]
else:
M[i, j] = 1
M = 2*M - 1
mu = np.zeros(l)
for i in range(l):
mu[i] = self.joint_p([rvs[i]], [1])
mu = 2*mu - 1
return M - np.outer(mu, mu)
def aug_covariance_y(self, rvs, y):
p_y = self.balance if y == 1 else 1 - self.balance
l = len(rvs)
M = np.zeros((l, l))
for i in range(l):
for j in range(i + 1, l):
M[i, j] = (self.joint_p([rvs[i], rvs[j], [self.m]], [1, 1, y]) + self.joint_p([rvs[i], rvs[j], [self.m]], [-1, -1, y])) / p_y
for i in range(l):
for j in range(i + 1):
if i != j:
M[i, j] = M[j, i]
else:
M[i, j] = 1
M = 2*M - 1
mu = np.zeros(l)
for i in range(l):
mu[i] = self.joint_p([rvs[i], [self.m]], [1, y]) / p_y
mu = 2*mu - 1
return M - np.outer(mu, mu)
def _get_accs(self):
"""
self.accs[k, i, j] = Pr(lf_k = j | y = i) (i, j scaled to -1, 1 if needed)
"""
self.accs = np.zeros((self.m, 2, 2))
for k in range(self.m):
self.accs[k, 1, 1] = self.joint_p([[k], [self.m]], [self.vals[1], self.vals[1]]) / self.balance
self.accs[k, 0, 0] = self.joint_p([[k], [self.m]], [self.vals[0], self.vals[0]]) / (1 - self.balance)
self.accs[k, 1, 0] = 1 - self.accs[k, 1, 1]
self.accs[k, 0, 1] = 1 - self.accs[k, 0, 0]
def sample(self):
r = np.random.random_sample()
smaller = np.where(self.cdf < r)[0]
if len(smaller) == 0:
i = 0
else:
i = smaller.max() + 1
return self.support[i]
def make_data(self, n, has_label = True):
L = np.zeros((n, self.m))
gold = np.zeros(n)
for i in range(n):
l = self.sample()
L[i, :] = l[:self.m]
if has_label:
gold[i] = l[self.m]
return L.astype(int), gold.astype(int)
def _set_clique_tree(self, edges):
G1 = nx.Graph()
G1.add_nodes_from(self.nodes)
G1.add_edges_from(edges)
# Check if graph is chordal
# TODO: Add step to triangulate graph if not
if not nx.is_chordal(G1):
raise NotImplementedError("Graph triangulation not implemented.")
# Create maximal clique graph G2
# Each node is a maximal clique C_i
# Let w = |C_i \cap C_j|; C_i, C_j have an edge with weight w if w > 0
G2 = nx.Graph()
for i, c in enumerate(nx.chordal_graph_cliques(G1)):
G2.add_node(i, members=c)
for i in G2.nodes():
for j in G2.nodes():
S = G2.nodes[i]["members"].intersection(G2.nodes[j]["members"])
w = len(S)
if w > 0:
G2.add_edge(i, j, weight=w, members=S)
return nx.maximum_spanning_tree(G2) # maximum spanning tree, so that separator sets are maximal
def _set_clique_data(self, c_tree):
# Create a helper data structure which maps cliques (as tuples of member
# sources) --> {start_index, end_index, maximal_cliques}, where
# the last value is a set of indices in this data structure
c_data = dict()
for i in range(self.m):
c_data[i] = {
"vertices": [i],
"max_cliques": set( # which max clique i belongs to
[
j
for j in c_tree.nodes()
if i in c_tree.nodes[j]["members"]
]
),
}
# Get the higher-order clique statistics based on the clique tree
# First, iterate over the maximal cliques (nodes of c_tree) and
# separator sets (edges of c_tree)
if self.higher_order:
counter = 0
for item in chain(c_tree.nodes(), c_tree.edges()):
if isinstance(item, int):
C = c_tree.nodes[item]
C_type = "node"
elif isinstance(item, tuple):
C = c_tree[item[0]][item[1]]
C_type = "edge"
else:
raise ValueError(item)
members = list(C["members"])
nc = len(members)
# For non-singleton cliques, record the member set and its parent maximal cliques
if nc != 1:
# Add to self.c_data as well
#idx = counter + m
c_data[tuple(members)] = {
"vertices": members,
"max_cliques": set([item]) if C_type == "node" else set(item),
}
counter += 1
return c_data
def get_cond_probs(self, votes, y, edgeset = None):
"""
Computes the joint probability Pr(votes, y).
"""
pr_y = self.balance if y == 1 else 1 - self.balance
prod = pr_y
votes_scaled = 2*votes - 1
y_scaled = 2*y - 1
if edgeset is not None:
c_tree = self._set_clique_tree(edgeset)
c_data = self._set_clique_data(c_tree)
else:
c_tree = self.c_tree
c_data = self.c_data
for i in c_tree.nodes():
node = c_tree.nodes[i]
members = list(node['members'])
if len(members) == 1:
v = members[0]
prod *= self.accs[v, y, votes[v]]
else:
# prod *= self.get_clique_probs(members, votes[members], y)
member_votes = np.append(votes_scaled[members], y_scaled)
members = [[m] for m in members] + [[self.m]]
clique_probs = self.joint_p(members, member_votes)/self.joint_p([[self.m]], [y_scaled])
#print("clique probs")
#print(members, member_votes)
#print(self.joint_p(members, member_votes))
#print(clique_probs)
prod *= clique_probs
for i in c_tree.edges():
edge = c_tree.edges[i]
members = list(edge['members'])
if len(members) == 1:
v = members[0]
deg = len(c_data[v]['max_cliques'])
prod /= (self.accs[v, y, votes[v]])**(deg-1)
else:
deg = len(c_data[tuple(members)]['max_cliques'])
# prod /= (self.get_clique_probs(members, votes[members], y))**(deg-1)
                member_votes = np.append(votes_scaled[members], y_scaled)
members = [[m] for m in members] + [[self.m]]
clique_probs = self.joint_p(members, member_votes)/self.joint_p([[self.m]], [y_scaled])
prod /= clique_probs**(deg-1)
return prod
def get_probs(self, votes, edgeset = None):
"""
Computes the probability Pr(y = 1 | votes).
"""
pos = self.get_cond_probs(votes, 1, edgeset)
neg = self.get_cond_probs(votes, 0, edgeset)
if pos == 0:
return 0
else:
return pos / (pos + neg)
def cross_entropy(self, edgeset):
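        # Cross entropy between the true distribution P (from the Ising model)
        # and the junction-tree approximation Q over the full support:
        # H(P, Q) = -sum_{votes, y} P(votes, y) * log Q(votes, y).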
ce = 0
for i in range(self.n_vals):
votes_unscaled = (0.5*(self.support[i, :self.m]+1)).astype(int)
y_unscaled = int(0.5*(self.support[i, self.m]+1))
ce += self.pdf[i] * np.log(self.get_cond_probs(votes_unscaled, y_unscaled, edgeset))
return -ce
def cross_entropy_conditional(self, edgeset):
ce = 0
for i in range(self.n_vals):
votes_unscaled = (0.5*(self.support[i, :self.m]+1)).astype(int)
y_unscaled = int(0.5*(self.support[i, self.m]+1))
prob = self.get_probs(votes_unscaled, edgeset)
if y_unscaled == 0:
prob = 1 - prob
ce += self.pdf[i] * np.log(prob)
return -ce
def cross_entropy_no_label(self, edgeset):
ce = 0
for i in range(len(self.support_no_y)):
sequence = self.support_no_y[i]
sequence_scaled = (0.5*(sequence+1)).astype(int) # scale back to 0/1
voters = [[i] for i in np.arange(self.m)]
true_prob = self.joint_p(voters, sequence)
pos = self.get_cond_probs(sequence_scaled, 1, edgeset)
neg = self.get_cond_probs(sequence_scaled, 0, edgeset)
ce += true_prob * np.log(pos + neg)
return -ce
def to01(labels):
return (0.5*(labels + 1)).astype(int)
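# A minimal sanity check of the scaling convention (hypothetical helper, not
# part of the original module): to01 inverts the 0/1 -> {-1, +1} scaling.
def _check_to01():
    labels01 = np.array([0, 1, 1, 0])
    assert (to01(2*labels01 - 1) == labels01).all()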
# MV and picking the best prompt should do well when votes are conditionally
# independent and equally accurate
def test0():
m = 3
thetas = [0, 0.5, 0.5, 0.5]
# all conditionally independent, some singletons are fine
potentials = [[3], [0, 3], [1, 3], [2, 3]]
pgm = Ising(m, potentials, thetas)
# make data
n_train = 1000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
_, nb_acc = agg.naive_bayes()
_, sym_acc = agg.naive_bayes(symmetric=True)
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"Naive bayes (symmetric): {sym_acc}") # should be worse!
print(f"Test passed: {nb_acc == mv_acc}\n ")
def test1():
m = 5
# randomly parametrize exponential family to determine accuracies and correlations
np.random.seed(2)
thetas = np.random.rand(30)
# all conditionally independent, some singletons are fine
potentials = [[5], [0], [1], [4], [0, 5], [1, 5], [2, 5], [3, 5], [4, 5]]
pgm = Ising(m, potentials, thetas)
# make data
n_train = 1000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
_, nb_acc = agg.naive_bayes()
_, sym_acc = agg.naive_bayes(symmetric=True)
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"Naive bayes (symmetric): {sym_acc}") # should be worse!
print(f"Test passed: {nb_acc >= max(mv_acc, pb_acc) and sym_acc < nb_acc}\n ")
def test2():
m = 3
# randomly parametrize exponential family to determine accuracies and correlations
#theta = np.random.rand()
#theta_cliques = (np.random.randint(0, 2, 5)*2 - 1)*theta
#theta = np.random.rand()
#theta_cliques = [1, 1, 1, 1, 1, 1, 1]
np.random.seed(3)
thetas = np.random.rand(30)
# all conditionally independent
potentials = [[3], [0, 3], [1, 3], [2, 3]]
pgm = Ising(m, potentials, thetas)
n_train = 100
train_votes, train_gold = pgm.make_data(n_train)
n_test = 100
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
_, nb_acc = agg.naive_bayes()
_, sym_acc = agg.naive_bayes(symmetric=True)
_, fs_acc = agg.flying_squid()
print(pgm.joint_p([[3]], [1]))
print(pgm.balance)
print(pgm.expectation([[3]]), pgm.expectation([[0]]), pgm.expectation([[0, 3]]))
print(pgm.expectation([[3]])*pgm.expectation([[0, 3]]))
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"Naive bayes (symmetric): {sym_acc}")
print(f"FlyingSquid: {fs_acc}")
print(f"Test passed: {fs_acc >= max(mv_acc, pb_acc) and nb_acc == sym_acc}\n")
def test3():
m = 3
np.random.seed(2)
thetas = [0.5, 0.1, 0.4, 0.4]
# all conditionally independent
potentials = [[3], [0, 3], [1, 3], [2, 3]]
pgm = Ising(m, potentials, thetas)
# print(pgm.balance)
n_train = 1000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
#print(agg.fs_accs, agg.nb_accs, pgm.accs)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
_, nb_acc = agg.naive_bayes()
_, fs_acc = agg.flying_squid()
_, dp_nolabel_acc = agg.data_programming(with_label=False)
_, dp_label_acc = agg.data_programming(with_label=True)
#print(agg.dp_learn_params(with_label=False))
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"FlyingSquid: {fs_acc}")
print(f"Data Programming (no label): {dp_nolabel_acc}")
print(f"Data Programming (with label): {dp_label_acc}")
    assert 0.69 <= mv_acc <= 0.7 and 0.69 <= pb_acc <= 0.7, "MV and pick best should be 0.692 and 0.694."
    assert 0.77 <= min(nb_acc, fs_acc, dp_nolabel_acc, dp_label_acc) <= 0.79, "All methods should have accuracy 0.78."
print(f"Test passed: {min(nb_acc, fs_acc, dp_nolabel_acc, dp_label_acc) >= max(mv_acc, pb_acc)}\n")
def test4():
m = 3
# randomly parametrize exponential family to determine accuracies and correlations
#theta = np.random.rand()
#theta_cliques = (np.random.randint(0, 2, 5)*2 - 1)*theta
#theta = np.random.rand()
#theta_cliques = [1, 1, 1, 1, 1, 1, 1]
np.random.seed(3)
thetas = np.random.rand(30)
thetas[0] = 0.1
thetas[1] = 0.2
thetas[2] = 0.01
thetas[3] = 0.1
    thetas[4] = 0.5 # make the (0, 1) correlation strong
potentials = [[3], [0, 3], [1, 3], [2, 3], [0, 1]]
pgm = Ising(m, potentials, thetas)
print(pgm.joint_p([[0], [1], [3]], [1, 1, 1])/pgm.balance)
print(pgm.joint_p([[0], [3]], [1, 1]) * pgm.joint_p([[1], [3]], [1, 1]) / pgm.balance**2)
print(pgm.joint_p([[0], [2], [3]], [1, 1, 1])/pgm.balance)
print(pgm.joint_p([[0], [3]], [1, 1]) * pgm.joint_p([[2], [3]], [1, 1]) / pgm.balance**2)
n_train = 10000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
print(pgm.expectation([[3]]), pgm.expectation([[0, 1]]), pgm.expectation([[0, 1, 3]]))
print(pgm.expectation([[3]])*pgm.expectation([[0, 1]]))
edgeset = [(0, 1)]
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
nb_probs, nb_acc = agg.naive_bayes()
fs_probs, fs_acc = agg.flying_squid()
jt_probs, jt_acc = agg.junction_tree(edgeset)
jt_sym_probs, jt_sym_acc = agg.junction_tree(edgeset, symmetric=True)
# print(agg.fs_accs, agg.nb_accs, pgm.accs)
#print(pgm.joint_p([[0], [1], [3]], [1, 1, 1]) / pgm.balance)
#print(pgm.joint_p([[0], [1], [3]], [-1, -1, -1]) / pgm.balance)
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"FlyingSquid: {fs_acc}")
print(f"Junction tree (with deps): {jt_acc}")
print(f"Junction tree (with deps, symmetric): {jt_sym_acc}")
print(agg.get_probs(np.array([1, 1, 0]), agg.sym_accs, edgeset=[(0, 1)], symmetric=True, abstains_symmetric=False))
print(agg.get_probs(np.array([1, 1, 0]), agg.nb_accs, edgeset=[(0, 1)], symmetric=False, abstains_symmetric=False))
print(pgm.get_probs(np.array([1, 1, 0])))
fail = False
for i, votes in enumerate(test_votes):
if np.abs(pgm.get_probs(votes) - jt_probs[i][1]) > 0.05:
print(votes)
print(pgm.get_probs(votes), jt_probs[i][1])
fail = True
#print(pgm.get_probs(votes), nb_probs[i], test_gold[i])
#print(np.round(pgm.get_probs(votes)), np.round(fs_probs[i]), test_gold[i])
if fail:
print("Test failed.")
else:
print("Test passed.")
def test5():
m = 3
# randomly parametrize exponential family to determine accuracies and correlations
#theta = np.random.rand()
#theta_cliques = (np.random.randint(0, 2, 5)*2 - 1)*theta
#theta = np.random.rand()
#theta_cliques = [1, 1, 1, 1, 1, 1, 1]
    np.random.seed(3) # seeds 6, 9, and 10 also work well
thetas = np.random.rand(30)
thetas[0] = 0
thetas[1] = 0.1
thetas[2] = 0.6
thetas[3] = 0.1
    thetas[4] = 0.6 # make the (0, 1) correlation strong
potentials = [[3], [0, 3], [1, 3], [2, 3], [0, 1]]
pgm = Ising(m, potentials, thetas)
print(pgm.joint_p([[0], [1], [3]], [1, 1, 1])/pgm.balance)
print(pgm.joint_p([[0], [3]], [1, 1]) * pgm.joint_p([[1], [3]], [1, 1]) / pgm.balance**2)
print(pgm.joint_p([[0], [2], [3]], [1, 1, 1])/pgm.balance)
print(pgm.joint_p([[0], [3]], [1, 1]) * pgm.joint_p([[2], [3]], [1, 1]) / pgm.balance**2)
n_train = 10000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 100
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
edgeset = [(0, 1)]
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
nb_probs, nb_acc = agg.naive_bayes()
fs_probs, fs_acc = agg.flying_squid()
jt_probs, jt_acc = agg.junction_tree(edgeset)
jt_sym_probs, jt_sym_acc = agg.junction_tree(edgeset, symmetric=True)
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {accuracy_score(test_gold, np.array(nb_probs).argmax(axis=1))}")
print(f"FlyingSquid: {accuracy_score(test_gold, np.array(fs_probs).argmax(axis=1))}")
print(f"Junction tree (with deps): {accuracy_score(test_gold, np.array(jt_probs).argmax(axis=1))}")
print(f"Junction tree (with deps, symmetric): {accuracy_score(test_gold, np.array(jt_sym_probs).argmax(axis=1))}")
print(f"NB log loss {log_loss(test_gold, nb_probs)}")
print(f"FS log loss {log_loss(test_gold, fs_probs)}")
print(f"JT log loss {log_loss(test_gold, jt_probs)}")
print(agg.get_probs(np.array([1, 1, 0]), agg.sym_accs, edgeset=[(0, 1)], symmetric=True, abstains_symmetric=False))
print(agg.get_probs(np.array([1, 1, 0]), agg.nb_accs, edgeset=[(0, 1)], symmetric=False, abstains_symmetric=False))
print(pgm.get_probs(np.array([1, 1, 0])))
jt_probs = np.array(jt_probs)
jt_sym_probs = np.array(jt_sym_probs)
fail = False
pgm_probs = np.zeros(len(test_votes))
for i, votes in enumerate(test_votes):
pgm_probs[i] = pgm.get_probs(votes)
avg_jt_err = np.linalg.norm(pgm_probs - jt_probs[:, 1]) / n_test
avg_jt_sym_err = np.linalg.norm(pgm_probs - jt_sym_probs[:, 1]) / n_test
if avg_jt_err > 0.05:
fail = True
if avg_jt_err < avg_jt_sym_err:
print(avg_jt_err, avg_jt_sym_err)
        fail = True
if fail:
print("Test failed.")
else:
print("Test passed.")
def test6():
m = 3
np.random.seed(5)
thetas = np.random.rand(30)
# model some edges - see if we can recover it
potentials = [[3], [0, 3], [1, 3], [2, 3], [0, 1], [0, 2]]
pgm = Ising(m, potentials, thetas)
# make big datasets
n_train = 1000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
# overall accuracies Pr(lf_p = y) on train
acc_theta = np.zeros(m)
for i in range(m):
        acc_theta[i] = np.mean(train_votes[:, i] == train_gold)
acc_theta = 2*acc_theta - 1
all_thetas = structure_learning(m, train_votes, train_gold, acc_theta)
print(all_thetas)
#idx = np.argsort(all_thetas, axis=None)[-1]
#i = int(np.round(idx / m))
#j = idx % m
#print(f"Recovered edge: ({i}, {j})") # should be (0, 1)
ce = np.ones(m*m) * np.inf
ce_cond = np.ones(m*m) * np.inf
ce_nolabel = np.ones(m*m) * np.inf
true_ce = np.ones(m*m) * np.inf
true_ce_cond = np.ones(m*m) * np.inf
true_ce_nolabel = np.ones(m*m) * np.inf
neighborhood_size = len(all_thetas.flatten())
all_edgesets = []
for n in range(neighborhood_size):
print(f"edgeset size is {n}")
# try edgeset of size n
if n != 0:
idxs = np.argsort(np.abs(all_thetas), axis=None)[-n:]
edgeset = []
for idx in idxs:
i = int(np.floor(idx / m))
j = idx % m
# print(all_thetas[i, j])
# print(f"Recovered edge: ({i}, {j})") # should be (0, 1)
edgeset.append((i, j))
else:
edgeset = []
print(edgeset)
all_edgesets.append(edgeset)
try:
ce[n] = agg.cross_entropy(train_votes, train_gold, edgeset)
ce_cond[n] = agg.cross_entropy_conditional(train_votes, train_gold, edgeset)
ce_nolabel[n] = agg.cross_entropy_no_label(test_votes, edgeset)
true_ce[n] = pgm.cross_entropy(edgeset)
true_ce_cond[n] = pgm.cross_entropy_conditional(edgeset)
true_ce_nolabel[n] = pgm.cross_entropy_no_label(edgeset)
        except (nx.NetworkXError, NotImplementedError):
# skip if proposed graph is not triangulated
pass
print(ce)
print(ce_cond)
print(ce_nolabel)
best_ce = ce.argmin()
best_ce_cond = ce_cond.argmin()
best_ce_nolabel = ce_nolabel.argmin()
print(f"Recovered edgeset using MLE: {all_edgesets[best_ce]}")
print(f"Recovered edgeset using MLE (conditional): {all_edgesets[best_ce_cond]}")
print(f"Recovered edgeset using MLE (no labels): {all_edgesets[best_ce_nolabel]}")
print(true_ce)
print(true_ce_cond)
print(true_ce_nolabel)
def main():
#test0()
#test1()
#test2()
#test3()
test4()
# test5()
#test6()
if __name__ == "__main__":
main()
|
evaporate-main
|
evaporate/weak_supervision/make_pgm.py
|
import numpy as np
import pandas as pd
from emptyheaded import *
class ResultError(Exception):
pass
def lollipop_agg(db):
lolli_agg = \
"""
LollipopAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,x),z:long<-[COUNT(*)].
"""
print "\nQUERY: LOLLIPOP AGG"
db.eval(lolli_agg)
def barbell_agg(db):
b_agg = \
"""
BarbellAgg(;w) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,x),Edge(x,y),Edge(y,z),Edge(x,z),w:long<-[COUNT(*)].
"""
print "\nQUERY: BARBELL AGG"
db.eval(b_agg)
def barbell_materialized(db):
barbell = \
"""
Barbell(a,b,c,x,y,z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,x),Edge(x,y),Edge(y,z),Edge(x,z).
"""
print "\nQUERY: BARBELL"
db.eval(barbell)
def lollipop_materialized(db):
lollipop = \
"""
Lollipop(a,b,c,x) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,x).
"""
print "\nQUERY: LOLLIPOP"
db.eval(lollipop)
def triangle_agg(db):
tri_agg = \
"""
TriangleAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),z:long<-[COUNT(*)].
"""
print "\nQUERY: TRIANGLE AGG"
db.eval(tri_agg)
def four_clique_agg(db):
flique = \
"""
FliqueAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),z:long<-[COUNT(*)].
"""
print "\nQUERY: FOUR CLIQUE AGG"
db.eval(flique)
def triangle_materialized(db):
triangle = \
"""
Triangle(a,b,c) :- Edge(a,b),Edge(b,c),Edge(a,c).
"""
print "\nQUERY: TRIANGLE"
db.eval(triangle)
def four_clique_materialized(db):
four_clique = \
"""
Flique(a,b,c,d) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d).
"""
print "\nQUERY: FOUR CLIQUE"
db.eval(four_clique)
def four_clique_agg_sel(db,node):
fourclique_sel_agg = \
"""
FliqueSelAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),Edge(a,x),x=%(node)s,z:long<-[COUNT(*)].
"""% locals()
print "\nQUERY: 4 CLIQUE SELECTION AGG"
db.eval(fourclique_sel_agg)
def four_clique_sel(db,node):
fourclique_sel = \
"""
FliqueSel(a,b,c,d) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),Edge(a,x),x=%(node)s.
"""% locals()
print "\nQUERY: 4 CLIQUE SELECTION"
db.eval(fourclique_sel)
def barbell_agg_sel(db,node):
barbell_sel_agg = \
"""
BarbellSelAgg(;w) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,p),Edge(p,x),Edge(x,y),Edge(y,z),Edge(x,z),p=%(node)s,w:long<-[COUNT(*)].
"""% locals()
print "\nQUERY: BARBELL SELECTION AGG"
db.eval(barbell_sel_agg)
def barbell_sel(db,node):
barbell_s = \
"""
BarbellSel(a,b,c,x,y,z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,p),Edge(p,x),Edge(x,y),Edge(y,z),Edge(x,z),p=%(node)s.
"""% locals()
print "\nQUERY: BARBELL SELECTION"
db.eval(barbell_s)
def pagerank(db):
pr="""
N(;w) :- Edge(x,y),w:long<-[SUM(x;1)].
PageRank(x;y) :- Edge(x,z),y:float<-[(1.0/N)].
PageRank(x;y)*[i=5]:-Edge(x,z),PageRank(z),InvDegree(z),y:float <- [0.15+0.85*SUM(z;1.0)].
"""
print "\nQUERY: PAGERANK"
db.eval(pr)
def sssp(db,node):
paths = \
"""
SSSP(x;y) :- Edge(w,x),w=%(node)s,y:long <- [1].
SSSP(x;y)*[c=0] :- Edge(w,x),SSSP(w),y:long <- [1+MIN(w;1)].
"""% locals()
print "\nQUERY: SSSP"
db.eval(paths)
def test_pruned(dataset):
build = True
ratings = pd.read_csv("/dfs/scratch0/caberger/datasets/eh_datasets/"+dataset+"/pruned/data.tsv",\
sep='\t',\
names=["0","1"],\
dtype={"0":np.uint32,"1":np.uint32})
graph = Relation(
name="Edge",
dataframe=ratings)
if build:
db = Database.create(
Config(num_threads=56),
"/dfs/scratch0/caberger/datasets/eh_datasets/databases/"+dataset+"/db_pruned",
[graph])
db.build()
db = Database.from_existing("/dfs/scratch0/caberger/datasets/eh_datasets/databases/"+dataset+"/db_pruned")
triangle_agg(db)
triangle_materialized(db)
#four_clique_materialized(db)
if dataset != "twitter2010":
four_clique_agg(db)
def test_duplicated(dataset):
build = True
ratings = pd.read_csv("/dfs/scratch0/caberger/datasets/eh_datasets/"+dataset+"/duplicated/data.tsv",\
sep='\t',\
names=["0","1"],\
dtype={"0":np.uint32,"1":np.uint32})
deg = pd.read_csv("/dfs/scratch0/caberger/datasets/eh_datasets/"+dataset+"/pagerank/inverse_degree.tsv",\
sep='\t',\
names=["0","a_0"],\
dtype={"0":np.uint32,"a_0":np.float32})
graph = Relation(
name="Edge",
dataframe=ratings)
inv_degree = Relation(
name="InvDegree",
dataframe=deg)
if build:
db = Database.create(
Config(num_threads=56),
"/dfs/scratch0/caberger/datasets/eh_datasets/databases/"+dataset+"/db_duplicated",
[graph,inv_degree])
db.build()
db = Database.from_existing("/dfs/scratch0/caberger/datasets/eh_datasets/databases/"+dataset+"/db_duplicated")
lollipop_agg(db)
barbell_agg(db)
node_ids = {
"googlePlus": {
"high":"209",
"low":"555",
"paths":"6966"
},
"higgs": {
"high":"5153",
"low":"34",
"paths":"83222"
},
"socLivejournal": {
"high":"10010",
"low":"57",
"paths":"10009"
},
"orkut": {
"high":"43609",
"low":"78",
"paths":"43608"
},
"cidPatents": {
"high":"4723130",
"low":"33156",
"paths":"5795784"
},
"twitter2010": {
"paths":"1037948"
}
}
if dataset != "twitter2010":
four_clique_agg_sel(db,node_ids[dataset]["high"])
four_clique_agg_sel(db,node_ids[dataset]["low"])
barbell_agg_sel(db,node_ids[dataset]["high"])
barbell_agg_sel(db,node_ids[dataset]["low"])
pagerank(db)
sssp(db,node_ids[dataset]["paths"])
#basically the main method down here.
start()
#datasets = ["googlePlus","higgs","socLivejournal","orkut","cidPatents","twitter2010"]
datasets = ["socLivejournal","orkut","cidPatents","twitter2010"]
#datasets = ["twitter2010"]
for dataset in datasets:
print "DATASET: " + dataset
test_pruned(dataset)
test_duplicated(dataset)
stop()
|
EmptyHeaded-master
|
test/graph/perf.py
|
import os
import sys
import numpy as np
import pandas as pd
from emptyheaded import *
check_big_out = False
## TODO:
## 4-Clique SQL
## Fix Barbell and 4-Clique Selection Order
class ResultError(Exception):
pass
def lollipop_agg(db):
lolli_agg = \
"""
LollipopAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,x),z:long<-[COUNT(*)].
"""
print "\nLollipop AGG"
db.eval(lolli_agg)
tri = db.get("LollipopAgg")
df = tri.getDF()
print df
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
if df.iloc[0][0] != 1426911480L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def lollipop_agg_sql(db):
lolli_agg = \
"""
CREATE TABLE LollipopAggSQL AS (
SELECT COUNT(*)
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.a AND e1.a = e3.b
JOIN Edge e4 ON e1.a = e4.a)
"""
print "\nLollipop AGG SQL"
db.eval(lolli_agg, useSql=True)
tri = db.get("LollipopAggSQL")
df = tri.getDF()
print df
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
if df.iloc[0][0] != 1426911480L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def barbell_agg(db):
b_agg = \
"""
BarbellAgg(;w) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,x),Edge(x,y),Edge(y,z),Edge(x,z),w:long<-[COUNT(*)].
"""
print "\nBarbell AGG"
db.eval(b_agg)
tri = db.get("BarbellAgg")
df = tri.getDF()
print df
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
if df.iloc[0][0] != 20371831447136L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def barbell_agg_sql(db):
b_agg = \
"""
CREATE TABLE BarbellAggSQL AS (
SELECT COUNT(*)
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.a AND e3.b = e1.a
JOIN Edge e4 ON e4.a = e1.b
JOIN Edge e5 ON e5.a = e4.b
JOIN Edge e6 ON e5.b = e6.a
JOIN Edge e7 ON e6.b = e7.a AND e7.b = e5.a
)
"""
print "\nBarbell AGG SQL"
db.eval(b_agg, useSql=True)
tri = db.get("BarbellAggSQL")
df = tri.getDF()
print df
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
if df.iloc[0][0] != 20371831447136L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def barbell_materialized(db):
barbell = \
"""
Barbell(a,b,c,x,y,z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,x),Edge(x,y),Edge(y,z),Edge(x,z).
"""
print "\nBARBELL"
db.eval(barbell)
if check_big_out:
tri = db.get("Barbell")
df = tri.getDF()
if tri.num_rows != 56L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[55]
        if row0[0] != 5L or row0[1] != 3L or row0[2] != 4L or row0[3] != 3L or row0[4] != 4L or row0[5] != 5L:
raise ResultError("ROW0 INCORRECT: " + str(row0))
def barbell_materialized_sql(db):
barbell = \
"""
CREATE TABLE BarbellSQL AS (
SELECT e1.a, e2.a, e3.b, e5.a, e6.a, e7.b
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.b
AND e3.a = e1.a
JOIN Edge e4 ON e4.a = e1.a
JOIN Edge e5 ON e5.a = e4.b
JOIN Edge e6 ON e5.b = e6.a
JOIN Edge e7 ON e6.b = e7.b
AND e7.a = e5.a
)
"""
print "\nBARBELL SQL"
db.eval(barbell, useSql=True)
if check_big_out:
tri = db.get("BarbellSQL")
df = tri.getDF()
if tri.num_rows != 56L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[55]
# Rows are permuted here too.
        if row0[0] != 5L or row0[1] != 3L or row0[2] != 4L or row0[3] != 3L or row0[4] != 4L or row0[5] != 5L: #5 3 4 3 4 5
raise ResultError("ROW0 INCORRECT: " + str(row0))
def lollipop_materialized(db):
lollipop = \
"""
Lollipop(a,b,c,x) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,x).
"""
print "\nLOLLIPOP"
db.eval(lollipop)
tri = db.get("Lollipop")
df = tri.getDF()
if tri.num_rows != 28L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[27]
    if row0[0] != 5L or row0[1] != 3L or row0[2] != 4L or row0[3] != 4L:
raise ResultError("ROW0 INCORRECT: " + str(row0))
def lollipop_materialized_sql(db):
lollipop = \
"""
CREATE TABLE LollipopSQL AS (
SELECT e1.a, e2.a, e3.a, e4.b
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.a AND e1.a = e3.b
JOIN Edge e4 ON e1.a = e4.a)
"""
print "\nLOLLIPOP SQL"
db.eval(lollipop, useSql=True)
if check_big_out:
tri = db.get("LollipopSQL")
df = tri.getDF()
if tri.num_rows != 28L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[27]
        if row0[0] != 5L or row0[1] != 3L or row0[2] != 4L or row0[3] != 4L:
raise ResultError("ROW0 INCORRECT: " + str(row0))
def triangle_agg(db):
tri_agg = \
"""
TriangleAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),z:long<-[COUNT(*)].
"""
print "\nTRIANGLE AGG"
db.eval(tri_agg)
tri = db.get("TriangleAgg")
df = tri.getDF()
print df
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
if df.iloc[0][0] != 1612010L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def triangle_agg_sql(db):
tri_agg = \
"""
CREATE TABLE TriangleAggSQL AS (
SELECT COUNT(*)
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.b AND e3.a = e1.a
)
"""
print "\nTRIANGLE AGG SQL"
db.eval(tri_agg, useSql=True)
tri = db.get("TriangleAggSQL")
df = tri.getDF()
print df
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
if df.iloc[0][0] != 1612010L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def four_clique_agg(db):
flique = \
"""
FliqueAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),z:long<-[COUNT(*)].
"""
print "\nFOUR CLIQUE AGG"
db.eval(flique)
tri = db.get("FliqueAgg")
df = tri.getDF()
print df
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
if df.iloc[0][0] != 30004668L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def four_clique_agg_sql(db):
flique = \
"""
CREATE TABLE FliqueAggSQL AS (
SELECT COUNT(*)
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.b AND e3.a = e1.a
JOIN Edge e4 ON e4.a = e1.a
JOIN Edge e5 ON e5.a = e1.b AND e4.b = e5.b
JOIN Edge e6 ON e6.a = e2.b AND e6.b = e5.b
)
"""
print "\nFOUR CLIQUE AGG SQL"
db.eval(flique, useSql=True)
tri = db.get("FliqueAggSQL")
df = tri.getDF()
print df
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
if df.iloc[0][0] != 30004668L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def triangle_project(db):
triangle = \
"""
TriangleProj(a,b) :- Edge(a,b),Edge(b,c),Edge(a,c).
"""
print "\nTRIANGLE PROJECT"
ir = db.optimize(triangle)
db.eval(triangle)
tri = db.get("TriangleProj")
df = tri.getDF()
    if tri.num_rows != 85658L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[0]
print row0
    if row0[0] != 6L or row0[1] != 5L: #(6L,5L,2L)
raise ResultError("ROW0 INCORRECT: " + str(row0))
def triangle_materialized(db):
triangle = \
"""
Triangle(a,b,c) :- Edge(a,b),Edge(b,c),Edge(a,c).
"""
print "\nTRIANGLE"
db.eval(triangle)
tri = db.get("Triangle")
df = tri.getDF()
if tri.num_rows != 1612010L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[0]
print row0
    if row0[0] != 6L or row0[1] != 5L or row0[2] != 2L: #(6L,5L,2L)
raise ResultError("ROW0 INCORRECT: " + str(row0))
def triangle_materialized_sql(db):
triangle = \
"""
CREATE TABLE TriangleSQL AS (
SELECT e1.a, e2.a, e3.b
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.b AND e3.a = e1.a
)
"""
print "\nTRIANGLE SQL"
db.eval(triangle, useSql=True)
tri = db.get("TriangleSQL")
df = tri.getDF()
if tri.num_rows != 1612010L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
# The query appears to be the same as the Datalog triangle, but the columns
# are permuted for some reason.
    if len(df[(df[0] == 2L) & (df[1] == 6L) & (df[2] == 5L)]) != 1: #(2L,6L,5L)
raise ResultError("ROW (2, 6, 5) NOT FOUND")
def four_clique_materialized(db):
four_clique = \
"""
Flique(a,b,c,d) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d).
"""
print "\nFOUR CLIQUE"
db.eval(four_clique)
tri = db.get("Flique")
df = tri.getDF()
if tri.num_rows != 30004668L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[0]
    if row0[0] != 9 or row0[1] != 8 or row0[2] != 7 or row0[3] != 0:
raise ResultError("ROW0 INCORRECT: " + str(row0))
def four_clique_materialized_sql(db):
four_clique = \
"""
CREATE TABLE FliqueSQL AS (
SELECT e1.a, e2.a, e3.b, e4.b
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.b AND e3.a = e1.a
JOIN Edge e4 ON e4.a = e1.a
JOIN Edge e5 ON e5.a = e1.b AND e4.b = e5.b
JOIN Edge e6 ON e6.a = e2.b AND e6.b = e5.b
)
"""
print "\nFOUR CLIQUE SQL"
db.eval(four_clique, useSql=True)
tri = db.get("FliqueSQL")
df = tri.getDF()
if tri.num_rows != 30004668L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[0]
    if row0[0] != 9 or row0[1] != 8 or row0[2] != 7 or row0[3] != 0:
raise ResultError("ROW0 INCORRECT: " + str(row0))
def four_clique_agg_sel(db):
fourclique_sel_agg = \
"""
FliqueSelAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),Edge(a,x),x=0,z:long<-[COUNT(*)].
"""
print "\n4 CLIQUE SELECTION AGG"
db.eval(fourclique_sel_agg)
foursel = db.get("FliqueSelAgg")
df = foursel.getDF()
if foursel.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(foursel.num_rows))
if df.iloc[0][0] != 3759972L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def four_clique_agg_sel_sql(db):
fourclique_sel_agg = \
"""
CREATE TABLE FliqueSelAggSQL AS (
SELECT COUNT(*)
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.b AND e3.a = e1.a
JOIN Edge e4 ON e4.a = e1.a
JOIN Edge e5 ON e5.a = e1.b AND e4.b = e5.b
JOIN Edge e6 ON e6.a = e2.b AND e6.b = e5.b
JOIN Edge e7 ON e7.a = e1.a
WHERE e7.b = 0
)
"""
print "\n4 CLIQUE SELECTION AGG SQL"
db.eval(fourclique_sel_agg, useSql=True)
foursel = db.get("FliqueSelAggSQL")
df = foursel.getDF()
if foursel.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(foursel.num_rows))
if df.iloc[0][0] != 3759972L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def four_clique_sel(db):
fourclique_sel = \
"""
FliqueSel(a,b,c,d) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),Edge(a,x),x=0.
"""
print "\n4 CLIQUE SELECTION"
db.eval(fourclique_sel)
foursel = db.get("FliqueSel")
df = foursel.getDF()
if foursel.num_rows != 3759972L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[0]
    if row0[0] != 1L or row0[1] != 0L or row0[2] != 48L or row0[3] != 53L:
raise ResultError("ROW0 INCORRECT: " + str(row0))
def four_clique_sel_sql(db):
fourclique_sel = \
"""
CREATE TABLE FliqueSelSQL AS (
SELECT e1.a, e2.a, e3.a, e4.b
FROM Edge e1
JOIN Edge e2 ON e1.b = e2.a
JOIN Edge e3 ON e2.b = e3.a
JOIN Edge e4 ON e3.b = e4.b AND e4.a = e1.a
JOIN Edge e5 ON e5.a = e1.a AND e5.b = e2.b
JOIN Edge e6 ON e6.a = e1.b AND e6.b = e3.b
JOIN Edge e7 ON e7.a = e1.a
WHERE e7.b = 0
)
"""
print "\n4 CLIQUE SELECTION SQL"
db.eval(fourclique_sel, useSql=True)
foursel = db.get("FliqueSelSQL")
df = foursel.getDF()
if foursel.num_rows != 3759972L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
row0 = df.iloc[0]
    if row0[0] != 1L or row0[1] != 0L or row0[2] != 48L or row0[3] != 53L:
raise ResultError("ROW0 INCORRECT: " + str(row0))
def barbell_agg_sel(db):
barbell_sel_agg = \
"""
BarbellSelAgg(;w) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,p),Edge(p,x),Edge(x,y),Edge(y,z),Edge(x,z),p=6,w:long<-[COUNT(*)].
"""
print "\nBARBELL SELECTION AGG"
db.eval(barbell_sel_agg)
bs = db.get("BarbellSelAgg")
df = bs.getDF()
if bs.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(bs.num_rows))
if df.iloc[0][0] != 26936100L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def barbell_agg_sel_sql(db):
barbell_sel_agg = \
"""
CREATE TABLE BarbellSelAggSQL AS (
SELECT COUNT(*)
FROM Edge e1
JOIN Edge e2 ON e1.a = e2.a
JOIN Edge e3 ON e2.b = e3.b AND e3.a = e1.b
JOIN Edge e4 ON e4.a = e1.b
JOIN Edge e5 ON e4.b = e5.a
JOIN Edge e6 ON e6.a = e5.b
JOIN Edge e7 ON e6.b = e7.b
JOIN Edge e8 ON e7.a = e8.b AND e8.a = e6.a
WHERE e5.a = 6
)
"""
print "\nBARBELL SELECTION AGG SQL"
db.eval(barbell_sel_agg, useSql=True)
if check_big_out:
bs = db.get("BarbellSelAggSQL")
df = bs.getDF()
if bs.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(bs.num_rows))
if df.iloc[0][0] != 26936100L:
raise ResultError("ANNOTATION INCORRECT: " + str(df.iloc[0][0]))
def barbell_sel(db):
barbell_s = \
"""
BarbellSel(a,b,c,x,y,z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,p),Edge(p,x),Edge(x,y),Edge(y,z),Edge(x,z),p=6.
"""
print "\nBARBELL SELECTION"
db.eval(barbell_s)
if check_big_out:
bs = db.get("BarbellSel")
if bs.num_rows != 26936100L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(bs.num_rows))
def barbell_sel_sql(db):
barbell_s = \
"""
CREATE TABLE BarbellSelSQL AS (
SELECT e1.a, e2.b, e3.a, e6.a, e7.b, e8.b
FROM Edge e1
JOIN Edge e2 ON e1.a = e2.a
JOIN Edge e3 ON e2.b = e3.b AND e3.a = e1.b
JOIN Edge e4 ON e4.a = e1.b
JOIN Edge e5 ON e4.b = e5.a
JOIN Edge e6 ON e6.a = e5.b
JOIN Edge e7 ON e6.b = e7.b
JOIN Edge e8 ON e7.a = e8.b AND e8.a = e6.a
WHERE e5.a = 6
)
"""
print "\nBARBELL SELECTION SQL"
db.eval(barbell_s, useSql=True)
if check_big_out:
bs = db.get("BarbellSelSQL")
if bs.num_rows != 26936100L:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(bs.num_rows))
def sssp(db):
paths = \
"""
SSSP(x;y) :- Edge(w,x),w=0,y:long <- [1].
SSSP(x;y)*[c=0] :- Edge(w,x),SSSP(w),y:long <- [1+MIN(w;1)].
"""
print "\nSSSP"
db.eval(paths)
bs = db.get("SSSP")
df = bs.getDF()
if df.iloc[1000][1] != 2:
raise ResultError("SSSP value incorrect: " + str(df.iloc[1000][1]))
def sssp_sql(db):
paths = \
"""
WITH RECURSIVE SSSPSQL AS (
SELECT e.b, 1 FROM Edge e WHERE e.a = 0
UNION
SELECT e.b, 1 + MIN(e.a) FROM Edge e JOIN SSSPSQL s ON s.b = e.a
)
"""
print "\nSSSP SQL"
db.eval(paths, useSql=True)
bs = db.get("SSSPSQL")
df = bs.getDF()
if df.iloc[1000][1] != 2:
raise ResultError("SSSP value incorrect: " + str(df.iloc[1000][1]))
def pagerank(db):
pr="""
N(;w) :- Edge(x,y),w:long<-[SUM(x;1)].
PageRank(x;y) :- Edge(x,z),y:float<-[(1.0/N)].
PageRank(x;y)*[i=5]:-Edge(x,z),PageRank(z),InvDegree(z),y:float <- [0.15+0.85*SUM(z;1.0)].
"""
print "\nPAGERANK"
db.eval(pr)
bs = db.get("PageRank")
df = bs.getDF()
    if abs(df.iloc[0][1]-15.227079960463206) > 0.0001:
raise ResultError("PageRank value incorrect: " + str(df.iloc[0][1]))
def pagerank_sql(db):
pr="""
CREATE TABLE N AS (
SELECT SUM(e.a) FROM Edge e
);
WITH RECURSIVE FOR 5 ITERATIONS PageRankSQL AS (
SELECT e.a, 1.0 / N FROM Edge e
UNION
SELECT e.a, 0.15+0.85*SUM(e.b) FROM Edge e JOIN PageRankSQL pr ON e.b = pr.a JOIN InvDegree i ON pr.a = i.a
)
"""
print "\nPAGERANK SQL"
db.eval(pr, useSql=True)
bs = db.get("PageRankSQL")
df = bs.getDF()
    if abs(df.iloc[0][1]-15.227079960463206) > 0.0001:
raise ResultError("PageRank value incorrect: " + str(df.iloc[0][1]))
def test_pruned():
build = True
ratings = pd.read_csv(os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/data/facebook_pruned.tsv",\
sep='\t',\
names=["0","1"],\
dtype={"0":np.uint32,"1":np.uint32})
graph = Relation(
name="Edge",
dataframe=ratings,
attribute_names=["a","b"])
if build:
db = Database.create(
Config(num_threads=4),
os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/databases/db_pruned",
[graph])
db.build()
db = Database.from_existing(os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/databases/db_pruned")
triangle_project(db)
triangle_materialized(db)
triangle_materialized_sql(db)
triangle_agg(db)
triangle_agg_sql(db)
four_clique_materialized(db)
#four_clique_materialized_sql(db)
four_clique_agg(db)
four_clique_agg_sql(db)
def test_duplicated():
build = True
ratings = pd.read_csv(os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/data/facebook_duplicated.tsv",\
sep='\t',\
names=["0","1"],\
dtype={"0":np.uint32,"1":np.uint32})
graph = Relation(
name="Edge",
dataframe=ratings,
attribute_names=["a","b"])
deg = pd.read_csv(os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/data/inv_degree.tsv",\
sep='\t',\
names=["0","a_0"],\
dtype={"0":np.uint32,"a_0":np.float32})
inv_degree = Relation(
name="InvDegree",
dataframe=deg,
attribute_names=["a"])
if build:
db = Database.create(
Config(num_threads=4),
os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/databases/db_duplicated",
[graph,inv_degree])
db.build()
db = Database.from_existing(os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/databases/db_duplicated")
lollipop_agg(db)
lollipop_agg_sql(db)
barbell_agg(db)
barbell_agg_sql(db)
four_clique_agg_sel(db)
four_clique_agg_sel_sql(db)
four_clique_sel(db)
four_clique_sel_sql(db)
barbell_agg_sel(db)
#barbell_agg_sel_sql(db)
barbell_sel(db)
#barbell_sel_sql(db)
pagerank(db)
pagerank_sql(db)
sssp(db)
sssp_sql(db)
def test_simple():
build = True
ratings = pd.read_csv(os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/data/simple.tsv",\
sep='\t',\
names=["0","1"],\
dtype={"0":np.uint32,"1":np.uint32})
graph = Relation(
name="Edge",
dataframe=ratings,
attribute_names=["a","b"])
if build:
db = Database.create(
Config(num_threads=4),
os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/databases/db_simple",
[graph])
db.build()
db = Database.from_existing(os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/databases/db_simple")
lollipop_materialized(db)
#lollipop_materialized_sql(db)
barbell_materialized(db)
#barbell_materialized_sql(db)
if len(sys.argv) < 2:
check_big_out = True
#basically the main method down here.
start()
os.system("rm -rf "+os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/databases"+" && mkdir -p "+os.path.expandvars("$EMPTYHEADED_HOME")+"/test/graph/databases")
test_pruned()
test_duplicated()
test_simple()
stop()
|
EmptyHeaded-master
|
test/graph/travis.py
|
import sys
import os
import re
logdir = os.path.expandvars("$EMPTYHEADED_HOME") + "/logs"
def get_query_times(filename):
dataset = ""
queryname = ""
time = ""
writefile = open(logdir+"/"+filename + ".csv","w")
for line in open(filename+ ".log","r"):
matchObj = re.match(r'.*DATASET: (.*)', line, re.M|re.I)
if matchObj:
dataset = matchObj.group(1)
matchObj = re.match(r'.*QUERY: (.*)', line, re.M|re.I)
if matchObj:
queryname = matchObj.group(1)
        matchObj = re.match(r'.*Time\[QUERY TIME\]: (\d+\.\d+) s.*', line, re.M|re.I)
        if matchObj:
            time = matchObj.group(1)
if time != "" and queryname != "":
writefile.write(dataset + "," + queryname + "," + time + "\n")
queryname = ""
time = ""
    writefile.close()
    return -1.0
def main():
os.system("rm -rf " + logdir)
os.system("mkdir -p " + logdir)
get_query_times("regression")
if __name__ == "__main__": main()
|
EmptyHeaded-master
|
test/graph/parse.py
|
import time
import os
import argparse
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import Row
def test_aggregation_query(query, query_name):
if sql_context is not None:
print("\nTESTING {0}\nSPARK SQL\n".format(query_name) + "#" * 80)
result_set = sql_context.sql(query)
start = time.time()
count = result_set.collect()[0][0]
end = time.time()
print("ELAPSED TIME: {0} s".format(end - start))
print("FOUND {0}.".format(count))
def test_triangle_agg(table_name):
query = """
SELECT COUNT(*) FROM
{table_name} e1
JOIN {table_name} e2 ON e1.dst = e2.src
JOIN {table_name} e3 ON e1.src = e3.src AND e2.dst = e3.dst
""".format(table_name=table_name)
test_aggregation_query(query, "TRIANGLE_AGG")
def test_triangle_materialized(table_name):
query = """
SELECT e1.src, e1.dst, e3.dst FROM
{table_name} e1
JOIN {table_name} e2 ON e1.dst = e2.src
JOIN {table_name} e3 ON e1.src = e3.src AND e2.dst = e3.dst
""".format(table_name=table_name)
print("\nTESTING TRIANGLE MATERIALIZED\nSPARK SQL\n" + "#" * 80)
triangles = sql_context.sql(query)
    start = time.time()
    result = triangles.collect()[0]
    end = time.time()
print("ELAPSED TIME: {0} s".format(end - start))
print("FIRST TRIANGLE: {0}.".format(result))
def test_lollipop_agg(table_name):
query = """
SELECT COUNT(*)
FROM {table_name} e1
JOIN {table_name} e2 ON e1.dst = e2.src
JOIN {table_name} e3 ON e2.dst = e3.src AND e1.src = e3.dst
JOIN {table_name} e4 ON e1.src = e4.src
""".format(table_name=table_name)
test_aggregation_query(query, "LOLLIPOP_AGG")
def test_barbell_agg(table_name):
query = """
SELECT COUNT(*)
FROM {table_name} e1
JOIN {table_name} e2 ON e1.dst = e2.src
JOIN {table_name} e3 ON e2.dst = e3.src AND e3.dst = e1.src
JOIN {table_name} e4 ON e4.src = e1.dst
JOIN {table_name} e5 ON e5.src = e4.dst
JOIN {table_name} e6 ON e5.dst = e6.src
JOIN {table_name} e7 ON e6.dst = e7.src AND e7.dst = e5.src
""".format(table_name=table_name)
test_aggregation_query(query, "BARBELL_AGG")
def test_four_clique_agg_sel(table_name):
query = """
SELECT COUNT(*)
FROM {table_name} e1
JOIN {table_name} e2 ON e1.dst = e2.src
JOIN {table_name} e3 ON e2.dst = e3.dst AND e3.src = e1.src
JOIN {table_name} e4 ON e4.src = e1.src
JOIN {table_name} e5 ON e5.src = e1.dst AND e4.dst = e5.dst
JOIN {table_name} e6 ON e6.src = e2.dst AND e6.dst = e5.dst
JOIN {table_name} e7 ON e7.src = e1.src
WHERE e7.dst = 0
""".format(table_name=table_name)
test_aggregation_query(query, "FOUR_CLIQUE_AGG_SEL")
def test_four_clique_agg(table_name):
query = """
SELECT COUNT(*)
FROM {table_name} e1
JOIN {table_name} e2 ON e1.dst = e2.src
JOIN {table_name} e3 ON e2.dst = e3.dst AND e3.src = e1.src
JOIN {table_name} e4 ON e4.src = e1.src
JOIN {table_name} e5 ON e5.src = e1.dst AND e4.dst = e5.dst
JOIN {table_name} e6 ON e6.src = e2.dst AND e6.dst = e5.dst
""".format(table_name=table_name)
test_aggregation_query(query, "FOUR_CLIQUE_AGG")
def test_all_queries(path):
# Setup SparkSQL
lines = sc.textFile(path)
parts = lines.map(lambda l: l.split())
edges = parts.map(lambda p: Row(src=int(p[0]), dst=int(p[1])))
df = sql_context.createDataFrame(edges)
table_name = os.path.basename(path)[:-4]
df.registerTempTable(table_name)
test_triangle_agg(table_name)
test_triangle_materialized(table_name)
test_lollipop_agg(table_name)
test_barbell_agg(table_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Runs SparkSQL on graph datasets. Should work locally or"
" in a cluster."
)
parser.add_argument("test_dir")
args = parser.parse_args()
sc = SparkContext(appName="SparkSQL")
sql_context = SQLContext(sc)
print("\nTESTING SIMPLE")
test_all_queries(args.test_dir + "/simple.tsv")
print("\nTESTING DUPLICATED")
test_all_queries(args.test_dir + "/facebook_duplicated.tsv")
print("\nTESTING PRUNED")
test_all_queries(args.test_dir + "/facebook_pruned.tsv")
sc.stop()
|
EmptyHeaded-master
|
test/graph/spark_sql.py
|
import numpy as np
import pandas as pd
from emptyheaded import *
class ResultError(Exception):
pass
def lubm1(db):
lbm1 = \
"""
lubm1(a) :- b='http://www.Department0.University0.edu/GraduateCourse0',
c='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent',
takesCourse(a,b),rdftype(a,c).
"""
print "\nQUERY: LUBM 1"
db.eval(lbm1)
def lubm2(db):
lbm2 = \
"""
lubm2(a,b,c) :- x='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent',
y='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department',
z='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#University',
memberOf(a,b),subOrganizationOf(b,c),undergraduateDegreeFrom(a,c),rdftype(a,x),rdftype(b,y),rdftype(c,z).
"""
print "\nQUERY: LUBM 2"
db.eval(lbm2)
def lubm3(db):
lbm3 = \
"""
lubm3(a) :- b='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Publication',
c='http://www.Department0.University0.edu/AssistantProfessor0',
rdftype(a,b),publicationAuthor(a,c).
"""
print "\nQUERY: LUBM 3"
db.eval(lbm3)
def lubm4(db):
lbm4 = \
"""
lubm4(a,b,c,d) :- e='http://www.Department0.University0.edu',
f='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#AssociateProfessor',
worksFor(a,e),name(a,b),emailAddress(a,d),telephone(a,c),rdftype(a,f).
"""
print "\nQUERY: LUBM 4"
db.eval(lbm4)
def lubm5(db):
lbm5 = \
"""
lubm5(a) :- b='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent',
c='http://www.Department0.University0.edu',
rdftype(a,b),memberOf(a,c).
"""
print "\nQUERY: LUBM 5"
db.eval(lbm5)
def lubm7(db):
lbm7 = \
"""
lubm7(b,c) :- a='http://www.Department0.University0.edu/AssociateProfessor0',
d='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Course',
e='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent',
teacherOf(a,b),takesCourse(c,b),rdftype(b,d),rdftype(c,e).
"""
print "\nQUERY: LUBM 7"
db.eval(lbm7)
def lubm8(db):
lbm8 = \
"""
lubm8(a,b,c) :- d='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent',
e='http://www.University0.edu',
f='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department',
memberOf(a,b),emailAddress(a,c),rdftype(a,d),subOrganizationOf(b,e),rdftype(b,f).
"""
print "\nQUERY: LUBM 8"
db.eval(lbm8)
def lubm9(db):
lbm9 = \
"""
lubm9(a,b,c) :- x='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent',
y='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Course',
z='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#AssistantProfessor',
rdftype(a,x),rdftype(b,y),rdftype(c,z),advisor(a,c),teacherOf(c,b),takesCourse(a,b).
"""
print "\nQUERY: LUBM 9"
db.eval(lbm9)
def lubm11(db):
lbm11 = \
"""
lubm11(a) :- x='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#ResearchGroup',
y='http://www.University0.edu',
rdftype(a,x),subOrganizationOf(a,y).
"""
print "\nQUERY: LUBM 11"
db.eval(lbm11)
def lubm12(db):
lbm12 = \
"""
lubm12(a,b) :- c='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#FullProfessor',
d='http://www.University0.edu',
e='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department',
worksFor(b,a),rdftype(b,c),subOrganizationOf(a,d),rdftype(a,e).
"""
print "\nQUERY: LUBM 12"
db.eval(lbm12)
def lubm13(db):
lbm13 = \
"""
lubm13(a) :- x='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent',
y='http://www.University567.edu',
rdftype(a,x),undergraduateDegreeFrom(a,y).
"""
print "\nQUERY: LUBM 13"
db.eval(lbm13)
def lubm14(db):
lbm14 = \
"""
lubm14(a) :- b='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent',
rdftype(a,b).
"""
print "\nQUERY: LUBM 14"
db.eval(lbm14)
def test_lubm():
build = True
takesCourse = Relation(
name="takesCourse",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/takesCourse.tsv")
memberOf = Relation(
name="memberOf",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/memberOf.tsv")
advisor = Relation(
name="advisor",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/advisor.tsv")
publicationAuthor = Relation(
name="publicationAuthor",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/publicationAuthor.tsv")
subOrganizationOf = Relation(
name="subOrganizationOf",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/subOrganizationOf.tsv")
undergraduateDegreeFrom = Relation(
name="undergraduateDegreeFrom",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/undergraduateDegreeFrom.tsv")
rdftype = Relation(
name="rdftype",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/rdftype.tsv")
worksFor = Relation(
name="worksFor",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/worksFor.tsv")
name = Relation(
name="name",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/name.tsv")
emailAddress = Relation(
name="emailAddress",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/emailAddress.tsv")
telephone = Relation(
name="telephone",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/telephone.tsv")
teacherOf = Relation(
name="teacherOf",
schema=Schema(attributes=["string","string"]),
filename="/dfs/scratch0/caberger/datasets/eh_datasets/lubm10000/teacherOf.tsv")
if build:
db = Database.create(
Config(num_threads=56),
"/dfs/scratch0/caberger/datasets/eh_datasets/databases/db_lubm",
[ takesCourse,
memberOf,
subOrganizationOf,
advisor,
publicationAuthor,
undergraduateDegreeFrom,
rdftype,
worksFor,
name,
emailAddress,
telephone,
teacherOf ] )
db.build()
db = Database.from_existing("/dfs/scratch0/caberger/datasets/eh_datasets/databases/db_lubm")
lubm1(db)
lubm2(db)
lubm3(db)
lubm4(db)
lubm5(db)
lubm7(db)
lubm8(db)
lubm9(db)
lubm11(db)
lubm12(db)
lubm13(db)
lubm14(db)
#basically the main method down here.
start()
test_lubm()
stop()
|
EmptyHeaded-master
|
test/rdf/perf.py
|
import os
import numpy as np
import pandas as pd
from emptyheaded import *
class ResultError(Exception):
pass
def lubm1(db):
lubm1 = \
"""
lubm1(a) :- b='http://www.Department0.University0.edu/GraduateCourse0',
c='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent',
takesCourse(a,b),rdftype(a,c).
"""
print "\nLUBM 1"
db.eval(lubm1)
tri = db.get("lubm1")
if tri.num_rows != 4:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm1_sql(db):
lubm1 = \
"""
CREATE TABLE lubm1SQL AS (
SELECT tc.a FROM takesCourse tc JOIN rdftype r ON tc.a = r.a
WHERE tc.b = 'http://www.Department0.University0.edu/GraduateCourse0' AND
r.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent'
)
"""
print "\nLUBM 1 SQL"
db.eval(lubm1, useSql=True)
tri = db.get("lubm1SQL")
if tri.num_rows != 4:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm2(db):
lubm2 = \
"""
lubm2(a,b,c) :- x='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent',
y='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department',
z='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#University',
memberOf(a,b),subOrganizationOf(b,c),undegraduateDegreeFrom(a,c),rdftype(a,x),rdftype(b,y),rdftype(c,z).
"""
print "\nLUBM 2"
db.eval(lubm2)
tri = db.get("lubm2")
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm2_sql(db):
lubm2 = \
"""
CREATE TABLE lubm2SQL AS (
SELECT mo.a, soo.a, udf.b FROM
memberOf mo
JOIN subOrganizationOf soo ON mo.b = soo.a
JOIN undegraduateDegreeFrom udf ON mo.a = udf.a AND soo.b = udf.b
JOIN rdftype r1 ON mo.a = r1.a
JOIN rdftype r2 ON mo.b = r2.a
JOIN rdftype r3 ON soo.b = r3.a
WHERE r1.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent' AND
r2.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department' AND
r3.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#University'
)
"""
print "\nLUBM 2 SQL"
db.eval(lubm2, useSql=True)
tri = db.get("lubm2SQL")
if tri.num_rows != 0:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm4(db):
lubm4 = \
"""
lubm4(a,b,c,d) :- e='http://www.Department0.University0.edu',
f='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#AssociateProfessor',
worksFor(a,e),name(a,b),emailAddress(a,d),telephone(a,c),rdftype(a,f).
"""
print "\nLUBM 4"
db.eval(lubm4)
tri = db.get("lubm4")
if tri.num_rows != 14:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm4_sql(db):
lubm4 = \
"""
CREATE TABLE lubm4SQL AS (
SELECT wf.a, n.b, t.b, ea.b FROM
worksFor wf
JOIN name n ON wf.a = n.a
JOIN emailAddress ea ON wf.a = ea.a
JOIN telephone t ON wf.a = t.a
JOIN rdftype r ON wf.a = r.a
WHERE wf.b = 'http://www.Department0.University0.edu' AND
r.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#AssociateProfessor'
)
"""
print "\nLUBM 4 SQL"
db.eval(lubm4, useSql=True)
tri = db.get("lubm4SQL")
if tri.num_rows != 14:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm6(db):
lubm6 = \
"""
lubm6(a) :- b='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent',
rdftype(a,b).
"""
print "\nLUBM 6"
db.eval(lubm6)
tri = db.get("lubm6")
if tri.num_rows != 5916:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm6_sql(db):
lubm6 = \
"""
CREATE TABLE lubm6SQL AS (
SELECT r.a FROM rdftype r
WHERE r.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent'
)
"""
print "\nLUBM 6 SQL"
db.eval(lubm6, useSql=True)
tri = db.get("lubm6SQL")
if tri.num_rows != 5916:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm7(db):
lubm7 = \
"""
lubm7(a,b) :- c='http://www.Department0.University0.edu/AssociateProfessor0',
d='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Course',
e='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent',
teacherOf(c,b),takesCourse(a,b),rdftype(b,d),rdftype(a,e).
"""
print "\nLUBM 7"
db.eval(lubm7)
tri = db.get("lubm7")
if tri.num_rows != 59:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm7_sql(db):
lubm7 = \
"""
CREATE TABLE lubm7SQL AS (
SELECT tc.a, tc.b FROM teacherOf to
JOIN takesCourse tc ON to.b = tc.b
JOIN rdftype r1 ON to.b = r1.a
JOIN rdftype r2 ON tc.a = r2.a
WHERE to.a = 'http://www.Department0.University0.edu/AssociateProfessor0' AND
r1.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Course' AND
r2.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent'
)
"""
print "\nLUBM 7 SQL"
db.eval(lubm7, useSql=True)
tri = db.get("lubm7SQL")
if tri.num_rows != 59:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm8(db):
lubm8 = \
"""
lubm8(a,b,c) :- d='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent',
e='http://www.University0.edu',
f='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department',
memberOf(a,b),emailAddress(a,c),rdftype(a,d),subOrganizationOf(b,e),rdftype(b,f).
"""
print "\nLUBM 8"
db.eval(lubm8)
tri = db.get("lubm8")
if tri.num_rows != 5916:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm8_sql(db):
lubm8 = \
"""
CREATE TABLE lubm8SQL AS (
SELECT mo.a, mo.b, ea.b FROM memberOf mo
JOIN emailAddress ea ON mo.a = ea.a
JOIN rdftype r1 ON mo.a = r1.a
JOIN subOrganizationOf soo ON mo.b = soo.a
JOIN rdftype r2 ON soo.a = r2.a
WHERE r1.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#UndergraduateStudent' AND
soo.b = 'http://www.University0.edu' AND
r2.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department'
)
"""
print "\nLUBM 8 SQL"
db.eval(lubm8, useSql=True)
tri = db.get("lubm8SQL")
if tri.num_rows != 5916:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm12(db):
lubm12 = \
"""
lubm12(a,b) :- c='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#FullProfessor',
d='http://www.University0.edu',
e='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department',
worksFor(a,b),rdftype(a,c),subOrganizationOf(b,d),rdftype(b,e).
"""
print "\nLUBM 12"
db.eval(lubm12)
tri = db.get("lubm12")
if tri.num_rows != 125:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def lubm12_sql(db):
lubm12 = \
"""
CREATE TABLE lubm12SQL AS (
SELECT wf.a, wf.b FROM worksFor wf
JOIN rdftype r1 ON wf.a = r1.a
JOIN subOrganizationOf soo ON wf.b = soo.a
JOIN rdftype r2 ON r2.a = soo.a
WHERE r1.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#FullProfessor' AND
soo.b = 'http://www.University0.edu' AND
r2.b = 'http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department'
)
"""
print "\nLUBM 12 SQL"
db.eval(lubm12, useSql=True)
tri = db.get("lubm12SQL")
if tri.num_rows != 125:
raise ResultError("NUMBER OF ROWS INCORRECT: " + str(tri.num_rows))
def test_lubm():
build = True
takesCourse = Relation(
name="takesCourse",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/takesCourse.tsv")
memberOf = Relation(
name="memberOf",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/memberOf.tsv")
subOrganizationOf = Relation(
name="subOrganizationOf",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/subOrganizationOf.tsv")
undegraduateDegreeFrom = Relation(
name="undegraduateDegreeFrom",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/undergraduateDegreeFrom.tsv")
rdftype = Relation(
name="rdftype",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/rdftype.tsv")
worksFor = Relation(
name="worksFor",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/worksFor.tsv")
name = Relation(
name="name",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/name.tsv")
emailAddress = Relation(
name="emailAddress",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/emailAddress.tsv")
telephone = Relation(
name="telephone",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/telephone.tsv")
teacherOf = Relation(
name="teacherOf",
schema=Schema(attributes=["string","string"], attribute_names=["a", "b"]),
filename=os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/data/teacherOf.tsv")
if build:
db = Database.create(
Config(num_threads=4),
os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/databases/db_lubm1",
[ takesCourse,
memberOf,
subOrganizationOf,
undegraduateDegreeFrom,
rdftype,
worksFor,
name,
emailAddress,
telephone,
teacherOf ] )
db.build()
db = Database.from_existing(os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/databases/db_lubm1")
lubm1(db)
lubm1_sql(db)
lubm2(db)
lubm2_sql(db)
lubm4(db)
lubm4_sql(db)
lubm6(db)
lubm6_sql(db)
lubm7(db)
lubm7_sql(db)
lubm8(db)
lubm8_sql(db)
lubm12(db)
lubm12_sql(db)
# Main method: rebuild the database directory, then run all LUBM tests.
start()
os.system("rm -rf "+os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/databases"+" && mkdir -p "+os.path.expandvars("$EMPTYHEADED_HOME")+"/test/rdf/databases")
test_lubm()
stop()
|
EmptyHeaded-master
|
test/rdf/travis.py
|
import numpy as np
import pandas as pd
from emptyheaded import *
def triangle():
return datalog("""
Triangle(a,b,c) :- Edge(a,b),Edge(b,c),Edge(a,c).
""").ir
def triangle_counting():
return datalog("""
Triangle(a;z) :- Edge(a,b),Edge(b,c),Edge(a,c),z:uint64<-[COUNT(b,c)].
""").ir
def triangle_agg():
return datalog("""
TriangleAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),z:uint64<-[COUNT(*)].
""").ir
def fourclique():
return datalog("""
Flique(a,b,c,d) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d).
""").ir
def fourclique_agg():
return datalog("""
FliqueAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),z:uint64<-[COUNT(*)].
""").ir
def fourclique_sel_agg():
return datalog("""
FliqueSelAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),Edge(a,x),x=0,z:uint64<-[COUNT(*)].
""").ir
def fourclique_sel():
return datalog("""
FliqueSel(a,b,c,d) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),Edge(a,x),x=0.
""").ir
def pagerank():
return datalog("""
N(;w) :- Edge(x,y),w:uint64<-[SUM(x;1)].
PageRank(x;y) :- Edge(x,z),y:float32<-[(1.0/N)].
PageRank(x;y)*[i=5]:-Edge(x,z),PageRank(z),InvDegree(z),y:float32 <- [0.15+0.85*SUM(z;1.0)].
""").ir
def lubm1():
return datalog("""
lubm1(a) :-
takesCourse(a,b),
b='http://www.Department0.University0.edu/GraduateCourse0',
rdftype(a,c),
c='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent'.
""").ir
#### Main method below
start() #spin up the JVM
queries = {
"triangle":triangle,
"triangle_counting":triangle_counting,
"pagerank":pagerank,
"lubm1":lubm1,
"triangle_agg":triangle_agg,
"fourclique":fourclique,
"fourclique_agg":fourclique_agg,
"fourclique_sel":fourclique_sel,
"fourclique_sel_agg":fourclique_sel_agg
}
for query in queries:
print "\n\n"+query
ir = queries[query]()
for rule in ir.rules:
print rule
stop() #tear down the JVM
|
EmptyHeaded-master
|
python/test_parser.py
|
import numpy as np
import pandas as pd
from emptyheaded import *
def triangle():
return optimize("""
Triangle(a,b,c) :- Edge(a,b),Edge(b,c),Edge(a,c).
""").ir
def triangle_counting():
return optimize("""
Triangle(a;z) :- Edge(a,b),Edge(b,c),Edge(a,c),z:long<-[COUNT(b,c)].
""").ir
def triangle_agg():
return optimize("""
TriangleAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),z:long<-[COUNT(*)].
""").ir
def fourclique():
return optimize("""
Flique(a,b,c,d) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d).
""").ir
def fourclique_agg():
return optimize("""
FliqueAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),z:long<-[COUNT(*)].
""").ir
def fourclique_sel_agg():
return optimize("""
FliqueSelAgg(;z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),Edge(a,x),x=0,z:long<-[COUNT(*)].
""").ir
def fourclique_sel():
return optimize("""
FliqueSel(a,b,c,d) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,d),Edge(b,d),Edge(c,d),Edge(a,x),x=0.
""").ir
def barbell_sel():
return optimize("""
BarbellSel(a,b,c,x,y,z) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,p),Edge(p,x),Edge(x,y),Edge(y,z),Edge(x,z),p=0.
""").ir
def barbell_agg():
return optimize("""
BarbellAgg(;w) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,x),Edge(x,y),Edge(y,z),Edge(x,z),w:long<-[COUNT(*)].
""").ir
def barbell_sel_agg():
return optimize("""
BarbellSelAgg(;w) :- Edge(a,b),Edge(b,c),Edge(a,c),Edge(a,p),Edge(p,x),Edge(x,y),Edge(y,z),Edge(x,z),p=0,w:long<-[COUNT(*)].
""").ir
def pagerank():
return optimize("""
N(;w) :- Edge(x,y),w:long<-[SUM(x;1)].
PageRank(x;y) :- Edge(x,z),y:float<-[(1.0/N)].
PageRank(x;y)*[i=5]:-Edge(x,z),PageRank(z),InvDegree(z),y:float <- [0.15+0.85*SUM(z;1.0)].
""").ir
def lubm1():
return optimize("""
lubm1(a) :-
takesCourse(a,b),
b='http://www.Department0.University0.edu/GraduateCourse0',
rdftype(a,c),
c='http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent'.
""").ir
#### Main method below
start() #spin up the JVM
queries = {
"triangle":triangle,
"triangle_counting":triangle_counting,
"pagerank":pagerank,
"lubm1":lubm1,
"triangle_agg":triangle_agg,
"fourclique":fourclique,
"fourclique_agg":fourclique_agg,
"fourclique_sel":fourclique_sel,
"fourclique_sel_agg":fourclique_sel_agg,
"barbell_sel":barbell_sel,
"barbell_agg":barbell_agg,
"barbell_sel_agg":barbell_sel_agg
}
for query in queries:
print "\n\n"+query
ir = queries[query]()
for rule in ir.rules:
print rule
stop() #tear down the JVM
|
EmptyHeaded-master
|
python/test_optimizer.py
|
## Stores the relations. Each relation has:
## 1) A file (csv or tsv) the data comes from
## 2) A schema
## IMPORTANT
## The order the types are specified in the schema must be
## the same as the order in the CSV or TSV file. The annotations
## (if specified) must come last.
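## Example (illustrative sketch; the relation name and file path are hypothetical):
## edge = Relation(name="Edge",
## schema=Schema(attributes=["string","string"], attribute_names=["a","b"]),
## filename="data/edge.tsv")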
from schema import Schema
import pandas as pd
class Relation:
def __init__(self,name,schema=Schema([]),filename="",dataframe=pd.DataFrame(),check=True,attribute_names=[],annotation_names=[]):
# Replace int64 with int32. TODO: Make int64 work?
int64_cols = [
col for col, dtype in dataframe.dtypes.to_dict().items()
if dtype == "int64"
]
dataframe[int64_cols] = dataframe[int64_cols].astype(pd.np.int32)
self.name = name #name of the relation
self.schema = schema #schema of relation
self.filename = filename #file relation comes from
self.df = not dataframe.empty
self.dataframe = dataframe
if not dataframe.empty:
self.schema = Schema.fromDF(dataframe,attribute_names,annotation_names)
if check:
if dataframe.empty and filename == "":
raise Exception("Relation "+self.name+" needs a filename (either a file or dataframe).")
def python2java(self,duncecap):
self.javaschema = self.schema.python2java(duncecap)
return duncecap.Relation(self.name,self.javaschema,str(self.filename),self.df)
@staticmethod
def java2python(jobject):
name = jobject.getName()
filename = jobject.getFilename()
schema = Schema.java2python(jobject.getSchema())
df = jobject.getDF()
return Relation(name=name,schema=schema,filename=filename,check=False)
# for printing purposes
def __repr__(self):
return """(%s,%s,%s)""" % (self.name,self.schema,self.filename)
|
EmptyHeaded-master
|
python/relation.py
|
import sys
from schema import Schema
from relation import Relation
from database import Database
from config import Config
from parsers import *
from ir import *
import glob
import jpype
import os
import numpy as np
import pandas as pd
#launch the JVM
def start():
ehhome = os.path.expandvars("$EMPTYHEADED_HOME")
jars = (":").join(glob.glob(ehhome+"/query_compiler/target/pack/lib/*.jar"))
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.class.path="+jars)
#kill the JVM
def stop():
jpype.shutdownJVM()
if __name__ == "__main__": main(sys.argv[1:])
|
EmptyHeaded-master
|
python/emptyheaded.py
|
## Stores the configuration of the database
class Config:
def __init__(self,system="emptyheaded",num_threads=1,num_sockets=4,layout="hybrid",memory="RAM"):
self.system = system # emptyheaded/delite/spark
self.num_threads = num_threads
self.num_sockets = num_sockets
self.layout = layout #EmptyHeaded only
self.memory = memory #EmptyHeaded only
@staticmethod
def java2python(jobject):
self = Config()
self.num_threads = jobject.getNumThreads()
self.num_sockets = jobject.getNumSockets()
self.layout = jobject.getLayout()
self.memory = jobject.getMemory()
return self
#for printing purposes
def __repr__(self):
return """(system: %s, num_threads: %s,
\t\tnum_sockets: %s, layout: %s, memory: %s)"""\
% (self.system, \
str(self.num_threads), \
str(self.num_sockets), \
self.layout,self.memory)
|
EmptyHeaded-master
|
python/config.py
|
## High-level class to store the database
## The database contains a filename and relations
## Relations contain schemas
## Database -> Relations -> Schema
## Only one database should be created per python process.
## Database spins up the JVM which serves as our Query Compiler.
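## Example (sketch; assumes a Relation named "Edge" and a writable database folder):
## db = Database.create(Config(num_threads=4), "/path/to/db", [edge])
## db.build()
## db.eval("Triangle(a,b,c) :- Edge(a,b),Edge(b,c),Edge(a,c).")
## tri = db.get("Triangle")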
import jpype
from config import Config
from parsers import *
from relation import Relation
from DB import DB
import os
import time
from sys import platform as _platform
dbhash = 0
class Database:
#pulls the data from EH into a pandas dataframe
def get(self,name):
#code generation
self.qc.genTrieWrapper(name)
#execution
fname = self.folder+"/libs/trie_"+name
os.system("cd "+fname+" && ./build.sh >compilation.log 2>&1 && cd - > /dev/null")
return self.backend.get(name)
def load(self,name):
#code generation
self.qc.genTrieWrapper(name)
#execution
fname = self.folder+"/libs/trie_"+name
os.system("cd "+fname+" && ./build.sh >compilation.log 2>&1 && cd - > /dev/null")
self.backend.load(name)
#parses, codegens, and runs
def sql(self,sql):
self.qc.sql(sql)
execute("Query")
#parses, codegens, and runs
def datalog(self,datalog):
self.qc.datalog(datalog)
execute("Query")
#runs GHD optimizer
#returns an IR
def optimize(self,datalog):
return IR.java2python(self.qc.optimize(datalog))
def eval(self, query, useSql=False):
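# Codegen the query into a unique timestamped folder, compile it, then execute it in the backend.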
global dbhash
folder = time.strftime("D%d_%m_%Y_T%H_%M_%S") + "_Q" + str(self.dbhash)
num = self.qc.generate(query, str(self.dbhash), folder, useSql)
os.system("""cd """+self.folder+"""/libs/"""+folder+""" && ./build.sh >compilation.log 2>&1 && cd - > /dev/null""")
self.backend.evaluate(self.relations,folder,str(self.dbhash),num)
self.dbhash += num
#should return a (Trie,name,ordering)
def execute(self, filename):
print "compiles and executes query"
def compile_backend(self):
#compilation (compiles EH backend and the creation query)
storage_engine = os.environ['EMPTYHEADED_HOME']+"/storage_engine"
#clean up the previous build
os.system("""rm -rf """+storage_engine+"""/build && mkdir """+storage_engine+"""/build""")
#make the backend
buildtools=""
# linux we use gcc 5
if _platform == "linux" or _platform == "linux2":
buildtools=" -DCMAKE_C_COMPILER=gcc-5 -DCMAKE_CXX_COMPILER=g++-5 "
cmd="""cd """+storage_engine+"/build && cmake -DNUM_THREADS="+str(self.config.num_threads)+buildtools+" .. && make && cd - > /dev/null"
print cmd
os.system(cmd)
#Build the db in backend and save to disk
def build(self):
#Save our schemas to disk (so we can run from_existing)
self.qc.toDisk()
#code generation
self.qc.createDB()
#compile the generated load query
self.compile_backend()
os.system("""cd """+self.folder+"""/libs/createDB && ./build.sh >compilation.log 2>&1 && cd - > /dev/null""")
#execution
self.backend.create(self.relations,str(self.dbhash))
#reads files from an existing database on disk
#takes java QC and translates it to respective python classes
@staticmethod
def from_existing(folder):
self = Database()
self.duncecap = jpype.JPackage('duncecap')
self.folder = folder #string
self.qc = self.duncecap.QueryCompiler.fromDisk(self.folder+"/schema.bin")
dbInstance = self.qc.getDBInstance()
self.folder = dbInstance.getFolder()
self.config = Config.java2python(dbInstance.getConfig())
num_relations = dbInstance.getNumRelations()
self.relations = []
for i in range(0,num_relations):
self.relations.append(
Relation.java2python(dbInstance.getRelation(i)))
self.backend = DB(self.folder)
self.dbhash = dbhash
return self
#create a database from scratch
@staticmethod
def create(config,folder,relations):
global dbhash
self = Database()
self.folder = folder #string
self.relations = relations #list of Relation (relation.py)
self.duncecap = jpype.JPackage('duncecap')
self.config = config # Config.py
javaConfig = self.duncecap.Config(
config.system,
config.num_threads,
config.num_sockets,
config.layout,
config.memory)
javaDB = self.duncecap.DBInstance(folder,javaConfig)
for relation in relations:
javaDB.addRelation(relation.python2java(self.duncecap))
self.dbhash = dbhash
self.qc = self.duncecap.QueryCompiler(javaDB,str(dbhash))
#execution
self.backend = DB(self.folder)
return self
#for printing purposes
def __repr__(self):
return """Database< liverelations:%s \n\t folder:%s
\t Config%s
\t Relations%s >""" % (self.liverelations,self.folder,self.config,self.relations)
|
EmptyHeaded-master
|
python/database.py
|
## Contains the bridge for each front-end parser.
## The parser potentially spins up the JVM,
## creates the respective object in Scala,
## and sends the query string to it, which returns an IR.
import jpype
from ir import *
class Parser:
def __init__(self):
self.duncecap = jpype.JPackage('duncecap')
class sql(Parser):
def __init__(self,query):
Parser.__init__(self)
self.jir = self.duncecap.SQL(query).parse()
self.ir = IR.java2python(self.jir)
class datalog(Parser):
def __init__(self,query):
Parser.__init__(self)
self.jir = self.duncecap.Datalog(query).parse()
self.ir = IR.java2python(self.jir)
class optimize(Parser):
def __init__(self,query):
Parser.__init__(self)
self.jir = self.duncecap.Datalog(query).parse()
self.jir = self.duncecap.QueryPlanner.findOptimizedPlans(self.jir)
self.ir = IR.java2python(self.jir)
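## Usage sketch (illustrative; assumes the JVM was started via emptyheaded.start()):
## ir = datalog("Triangle(a,b,c) :- Edge(a,b),Edge(b,c),Edge(a,c).").ir
## plan = optimize("Triangle(a,b,c) :- Edge(a,b),Edge(b,c),Edge(a,c).").ir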
|
EmptyHeaded-master
|
python/parsers.py
|
## Maintains the interface for the intermediate representation
## This can be sent in and out of both code generators and
## the GHD optimizer.
import jpype
def strip_unicode(values):
return [str(x) for x in values]
# Convenience class for expressing relations
class RELATION:
def __init__(self,name,attributes,annotations=[],scaling=1):
self.name = name
self.attributes = attributes
self.annotations = annotations
self.scaling = scaling
def __repr__(self):
return """[%s %s,%s]""" % (self.name,\
self.attributes,\
self.annotations)
class IR:
def __init__(self,rules):
if not isinstance(rules[0],RULE):
raise Exception("IR type incorrect (list of RULES).")
self.rules = rules
self.duncecap = jpype.JPackage('duncecap')
def python2java(self):
irbuilder = self.duncecap.IRBuilder()
for r in self.rules:
irbuilder.addRule(r.python2java())
return irbuilder.build()
@staticmethod
def java2python(jobject):
rules = []
nRules = jobject.getNumRules()
for i in range(0,nRules):
rules.append(RULE.java2python(jobject.getRule(i)))
return IR(rules)
#The top level class
class RULE:
def __init__(self,
result,
recursion,
operation,
order,
project,
join,
aggregates,
filters,
):
if not isinstance(result,RESULT) or \
not isinstance(recursion,RECURSION) or \
not isinstance(operation,OPERATION) or \
not isinstance(order,ORDER) or \
not isinstance(project,PROJECT) or \
not isinstance(join,JOIN) or \
not isinstance(aggregates,AGGREGATES) or \
not isinstance(filters,FILTERS):
raise Exception("Types not correct for IR.")
self.duncecap = jpype.JPackage('duncecap')
self.result = result
self.recursion = recursion
self.operation = operation
self.order = order
self.project = project
self.join = join
self.aggregates = aggregates
self.filters = filters
def python2java(self):
javaResult = self.result.python2java(self.duncecap)
javaRecursion = self.recursion.python2java(self.duncecap)
javaOperation = self.operation.python2java(self.duncecap)
javaOrder = self.order.python2java(self.duncecap)
javaProject = self.project.python2java(self.duncecap)
javaJoin = self.join.python2java(self.duncecap)
javaAgg = self.aggregates.python2java(self.duncecap)
javaFilter = self.filters.python2java(self.duncecap)
return self.duncecap.Rule(
javaResult,
javaRecursion,
javaOperation,
javaOrder,
javaProject,
javaJoin,
javaAgg,
javaFilter)
@staticmethod
def java2python(jobject):
result = RESULT.java2python(jobject.getResult())
recursion = RECURSION.java2python(jobject.getRecursion())
operation = OPERATION.java2python(jobject.getOperation())
order = ORDER.java2python(jobject.getOrder())
project = PROJECT.java2python(jobject.getProject())
join = JOIN.java2python(jobject.getJoin())
filters = FILTERS.java2python(jobject.getFilters())
aggregates = AGGREGATES.java2python(jobject.getAggregations())
return RULE(result,recursion,operation,order,project,join,aggregates,filters)
def __repr__(self):
return """RULE :-\t %s \n\t %s \n\t %s \n\t %s \n\t %s \n\t %s \n\t %s \n\t %s>""" \
% (self.result,\
self.recursion,\
self.operation,\
self.order,\
self.project,\
self.join,\
self.aggregates,\
self.filters)
#Relation for the result
class RESULT:
def __init__(self,rel,isIntermediate):
self.rel = rel
self.isIntermediate = isIntermediate
def python2java(self,duncecap):
return duncecap.Result(duncecap.IR.buildRel(self.rel.name,
strip_unicode(self.rel.attributes),
strip_unicode(self.rel.annotations)), self.isIntermediate)
@staticmethod
def java2python(jobject):
return RESULT(RELATION(
jobject.getRel().getName(),
strip_unicode(jobject.getRel().getAttributes()),
strip_unicode(jobject.getRel().getAnnotations())), jobject.getIsIntermediate())
def __repr__(self):
return """RESULT: %s %s""" % (self.rel, self.isIntermediate)
#Holder for recursive statements
class RECURSION:
def __init__(self,criteria="",operation="",value=""):
self.criteria = criteria
self.operation = operation
self.value = value
def python2java(self,duncecap):
return duncecap.RecursionBuilder().build(
self.criteria,
self.operation,
self.value)
@staticmethod
def java2python(jobject):
if jobject.isEmpty():
return RECURSION()
else:
newobj = jobject.get()
return RECURSION(
newobj.getCriteria(),
newobj.getOperation(),
newobj.getValue())
def __repr__(self):
return """RECURSION: %s %s %s """ % (self.criteria,self.operation,self.value)
# Order in which attributes are processed in NPRR
class ORDER:
def __init__(self,attributes):
self.attributes = attributes
def python2java(self,duncecap):
return duncecap.Order(
duncecap.IR.buildAttributes(self.attributes))
@staticmethod
def java2python(jobject):
return ORDER(strip_unicode(jobject.getAttributes()))
def __repr__(self):
return """ORDER: %s""" % (self.attributes)
#Attributes that are projected away.
class PROJECT:
def __init__(self,attributes=[]):
self.attributes = attributes
def python2java(self,duncecap):
return duncecap.Project(
duncecap.IR.buildAttributes(self.attributes))
@staticmethod
def java2python(jobject):
return PROJECT(strip_unicode(jobject.getAttributes()))
def __repr__(self):
return """PROJECT: %s""" % (self.attributes)
#Join operation for aggregations
class OPERATION:
def __init__(self,operation):
self.operation = operation
def python2java(self,duncecap):
return duncecap.Operation(self.operation)
@staticmethod
def java2python(jobject):
return OPERATION(str(jobject.getOperation()))
def __repr__(self):
return """OPERATION: %s""" % (self.operation)
#Relations and attributes that are joined.
class JOIN:
def __init__(self,relations):
self.relations = relations
def python2java(self,duncecap):
joinBuilder = duncecap.JoinBuilder()
for rel in self.relations:
joinBuilder.addRel(rel.name,rel.attributes,rel.annotations)
return joinBuilder.build()
@staticmethod
def java2python(jobject):
nRels = jobject.getNumRels()
relations = []
for i in range(0,nRels):
relations.append(RELATION(
jobject.getRel(i).getName(),
strip_unicode(jobject.getRel(i).getAttributes()),
strip_unicode(jobject.getRel(i).getAnnotations())))
return JOIN(relations)
def __repr__(self):
return """JOIN: %s""" % (self.relations)
class SELECT:
def __init__(self,attribute,operation,value):
self.attribute = attribute
self.operation = operation
self.value = value
def python2java(self,filterBuilder):
return filterBuilder.buildSelection(self.attribute,self.operation,self.value)
@staticmethod
def java2python(jobject):
return SELECT(jobject.getAttr(),jobject.getOperation(),jobject.getValue())
def __repr__(self):
return """SELECT(%s,%s,%s)""" % (self.attribute,self.operation,self.value)
# Selection predicates applied to attributes.
class FILTERS:
def __init__(self,filters=[]):
if filters:
if not isinstance(filters[0],SELECT):
raise Exception("Filters types incorrect.")
self.filters = filters
def python2java(self,duncecap):
filterBuilder = duncecap.FilterBuilder()
for f in self.filters:
filterBuilder.addSelection(f.python2java(filterBuilder))
return filterBuilder.build()
@staticmethod
def java2python(jobject):
nFilters = jobject.getNumFilters()
filters = []
for i in range(0,nFilters):
filters.append(SELECT.java2python(jobject.getSelect(i)))
return FILTERS(filters)
def __repr__(self):
return """FILTERS: %s""" % (self.filters)
#Aggregations over attributes.
class AGGREGATE:
def __init__(self,annotation="",datatype="",operation="",attributes=[],init="",expression="",usedScalars=[]):
self.annotation = annotation
self.datatype = datatype
self.operation = operation
self.attributes = attributes
self.init = init
self.expression = expression
self.usedScalars = usedScalars
def __repr__(self):
return """(%s,%s,%s,%s,%s,%s,%s)""" % (
self.annotation,
self.datatype,
self.operation,
self.attributes,
self.init,
self.expression,
[RELATION(
jobject.getName(),
strip_unicode(jobject.getAttributes()),
strip_unicode(jobject.getAnnotations())) for jobject in self.usedScalars])
class AGGREGATES:
def __init__(self,aggregates):
if aggregates:
if not isinstance(aggregates[0],AGGREGATE):
raise Exception("Aggregate types incorrect.")
self.aggregates = aggregates
def python2java(self,duncecap):
aggBuilder = duncecap.AggregationsBuilder()
for agg in self.aggregates:
aggBuilder.addAggregation(
agg.annotation,
agg.datatype,
agg.operation,
agg.attributes,
agg.init,
agg.expression,
agg.usedScalars)
return aggBuilder.build()
@staticmethod
def java2python(jobject):
nAggs = jobject.getNumAggregations()
aggs = []
for i in range(0,nAggs):
aggs.append(AGGREGATE(
jobject.getAnnotation(i),
jobject.getDatatype(i),
jobject.getOperation(i),
strip_unicode(jobject.getAttributes(i)),
jobject.getInit(i),
jobject.getExpression(i),
jobject.getDependedOnRels(i)))
return AGGREGATES(aggs)
def __repr__(self):
return """AGGREGATES: %s""" % (self.aggregates)
|
EmptyHeaded-master
|
python/ir.py
|
## Stores the schemas for each relation.
## Enables users to define a schema.
## Note: defining a schema does not add a
## relation to the database. A user must define
## all schemas they wish to add to a database
## before creating the database. Once the database
## is created they can execute queries over the respective
## schemas.
## The order the types are specified in the schema must be
## the same as the order in the CSV or TSV file. The annotations
## (if specified) must come last.
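## Example (sketch; names are hypothetical): two string attributes, one float annotation.
## s = Schema(attributes=["string","string"], annotations=["float32"],
## attribute_names=["a","b"], annotation_names=["weight"])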
import numpy as np
from sets import Set
# Map from dataframe type to EH-accepted types.
# The QC check occurs in Schema.scala; we also check in DFMap when we pass the array.
# The difference is that here we figure out the annotations.
map_types = {
"int32":np.int32,
"int64":np.int64,
"uint32":np.uint32,
"uint64":np.uint64,
"float32":np.float32,
"float64":np.float64
}
def strip_unicode(values):
return [str(x.name) for x in values]
def wrap_strings(values):
return [np.dtype(map_types[str(x)]) for x in values]
def wrap_types(values):
return [np.dtype(x) for x in values]
class Schema:
def python2java(self,duncecap):
return duncecap.QueryCompiler.buildSchema(
strip_unicode(self.attributes),
strip_unicode(self.annotations),
self.attribute_names,
self.annotation_names)
@staticmethod
def java2python(jobject):
attribute_types = wrap_types(jobject.getAttributeTypes())
annotation_types = wrap_types(jobject.getAnnotationTypes())
return Schema(attribute_types,annotation_types)
def __init__(self,attributes,annotations=[],attribute_names=[],annotation_names=[]):
self.attributes = wrap_types(attributes)
self.annotations = wrap_types(annotations)
self.attribute_names = attribute_names
self.annotation_names = annotation_names
@staticmethod
def fromDF(df, attribute_names=[], annotation_names=[]):
# DataFrame column names for annotations must start with "a_"
pattern = r'^a_.*'
typesSeries = df.dtypes
found = typesSeries.index.str.contains(pattern)
notfound = map(lambda x: not x, found)
attribute_types = []
for a in typesSeries[notfound]:
if str(a.name) not in map_types:
raise Exception("DataFrame attribute type error. Type " + str(a.name) + " not accepted.")
attribute_types.append(a)
annotation_types = []
for a in typesSeries[found]:
if str(a.name) not in map_types:
raise Exception("DataFrame annotation type error. Type " + str(a.name) + " not accepted.")
annotation_types.append(a)
return Schema(attribute_types,annotation_types,attribute_names,annotation_names)
#for printing purposes
def __repr__(self):
return """(%s,%s)""" % (self.attributes,self.annotations)
|
EmptyHeaded-master
|
python/schema.py
|
import os
import platform
import sys
import numpy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
EH_PATH=os.path.expandvars("$EMPTYHEADED_HOME")
if platform.uname()[0] == "Darwin":
clibs = ["-arch","x86_64","-mavx",'-Wno-unused-function',
'-stdlib=libc++',
'-std=c++11',
'-mmacosx-version-min=10.8',]
largs = ["-arch","x86_64"]
else:
clibs = ["-std=c++0x"]
largs = ["-Wl,-rpath="+EH_PATH+"/storage_engine/build/lib","-Wl,--Bshareable"]
os.environ["CC"] = "g++-5"
os.environ["CXX"] = "g++-5"
extensions = [
Extension(
"#DFMap#",
["#DFMap#.pyx"],
libraries = ["emptyheaded"],
library_dirs = [EH_PATH+"/storage_engine/build/lib"],
include_dirs = [EH_PATH+"/storage_engine/include",numpy.get_include()],
extra_compile_args = clibs,
extra_link_args = largs,
language="c++"
)
]
setup(
name='#PTrie#',
ext_modules=cythonize(
extensions,
)
)
|
EmptyHeaded-master
|
cython/createDB/setup.py
|
import os
import platform
import sys
import numpy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
EH_PATH=os.path.expandvars("$EMPTYHEADED_HOME")
if platform.uname()[0] == "Darwin":
clibs = ["-arch","x86_64","-mavx",'-Wno-unused-function',
'-stdlib=libc++',
'-std=c++11',
'-mmacosx-version-min=10.8',]
largs = ["-arch","x86_64"]
else:
clibs = ["-std=c++0x"]
largs = ["-Wl,-rpath="+EH_PATH+"/storage_engine/build/lib","-Wl,--Bshareable"]
os.environ["CC"] = "g++-5"
os.environ["CXX"] = "g++-5"
extensions = [
Extension(
"#PTrie#",
["#PTrie#.pyx"],
libraries = ["emptyheaded"],
library_dirs = [EH_PATH+"/storage_engine/build/lib"],
include_dirs = [EH_PATH+"/storage_engine/include",numpy.get_include()],
extra_compile_args = clibs,
extra_link_args = largs,
language="c++"
)
]
setup(
name='#PTrie#',
ext_modules=cythonize(
extensions,
)
)
|
EmptyHeaded-master
|
cython/trie/setup.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import sys
import numpy
import platform
import os
EH_PATH=os.path.expandvars("$EMPTYHEADED_HOME")
if platform.uname()[0] == "Darwin":
clibs = ["-arch","x86_64","-mavx",'-Wno-unused-function',
'-stdlib=libc++',
'-std=c++11',
'-mmacosx-version-min=10.8',]
largs = ["-arch","x86_64"]
else:
clibs = ["-std=c++0x"]
largs = ["-Wl,--Bshareable"]
extensions = [
Extension(
"DB",
["DB.pyx"],
include_dirs = [numpy.get_include()],
extra_compile_args = clibs,
extra_link_args = largs,
language="c++"
)
]
setup(
name='DB',
ext_modules=cythonize(
extensions,
)
)
|
EmptyHeaded-master
|
cython/db/setup.py
|
import os
import platform
import sys
import numpy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
EH_PATH=os.path.expandvars("$EMPTYHEADED_HOME")
clibs = [ "-mavx2",
"-fPIC",
"-std=c++0x",
"-pedantic",
"-O3",
"-Wall",
"-Wno-unused-function",
"-Wextra",
"-march=native",
"-mtune=native",
"-DNUM_THREADS_IN=#NUMTHREADS#"]
if platform.uname()[0] == "Darwin":
clibs += ["-arch","x86_64","-mavx",'-Wno-unused-function',
'-stdlib=libc++',
'-std=c++11',
'-mmacosx-version-min=10.8',]
largs = ["-arch","x86_64"]
else:
clibs += ["-std=c++0x"]
largs = ["-Wl,-rpath="+EH_PATH+"/storage_engine/build/lib","-Wl,--Bshareable"]
os.environ["CC"] = "g++-5"
os.environ["CXX"] = "g++-5"
extensions = [
Extension(
"#QUERY#",
["#FILES#"],
libraries = ["emptyheaded"],
library_dirs = [EH_PATH+"/storage_engine/build/lib"],
include_dirs = [EH_PATH+"/storage_engine/include",numpy.get_include()],
extra_compile_args = clibs,
extra_link_args = largs,
language="c++"
)
]
setup(
name='#QUERY#',
ext_modules=cythonize(
extensions,
)
)
|
EmptyHeaded-master
|
cython/query/setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
# coding: utf-8
"""Dataset Loader for Memory Dialogs.
Author(s): noctli, skottur
(c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
"""
import json
import logging
import os
import pickle
import re
from itertools import chain
import numpy as np
import torch
import torch.utils.data
import tqdm
from dataset import tokenize
from torch.utils.data import Dataset
# from train import SPECIAL_TOKENS, MODEL_INPUTS, PADDED_INPUTS
# SPECIAL_TOKENS = ["<bos>", "<eos>", "<user>", "<system>", "<video>", "<pad>"]
# SPECIAL_TOKENS_DICT = {
# "bos_token": "<bos>",
# "eos_token": "<eos>",
# "additional_special_tokens": ["<user>", "<system>", "<video>", "<cap>"],
# "pad_token": "<pad>",
# }
MODEL_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
PADDED_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
MEMORY_BREAK = "<MM_BREAK>"
ANCHOR_TOKENS = ["<USER>", "<SYSTEM>", "<MM>", "<SOAC>", "<SOAR>", "<SOR>"]
def get_dataset(tokenizer, data_file, feature_path=None, feature_width=None):
"""Get dataset given tokenizer and data file."""
with open(data_file, "r") as file_id:
instance_data = json.load(file_id)
# Read the features from the folder.
if feature_path is not None:
feature_map = {}
feature_type = None
listings = [ii for ii in os.listdir(feature_path) if ".npy" in ii]
for file_name in listings:
search_slots = re.findall(r"mscoco_([^_]*)_([\d]*).npy", file_name)
extracted_type, memory_id = search_slots[0]
if not feature_type:
feature_type = extracted_type
else:
assert feature_type == extracted_type, (
f"Mismatch feature type: {feature_type} != {extracted_type}"
)
file_path = os.path.join(feature_path, file_name)
feature_map[memory_id] = file_path
else:
feature_map = None
feature_type = None
# instance_data = instance_data[:10]
for datum in tqdm.tqdm(instance_data, desc="Preparing dataset"):
context = datum["predict"]
target = datum["target"]
# Identify memory features (if any) in the context.
# NOTE: Make this cleaner, slightly adhoc at the moment.
split_str = context.split(MEMORY_BREAK)
memory_ids = []
for ii in split_str[:-1]:
memory_ids.append(int(ii.rsplit(" ", 1)[-1]))
assert len(memory_ids) + 1 == len(split_str), "Invalid MM breaks!"
# Interleave the two lists: even indices hold text segments, odd indices hold memory entries.
zipped_context = [None for _ in range(len(memory_ids) + len(split_str))]
zipped_context[::2] = split_str
zipped_context[1::2] = [
{
"memory_id": ii,
"memory_feature_path": os.path.join(
feature_path, f"mscoco_{feature_type}_{ii}.npy"
),
}
for ii in memory_ids
]
# Extract the token types.
zipped_token_type_ids = []
zipped_context_tokens = []
current_type = None
for context_part in zipped_context:
if not isinstance(context_part, dict):
tokenized_substr, substr_type_ids, current_type = tokenize_by_type(
context_part, tokenizer, current_type
)
assert len(tokenized_substr) == len(
substr_type_ids
), "String tokens and token ids should be of same length!"
zipped_context_tokens.append(tokenized_substr)
zipped_token_type_ids.extend(substr_type_ids)
else:
assert "memory_id" in context_part, "Not a memory!"
if feature_path:
zipped_token_type_ids.extend(
[tokenizer.convert_tokens_to_ids("<MM>")] * feature_width
)
zipped_context_tokens.append(context_part)
datum["context_tokens"] = zipped_context_tokens
datum["context_token_types"] = zipped_token_type_ids
assert MEMORY_BREAK not in target, "Target cannot have multimodal entries!"
datum["target_tokens"] = tokenize(target, tokenizer)
if datum["type"] == "API":
target_token_type_ids = [tokenizer.convert_tokens_to_ids("<SOAC>")] * len(
datum["target_tokens"]
)
else:
target_token_type_ids = [tokenizer.convert_tokens_to_ids("<SOR>")] * len(
datum["target_tokens"]
)
datum["target_token_types"] = target_token_type_ids
# Get input tokens by merging the two.
input_tokens, input_token_types, lm_labels = merge_context_target_tokens(datum)
datum["input_tokens"] = input_tokens
datum["input_token_types"] = input_token_types
datum["lm_labels"] = lm_labels
return instance_data, feature_map
def merge_context_target_tokens(datum):
"""Merge context and target tokens."""
input_tokens = datum["context_tokens"] + [datum["target_tokens"]]
input_token_types = datum["context_token_types"] + datum["target_token_types"]
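# Context positions are labeled -1 so the LM loss ignores them; only target tokens are scored.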
lm_labels = [-1] * len(datum["context_token_types"]) + datum["target_tokens"]
return input_tokens, input_token_types, lm_labels
def tokenize_by_type(string, tokenizer, start_type=None):
# Raw tokenization.
tokens = string.split(" ")
current_type = start_type
start_index = 0
token_splits = []
for index, token in enumerate(tokens):
if token in ANCHOR_TOKENS:
# First discovered token type, do nothing.
if current_type is not None:
reconstructed_str = " ".join(tokens[start_index:index])
token_splits.append((reconstructed_str, current_type))
start_index = index
current_type = token
# Repeat for the last section.
reconstructed_str = " ".join(tokens[start_index : index + 1])
token_splits.append((reconstructed_str, current_type))
# Now tokenize the substrings.
tokenized_str = []
tokenized_type_ids = []
for substring, current_type in token_splits:
tokenized_substring = tokenize(substring, tokenizer)
tokenized_str.extend(tokenized_substring)
tokenized_type_ids.extend(
[
tokenizer.convert_tokens_to_ids(current_type)
for _ in range(len(tokenized_substring))
]
)
return tokenized_str, tokenized_type_ids, current_type
class MemoryDialogDataset(Dataset):
def __init__(self, dialogs, tokenizer, features=None, drop_rate=0.5, train=True):
self.dialogs = dialogs
self.features = features
self.tokenizer = tokenizer
self.drop_rate = drop_rate
self.train = train
def __len__(self):
return len(self.dialogs)
def __getitem__(self, index):
instance = self.dialogs[index]
input_ids = []
# TODO: Move this to initialization?
for ii in instance["input_tokens"]:
if isinstance(ii, list):
input_ids.append(torch.Tensor(ii).long())
else:
if self.features:
memory_features = np.load(
ii["memory_feature_path"], allow_pickle=True
)[()]["features"]
input_ids.append({"features": memory_features})
token_type_ids = torch.Tensor(instance["input_token_types"]).long()
lm_labels = torch.Tensor(instance["lm_labels"]).long()
return input_ids, token_type_ids, lm_labels
def padding(seq, pad_token):
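# Pad each tensor in the batch to the max length and build a 0/1 mask of real positions.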
max_len = max([i.size(0) for i in seq])
input_mask = torch.zeros((len(seq), max_len)).long()
if len(seq[0].size()) == 1:
result = torch.ones((len(seq), max_len)).long() * pad_token
else:
result = torch.ones(
(len(seq), max_len, seq[0].size(-1)),
dtype=seq[0].dtype,
device=seq[0].device,
)
for i in range(len(seq)):
result[i, : seq[i].size(0)] = seq[i]
input_mask[i, : seq[i].size(0)] = 1.0
return result, input_mask
def collate_fn(batch, pad_token, features=None):
input_ids_list, token_type_ids_list, lm_labels_list, i3d_list = [], [], [], []
for i in batch:
input_ids_list.append(i[0])
token_type_ids_list.append(i[1])
lm_labels_list.append(i[2])
token_type_ids, input_mask = padding(token_type_ids_list, pad_token)
lm_labels, _ = padding(lm_labels_list, -1)
return input_ids_list, token_type_ids, lm_labels, input_mask
def pad_dataset(dataset, padding=0):
"""Pad the dataset.
This could be optimized by defining a Dataset class and padding
only batches, but this is simpler.
"""
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [
x + [padding if name != "labels" else -1] * (max_l - len(x))
for x in dataset[name]
]
return dataset
|
comet_memory_dialog-main
|
models/gpt2_mm/dataset_memory.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
import copy
import json
import logging
import random
import time
from argparse import ArgumentParser
from itertools import chain
import os
from pprint import pformat
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from transformers import *
from VideoGPT2 import *
from dataset import build_input_from_segments
from dataset_memory import get_dataset
def top_filtering(
logits, top_k=0, top_p=0.0, threshold=-float("Inf"), filter_value=-float("Inf")
):
"""Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
assert (
logits.dim() == 1
) # Only works for batch size 1 for now; could be generalized, but that would obfuscate the code a bit
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(
F.softmax(sorted_logits, dim=-1), dim=-1
)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
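# Usage sketch (illustrative): filter the next-token logits, then sample from the renormalized distribution.
# logits = top_filtering(logits, top_k=0, top_p=0.9)
# probs = F.softmax(logits, dim=-1)
# next_token = torch.multinomial(probs, 1)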
def sample_sequence(
instance,
tokenizer,
model,
args,
feature_map,
current_output=None,
):
special_tokens_ids = tokenizer.convert_tokens_to_ids(["<EOAC>", "<EOS>"])
if current_output is None:
current_output = []
context_embeds = None
for time_step in range(args.max_length):
input_ids = []
# For the first time step, work on context_tokens, context_token_types.
if context_embeds is None:
context_embeds = []
for ii in instance["context_tokens"]:
if isinstance(ii, list):
context_embeds.append(
model.transformer.wte(torch.Tensor(ii).long().to(args.device))
)
else:
memory_features = np.load(
ii["memory_feature_path"], allow_pickle=True
)[()]["features"]
memory_embeds = model.video_ff(
torch.Tensor(memory_features).to(args.device)
)
context_embeds.append(memory_embeds)
context_embeds = torch.cat(context_embeds)
context_token_type_ids = (
torch.Tensor(instance["context_token_types"]).long().to(args.device)
)
context_embeds = context_embeds.unsqueeze(0)
context_token_type_ids = context_token_type_ids.unsqueeze(0)
else:
new_context_embed = model.transformer.wte(
torch.Tensor([current_output[-1]]).long().to(args.device)
).unsqueeze(0)
context_embeds = torch.cat([context_embeds, new_context_embed], dim=1)
context_token_type_ids = torch.cat(
[
context_token_type_ids,
context_token_type_ids[0][-1].clone().view(1, -1),
],
dim=1,
)
logits = model(context_embeds, token_type_ids=context_token_type_ids)
if "gpt2" == args.model:
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = (
torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
)
if (time_step < args.min_length) and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
current_output.append(prev.item())
return current_output
def beam_search(
caption, history, tokenizer, model, args, current_output=None, video=None
):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
hyplist = [([], 0.0, current_output)]
best_state = None
comp_hyplist = []
for i in range(args.max_length):
new_hyplist = []
argmin = 0
for out, lp, st in hyplist:
instance, sequence = build_input_from_segments(
caption, history, st, tokenizer, with_eos=False, drop_caption=False
)
input_ids = torch.tensor(
instance["input_ids"], device=args.device
).unsqueeze(0)
token_type_ids = torch.tensor(
instance["token_type_ids"], device=args.device
).unsqueeze(0)
input_embs = model.transformer.wte(input_ids)
if video is not None:
input_embs = torch.cat([model.video_ff(video), input_embs], dim=1)
token_type_ids = torch.cat(
[
torch.ones((1, video.size(1))).long().cuda()
* tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-2]),
token_type_ids,
],
dim=1,
)
logits = model(input_embs, token_type_ids=token_type_ids)
if "gpt2" == args.model:
logits = logits[0]
logp = F.log_softmax(logits, dim=-1)[:, -1, :]
lp_vec = logp.cpu().data.numpy() + lp
lp_vec = np.squeeze(lp_vec)
if i >= args.min_length:
new_lp = lp_vec[tokenizer.eos_token_id] + args.penalty * (len(out) + 1)
comp_hyplist.append((out, new_lp))
if best_state is None or best_state < new_lp:
best_state = new_lp
count = 1
for o in np.argsort(lp_vec)[::-1]:
if o == tokenizer.unk_token_id or o == tokenizer.eos_token_id:
continue
new_lp = lp_vec[o]
if len(new_hyplist) == args.beam_size:
if new_hyplist[argmin][1] < new_lp:
new_st = copy.deepcopy(st)
new_st.append(int(o))
new_hyplist[argmin] = (out + [o], new_lp, new_st)
argmin = min(enumerate(new_hyplist), key=lambda h: h[1][1])[0]
else:
break
else:
new_st = copy.deepcopy(st)
new_st.append(int(o))
new_hyplist.append((out + [o], new_lp, new_st))
if len(new_hyplist) == args.beam_size:
argmin = min(enumerate(new_hyplist), key=lambda h: h[1][1])[0]
count += 1
hyplist = new_hyplist
if len(comp_hyplist) > 0:
maxhyps = sorted(comp_hyplist, key=lambda h: -h[1])[:1]
return maxhyps
else:
return [([], 0)]
def greedy_decode(
caption, history, tokenizer, model, args, current_output=None, video=None
):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
ys = []
for i in range(args.max_length):
instance, sequence = build_input_from_segments(
caption, history, ys, tokenizer, with_eos=False, drop_caption=False
)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(
instance["token_type_ids"], device=args.device
).unsqueeze(0)
input_embs = model.transformer.wte(input_ids)
if video is not None:
input_embs = torch.cat([model.video_ff(video), input_embs], dim=1)
token_type_ids = torch.cat(
[
torch.ones((1, video.size(1))).long().cuda()
* tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-2]),
token_type_ids,
],
dim=1,
)
logits = model(input_embs, token_type_ids=token_type_ids)
if "gpt2" == args.model:
logits = logits[0][0]
logits = logits.cpu().data.numpy()
next_word = np.argsort(logits[-1])[-1]
if next_word == special_tokens_ids[1]:
break
ys.append(next_word)
return ys
# Evaluation routine
def generate_response(model, data, dataset, feature_map, tokenizer, args, ref_data=None):
result_dialogs = []
model.eval()
with torch.no_grad():
iterator = tqdm.tqdm(enumerate(dataset), desc="Generating responses")
for index, instance in iterator:
# logging.info(f"{index}:")
# logging.info("QS: " + instance["predict"])
# prepare input data
start_time = time.time()
if args.beam_search:
raise NotImplementedError("Beam search is not supported!")
hypstr = beam_search(
dataset[idx]["caption"],
dataset[idx]["history"],
tokenizer,
model,
args,
video=i3d,
)
hypstr = hypstr[0][0]
else:
hypstr = sample_sequence(
instance,
tokenizer,
model,
args,
feature_map,
)
hypstr = tokenizer.decode(hypstr, skip_special_tokens=False)
# logging.info("HYP: " + hypstr)
# Create an instance dictionary.
instance_result = {
"dialog_id": instance["dialog_id"],
"turn_id": instance["turn_id"],
"model_prediction": hypstr,
"type": instance["type"],
}
result_dialogs.append(instance_result)
# logging.info("ElapsedTime: %f" % (time.time() - start_time))
# logging.info("-----------------------")
return result_dialogs
def read_commandline_options():
parser = ArgumentParser()
parser.add_argument(
"--model", type=str, default="gpt2", help="Model type (gpt or gpt2)"
)
parser.add_argument(
"--model_checkpoint",
type=str,
default="log_without_caption_with_valid/",
help="Path, url or short name of the model",
)
parser.add_argument(
"--model_epoch", type=int, default=-1, help="Epoch to chose for a given folder"
)
parser.add_argument(
"--max_history",
type=int,
default=3,
help="Number of previous utterances to keep in history",
)
parser.add_argument(
"--device",
type=str,
default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)",
)
parser.add_argument(
"--no_sample",
action="store_true",
help="Set to use greedy decoding instead of sampling",
)
parser.add_argument(
"--beam_search",
action="store_true",
help="Set to use beam search instead of sampling",
)
parser.add_argument("--beam_size", type=int, default=5, help="Beam size")
parser.add_argument(
"--max_length",
type=int,
default=100,
help="Maximum length of the output utterances",
)
parser.add_argument(
"--min_length",
type=int,
default=1,
help="Minimum length of the output utterances",
)
parser.add_argument("--penalty", type=float, default=0.3, help="elngth penalty")
parser.add_argument("--seed", type=int, default=42, help="Seed")
parser.add_argument(
"--temperature", type=int, default=0.7, help="Sampling softmax temperature"
)
parser.add_argument(
"--visual_feature_width",
type=int,
default=10,
help="Feature width for each image; 10 - BUTD; 1 - others"
)
parser.add_argument(
"--visual_feature_size",
type=int,
default=2053,
help="Feature size for each image; 2053 - BUTD; 512 - CLIP",
)
parser.add_argument(
"--feature_path", type=str, default="data/", help="Path to features"
)
parser.add_argument(
"--top_k",
type=int,
default=0,
help="Filter top-k tokens before sampling (<=0: no filtering)",
)
parser.add_argument(
"--top_p",
type=float,
default=0.9,
help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)",
)
parser.add_argument("--test_set", type=str, default="data/test_set4DSTC8-AVSD.json")
parser.add_argument(
"--lbl_test_set",
type=str,
default="data/lbl_undisclosedonly_test_set4DSTC7-AVSD.json",
)
parser.add_argument(
"--special_tokens_path",
type=str,
required=True,
help="Path tp the special tokens used in training/evaluation",
)
parser.add_argument("--output", type=str, default="result.json")
# args = parser.parse_args()
args, unknown = parser.parse_known_args()
return args, parser, unknown
def generate(args):
for arg in vars(args):
print("{}={}".format(arg, getattr(args, arg)))
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s"
)
logging.info("Loading model params from " + args.model_checkpoint)
tokenizer_class = GPT2Tokenizer if "gpt2" == args.model else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
with open(args.special_tokens_path, "r") as file_id:
special_tokens_dict = json.load(file_id)
tokenizer.add_special_tokens(special_tokens_dict)
model_class = VideoGPT2LMHeadModel if "gpt2" == args.model else OpenAIGPTLMHeadModel
model_config = GPT2Config.from_pretrained(args.model_checkpoint)
if args.model_epoch >= 0:
model = model_class.from_pretrained(
os.path.join(args.model_checkpoint, f"checkpoint_mymodel_{args.model_epoch}.pth"),
config=model_config,
# custom_args={"visual_feature_size": args.visual_feature_size}
)
else:
model = model_class.from_pretrained(args.model_checkpoint, config=model_config)
model.to(args.device)
model.eval()
logging.info("Loading test data from " + args.test_set)
test_data = json.load(open(args.test_set, "r"))
test_dataset, feature_map = get_dataset(
tokenizer,
args.test_set,
args.feature_path,
args.visual_feature_width,
)
# generate sentences
logging.info("-----------------------generate--------------------------")
start_time = time.time()
results = generate_response(model, test_data, test_dataset, feature_map, tokenizer, args)
logging.info("----------------")
logging.info("wall time = %f" % (time.time() - start_time))
if args.output:
logging.info("writing results to " + args.output)
with open(args.output, "w") as file_id:
json.dump(results, file_id)
logging.info("done")
# main
if __name__ == "__main__":
args, _, _ = read_commandline_options()
generate(args)
|
comet_memory_dialog-main
|
models/gpt2_mm/generate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from transformers import *
import math
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
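# Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).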
def gelu(x):
return (
0.5
* x
* (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
)
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super(Attention, self).__init__()
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer(
"bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)
)
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.n_head, self.split_size // self.n_head)
heads = (
set(heads) - self.pruned_heads
) # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
index_attn = torch.cat(
[index, index + self.split_size, index + (2 * self.split_size)]
)
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
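# Causal (lower-triangular) bias mask, sliced to the current query/key window.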
b = self.bias[:, :, ns - nd : ns, :ns]
# w = w * b - 1e18 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
b = torch.gt(b + attention_mask[0], 0).float()
w = w * b - 1e18 * (1 - b)
w = w - 1e18 * (1 - attention_mask[1])
else:
w = w * b - 1e18 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = (
layer_past[0].transpose(-2, -1),
layer_past[1],
) # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack(
(key.transpose(-2, -1), value)
) # transpose to have same shapes for stacking
attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super(Block, self).__init__()
nx = config.n_embd
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
output_attn = self.attn(
self.ln_1(x),
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
)
a = output_attn[0] # output_attn: a, present, (attentions)
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
outputs = [x] + output_attn[1:]
return outputs # x, present, (attentions)
class VideoGPT2Model(GPT2Model):
def __init__(self, config):
super(VideoGPT2Model, self).__init__(config)
self.h = nn.ModuleList(
[Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)]
)
def forward(
self,
input_embs,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(
past_length,
input_embs.size(-2) + past_length,
dtype=torch.long,
device=input_embs.device,
)
position_ids = position_ids.unsqueeze(0).expand_as(input_embs[:, :, 0])
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT; we just need to prepare the broadcast dimension here.
attention_mask[0] = attention_mask[0].unsqueeze(1).unsqueeze(2)
attention_mask[1] = attention_mask[1].unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask[0] = attention_mask[0].to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
attention_mask[1] = attention_mask[1].to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
# attention_mask = (1.0 - attention_mask) * -1e18
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = (
head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
)
head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                ) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.n_layer
input_shape = input_embs.size()[:2]
# input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
# inputs_embeds = self.wte(input_ids)
inputs_embeds = input_embs
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (
hidden_states.view(*output_shape),
)
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = (
input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
)
all_attentions = tuple(
t.view(*attention_output_shape) for t in all_attentions
)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, presents, (all hidden_states), (attentions)
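# Illustrative call (names and shapes hypothetical; the main differences from the
# stock GPT2Model are that inputs are pre-projected embeddings rather than token
# ids, and attention_mask is a two-element list of 2D masks indexed in forward()):
#   embs = torch.zeros(2, 50, config.n_embd)   # (batch, seq, n_embd)
#   hidden, presents = model(embs, attention_mask=[reply_mask, input_mask])[:2]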
class VideoGPT2LMHeadModel(GPT2PreTrainedModel):
# def __init__(self, config, **kwargs):
# super(VideoGPT2LMHeadModel, self).__init__(config)
# self.config = config
# self.transformer = VideoGPT2Model(config)
# self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# self.video_ff = nn.Linear(
# kwargs["custom_args"]["visual_feature_size"],
# config.n_embd
# )
# self.video_inverse_ff = nn.Linear(
# config.n_embd, kwargs["custom_args"]["visual_feature_size"]
# )
# self.init_weights()
# self.tie_weights()
def __init__(self, config):
super(VideoGPT2LMHeadModel, self).__init__(config)
self.transformer = VideoGPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# BUTD - 2053.
self.video_ff = nn.Linear(2053, config.n_embd)
self.video_inverse_ff = nn.Linear(config.n_embd, 2053)
# CLIP - 512.
# self.video_ff = nn.Linear(512, config.n_embd)
# self.video_inverse_ff = nn.Linear(config.n_embd, 512)
self.init_weights()
self.tie_weights()
def tie_weights(self):
"""Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head, self.transformer.wte)
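    # Sketch: after tie_weights(), the LM head and the input embedding normally
    # share storage (unless TorchScript export forces a clone), so e.g.
    #   model.lm_head.weight is model.transformer.wte.weight  -> True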
def forward(
self,
input_embs,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
labels=None,
mode="reply",
):
transformer_outputs = self.transformer(
input_embs,
past=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
if mode == "reply":
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[0][..., 1:].contiguous()
# Flatten the tokens
loss_text_fct = CrossEntropyLoss(ignore_index=-1)
loss_text = loss_text_fct(
shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
)
loss = loss_text
else:
lm_video_regs = self.video_inverse_ff(
hidden_states[:, : labels[1].size(1), :]
)
shift_video_regs = lm_video_regs[..., :-1, :].contiguous()
shift_video_labels = labels[1][..., :-1, :].contiguous()
                loss_video_fct = MSELoss(reduction="mean")  # was reduce=True, size_average=True (deprecated, same behavior)
loss_video = loss_video_fct(shift_video_regs, shift_video_labels)
loss = loss_video
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
|
comet_memory_dialog-main
|
models/gpt2_mm/VideoGPT2.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# coding: utf-8
# author: noctli
import json
import pickle
from itertools import chain
import numpy as np
import torch
import torch.utils.data
from torch.utils.data import Dataset
# from train import SPECIAL_TOKENS, MODEL_INPUTS, PADDED_INPUTS
SPECIAL_TOKENS = [
"<bos>",
"<eos>",
"<speaker1>",
"<speaker2>",
"<cap>",
"<video>",
"<pad>",
]
SPECIAL_TOKENS_DICT = {
"bos_token": "<bos>",
"eos_token": "<eos>",
"additional_special_tokens": ["<speaker1>", "<speaker2>", "<video>", "<cap>"],
"pad_token": "<pad>",
}
MODEL_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
PADDED_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
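# Typical tokenizer setup (sketch; assumes a local or downloadable GPT-2 checkpoint):
#   from transformers import GPT2Tokenizer
#   tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
#   tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
#   model.resize_token_embeddings(len(tokenizer))  # keep embedding table in sync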
def tokenize(obj, tokenizer):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
    if isinstance(obj, dict):
        return dict((n, tokenize(o, tokenizer)) for n, o in obj.items())
    return list(tokenize(o, tokenizer) for o in obj)
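# Example behaviour (token ids hypothetical):
#   tokenize("hello world", tokenizer)   -> [123, 456]
#   tokenize({"q": "hi"}, tokenizer)     -> {"q": [789]}
#   tokenize([["hi"], "yo"], tokenizer)  -> [[[789]], [321]]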
def get_dataset(
tokenizer, data_file, feature_path=None, undisclosed_only=False, n_history=3
):
    with open(data_file, "r") as file_id:
        dialog_data = json.load(file_id)
dialog_list = []
vid_set = set()
for dialog in dialog_data["dialogs"]:
caption = [tokenize(dialog["caption"], tokenizer)] + [
tokenize(dialog["summary"], tokenizer)
]
questions = [tokenize(d["question"], tokenizer) for d in dialog["dialog"]]
answers = [tokenize(d["answer"], tokenizer) for d in dialog["dialog"]]
vid = dialog["image_id"]
vid_set.add(vid)
if undisclosed_only:
it = range(len(questions) - 1, len(questions))
else:
it = range(len(questions))
qalist = []
history = []
if undisclosed_only:
for n in range(len(questions) - 1):
qalist.append(questions[n])
qalist.append(answers[n])
history = qalist[max(-len(qalist), -n_history * 2) :]
for n in it:
if undisclosed_only:
assert dialog["dialog"][n]["answer"] == "__UNDISCLOSED__"
question = questions[n]
answer = answers[n]
history.append(question)
if n_history == 0:
item = {
"vid": vid,
"history": [question],
"answer": answer,
"caption": caption,
}
else:
item = {
"vid": vid,
"history": history,
"answer": answer,
"caption": caption,
}
dialog_list.append(item)
qalist.append(question)
qalist.append(answer)
history = qalist[max(-len(qalist), -n_history * 2) :]
all_features = {}
if feature_path is not None:
fea_types = ["vggish", "i3d_flow", "i3d_rgb"]
dataname = "<FeaType>/<ImageID>.npy"
for ftype in fea_types:
if undisclosed_only:
basename = dataname.replace("<FeaType>", ftype + "_testset")
else:
basename = dataname.replace("<FeaType>", ftype)
features = {}
for vid in vid_set:
filename = basename.replace("<ImageID>", vid)
filepath = feature_path + filename
features[vid] = (filepath, filepath)
all_features[ftype] = features
return dialog_list, all_features
return dialog_list
class AVSDDataSet(Dataset):
def __init__(self, dialogs, tokenizer, features=None, drop_rate=0.5, train=True):
self.dialogs = dialogs
self.features = features
self.tokenizer = tokenizer
self.drop_rate = drop_rate
self.train = train
def __len__(self):
return len(self.dialogs)
def __getitem__(self, index):
dialog = self.dialogs[index]
vid = dialog["vid"]
his = self.dialogs[index]["history"]
cap = self.dialogs[index]["caption"]
ans = self.dialogs[index]["answer"]
if np.random.rand() < self.drop_rate:
instance, _ = build_input_from_segments(
cap,
his,
ans,
self.tokenizer,
video=False,
drop_caption=True,
train=self.train,
)
else:
instance, _ = build_input_from_segments(
cap,
his,
ans,
self.tokenizer,
video=False,
drop_caption=False,
train=self.train,
)
input_ids = torch.Tensor(instance["input_ids"]).long()
token_type_ids = torch.Tensor(instance["token_type_ids"]).long()
lm_labels = torch.Tensor(instance["lm_labels"]).long()
if self.features is not None:
try:
vgg = np.load(self.features[0]["vggish"][vid][0])
i3d_flow = np.load(self.features[0]["i3d_flow"][vid][0])
i3d_rgb = np.load(self.features[0]["i3d_rgb"][vid][0])
except KeyError:
vgg = np.load(self.features[1]["vggish"][vid][0])
i3d_flow = np.load(self.features[1]["i3d_flow"][vid][0])
i3d_rgb = np.load(self.features[1]["i3d_rgb"][vid][0])
            sample_i3d_flow = i3d_flow[1:]  # drop the first frame
            sample_i3d_rgb = i3d_rgb[1:]  # drop the first frame
vgg = torch.from_numpy(vgg).float()
i3d_flow = torch.from_numpy(sample_i3d_flow).float()
i3d_rgb = torch.from_numpy(sample_i3d_rgb).float()
min_length = min([i3d_flow.size(0), i3d_rgb.size(0), vgg.size(0)])
i3d = torch.cat(
[i3d_flow[:min_length], i3d_rgb[:min_length], vgg[:min_length]], dim=1
)
return input_ids, token_type_ids, lm_labels, i3d
else:
return input_ids, token_type_ids, lm_labels
def collate_fn(batch, pad_token, features=None):
def padding(seq, pad_token):
max_len = max([i.size(0) for i in seq])
if len(seq[0].size()) == 1:
result = torch.ones((len(seq), max_len)).long() * pad_token
else:
result = torch.ones((len(seq), max_len, seq[0].size(-1))).float()
for i in range(len(seq)):
result[i, : seq[i].size(0)] = seq[i]
return result
input_ids_list, token_type_ids_list, lm_labels_list, i3d_list = [], [], [], []
for i in batch:
input_ids_list.append(i[0])
token_type_ids_list.append(i[1])
lm_labels_list.append(i[2])
if features is not None:
i3d_list.append(i[3])
input_ids = padding(input_ids_list, pad_token)
token_type_ids = padding(token_type_ids_list, pad_token)
lm_labels = padding(lm_labels_list, -1)
input_mask = input_ids != pad_token
if features is not None:
i3d = padding(i3d_list, pad_token)
i3d_mask = torch.sum(i3d != 1, dim=2) != 0
input_mask = torch.cat([i3d_mask, input_mask], dim=1)
i3d_labels = torch.ones((i3d.size(0), i3d.size(1))).long() * -1
video_mask = torch.cat(
[torch.zeros((i3d.size(0), i3d.size(1))), torch.ones(lm_labels.size())], 1
)
reply_mask = torch.zeros(video_mask.size())
lm_labels = torch.cat([i3d_labels, lm_labels], dim=1)
return (
input_ids,
token_type_ids,
lm_labels,
input_mask,
i3d,
video_mask,
reply_mask,
)
else:
return input_ids, token_type_ids, lm_labels, input_mask
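# Sketch of DataLoader wiring (names hypothetical):
#   loader = torch.utils.data.DataLoader(
#       dataset,
#       batch_size=4,
#       collate_fn=lambda b: collate_fn(b, tokenizer.pad_token_id, features=True),
#   )
# With features: batches are (input_ids, token_type_ids, lm_labels, input_mask,
# i3d, video_mask, reply_mask); without, only the first four.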
def pad_dataset(dataset, padding=0):
"""Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler."""
max_l = max(len(x) for x in dataset["input_ids"])
    for name in PADDED_INPUTS:
        dataset[name] = [
            x + [padding if name != "lm_labels" else -1] * (max_l - len(x))
            for x in dataset[name]
        ]
return dataset
def build_input_from_segments(
caption,
history,
reply,
tokenizer,
with_eos=True,
video=False,
drop_caption=False,
train=True,
):
"""Build a sequence of input from 3 segments: caption(caption+summary) history and last reply"""
bos, eos, speaker1, speaker2, cap = tokenizer.convert_tokens_to_ids(
SPECIAL_TOKENS[:-2]
)
if not drop_caption:
instance = {}
sequence = (
[[bos] + list(chain(*caption))]
+ history
+ [reply + ([eos] if with_eos else [])]
)
sequence = [[cap] + sequence[0] + [eos]] + [
[speaker2 if (len(sequence) - i) % 2 else speaker1] + s
for i, s in enumerate(sequence[1:])
]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [cap] * len(sequence[0]) + [
speaker2 if i % 2 else speaker1
for i, s in enumerate(sequence[1:])
for _ in s
]
if video and train:
# instance["lm_labels"] = sequence[0] + ([-1]*sum(len(s) for s in sequence[1:-1])) + sequence[-1]
instance["lm_labels"] = (
sequence[0]
+ ([-1] * sum(len(s) for s in sequence[1:-1]))
+ sequence[-1]
)
else:
instance["lm_labels"] = (
[-1] * sum(len(s) for s in sequence[:-1])
) + sequence[-1]
else:
instance = {}
sequence = history + [reply + ([eos] if with_eos else [])]
sequence = [
[speaker2 if (len(sequence) - i) % 2 else speaker1] + s
for i, s in enumerate(sequence)
]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [
speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s
]
if video:
instance["lm_labels"] = (
[-1] * sum(len(s) for s in sequence[:-1])
) + sequence[-1]
else:
instance["lm_labels"] = (
[-1] * sum(len(s) for s in sequence[:-1])
) + sequence[-1]
return instance, sequence
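# Worked example (symbolic; real inputs are lists of token ids). With
# caption=[[c1],[c2]], history=[[q1],[a1],[q2]], reply=[r] and drop_caption=False:
#   sequence = [[<cap>, <bos>, c1, c2, <eos>],
#               [<speaker2>, q1], [<speaker1>, a1],
#               [<speaker2>, q2], [<speaker1>, r, <eos>]]
# Speakers alternate backwards from the reply, which always gets <speaker1>.
# lm_labels is -1 everywhere except the final reply (plus the caption segment
# when video=True and train=True), so the loss only covers those tokens.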
|
comet_memory_dialog-main
|
models/gpt2_mm/dataset.py
|
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
import json
import logging
import math
import os
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
from pprint import pformat
import torch
from ignite.contrib.handlers import PiecewiseLinear, ProgressBar
from ignite.contrib.handlers.tensorboard_logger import (
OptimizerParamsHandler,
OutputHandler,
TensorboardLogger,
)
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from transformers import *
from VideoGPT2 import *
import pickle as pkl
from dataset_memory import collate_fn, get_dataset, MemoryDialogDataset, padding
MODEL_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
PADDED_INPUTS = ["input_ids", "token_type_ids", "lm_labels"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, args):
"""Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation."""
if args.local_rank == -1:
return scalar
scalar_t = (
torch.tensor(scalar, dtype=torch.float, device=args.device)
/ torch.distributed.get_world_size()
)
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def get_data_loaders_new(args, tokenizer):
train_data = get_dataset(
tokenizer,
args.train_path,
args.feature_path,
args.visual_feature_width,
)
# with open("train_data_gpt2.pkl", "rb") as f:
# train_data = pkl.load(f)
# pkl.dump(train_data, f)
valid_data = get_dataset(
tokenizer,
args.valid_path,
args.feature_path,
args.visual_feature_width,
)
# with open("valid_data_gpt2.pkl", "rb") as f:
# valid_data = pkl.load(f)
# pkl.dump(valid_data, f)
train_dataset = MemoryDialogDataset(
train_data[0],
tokenizer,
(train_data[1], valid_data[1]),
drop_rate=0,
train=True,
)
valid_dataset = MemoryDialogDataset(
valid_data[0],
tokenizer,
(valid_data[1], train_data[1]),
drop_rate=0,
train=False,
)
# for ii in range(len(train_dataset)):
# train_dataset[ii]
# batch = [train_dataset[ii] for ii in range(3)]
# features = True
# collate_fn(batch, tokenizer.pad_token_id, features=features)
# NOTE: FIX this later.
# features = None if args.video_agnostic else True
features = True
train_loader = DataLoader(
train_dataset,
batch_size=args.train_batch_size,
num_workers=1,
shuffle=(not args.distributed),
collate_fn=lambda x: collate_fn(x, tokenizer.pad_token_id, features=features),
)
valid_loader = DataLoader(
valid_dataset,
batch_size=args.valid_batch_size,
num_workers=1,
shuffle=False,
collate_fn=lambda x: collate_fn(x, tokenizer.pad_token_id, features=features),
)
return train_loader, valid_loader
def read_commandline_options():
parser = ArgumentParser()
parser.add_argument(
"--train_path",
type=str,
default="data/train_set4DSTC7-AVSD.json",
help="Path of the trainset",
)
parser.add_argument(
"--feature_path", type=str, default="data/", help="Path to features"
)
parser.add_argument(
"--valid_path",
type=str,
default="data/valid_set4DSTC7-AVSD.json",
help="Path of the validset",
)
parser.add_argument(
"--special_tokens_path",
type=str,
required=True,
help="Path to the special tokens for training",
)
parser.add_argument(
"--model_checkpoint",
type=str,
default="gpt2",
help="Path, url or short name of the model",
)
parser.add_argument(
"--max_history",
type=int,
default=3,
help="Number of previous exchanges to keep in history",
)
parser.add_argument(
"--visual_feature_width",
type=int,
default=10,
help="Feature width for each image; 10 - BUTD; 1 - others"
)
parser.add_argument(
"--visual_feature_size",
type=int,
default=2053,
help="Feature size for each image; 2053 - BUTD; 512 - CLIP",
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size for training"
)
parser.add_argument(
"--valid_batch_size", type=int, default=4, help="Batch size for validation"
)
parser.add_argument(
"--drop_rate", type=float, default=0.5, help="drop rate for caption"
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=8,
help="Accumulate gradients on several steps",
)
parser.add_argument("--lr", type=float, default=6.25e-5, help="Learning rate")
parser.add_argument(
"--max_norm", type=float, default=1.0, help="Clipping gradient norm"
)
parser.add_argument(
"--n_epochs", type=int, default=8, help="Number of training epochs"
)
parser.add_argument(
"--eval_before_start",
action="store_true",
help="If true start with a first evaluation before training",
)
parser.add_argument(
"--dataloader_dry_run",
action="store_true",
help="Flag to set only dataloader components",
)
parser.add_argument(
"--video_agnostic",
action="store_true",
help="Ignore video features",
)
parser.add_argument(
"--predict_belief_state", action="store_true", help="Predict belief state"
)
parser.add_argument(
"--device",
type=str,
default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)",
)
parser.add_argument(
"--fp16",
type=str,
default="",
help="Set to O0, O1, O2 or O3 for fp16 training (see apex documentation)",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="Local rank for distributed training (-1: not distributed)",
)
parser.add_argument("--log_path", type=str, default="log/", help="Log path")
# args = parser.parse_args()
args, unknown = parser.parse_known_args()
return args, parser, unknown
def train(args):
if not os.path.exists(args.log_path):
os.makedirs(args.log_path)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN
)
logger.warning(
"Running process %d", args.local_rank
) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(args))
# Initialize distributed training if needed
args.distributed = args.local_rank != -1
if args.distributed:
torch.cuda.set_device(args.local_rank)
args.device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
logger.info(
"Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning"
)
tokenizer_class = GPT2Tokenizer
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
# Read special tokens from the file.
with open(args.special_tokens_path, "r") as file_id:
special_tokens_dict = json.load(file_id)
tokenizer.add_special_tokens(special_tokens_dict)
if not args.dataloader_dry_run:
model_class = VideoGPT2LMHeadModel
model = model_class.from_pretrained(args.model_checkpoint)
# model_config = model_class.config_class.from_pretrained(args.model_checkpoint)
# model = model_class(
# model_config,
# custom_args={"visual_feature_size": args.visual_feature_size},
# )
model.resize_token_embeddings(len(tokenizer))
model.to(args.device)
optimizer = AdamW(model.parameters(), lr=args.lr)
# Prepare model for FP16 and distributed training if needed
# (order is important, distributed should be the last)
if args.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16)
if args.distributed:
model = DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank
)
logger.info("Prepare datasets")
train_loader, val_loader = get_data_loaders_new(args, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
# Process the input_tokens for the batch.
input_embeds = []
for datum in batch[0]:
instance_embeds = []
for datum_input in datum:
if isinstance(datum_input, dict):
datum_output = model.video_ff(
torch.Tensor(datum_input["features"]).to(args.device)
)
else:
datum_output = model.transformer.wte(datum_input.to(args.device))
instance_embeds.append(datum_output)
input_embeds.append(torch.cat(instance_embeds))
input_embeds, _ = padding(input_embeds, tokenizer.pad_token_id)
token_type_ids = batch[1].to(args.device)
lm_labels = batch[2].to(args.device)
input_mask = batch[3].to(args.device)
reply_mask = torch.zeros(
input_mask.size(), dtype=input_mask.dtype, device=input_mask.device
)
reply_loss = model(
input_embeds,
token_type_ids=token_type_ids,
labels=(lm_labels, None),
attention_mask=[reply_mask, input_mask],
mode="reply",
)[0]
loss = reply_loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
if engine.state.iteration % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
# Process the input_tokens for the batch.
input_embeds = []
for datum in batch[0]:
instance_embeds = []
for datum_input in datum:
if isinstance(datum_input, dict):
datum_output = model.video_ff(
torch.Tensor(datum_input["features"]).to(args.device)
)
else:
datum_output = model.transformer.wte(
datum_input.to(args.device)
)
instance_embeds.append(datum_output)
input_embeds.append(torch.cat(instance_embeds))
input_embeds, _ = padding(input_embeds, tokenizer.pad_token_id)
token_type_ids = batch[1].to(args.device)
lm_labels = batch[2].to(args.device)
input_mask = batch[3].to(args.device)
reply_mask = torch.zeros(
input_mask.size(), dtype=input_mask.dtype, device=input_mask.device
)
model_outputs = model(
input_embeds,
token_type_ids=token_type_ids,
attention_mask=[reply_mask, input_mask],
mode="reply",
)[0]
lm_logits = model_outputs # So we can also use GPT2 outputs
lm_logits_flat_shifted = (
lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
)
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return lm_logits_flat_shifted, lm_labels_flat_shifted
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(
Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader)
)
if args.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if args.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(
optimizer, "lr", [(0, args.lr), (args.n_epochs * len(train_loader), 0.0)]
)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {
"nll": Loss(
torch.nn.CrossEntropyLoss(ignore_index=-1),
output_transform=lambda x: (x[0], x[1]),
)
}
metrics.update(
{"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], args)}
)
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and
# save model, configuration and tokenizer before we start to train
if args.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(
Events.COMPLETED,
lambda _: pbar.log_message(
"Validation: %s" % pformat(evaluator.state.metrics)
),
)
tb_logger = TensorboardLogger(log_dir="./tb_logs")
tb_logger.attach(
trainer,
log_handler=OutputHandler(tag="training", metric_names=["loss"]),
event_name=Events.ITERATION_COMPLETED,
)
tb_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED,
)
tb_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=list(metrics.keys()),
another_engine=trainer,
),
event_name=Events.EPOCH_COMPLETED,
)
        checkpoint_handler = ModelCheckpoint(
            args.log_path,
            "checkpoint",
            save_interval=1,
            n_saved=args.n_epochs,
            require_empty=False,
        )
trainer.add_event_handler(
Events.EPOCH_COMPLETED,
checkpoint_handler,
{"mymodel": getattr(model, "module", model)},
) # "getattr" take care of distributed encapsulation
torch.save(args, args.log_path + "model_training_args.bin")
getattr(model, "module", model).config.to_json_file(
os.path.join(args.log_path, CONFIG_NAME)
)
tokenizer.save_vocabulary(args.log_path)
# Run the training
trainer.run(train_loader, max_epochs=args.n_epochs)
# On the main process: close tensorboard logger and rename the last
# checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if args.local_rank in [-1, 0] and args.n_epochs > 0:
os.rename(
checkpoint_handler._saved[-1][1][-1],
os.path.join(args.log_path, WEIGHTS_NAME),
) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
args, _, _ = read_commandline_options()
train(args)
|
comet_memory_dialog-main
|
models/gpt2_mm/train.py
|
#! /usr/bin/env python
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Create API and MM-DST result JSONS from model result file.
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import collections
import copy
import json
import ast
import re
def parse_flattened_result(to_parse):
"""
Parse out the belief state from the raw text.
Return an empty list if the belief state can't be parsed
Input:
- A single <str> of flattened result
e.g. 'User: Show me something else => Belief State : DA:REQUEST ...'
Output:
- Parsed result in a JSON format, where the format is:
[
{
'act': <str> # e.g. 'DA:REQUEST',
'slots': [
<str> slot_name,
<str> slot_value
]
}, ... # End of a frame
] # End of a dialog
"""
dialog_act_regex = re.compile(r"([\w:?.?]*) *\[(.*)\] *\(([^\]]*)\) *\<([^\]]*)\>")
slot_regex = re.compile(r"([A-Za-z0-9_.-:]*) *= *(\[([^\]]*)\]|[^,]*)")
request_regex = re.compile(r"([A-Za-z0-9_.-:]+)")
object_regex = re.compile(r"([A-Za-z0-9]+)")
belief = []
# Parse
to_parse = to_parse.strip()
# to_parse: 'DIALOG_ACT_1 : [ SLOT_NAME = SLOT_VALUE, ... ] ...'
for dialog_act in dialog_act_regex.finditer(to_parse):
d = {
"act": dialog_act.group(1),
"slots": {},
"request_slots": [],
"memories": [],
}
for slot in slot_regex.finditer(dialog_act.group(2)):
# If parsing python list eval it else keep unique string.
slot_name = slot.group(1).strip()
slot_values = slot.group(2).strip()
# If there are nones, replace them with Nones and later remove them.
            if re.match(r"\[.*\]", slot_values):
try:
slot_values = slot_values.replace("none", "None")
parsed_slot_values = ast.literal_eval(slot_values)
d["slots"][slot_name] = [ii for ii in parsed_slot_values if ii]
                except (ValueError, SyntaxError):
                    # If the slot list cannot be parsed, fall back to an empty string.
                    print(f"Error parsing: {to_parse}")
                    d["slots"][slot_name] = ""
else:
d["slots"][slot_name] = slot_values
for request_slot in request_regex.finditer(dialog_act.group(3)):
d["request_slots"].append(request_slot.group(1).strip())
for object_id in object_regex.finditer(dialog_act.group(4)):
d["memories"].append(object_id.group(1).strip())
if d != {}:
belief.append(d)
return belief
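# Example (sketch of the flattened format this parser expects):
#   parse_flattened_result("API_CALL_TYPE.GET_INFO [ ] (time) < 1022778 >")
#   -> [{"act": "API_CALL_TYPE.GET_INFO", "slots": {},
#        "request_slots": ["time"], "memories": ["1022778"]}]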
def create_result_jsons(results, test_data):
"""Creates two JSON files from results.
Args:
results: List of generated results from the model.
test_data: Raw JSON test file.
Returns:
response_results: Dict containing response results
dst_results: Dict containing DST results
"""
dst_results = copy.deepcopy(test_data)
response_results = collections.defaultdict(list)
dst_pool = {}
for instance in results:
dialog_id = instance["dialog_id"]
turn_id = instance["turn_id"]
if instance["type"] == "API":
index = (dialog_id, turn_id)
dst_pool[index] = instance
else:
if dialog_id not in response_results:
response_results[dialog_id] = {
"dialog_id": dialog_id,
"predictions": [],
}
response_results[dialog_id]["predictions"].append(
{
"turn_id": turn_id,
"response": instance["model_prediction"],
}
)
num_missing = 0
num_present = 0
for dialog_datum in dst_results["dialogue_data"]:
del dialog_datum["mentioned_memory_ids"]
del dialog_datum["memory_graph_id"]
dialog_id = dialog_datum["dialogue_idx"]
for datum in dialog_datum["dialogue"]:
turn_id = datum["turn_idx"]
index = (dialog_id, turn_id)
if index in dst_pool:
model_pred_datum = dst_pool[index]
model_pred = model_pred_datum["model_prediction"].strip(" ")
parsed_result = parse_flattened_result(model_pred)
datum["transcript_annotated"] = parsed_result
num_present += 1
else:
del datum["transcript_annotated"]
print(f"Missing! -- {index}")
num_missing += 1
print(f"Missing: {num_missing} Present: {num_present}")
return list(response_results.values()), dst_results
def main(args):
with open(args["memory_test_json"], "r") as file_id:
test_data = json.load(file_id)
with open(args["model_output_json"], "r") as file_id:
results = json.load(file_id)
response_results, dst_results = create_result_jsons(results, test_data)
# Save the results.
response_results_path = args["model_output_json"].replace(
".json", "_response_results.json"
)
with open(response_results_path, "w") as file_id:
json.dump(response_results, file_id)
dst_results_path = args["model_output_json"].replace(".json", "_dst_results.json")
with open(dst_results_path, "w") as file_id:
json.dump(dst_results, file_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--memory_test_json",
required=True,
help="JSON file for test data",
)
parser.add_argument(
"--model_output_json", required=True, help="JSON file with model outputs"
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
main(parsed_args)
|
comet_memory_dialog-main
|
models/gpt2_mm/utils/create_result_jsons.py
|
#! /usr/bin/env python
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Extract BUTD features for memories.
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import base64
import json
import os
import pickle
import lmdb
import numpy as np
from PIL import Image
import torch
import tqdm
FEATURE_REGISTRY = {}
def register(cls):
FEATURE_REGISTRY[cls.label] = cls
return cls
# Extracts top-down bottom-up image features.
@register
class ImageFeatureReader(object):
label = "butd"
def __init__(self, feature_path, max_bboxes=-1):
"""Reads BUTD image features.
Args:
feature_path: Path to read the image features.
max_bboxes: Maximum number of bounding boxes.
"""
self.reader = lmdb.open(
feature_path,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False,
)
with self.reader.begin(write=False) as file_ptr:
self.image_id_list = pickle.loads(file_ptr.get(b"keys"))
self.num_bboxes = max_bboxes
def __getitem__(self, image_id):
image_id = str(image_id).encode()
assert image_id in self.image_id_list, "Missing image_id!"
with self.reader.begin(write=False) as file_ptr:
item = pickle.loads(file_ptr.get(image_id))
num_boxes = int(item["num_boxes"])
features = np.frombuffer(
base64.b64decode(item["features"]), dtype=np.float32
).reshape(num_boxes, 2048)
boxes = np.frombuffer(
base64.b64decode(item["boxes"]), dtype=np.float32
).reshape(num_boxes, 4)
class_probs = np.frombuffer(
base64.b64decode(item["cls_prob"]), dtype=np.float32
).reshape(num_boxes, 1601)
features_dict = {
"features": features,
"bboxes": boxes,
"class_probs": class_probs,
"num_boxes": num_boxes,
"image_w": int(item["image_w"]),
"image_h": int(item["image_h"]),
}
if self.num_bboxes > 0:
features_dict = self.trim_butd_features(features_dict)
return features_dict
def trim_butd_features(self, features_dict):
"""Trim BUTD features based on class probability.
Args:
feature_dict: BUTD features for images
"""
# Get top class in each bbox and pick ones with highest class probability.
top_class_prob = np.max(features_dict["class_probs"], axis=1)
top_bboxes = np.argsort(-top_class_prob)[: self.num_bboxes]
# Modify the elements.
features_dict["bboxes"] = features_dict["bboxes"][top_bboxes]
features_dict["features"] = features_dict["features"][top_bboxes]
features_dict["num_boxes"] = self.num_bboxes
del features_dict["class_probs"]
return self.augment_butd_features(features_dict)
def augment_butd_features(self, features_dict):
"""Augment BUTD feature with spatial location relative to height x width."""
# Aliases.
image_w = features_dict["image_w"]
image_h = features_dict["image_h"]
location = np.zeros((features_dict["num_boxes"], 5), dtype=np.float32)
location[:, :4] = features_dict["bboxes"]
location[:, 4] = (
(location[:, 3] - location[:, 1])
* (location[:, 2] - location[:, 0])
/ (float(image_w) * float(image_h))
)
location[:, 0] = location[:, 0] / float(image_w)
location[:, 1] = location[:, 1] / float(image_h)
location[:, 2] = location[:, 2] / float(image_w)
location[:, 3] = location[:, 3] / float(image_h)
features = np.concatenate([features_dict["features"], location], axis=-1)
features_dict["features"] = features
return features_dict
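    # Numeric sketch of the 5-d location feature for one box on a 100x200
    # (w x h) image with bbox (x1=10, y1=20, x2=60, y2=120):
    #   area fraction = (120 - 20) * (60 - 10) / (100 * 200) = 0.25
    #   normalized    = (0.10, 0.10, 0.60, 0.60)
    # giving location = [0.10, 0.10, 0.60, 0.60, 0.25], concatenated onto the
    # 2048-d BUTD feature to form the 2053-d vector the model consumes.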
# Extracts clip features.
@register
class CLIPFeatureExtractor(object):
"""Extracts visual features using CLIP architecture."""
label = "clip"
def __init__(self, image_folder):
"""Initializes the feature extractor.
Args:
image_folder: Path to the raw COCO images.
"""
self._device = "cuda" if torch.cuda.is_available() else "cpu"
self._model, self._preprocess = clip.load("ViT-B/32", device=self._device)
self._image_folder = image_folder
def __getitem__(self, image_id):
"""Extracts image features for a given image_id.
Args:
image_id: Corresponding MSCOCO image_id
"""
image_path = os.path.join(
self._image_folder, f"COCO_train2014_{image_id:012d}.jpg"
)
        image = (
            self._preprocess(Image.open(image_path)).unsqueeze(0).to(self._device)
        )
with torch.no_grad():
image_feature = self._model.encode_image(image)
return {
"features": image_feature.cpu().numpy(),
}
# Extracts SWIN features.
@register
class SWINFeatureExtractor(object):
"""Extracts visual features using SWIN architecture."""
label = "swin"
def __init__(self, image_folder):
"""Initializes the feature extractor.
Args:
image_folder: Path to the raw COCO images.
"""
self._use_gpu = torch.cuda.is_available()
self._model = timm.create_model(
"swin_base_patch4_window7_224",
pretrained=True,
num_classes=0,
)
self._image_folder = image_folder
def _prepare_image(self, image_path):
"""Given image path, load and prepare the image.
Args:
image_path: Path to the image to load
Returns:
image: Loaded image adjusted to the size
"""
image = Image.open(image_path)
image = np.array(image.resize((224, 224)), dtype=np.float32)
if image.ndim != 3:
image = np.stack([image, image, image], axis=2)
        image = torch.as_tensor(image).permute(2, 0, 1)[None]  # HWC -> NCHW
return image
def __getitem__(self, image_id):
"""Extracts image features for a given image_id.
Args:
image_id: Corresponding MSCOCO image_id
"""
image_path = os.path.join(
self._image_folder, f"COCO_train2014_{image_id:012d}.jpg"
)
image = self._prepare_image(image_path)
with torch.no_grad():
image_feature = self._model(image)
return {
"features": image_feature.cpu().numpy(),
}
def main(args):
memory_graphs = {}
for file_path in args["input_memory_json"]:
# print(f"Reading: {file_path}")
with open(file_path, "r") as file_id:
graph_data = json.load(file_id)
for datum in graph_data:
if datum["memory_graph_id"] in memory_graphs:
print("Multiple memory graph ids exist!")
else:
memory_graphs[datum["memory_graph_id"]] = datum
print(f"# memory dialogs: {len(memory_graphs)}")
memory_dialogs = {}
for file_path in args["input_dialog_json"]:
# print(f"Reading: {file_path}")
with open(file_path, "r") as file_id:
dialog_data = json.load(file_id)
for datum in dialog_data["dialogue_data"]:
dialog_id = datum["dialogue_idx"]
memory_dialogs[dialog_id] = datum
print(f"# dialogs: {len(memory_dialogs)}")
# Load image features and trim if necessary.
if args["feature_type"] == "butd":
feature_extractor = ImageFeatureReader(
args["input_feature_path"], args["max_bboxes"]
)
elif args["feature_type"] == "clip":
feature_extractor = CLIPFeatureExtractor(args["input_image_folder"])
elif args["feature_type"] == "swin":
feature_extractor = SWINFeatureExtractor(args["input_image_folder"])
else:
raise NotImplementedError(f"""Invalid type: {args["feature_type"]}!""")
progress_bar = tqdm.tqdm(memory_dialogs.items(), desc="Getting relevant images")
relevant_image_ids = set()
for dialog_id, datum in progress_bar:
assert datum["memory_graph_id"] in memory_graphs, "Memory graph missing!"
graph = memory_graphs[datum["memory_graph_id"]]
sample_memories = {}
for ii in graph["memories"]:
if ii["memory_id"] in datum["mentioned_memory_ids"]:
sample_memories[ii["memory_id"]] = ii
for mem_id, mem_datum in sample_memories.items():
relevant_image_ids.add((mem_id, mem_datum["media"][0]["media_id"]))
progress_bar = tqdm.tqdm(relevant_image_ids, desc="Extracting features")
for memory_id, image_id in progress_bar:
feature_save_path = os.path.join(
args["feature_save_path"],
f"""mscoco_{args["feature_type"]}_{memory_id}.npy""",
)
np.save(feature_save_path, feature_extractor[image_id])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--input_dialog_json", nargs="+", required=True, help="Input memories JSON"
)
parser.add_argument(
"--input_memory_json", nargs="+", required=True, help="Input memories metadata"
)
parser.add_argument(
"--feature_save_path", required=True, help="Folder to save memory features"
)
parser.add_argument(
"--input_feature_path",
default=None,
help="Path to image features",
)
parser.add_argument(
"--input_image_folder",
default=None,
help="Path to raw input images",
)
parser.add_argument(
"--feature_type",
choices=["butd", "clip", "swin"],
required=True,
help="Type of visual features to extract",
)
parser.add_argument(
"--max_bboxes", default=-1, type=int, help="Maximum bounding boxes to retain"
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# Conditional imports.
if parsed_args["feature_type"] == "clip":
import clip
if parsed_args["feature_type"] == "swin":
import timm
main(parsed_args)
|
comet_memory_dialog-main
|
models/gpt2_mm/utils/extract_memory_features.py
|
#! /usr/bin/env python
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Preprocess the memory dialog dataset.
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import os
MM_CONTEXT = "<MM>"
START_API_CALL = "<SOAC>"
END_API_CALL = "<EOAC>"
START_API_RESULT = "<SOAR>"
START_RESPONSE = "<SOR>"
END_SENTENCE = "<EOS>"
PAD_TOKEN = "<PAD>"
SYSTEM = "<SYSTEM>"
USER = "<USER>"
TEMPLATE_API_PREDICT = "{context} {START_API_CALL} "
TEMPLATE_API_TARGET = "{belief_state} {END_API_CALL}"
TEMPLATE_RESPONSE_PREDICT = (
"{context} {START_API_CALL} {belief_state} {END_API_CALL} "
"{START_API_RESULT} {api_result} {START_RESPONSE}"
)
TEMPLATE_RESPONSE_TARGET = "{response} {END_SENTENCE}"
def format_memory_dialog_json(json_path, context_length=2, train=False):
    """Formats a memory dialog JSON file into flattened GPT-2 train/eval instances."""
print(f"Reading: {json_path}")
with open(json_path, "r") as file_id:
data = json.load(file_id)
if train:
additional_special_tokens = set(
[
SYSTEM,
USER,
START_API_CALL,
END_API_CALL,
START_RESPONSE,
START_API_RESULT,
MM_CONTEXT,
]
)
instances = []
for dialog_datum in data["dialogue_data"]:
prev_asst_uttr = None
prev_turn = None
context_history = []
for turn in dialog_datum["dialogue"]:
user_uttr = turn["transcript"].replace("\n", " ").strip()
user_uttr_api_call_type = turn["api_call"]["call_type"]
user_uttr_api_result = turn.get("api_result", {})
user_uttr_parameters = turn["transcript_annotated"][-1]["act_attributes"]
asst_uttr = turn["system_transcript"].replace("\n", " ").strip()
# Format main input context
if prev_asst_uttr:
memory_objects = prev_turn["system_transcript_annotated"][-1][
"act_attributes"
]["memories"]
else:
memory_objects = []
context = format_context(
prev_asst_uttr,
user_uttr,
memory_objects,
)
prev_asst_uttr = asst_uttr
prev_turn = turn
# Concat with previous contexts
context_history.append(context)
context = " ".join(context_history[-context_length:])
# Format belief state
# Skip if the api_call is unknown
if user_uttr_api_call_type == "None":
continue
if (
user_uttr_api_result == {}
or user_uttr_api_result.get("status", "None") == "None"
):
continue
belief_state = []
# ***** Temp fix for null participant *****
if "participant" in user_uttr_parameters["slot_values"]:
user_uttr_parameters["slot_values"]["participant"] = [
p
for p in user_uttr_parameters["slot_values"]["participant"]
if p is not None
]
# ************************************************
# Format for API Call.
str_belief_state = format_api_call(
user_uttr_api_call_type, user_uttr_parameters
)
# Track OOVs
if train:
additional_special_tokens.add(user_uttr_api_call_type)
for slot_name in user_uttr_parameters["slot_values"]:
additional_special_tokens.add(str(slot_name))
# Format for API Result
str_api_result = format_api_result(user_uttr_api_result)
new_instance = {
"dialog_id": dialog_datum["dialogue_idx"],
"turn_id": turn["turn_idx"],
}
# Model two prediction problems.
# A: Context -> API call
api_predict = TEMPLATE_API_PREDICT.format(
context=context,
START_API_CALL=START_API_CALL,
)
api_target = TEMPLATE_API_TARGET.format(
belief_state=str_belief_state,
END_API_CALL=END_API_CALL,
)
instances.append(
{
"dialog_id": dialog_datum["dialogue_idx"],
"turn_id": turn["turn_idx"],
"predict": api_predict,
"target": api_target,
"type": "API",
}
)
# B: Context API call, API result --> Response
response_predict = TEMPLATE_RESPONSE_PREDICT.format(
context=context,
START_API_CALL=START_API_CALL,
belief_state=str_belief_state,
END_API_CALL=END_API_CALL,
START_API_RESULT=START_API_RESULT,
api_result=str_api_result,
START_RESPONSE=START_RESPONSE,
)
response_target = TEMPLATE_RESPONSE_TARGET.format(
response=asst_uttr, END_SENTENCE=END_SENTENCE
)
instances.append(
{
"dialog_id": dialog_datum["dialogue_idx"],
"turn_id": turn["turn_idx"],
"predict": response_predict,
"target": response_target,
"type": "RESPONSE",
}
)
if train:
special_tokens = {"eos_token": END_SENTENCE, "pad_token": PAD_TOKEN}
special_tokens["additional_special_tokens"] = list(additional_special_tokens)
else:
special_tokens = None
return instances, data["split"], special_tokens
def format_context(prev_asst_uttr, user_uttr, memory_objects):
context = ""
if prev_asst_uttr:
context += f"{SYSTEM} {prev_asst_uttr} "
# Add multimodal contexts.
context += represent_memory_objects(memory_objects) + " "
context += f"{USER} {user_uttr}"
return context
def format_api_call(user_uttr_api_call_type, user_uttr_parameters):
str_belief_state_per_frame = (
"{act} [ {slot_values} ] ({request_slots}) < {objects} >".format(
act=user_uttr_api_call_type.strip(),
slot_values=", ".join(
[
f"{k.strip()} = {str(v).strip()}"
for k, v in user_uttr_parameters["slot_values"].items()
]
),
request_slots=", ".join(user_uttr_parameters["request_slots"]),
objects=", ".join([str(o) for o in user_uttr_parameters["memories"]]),
)
)
return str_belief_state_per_frame
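# Example output (hypothetical parameters):
#   format_api_call(
#       "API_CALL_TYPE.SEARCH",
#       {"slot_values": {"location": "Seattle"}, "request_slots": [], "memories": [123]},
#   )
#   -> "API_CALL_TYPE.SEARCH [ location = Seattle ] () < 123 >"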
def format_api_result(user_uttr_api_result):
simple_retrieved_info = {}
if user_uttr_api_result["results"]["retrieved_info"] != []:
for memory_id, info in user_uttr_api_result["results"][
"retrieved_info"
].items():
# memory_id: '[Memory ID: 1035119]'
simple_memory_id = memory_id.split("[Memory ID: ")[-1][:-1]
simple_retrieved_info[simple_memory_id] = {}
for slot, value in info.items():
if slot == "location":
simple_retrieved_info[simple_memory_id][slot] = value["place"]
else:
simple_retrieved_info[simple_memory_id][slot] = value
str_api_result = (
"{api_status} [ {retrieved_info} ] < {retrieved_memories} >".format(
api_status=user_uttr_api_result["status"],
retrieved_info=", ".join(
[
f"{k.strip()} = {str(v).strip()}"
for k, v in simple_retrieved_info.items()
]
).replace("'", ""),
retrieved_memories=", ".join(
[str(o) for o in user_uttr_api_result["results"]["retrieved_memories"]]
),
)
)
return str_api_result
def represent_memory_objects(object_ids):
# Stringify visual objects (JSON)
str_objects = ", ".join([f"{oo}<MM_BREAK>" for oo in object_ids])
return f"{MM_CONTEXT} {str_objects}"
def main(args):
instances, split, special_tokens = format_memory_dialog_json(
args["train_json_path"], train=True
)
save_file_path = os.path.join(args["save_folder"], "mem_dials_gpt2_train.json")
with open(save_file_path, "w") as file_id:
json.dump(instances, file_id)
save_file_path = os.path.join(
args["save_folder"], "mem_dials_gpt2_special_tokens.json"
)
with open(save_file_path, "w") as file_id:
json.dump(special_tokens, file_id)
for file_path in args["unseen_json_path"]:
instances, split, _ = format_memory_dialog_json(file_path)
save_file_path = os.path.join(
args["save_folder"], f"mem_dials_gpt2_{split}.json"
)
with open(save_file_path, "w") as file_id:
json.dump(instances, file_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--train_json_path",
required=True,
help="Path to the train dataset",
)
parser.add_argument(
"--unseen_json_path",
default=[],
required=False,
nargs="+",
help="Path to other unseen datsets (val|devtest|test)",
)
parser.add_argument(
"--predict_belief_state",
action="store_true",
help="Include belief state in the prediction",
)
parser.add_argument(
"--save_folder", required=True, help="Path to save the processed files"
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
main(parsed_args)
|
comet_memory_dialog-main
|
models/gpt2_mm/utils/preprocess_memory_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
#!/usr/bin/env python3
"""
Script for converting the main SIMMC datasets (.JSON format)
into the line-by-line stringified format (and back).
The reformatted data is used as input for the GPT-2 based
DST model baseline.
"""
import json
import re
import os
# DSTC style dataset fieldnames
FIELDNAME_DIALOG = "dialogue"
FIELDNAME_USER_UTTR = "transcript"
FIELDNAME_ASST_UTTR = "system_transcript"
FIELDNAME_API_CALL = "api_call"
FIELDNAME_API_RESULT = "api_result"
FIELDNAME_USER_STATE = "transcript_annotated"
FIELDNAME_SYSTEM_STATE = "system_transcript_annotated"
# Templates for GPT-2 formatting
START_OF_MULTIMODAL_CONTEXTS = "<SOM>"
END_OF_MULTIMODAL_CONTEXTS = "<EOM>"
START_OF_API_CALL = "=> <SOAC>:"
END_OF_API_CALL = "<EOAC>"
START_OF_API_RESULT = "<SOAR>"
END_OF_API_RESULT = "<EOAR>"
START_OF_RESPONSE = "<SOR>"
END_OF_SENTENCE = "<EOS>"
SYSTEM = "<SYSTEM>"
USER = "<USER>"
TEMPLATE_PREDICT_API = "{context} {START_OF_API_CALL} "
TEMPLATE_TARGET_API = "{context} {START_OF_API_CALL} {belief_state} {END_OF_API_CALL}"
TEMPLATE_PREDICT_RESPONSE = (
"{context} {START_OF_API_CALL} {belief_state} {END_OF_API_CALL} "
"{api_result} {END_OF_API_RESULT} "
)
TEMPLATE_TARGET_RESPONSE = (
"{context} {START_OF_API_CALL} {belief_state} {END_OF_API_CALL} "
"{api_result} {END_OF_API_RESULT} "
"{response} {END_OF_SENTENCE}"
)
TEMPLATE_PREDICT = "{context} {START_OF_API_CALL} "
TEMPLATE_TARGET = (
"{context} {START_OF_API_CALL} {belief_state} {END_OF_API_CALL} "
"{api_result} {END_OF_API_RESULT} "
"{response} {END_OF_SENTENCE}"
)
# No belief state predictions and target.
TEMPLATE_PREDICT_NOBELIEF = "{context} {START_OF_RESPONSE} "
TEMPLATE_TARGET_NOBELIEF = "{context} {START_OF_RESPONSE} {response} {END_OF_SENTENCE}"
def convert_json_to_flattened(
input_path_json,
output_path_predict,
output_path_target,
len_context=2,
use_multimodal_contexts=True,
use_belief_states=True,
input_path_special_tokens="",
output_path_special_tokens="",
):
"""
Input: JSON representation of the dialogs
Output: line-by-line stringified representation of each turn
"""
with open(input_path_json, "r") as f_in:
data = json.load(f_in)["dialogue_data"]
# Predictions and targets for:
# (a) API call
# (b) Response Generation
# Dialog id and turn id for each instance.
predicts = []
targets = []
dialog_turn_info = []
if input_path_special_tokens != "":
with open(input_path_special_tokens, "r") as f_in:
special_tokens = json.load(f_in)
else:
special_tokens = {"eos_token": END_OF_SENTENCE}
additional_special_tokens = [SYSTEM, USER]
if use_belief_states:
additional_special_tokens.append(END_OF_API_CALL)
additional_special_tokens.append(END_OF_API_RESULT)
else:
additional_special_tokens.append(START_OF_RESPONSE)
if use_multimodal_contexts:
additional_special_tokens.extend(
[START_OF_MULTIMODAL_CONTEXTS, END_OF_MULTIMODAL_CONTEXTS]
)
special_tokens["additional_special_tokens"] = additional_special_tokens
if output_path_special_tokens != "":
# If a new output path for special tokens is given,
# we track new OOVs
oov = set()
for _, dialog in enumerate(data):
prev_asst_uttr = None
prev_turn = None
lst_context = []
for turn in dialog[FIELDNAME_DIALOG]:
user_uttr = turn[FIELDNAME_USER_UTTR].replace("\n", " ").strip()
user_uttr_api_call_type = turn[FIELDNAME_API_CALL]["call_type"]
user_uttr_api_result = turn.get(FIELDNAME_API_RESULT, {})
user_uttr_parameters = turn[FIELDNAME_USER_STATE][-1]["act_attributes"]
asst_uttr = turn[FIELDNAME_ASST_UTTR].replace("\n", " ").strip()
# Format main input context
if prev_asst_uttr and use_multimodal_contexts:
memory_objects = prev_turn[FIELDNAME_SYSTEM_STATE][-1][
"act_attributes"
]["memories"]
else:
memory_objects = []
context = format_context(
prev_asst_uttr, user_uttr, memory_objects, use_multimodal_contexts
)
prev_asst_uttr = asst_uttr
prev_turn = turn
# Add multimodal contexts -- user shouldn't have access to ground-truth
"""
if use_multimodal_contexts:
memory_objects = turn[FIELDNAME_API_CALL]['act_attributes']['memories']
context += ' ' + represent_memory_objects(memory_objects)
"""
# Concat with previous contexts
lst_context.append(context)
context = " ".join(lst_context[-len_context:])
# Format belief state
if use_belief_states:
# Skip if the api_call is unknown
if user_uttr_api_call_type == "None":
continue
if (
user_uttr_api_result == {}
or user_uttr_api_result.get("status", "None") == "None"
):
continue
belief_state = []
# for bs_per_frame in user_uttr_api_call_type:
# ***** Temp fix for null participant *****
if "participant" in user_uttr_parameters["slot_values"]:
user_uttr_parameters["slot_values"]["participant"] = [
p
for p in user_uttr_parameters["slot_values"]["participant"]
if p is not None
]
# ************************************************
# Format for API Call
str_belief_state = format_api_call(
user_uttr_api_call_type, user_uttr_parameters
)
# Track OOVs
if output_path_special_tokens != "":
oov.add(user_uttr_api_call_type)
for slot_name in user_uttr_parameters["slot_values"]:
oov.add(str(slot_name))
# slot_name, slot_value = kv[0].strip(), kv[1].strip()
# oov.add(slot_name)
# oov.add(slot_value)
# Format for API Result
str_api_result = format_api_result(user_uttr_api_result)
# A. Format the predicts and targets for API call.
predict = TEMPLATE_PREDICT_API.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
)
predicts.append(predict)
target = TEMPLATE_TARGET_API.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
belief_state=str_belief_state,
END_OF_API_CALL=END_OF_API_CALL,
)
targets.append(target)
dialog_turn_info.append(
str((dialog["dialogue_idx"], turn["turn_idx"], "api_call"))
)
# B. Format the predicts and targets for response.
predict = TEMPLATE_PREDICT_RESPONSE.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
belief_state=str_belief_state,
END_OF_API_CALL=END_OF_API_CALL,
api_result=str_api_result,
END_OF_API_RESULT=END_OF_API_RESULT,
)
predicts.append(predict)
target = TEMPLATE_TARGET_RESPONSE.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
belief_state=str_belief_state,
END_OF_API_CALL=END_OF_API_CALL,
api_result=str_api_result,
END_OF_API_RESULT=END_OF_API_RESULT,
response=asst_uttr,
END_OF_SENTENCE=END_OF_SENTENCE,
)
targets.append(target)
dialog_turn_info.append(
str((dialog["dialogue_idx"], turn["turn_idx"], "response"))
)
else:
# Format the main input
predict = TEMPLATE_PREDICT_NOBELIEF.format(
context=context, START_OF_RESPONSE=START_OF_RESPONSE
)
predicts.append(predict)
# Format the main output
target = TEMPLATE_TARGET_NOBELIEF.format(
context=context,
response=asst_uttr,
END_OF_SENTENCE=END_OF_SENTENCE,
START_OF_RESPONSE=START_OF_RESPONSE,
)
targets.append(target)
# Create a directory if it does not exist
directory = os.path.dirname(output_path_predict)
if not os.path.exists(directory):
os.makedirs(directory, exist_ok=True)
directory = os.path.dirname(output_path_target)
if not os.path.exists(directory):
os.makedirs(directory, exist_ok=True)
# Output into text files
with open(output_path_predict, "w") as f_predict:
X = "\n".join(predicts)
f_predict.write(X)
with open(output_path_target, "w") as f_target:
Y = "\n".join(targets)
f_target.write(Y)
output_path_dialog_info = output_path_target.replace(".txt", "_dialog_turn.txt")
with open(output_path_dialog_info, "w") as f_target:
Y = "\n".join(dialog_turn_info)
f_target.write(Y)
if output_path_special_tokens != "":
# Create a directory if it does not exist
directory = os.path.dirname(output_path_special_tokens)
if not os.path.exists(directory):
os.makedirs(directory, exist_ok=True)
with open(output_path_special_tokens, "w") as f_special_tokens:
# Add oov's (acts and slot names, etc.) to special tokens as well
special_tokens["additional_special_tokens"].extend(list(oov))
json.dump(special_tokens, f_special_tokens)
def format_context(prev_asst_uttr, user_uttr, memory_objects, use_multimodal_contexts):
context = ""
if prev_asst_uttr:
context += f"{SYSTEM} : {prev_asst_uttr} "
if use_multimodal_contexts:
# Add multimodal contexts
context += represent_memory_objects(memory_objects) + " "
context += f"{USER} : {user_uttr}"
return context
def format_api_call(user_uttr_api_call_type, user_uttr_parameters):
str_belief_state_per_frame = (
"{act} [ {slot_values} ] ({request_slots}) < {objects} >".format(
act=user_uttr_api_call_type.strip(),
slot_values=", ".join(
[
f"{k.strip()} = {str(v).strip()}"
for k, v in user_uttr_parameters["slot_values"].items()
]
),
request_slots=", ".join(user_uttr_parameters["request_slots"]),
objects=", ".join([str(o) for o in user_uttr_parameters["memories"]]),
)
)
return str_belief_state_per_frame
def format_api_result(user_uttr_api_result):
simple_retrieved_info = {}
if user_uttr_api_result["results"]["retrieved_info"] != []:
for memory_id, info in user_uttr_api_result["results"][
"retrieved_info"
].items():
# memory_id: '[Memory ID: 1035119]'
simple_memory_id = memory_id.split("[Memory ID: ")[-1][:-1]
simple_retrieved_info[simple_memory_id] = {}
for slot, value in info.items():
if slot == "location":
simple_retrieved_info[simple_memory_id][slot] = value["place"]
else:
simple_retrieved_info[simple_memory_id][slot] = value
str_api_result = (
"{api_status} [ {retrieved_info} ] < {retrieved_memories} >".format(
api_status=user_uttr_api_result["status"],
retrieved_info=", ".join(
[
f"{k.strip()} = {str(v).strip()}"
for k, v in simple_retrieved_info.items()
]
).replace("'", ""),
retrieved_memories=", ".join(
[str(o) for o in user_uttr_api_result["results"]["retrieved_memories"]]
),
)
)
return str_api_result
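# Illustrative example (hypothetical API result; the status value is assumed):
#   >>> format_api_result({
#   ...     "status": "AVAILABLE",
#   ...     "results": {
#   ...         "retrieved_info": {"[Memory ID: 1035119]": {"time": "2021"}},
#   ...         "retrieved_memories": [1035119],
#   ...     },
#   ... })
#   'AVAILABLE [ 1035119 = {time: 2021} ] < 1035119 >'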
def represent_memory_objects(object_ids):
# Stringify memory object ids. The triple-quoted block below is unused
# legacy code (it rendered full visual-object attributes) kept for reference.
"""
target_attributes = ['pos', 'color', 'type', 'class_name', 'decor_style']
list_str_objects = []
for obj_name, obj in memory_objects.items():
s = obj_name + ' :'
for target_attribute in target_attributes:
if target_attribute in obj:
target_value = obj.get(target_attribute)
if target_value == '' or target_value == []:
pass
else:
s += f' {target_attribute} {str(target_value)}'
list_str_objects.append(s)
str_objects = ' '.join(list_str_objects)
"""
str_objects = ", ".join([str(o) for o in object_ids])
return f"{START_OF_MULTIMODAL_CONTEXTS} {str_objects} {END_OF_MULTIMODAL_CONTEXTS}"
def parse_flattened_results_from_file(path):
results = []
with open(path, "r") as f_in:
for line in f_in:
parsed = parse_flattened_result(line)
results.append(parsed)
return results
def parse_flattened_result(to_parse):
"""
Parse out the belief state from the raw text.
Return an empty list if the belief state can't be parsed
Input:
- A single <str> of flattened result
e.g. 'User: Show me something else => Belief State : DA:REQUEST ...'
Output:
- Parsed result in a JSON format, where the format is:
[
{
'act': <str> # e.g. 'DA:REQUEST',
'slots': [
<str> slot_name,
<str> slot_value
]
}, ... # End of a frame
] # End of a dialog
"""
# dialog_act_regex = re.compile(r'([\w:?.?]*) *\[([^\]]*)\] *\(([^\]]*)\) *\<([^\]]*)\>')
dialog_act_regex = re.compile(r"([\w:?.?]*) *\[(.*)\] *\(([^\]]*)\) *\<([^\]]*)\>")
slot_regex = re.compile(r"([A-Za-z0-9_.-:]*) *= *(\[([^\]]*)\]|[^,]*)")
# TODO: More elegant way to match in a non-greedy way. Needs testing.
# slot_regex = re.compile(r"([A-Za-z0-9_.-:]*) *= *(\[(.*?)\]|[^,]*)")
request_regex = re.compile(r"([A-Za-z0-9_.-:]+)")
object_regex = re.compile(r"([A-Za-z0-9]+)")
belief = []
# Parse
splits = to_parse.strip().split(START_OF_API_CALL)
if len(splits) == 2:
to_parse = splits[1].strip()
splits = to_parse.split(END_OF_API_CALL)
if len(splits) == 2:
# to_parse: 'DIALOG_ACT_1 : [ SLOT_NAME = SLOT_VALUE, ... ] ...'
to_parse = splits[0].strip()
for dialog_act in dialog_act_regex.finditer(to_parse):
d = {
"act": dialog_act.group(1),
"slots": [],
"request_slots": [],
"memories": [],
}
for slot in slot_regex.finditer(dialog_act.group(2)):
d["slots"].append([slot.group(1).strip(), slot.group(2).strip()])
for request_slot in request_regex.finditer(dialog_act.group(3)):
d["request_slots"].append(request_slot.group(1).strip())
for object_id in object_regex.finditer(dialog_act.group(4)):
d["memories"].append(object_id.group(1).strip())
if d != {}:
belief.append(d)
return belief
def test_example(to_parse):
"""Tests parser on an example string.
Args:
to_parse: String to parse.
"""
print(to_parse)
result = parse_flattened_result(to_parse)
for slot, value in result[0]["slots"]:
print(f"{slot} = {value}")
print("-" * 50)
if __name__ == "__main__":
test_examples = [
" => <SOAC>: API_CALL_TYPE.SEARCH [ location = Alki Beach ] () < > <EOAC>",
" => <SOAC>: API_CALL_TYPE.GET_INFO [ ] (time) < 1022778 > <EOAC>",
" => <SOAC>: API_CALL_TYPE.SEARCH [ activity = ['cooking sausages', 'another activity'], time = 3 ] () < > <EOAC>",
" => <SOAC>: API_CALL_TYPE.SEARCH [ location = Bear Mountain, participant = ['Shane', 'Satwik'], activity = ['eating', 'typing'], time = 2021 ] () < > <EOAC>",
]
for example in test_examples:
test_example(example)
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/utils/convert.py
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
Adapted from:
https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_language_modeling.py
"""
import argparse
import glob
import json
import logging
import os
import random
import re
import shutil
from typing import Dict, List, Tuple
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
MODEL_WITH_LM_HEAD_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
get_linear_schedule_with_warmup,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
class TextDataset(Dataset):
def __init__(
self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512
):
assert os.path.isfile(file_path)
# Reserve room for the special tokens added around each block.
block_size = block_size - (
tokenizer.model_max_length - tokenizer.max_len_single_sentence
)
self.examples = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
for i in range(
0, len(tokenized_text) - block_size + 1, block_size
): # Truncate in block of block_size
self.examples.append(
tokenizer.build_inputs_with_special_tokens(
tokenized_text[i : i + block_size]
)
)
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return torch.tensor(self.examples[item], dtype=torch.long)
class LineByLineTextDataset(Dataset):
def __init__(
self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512
):
print(file_path)
assert os.path.isfile(file_path)
# Here, we do not cache the features, operating under the assumption
# that we will soon use fast multithreaded tokenizers from the
# `tokenizers` repo everywhere =)
logger.info("Creating features from dataset file at %s", file_path)
with open(file_path, encoding="utf-8") as f:
lines = [
line
for line in f.read().splitlines()
if (len(line) > 0 and not line.isspace())
]
self.examples = tokenizer.batch_encode_plus(
lines, add_special_tokens=True, max_length=block_size
)["input_ids"]
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return torch.tensor(self.examples[i], dtype=torch.long)
def load_and_cache_examples(args, tokenizer, evaluate=False):
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
dataset = LineByLineTextDataset(
tokenizer, args, file_path=file_path, block_size=args.block_size
)
else:
dataset = TextDataset(
tokenizer, args, file_path=file_path, block_size=args.block_size
)
# Unknown issues have been reported around not being able to handle incomplete batches (e.g. w/ older CUDA 9.2)
# Below is a workaround in case you encounter this issue.
# Alternatively, --no_cuda could avoid this issue too.
# Comment out the following if you do not encounter this issue or if you are not using any GPU.
n = len(dataset) % args.per_gpu_train_batch_size
if n != 0:
print("Truncating from %d examples" % len(dataset.examples))
dataset.examples = dataset.examples[:-n]
print("Truncating to %d examples" % len(dataset.examples))
return dataset
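# Illustrative effect of the workaround above: with 1003 line-by-line
# examples and per_gpu_train_batch_size == 4, the last 3 examples are
# dropped so that every batch is full.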
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def _sorted_checkpoints(
args, checkpoint_prefix="checkpoint", use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(
os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix))
)
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append(
(int(regex_match.groups()[0]), path)
)
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
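# Illustrative behavior: with output_dir containing checkpoint-500/ and
# checkpoint-1000/, this returns
#   ['<output_dir>/checkpoint-500', '<output_dir>/checkpoint-1000']
# (sorted by step, oldest first), so _rotate_checkpoints below deletes
# from the front of the list.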
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
if not args.save_total_limit:
return
if args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
if len(checkpoints_sorted) <= args.save_total_limit:
return
number_of_checkpoints_to_delete = max(
0, len(checkpoints_sorted) - args.save_total_limit
)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(
"Deleting older checkpoint [{}] due to args.save_total_limit".format(
checkpoint
)
)
shutil.rmtree(checkpoint)
def mask_tokens(
inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original."""
if tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, args.mlm_probability)
special_tokens_mask = [
tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in labels.tolist()
]
probability_matrix.masked_fill_(
torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0
)
if tokenizer._pad_token is not None:
padding_mask = labels.eq(tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = (
torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
)
inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = (
torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
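# Sketch of the masking scheme (illustrative; assumes a BERT-style tokenizer
# and args.mlm_probability == 0.15):
#   batch = torch.tensor([[101, 7592, 2088, 102]])  # [CLS] hello world [SEP]
#   inputs, labels = mask_tokens(batch, tokenizer, args)
# Special tokens (101/102 here) are never masked; labels are -100 at all
# unmasked positions, so the MLM loss is computed only on masked tokens.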
def train(
args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer
) -> Tuple[int, float]:
"""Train the model"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(
examples, batch_first=True, padding_value=tokenizer.pad_token_id
)
train_sampler = (
RandomSampler(train_dataset)
if args.local_rank == -1
else DistributedSampler(train_dataset)
)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
collate_fn=collate,
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps
// (len(train_dataloader) // args.gradient_accumulation_steps)
+ 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
model = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model.resize_token_embeddings(len(tokenizer))
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if (
args.model_name_or_path
and os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(
torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))
)
scheduler.load_state_dict(
torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level
)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if args.model_name_or_path and os.path.exists(args.model_name_or_path):
try:
# set global_step to the global_step of the last saved checkpoint from the model path
checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (
len(train_dataloader) // args.gradient_accumulation_steps
)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(
" Continuing training from checkpoint, will skip to saved global_step"
)
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(
" Will skip the first %d steps in the first epoch",
steps_trained_in_current_epoch,
)
except ValueError:
logger.info(" Starting fine-tuning.")
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
)
set_seed(args) # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(
train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]
)
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs, labels = (
mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)
)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
model.train()
outputs = (
model(inputs, masked_lm_labels=labels)
if args.mlm
else model(inputs, labels=labels)
)
loss = outputs[
0
] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm
)
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm
)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0]
and args.logging_steps > 0
and global_step % args.logging_steps == 0
):
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar(
"eval_{}".format(key), value, global_step
)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar(
"loss",
(tr_loss - logging_loss) / args.logging_steps,
global_step,
)
logging_loss = tr_loss
if (
args.local_rank in [-1, 0]
and args.save_steps > 0
and global_step % args.save_steps == 0
):
checkpoint_prefix = "checkpoint"
# Save model checkpoint
output_dir = os.path.join(
args.output_dir, "{}-{}".format(checkpoint_prefix, global_step)
)
os.makedirs(output_dir, exist_ok=True)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
_rotate_checkpoints(args, checkpoint_prefix)
torch.save(
optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")
)
torch.save(
scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")
)
logger.info(
"Saving optimizer and scheduler states to %s", output_dir
)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(
args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix=""
) -> Dict:
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
if args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir, exist_ok=True)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(
examples, batch_first=True, padding_value=tokenizer.pad_token_id
)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=args.eval_batch_size,
collate_fn=collate,
)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
inputs, labels = (
mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)
)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
with torch.no_grad():
outputs = (
model(inputs, masked_lm_labels=labels)
if args.mlm
else model(inputs, labels=labels)
)
lm_loss = outputs[0]
eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {"perplexity": perplexity}
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return result
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--train_data_file",
default=None,
type=str,
required=True,
help="The input training data file (a text file).",
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--model_type",
type=str,
required=True,
help="The model architecture to be trained or fine-tuned.",
)
# Other parameters
parser.add_argument(
"--eval_data_file",
default=None,
type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).",
)
parser.add_argument(
"--line_by_line",
action="store_true",
help="Whether distinct lines of text in the dataset are to be handled as distinct sequences.",
)
parser.add_argument(
"--should_continue",
action="store_true",
help="Whether to continue from latest checkpoint in output_dir",
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
help="The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.",
)
parser.add_argument(
"--mlm",
action="store_true",
help="Train with masked-language modeling loss instead of language modeling.",
)
parser.add_argument(
"--mlm_probability",
type=float,
default=0.15,
help="Ratio of tokens to mask for masked language modeling loss",
)
parser.add_argument(
"--config_name",
default=None,
type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.",
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path. If both are None, initialize a new tokenizer.",
)
parser.add_argument(
"--add_special_tokens",
default=None,
type=str,
help="Optional file containing a JSON dictionary of special tokens that should be added to the tokenizer.",
)
parser.add_argument(
"--cache_dir",
default=None,
type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
)
parser.add_argument(
"--block_size",
default=-1,
type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).",
)
parser.add_argument(
"--do_train", action="store_true", help="Whether to run training."
)
parser.add_argument(
"--do_eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight decay if we apply some."
)
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--num_train_epochs",
default=1.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--logging_steps", type=int, default=500, help="Log every X updates steps."
)
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--save_total_limit",
type=int,
default=None,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip", type=str, default="", help="For distant debugging."
)
parser.add_argument(
"--server_port", type=str, default="", help="For distant debugging."
)
args = parser.parse_args()
if (
args.model_type in ["bert", "roberta", "distilbert", "camembert"]
and not args.mlm
):
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if args.eval_data_file is None and args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if args.should_continue:
sorted_checkpoints = _sorted_checkpoints(args)
if len(sorted_checkpoints) == 0:
raise ValueError(
"Used --should_continue but no checkpoint was found in --output_dir."
)
else:
args.model_name_or_path = sorted_checkpoints[-1]
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
and not args.should_continue
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True
)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name, cache_dir=args.cache_dir)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(
args.model_name_or_path, cache_dir=args.cache_dir
)
else:
# When we release a pip version exposing CONFIG_MAPPING,
# we can do `config = CONFIG_MAPPING[args.model_type]()`.
raise ValueError(
"You are instantiating a new config instance from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --config_name"
)
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name, cache_dir=args.cache_dir
)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, cache_dir=args.cache_dir, local_files_only=True
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name"
)
if args.add_special_tokens:
if not os.path.exists(args.add_special_tokens):
raise ValueError(
f"Additional special tokens file {args.add_special_tokens} not found"
)
with open(args.add_special_tokens, "rb") as handle:
special_tokens_dict = json.load(handle)
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
logger.info(f"Added {num_added_toks} tokens")
logger.info(f"All special tokens: {tokenizer.all_special_tokens}")
if args.block_size <= 0:
args.block_size = tokenizer.model_max_length
# Our input block size will be the max possible for the model
else:
args.block_size = min(args.block_size, tokenizer.model_max_length)
if args.model_name_or_path:
model = AutoModelWithLMHead.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelWithLMHead.from_config(config)
# ensure model aligns with any addition of special tokens
# (unclear if this step is needed for a new model)
if args.add_special_tokens:
model.resize_token_embeddings(len(tokenizer))
model.to(args.device)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = AutoModelWithLMHead.from_pretrained(args.output_dir)
tokenizer = AutoTokenizer.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = [
os.path.dirname(c)
for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)
)
]
logging.getLogger("transformers.modeling_utils").setLevel(
logging.WARN
) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = (
checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
)
model = AutoModelWithLMHead.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = {k + "_{}".format(global_step): v for k, v in result.items()}
results.update(result)
return results
if __name__ == "__main__":
main()
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/run_language_modeling.py
|
#!/usr/bin/env python3
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Scripts for converting the main SIMMC datasets (.JSON format)
into the line-by-line stringified format (and back).
The reformatted data is used as input for the GPT-2 based
DST model baseline.
"""
from gpt2_dst.utils.convert import convert_json_to_flattened
import argparse
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_path_json", help="input path to the original dialog data"
)
parser.add_argument("--output_path_predict", help="output path for model input")
parser.add_argument("--output_path_target", help="output path for full target")
parser.add_argument(
"--input_path_special_tokens",
help="input path for special tokens. blank if not provided",
default="",
)
parser.add_argument(
"--output_path_special_tokens",
help="output path for special tokens. blank if not saving",
default="",
)
parser.add_argument(
"--len_context",
help="# of turns to include as dialog context",
type=int,
default=2,
)
parser.add_argument(
"--use_multimodal_contexts",
help="determine whether to use the multimodal contexts each turn",
type=int,
default=1,
)
parser.add_argument(
"--no_belief_states",
dest="use_belief_states",
action="store_false",
default=True,
help="determine whether to use belief state for each turn",
)
args = parser.parse_args()
input_path_json = args.input_path_json
output_path_predict = args.output_path_predict
output_path_target = args.output_path_target
input_path_special_tokens = args.input_path_special_tokens
output_path_special_tokens = args.output_path_special_tokens
len_context = args.len_context
use_multimodal_contexts = bool(args.use_multimodal_contexts)
# DEBUG:
print("Belief states: {}".format(args.use_belief_states))
# Convert the data into GPT-2 friendly format
convert_json_to_flattened(
input_path_json,
output_path_predict,
output_path_target,
input_path_special_tokens=input_path_special_tokens,
output_path_special_tokens=output_path_special_tokens,
len_context=len_context,
use_multimodal_contexts=use_multimodal_contexts,
use_belief_states=args.use_belief_states,
)
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/preprocess_input.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
"""
Scripts for evaluating the GPT-2 DST model predictions.
First, we parse the line-by-line stringified format into responses
and compute BLEU score.
"""
import argparse
import json
from gpt2_dst.utils.convert import parse_flattened_results_from_file
from utils.evaluate_dst import evaluate_from_flat_list
import nltk
import numpy as np
def normalize_sentence(sentence):
"""Normalize the sentences and tokenize."""
return nltk.tokenize.word_tokenize(sentence.lower())
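# e.g. normalize_sentence("Hello, World!") -> ['hello', ',', 'world', '!']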
def parse_response_from_file(input_path):
"""Parses the response from a flattened file.
Args:
input_path: Path to read the responses from.
"""
lines = []
with open(input_path, "r") as file_id:
for ii in file_id.readlines():
split_line = ii.split("<SOR>", 1)
lines.append(
(split_line[0].strip("\n"), split_line[1].strip("\n").strip("<EOS>"))
)
return lines
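# Illustrative line format (assuming "<SOR>" marks the start of the response
# and "<EOS>" the end of the sentence): a line such as
#   'context text <SOR> Sure, here it is. <EOS>'
# is parsed into the pair ('context text ', ' Sure, here it is. ').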
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_path_target", help="path for target, line-separated format (.txt)"
)
parser.add_argument(
"--input_path_predicted",
help="path for model prediction output, line-separated format (.txt)",
)
parser.add_argument(
"--output_path_report", help="path for saving evaluation summary (.json)"
)
args = parser.parse_args()
input_path_target = args.input_path_target
input_path_predicted = args.input_path_predicted
output_path_report = args.output_path_report
# Convert the data from the GPT-2 friendly format to JSON
list_target = parse_response_from_file(input_path_target)
list_predicted = parse_response_from_file(input_path_predicted)
# Compute BLEU scores.
bleu_scores = []
# Smoothing function.
chencherry = nltk.translate.bleu_score.SmoothingFunction()
for response, gt_response in zip(list_predicted, list_target):
assert response[0] == gt_response[0], "Input contexts do not match!"
bleu_score = nltk.translate.bleu_score.sentence_bleu(
[normalize_sentence(gt_response[1])],
normalize_sentence(response[1]),
smoothing_function=chencherry.method7,
)
bleu_scores.append(bleu_score)
print(
"BLEU score: {} +- {}".format(
np.mean(bleu_scores), np.std(bleu_scores) / np.sqrt(len(bleu_scores))
)
)
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/evaluate_response.py
|
#!/usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
"""
Gets the best model given all the checkpoints.
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import re
def main(args):
for folder_name in args["model_checkpoint_folder"]:
listing = [ii for ii in os.listdir(folder_name) if "checkpoint-" in ii]
valid_metrics = {}
for checkpoint_name in listing:
checkpoint_folder = os.path.join(folder_name, checkpoint_name)
eval_path = os.path.join(checkpoint_folder, "eval_results.txt")
epoch_search = re.search(r"checkpoint-(\d*)", checkpoint_name)
with open(eval_path, "r") as file_id:
result = [ii.strip("\n") for ii in file_id.readlines()][0]
perplexity_search = re.search(r"([0-9\.]+)", result)
# NOTE: Does not handle error conditions.
if perplexity_search is None or epoch_search is None:
print(f"Missing epoch: {checkpoint_name}")
continue
perplexity = float(perplexity_search.group(1))
epoch = int(epoch_search.group(1))
valid_metrics[epoch] = perplexity
best_epoch, _ = min(valid_metrics.items(), key=lambda x: x[1])
best_folder = os.path.join(folder_name, f"checkpoint-{best_epoch}")
print(best_folder)
print("." * 50)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--model_checkpoint_folder",
nargs="+",
required=True,
help="List of model checkpoint folders",
)
try:
parsed_args = vars(parser.parse_args())
except IOError as msg:
parser.error(str(msg))
main(parsed_args)
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/get_best_model.py
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)
Adapted from
https://github.com/huggingface/transformers/blob/master/examples/text-generation/run_generation.py
"""
import argparse
import logging
import os
import numpy as np
import torch
from transformers import (
CTRLLMHeadModel,
CTRLTokenizer,
GPT2LMHeadModel,
GPT2Tokenizer,
OpenAIGPTLMHeadModel,
OpenAIGPTTokenizer,
TransfoXLLMHeadModel,
TransfoXLTokenizer,
XLMTokenizer,
XLMWithLMHeadModel,
XLNetLMHeadModel,
XLNetTokenizer,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
MODEL_CLASSES = {
"gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
"ctrl": (CTRLLMHeadModel, CTRLTokenizer),
"openai-gpt": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
"xlnet": (XLNetLMHeadModel, XLNetTokenizer),
"transfo-xl": (TransfoXLLMHeadModel, TransfoXLTokenizer),
"xlm": (XLMWithLMHeadModel, XLMTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
#
# Functions to prepare models' input
#
def prepare_ctrl_input(args, _, tokenizer, prompt_text):
if args.temperature > 0.7:
logger.info(
"CTRL typically works better with lower temperatures (and lower top_k)."
)
encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=True)
if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()):
logger.info(
"WARNING! You are not starting your generation from a control code so you won't get good results"
)
return prompt_text
def prepare_xlm_input(args, model, tokenizer, prompt_text):
# kwargs = {"language": None, "mask_token_id": None}
# Set the language
use_lang_emb = hasattr(model.config, "use_lang_emb") and model.config.use_lang_emb
if hasattr(model.config, "lang2id") and use_lang_emb:
available_languages = model.config.lang2id.keys()
if args.xlm_language in available_languages:
language = args.xlm_language
else:
language = None
while language not in available_languages:
language = input(
"Using XLM. Select language in "
+ str(list(available_languages))
+ " >>> "
)
model.config.lang_id = model.config.lang2id[language]
# kwargs["language"] = tokenizer.lang2id[language]
# TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers
# XLM masked-language modeling (MLM) models need masked token
# is_xlm_mlm = "mlm" in args.model_name_or_path
# if is_xlm_mlm:
# kwargs["mask_token_id"] = tokenizer.mask_token_id
return prompt_text
def prepare_xlnet_input(args, _, tokenizer, prompt_text):
prompt_text = (
args.padding_text if args.padding_text else PADDING_TEXT
) + prompt_text
return prompt_text
def prepare_transfoxl_input(args, _, tokenizer, prompt_text):
prompt_text = (
args.padding_text if args.padding_text else PADDING_TEXT
) + prompt_text
return prompt_text
PREPROCESSING_FUNCTIONS = {
"ctrl": prepare_ctrl_input,
"xlm": prepare_xlm_input,
"xlnet": prepare_xlnet_input,
"transfo-xl": prepare_transfoxl_input,
}
def adjust_length_to_model(length, max_sequence_length):
if length < 0 and max_sequence_length > 0:
length = max_sequence_length
elif 0 < max_sequence_length < length:
length = max_sequence_length # No generation bigger than model size
elif length < 0:
length = MAX_LENGTH # avoid infinite loop
return length
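# Illustrative behavior:
#   adjust_length_to_model(-1, 1024)   -> 1024  (default to the model max)
#   adjust_length_to_model(2000, 1024) -> 1024  (cap at the model max)
#   adjust_length_to_model(100, 1024)  -> 100   (respect the requested length)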
def load_model(model_type, model_name_or_path, device, length=100):
try:
model_class, tokenizer_class = MODEL_CLASSES[model_type]
except KeyError:
raise KeyError(
"the model {} you specified is not supported. You are welcome to add it and open a PR :)".format(
model_type
)
)
tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
model = model_class.from_pretrained(model_name_or_path)
model.to(device)
length = adjust_length_to_model(
length, max_sequence_length=model.config.max_position_embeddings
)
return model, tokenizer, length
def generate_sequences(
model,
tokenizer,
prompt,
device="cpu",
length=100,
temperature=1.0,
k=0,
p=0.9,
repetition_penalty=1.0,
num_return_sequences=1,
stop_token="<EOS>",
verbose=True,
):
output_sequences, encoded_prompt = generate_sequence_tokens(
model,
tokenizer,
prompt,
device,
length,
temperature,
k,
p,
repetition_penalty,
num_return_sequences,
)
generated_sequences = []
for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
if verbose:
print(
"=== GENERATED SEQUENCE {sequence_idx} ===".format(
sequence_idx=generated_sequence_idx + 1,
)
)
generated_sequence = generated_sequence.tolist()
# Decode text
text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
# Remove all text after the stop token (keep the full text if it is absent)
if stop_token and stop_token in text:
text = text[: text.find(stop_token)]
# Add the prompt at the beginning of the sequence. Remove the
# excess text that was used for pre-processing
generated_text = (
prompt
+ text[
len(
tokenizer.decode(
encoded_prompt[0], clean_up_tokenization_spaces=True
)
) :
]
)
# generated_text = text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
generated_sequences.append(generated_text)
if verbose:
print(prompt)
print("-")
print(generated_text)
return generated_sequences
def generate_sequence_tokens(
model,
tokenizer,
prompt_text,
device="cpu",
length=100,
temperature=1.0,
k=0,
p=0.9,
repetition_penalty=1.0,
num_return_sequences=1,
):
# Assumes model_type not in PREPROCESSING_FUNCTIONS
# Strip any trailing \n if provided
prompt_text = prompt_text.strip("\n")
# Encode prompt
encoded_prompt = tokenizer.encode(
prompt_text, add_special_tokens=True, return_tensors="pt"
)
encoded_prompt = encoded_prompt.to(device)
output_sequences = model.generate(
input_ids=encoded_prompt,
max_length=length + len(encoded_prompt[0]),
temperature=temperature,
top_k=k,
top_p=p,
repetition_penalty=repetition_penalty,
do_sample=True,
num_return_sequences=num_return_sequences,
)
# Remove the batch dimension when returning multiple sequences
if len(output_sequences.shape) > 2:
output_sequences.squeeze_()
return output_sequences, encoded_prompt
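# Illustrative end-to-end usage (sketch; assumes a GPT-2 checkpoint
# fine-tuned with this project's special tokens):
#   model, tokenizer, length = load_model("gpt2", "gpt2", "cpu", length=20)
#   outs = generate_sequences(model, tokenizer, "<USER> : hi => ",
#                             device="cpu", length=20,
#                             stop_token="<EOS>", verbose=False)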
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument(
"--prompts_from_file",
type=str,
default=None,
help="""
read prompts from a file and generate, overrides any prompt given on the
command line""",
)
parser.add_argument("--length", type=int, default=20)
parser.add_argument(
"--stop_token",
type=str,
default=None,
help="Token at which text generation is stopped",
)
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
)
parser.add_argument(
"--repetition_penalty",
type=float,
default=1.0,
help="primarily useful for CTRL model; in that case, use 1.2",
)
parser.add_argument("--k", type=int, default=0)
parser.add_argument("--p", type=float, default=0.9)
parser.add_argument(
"--padding_text",
type=str,
default="",
help="Padding text for Transfo-XL and XLNet.",
)
parser.add_argument(
"--xlm_language",
type=str,
default="",
help="Optional language when used with the XLM model.",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--num_return_sequences",
type=int,
default=1,
help="The number of samples to generate.",
)
parser.add_argument(
"--path_output",
type=str,
default=None,
help="Path to output predictions in a line separated text file.",
)
args = parser.parse_args()
args.device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
set_seed(args)
if args.prompts_from_file and not os.path.exists(args.prompts_from_file):
raise Exception(f"prompt file '{args.prompts_from_file}' not found")
# Initialize the model and tokenizer
args.model_type = args.model_type.lower()
# Load model
model, tokenizer, args.length = load_model(
args.model_type, args.model_name_or_path, args.device, args.length
)
logger.info(args)
results = []
prompts = []
if args.prompts_from_file:
with open(args.prompts_from_file) as handle:
prompts = handle.readlines()
while True:
if not prompts:
prompts = [args.prompt if args.prompt else input("Model prompt >>> ")]
if not args.prompt and (
len(prompts) == 0
or prompts[0].strip() == ""
or prompts[0].lower() == "quit"
):
break # break while True loop
n_prompts = len(prompts)
for i, prompt_text in enumerate(prompts):
generated_sequences = generate_sequences(
model,
tokenizer,
prompt_text,
args.device,
args.length,
args.temperature,
args.k,
args.p,
args.repetition_penalty,
args.num_return_sequences,
args.stop_token,
)
results.append(generated_sequences)
prompts = []
if args.prompt or args.prompts_from_file:
break # break while True loop
if args.path_output is not None:
# Create a directory if it does not exist
directory = os.path.dirname(args.path_output)
if directory:
os.makedirs(directory, exist_ok=True)
# Format results into a line-separated string file
str_results = "\n".join(
[" || ".join(generated_sequences) for generated_sequences in results]
)
# Save to a file
with open(args.path_output, "w") as f_out:
f_out.write(str_results)
return results
if __name__ == "__main__":
main()
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/run_generation.py
|
#!/usr/bin/env python3
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Scripts for reformatting the GPT-2 DST model predictions.
We parse the line-by-line stringified outputs back into structured
DST (API call) and response JSON files for downstream evaluation.
"""
import argparse
import ast
import copy
import json
import re
import numpy as np
import tqdm
from gpt2_dst.utils.convert import parse_flattened_result
def convert_slots_to_dict(api_call_json):
"""Converts the slots from list of lists to a dict.
Args:
api_call_json: JSON containing the parsed API call
"""
for frame_ind, frame in enumerate(api_call_json):
slot_dict = {}
for slot_name, slot_value in frame["slots"]:
if re.match("\[.*\]", slot_value):
try:
slot_dict[slot_name] = ast.literal_eval(slot_value)
except:
# If error when parsing the slots add empty string
print(f"Error parsing: {slot_value} -> {frame}")
slot_dict[slot_name] = ""
else:
slot_dict[slot_name] = slot_value
frame["slots"] = slot_dict
return api_call_json
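# e.g. (hypothetical frame): [{"slots": [["activity", "['eating', 'typing']"]]}]
# becomes [{"slots": {"activity": ["eating", "typing"]}}]; scalar values such
# as "Alki Beach" are kept as plain strings.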
def parse_results_from_file(input_path, turn_info, original_data):
"""Parse targets from a flattened file to create response, dst evaluation files.
Args:
input_path: Path to read the responses from.
turn_info: List of dialog, turn info.
original_data: Original JSON target.
Returns:
dst_json: JSON file with DST results
responses_json: JSON file with responses
"""
# Collate all lines to ensure they start with either <USER> or <SYSTEM>.
with open(input_path, "r") as file_id:
lines = [ii.strip() for ii in file_id.readlines()]
fixed_lines = []
current_line = ""
for line in lines:
if line[:6] == "<USER>" or line[:8] == "<SYSTEM>":
fixed_lines.append(line)
else:
fixed_lines[-1] += line
print(f"Collating: {len(lines)} -> {len(fixed_lines)}")
lines = fixed_lines
# Identify API call string and response in each line.
assert len(lines) == len(turn_info), "#lines and #turn_info do not match!"
responses_json = {}
dst_pool = {}
for line_ind, line in enumerate(lines):
dialog_id, turn_id, prediction_type = turn_info[line_ind]
if prediction_type == "api_call":
api_call_json = parse_flattened_result(line.split("<EOAC>")[0] + "<EOAC>")
# Convert slots from list of list to dicts.
api_call_json = convert_slots_to_dict(api_call_json)
dst_index = (dialog_id, turn_id)
assert dst_index not in dst_pool, "Result already exists!"
dst_pool[dst_index] = api_call_json
# Check if memories are integers, else skip.
for frame_info in api_call_json:
memories = []
for ii in frame_info["memories"]:
try:
int(ii)  # keep only memory ids that parse as integers
memories.append(ii)
except ValueError:
pass
frame_info["memories"] = memories
elif prediction_type == "response":
response_str = line.split("<EOAR>")[-1].strip()
if dialog_id not in responses_json:
responses_json[dialog_id] = {
"dialog_id": dialog_id,
"predictions": [],
}
responses_json[dialog_id]["predictions"].append(
{
"turn_id": turn_id,
"response": response_str,
}
)
else:
raise ValueError(f"Invalid prediction_type: {prediction_type}!")
responses_json = list(responses_json.values())
num_missing = 0
num_present = 0
dst_json = copy.deepcopy(original_data)
for dialog_datum in dst_json["dialogue_data"]:
del dialog_datum["mentioned_memory_ids"]
del dialog_datum["memory_graph_id"]
dialog_id = dialog_datum["dialogue_idx"]
for datum in dialog_datum["dialogue"]:
del datum["transcript_annotated"]
turn_id = datum["turn_idx"]
index = (dialog_id, turn_id)
if index in dst_pool:
datum["transcript_annotated"] = dst_pool[index]
num_present += 1
else:
print(f"Missing! -- {index}")
num_missing += 1
print(f"Missing: {num_missing} Present: {num_present}")
return dst_json, responses_json
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_target_json", required=True, help="Path to target JSON file"
)
parser.add_argument(
"--input_dialog_ids",
required=True,
help="Path for dialog, turn ids for input (.txt)",
)
parser.add_argument(
"--input_path_predicted",
required=True,
help="path for model prediction output, line-separated format (.txt)",
)
parser.add_argument(
"--output_path_report",
required=True,
help="Path to save evaluation summary (dst and response) (.json)",
)
args = parser.parse_args()
input_path_predicted = args.input_path_predicted
output_path_report = args.output_path_report
# Read the input target JSON file.
with open(args.input_target_json, "r") as file_id:
original_data = json.load(file_id)
# Read the dialog and turn ids.
with open(args.input_dialog_ids, "r") as file_id:
turn_info = [ast.literal_eval(ii.strip("\n")) for ii in file_id.readlines()]
# Convert the data from the GPT-2 friendly format to JSON formats.
dst_json, responses_json = parse_results_from_file(
input_path_predicted, turn_info, original_data
)
# Saving both the DST and response JSON.
dst_json_path = args.output_path_report.replace(".json", "_dst_results.json")
print(f"Saving DST results: {dst_json_path}")
with open(dst_json_path, "w") as file_id:
json.dump(dst_json, file_id)
responses_json_path = args.output_path_report.replace(
".json", "_response_results.json"
)
print(f"Saving responses: {responses_json_path}")
with open(responses_json_path, "w") as file_id:
json.dump(responses_json, file_id)
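# A minimal invocation sketch (file names below are hypothetical; the flags
# mirror the argparse definitions above):
#
#   python reformat_dst_response_outputs.py \
#       --input_target_json data/mem_dials_devtest.json \
#       --input_dialog_ids results/devtest_turn_ids.txt \
#       --input_path_predicted results/devtest_predicted.txt \
#       --output_path_report results/devtest_report.json
#
# Per the .replace() calls above, this writes results/devtest_report_dst_results.json
# and results/devtest_report_response_results.json.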
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/reformat_dst_response_outputs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
#!/usr/bin/env python3
"""
Scripts for evaluating the GPT-2 DST model predictions.
First, we parse the line-by-line stringified format into
the structured DST output.
We then run the main DST Evaluation script to get results.
"""
import argparse
import json
from gpt2_dst.utils.convert import parse_flattened_results_from_file
from utils.evaluate_dst import evaluate_from_flat_list
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_path_target", help="path for target, line-separated format (.txt)"
)
parser.add_argument(
"--input_path_predicted",
help="path for model prediction output, line-separated format (.txt)",
)
parser.add_argument(
"--output_path_report", help="path for saving evaluation summary (.json)"
)
args = parser.parse_args()
input_path_target = args.input_path_target
input_path_predicted = args.input_path_predicted
output_path_report = args.output_path_report
# Convert the data from the GPT-2 friendly format to JSON
list_target = parse_flattened_results_from_file(input_path_target)
list_predicted = parse_flattened_results_from_file(input_path_predicted)
# Evaluate
report = evaluate_from_flat_list(list_target, list_predicted)
# Save report
with open(output_path_report, "w") as f_out:
json.dump(report, f_out)
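# A minimal invocation sketch (file names are hypothetical):
#
#   python evaluate.py \
#       --input_path_target data/devtest_target.txt \
#       --input_path_predicted results/devtest_predicted.txt \
#       --output_path_report results/devtest_report.json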
|
comet_memory_dialog-main
|
models/gpt2_text/gpt2_dst/scripts/evaluate.py
|
"""
Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
Script evaluates response generation using GT responses.
Expected JSON format:
[
    {
        "dialog_id": <dialog_id>,
        "predictions": [
            {
                "turn_id": <turn_id>,
                "response": <str; model output>,
            },
            ...
        ]
    },
    ...
]
Author(s): Satwik Kottur
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import nltk
import numpy as np
import tqdm
def normalize_sentence(sentence):
"""Normalize the sentences and tokenize."""
return nltk.tokenize.word_tokenize(sentence.lower())
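# Example (assumes the nltk "punkt" tokenizer data is available):
#   normalize_sentence("Show me Photos!") -> ["show", "me", "photos", "!"]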
def evaluate_response_generation(
gt_responses,
model_responses,
single_round_eval=False,
record_instance_results=None,
compute_bert_score=False,
):
"""Evaluates response generation using the raw data and model predictions.
Args:
gt_responses: Ground truth responses.
model_responses: Generated responses.
single_round_eval: Evaluate only for the last turn.
record_instance_results: Save path for instance level metrics.
"""
gt_responses_pool = {ii["dialogue_idx"]: ii for ii in gt_responses["dialogue_data"]}
bleu_scores = []
# Smoothing function.
chencherry = nltk.translate.bleu_score.SmoothingFunction()
# Lazy initialization for bert score.
if compute_bert_score:
import bert_score
bert_scorer = bert_score.BERTScorer(lang="en")
bert_scores = []
num_evaluations = 0
for model_datum in tqdm.tqdm(model_responses, desc="Evaluating"):
dialog_id = model_datum["dialog_id"]
num_gt_rounds = len(gt_responses_pool[dialog_id]["dialogue"])
for round_datum in model_datum["predictions"]:
round_id = round_datum["turn_id"]
# Skip if single_round_eval and this is not the last round.
if single_round_eval and round_id != num_gt_rounds - 1:
continue
response = round_datum["response"]
gt_datum = gt_responses_pool[dialog_id]["dialogue"][round_id]
gt_response = gt_datum["system_transcript"]
            try:
                gt_response_clean = normalize_sentence(gt_response)
                response_clean = normalize_sentence(response)
                bleu_score = nltk.translate.bleu_score.sentence_bleu(
                    [gt_response_clean],
                    response_clean,
                    smoothing_function=chencherry.method7,
                )
                bleu_scores.append(bleu_score)
                if compute_bert_score:
                    _, _, bert_f1 = bert_scorer.score(
                        [" ".join(response_clean)], [" ".join(gt_response_clean)]
                    )
                    bert_scores.append(bert_f1.item())
                # Add the result to datum and save it back.
                if record_instance_results:
                    round_datum["bleu"] = bleu_score
                    round_datum["response_len"] = len(gt_response_clean)
                    if compute_bert_score:
                        # Store a JSON-serializable float, not a tensor.
                        round_datum["bert_score"] = bert_f1.item()
            except Exception:
                print(f"Evaluation failed -- Model: {response} -> GT: {gt_response}")
print("#Instances evaluated BLEU: {}".format(len(bleu_scores)))
if record_instance_results:
print(f"Saving per instance results: {record_instance_results}")
with open(record_instance_results, "w") as file_id:
json.dump(model_responses, file_id)
bleu_str_mean = np.mean(bleu_scores)
bleu_str_err = np.std(bleu_scores) / np.sqrt(len(bleu_scores))
if compute_bert_score:
bert_score_mean = np.mean(bert_scores)
bert_score_err = np.std(bert_scores) / np.sqrt(len(bert_scores))
else:
bert_score_mean, bert_score_err = None, None
return bleu_str_mean, bleu_str_err, bert_score_mean, bert_score_err
def main(args):
print("Reading: {}".format(args["data_json_path"]))
with open(args["data_json_path"], "r") as file_id:
gt_responses = json.load(file_id)
print("Reading: {}".format(args["model_response_path"]))
with open(args["model_response_path"], "r") as file_id:
model_responses = json.load(file_id)
if args["record_instance_results"]:
instance_results_path = args["model_response_path"].replace(
".json", "_results.json"
)
else:
instance_results_path = None
bleu_score, bleu_std_err, bert_score, bert_score_err = evaluate_response_generation(
gt_responses,
model_responses,
args["single_round_evaluation"],
instance_results_path,
args["compute_bert_score"],
)
print(f"BLEU Score: {bleu_score:.4f} +- {bleu_std_err}")
if args["compute_bert_score"]:
print(f"BERT Score: {bert_score:.4f} +- {bert_score_err}")
report = {
"bleu_score": bleu_score,
"bleu_std_err": bleu_std_err,
"bert_score": bert_score,
"bert_score_err": bert_score_err,
}
return report
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Response Generation Evaluation")
parser.add_argument(
"--data_json_path",
default="data/mem_dials_devtest.json",
help="Data with gold responses",
)
parser.add_argument(
"--model_response_path", default=None, help="Responses generated by the model"
)
parser.add_argument(
"--single_round_evaluation",
dest="single_round_evaluation",
action="store_true",
default=False,
help="Single round evaluation for hidden split",
)
parser.add_argument(
"--record_instance_results",
dest="record_instance_results",
action="store_true",
default=False,
help="Records per instance results and save it back",
)
parser.add_argument(
"--compute_bert_score",
dest="compute_bert_score",
action="store_true",
default=False,
help="Compute BERT score along with BLEU-4",
)
try:
parsed_args = vars(parser.parse_args())
    except IOError as msg:
parser.error(str(msg))
main(parsed_args)
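# A minimal invocation sketch (paths are hypothetical; flags mirror the
# argparse definitions above):
#
#   python response_evaluation.py \
#       --data_json_path data/mem_dials_devtest.json \
#       --model_response_path results/devtest_responses.json \
#       --record_instance_results \
#       --compute_bert_score
#
# With --record_instance_results, per-instance metrics are saved to
# results/devtest_responses_results.json (via the .replace() in main above).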
|
comet_memory_dialog-main
|
models/gpt2_text/utils/response_evaluation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
#!/usr/bin/env python3
"""
Util functions for evaluating the DST model predictions.
The script includes a main function which takes
the original JSON data file and the predicted model output file
(in the same format), and outputs the report.
"""
import argparse
import json
import copy
import numpy as np
def reformat_turn_intents(turn_intents, ground_truth_act=None):
new_intents = []
for intent in turn_intents:
frame_intent = copy.deepcopy(intent)
if "act_attributes" in frame_intent:
frame_intent.update(frame_intent["act_attributes"])
del frame_intent["act_attributes"]
# Process ground truth examples.
if "slot_values" in frame_intent:
            # Tuples are immutable, so we use a list of two instead.
frame_intent["slots"] = [
[key, value] for key, value in frame_intent["slot_values"].items()
]
# FIX: Temporarily remove "None" from participants.
for index, (slot, values) in enumerate(frame_intent["slots"]):
if slot == "participant":
frame_intent["slots"][index][1] = [
ii for ii in values if ii is not None
]
del frame_intent["slot_values"]
# Process model predictions.
else:
frame_intent["slots"] = [
[key, value] for key, value in frame_intent["slots"].items()
]
# Removes repeated slots and sorts them for correct comparison for both
# ground truth and model predictions.
for index, (slot, values) in enumerate(frame_intent["slots"]):
if type(values) is list:
frame_intent["slots"][index][1] = sorted(list(set(values)))
else:
frame_intent["slots"][index][1] = [values]
# If new act is provided, re-assign.
if ground_truth_act:
frame_intent["act"] = ground_truth_act
        # Convert memories from string to integer.
if frame_intent["memories"] and ground_truth_act is None:
frame_intent["memories"] = [
int(ii) for ii in intent["memories"] if ii.isnumeric()
]
new_intents.append(frame_intent)
return new_intents
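# Illustrative input/output sketch (values are hypothetical): a ground-truth
# intent such as
#   {"act": "REQUEST:GET",
#    "act_attributes": {"slot_values": {"time": "2021"},
#                       "request_slots": [], "memories": []}}
# is flattened by reformat_turn_intents to
#   {"act": "REQUEST:GET", "slots": [["time", ["2021"]]],
#    "request_slots": [], "memories": []}
# i.e. act_attributes are merged up, and slot_values become [name, values]
# pairs whose value lists are deduplicated and sorted (scalars are wrapped in
# single-element lists).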
def evaluate_from_json(d_true, d_pred):
"""
<list>d_true and <list>d_pred are in the following format:
(Equivalent to "dialogue_data" field in the input data JSON file)
[
{
"dialogue": [
{
"belief_state": [
[
{
'act': <str>,
'slots': [
[
SLOT_NAME, SLOT_VALUE
], ...
]
},
[End of a frame]
...
],
]
}
[End of a turn]
...
],
}
[End of a dialogue]
...
]
"""
d_true_flattened = []
d_pred_flattened = []
for i in range(len(d_true)):
# Iterate through each dialog
dialog_true = d_true[i]["dialogue"]
dialog_pred = d_pred[i]["dialogue"]
dialogue_idx = d_true[i]["dialogue_idx"]
for j in range(len(dialog_true)):
# Iterate through each turn
turn_true = dialog_true[j]["belief_state"]
turn_pred = dialog_pred[j]["belief_state"]
turn_true["turn_idx"] = j
turn_true["dialogue_idx"] = dialogue_idx
d_true_flattened.append(turn_true)
d_pred_flattened.append(turn_pred)
return evaluate_from_flat_list(d_true_flattened, d_pred_flattened)
def evaluate_from_json_conservative(d_true, d_pred, lowercase=False):
"""
<list>d_true and <list>d_pred are in the following format:
(Equivalent to "dialogue_data" field in the input data JSON file)
[
{
"dialogue": [
{
"belief_state": [
[
{
'act': <str>,
'slots': [
[
SLOT_NAME, SLOT_VALUE
], ...
]
},
[End of a frame]
...
],
]
}
[End of a turn]
...
],
}
[End of a dialogue]
...
]
"""
d_true_flattened = []
d_pred_flattened = []
num_present = 0
num_absent = 0
dst_pool = {ii["dialogue_idx"]: ii for ii in d_pred}
for gt_datum in d_true:
# Iterate through each dialog
dialog_true = gt_datum["dialogue"]
dialogue_idx = gt_datum["dialogue_idx"]
if dialogue_idx not in dst_pool:
print(f"Missing: {dialogue_idx}")
num_absent += len(gt_datum["dialogue"])
continue
# num_present += len(gt_datum["dialogue"])
dialog_pred = dst_pool[dialogue_idx]["dialogue"]
for turn_id in range(len(dialog_true)):
# Iterate through each turn
if "transcript_annotated" not in dialog_pred[turn_id]:
print(f"Missing: {dialogue_idx} {turn_id}")
num_absent += 1
continue
num_present += 1
turn_true = dialog_true[turn_id]["transcript_annotated"]
turn_pred = dialog_pred[turn_id]["transcript_annotated"]
# API calls are formatted as acts.
reformatted_act = dialog_true[turn_id]["api_call"]["call_type"]
turn_true = reformat_turn_intents(turn_true, reformatted_act)
turn_pred = reformat_turn_intents(turn_pred)
d_true_flattened.append(turn_true)
d_pred_flattened.append(turn_pred)
print(f"# present: {num_present} # absent: {num_absent}")
return evaluate_from_flat_list(
d_true_flattened, d_pred_flattened, lowercase=lowercase
)
def evaluate_from_flat_list(d_true, d_pred, lowercase=False):
"""
<list>d_true and <list>d_pred are in the following format:
(Each element represents a single turn, with (multiple) frames)
[
[
{
'act': <str>,
'slots': [
[
SLOT_NAME, SLOT_VALUE
], ...
]
},
[End of a frame]
...
],
[End of a turn]
...
]
"""
c = initialize_count_dict()
# Count # corrects & # wrongs
for i in range(len(d_true)):
true_turn = d_true[i]
pred_turn = d_pred[i]
turn_evaluation = evaluate_turn(true_turn, pred_turn, lowercase=lowercase)
c = add_dicts(c, turn_evaluation)
# Calculate metrics
joint_accuracy = c["n_correct_beliefs"] / c["n_frames"]
act_rec, act_prec, act_f1 = rec_prec_f1(
n_correct=c["n_correct_acts"], n_true=c["n_true_acts"], n_pred=c["n_pred_acts"]
)
slot_rec, slot_prec, slot_f1 = rec_prec_f1(
n_correct=c["n_correct_slots"],
n_true=c["n_true_slots"],
n_pred=c["n_pred_slots"],
)
request_slot_rec, request_slot_prec, request_slot_f1 = rec_prec_f1(
n_correct=c["n_correct_request_slots"],
n_true=c["n_true_request_slots"],
n_pred=c["n_pred_request_slots"],
)
object_rec, object_prec, object_f1 = rec_prec_f1(
n_correct=c["n_correct_objects"],
n_true=c["n_true_objects"],
n_pred=c["n_pred_objects"],
)
# Calculate std err
act_f1_stderr = d_f1(c["n_true_acts"], c["n_pred_acts"], c["n_correct_acts"])
slot_f1_stderr = d_f1(c["n_true_slots"], c["n_pred_slots"], c["n_correct_slots"])
request_slot_f1_stderr = d_f1(
c["n_true_request_slots"],
c["n_pred_request_slots"],
c["n_correct_request_slots"],
)
object_f1_stderr = d_f1(
c["n_true_objects"], c["n_pred_objects"], c["n_correct_objects"]
)
return {
"joint_accuracy": joint_accuracy,
"act_rec": act_rec,
"act_prec": act_prec,
"act_f1": act_f1,
"act_f1_stderr": act_f1_stderr,
"slot_rec": slot_rec,
"slot_prec": slot_prec,
"slot_f1": slot_f1,
"slot_f1_stderr": slot_f1_stderr,
"request_slot_rec": request_slot_rec,
"request_slot_prec": request_slot_prec,
"request_slot_f1": request_slot_f1,
"request_slot_f1_stderr": request_slot_f1_stderr,
"object_rec": object_rec,
"object_prec": object_prec,
"object_f1": object_f1,
"object_f1_stderr": object_f1_stderr,
}
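# Minimal illustrative call (hypothetical frames): a single turn whose one
# frame matches exactly yields joint_accuracy == 1.0 and act, slot, and object
# F1 of 1.0 (request_slot F1 stays 0 since no request slots are present):
#   frame = {"act": "REQUEST:GET", "slots": [["time", ["2021"]]],
#            "request_slots": [], "memories": [1]}
#   report = evaluate_from_flat_list([[frame]], [[frame]])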
def evaluate_turn(true_turn, pred_turn, lowercase=False):
count_dict = initialize_count_dict()
# Must preserve order in which frames appear.
for frame_idx in range(len(true_turn)):
# For each frame
true_frame = true_turn[frame_idx]
if frame_idx >= len(pred_turn):
pred_frame = {}
else:
pred_frame = pred_turn[frame_idx]
count_dict = add_dicts(
count_dict,
evaluate_frame(true_frame, pred_frame, strict=False, lowercase=lowercase),
)
return count_dict
def evaluate_frame(true_frame, pred_frame, strict=True, lowercase=False):
"""
If strict=True,
For each dialog_act (frame), set(slot values) must match.
If dialog_act is incorrect, its set(slot values) is considered wrong.
"""
count_dict = initialize_count_dict()
count_dict["n_frames"] += 1
    # Compare Dialog Acts
true_act = true_frame["act"] if "act" in true_frame else None
pred_act = pred_frame["act"] if "act" in pred_frame else None
if not lowercase:
b_correct_act = true_act == pred_act
else:
# Lowercase evaluation.
        b_correct_act = str(true_act).lower() == str(pred_act).lower()
count_dict["n_correct_acts"] += b_correct_act
count_dict["n_true_acts"] += "act" in true_frame
count_dict["n_pred_acts"] += "act" in pred_frame
# Compare Slots
if not lowercase:
true_frame_slot_values = {f"{k}={v}" for k, v in true_frame.get("slots", [])}
pred_frame_slot_values = {f"{k}={v}" for k, v in pred_frame.get("slots", [])}
else:
true_frame_slot_values = {
f"{k}={v}".lower() for k, v in true_frame.get("slots", [])
}
pred_frame_slot_values = {
f"{k}={v}".lower() for k, v in pred_frame.get("slots", [])
}
count_dict["n_true_slots"] += len(true_frame_slot_values)
count_dict["n_pred_slots"] += len(pred_frame_slot_values)
if strict and not b_correct_act:
pass
else:
count_dict["n_correct_slots"] += len(
true_frame_slot_values.intersection(pred_frame_slot_values)
)
    # Compare Request slots
if not lowercase:
true_frame_request_slot_values = {
rs for rs in true_frame.get("request_slots", [])
}
pred_frame_request_slot_values = {
rs for rs in pred_frame.get("request_slots", [])
}
else:
true_frame_request_slot_values = {
rs.lower() for rs in true_frame.get("request_slots", [])
}
pred_frame_request_slot_values = {
rs.lower() for rs in pred_frame.get("request_slots", [])
}
count_dict["n_true_request_slots"] += len(true_frame_request_slot_values)
count_dict["n_pred_request_slots"] += len(pred_frame_request_slot_values)
if strict and not b_correct_act:
pass
else:
count_dict["n_correct_request_slots"] += len(
true_frame_request_slot_values.intersection(pred_frame_request_slot_values)
)
# Compare Objects
true_frame_object_values = {
object_id for object_id in true_frame.get("memories", [])
}
pred_frame_object_values = {
object_id for object_id in pred_frame.get("memories", [])
}
count_dict["n_true_objects"] += len(true_frame_object_values)
count_dict["n_pred_objects"] += len(pred_frame_object_values)
if strict and not b_correct_act:
pass
else:
count_dict["n_correct_objects"] += len(
true_frame_object_values.intersection(pred_frame_object_values)
)
# Joint
count_dict["n_correct_beliefs"] += (
b_correct_act
and true_frame_slot_values == pred_frame_slot_values
and true_frame_request_slot_values == pred_frame_request_slot_values
and true_frame_object_values == pred_frame_object_values
)
return count_dict
def add_dicts(d1, d2):
return {k: d1[k] + d2[k] for k in d1}
def rec_prec_f1(n_correct, n_true, n_pred):
rec = n_correct / n_true if n_true != 0 else 0
prec = n_correct / n_pred if n_pred != 0 else 0
f1 = 2 * prec * rec / (prec + rec) if (prec + rec) != 0 else 0
return rec, prec, f1
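# Example (hypothetical counts): rec_prec_f1(n_correct=60, n_true=100, n_pred=80)
# returns (0.6, 0.75, 2 * 0.75 * 0.6 / 1.35) ~= (0.6, 0.75, 0.667).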
def d_f1(n_true, n_pred, n_correct):
# 1/r + 1/p = 2/F1
# dr / r^2 + dp / p^2 = 2dF1 /F1^2
# dF1 = 1/2 F1^2 (dr/r^2 + dp/p^2)
dr = b_stderr(n_true, n_correct)
dp = b_stderr(n_pred, n_correct)
r = n_correct / n_true if n_true else 0
p = n_correct / n_pred if n_pred else 0
f1 = 2 * p * r / (p + r) if p + r != 0 else 0
d_f1 = 0.5 * f1**2 * (dr / r**2 + dp / p**2) if p * r != 0 else 0
return d_f1
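# Sketch of the propagation above with the same hypothetical counts: for
# n_true=100, n_pred=80, n_correct=60 we get r=0.60, p=0.75, F1~=0.667; the
# stderrs dr = b_stderr(n_true, n_correct) and dp = b_stderr(n_pred, n_correct)
# come from the helpers below, and d_f1 = 0.5 * F1^2 * (dr / r^2 + dp / p^2).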
def b_stderr(n_total, n_pos):
return np.std(b_arr(n_total, n_pos)) / np.sqrt(n_total)
def b_arr(n_total, n_pos):
out = np.zeros(int(n_total))
out[: int(n_pos)] = 1.0
return out
def initialize_count_dict():
c = {
"n_frames": 0.0,
"n_true_acts": 0.0,
"n_pred_acts": 0.0,
"n_correct_acts": 0.0,
"n_true_slots": 0.0,
"n_pred_slots": 0.0,
"n_correct_slots": 0.0,
"n_true_request_slots": 0.0,
"n_pred_request_slots": 0.0,
"n_correct_request_slots": 0.0,
"n_true_objects": 0.0,
"n_pred_objects": 0.0,
"n_correct_objects": 0.0,
"n_correct_beliefs": 0.0,
}
return copy.deepcopy(c)
if __name__ == "__main__":
# Parse input args
parser = argparse.ArgumentParser()
parser.add_argument("--input_path_target", help="path for target (.json)")
parser.add_argument(
"--input_path_predicted", help="path for model prediction output (.json)"
)
parser.add_argument(
"--output_path_report", help="path for saving evaluation summary (.json)"
)
parser.add_argument(
"--lowercase",
action="store_true",
default=False,
help="Evaluate a lowercase model",
)
args = parser.parse_args()
input_path_target = args.input_path_target
input_path_predicted = args.input_path_predicted
output_path_report = args.output_path_report
# Read the JSON file input
# json_predicted must have the same structure as the original input JSON
# e.g. {'dialogue_data': [ ... ]}
    with open(input_path_target, "r") as file_id:
        json_target = json.load(file_id)
    with open(input_path_predicted, "r") as file_id:
        json_predicted = json.load(file_id)
# Evaluate
report = evaluate_from_json_conservative(
json_target["dialogue_data"],
json_predicted["dialogue_data"],
lowercase=args.lowercase,
)
# report = evaluate_from_json(json_target['dialogue_data'], json_predicted['dialogue_data'])
print(report)
# Save report
with open(output_path_report, "w") as f_out:
json.dump(report, f_out)
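# A minimal invocation sketch (paths are hypothetical; the predicted JSON must
# mirror the target's {"dialogue_data": [...]} structure, as noted above):
#
#   python evaluate_dst.py \
#       --input_path_target data/mem_dials_devtest.json \
#       --input_path_predicted results/devtest_report_dst_results.json \
#       --output_path_report results/devtest_dst_report.json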
|
comet_memory_dialog-main
|
models/gpt2_text/utils/evaluate_dst.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
"""
Description: merges the synthetically generated dialogs (.json, .p)
and the tab-separated Appen annotations (.txt)
to output the merged dialogs in both .json and .p formats
"""
import os
import json
import csv
import random
import pickle
from utils import load_data_pickle
if __name__ == "__main__":
# Parameters for generation
path_tuples = [
# Pilot 1: 50 dialogs
# [
# '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_1_mem_dials.p',
# '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv',
# '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_1_mem_dials_merged.json',
# '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_1_mem_dials_merged.p',
# ],
# Pilot 2: 450 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_2_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_2_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_2_mem_dials_merged.p",
],
# Batch 1: 2000 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_1_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_1_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_1_mem_dials_merged.p",
],
# Batch 2: 500 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_2_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_2_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_2_mem_dials_merged.p",
],
# Batch 3: 2000 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_3_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_3_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_3_mem_dials_merged.p",
],
# Batch 4: 6000 dialogs
[
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/paraphrased_0622.csv",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials_merged.json",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials_merged.p",
],
]
for path_tuple in path_tuples:
path_in_synth = path_tuple[0]
path_in_appen = path_tuple[1]
path_out_json = path_tuple[2]
path_out_pickle = path_tuple[3]
# Load original synth
original_dialogs = load_data_pickle(path_in_synth)
mm_dialogs = []
# Load paraphrased
        # Paraphrase columns: turn0_paraphrase .. turn23_paraphrase.
        fieldname_to_turn_idx = {f"turn{i}_paraphrase": i for i in range(24)}
COL_DIALOG_ID = 88
turn_idx_to_col = {}
dialog_id_to_utter = {}
with open(path_in_appen, "r", encoding="mac_roman") as f:
reader = csv.reader(f, delimiter=",", quotechar='"')
for i, line in enumerate(reader):
if i == 0:
for col_id, fieldname in enumerate(line):
if fieldname in fieldname_to_turn_idx:
turn_idx = fieldname_to_turn_idx[fieldname]
turn_idx_to_col[turn_idx] = col_id
else:
dialog_id = int(line[COL_DIALOG_ID])
dialog_id_to_utter[dialog_id] = []
for turn_idx in range(len(turn_idx_to_col)):
if turn_idx in turn_idx_to_col:
utter = line[turn_idx_to_col[turn_idx]]
utter = utter.strip()
if utter != "":
dialog_id_to_utter[dialog_id].append(utter)
else:
if turn_idx < 16:
print(
"Check dialog id %d, turn %d"
% (dialog_id, turn_idx)
)
# Merge
for i, mm_d in enumerate(original_dialogs):
d = mm_d.dialog
dialog_id = d.idx
if dialog_id not in dialog_id_to_utter:
print("Dialog %d is missing." % dialog_id)
continue
mm_dialogs.append(mm_d)
n_rounds = int(len(dialog_id_to_utter[dialog_id]) / 2)
# TODO: discarding the utterances with missing paraphrases for now
# Causes: residuals & incompletes from annotations, etc.
mm_dialogs[-1].dialog.user_turns = mm_dialogs[-1].dialog.user_turns[
:n_rounds
]
mm_dialogs[-1].dialog.asst_turns = mm_dialogs[-1].dialog.asst_turns[
:n_rounds
]
for j in range(n_rounds):
try:
user_turn = d.user_turns[j]
asst_turn = d.asst_turns[j]
user_turn_idx = j * 2
asst_turn_idx = j * 2 + 1
user_paraphrase = dialog_id_to_utter[dialog_id][user_turn_idx]
asst_paraphrase = dialog_id_to_utter[dialog_id][asst_turn_idx]
mm_dialogs[-1].dialog.user_turns[j].frames[
-1
].uttr = user_paraphrase
mm_dialogs[-1].dialog.asst_turns[j].frames[
-1
].uttr = asst_paraphrase
                except Exception:
print("Missing rounds %d from dialog %d" % (j, dialog_id))
print(len(dialog_id_to_utter[dialog_id]))
print(len(d.user_turns))
# Output
print("Outputting JSON file at %s..." % path_out_json)
        with open(path_out_json, "w") as f_json:
            json.dump(
                {"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs]},
                f_json,
                indent=4,
            )
        with open(path_out_pickle, "wb") as f_pickle:
            pickle.dump(mm_dialogs, f_pickle)
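# Note: the path_tuples above point at machine-specific locations; edit them in
# place before running. The script takes no command-line arguments:
#
#   python merge_synth_and_appen.py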
|
comet_memory_dialog-main
|
dialog_simulator/merge_synth_and_appen.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
import numpy as np
from typing import List, Tuple
from SimulatorBase import SimulatorBase
from constants import GoalType, DialogAct, GoalMemoryRefType
from Data import MemoryDialog, Goal, GoalParameter, Frame, ActAttributes, APIResponse
from MemoryServiceAPI import MemoryServiceAPI
from utils import (
str_slot_values,
str_request_slots,
str_memories,
get_template,
get_slot_values_simple_from_json,
)
random.seed(0)
class UserSimulator(SimulatorBase):
def __init__(self, *args, **kwargs):
super(UserSimulator, self).__init__(*args, **kwargs)
self.memory_service_api = None
class ModelBasedUserSimulator(UserSimulator):
def __init__(self, *args, **kwargs):
super(ModelBasedUserSimulator, self).__init__(*args, **kwargs)
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Frame:
# Need to define this behavior e.g. as a config, a model, etc.
pass
def generate_uttr(self, frame: Frame, goal: Goal) -> str:
pass
class RuleBasedUserSimulator(UserSimulator):
def __init__(self, *args, **kwargs):
super(RuleBasedUserSimulator, self).__init__(*args, **kwargs)
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Frame:
# Need to define this behavior e.g. as a config, a model, etc.
pass
class HybridUserSimulator(UserSimulator):
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Frame:
# If a Goal is servable by the model based simulator,
# generate with a model based simulator first.
# Otherwise resort to the predefined rules.
pass
def generate_uttr(self, frame: Frame, goal: Goal) -> str:
pass
class PilotUserSimulator(UserSimulator):
"""
Includes the simplest implementation of a UserSimulator.
Use this class as a guide for implementing more complex
simulators.
"""
def __init__(self, *args, **kwargs):
super(PilotUserSimulator, self).__init__(*args, **kwargs)
# Simple interaction deterministic mapping
self._goal_to_handler = {
GoalType.UNKNOWN: self.UserGoalHandler(),
GoalType.SEARCH: self.UserSearchGoalHandler(),
GoalType.REFINE_SEARCH: self.UserRefineSearchGoalHandler(),
GoalType.GET_RELATED: self.UserGetRelatedGoalHandler(),
GoalType.GET_INFO: self.UserGetInfoGoalHandler(),
# GoalType.GET_AggregatedINFO: self.UserGetAggregatedInfoGoalHandler(),
GoalType.SHARE: self.UserShareGoalHandler(),
}
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
return True
def execute_turn(self, goal: Goal, memory_dialog: MemoryDialog) -> Frame:
handler = self._goal_to_handler[goal.goal_type]
return handler.execute_turn(goal, memory_dialog, self.memory_service_api)
def generate_uttr(self, frame: Frame, goal: Goal) -> Frame:
handler = self._goal_to_handler[goal.goal_type]
uttr = handler.generate_uttr(frame, goal, self.memory_service_api)
frame.uttr = uttr
return frame
class UserGoalHandler:
def __init__(self, *args, **kwargs):
self.available_user_main_acts = [
DialogAct.UNKNOWN,
]
self.available_user_disambiguation_acts = [DialogAct.INFORM_DISAMBIGUATE]
self._uttr_template_disambiguate_memories = {
DialogAct.INFORM_DISAMBIGUATE: ["I mean these ones: {memories}"],
}
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Frame:
if len(memory_dialog.dialog.asst_turns) > 0:
last_asst_turn = memory_dialog.dialog.asst_turns[-1]
else:
last_asst_turn = None
if last_asst_turn is None or (
not last_asst_turn.is_disambiguation_request()
):
# 1. User does a main act according to the Goal
if True:
# 1. (1) Main Act
# Get a random dialog act label
user_dialog_act = random.choice(self.available_user_main_acts)
# Randomly fill the act_attributes
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
act_attributes = ActAttributes(
slot_values=self.get_slot_values(goal_parameter),
slot_values_resolved=self.get_slot_values_resolved(
goal_parameter
),
request_slots=self.get_request_slots(goal_parameter),
memories=self.get_memories(
goal.goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
),
)
list_act_attributes.append(act_attributes)
else:
# 1. (2) Answer follow-up questions
# TODO
pass
else:
# 2. Answer disambiguation request
user_dialog_act, list_act_attributes = self.disambiguate_last_turn(
memory_dialog
)
            # Return a Frame with the generated intent and attributes
# TODO: handle multiple goal parameters & multiple acts
return Frame("", user_dialog_act, list_act_attributes[0])
def get_slot_values(self, goal_parameter: GoalParameter):
return get_slot_values_simple_from_json(goal_parameter.filter)
def get_slot_values_resolved(self, goal_parameter: GoalParameter):
# return {k: str(v) for k, v in goal_parameter.filter.items()}
return goal_parameter.filter
def get_request_slots(self, goal_parameter: GoalParameter):
return goal_parameter.request_slots
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> List:
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=0,
n_max_memories=2,
)
def generate_uttr(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
if frame.dialog_act in set([DialogAct.INFORM_DISAMBIGUATE]):
template = get_template(
self._uttr_template_disambiguate_memories, frame
)
return template.format(
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
)
)
else:
return self.generate_uttr_main(frame, goal, memory_service_api)
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
template = get_template(self._uttr_template, frame)
uttr = template.format(
search_filter=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
),
)
return uttr
def disambiguate_last_turn(self, memory_dialog: MemoryDialog):
# TODO: Make it more robust
user_dialog_act = random.choice(self.available_user_disambiguation_acts)
assert len(memory_dialog.dialog.user_turns) > 0
# **** TODO **** : handle multiple goal parameters & multiple acts
            # **** TODO **** : pick the right frame instead of choosing the last frame
list_act_attributes = [
memory_dialog.dialog.user_turns[-1].frames[-1].act_attributes
]
return user_dialog_act, list_act_attributes
class UserSearchGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.REQUEST_GET,
]
self._uttr_template = {
DialogAct.REQUEST_GET: [
"Show me photos.",
"I am looking for some photos.",
],
}
self._uttr_template_s = {
DialogAct.REQUEST_GET: [
"Show me photos with {search_filter}.",
"I am looking for some photos with {search_filter}.",
],
}
def get_request_slots(self, goal_parameter: GoalParameter):
return []
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
return []
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
search_filter = frame.act_attributes.slot_values
if search_filter == {}:
template = get_template(self._uttr_template, frame)
else:
template = get_template(self._uttr_template_s, frame)
uttr = template.format(search_filter=str_slot_values(search_filter))
return uttr
class UserRefineSearchGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.INFORM_REFINE,
]
self._uttr_template = {
DialogAct.INFORM_REFINE: [
"Do I have any other photos?",
"Are there any other photos?",
],
}
self._uttr_template_s = {
DialogAct.INFORM_REFINE: [
"I would like to refine/change my search to include {search_filter}.",
"Refine/change my search to include {search_filter}.",
"Do I have any other photos that also include {search_filter}?",
],
}
def get_slot_values(self, goal_parameter: GoalParameter):
# TODO: Need to account for invalid refine, e.g. looking for wooden area rugs
return get_slot_values_simple_from_json(goal_parameter.filter)
def get_request_slots(self, goal_parameter: GoalParameter):
return []
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
return []
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
search_filter = frame.act_attributes.slot_values
if len(search_filter) > 0:
template = get_template(self._uttr_template_s, frame)
elif len(search_filter) == 0:
template = get_template(self._uttr_template, frame)
else:
print("This should not happen")
uttr = template.format(
search_filter=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
),
)
return uttr
class UserGetRelatedGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.INFORM_GET,
]
self._uttr_template_o = {
DialogAct.INFORM_GET: [
"I would like to see something similar/related to {memories}.",
"Is there anything related to {memories}.",
"Is there any other photo/video related to {memories}.",
"Do I have any other photos/videos similar/related to {memories}?",
"Could you show me any other photos/videos like {memories}?",
"Show me other photos/videos like {memories}.",
]
}
self._uttr_template_or = {
DialogAct.INFORM_GET: [
"I would like to see something related to {memories} with the similar/same {request_slots}.",
"Is there anything related to {memories} with the similar/same {request_slots}.",
"Is there any other photo/video related to {memories} with the similar/same {request_slots}.",
"Do I have any other photo/video like {memories} with the similar/same {request_slots}?",
"Could you show me any other photo/video related to {memories} with the similar/same {request_slots}?",
"Show me other photos/videos like {memories} with the similar/same {request_slots}?",
]
}
self._uttr_template_os = {
DialogAct.INFORM_GET: [
"I would like to see something related to {memories}, and/but with {search_filter}.",
"Is there anything related to {memories}, and/but with {search_filter}.",
"Is there any other photo/video related to {memories}, and/but with {search_filter}.",
"Do I have any other photo/video like {memories} , and/but with {search_filter}?",
"Could you show me any other photo/video related to {memories}, and/but with {search_filter}?",
"Show me other photos/videos like {memories}, and/but with {search_filter}.",
]
}
self._uttr_template_ors = {
DialogAct.INFORM_GET: [
"I would like to see something related "
"to {memories} on {request_slots}, but with {search_filter}.",
"Is there anything related "
"to {memories} on {request_slots}, but with {search_filter}.",
"Show me something like "
"{memories} on paremters: {request_slots}, but with {search_filter}.",
"Do I have any photos/videos like "
"{memories} on paremters: {request_slots}, but with {search_filter}?",
]
}
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
search_filter = frame.act_attributes.slot_values
request_slots = frame.act_attributes.request_slots
memories = frame.act_attributes.memories
if len(request_slots) > 0 and len(search_filter) > 0:
template = get_template(self._uttr_template_ors, frame)
elif len(request_slots) > 0 and len(search_filter) == 0:
template = get_template(self._uttr_template_or, frame)
elif len(request_slots) == 0 and len(search_filter) > 0:
template = get_template(self._uttr_template_os, frame)
elif len(request_slots) == 0 and len(search_filter) == 0:
template = get_template(self._uttr_template_o, frame)
else:
print("This should not happen")
uttr = template.format(
search_filter=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
),
)
return uttr
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=1,
n_max_memories=1,
)
class UserGetInfoGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.ASK_GET,
]
self._uttr_template = {
DialogAct.ASK_GET: [
"Can I get {request_slots} of {memories}?",
"Do you know {request_slots} of {memories}?",
"(Who/where/when/what/...) {request_slots} of {memories}?",
],
}
def get_slot_values(self, goal_parameter: GoalParameter):
return {}
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
n_max_memories = 2 if random.random() > 0.9 else 1
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=1,
n_max_memories=n_max_memories,
)
class UserCompareGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.REQUEST_COMPARE,
]
self._uttr_template_o = {
DialogAct.REQUEST_COMPARE: [
"How do they compare: {memories}?",
]
}
self._uttr_template_or = {
DialogAct.REQUEST_COMPARE: [
"How do they compare on {request_slots}: {memories}?"
]
}
def get_slot_values(self, goal_parameter: GoalParameter):
return {}
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=2,
n_max_memories=2,
)
def generate_uttr_main(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
request_slots = frame.act_attributes.request_slots
memories = frame.act_attributes.memories
if len(request_slots) > 0:
template = get_template(self._uttr_template_or, frame)
else:
template = get_template(self._uttr_template_o, frame)
uttr = template.format(
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories, memory_service_api, verbose=False
),
)
return uttr
class UserShareGoalHandler(UserGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_user_main_acts = [
DialogAct.REQUEST_SHARE,
]
self._uttr_template = {
DialogAct.REQUEST_SHARE: [
"Please share: {memories}.",
"Could you please share: {memories}?",
"I like these: {memories} - could you please share them.",
"Love these photos: {memories} - please share them.",
]
}
def get_request_slots(self, goal_parameter: GoalParameter):
return []
def get_slot_values(self, goal_parameter: GoalParameter):
return {}
def get_memories(
self,
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
# Need to pick from previous turns
n_max_memories = 2 if random.random() > 0.7 else 1
return get_memories(
goal_type,
goal_parameter,
memory_dialog,
memory_service_api,
n_min_memories=1,
n_max_memories=n_max_memories,
)
def get_memories(
goal_type: GoalType,
goal_parameter: GoalParameter,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
n_min_memories=0,
n_max_memories=2,
) -> List:
# TODO: implement
n_memories = random.randint(n_min_memories, n_max_memories)
candidate_memories = []
# (1) Determine where to choose the memory from
if goal_parameter.reference_type == GoalMemoryRefType.PREV_TURN:
# Candidate memories are from the immediate previous turn
# TODO: add a more robust report-abort if candidates are empty
# ** TODO ** : pick the right frame instead of just the last one
candidate_memories.extend(
memory_dialog.dialog.asst_turns[-1].frames[-1].act_attributes.memories
)
elif goal_parameter.reference_type == GoalMemoryRefType.DIALOG:
# Candidate memories are anywhere from the previous dialog
# TODO: add a more robust report-abort if candidates are empty
# ** TODO ** : pick the right frame instead of just the last one
for turn in memory_dialog.dialog.asst_turns + memory_dialog.dialog.user_turns:
candidate_memories.extend(turn.frames[-1].act_attributes.memories)
elif goal_parameter.reference_type == GoalMemoryRefType.GRAPH:
# Candidate memories are anywhere from the scene
candidate_memories = memory_dialog.memory_graph.get_memories()
else:
print("Object reference not specified")
pass
# (2) Weighted sampling: favor the ones that are talked the most
memory_id_to_memory_dedup = {}
memory_id_to_count = {}
for memory in candidate_memories:
memory_id = memory.data["memory_id"]
# Count
memory_id_to_count[memory_id] = memory_id_to_count.get(memory_id, 0.0) + 1
# Dedup for each memory_id
if memory_id not in memory_id_to_memory_dedup:
memory_id_to_memory_dedup[memory_id] = memory
else:
pass
candidate_memories_dedup = []
candidate_memories_p = []
sum_counts = sum([c for c in memory_id_to_count.values()])
sum_counts = 1.0 if sum_counts == 0 else sum_counts
for memory_id in memory_id_to_count:
candidate_memories_dedup.append(memory_id_to_memory_dedup[memory_id])
candidate_memories_p.append(memory_id_to_count[memory_id] / sum_counts)
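    # Illustrative weighting (hypothetical ids): if memory 7 was mentioned twice
    # across the dialog and memory 9 once, candidate_memories_p becomes
    # [2/3, 1/3], so the np.random.choice below favors the more-discussed memory 7.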
return np.random.choice(
candidate_memories_dedup, p=candidate_memories_p, size=n_memories, replace=False
)
"""
# e.g. COMPARE / GET_RELATED / GET_INFO should be used only
# among memories with the same type
if goal_type in \
set([GoalType.COMPARE, GoalType.GET_RELATED, GoalType.GET_INFO]):
memory_types = []
for candidate_memory in candidate_memories:
prefab_path = candidate_memory['prefab_path']
obj_metadata = memory_service_api.lookup(prefab_path)
memory_types.append(obj_metadata['type'])
target_memory_type = random.choice(memory_types)
candidate_memories = [
o for o in candidate_memories \
if memory_service_api.lookup(o['prefab_path'])['type'] == target_memory_type
]
"""
|
comet_memory_dialog-main
|
dialog_simulator/UserSimulator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from constants import API_CALL_TYPE, TurnSpeaker, DialogAct
from Data import Turn, Frame, ActAttributes, MemoryDialog, APIResponse, APIRequest
from typing import Dict, Tuple
import sys
sys.path.append("/Users/shanemoon/workspace/memory_dialog/models/")
from gpt2_dst.scripts.run_generation import generate_sequences
from gpt2_dst.utils.convert import (
format_context,
format_api_call,
format_api_result,
parse_flattened_result,
TEMPLATE_PREDICT,
TEMPLATE_PREDICT_RESPONSE,
START_OF_API_CALL,
END_OF_API_CALL,
END_OF_API_RESULT,
END_OF_SENTENCE,
)
from utils import resolve_sv_entities
class MemoryDialogModelBase:
def __init__(self, *args, **kwargs):
self.displayed_memories = []
def predict_api_call(self, query: str, memory_dialog: MemoryDialog) -> Dict:
return {
"call_type": API_CALL_TYPE.UNDEFINED,
"slot_values": {},
"request_slots": [],
"memories": [], # <list> of <Memory> objects
}
def construct_api_request(
self, query: str, memory_dialog: MemoryDialog
) -> Tuple[Turn, APIRequest]:
# Predict / extract call_type and parameters from query
predicted = self.predict_api_call(query, memory_dialog)
# Cast user query into a Turn instance
query_frame = Frame(
uttr=query,
dialog_act=predicted["dialog_act"],
act_attributes=ActAttributes(
slot_values=predicted["slot_values"],
request_slots=predicted["request_slots"],
# <list> of <Memory> objects
memories=predicted["memories"],
),
)
# For now, we assume one frame per turn
user_turn = Turn(frames=[query_frame], speaker=TurnSpeaker.USER, goal=None)
        # Generate an API request from the predicted values
str_call_type = predicted["call_type"]
try:
call_type = eval(str_call_type)
except Exception:
call_type = API_CALL_TYPE.UNDEFINED
api_parameters = {
"slot_values": predicted["slot_values"],
"request_slots": predicted["request_slots"],
"memories": predicted["memories"], # <list> of <Memory> objects
"n_max_results": 2,
}
# Call API
api_request = APIRequest(
call_type=call_type, parameters=api_parameters, memory_dialog=memory_dialog
)
return user_turn, api_request
def update_display(self, api_response: APIResponse):
if api_response.status is not None:
retrieved_memories = (
api_response.to_dict().get("results", {}).get("retrieved_memories", [])
)
self.displayed_memories = retrieved_memories
def predict_assistant_response(
self,
query: str,
api_call: APIRequest,
api_response: APIResponse,
memory_dialog: MemoryDialog,
) -> Dict:
return {
"uttr": "",
"dialog_act": DialogAct.UNKNOWN,
"slot_values": {},
"request_slots": [],
"memories": [],
}
def construct_assistant_response(
self,
query: str,
api_call: APIRequest,
api_response: APIResponse,
memory_dialog: MemoryDialog,
) -> Turn:
predicted = self.predict_assistant_response(
query, api_call, api_response, memory_dialog
)
response_frame = Frame(
uttr=predicted["uttr"],
dialog_act=predicted["dialog_act"],
act_attributes=ActAttributes(
slot_values=predicted["slot_values"],
slot_values_resolved={},
request_slots=predicted["request_slots"],
memories=predicted["memories"],
),
)
# For now, we assume one frame per turn
assistant_turn = Turn(
frames=[response_frame], speaker=TurnSpeaker.ASSISTANT, goal=None
)
return assistant_turn
class PilotMemoryDialogModel(MemoryDialogModelBase):
def __init__(self, *args, **kwargs):
super(PilotMemoryDialogModel, self).__init__(*args, **kwargs)
self.model = kwargs.pop("model")
self.tokenizer = kwargs.pop("tokenizer")
self.length = kwargs.pop("length")
self.parameter_ontology = kwargs.pop("parameter_ontology")
self.prev_asst_uttr = None
self.lst_context = []
self.turn_id = 0
def predict_api_call(self, query: str, memory_dialog: MemoryDialog) -> Dict:
# Form the prompt
to_predict = self.form_prompt_for_api_call(
self.lst_context, self.prev_asst_uttr, query
)
# Generate the sequence
generated = generate_sequences(
self.model, self.tokenizer, to_predict, verbose=False
)[0]
# Extract the api_call
parsed_api_call, _ = self.parse_assistant_response(generated)
call_type = parsed_api_call.get("act", None)
slot_values = {k: v for k, v in parsed_api_call.get("slots", [])}
request_slots = parsed_api_call.get("request_slots", [])
memory_ids = parsed_api_call.get("memories", [])
memories = memory_dialog.memory_graph.get_memories_by_ids(memory_ids)
# Entity Resolution for locations, etc.
slot_values = resolve_sv_entities(slot_values, self.parameter_ontology)
# Form an API call
return {
"call_type": call_type,
"dialog_act": DialogAct.UNKNOWN,
"slot_values": slot_values,
"request_slots": request_slots,
"memories": memories, # <list> of <Memory> objects
}
def predict_assistant_response(
self,
query: str,
api_call: APIRequest,
api_response: APIResponse,
memory_dialog: MemoryDialog,
) -> Dict:
# Form the prompt
to_predict = self.form_prompt_for_response(
self.lst_context, self.prev_asst_uttr, query, api_call, api_response
)
# Generate the sequence
generated = generate_sequences(
self.model, self.tokenizer, to_predict, verbose=False
)[0]
_, response_text = self.parse_assistant_response(generated)
self.prev_asst_uttr = response_text
if api_response.results is not None:
memories = api_response.results.get("retrieved_memories", [])
else:
memories = []
return {
"uttr": response_text,
"dialog_act": DialogAct.UNKNOWN,
"slot_values": {},
"request_slots": [],
"memories": memories, # <list> of <Memory> objects
}
def form_prompt_for_api_call(
self, lst_context, prev_asst_uttr, user_uttr, len_context=2
):
# Format main input context
context = format_context(
prev_asst_uttr,
user_uttr,
self.displayed_memories,
use_multimodal_contexts=True,
)
# Concat with previous contexts
lst_context.append(context)
context = " ".join(lst_context[-len_context:])
# Format the main input
predict = TEMPLATE_PREDICT.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
)
print("============== Prompt Sequence ==============")
print(predict)
print("=============================================")
return predict
def form_prompt_for_response(
self,
lst_context,
prev_asst_uttr,
user_uttr,
api_call,
api_response,
len_context=2,
):
# Format main input context
# Context should already have been formatted
context = " ".join(lst_context[-len_context:])
# Format API call
json_api_call = api_call.to_dict(simple=True)
str_api_call = format_api_call(
json_api_call["call_type"], json_api_call["parameters"]
)
# Format API result
json_api_response = api_response.to_dict()
str_api_result = format_api_result(json_api_response)
# Format the main input
predict = TEMPLATE_PREDICT_RESPONSE.format(
context=context,
START_OF_API_CALL=START_OF_API_CALL,
belief_state=str_api_call,
END_OF_API_CALL=END_OF_API_CALL,
api_result=str_api_result,
END_OF_API_RESULT=END_OF_API_RESULT,
)
print("============== Prompt Sequence ==============")
print(predict)
print("=============================================")
return predict
def parse_assistant_response(self, generated):
print("============== Generated Sequence ==============")
print(generated)
print("================================================")
parsed = parse_flattened_result(generated)
if parsed == []:
parsed_api_call = {}
else:
# For now, we only consider one api_call per turn
parsed_api_call = parsed[-1]
if parsed_api_call == {}:
response_text = "I could not understand. Could you repeat please?"
if END_OF_API_RESULT in generated:
response_text = generated.split(END_OF_API_RESULT)[-1]
response_text = response_text.replace(END_OF_SENTENCE, "")
else:
response_text = "(No system response)"
return parsed_api_call, response_text
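# Rough shape of a generated sequence, using the token names imported from
# gpt2_dst.utils.convert (the surface forms are defined there, not here):
#   <context> START_OF_API_CALL ...api call... END_OF_API_CALL
#   ...api result... END_OF_API_RESULT ...response text... END_OF_SENTENCE
# parse_assistant_response() keeps the last parsed api_call and treats the text
# after END_OF_API_RESULT as the assistant response.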
|
comet_memory_dialog-main
|
dialog_simulator/MemoryDialogModel.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import json, random, traceback, os
from typing import List, Tuple
from constants import TurnSpeaker, DialogAct, API_STATUS
from Data import Dialog, MemoryDialog, MemoryGraph, Turn, Goal
from UserSimulator import PilotUserSimulator
from AssistantSimulator import PilotAssistantSimulator
from GoalGenerator import RuleBasedGoalGenerator
from MemoryServiceAPI import MemoryServiceAPI
from utils import build_parameter_ontology
random.seed(0)
class MemoryDialogSimulator:
def __init__(self, *args, **kwargs):
# Initialize user simulator, assistant simulator, memory_graphs etc.
self.domain = kwargs.pop("domain")
self._memory_service_api = kwargs.pop("memory_service_api", MemoryServiceAPI())
self._user_simulator = kwargs.pop("user_simulator", PilotUserSimulator())
self._assistant_simulator = kwargs.pop(
"assistant_simulator", PilotAssistantSimulator()
)
self._goal_generator = kwargs.pop(
"goal_generator", RuleBasedGoalGenerator(domain=self.domain)
)
self._memory_graph_bank = kwargs.pop("memory_graph_bank", {})
self._user_simulator.register_memory_service_api(self._memory_service_api)
self._assistant_simulator.register_memory_service_api(self._memory_service_api)
def set_user_simulator(self, user_simulator):
self._user_simulator = user_simulator
def set_assistant_simulator(self, assistant_simulator):
self._assistant_simulator = assistant_simulator
def set_goal_generator(self, goal_generator):
self._goal_generator = goal_generator
def set_memory_service_api(self, memory_service_api):
self._memory_service_api = memory_service_api
def sample_goals(self, memory_graph, goal_config) -> List[Goal]:
return self._goal_generator.sample_goals(
memory_graph=memory_graph, goal_config=goal_config
)
def sample_memory_graph(self) -> MemoryGraph:
if self._memory_graph_bank == {}:
# Empty memory graph
return MemoryGraph()
# Randomly sample a memory
# TODO: allow for more organized way of sampling memories
memory_graph_id = random.choice(list(self._memory_graph_bank.keys()))
memory_graph = self._memory_graph_bank[memory_graph_id]
return MemoryGraph(data=memory_graph)
def batch_generate_dialog_flows(
self,
n_dialogs: int,
n_max_turns: int,
start_dialog_idx: int,
goal_config: dict = {},
) -> List[MemoryGraph]:
# Batch generate multiple dialogs using the same simulators
memory_dialogs = []
for i in range(n_dialogs):
# Continue until generation is successful
generation_success = False
while not generation_success:
try:
# Sample a memory graph (user)
memory_graph = self.sample_memory_graph()
# Create an empty memory dialog
memory_dialog = MemoryDialog(memory_graph=memory_graph)
# Generate Goal Config
goal_config["parameter_ontology"] = build_parameter_ontology(
memory_dialog.memory_graph,
self._memory_service_api.metadata,
self.domain,
)
# Sample goals for this dialog
goals = self.sample_goals(
memory_graph=memory_dialog.memory_graph, goal_config=goal_config
)
# Generate dialog flow
memory_dialog = self.generate_dialog_flow(
goals, memory_dialog, n_max_turns
)
memory_dialog.dialog.idx = start_dialog_idx + i
# If everything is successful, append to memory_dialogs
generation_success = True
memory_dialogs.append(memory_dialog)
except:
# TODO: Make a more robust abort strategy
print("** Error in generating dialog. Ignoring this one. **")
traceback.print_exc()
print()
return memory_dialogs
def generate_dialog_flow(
self,
goals: List[Goal],
memory_dialog: MemoryDialog,
n_max_turns: int,
initialize=True,
) -> MemoryDialog:
if initialize:
# Initialize memory_dialog
memory_dialog.initialize()
# Iterate and generate a dialog turn by turn
i = 0
while not goals == [] and i < n_max_turns:
# Pick a goal
current_goal = goals.pop(0)
goal_met = False
print("Goal:", current_goal)
while not goal_met and i < n_max_turns:
# Generate a turn
memory_dialog = self.generate_turn(current_goal, memory_dialog)
# End of a turn: update dialog & goals
i += 1
goal_met = memory_dialog.is_goal_met(current_goal)
is_valid_dialog = self.validate_dialog(memory_dialog)
if not is_valid_dialog:
# If something is not right about this dialog, abort.
# TODO: abort gracefully
assert False
return memory_dialog
def generate_turn(self, goal: Goal, memory_dialog: MemoryDialog) -> MemoryDialog:
# TODO: extend it for multiple frames per turn
# (1) Generate a User turn, given a target goal and a memory_dialog
# Generate dialog act and slots
user_frame = self._user_simulator.execute_turn(goal, memory_dialog)
# Template based utterance generation
user_frame = self._user_simulator.generate_uttr(user_frame, goal)
# Instantiate a user turn, and update the memory_dialog
user_turn = Turn([user_frame], TurnSpeaker.USER, goal)
memory_dialog.dialog.add_user_turn(user_turn)
print("U:", user_turn)
        # (2) Generate an Assistant turn, given a target goal and a memory_dialog
# Generate dialog act and slots
asst_frame, api_request, api_result = self._assistant_simulator.execute_turn(
goal, memory_dialog
)
# Template based utterance generation
asst_frame = self._assistant_simulator.generate_uttr(asst_frame, goal)
# Instantiate an assistant turn, and update the memory_dialog
asst_turn = Turn([asst_frame], TurnSpeaker.ASSISTANT, goal)
memory_dialog.dialog.add_asst_turn(asst_turn)
print("A:", asst_turn)
# Add goals and api_calls
memory_dialog.dialog.add_goal(goal)
memory_dialog.dialog.add_api_call(api_request)
memory_dialog.dialog.add_api_result(api_result)
return memory_dialog
def validate_dialog(self, memory_dialog: MemoryDialog) -> bool:
# Check for any undesirable traits of a dialog
n_turns = len(memory_dialog.dialog.asst_turns)
# (1) Multiple sharing of the same memory
set_shared_memory_ids = set()
for user_turn in memory_dialog.dialog.user_turns:
# TODO: Handle multiple frames per turn
dialog_act = user_turn.frames[-1].dialog_act
if dialog_act == DialogAct.REQUEST_SHARE:
memories_to_share = user_turn.frames[-1].act_attributes.memories
for m in memories_to_share:
memory_id = m.data["memory_id"]
if memory_id in set_shared_memory_ids:
# If this memory_id is already shared, abort
return False
set_shared_memory_ids.add(memory_id)
# (2) Too frequent search fails
n_search_fails = 0
for api_result in memory_dialog.dialog.api_results:
status = api_result.status
if status == API_STATUS.SEARCH_NOT_FOUND:
n_search_fails += 1
if (n_turns <= 4 and n_search_fails >= 2) or (
n_turns > 4 and n_search_fails >= 3
):
return False
# Otherwise, this dialog is good.
return True
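# Illustrative reading of the checks above: re-sharing the same memory_id
# always invalidates a dialog; a short dialog (<= 4 assistant turns) is
# rejected at 2 failed searches, a longer one at 3.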
|
comet_memory_dialog-main
|
dialog_simulator/MemoryDialogSimulator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
import json
from MemoryDialogModel import PilotMemoryDialogModel
from Data import MemoryGraph, MemoryDialog, Turn
from MemoryServiceAPI import MemoryServiceAPI
import sys
sys.path.append("/Users/shanemoon/workspace/memory_dialog/models/")
from gpt2_dst.scripts.run_generation import load_model
class InteractiveDialogHandler:
def __init__(self, *args, **kwargs):
self.model = kwargs.pop("model", None)
self.memory_graph = kwargs.pop("memory_graph", None)
self.api = kwargs.pop("api", None)
# Start an empty dialog data
self.memory_dialog = MemoryDialog(memory_graph=self.memory_graph)
self.memory_dialog.initialize()
def execute_turn(self, user_query: str) -> Turn:
"""
Given user_query, construct an API call,
get the API response, and return an Assistant Turn.
"""
# Construct the API request
try:
user_turn, api_request = self.model.construct_api_request(
user_query, self.memory_dialog
)
print("============== API Request ==============")
print(api_request)
print("=========================================\n")
# Call API to get responses back
api_response = self.api.call_api(api_request)
print("============== API Response ==============")
print(api_response)
print("==========================================\n")
# Update the display based on the API results
self.model.update_display(api_response)
# Generate an Assistant response based on the API response
assistant_turn = self.model.construct_assistant_response(
user_query, api_request, api_response, self.memory_dialog
)
print("============== Assistant Response ==============")
print(assistant_turn)
print("================================================\n")
# Update the memory_dialog with the new user and assistant turns
self.memory_dialog.dialog.add_user_turn(user_turn)
self.memory_dialog.dialog.add_asst_turn(assistant_turn)
# Update the model
self.model.prev_asst_uttr = assistant_turn.frames[-1].uttr
self.model.turn_id += 1
return assistant_turn
except Exception:
return None
def run_loop_command_prompt(self):
while True:
print()
user_query = input(">> Enter your query (or type quit): ")
if user_query == "quit":
break
response = self.execute_turn(user_query=user_query)
if __name__ == "__main__":
# Define paths
# path_memory_graph_list = '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/final/mscoco_memory_graphs_1k.json'
path_memory_graph_list = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/final/mscoco_memory_graphs_mini.json"
path_model = (
"/Users/shanemoon/workspace/memory_dialog/models/gpt2_dst/save/model_v2"
)
path_parameter_ontology = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/all_parameter_ontology.json"
# Hyperparameters for the demo
random_memory_graph = False
# Load parameters
memory_graph_list = json.load(open(path_memory_graph_list, "r"))
memory_graph_bank = {}
for memory_graph in memory_graph_list:
memory_graph_id = memory_graph["memory_graph_id"]
for i in range(len(memory_graph["memories"])):
memory_graph["memories"][i]["memory_graph_id"] = memory_graph_id
memory_graph_bank[memory_graph_id] = memory_graph
parameter_ontology = json.load(open(path_parameter_ontology, "r"))
# Select a Memory Graph
if random_memory_graph:
memory_graph = MemoryGraph(
data=memory_graph_bank[random.choice(list(memory_graph_bank.keys()))]
)
else:
memory_graph_id = "RbXAfFDz8r72"
memory_graph = MemoryGraph(data=memory_graph_bank[memory_graph_id])
# Load the model parameters
gpt2_model, tokenizer, length = load_model(
model_type="gpt2", model_name_or_path=path_model, device="cpu", length=150
)
# Instantiate the dialog handler
model = PilotMemoryDialogModel(
model=gpt2_model,
tokenizer=tokenizer,
length=length,
parameter_ontology=parameter_ontology,
)
api = MemoryServiceAPI()
dialog_handler = InteractiveDialogHandler(
model=model, memory_graph=memory_graph, api=api
)
# Run loop
dialog_handler.run_loop_command_prompt()
|
comet_memory_dialog-main
|
dialog_simulator/InteractiveDialogHandler.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
"""
Extracts user utterances from the merged memory dialog data
and writes them to a TSV file (dialog_id, turn_id, user_utterance).
"""
import os
import json
import csv
import random
import pickle
import numpy as np
from utils import load_data_pickle
if __name__ == "__main__":
random.seed(0)
np.random.seed(0)
# Paths for merge
path_in_pickle = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_merged.p"
path_out_tsv = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/user_utterances.tsv"
mm_dialogs = []
mm_dialogs.extend(load_data_pickle(path_in_pickle))
# Output
print("Total: %d dialogs" % len(mm_dialogs))
with open(path_out_tsv, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter="\t", quotechar="'")
writer.writerow(["dialog_id", "turn_id", "user_utterance"])
for i, mm_dialog in enumerate(mm_dialogs):
user_turns = mm_dialog.dialog.user_turns
dialog_id = mm_dialog.dialog.idx
for j, user_turn in enumerate(user_turns):
user_uttr = user_turn.frames[-1].uttr
if user_uttr not in set(["N/A", "NA"]):
row = [dialog_id, j, user_uttr]
writer.writerow(row)
|
comet_memory_dialog-main
|
dialog_simulator/get_user_utterances.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from enum import Enum
class GoalType(Enum):
UNKNOWN = "unknown"
SEARCH = "search"
REFINE_SEARCH = "refine_search"
GET_RELATED = "get_related"
GET_INFO = "get_info"
GET_AGGREGATED_INFO = "get_aggregated_info"
SHARE = "share"
CHITCHAT = "chitchat"
class DialogAct(Enum):
UNKNOWN = "unknown"
INFORM_GET = "INFORM:GET"
INFORM_REFINE = "INFORM:REFINE"
INFORM_PREFER = "INFORM:PREFER"
INFORM_DISPREFER = "INFORM:DISPREFER"
INFORM_SHARE = "INFORM:SHARE"
INFORM_DISAMBIGUATE = "INFORM:DISAMBIGUATE"
INFORM_CHITCHAT = "INFORM:CHITCHAT"
REQUEST_GET = "REQUEST:GET"
REQUEST_REFINE = "REQUEST:REFINE"
REQUEST_PREFER = "REQUEST:PREFER"
REQUEST_DISPREFER = "REQUEST:DISPREFER"
REQUEST_SHARE = "REQUEST:SHARE"
REQUEST_DISAMBIGUATE = "REQUEST:DISAMBIGUATE"
CONFIRM_GET = "CONFIRM:GET"
CONFIRM_REFINE = "CONFIRM:REFINE"
CONFIRM_PREFER = "CONFIRM:PREFER"
CONFIRM_DISPREFER = "CONFIRM:DISPREFER"
CONFIRM_SHARE = "CONFIRM:SHARE"
CONFIRM_DISAMBIGUATE = "CONFIRM:DISAMBIGUATE"
PROMPT_GET = "PROMPT:GET"
PROMPT_REFINE = "PROMPT:REFINE"
PROMPT_PREFER = "PROMPT:PREFER"
PROMPT_DISPREFER = "PROMPT:DISPREFER"
PROMPT_SHARE = "PROMPT:SHARE"
PROMPT_DISAMBIGUATE = "PROMPT:DISAMBIGUATE"
ASK_GET = "ASK:GET"
ASK_REFINE = "ASK:REFINE"
ASK_PREFER = "ASK:PREFER"
ASK_DISPREFER = "ASK:DISPREFER"
ASK_SHARE = "ASK:SHARE"
ASK_DISAMBIGUATE = "ASK:DISAMBIGUATE"
class GoalMemoryRefType(Enum):
PREV_TURN = "PREV_TURN"
DIALOG = "DIALOG"
GRAPH = "GRAPH"
NOT_SPECIFIED = "Not Specified"
class ObjectRefType(Enum):
R1 = "R1" # Unique object in the scene
R2 = "R2" # Object in the dialog history, same view point
R3 = "R3" # Object in the dialog history, previous view point
NOT_SPECIFIED = "Not Specified"
class API_STATUS(Enum):
SEARCH_FOUND = "Search Founud"
SEARCH_NOT_FOUND = "Search Not Founud"
INFO_FOUND = "Info Found"
INFO_NOT_FOUND = "Info Not Found"
SHARED = "Shared"
class API_CALL_TYPE(Enum):
SEARCH = "Search"
REFINE_SEARCH = "Refine Search"
GET_INFO = "Get Info"
SHARE = "Share"
GET_RELATED = "Get Related"
UNDEFINED = "Undefined"
class TurnSpeaker(Enum):
USER = "User"
ASSISTANT = "Assistant"
numeric_slots = {"time"}
non_visual_slots = {
"location",
"time",
}
visual_slots = {"participant", "activity"}
all_slots = {"time", "location", "participant", "activity"}
|
comet_memory_dialog-main
|
dialog_simulator/constants.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from constants import API_CALL_TYPE, TurnSpeaker, DialogAct
from Data import Turn, Frame, ActAttributes, MemoryDialog, APIResponse, APIRequest
from typing import Dict, Tuple
# Assumed import (not present in the original file): DummyMemoryDialogModel
# subclasses MemoryDialogModelBase, which is expected to live alongside the
# pilot model, e.g. in MemoryDialogModel.py.
from MemoryDialogModel import MemoryDialogModelBase
class DummyMemoryDialogModel(MemoryDialogModelBase):
def __init__(self, *args, **kwargs):
super(DummyMemoryDialogModel, self).__init__(*args, **kwargs)
def predict_api_call(self, query: str) -> Dict:
return {
"call_type": API_CALL_TYPE.SEARCH,
"dialog_act": DialogAct.UNKNOWN,
"slot_values": {},
"request_slots": [],
"memories": [],
}
def predict_assistant_response(
self, query: str, api_response: APIResponse, memory_dialog: MemoryDialog
):
response_str = (
"User asked:"
+ query
+ ". Dialog history: "
+ str(memory_dialog)
+ ". API response:"
+ str(api_response)
)
return {
"uttr": response_str,
"dialog_act": DialogAct.UNKNOWN,
"slot_values": {},
"request_slots": [],
"memories": api_response.results.get("retrieved_memories"),
}
|
comet_memory_dialog-main
|
dialog_simulator/DummyMemoryDialogModel.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
"""
Merges multiple batches of SIMMC 2.0 files into one,
and also outputs train, dev, devtest, and test sets.
"""
import os
import json
import csv
import random
import pickle
import numpy as np
from utils import load_data_pickle
if __name__ == "__main__":
random.seed(0)
np.random.seed(0)
# Paths for merge
paths_to_merge = [
#'/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_1_mem_dials_merged.p',
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/pilot_2_mem_dials_merged.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_1_mem_dials_merged.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_2_mem_dials_merged.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_3_mem_dials_merged.p",
"/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials_merged.p",
]
path_out_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_merged.json"
path_out_pickle = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_merged.p"
mm_dialogs = []
for path_in_pickle in paths_to_merge:
# Load original synth
mm_dialogs.extend(load_data_pickle(path_in_pickle))
# Output
print("Total: %d dialogs" % len(mm_dialogs))
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs],
"split": "all",
"year": 2021,
"domain": "memory",
},
open(path_out_json, "w"),
indent=4,
)
pickle.dump(mm_dialogs, open(path_out_pickle, "wb"))
# Split
r_train = 0.85
r_dev = 0.10
r_devtest = 0.04
r_test = 0.01
r_mini = 0.001
path_out_train_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_train.json"
path_out_dev_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_dev.json"
path_out_devtest_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_devtest.json"
path_out_test_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_test.json"
path_out_mini_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/mem_dials_mini.json"
n_dialogs = len(mm_dialogs)
indices = np.arange(n_dialogs)
np.random.shuffle(indices)
n_train = int(n_dialogs * r_train)
n_dev = int(n_dialogs * r_dev)
n_devtest = int(n_dialogs * r_devtest)
n_test = int(n_dialogs * r_test)
n_mini = int(n_dialogs * r_mini)
train_indices = indices[:n_train]
dev_indices = indices[n_train : n_train + n_dev]
devtest_indices = indices[n_train + n_dev : n_train + n_dev + n_devtest]
test_indices = indices[n_train + n_dev + n_devtest :]
mini_indices = test_indices[:n_mini]
mm_dialogs_train = [mm_d for i, mm_d in enumerate(mm_dialogs) if i in train_indices]
mm_dialogs_dev = [mm_d for i, mm_d in enumerate(mm_dialogs) if i in dev_indices]
mm_dialogs_devtest = [
mm_d for i, mm_d in enumerate(mm_dialogs) if i in devtest_indices
]
mm_dialogs_test = [mm_d for i, mm_d in enumerate(mm_dialogs) if i in test_indices]
mm_dialogs_mini = [mm_d for i, mm_d in enumerate(mm_dialogs) if i in mini_indices]
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_train],
"split": "train",
"year": 2021,
"domain": "memory",
},
open(path_out_train_json, "w"),
indent=4,
)
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_dev],
"split": "dev",
"year": 2021,
"domain": "memory",
},
open(path_out_dev_json, "w"),
indent=4,
)
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_devtest],
"split": "devtest",
"year": 2021,
"domain": "memory",
},
open(path_out_devtest_json, "w"),
indent=4,
)
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_test],
"split": "test",
"year": 2021,
"domain": "memory",
},
open(path_out_test_json, "w"),
indent=4,
)
json.dump(
{
"dialogue_data": [mm_d.to_dict() for mm_d in mm_dialogs_mini],
"split": "mini",
"year": 2021,
"domain": "memory",
},
open(path_out_mini_json, "w"),
indent=4,
)
|
comet_memory_dialog-main
|
dialog_simulator/merge_data_json.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from constants import visual_slots, all_slots
import random
random.seed(0)
def build_parameter_ontology(memory_graph, metadata, domain=None, ontology=None):
if ontology is None:
ontology = {
"visual": {},
"non_visual": {},
"all": {},
}
for memory in memory_graph.get_memories():
for slot, value in memory.data.items():
if slot not in all_slots:
continue
slot_category = "visual" if slot in visual_slots else "non_visual"
if slot not in ontology["all"]:
ontology["all"][slot] = []
ontology[slot_category][slot] = []
if value not in ontology["all"][slot]:
ontology["all"][slot].append(value)
ontology[slot_category][slot].append(value)
return ontology
def batch_build_parameter_ontology(memory_graph_bank):
ontology = {
"visual": {},
"non_visual": {},
"all": {},
}
for i, memory_graph in enumerate(memory_graph_bank.values()):
if i % 100 == 0:
print("Processing memory graph %d" % i)
ontology = build_parameter_ontology(
memory_graph=memory_graph, metadata={}, ontology=ontology
)
return ontology
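# Shape of the returned ontology (illustrative):
# {
#     "visual": {"participant": [...], "activity": [...]},
#     "non_visual": {"location": [...], "time": [...]},
#     "all": {"time": [...], "location": [...], "participant": [...], "activity": [...]},
# }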
def str_memory(memory, memory_service_api=None, verbose=True):
"""
memory: <Memory> object
"""
memory_index = str(memory.data["memory_id"])
memory_activity = str(
", ".join([a["activity_name"] for a in memory.data["activity"]])
)
time = str(memory.data["time"])[:-3] + " (" + memory.data["time_part"] + ")"
location = memory.data["location"]["geo_tag"].get("place", "")
if verbose:
template = (
"[Memory ID: {memory_index} ({memory_activity}), {time}, @ {location}]"
)
else:
template = "[Memory ID: {memory_index}]"
return template.format(
memory_index=memory_index,
memory_activity=memory_activity,
time=time,
location=location,
)
def str_slot_values(slot_values):
return "{ " + ", ".join([f"{k}: {v}" for k, v in slot_values.items()]) + " }"
def str_request_slots(request_slots):
return "{ " + ", ".join([s for s in request_slots]) + " }"
def str_memories(memories, memory_service_api=None, verbose=True):
# memories: <list> of <Memory> objects
return (
"{ "
+ str([str_memory(o, memory_service_api, verbose) for o in memories])
+ " }"
)
def int_memory_ids(memories):
return [int(m.data["memory_id"]) for m in memories]
def get_template(template_map, nlu_label):
return random.choice(template_map.get(nlu_label.dialog_act))
def load_data_pickle(path_pickle):
import pickle
return pickle.load(open(path_pickle, "rb"))
def weighted_choice(population, weights):
return random.choices(population=population, weights=weights, k=1)[0]
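# Example (illustrative): weighted_choice(["a", "b"], [9, 1]) returns "a"
# roughly 90% of the time; random.choices treats weights as relative, so
# they need not sum to 1.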
def get_slot_values_simple_from_json(
slot_values,
location_target="place",
participant_target="name",
activity_target="activity_name",
):
if slot_values is None:
return {}
out = {}
for slot, value in slot_values.items():
if slot == "location":
out[slot] = get_location_simple_from_json(value, target=location_target)
elif slot == "participant":
out[slot] = get_participant_simple_from_json(
value, target=participant_target
)
elif slot == "activity":
out[slot] = get_activity_simple_from_json(value, target=activity_target)
else:
out[slot] = str(value)
return out
def get_location_simple_from_json(location_json, target="place"):
"""
JSON format:
"location":{
"gps":{
"lat":40.00,
"lon":100.00
},
"geo_tag":{
"place":"Summit at Snoqualmie",
"city":"Seattle",
"state":"Washington",
"country":"USA"
}
"""
if target in location_json["geo_tag"]:
return location_json["geo_tag"][target]
return location_json["geo_tag"].get("city")
def get_participant_simple_from_json(participant_json, target="name"):
"""
JSON format:
"participant":[
{
"name":"John",
"memory_graph_id":1
},
{
"name":"Mary",
"memory_graph_id":2
}
],
"""
return [p[target] for p in participant_json]
def get_activity_simple_from_json(activity_json, target="activity_name"):
"""
JSON format:
"activity":[
{
"activity_name":"skiing"
}
]
"""
return [a[target] for a in activity_json]
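# Worked example (illustrative), matching the JSON formats documented in the
# docstrings above:
# slot_values = {
#     "location": {"gps": {...}, "geo_tag": {"place": "Summit at Snoqualmie",
#                                            "city": "Seattle", ...}},
#     "participant": [{"name": "John"}, {"name": "Mary"}],
#     "activity": [{"activity_name": "skiing"}],
# }
# get_slot_values_simple_from_json(slot_values)
# -> {"location": "Summit at Snoqualmie",
#     "participant": ["John", "Mary"],
#     "activity": ["skiing"]}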
def get_edit_distance(s1, s2):
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2 + 1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(
1 + min((distances[i1], distances[i1 + 1], distances_[-1]))
)
distances = distances_
return distances[-1]
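# Quick checks (illustrative) for the Levenshtein distance above:
# get_edit_distance("kitten", "sitting") == 3  # two substitutions + one insertion
# get_edit_distance("skiing", "sking") == 1    # one deleted "i"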
def resolve_sv_entities(slot_values: dict, parameter_ontology: dict) -> dict:
if "location" in slot_values:
str_location = slot_values["location"]
resolved_location_obj = resolve_location(
str_location, parameter_ontology["all"]["location"], True
)
slot_values["location"] = resolved_location_obj
if "participant" in slot_values:
str_participant = slot_values["participant"]
resolved_participant_obj = resolve_participant(
str_participant, parameter_ontology["all"]["participant"], True
)
slot_values["participant"] = resolved_participant_obj
if "activity" in slot_values:
str_activity = slot_values["activity"]
resolved_activity_obj = resolve_activity(
str_activity, parameter_ontology["all"]["activity"], True
)
slot_values["activity"] = resolved_activity_obj
return slot_values
def resolve_location(str_location: str, location_ontology: list, fuzzy: bool) -> dict:
print("Resolving location: %s" % str_location)
# Strict match
for target_location_obj in location_ontology:
if str_location.lower() == target_location_obj["geo_tag"]["place"].lower():
return target_location_obj
# If strict match doesn't work & fuzzy == True:
if fuzzy:
print("Trying fuzzy match for location %s" % str_location)
for target_location_obj in location_ontology:
edit_distance = get_edit_distance(
str_location.lower(), target_location_obj["geo_tag"]["place"].lower()
)
if edit_distance < 7:
print("Fuzzy match found for location %s" % str_location)
return target_location_obj
print("Match not found for location %s" % str_location)
return {}
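# Example (illustrative): with fuzzy=True, a query like "seattle downtow" can
# resolve to a "Seattle Downtown" ontology entry (edit distance 1 < 7); any
# location beyond the threshold falls through and returns {}.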
def resolve_list_entities(
str_entity: str, entity_ontology: list, fuzzy: bool, target_key: str
) -> dict:
"""
(input) str_entities: [
'element_1', ...
e.g. 'skiing', 'snowboarding'
]
(target) list_entities: [
{
'target_key': <str>,
e.g. 'activity_name': 'skiing'
}
]
"""
# First, try converting the str to a list
try:
set_entity = set(name.lower() for name in eval(str_entity))
# Strict match
for target_entity_obj in entity_ontology:
target_entity = set(
str(p.get(target_key, "")).lower() for p in target_entity_obj
)
if set_entity == target_entity:
return target_entity_obj
# Fuzzy match 1
if fuzzy and len(set_entity) > 1:
print("Trying fuzzy match for entity %s" % str_entity)
match_threshold = max(1, int(len(set_entity) / 2) - 1)
for target_entity_obj in entity_ontology:
target_entity = set(
str(p.get(target_key, "")).lower() for p in target_entity_obj
)
if len(set_entity.intersection(target_entity)) >= match_threshold:
print("Fuzzy match found for %s" % str_entity)
return target_entity_obj
except Exception:
print("Can't convert to list.")
# Fuzzy match 2
if fuzzy:
print("Trying fuzzy match for entity %s" % str_entity)
for target_entity_obj in entity_ontology:
edit_distance = get_edit_distance(
str_entity.lower().replace("'", ""),
str(
[str(p.get(target_key, "")).lower() for p in target_entity_obj]
).replace("'", ""),
)
if edit_distance < 9:
print("Fuzzy match found for %s" % str_entity)
return target_entity_obj
print("Match not found for %s" % str_entity)
return {}
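# Example (illustrative): the string "['Carl', 'Shane']" can fuzzy-match an
# ontology entry [{"name": "Carl"}, {"name": "Bryan"}, {"name": "Emily"}]:
# with two query names, match_threshold = max(1, 2 // 2 - 1) = 1, so the one
# shared name ("carl") is enough for a match.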
def resolve_participant(
str_participant: str, participant_ontology: list, fuzzy: bool
) -> dict:
print("Resolving participant: %s" % str_participant)
return resolve_list_entities(
str_entity=str_participant,
entity_ontology=participant_ontology,
fuzzy=fuzzy,
target_key="name",
)
def resolve_activity(str_activity: str, activity_ontology: list, fuzzy: bool) -> dict:
print("Resolving activity: %s" % str_activity)
return resolve_list_entities(
str_entity=str_activity,
entity_ontology=activity_ontology,
fuzzy=fuzzy,
target_key="activity_name",
)
if __name__ == "__main__":
# Test resolve entities
import json
path_parameter_ontology = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/final_data/all_parameter_ontology.json"
parameter_ontology = json.load(open(path_parameter_ontology, "r"))
list_slot_values = [
# Strict match
{
"location": "Seattle Downtown",
"participant": "['Carl', 'Bryan', 'Emily']",
"activity": "['cooking sausages']",
},
# Fuzzy match by set intersection
{
"location": "seattle downtow",
"participant": "['Carl', 'Shane']",
"activity": "['cooking sausages', 'peeling potatoes']",
},
# Fuzzy match with incomplete list formats
{
"location": "Bay Area",
"participant": "Carl Bryan Emily",
"activity": "[cooking sausages",
},
]
for slot_values in list_slot_values:
print("------------------------------------")
print(resolve_sv_entities(slot_values, parameter_ontology))
|
comet_memory_dialog-main
|
dialog_simulator/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
from typing import List, Tuple
from SimulatorBase import SimulatorBase
from constants import GoalType, DialogAct, API_STATUS, API_CALL_TYPE
from Data import (
MemoryDialog,
Goal,
Frame,
ActAttributes,
APIRequest,
APIResponse,
GoalParameter,
)
from MemoryServiceAPI import MemoryServiceAPI
from utils import str_slot_values, str_request_slots, str_memories, get_template
random.seed(0)
class AssistantSimulator(SimulatorBase):
def __init__(self, *args, **kwargs):
super(AssistantSimulator, self).__init__(*args, **kwargs)
self.memory_service_api = None
class ModelBasedAssistantSimulator(AssistantSimulator):
def __init__(self, *args, **kwargs):
super(ModelBasedAssistantSimulator, self).__init__(*args, **kwargs)
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self, goal: Goal, memory_dialog: MemoryDialog
) -> Tuple[Frame, APIRequest, APIResponse]:
# Need to define this behavior e.g. as a config, a model, etc.
pass
def generate_uttr(self, frame: Frame, goal: Goal) -> str:
pass
class RuleBasedAssistantSimulator(AssistantSimulator):
def __init__(self, *args, **kwargs):
super(RuleBasedAssistantSimulator, self).__init__(*args, **kwargs)
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def execute_turn(
self, goal: Goal, memory_dialog: MemoryDialog
) -> Tuple[Frame, APIRequest, APIResponse]:
# Need to define this behavior e.g. as a config, a model, etc.
pass
def generate_uttr(self, frame: Frame, goal: Goal) -> str:
pass
class PilotAssistantSimulator(AssistantSimulator):
"""
Includes the simplest implementation of an AssistantSimulator.
Use this class as a guide for implementing more complex
simulators.
"""
def __init__(self, *args, **kwargs):
super(PilotAssistantSimulator, self).__init__(*args, **kwargs)
# Simple interaction deterministic mapping
self._goal_to_handler = {
GoalType.UNKNOWN: self.AssistantGoalHandler(),
GoalType.SEARCH: self.AssistantSearchGoalHandler(),
GoalType.REFINE_SEARCH: self.AssistantRefineSearchGoalHandler(),
GoalType.GET_RELATED: self.AssistantGetRelatedGoalHandler(),
GoalType.GET_INFO: self.AssistantGetInfoGoalHandler(),
# GoalType.GET_AGGREGATED_INFO: self.AssistantGetAggregatedInfoGoalHandler(),
GoalType.SHARE: self.AssistantShareGoalHandler(),
}
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
return True
def execute_turn(
self, goal: Goal, memory_dialog: MemoryDialog
) -> Tuple[Frame, APIRequest, APIResponse]:
handler = self._goal_to_handler[goal.goal_type]
return handler.execute_turn(goal, memory_dialog, self.memory_service_api)
def generate_uttr(self, frame: Frame, goal: Goal) -> Frame:
handler = self._goal_to_handler[goal.goal_type]
uttr = handler.generate_uttr(frame, goal, self.memory_service_api)
frame.uttr = uttr
return frame
class AssistantGoalHandler:
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
return (
Frame("", DialogAct.UNKNOWN, ActAttributes()),
APIRequest(),
APIResponse(),
)
def generate_uttr(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
template = get_template(self._uttr_template, frame)
verbose_memory = random.random() < 0.35
uttr = template.format(
slot_values=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories,
memory_service_api,
verbose=verbose_memory,
),
)
return uttr
class AssistantSearchGoalHandler(AssistantGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_asst_acts = [
DialogAct.INFORM_GET,
# DialogAct.PROMPT_REFINE
]
self.user_search_acts = set(
[DialogAct.REQUEST_GET, DialogAct.INFORM_REFINE, DialogAct.INFORM_GET]
)
self.asst_search_acts = set(
[
DialogAct.INFORM_GET,
]
)
self._uttr_template = {
DialogAct.INFORM_GET: [
"Here is what I found: {memories}.",
"Check out these photos: (summarize) {memories}.",
"How is what I found: {memories}. They match some of the criteria: {slot_values}.",
"I found these photos: {memories}.",
"Here is what I found: {memories}. [[ Please comment on the retrieved photos. ]]",
"Here is what I found: {memories}. [[ Briefly summarize what is visible in the photos. ]]",
]
}
self._uttr_template_no_results = {
DialogAct.INFORM_GET: [
"Sorry, I could not find any photo/video for {slot_values}.",
"Sorry, I could not find any photo/video.",
"I could not find any photo that matches the criteria {slot_values}.",
]
}
def generate_uttr(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
memories = frame.act_attributes.memories
if len(memories) > 0:
template = get_template(self._uttr_template, frame)
else:
template = get_template(self._uttr_template_no_results, frame)
verbose_memory = random.random() < 0.35
uttr = template.format(
slot_values=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories,
memory_service_api,
verbose=verbose_memory,
),
)
return uttr
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
assert len(memory_dialog.dialog.user_turns) > 0
last_user_turn = memory_dialog.dialog.user_turns[-1]
# Check the routing logic here
if last_user_turn.has_dialog_acts(self.user_search_acts):
# 1. User requests SEARCH with parameters
# Get a random dialog act label
asst_dialog_act = random.choice(self.available_asst_acts)
api_response = APIResponse()
api_request = APIRequest()
if asst_dialog_act in self.asst_search_acts:
# 1. (1) Return Search results
# Randomly fill the act_attributes
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
# Construct an API request
# ** TODO **: grab the correct frame, instead of the last frame
requested_act_attributes = last_user_turn.frames[
-1
].act_attributes
api_parameters = {
"slot_values": requested_act_attributes.slot_values_resolved
}
if goal_parameter.request_slots != []:
api_parameters[
"request_slots"
] = goal_parameter.request_slots
call_type = None
if goal.goal_type in set([GoalType.REFINE_SEARCH]):
call_type = API_CALL_TYPE.REFINE_SEARCH
else:
call_type = API_CALL_TYPE.SEARCH
api_request = APIRequest(
call_type=call_type,
parameters=api_parameters,
memory_dialog=memory_dialog,
)
# Send in the request and get the API Response back
api_response = memory_service_api.call_api(api_request)
# Construct Act Attributes from the API Response
act_attributes = ActAttributes()
if api_response.status == API_STATUS.SEARCH_FOUND:
act_attributes = ActAttributes(
slot_values=requested_act_attributes.slot_values,
slot_values_resolved=requested_act_attributes.slot_values_resolved,
request_slots=[],
memories=api_response.results.get(
"retrieved_memories", []
),
)
elif api_response.status == API_STATUS.SEARCH_NOT_FOUND:
# TODO: we can put a special logic here
act_attributes = ActAttributes(
slot_values=requested_act_attributes.slot_values,
slot_values_resolved=requested_act_attributes.slot_values_resolved,
request_slots=[],
memories=api_response.results.get(
"retrieved_memories", []
),
)
list_act_attributes.append(act_attributes)
else:
# 1. (2) Follow-up questions
# 1. (3) Check disambiguation request
# TODO
pass
else:
# 2. Handle disambiguation info
# TODO
pass
# Return a Frame object with the generated intent and attributes
# TODO: handle multiple goal parameters & multiple acts
return (
Frame("", asst_dialog_act, list_act_attributes[0]),
api_request,
api_response,
)
# Note: this redefinition subclasses the handler above without adding any
# behavior; it is effectively a no-op.
class AssistantSearchGoalHandler(AssistantSearchGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class AssistantRefineSearchGoalHandler(AssistantSearchGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
# Execute
return super().execute_turn(goal, memory_dialog, memory_service_api)
class AssistantGetRelatedGoalHandler(AssistantGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_asst_acts = [
DialogAct.INFORM_GET,
]
self._uttr_template = {
DialogAct.INFORM_GET: [
"Here are some of the related photos I found: {memories}.",
"Here are the related memories: {memories}.",
"Here are the related memories I found: {memories}.",
"Here are the related memories: {memories}. They match some of the criteria: {request_slots}.",
"Here are the related memories: {memories}. [[ Please comment on the retrieved photos ]].",
"Here are the related memories: {memories}. [[ Please summarize what is visible in the photos briefly ]].",
]
}
self._uttr_template_no_request_slots = {
DialogAct.INFORM_GET: ["Here are the related memories: {memories}."]
}
self._uttr_template_no_results = {
DialogAct.INFORM_GET: [
"I could not find any related memory that matches the criteria.",
"Sorry, I could not find any related memory. Anything else I can help?",
]
}
def generate_uttr(
self, frame: Frame, goal: Goal, memory_service_api: MemoryServiceAPI
) -> str:
memories = frame.act_attributes.memories
request_slots = frame.act_attributes.request_slots
if len(memories) > 0:
if len(request_slots) > 0:
template = get_template(self._uttr_template, frame)
else:
template = get_template(self._uttr_template_no_request_slots, frame)
else:
template = get_template(self._uttr_template_no_results, frame)
verbose_memory = random.random() < 0.35
uttr = template.format(
slot_values=str_slot_values(frame.act_attributes.slot_values),
request_slots=str_request_slots(frame.act_attributes.request_slots),
memories=str_memories(
frame.act_attributes.memories,
memory_service_api,
verbose=verbose_memory,
),
)
return uttr
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
assert len(memory_dialog.dialog.user_turns) > 0
last_user_turn = memory_dialog.dialog.user_turns[-1]
if True:
# 1. User requests GET SIMILAR with parameters
# Get a random dialog act label
asst_dialog_act = random.choice(self.available_asst_acts)
api_response = APIResponse()
api_request = APIRequest()
if True:
# 1. (1) Return GET_RELATED results
# Randomly fill the act_attributes
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
# Construct an API request
api_request = APIRequest(
call_type=API_CALL_TYPE.GET_RELATED,
parameters={
##### TODO: fix it so it grabs the right frame (instead of the last frame)
"memories": last_user_turn.frames[
-1
].act_attributes.memories,
"request_slots": last_user_turn.frames[
-1
].act_attributes.request_slots,
"slot_values": goal_parameter.filter, ## TODO
},
memory_dialog=memory_dialog,
)
# Send in the request and get the API Response back
api_response = memory_service_api.call_api(api_request)
# Construct Act Attributes from the API Response
act_attributes = ActAttributes()
if api_response.status == API_STATUS.SEARCH_FOUND:
act_attributes = ActAttributes(
slot_values=api_response.results.get(
"retrieved_info", {}
),
request_slots=api_response.results.get(
"request_slots", []
),
memories=api_response.results.get(
"retrieved_memories", []
),
)
elif api_response.status == API_STATUS.SEARCH_NOT_FOUND:
# TODO: we can put a special logic here
act_attributes = ActAttributes(
slot_values=api_response.results.get(
"retrieved_info", {}
),
request_slots=api_response.results.get(
"request_slots", []
),
memories=api_response.results.get(
"retrieved_memories", []
),
)
list_act_attributes.append(act_attributes)
else:
# 1. (2) Follow-up questions
# 1. (3) Check disambiguation request
# TODO
pass
else:
# 2. Handle disambiguation info
# TODO
pass
# Return a Frame object with the generated intent and attributes
# TODO: handle multiple goal parameters & multiple acts
return (
Frame("", asst_dialog_act, list_act_attributes[0]),
api_request,
api_response,
)
class AssistantGetInfoGoalHandler(AssistantGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_asst_main_acts = [
DialogAct.INFORM_GET,
]
self.available_asst_disambiguation_acts = [
DialogAct.REQUEST_DISAMBIGUATE,
]
self._uttr_template = {
DialogAct.INFORM_GET: [
"Here is the info on {request_slots}: {slot_values}",
"I found the info on {request_slots}: {slot_values}",
"Here is the info I found: {slot_values}",
],
DialogAct.REQUEST_DISAMBIGUATE: [
"Which photo or video do you mean?",
"Could you clarify which photo or video you are referring to?",
],
}
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
assert len(memory_dialog.dialog.user_turns) > 0
last_user_turn = memory_dialog.dialog.user_turns[-1]
api_request = APIRequest()
if not last_user_turn.is_disambiguation_response():
# 1. User requests GET INFO with parameters
api_response = APIResponse()
# Request for disambiguation at a random rate
n_mentioned_memories = len(memory_dialog.dialog.mentioned_memory_ids)
if n_mentioned_memories > 1:
skip_disambiguation = random.random() > 0.4
else:
# Only one or less memory was mentioned
skip_disambiguation = True
if skip_disambiguation:
(
asst_dialog_act,
list_act_attributes,
api_request,
api_response,
) = self.main_act(goal, memory_dialog, memory_service_api)
else:
# 1. (2) Raise disambiguation request
# TODO
asst_dialog_act = random.choice(
self.available_asst_disambiguation_acts
)
list_act_attributes = [ActAttributes()]
api_response = APIResponse()
else:
# 2. Handle disambiguation info
(
asst_dialog_act,
list_act_attributes,
api_request,
api_response,
) = self.main_act(goal, memory_dialog, memory_service_api)
# Return a Frame object with the generated intent and attributes
# TODO: handle multiple goal parameters & multiple acts
return (
Frame("", asst_dialog_act, list_act_attributes[0]),
api_request,
api_response,
)
def main_act(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
):
last_user_turn = memory_dialog.dialog.user_turns[-1]
# 1. (1) Return info results
# Get a random dialog act label
asst_dialog_act = random.choice(self.available_asst_main_acts)
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
# Construct an API request
api_request = APIRequest(
call_type=API_CALL_TYPE.GET_INFO,
parameters={
##### TODO: fix it so it grabs the right frame (instead of the last frame)
"memories": last_user_turn.frames[-1].act_attributes.memories,
"request_slots": last_user_turn.frames[
-1
].act_attributes.request_slots,
},
memory_dialog=memory_dialog,
)
# Send in the request and get the API Response back
api_response = memory_service_api.call_api(api_request)
# Construct Act Attributes from the API Response
act_attributes = ActAttributes()
if api_response.status == API_STATUS.INFO_FOUND:
act_attributes = ActAttributes(
slot_values=api_response.results.get("retrieved_info", {}),
request_slots=api_response.results.get("request_slots", []),
memories=api_response.results.get("retrieved_memories", []),
)
elif api_response.status == API_STATUS.INFO_NOT_FOUND:
# TODO
pass
list_act_attributes.append(act_attributes)
return asst_dialog_act, list_act_attributes, api_request, api_response
class AssistantShareGoalHandler(AssistantGoalHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.available_asst_acts = [
DialogAct.CONFIRM_SHARE,
]
self._uttr_template = {
DialogAct.CONFIRM_SHARE: [
"Confirmed. I will share {memories}.",
"Confirmed. I will share them.",
],
}
def execute_turn(
self,
goal: Goal,
memory_dialog: MemoryDialog,
memory_service_api: MemoryServiceAPI,
) -> Tuple[Frame, APIRequest, APIResponse]:
assert len(memory_dialog.dialog.user_turns) > 0
last_user_turn = memory_dialog.dialog.user_turns[-1]
if True:
# 1. User requests SHARE with parameters
# Get a random dialog act label
asst_dialog_act = random.choice(self.available_asst_acts)
api_response = APIResponse()
api_request = APIRequest()
if True:
# 1. (1) Return info results
list_act_attributes = []
for goal_parameter in goal.goal_parameters:
# Construct an API request
api_request = APIRequest(
call_type=API_CALL_TYPE.SHARE,
parameters={
## TODO: fix so it grabs the right frame
"memories": last_user_turn.frames[
-1
].act_attributes.memories,
},
memory_dialog=memory_dialog,
)
# Send in the request and get the API Response back
api_response = memory_service_api.call_api(api_request)
# Construct Act Attributes from the API Response
act_attributes = ActAttributes()
if api_response.status == API_STATUS.SHARED:
act_attributes = ActAttributes(
slot_values={},
request_slots=[],
memories=api_response.results.get(
"retrieved_memories", []
),
)
list_act_attributes.append(act_attributes)
else:
# 1. (2) Raise disambiguation request
# TODO
pass
else:
# 2. Handle disambiguation info
# TODO
pass
# Return a Frame object with the generated intent and attributes
# TODO: handle multiple goal parameters & multiple acts
return (
Frame("", asst_dialog_act, list_act_attributes[0]),
api_request,
api_response,
)
|
comet_memory_dialog-main
|
dialog_simulator/AssistantSimulator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import os
import copy
import json
import csv
import random
import pickle
from MemoryDialogSimulator import MemoryDialogSimulator
from UserSimulator import PilotUserSimulator
from AssistantSimulator import PilotAssistantSimulator
from GoalGenerator import RuleBasedGoalGenerator
from MemoryServiceAPI import MemoryServiceAPI
from utils import str_memory
if __name__ == "__main__":
# Parameters for generation
domain = "memory"
random.seed(0)
n_dialogs = 6000
n_max_turns = 8 # 5, 8, 10
goal_config = {
"n_min_goals": 3, # 4
"n_max_goals": 6, # 6
}
start_dialog_idx = 5500
# path_memory_graph_list = '/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/final/memory_may21_v1_100graphs.json'
path_memory_graph_list = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/final/mscoco_memory_graphs_1k.json"
path_out_json = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials.json"
path_out_csv = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials.tsv"
path_out_pickle = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/results/batch_4_mem_dials.p"
# Make sure we are not overwriting
debug = False
if not debug:
assert not os.path.exists(path_out_json)
assert not os.path.exists(path_out_csv)
assert not os.path.exists(path_out_pickle)
# Load parameters
memory_graph_list = json.load(open(path_memory_graph_list, "r"))
memory_graph_bank = {}
for memory_graph in memory_graph_list:
memory_graph_id = memory_graph["memory_graph_id"]
for i in range(len(memory_graph["memories"])):
memory_graph["memories"][i]["memory_graph_id"] = memory_graph_id
memory_graph_bank[memory_graph_id] = memory_graph
# Initialize the multimodal simulator
sim = MemoryDialogSimulator(
user_simulator=PilotUserSimulator(),
assistant_simulator=PilotAssistantSimulator(),
goal_generator=RuleBasedGoalGenerator(domain=domain),
memory_service_api=MemoryServiceAPI(metadata={}),
memory_graph_bank=memory_graph_bank,
domain=domain,
)
# Generate dialogs
memory_dialogs = sim.batch_generate_dialog_flows(
n_dialogs=n_dialogs,
n_max_turns=n_max_turns,
start_dialog_idx=start_dialog_idx,
goal_config=goal_config,
)
# Output dialogs
# a. Pickle output
pickle.dump(memory_dialogs, open(path_out_pickle, "wb"))
# b. JSON output
json.dump(
{"dialogue_data": [m_d.to_dict() for m_d in memory_dialogs]},
open(path_out_json, "w"),
indent=4,
)
# c. print output
for i, m_d in enumerate(memory_dialogs[:20]):
d = m_d.dialog
str_dialog = ""
print(f"----- Dialog {d.idx} ----- ")
for j in range(len(d.user_turns)):
user_turn = d.user_turns[j]
asst_turn = d.asst_turns[j]
for user_frame in user_turn.frames:
str_dialog += "U: " + user_frame.uttr + "\n"
# str_dialog += 'U: ' + str(user_frame.nlu.act_attributes.slot_values.values()) + '\n'
for asst_frame in asst_turn.frames:
str_dialog += "A: " + asst_frame.uttr + "\n"
# str_dialog += 'A: ' + str(asst_frame.nlu.act_attributes.slot_values.values()) + '\n'
print(str_dialog)
# d. TSV output for annotation
url_blank = "https://simmc2.s3-us-west-1.amazonaws.com/white.png"
with open(path_out_csv, "w", newline="") as csvfile:
writer = csv.writer(csvfile, delimiter="\t", quotechar="'")
writer.writerow(
[
"dialog_id",
"dialog",
"img_0_url",
"img_1_url",
"img_2_url",
"img_3_url",
"img_4_url",
"img_5_url",
"img_6_url",
"img_7_url",
"img_8_url",
"img_9_url",
"img_10_url",
"img_11_url",
"img_12_url",
"img_13_url",
"img_14_url",
"img_15_url",
"img_0_desc",
"img_1_desc",
"img_2_desc",
"img_3_desc",
"img_4_desc",
"img_5_desc",
"img_6_desc",
"img_7_desc",
"img_8_desc",
"img_9_desc",
"img_10_desc",
"img_11_desc",
"img_12_desc",
"img_13_desc",
"img_14_desc",
"img_15_desc",
"metadata",
]
)
for _, m_d in enumerate(memory_dialogs):
mg = m_d.memory_graph
d = m_d.dialog
dialog_data = []
image_id = 0
all_image_urls = [url_blank]
all_memories = [None]
display_image_ids = [image_id]
for i in range(len(d.user_turns)):
# User turn
user_turn = d.user_turns[i]
user_utter = "USER: " + ". ".join(
[frame.uttr for frame in user_turn.frames]
)
user_turn_data = {
"turn_id": i * 2,
"speaker": "USER",
"utterance": user_utter.replace("'", ""),
"image_id": copy.deepcopy(display_image_ids),
"validation": []
#'validation': make_validation_tokens_for_turn(user_turn)
}
# Assistant turn
asst_turn = d.asst_turns[i]
asst_utter = "ASSISTANT: " + ". ".join(
[frame.uttr for frame in asst_turn.frames]
)
memory_ids = asst_turn.frames[-1].act_attributes.to_dict()["memories"]
if memory_ids != []:
display_urls = []
display_image_ids = []
for memory_id in memory_ids:
display_urls.extend(mg.get_memory_url(memory_id))
image_id += 1
display_image_ids.append(image_id)
all_image_urls.extend(display_urls)
all_memories.extend(mg.get_memories_by_ids(memory_ids))
asst_turn_data = {
"turn_id": i * 2 + 1,
"speaker": "ASSISTANT",
"utterance": asst_utter.replace("'", ""),
"image_id": copy.deepcopy(display_image_ids),
"validation": []
#'validation': make_validation_tokens_for_turn(asst_turn)
}
dialog_data.append(user_turn_data)
dialog_data.append(asst_turn_data)
# This should be true, assuming each memory has one image.
assert len(all_image_urls) == len(all_memories)
writer.writerow(
[
d.idx,
str(json.dumps(dialog_data)),
all_image_urls[0], # url_0
all_image_urls[1] if len(all_image_urls) > 1 else "",
all_image_urls[2] if len(all_image_urls) > 2 else "",
all_image_urls[3] if len(all_image_urls) > 3 else "",
all_image_urls[4] if len(all_image_urls) > 4 else "",
all_image_urls[5] if len(all_image_urls) > 5 else "",
all_image_urls[6] if len(all_image_urls) > 6 else "",
all_image_urls[7] if len(all_image_urls) > 7 else "",
all_image_urls[8] if len(all_image_urls) > 8 else "",
all_image_urls[9] if len(all_image_urls) > 9 else "",
all_image_urls[10] if len(all_image_urls) > 10 else "",
all_image_urls[11] if len(all_image_urls) > 11 else "",
all_image_urls[12] if len(all_image_urls) > 12 else "",
all_image_urls[13] if len(all_image_urls) > 13 else "",
all_image_urls[14] if len(all_image_urls) > 14 else "",
all_image_urls[15] if len(all_image_urls) > 15 else "",
"", # url_0
str_memory(all_memories[1]) if len(all_image_urls) > 1 else "",
str_memory(all_memories[2]) if len(all_image_urls) > 2 else "",
str_memory(all_memories[3]) if len(all_image_urls) > 3 else "",
str_memory(all_memories[4]) if len(all_image_urls) > 4 else "",
str_memory(all_memories[5]) if len(all_image_urls) > 5 else "",
str_memory(all_memories[6]) if len(all_image_urls) > 6 else "",
str_memory(all_memories[7]) if len(all_image_urls) > 7 else "",
str_memory(all_memories[8]) if len(all_image_urls) > 8 else "",
str_memory(all_memories[9]) if len(all_image_urls) > 9 else "",
str_memory(all_memories[10]) if len(all_image_urls) > 10 else "",
str_memory(all_memories[11]) if len(all_image_urls) > 11 else "",
str_memory(all_memories[12]) if len(all_image_urls) > 12 else "",
str_memory(all_memories[13]) if len(all_image_urls) > 13 else "",
str_memory(all_memories[14]) if len(all_image_urls) > 14 else "",
str_memory(all_memories[15]) if len(all_image_urls) > 15 else "",
{}, # mockup
]
)
# print(json.dumps(dialog_data))
# e. Summary
print("n_dialogs:", len(memory_dialogs))
print("n_turns:", sum([len(m_d.dialog.asst_turns) for m_d in memory_dialogs]))
|
comet_memory_dialog-main
|
dialog_simulator/main.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
from constants import (
GoalType,
GoalMemoryRefType,
numeric_slots,
non_visual_slots,
visual_slots,
all_slots,
)
from Data import Goal, GoalParameter, MemoryTime
from utils import weighted_choice
import copy
random.seed(0)
class RuleBasedGoalGenerator:
def __init__(self, *args, **kwargs):
self.non_visual_slots = non_visual_slots
self.visual_slots = visual_slots
self.all_slots = all_slots
def sample_goals(self, *args, **kwargs):
memory_graph = kwargs.pop("memory_graph", None)
goal_config = kwargs.pop("goal_config", {})
n_min_goals = goal_config.get("n_min_goals", 3)
n_max_goals = goal_config.get("n_max_goals", 5)
n_goals = random.randint(n_min_goals, n_max_goals)
goal_type_list = [
GoalType.SEARCH,
GoalType.REFINE_SEARCH,
GoalType.GET_RELATED,
GoalType.GET_INFO,
GoalType.GET_AGGREGATED_INFO,
GoalType.SHARE,
GoalType.CHITCHAT,
]
goal_type_list_weights_start = [
1,
0,
0,
0,
0,
0,
0,
# 1, 0, 0, 0, 1, 0, 0,
]
goal_type_list_weights_mid = [
0.8,
1.1,
1.7,
1.1,
0,
0.1,
0,
# 1, 0.8, 0.8, 1, 1, 0.5, 0.5,
]
goal_type_list_weights_end = [
0.3,
0.5,
0.6,
0.5,
0,
3,
0,
# 0.5, 0.5, 0.5, 0.5, 0.5, 3, 1,
]
# Randomly sample from the goal type list
# For now, the weights above force the goals to start with SEARCH
# and strongly bias the final goal toward SHARE
# TODO: allow for a more flexible way of generating
# goal types
goal_types = (
random.choices(
population=goal_type_list, weights=goal_type_list_weights_start, k=1
)
+ random.choices(
population=goal_type_list,
weights=goal_type_list_weights_mid,
k=n_goals - 2,
)
+ random.choices(
population=goal_type_list, weights=goal_type_list_weights_end, k=1
)
)
# Make a complete goal with an accompanying set of goal parameters
# for each goal_type
goals = []
for goal_type in goal_types:
# For now, we pass in a random set of goal_parameters
goal_parameters = self.sample_goal_parameters(
goal_type, memory_graph, goal_config
)
goals.append(Goal(goal_type=goal_type, goal_parameters=goal_parameters))
return goals
def sample_goal_parameters(self, goal_type, memory_graph, goal_config):
# Sample goal parameters according to the input sample
# TODO: IMPLEMENT **
goal_parameters = []
parameter_ontology = goal_config["parameter_ontology"]
# (1) Pick a search filter
search_filter = {}
if goal_type in set(
[GoalType.SEARCH, GoalType.REFINE_SEARCH, GoalType.GET_RELATED]
):
if goal_type == GoalType.GET_RELATED:
n_slots = weighted_choice(population=[1, 2], weights=[0.93, 0.07])
else:
n_slots = weighted_choice(population=[1, 2], weights=[0.75, 0.25])
# Candidate slots: exclude a few slots that
# are semantically infeasible
# **** TODO ****: confirm that there is no slot to exclude
candidate_slots = self.all_slots - set([""])
search_filter_slots = random.choices(
population=list(candidate_slots), k=n_slots
)
for search_filter_slot in search_filter_slots:
# We first randomly assign a value for a randomly selected slot
if search_filter_slot == "time":
# Instead of choosing a specific datetime,
# search by year or month instead.
random_datetime = MemoryTime(
str_datetime=random.choice(
parameter_ontology["all"].get(search_filter_slot)
)
)
if random.random() > 0.1:
search_filter_value = str(MemoryTime(year=random_datetime.year))
else:
search_filter_value = str(
MemoryTime(
year=random_datetime.year, month=random_datetime.month
)
)
if goal_type == GoalType.GET_RELATED:
# A special value for refine_search: 'next' and 'prev'
# e.g. "where did we go next?"
if random.random() > 0.3:
search_filter_value = random.choice(
["right after", "right before", "on the same day"]
)
elif search_filter_slot == "location":
# TODO: Instead of choosing a specific location,
# occasionally search with a coarser query.
search_filter_value = random.choice(
parameter_ontology["all"].get(search_filter_slot)
)
if random.random() > 0.7:
# Coarsen the query: drop the fine-grained place tag so only the
# coarser geo fields (e.g. city) remain.
search_filter_value = copy.deepcopy(search_filter_value)
search_filter_value["geo_tag"].pop("place", None)
else:
# TODO: handle subsampling of participants & activities
search_filter_value = random.choice(
parameter_ontology["all"].get(search_filter_slot)
)
if search_filter_value != "":
search_filter[search_filter_slot] = search_filter_value
# (2) Pick an object reference type
object_reference_type = GoalMemoryRefType.NOT_SPECIFIED
if goal_type in set([GoalType.GET_RELATED, GoalType.GET_INFO, GoalType.SHARE]):
object_reference_type = weighted_choice(
population=[
GoalMemoryRefType.PREV_TURN,
GoalMemoryRefType.DIALOG,
GoalMemoryRefType.GRAPH,
],
weights=[0.8, 0.2, 0.0],
)
# (3) Pick slots to request (e.g. in questions)
request_slots = []
if goal_type in set([GoalType.GET_INFO]):
# We randomly sample slots to ask
# ****** TODO *******: make sure it's not asking about
# the parameters that were already in search filter
ask_from_visual_slot = random.random() > 0.9
if ask_from_visual_slot:
# ask about visual_slots (rare): people, activity
n_request_slots = 1
request_slots.extend(
random.sample(self.visual_slots, n_request_slots)
)
else:
# ask about non_visual_slots: time, location
n_request_slots = weighted_choice(population=[1, 2], weights=[0.8, 0.2])
request_slots.extend(
random.sample(self.non_visual_slots, n_request_slots)
)
elif goal_type in set([GoalType.GET_RELATED]):
# We randomly sample slots to ask
# iff search_filter is empty
if len(search_filter) == 0:
n_request_slots = weighted_choice(population=[0, 1], weights=[0.4, 0.6])
request_slots.extend(random.sample(self.all_slots, n_request_slots))
elif goal_type in set([GoalType.GET_AGGREGATED_INFO]):
# ****** TODO *******
pass
# (4) Compile it into a goal parameter
goal_parameter = GoalParameter(
filter=search_filter,
reference_type=object_reference_type,
request_slots=request_slots,
)
goal_parameters.append(goal_parameter)
return goal_parameters
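# Example outcome (illustrative): with the weights above, a sampled goal
# sequence typically looks like
#     [SEARCH, GET_RELATED, GET_INFO, REFINE_SEARCH, SHARE]
# i.e. SEARCH first, a mix of refine/related/info goals in the middle, and
# usually SHARE at the end.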
|
comet_memory_dialog-main
|
dialog_simulator/GoalGenerator.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from Data import MemoryDialog, Goal, Frame
from typing import List
class SimulatorBase:
def register_memory_service_api(self, memory_service_api):
self.memory_service_api = memory_service_api
def fit_goal_to_intent(self, args):
# Define the goal to intent mapping behavior
pass
def is_servable(self, goal: Goal) -> bool:
# Check whether this simulator can serve the input goal.
pass
def generate_nlu_label(self, goal: Goal, context: MemoryDialog) -> Frame:
# Need to define this behavior first e.g. as a config, a model, etc.
pass
def generate_uttr(self, nlu_label: Frame) -> str:
pass
|
comet_memory_dialog-main
|
dialog_simulator/SimulatorBase.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
from __future__ import annotations
from constants import GoalType, GoalMemoryRefType, DialogAct
from utils import str_memories, int_memory_ids, get_slot_values_simple_from_json
import pickle
from datetime import datetime
class MemoryDialog:
def __init__(self, *args, **kwargs):
self.memory_graph = kwargs.pop("memory_graph", {}) # JSON format
self.dialog = kwargs.pop("dialog", None)
self.domain = kwargs.pop("domain", None)
def initialize(self):
self.dialog = Dialog(domain=self.domain)
def update(self, *args, **kwargs):
# Reflects change in scenes or in dialogs
# TODO: implement
if "memory_graph" in kwargs:
self.memory_graph = kwargs.pop("memory_graph")
if "dialog" in kwargs:
self.dialog = kwargs.pop("dialog")
def is_goal_met(self, goal):
# TODO: implement a more robust goal checking logic
# For now, we look where there is a hanging 'disambiguation' request
if self.dialog.asst_turns == []:
return False
last_asst_turn = self.dialog.asst_turns[-1]
goal_met = not last_asst_turn.is_disambiguation_request()
return goal_met
def to_dict(self):
out = self.dialog.to_dict()
out["memory_graph_id"] = self.memory_graph.get_id()
return out
def get_memories(self):
return self.memory_graph.get_memories()
class MemoryGraph:
def __init__(self, *args, **kwargs):
json_data = kwargs.pop("data", {})
self.load_data(json_data)
def load_data(self, json_data):
self.id = json_data["memory_graph_id"]
self.memories = [Memory(data=m) for m in json_data["memories"]]
self.groups = json_data["memory_groups"]
# Construct the memory to day/event mapping.
self.trip_map = {}
self.day_map = {}
self.event_map = {}
for trip_ind, trip_datum in enumerate(self.groups):
for day_ind, day_datum in enumerate(trip_datum["days"]):
for event_ind, event_datum in enumerate(day_datum["events"]):
for memory_id in event_datum["memories"]:
self.trip_map[memory_id] = trip_ind
self.day_map[memory_id] = day_ind
self.event_map[memory_id] = event_ind
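# Illustrative sketch (hypothetical toy data, not from any real graph): with
# groups = [{"days": [{"events": [{"memories": [7]}]}]}], memory 7 maps to
# trip_map[7] == 0, day_map[7] == 0 and event_map[7] == 0.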
def get_day_events(self, memory_id):
"""Get the day events given memory_id."""
trip_datum = self.groups[self.trip_map[memory_id]]
return trip_datum["days"][self.day_map[memory_id]]
def get_events(self, memory_id):
"""Get the events given memory_id."""
day_datum = self.get_day_events(memory_id)
return day_datum["events"][self.event_map[memory_id]]
def get_id(self):
return self.id
def get_memories(self):
return self.memories
def get_memory_by_id(self, memory_id):
for memory in self.memories:
if int(memory.data["memory_id"]) == int(memory_id):
return memory
def get_memories_by_ids(self, memory_ids):
return [self.get_memory_by_id(memory_id) for memory_id in memory_ids]
def get_memory_url(self, memory_id):
for memory in self.memories:
if memory.data["memory_id"] == memory_id:
return memory.get_memory_url()
return []
class Memory:
def __init__(self, *args, **kwargs):
self.data = kwargs.pop("data", {})
self.load_data(self.data)
def __str__(self):
return "Memory ID: {id} ({narrations}), Time: {time}, Loc: {location}".format(
id=self.data["memory_id"],
narrations=self.data["narrations"],
time=self.data["time"],
location=str(self.data["location"]["geo_tag"].get("place", "")),
)
def load_data(self, json_data):
# ** TODO **
"""
self.id = json_data['memory_id']
self.time = json_data['time']
self.start_time = json_data['start_time']
self.end_time = json_data['end_time']
self.narrations = json_data['narrations']
self.media = json_data['media']
self.location = json_data['location']
self.participant = json_data['participant']
self.activity = json_data['activity']
self.object = json_data['object']
"""
pass
def get_memory_url(self):
return [a["url"] for a in self.data["media"]]
class ActAttributes:
def __init__(self, *args, **kwargs):
self.slot_values = kwargs.pop("slot_values", {}) # slot_value pairs
self.slot_values_resolved = kwargs.pop("slot_values_resolved", {})
self.request_slots = kwargs.pop("request_slots", [])
self.memories = kwargs.pop("memories", []) # list of Memory objects
def __str__(self):
out = "{slot_values} | {request_slots} | {memories}".format(
slot_values=str(self.slot_values),
request_slots=str(self.request_slots),
memories=str_memories(self.memories),
)
return out
def to_dict(self):
return {
"slot_values": self.slot_values,
#'slot_values_resolved': self.slot_values_resolved,
"request_slots": self.request_slots,
"memories": int_memory_ids(self.memories),
}
class Frame:
def __init__(self, uttr: str, dialog_act: DialogAct, act_attributes: ActAttributes):
self.uttr = uttr
self.dialog_act = dialog_act
self.act_attributes = act_attributes
def __str__(self):
out = "{uttr} | {dialog_act} | {act_attributes}".format(
uttr=str(self.uttr),
dialog_act=self.dialog_act.value,
act_attributes=str(self.act_attributes),
)
return out
def to_dict(self):
return {
"uttr": self.uttr,
"act": self.dialog_act.value,
"act_attributes": self.act_attributes.to_dict(),
}
def is_disambiguation_request(self):
return self.dialog_act in set(
[DialogAct.REQUEST_DISAMBIGUATE, DialogAct.ASK_DISAMBIGUATE]
)
def is_disambiguation_response(self):
return self.dialog_act in set([DialogAct.INFORM_DISAMBIGUATE])
class Turn:
def __init__(self, frames, speaker, goal=None):
self.frames = frames
self.speaker = speaker
self.goal = goal
def __str__(self):
out = "{frames}".format(
frames=" / ".join([str(frame) for frame in self.frames])
)
return out
def is_disambiguation_request(self):
return any(frame.is_disambiguation_request() for frame in self.frames)
def is_disambiguation_response(self):
return any(frame.is_disambiguation_response() for frame in self.frames)
def get_uttr(self):
return ". ".join([f.uttr for f in self.frames])
def get_frames_to_dict(self):
return [f.to_dict() for f in self.frames]
def has_dialog_acts(self, dialog_acts):
"""
Return whether this turn contains
any of the input target dialog acts in its frames.
"""
for frame in self.frames:
if frame.dialog_act in dialog_acts:
return True
return False
class Dialog:
def __init__(self, idx=None, domain=None):
self.user_turns = []
self.asst_turns = []
self.goals = []
self.api_calls = []
self.api_results = []
self.idx = idx
self.domain = domain
self.mentioned_memory_ids = set([])
def __str__(self):
str_turns = []
for i in range(len(self.user_turns)):
user_turn = self.user_turns[i]
asst_turn = self.asst_turns[i]
str_turns.append(f"[Turn {i}] U: {user_turn}, A: {asst_turn}")
return str(str_turns)
def to_dict(self):
out = {
"dialogue": [],
"dialogue_idx": self.idx,
"domain": self.domain,
"mentioned_memory_ids": list(self.mentioned_memory_ids),
}
for i in range(len(self.user_turns)):
user_turn = self.user_turns[i]
asst_turn = self.asst_turns[i]
goal = self.goals[i]
api_call = self.api_calls[i]
turn_data = {
"turn_idx": i,
"system_transcript": asst_turn.get_uttr(),
"system_transcript_annotated": asst_turn.get_frames_to_dict(),
"transcript": user_turn.get_uttr(),
"transcript_annotated": user_turn.get_frames_to_dict(),
"goal_type": str(goal.goal_type),
"api_call": api_call.to_dict(),
#'api_result': api_result.to_dict()
}
try:
# Some earlier data is missing api_result
api_result = self.api_results[i]
turn_data["api_result"] = api_result.to_dict()
except IndexError:
api_result = {}
out["dialogue"].append(turn_data)
return out
def add_turn(self, user_turn, asst_turn):
self.add_user_turn(user_turn)
self.add_asst_turn(asst_turn)
def add_goal(self, goal):
self.goals.append(goal)
def add_api_call(self, api_call):
self.api_calls.append(api_call)
def add_api_result(self, api_result):
self.api_results.append(api_result)
def add_user_turn(self, user_turn):
self.user_turns.append(user_turn)
for frame in user_turn.frames:
for m in frame.act_attributes.memories:
self.mentioned_memory_ids.add(m.data["memory_id"])
def add_asst_turn(self, asst_turn):
self.asst_turns.append(asst_turn)
for frame in asst_turn.frames:
for m in frame.act_attributes.memories:
self.mentioned_memory_ids.add(m.data["memory_id"])
class APIRequest:
def __init__(self, *args, **kwargs):
self.call_type = kwargs.pop("call_type", None)
self.parameters = kwargs.pop("parameters", None)
self.memory_dialog = kwargs.pop("memory_dialog", None)
def __str__(self):
out = "call_type: {call_type}, parameters: {parameters}".format(
call_type=self.call_type, parameters=str(self.parameters)
)
return out
def to_dict(self, simple=False):
if self.parameters is not None:
parameters = {
"slot_values": self.parameters.get("slot_values", []),
"request_slots": self.parameters.get("request_slots", {}),
"memories": int_memory_ids(self.parameters.get("memories"))
if "memories" in self.parameters
else [],
}
if simple:
parameters["slot_values"] = get_slot_values_simple_from_json(
parameters["slot_values"]
)
else:
parameters = {}
return {"call_type": str(self.call_type), "parameters": parameters}
class APIResponse:
def __init__(self, *args, **kwargs):
self.status = kwargs.pop("status", None)
self.request = kwargs.pop("request", None)
self.results = kwargs.pop("results", {})
def __str__(self):
out = "status: {status}, results: {results}".format(
status=self.status, results=str(self.results)
)
return out
def to_dict(self):
return {
"status": str(self.status),
"results": {
"retrieved_memories": int_memory_ids(
self.results.get("retrieved_memories", [])
),
"retrieved_info": self.results.get("retrieved_info", []),
},
}
class GoalParameter:
def __init__(self, *args, **kwargs):
self.filter = kwargs.pop("filter", {}) # slot_value pairs
self.reference_type = kwargs.pop(
"reference_type", GoalMemoryRefType.NOT_SPECIFIED
)
self.request_slots = kwargs.pop(
"request_slots", []
) # need to map to Multimodal Context
def __str__(self):
out = "{filter} | {reference_type} | {request_slots}".format(
filter=str(self.filter),
reference_type=self.reference_type.value,
request_slots=str(self.request_slots),
)
return out
class Goal(object):
def __init__(self, *args, **kwargs):
self.goal_type = kwargs.pop("goal_type", GoalType.UNKNOWN)
self.goal_parameters = kwargs.pop("goal_parameters", [])
def __str__(self):
out = "{goal_type} | {goal_parameters}".format(
goal_type=str(self.goal_type),
goal_parameters=[str(p) for p in self.goal_parameters],
)
return out
class MemoryTime(object):
NOT_SPECIFIED = -1
def __init__(self, *args, **kwargs):
# Allows for not_specified time for easy calculation
self.year = kwargs.pop("year", self.NOT_SPECIFIED)
self.month = kwargs.pop("month", self.NOT_SPECIFIED)
self.day = kwargs.pop("day", self.NOT_SPECIFIED)
self.hour = kwargs.pop("hour", self.NOT_SPECIFIED)
self.minute = kwargs.pop("minute", self.NOT_SPECIFIED)
self.second = kwargs.pop("second", self.NOT_SPECIFIED)
if "str_datetime" in kwargs:
self.load_datetime(kwargs.pop("str_datetime"))
def load_datetime(self, str_datetime: str):
# datetime: "2021-04-10 10:00:00"
try:
datetime_obj = datetime.fromisoformat(str_datetime)
self.year = datetime_obj.year
self.month = datetime_obj.month
self.day = datetime_obj.day
self.hour = datetime_obj.hour
self.minute = datetime_obj.minute
self.second = datetime_obj.second
except ValueError:
year_month = str_datetime.split("-")
if len(year_month) == 1:
self.year = int(year_month[0])
else:
self.year = int(year_month[0])
self.month = int(year_month[1])
def is_within(self, target_memory_time: MemoryTime) -> bool:
# return whether self is within target_memory_time
# for now, we assume that either year and/or month is provided
if target_memory_time.year != self.NOT_SPECIFIED:
if self.year != target_memory_time.year:
return False
if target_memory_time.month != self.NOT_SPECIFIED:
if self.month != target_memory_time.month:
return False
return True
def __str__(self):
if self.day == self.NOT_SPECIFIED:
if self.month == self.NOT_SPECIFIED:
if self.year == self.NOT_SPECIFIED:
return ""
else:
return "%d" % self.year
else:
return "%d-%02d" % (self.year, self.month)
full_format = "%d-%02d-%02d %02d:%02d:%02d" % (
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
)
return full_format
class MemoryLocation(object):
def __init__(self, *args, **kwargs):
self.data = kwargs.pop("data", {})
def is_within(self, target_memory_location: MemoryLocation) -> bool:
# return whether self is within target_memory_location
memory_geo_tag = self.data["geo_tag"]
target_geo_tag = target_memory_location.data["geo_tag"]
if "place" in target_geo_tag:
return target_geo_tag["place"] == memory_geo_tag.get("place", "")
elif "city" in target_geo_tag:
return target_geo_tag["city"] == memory_geo_tag.get("city", "")
elif "state" in target_geo_tag:
return target_geo_tag["state"] == memory_geo_tag.get("state", "")
elif "country" in target_geo_tag:
return target_geo_tag["country"] == memory_geo_tag.get("country", "")
return False
if __name__ == "__main__":
# Memory Time operation test
memory_time_1 = MemoryTime(year=2016, month=3)
memory_time_2 = MemoryTime(year=2016, month=12)
memory_time_3 = MemoryTime(year=2016)
memory_time_4 = MemoryTime(year=2020)
memory_time_5 = MemoryTime(str_datetime="2020-10-23 10:00:00")
memory_time_6 = MemoryTime(str_datetime="2020-10")
print(memory_time_1)
print(memory_time_2)
print(memory_time_3)
print(memory_time_4)
print(memory_time_5)
print(memory_time_6)
print(memory_time_1.is_within(memory_time_2))
print(memory_time_1.is_within(memory_time_3))
print(memory_time_1.is_within(memory_time_4))
print(memory_time_5.is_within(memory_time_4))
goal = Goal(
goal_type=GoalType.GET_RELATED,
goal_parameters=[GoalParameter(filter={"time": memory_time_5})],
)
print(goal)
# Memory Graph Test
import json
path_memory_graph_list = "/Users/shanemoon/workspace/memory_dialog/dialog_simulator/memories/pilot/memory_may21_v1_100graphs.json"
memory_graph_list = json.load(open(path_memory_graph_list, "r"))
target_memory_graph_id = "St8BTzNuLCRb"
target_memory_graph_idx = -1
for i, memory_graph in enumerate(memory_graph_list):
if target_memory_graph_id == memory_graph["memory_graph_id"]:
target_memory_graph_idx = i
break
print(target_memory_graph_idx)
sample_memory_graph = memory_graph_list[target_memory_graph_idx]
mg = MemoryGraph(data=sample_memory_graph)
target_memory_index = 1
day_events = mg.get_day_events(memory_id=target_memory_index)
events = mg.get_events(memory_id=target_memory_index)
print("Target memory id:", target_memory_index)
print("Day events indices:", day_events)
print("Events indices:", events)
print("Event memories:", [str(mg.memories[idx]) for idx in events["memories"]])
|
comet_memory_dialog-main
|
dialog_simulator/Data.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#!/usr/bin/env python3
import random
from typing import Dict, Tuple
from Data import APIRequest, APIResponse, MemoryTime, MemoryLocation
from constants import API_CALL_TYPE, API_STATUS, GoalType
from utils import str_memory
from datetime import datetime
random.seed(0)
class MemoryServiceAPI:
def __init__(self, *args, **kwargs):
self.metadata = kwargs.pop("metadata", {})
def call_api(self, api_request: APIRequest) -> APIResponse:
status = None
results = None
if api_request.call_type == API_CALL_TYPE.SEARCH:
results, status = self.search(api_request)
elif api_request.call_type == API_CALL_TYPE.REFINE_SEARCH:
results, status = self.refine_search(api_request)
elif api_request.call_type == API_CALL_TYPE.GET_RELATED:
results, status = self.get_related(api_request)
elif api_request.call_type == API_CALL_TYPE.GET_INFO:
results, status = self.get_info(api_request)
elif api_request.call_type == API_CALL_TYPE.SHARE:
results, status = self.share(api_request)
# Construct a response
api_response = APIResponse(status=status, results=results, request=api_request)
return api_response
def search(self, api_request: APIRequest) -> Tuple[Dict, API_STATUS]:
# Unpack API Request
search_filter = api_request.parameters["slot_values"]
memory_dialog = api_request.memory_dialog
# Unpack more parameters
n_max_results = api_request.parameters.get("n_max_results", 2)
exclude_memory_ids = api_request.parameters.get("exclude_memory_ids", set())
# Prepare search candidates
search_candidates = memory_dialog.get_memories()
# Prepare search output
retrieved_memories = []
# Execute search
for memory in search_candidates:
# If there was an exclusion request, modify the search candidates
if int(memory.data["memory_id"]) in exclude_memory_ids:
continue
# TODO: ****** implement *****
meet_criteria = True
for slot, value in search_filter.items():
# TODO: handle special cases
if slot == "time":
if search_filter.get("time", None) in {
"right before",
"right after",
"on the same day",
}:
# This is an error case that can happen
# due to incorrect model behavior.
print("Wrong request ...")
meet_criteria = False
break
memory_time = MemoryTime(str_datetime=memory.data["time"])
search_time = MemoryTime(str_datetime=value)
if not memory_time.is_within(search_time):
meet_criteria = False
break
elif slot == "location":
memory_location = MemoryLocation(data=memory.data["location"])
search_location = MemoryLocation(data=value)
if not memory_location.is_within(search_location):
meet_criteria = False
break
elif slot == "participant":
memory_participants = {
p["name"] for p in memory.data["participant"]
}
search_participants = [p["name"] for p in value]
for search_participant in search_participants:
if search_participant not in memory_participants:
meet_criteria = False
break
elif slot == "activity":
memory_activities = {
a["activity_name"] for a in memory.data["activity"]
}
search_activities = [a["activity_name"] for a in value]
for search_activity in search_activities:
if search_activity not in memory_activities:
meet_criteria = False
break
else:
# General cases
if isinstance(memory.data[slot], list):
if value not in memory.data[slot]:
meet_criteria = False
break
else:
if value != memory.data[slot]:
meet_criteria = False
break
if meet_criteria:
retrieved_memories.append(memory)
# ** TODO: check if search_filter and retrieved_memories match **
# print('=====')
# print('search_filter', search_filter)
# print('-----')
# print('retrieved_memories', retrieved_memories)
# Rank and return only n_results
n_results = random.randint(1, n_max_results)
if len(retrieved_memories) > n_results:
random.shuffle(retrieved_memories)
retrieved_memories = retrieved_memories[:n_results]
# Output
results = {"retrieved_memories": retrieved_memories}
if results["retrieved_memories"] != []:
status = API_STATUS.SEARCH_FOUND
else:
status = API_STATUS.SEARCH_NOT_FOUND
return (results, status)
def refine_search(self, api_request: APIRequest) -> Tuple[Dict, API_STATUS]:
# Adjust the search based on the memory_dialog
memory_dialog = api_request.memory_dialog
# Search for previous search filter
prev_filter = None
for i in reversed(range(len(memory_dialog.dialog.asst_turns))):
asst_turn = memory_dialog.dialog.asst_turns[i]
turn_goal = asst_turn.goal
if turn_goal.goal_type in {GoalType.SEARCH, GoalType.GET_RELATED}:
# TODO: change it to reflect multi goal parameters
prev_filter = turn_goal.goal_parameters[0].filter
break
# Reconstruct the goal to include the previous search parameters
if prev_filter is not None:
# Start from the previous filter, then overlay the new request
# so that newly specified slots override the previous ones
search_filter = dict(prev_filter)
search_filter.update(api_request.parameters["slot_values"])
api_request.parameters["slot_values"] = search_filter
else:
# This dialog is not allowed -- Refine should always
# happen after a Search or GET_RELATED. Hence abort.
### TODO: abort gracefully
print("***** Refine error *****")
assert False
# Exclude memories that are already discussed
api_request.parameters[
"exclude_memory_ids"
] = memory_dialog.dialog.mentioned_memory_ids
return self.search(api_request)
def get_related(self, api_request: APIRequest) -> Tuple[Dict, API_STATUS]:
# Unpack API Request
search_filter = api_request.parameters["slot_values"]
if search_filter.get("time", None) in {
"right before",
"right after",
"on the same day",
}:
# This is a special request to retrieve
# related memories in the same time group (from the same day)
return self.get_connected(api_request, search_filter.get("time"))
else:
# Treat it as a modified search request
# where slot values are taken from the input memories
request_slots = api_request.parameters["request_slots"]
memories = api_request.parameters["memories"]
memory_dialog = api_request.memory_dialog
# If request_slots is not specified, randomly sample a few slots
if request_slots == []:
request_slot_candidates = {
"time",
"location",
"activity",
"participant",
}
# If a value is specified for a slot, exclude it
# from the candidates
request_slot_candidates -= search_filter.keys()
request_slots = random.choices(
population=list(request_slot_candidates), k=1  # randint(1, 1) was always 1
)
for request_slot in request_slots:
for memory in memories:
request_slot_value = memory.data[request_slot]
# TODO: make it take multiple values
search_filter[request_slot] = request_slot_value
# Make a search request with the updated filter
api_request.parameters["slot_values"] = search_filter
# Exclude memories that are already discussed
api_request.parameters[
"exclude_memory_ids"
] = memory_dialog.dialog.mentioned_memory_ids
return self.search(api_request)
def get_connected(
self, api_request: APIRequest, time_constraint: str
) -> Tuple[Dict, API_STATUS]:
_ = api_request.parameters["slot_values"]
## TODO: handle multiple memories
target_memory = api_request.parameters["memories"][0]
memory_graph = api_request.memory_dialog.memory_graph
target_memory_index = -1
for i, memory in enumerate(memory_graph.memories):
if memory.data["memory_id"] == target_memory.data["memory_id"]:
target_memory_index = i
break
# Get connected memories
connected_memory_indices = memory_graph.get_events(target_memory_index)[
"memories"
]
connected_memories = []
# Compare time
target_time = datetime.fromisoformat(target_memory.data["time"])
for idx in connected_memory_indices:
if idx == target_memory_index:
continue
connected_memory = memory_graph.memories[idx]
connected_memory_time = datetime.fromisoformat(
connected_memory.data["time"]
)
if time_constraint == "right after":
if target_time < connected_memory_time:
connected_memories.append(connected_memory)
elif time_constraint == "right before":
if target_time > connected_memory_time:
connected_memories.append(connected_memory)
elif time_constraint == "on the same day":
connected_memories.append(connected_memory)
# Output
results = {"retrieved_memories": connected_memories}
if results["retrieved_memories"] != []:
status = API_STATUS.SEARCH_FOUND
else:
status = API_STATUS.SEARCH_NOT_FOUND
return (results, status)
def get_info(self, api_request: APIRequest) -> Tuple[Dict, API_STATUS]:
# Unpack API Request
request_slots = api_request.parameters.get("request_slots", [])
memories = api_request.parameters.get("memories", [])
# Unpack more parameters
# TODO
# Prepare get_info output
lookup_results = {
"retrieved_memories": memories,
"retrieved_info": {},
"request_slots": request_slots,
}
# If request_slots is not specified, randomly sample a few slots
if request_slots == []:
if len(memories) > 0:
memory = memories[0]
request_slots = [k for k in memory.data if random.random() > 0.8]
def summarize_info(memory_data, slot):
if slot == "location":
return memory_data[slot]["geo_tag"]
else:
return memory_data[slot]
# Look up info
for memory in memories:
# Add the requested info
s_memory = str_memory(memory, verbose=False)
if request_slots == []:
# Give all relevant information
lookup_results["retrieved_info"][s_memory] = {
slot: summarize_info(memory.data, slot)
for slot in ["time", "location", "participant", "activity"]
}
else:
lookup_results["retrieved_info"][s_memory] = {}
for slot in request_slots:
if slot in memory.data:
lookup_results["retrieved_info"][s_memory][
slot
] = summarize_info(memory.data, slot)
# Add extra info
# TODO
# TODO: status can be INFO_NOT_FOUND
status = API_STATUS.INFO_FOUND
return (lookup_results, status)
def share(self, api_request) -> Tuple[Dict, API_STATUS]:
# Unpack API Request
memories = api_request.parameters["memories"]
# Prepare output
results = {"retrieved_memories": memories}
status = API_STATUS.SHARED
return (results, status)
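# Usage sketch (hedged; uses only the classes imported above, with a
# hypothetical city filter): a SEARCH call against a populated MemoryDialog
# `memory_dialog` would look like
#
#   api = MemoryServiceAPI()
#   request = APIRequest(
#       call_type=API_CALL_TYPE.SEARCH,
#       parameters={"slot_values": {"location": {"geo_tag": {"city": "Seattle"}}}},
#       memory_dialog=memory_dialog,
#   )
#   response = api.call_api(request)
#   # response.status is API_STATUS.SEARCH_FOUND or API_STATUS.SEARCH_NOT_FOUND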
|
comet_memory_dialog-main
|
dialog_simulator/MemoryServiceAPI.py
|
import argparse
def main():
parser = argparse.ArgumentParser(description='Make seeds')
parser.add_argument('--script', type=str, default='')
parser.add_argument('--num_seeds', type=int, default=5)
args = parser.parse_args()
seed = int(args.script.split('--seed ')[-1].split(' --')[0])
all_scripts = []
print('\nScripts:')
print('--------')
for _seed in range(args.num_seeds):
print(args.script.replace(f'--seed {seed}', f'--seed {_seed}'))
print('\n')
if __name__ == '__main__':
main()
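# Usage sketch (hypothetical command line):
#   python make_seeds.py --script "python main.py --seed 0 --lag 336" --num_seeds 3
# prints the same command with --seed 0, --seed 1 and --seed 2 substituted in.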
|
spacetime-main
|
make_seeds.py
|
"""
Model loss functions and objectives
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import L1Loss as MAE
from torch.nn import MSELoss as MSE
from torch.nn import CrossEntropyLoss
def get_loss(loss, reduction='none', ignore_index=-100):
"""
Different loss functions depending on the dataset / task
"""
if loss == 'mse':
return nn.MSELoss(reduction=reduction)
elif loss == 'mae':
return nn.L1Loss(reduction=reduction)
elif loss == 'rmse':
return multivariate_RMSE(reduction='mean')
elif loss == 'rse':
return multivariate_RMSE(reduction='none')
elif loss == 'cross_entropy':
return nn.CrossEntropyLoss(reduction=reduction,
ignore_index=ignore_index)
elif loss == 'informer_mse':
return informer_MSE
elif loss == 'informer_mae':
return informer_MAE
elif loss == 'informer_rmse':
return informer_RMSE
else:
raise NotImplementedError(f"loss '{loss}' is not supported")
def multivariate_RMSE(reduction):
criterion = torch.nn.MSELoss(reduction='none')
def loss(y_pred, y_true):
# y_pred, y_true.shape is B x L x D
mse = criterion(y_pred, y_true)
if reduction == 'mean':
mse = mse.mean(dim=1) # shape B x D
return torch.sqrt(mse)
return loss
# Losses from Informer code
def informer_MAE(y_pred, y_true):
return torch.mean(torch.abs(y_pred-y_true))
def informer_MSE(y_pred, y_true):
return torch.mean((y_pred-y_true)**2)
def informer_RMSE(y_pred, y_true):
return torch.sqrt(informer_MSE(y_pred, y_true))
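# Minimal smoke test (a sketch, assuming only the definitions above):
if __name__ == '__main__':
    y_pred, y_true = torch.zeros(2, 8, 3), torch.ones(2, 8, 3)
    print(get_loss('informer_mse')(y_pred, y_true))  # tensor(1.)
    print(multivariate_RMSE(reduction='mean')(y_pred, y_true).shape)  # torch.Size([2, 3])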
|
spacetime-main
|
loss.py
|
"""
Model optimizer and scheduler
"""
import torch
def get_optimizer(model, configs):
optim_configs = {k: v for k, v in configs.items() if k != '_name_'}
if configs['_name_'] == 'adamw':
return torch.optim.AdamW(model.parameters(), **optim_configs)
elif configs['_name_'] == 'sgd':
return torch.optim.SGD(model.parameters(), **optim_configs)
elif configs['_name_'] == 'adam':
return torch.optim.Adam(model.parameters(), **optim_configs)
def get_scheduler(model, optimizer, configs):
if 'scheduler' in configs:
configs = configs['scheduler']
scheduler_configs = {k: v for k, v in configs.items() if k != '_name_'}
if configs['_name_'] == 'timm_cosine':
from timm.scheduler.cosine_lr import CosineLRScheduler
return CosineLRScheduler(optimizer=optimizer, **scheduler_configs)
elif configs['_name_'] == 'plateau':
from torch.optim.lr_scheduler import ReduceLROnPlateau
print(scheduler_configs)
try:
return ReduceLROnPlateau(optimizer=optimizer, **scheduler_configs)
except TypeError:
return ReduceLROnPlateau(optimizer=optimizer)
else:
return None
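# Usage sketch (hedged): configs follow the {'_name_': ..., **kwargs} layout
# consumed above; the hyperparameter values here are illustrative only.
if __name__ == '__main__':
    model = torch.nn.Linear(4, 1)
    optimizer = get_optimizer(model, {'_name_': 'adamw', 'lr': 1e-3, 'weight_decay': 1e-4})
    scheduler = get_scheduler(model, optimizer, {'_name_': 'plateau', 'patience': 5})
    print(type(optimizer).__name__, type(scheduler).__name__)  # AdamW ReduceLROnPlateau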
|
spacetime-main
|
optimizer.py
|
import os
import copy
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from os.path import join
from omegaconf import OmegaConf
from dataloaders import initialize_data_functions, get_evaluation_loaders
from utils.logging import print_header, print_args, print_config
from optimizer import get_optimizer, get_scheduler
from loss import get_loss
from data_transforms import get_data_transforms
from train import train_model, evaluate_model, plot_forecasts
from setup import format_arg, seed_everything
from setup import initialize_args
from setup import load_model_config, load_main_config
from setup import initialize_experiment
from setup.configs.model import update_output_config_from_args # For multivariate feature prediction
from model.network import SpaceTime
def main():
print_header('*** EXPERIMENT ARGS ***')
args = initialize_args()
seed_everything(args.seed)
experiment_configs = load_main_config(args, config_dir='./configs')
load_data, visualize_data = initialize_data_functions(args)
print_header('*** DATASET ***')
print_config(experiment_configs['dataset'])
print_header('*** LOADER ***')
print_config(experiment_configs['loader'])
print_header('*** OPTIMIZER ***')
print_config(experiment_configs['optimizer'])
print_header('*** SCHEDULER ***')
print_config(experiment_configs['scheduler'])
# Loading Data
dataloaders = load_data(experiment_configs['dataset'],
experiment_configs['loader'])
train_loader, val_loader, test_loader = dataloaders
splits = ['train', 'val', 'test']
dataloaders_by_split = {split: dataloaders[ix]
for ix, split in enumerate(splits)}
eval_loaders = get_evaluation_loaders(dataloaders, batch_size=args.batch_size)
# Setup input_dim based on features
x, y, *z = train_loader.dataset[0]
args.input_dim = x.shape[1] # L x D
output_dim = y.shape[1]
# Initialize Model
args.device = (torch.device('cuda:0')
if torch.cuda.is_available() and not args.no_cuda
else torch.device('cpu'))
model_configs = {'embedding_config': args.embedding_config,
'encoder_config': args.encoder_config,
'decoder_config': args.decoder_config,
'output_config': args.output_config}
model_configs = OmegaConf.create(model_configs)
model_configs = load_model_config(model_configs, config_dir='./configs/model',
args=args)
model_configs['inference_only'] = False
model_configs['lag'] = args.lag
model_configs['horizon'] = args.horizon
if args.features == 'M': # Update output
update_output_config_from_args(model_configs['output_config'], args,
update_output_dim=True, output_dim=output_dim)
model_configs['output_config'].input_dim = model_configs['output_config'].kwargs.input_dim
model_configs['output_config'].output_dim = model_configs['output_config'].kwargs.output_dim
print(model_configs['output_config'])
model = SpaceTime(**model_configs)
model.replicate = args.replicate # Only used for testing specific things indicated by replicate
model.set_lag(args.lag)
model.set_horizon(args.horizon)
# Initialize optimizer and scheduler
optimizer = get_optimizer(model, experiment_configs['optimizer'])
scheduler = get_scheduler(model, optimizer,
experiment_configs['scheduler'])
# Save some model stats
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
args.model_parameter_count = params
arg_dict = print_args(args, return_dict=True, verbose=args.verbose)
# Setup logging
wandb = initialize_experiment(args, experiment_name_id='',
best_train_metric=1e10,
best_val_metric=1e10)
try:
pd.DataFrame.from_dict(arg_dict).to_csv(args.log_configs_path)
except ValueError:
pd.DataFrame.from_dict([arg_dict]).to_csv(args.log_configs_path)
if args.verbose:
print_header('*** MODEL ***')
print(model)
print_config(model_configs)
from einops import rearrange
_k = model.encoder.blocks[0].pre.get_kernel(rearrange(x, '(o l) d -> o d l', o=1))
_k_diff = model.encoder.blocks[0].pre.diff_kernel
_k_ma_r = model.encoder.blocks[0].pre.ma_r_kernel
print_header(f'──> Preprocessing kernels (full: {_k.shape}, diff: {_k_diff.shape}, ma: {_k_ma_r.shape})')
print(_k[:16, :_k_ma_r.shape[-1]])
print_header('*** TRAINING ***')
print(f'├── Lag: {args.lag}')
print(f'├── Horizon: {args.horizon}')
print(f'├── Criterion: {args.loss}, weights: {args.criterion_weights}')
print(f'├── Dims: input={args.input_dim}, model={args.model_dim}')
print(f'├── Number trainable parameters: {params}')
print(f'├── Experiment name: {args.experiment_name}')
print(f'└── Logging to: {args.log_results_path}')
# Loss objectives
criterions = {name: get_loss(name) for name in ['rmse', 'mse', 'mae', 'rse']}
eval_criterions = criterions
for name in ['rmse', 'mse', 'mae']:
eval_criterions[f'informer_{name}'] = get_loss(f'informer_{name}')
input_transform, output_transform = get_data_transforms(args.data_transform,
args.lag)
model = train_model(model, optimizer, scheduler, dataloaders_by_split,
criterions, max_epochs=args.max_epochs, config=args,
input_transform=input_transform,
output_transform=output_transform,
val_metric=args.val_metric, wandb=wandb,
return_best=True, early_stopping_epochs=args.early_stopping_epochs)
# Eval best val checkpoint
eval_splits = ['eval_train', 'val', 'test']
eval_loaders_by_split = {split: eval_loaders[ix] for ix, split in
enumerate(eval_splits)}
model, log_metrics, total_y = evaluate_model(model, dataloaders=eval_loaders_by_split,
optimizer=optimizer, scheduler=scheduler,
criterions=eval_criterions, config=args,
epoch=args.best_val_metric_epoch,
input_transform=input_transform,
output_transform=output_transform,
val_metric=args.val_metric, wandb=wandb,
train=False)
n_plots = len(splits) # train, val, test
fig, axes = plt.subplots(1, n_plots, figsize=(6.4 * n_plots, 4.8))
plot_forecasts(total_y, splits=eval_splits, axes=axes)
if not args.no_wandb:
wandb.log({"forecast_plot": fig})
wandb.log(log_metrics)
if __name__ == '__main__':
main()
|
spacetime-main
|
main.py
|
import importlib
from torch.utils.data import DataLoader
def initialize_data_functions(args):
"""
Retrieve dataloaders and visualization function.
Example:
load_data, visualize_data = initialize_data_functions(args)
train_loader, val_loader, test_loader = load_data(config.dataset, config.loader)
"""
try:
dataset_module = f'dataloaders.{get_data_module(args)}'
dataset_module = importlib.import_module(dataset_module)
except Exception as e:
print(f'Error: dataloaders.{get_data_module(args)}')
raise e
load_data = getattr(dataset_module, 'load_data')
visualize_data = getattr(dataset_module, 'visualize_data')
return load_data, visualize_data
def get_data_module(args):
dataset_fname = args.dataset
# Informer - time series forecasting
if args.dataset in ['etth1', 'etth2', 'ettm1', 'ettm2',
'ecl', 'traffic', 'weather']:
args.dataset_type = 'informer'
if args.dataset[:3] == 'ett':
args.variant = int(args.dataset[-1])
args.dataset = args.dataset[:-1]
else:
args.variant = None
data_module = args.dataset_type
elif args.dataset in ['etth', 'ettm']:
print(f'Dataset type: {args.dataset_type}')
print(f'-> dataset: {args.dataset}{args.variant}')
data_module = args.dataset_type
else:
raise NotImplementedError(f'{args.dataset} not implemented!')
return data_module
def get_evaluation_loaders(dataloaders, batch_size):
eval_dataloaders = [
DataLoader(dataloader.dataset,
shuffle=False,
batch_size=batch_size,
num_workers=0)
for dataloader in dataloaders
]
return eval_dataloaders
|
spacetime-main
|
dataloaders/__init__.py
|
import numpy as np
import matplotlib.pyplot as plt
from dataloaders.datasets.informer import ETTHour, ETTMinute, ECL, Exchange, ILI, Traffic, Weather
def get_dataset(name):
if name == 'etth':
return ETTHour
elif name == 'ettm':
return ETTMinute
elif name == 'ecl':
return ECL
elif name == 'exchange':
return Exchange
elif name == 'ili':
return ILI
elif name == 'traffic':
return Traffic
elif name == 'weather':
return Weather
else:
supported = ['etth', 'ettm', 'ecl', 'exchange', 'ili', 'traffic', 'weather']
raise NotImplementedError(f"Please check that name is in {supported}")
def load_data(config_dataset, config_loader):
dataset = get_dataset(config_dataset['_name_'])(**config_dataset)
dataset.setup()
train_loader = dataset.train_dataloader(**config_loader)
# Eval loaders are dictionaries where key is resolution, value is dataloader
# - Borrowed from S4 dataloaders. For now just set resolution to 1
val_loader = dataset.val_dataloader(**config_loader)[None]
test_loader = dataset.test_dataloader(**config_loader)[None]
return train_loader, val_loader, test_loader # , dataset
def visualize_data(dataloaders, splits=['train', 'val', 'test'],
save=False, args=None, title=None):
assert len(splits) == len(dataloaders)
start_idx = 0
for idx, split in enumerate(splits):
y = dataloaders[idx].dataset.data_x
x = np.arange(len(y)) + start_idx
plt.plot(x, y, label=split)
start_idx += len(x)
plt.title(title)
plt.legend()
plt.show()
|
spacetime-main
|
dataloaders/informer.py
|
"""
Parent dataset for sequential data.
Code from https://github.com/HazyResearch/state-spaces/blob/main/src/dataloaders/base.py
"""
from functools import partial
import os
import io
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.nn import functional as F
from utils.config import is_list
# Default data path is environment variable or hippo/data
if (default_data_path := os.getenv("DATA_PATH")) is None:
default_data_path = Path(__file__).parent.parent.parent.absolute()
default_data_path = default_data_path / "dataloaders" / "data"
else:
default_data_path = Path(default_data_path).absolute()
class SequenceDataset:
registry = {}
_name_ = NotImplementedError("Dataset must have shorthand name")
# Since subclasses do not specify __init__ which is instead handled by this class
# Subclasses can provide a list of default arguments which are automatically registered as attributes
# TODO apparently there is a python 3.8 decorator that basically does this
@property
def init_defaults(self):
return {}
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.registry[cls._name_] = cls
def __init__(self, _name_, data_dir=None, tbptt=False, chunk_len=None, overlap_len=None, **dataset_cfg):
assert _name_ == self._name_
self.data_dir = Path(data_dir).absolute() if data_dir is not None else None
# Arguments for TBPTT: only used if tbptt is True and are passed to TBPTTDataLoader
# Not used right now
self.tbptt = tbptt
self.chunk_len = chunk_len
self.overlap_len = overlap_len
# Add all arguments to self
init_args = self.init_defaults
init_args.update(
dataset_cfg
) # TODO this overrides the default dict which is bad
for k, v in init_args.items():
setattr(self, k, v)
# train, val, test datasets must be set by class instantiation
self.dataset_train = None
self.dataset_val = None
self.dataset_test = None
def setup(self):
"""This method should set self.dataset_train, self.dataset_val, and self.dataset_test"""
raise NotImplementedError
def split_train_val(self, val_split):
train_len = int(len(self.dataset_train) * (1.0 - val_split))
self.dataset_train, self.dataset_val = torch.utils.data.random_split(
self.dataset_train,
(train_len, len(self.dataset_train) - train_len),
generator=torch.Generator().manual_seed(
getattr(self, "seed", 42)
),
)
@staticmethod
def collate_fn(batch, resolution=1):
"""batch: list of (x, y) pairs"""
def _collate(batch, resolution=1):
# From https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py
elem = batch[0]
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum(x.numel() for x in batch)
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
x = torch.stack(batch, dim=0, out=out)
if resolution is not None:
x = x[:, ::resolution] # assume length is first axis after batch
return x
else:
return torch.tensor(batch)
x, y = zip(*batch)
# Drop every nth sample
# x = torch.stack(x, dim=0)[:, ::resolution]
# y = torch.LongTensor(y)
# y = torch.tensor(y)
# y = torch.stack(y, dim=0)
x = _collate(x, resolution=resolution)
y = _collate(y, resolution=None)
return x, y
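# e.g. with resolution=2 a stacked batch of shape (B, L, D) is subsampled
# along the length axis to (B, ceil(L / 2), D); labels y are collated with
# resolution=None and left untouched.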
def train_dataloader(self, train_resolution, eval_resolutions, **kwargs):
if train_resolution is None:
train_resolution = [1]
if not is_list(train_resolution):
train_resolution = [train_resolution]
assert len(train_resolution) == 1, "Only one train resolution supported for now"
return self._dataloader(
self.dataset_train,
resolutions=train_resolution,
shuffle=True,
**kwargs,
)[0]
def val_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_val, **kwargs)
def test_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_test, **kwargs)
def _eval_dataloader(self, dataset, train_resolution, eval_resolutions, **kwargs):
if eval_resolutions is None:
eval_resolutions = [1]
if not is_list(eval_resolutions):
eval_resolutions = [eval_resolutions]
kwargs["shuffle"] = False if "shuffle" not in kwargs else kwargs["shuffle"]
dataloaders = self._dataloader(
dataset,
resolutions=eval_resolutions,
# shuffle=False,
**kwargs,
)
return (
{
str(res) if res > 1 else None: dl
for res, dl in zip(eval_resolutions, dataloaders)
}
if dataloaders is not None
else None
)
def _dataloader(self, dataset, resolutions, **loader_args):
if dataset is None:
return None
DataLoader = torch.utils.data.DataLoader
return [
DataLoader(
dataset=dataset,
collate_fn=partial(self.collate_fn, resolution=resolution)
if self.collate_fn is not None
else None,
**loader_args,
)
for resolution in resolutions
]
def __str__(self):
return self._name_
|
spacetime-main
|
dataloaders/datasets/sequence.py
|
from .sequence import SequenceDataset, default_data_path
|
spacetime-main
|
dataloaders/datasets/__init__.py
|
"""
Informer benchmark datasets from Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting (AAAI'21 Best Paper)
- Authors: Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, Wancai Zhang
Code from https://github.com/HazyResearch/state-spaces/blob/main/src/dataloaders/et.py
- Original dataset: https://github.com/zhouhaoyi/ETDataset
- Original dataloader: https://github.com/zhouhaoyi/Informer2020
"""
from typing import List
import os
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
import torch
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
from dataloaders.datasets import SequenceDataset, default_data_path
class TimeFeature:
def __init__(self):
pass
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
pass
def __repr__(self):
return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
"""Day of month encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
"""Day of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
"""Month of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
"""Week of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.isocalendar().week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
"""
Returns a list of time features that will be appropriate for the given frequency string.
Parameters
----------
freq_str
Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
"""
features_by_offsets = {
offsets.YearEnd: [],
offsets.QuarterEnd: [MonthOfYear],
offsets.MonthEnd: [MonthOfYear],
offsets.Week: [DayOfMonth, WeekOfYear],
offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
offsets.Minute: [
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
offsets.Second: [
SecondOfMinute,
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
}
offset = to_offset(freq_str)
for offset_type, feature_classes in features_by_offsets.items():
if isinstance(offset, offset_type):
return [cls() for cls in feature_classes]
supported_freq_msg = f"""
Unsupported frequency {freq_str}
The following frequencies are supported:
Y - yearly
alias: A
M - monthly
W - weekly
D - daily
B - business days
H - hourly
T - minutely
alias: min
S - secondly
"""
raise RuntimeError(supported_freq_msg)
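# Example (hedged): to_offset("h") resolves to offsets.Hour, so
# time_features_from_frequency_str("h") returns
# [HourOfDay(), DayOfWeek(), DayOfMonth(), DayOfYear()].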
def time_features(dates, timeenc=1, freq="h"):
"""
> `time_features` takes in a `dates` dataframe with a 'dates' column and extracts the date down to `freq` where freq can be any of the following if `timeenc` is 0:
> * m - [month]
> * w - [month]
> * d - [month, day, weekday]
> * b - [month, day, weekday]
> * h - [month, day, weekday, hour]
> * t - [month, day, weekday, hour, *minute]
>
> If `timeenc` is 1, a similar, but different list of `freq` values are supported (all encoded between [-0.5 and 0.5]):
> * Q - [month]
> * M - [month]
> * W - [Day of month, week of year]
> * D - [Day of week, day of month, day of year]
> * B - [Day of week, day of month, day of year]
> * H - [Hour of day, day of week, day of month, day of year]
> * T - [Minute of hour*, hour of day, day of week, day of month, day of year]
> * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year]
*minute returns a number from 0-3 corresponding to the 15 minute period it falls into.
"""
if timeenc == 0:
dates["month"] = dates.date.apply(lambda row: row.month, 1)
dates["day"] = dates.date.apply(lambda row: row.day, 1)
dates["weekday"] = dates.date.apply(lambda row: row.weekday(), 1)
dates["hour"] = dates.date.apply(lambda row: row.hour, 1)
dates["minute"] = dates.date.apply(lambda row: row.minute, 1)
dates["minute"] = dates.minute.map(lambda x: x // 15)
freq_map = {
"y": [],
"m": ["month"],
"w": ["month"],
"d": ["month", "day", "weekday"],
"b": ["month", "day", "weekday"],
"h": ["month", "day", "weekday", "hour"],
"t": ["month", "day", "weekday", "hour", "minute"],
}
return dates[freq_map[freq.lower()]].values
if timeenc == 1:
dates = pd.to_datetime(dates.date.values)
return np.vstack(
[feat(dates) for feat in time_features_from_frequency_str(freq)]
).transpose(1, 0)
class StandardScaler:
def __init__(self):
self.mean = 0.0
self.std = 1.0
def fit(self, data):
self.mean = data.mean(0)
self.std = data.std(0)
def transform(self, data):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data - mean) / std
def inverse_transform(self, data, loc=None):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data * std) + mean
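# Round-trip sketch: after scaler.fit(train_values),
# scaler.inverse_transform(scaler.transform(x)) recovers x up to float error,
# for numpy arrays and torch tensors alike.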
class InformerDataset(Dataset):
def __init__(
self,
root_path,
flag="train",
size=None,
features="S",
data_path="ETTh1.csv",
target="OT",
scale=True,
inverse=False,
timeenc=0,
freq="h",
cols=None,
eval_stamp=False,
eval_mask=False,
):
# size [seq_len, label_len, pred_len]
# info
if size is None:
self.seq_len = 24 * 4 * 4
self.label_len = 24 * 4
self.pred_len = 24 * 4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
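# e.g. size = [384, 96, 96]: __getitem__(i) builds seq_x from rows
# [i, i + 384) (padded with pred_len zero rows) and seq_y from rows
# [i + 384, i + 480), i.e. the pred_len horizon right after the lag window.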
# init
assert flag in ["train", "test", "val"]
type_map = {"train": 0, "val": 1, "test": 2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols = cols
self.eval_stamp = eval_stamp
self.eval_mask = eval_mask
self.forecast_horizon = self.pred_len
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
# if data_path == 'national_illness.csv':
# breakpoint()
def _borders(self, df_raw):
num_train = int(len(df_raw) * 0.7)
num_test = int(len(df_raw) * 0.2)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
border2s = [num_train, num_train + num_vali, len(df_raw)]
return border1s, border2s
def _process_columns(self, df_raw):
if self.cols:
cols = self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns)
cols.remove(self.target)
cols.remove("date")
return df_raw[["date"] + cols + [self.target]]
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
df_raw = self._process_columns(df_raw)
border1s, border2s = self._borders(df_raw)
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features == "M":
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features == "S" or self.features == "MS":
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0] : border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values) # Scaled down, should not be Y
else:
data = df_data.values
df_stamp = df_raw[["date"]][border1:border2]
df_stamp["date"] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_x = np.concatenate(
[seq_x, np.zeros((self.pred_len, self.data_x.shape[-1]))], axis=0
)
if self.inverse:
# seq_y = np.concatenate(
# [
# self.data_x[r_begin : r_begin + self.label_len],
# self.data_y[r_begin + self.label_len : r_end],
# ],
# 0,
# )
# raise NotImplementedError # OLD in S4 codebase
seq_y = self.data_y[s_end:r_end]
else:
# seq_y = self.data_y[r_begin:r_end] # OLD in Informer codebase
seq_y = self.data_y[s_end:r_end]
# OLD in Informer codebase
# seq_x_mark = self.data_stamp[s_begin:s_end]
# seq_y_mark = self.data_stamp[r_begin:r_end]
if self.eval_stamp:
mark = self.data_stamp[s_begin:r_end]
else:
mark = self.data_stamp[s_begin:s_end]
mark = np.concatenate([mark, np.zeros((self.pred_len, mark.shape[-1]))], axis=0)
if self.eval_mask:
mask = np.concatenate([np.zeros(self.seq_len), np.ones(self.pred_len)], axis=0)
else:
mask = np.concatenate([np.zeros(self.seq_len), np.zeros(self.pred_len)], axis=0)
mask = mask[:, None]
# Add the mask to the timestamps: # 480, 5
# mark = np.concatenate([mark, mask[:, np.newaxis]], axis=1)
seq_x = seq_x.astype(np.float32)
seq_y = seq_y.astype(np.float32)
if self.timeenc == 0:
mark = mark.astype(np.int64)
else:
mark = mark.astype(np.float32)
mask = mask.astype(np.int64)
return torch.tensor(seq_x), torch.tensor(seq_y), torch.tensor(mark), torch.tensor(mask)
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data, loc=None):
return self.scaler.inverse_transform(data, loc)
@property
def d_input(self):
if self.features == 'M':
return 1
return self.data_x.shape[-1]
@property
def d_output(self):
if self.features in ["M", "S"]:
return self.data_x.shape[-1]
elif self.features == "MS":
return 1
else:
raise NotImplementedError
@property
def n_tokens_time(self):
if self.freq == 'h':
return [13, 32, 7, 24]
elif self.freq == 't':
return [13, 32, 7, 24, 4]
else:
raise NotImplementedError
class _Dataset_ETT_hour(InformerDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 - self.seq_len,
12 * 30 * 24 + 4 * 30 * 24 - self.seq_len,
]
border2s = [
12 * 30 * 24,
12 * 30 * 24 + 4 * 30 * 24,
12 * 30 * 24 + 8 * 30 * 24,
]
return border1s, border2s
def _process_columns(self, df_raw):
return df_raw
@property
def n_tokens_time(self):
assert self.freq == "h"
return [13, 32, 7, 24]
class _Dataset_ETT_minute(_Dataset_ETT_hour):
def __init__(self, data_path="ETTm1.csv", freq="t", **kwargs):
super().__init__(data_path=data_path, freq=freq, **kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 * 4 - self.seq_len,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len,
]
border2s = [
12 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 8 * 30 * 24 * 4,
]
return border1s, border2s
@property
def n_tokens_time(self):
assert self.freq == "t"
return [13, 32, 7, 24, 4]
class _Dataset_Weather(InformerDataset):
def __init__(self, data_path="WTH.csv", target="WetBulbCelsius", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class _Dataset_ECL(InformerDataset):
def __init__(self, data_path="ECL.csv", target="MT_320", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class _Dataset_ILI(InformerDataset):
def __init__(self, data_path="national_illness.csv", target="OT", **kwargs):
# breakpoint()
super().__init__(data_path=data_path, target=target, **kwargs)
class _Dataset_Exchange(InformerDataset):
def __init__(self, data_path="exchange_rate.csv", target="OT", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class _Dataset_Traffic(InformerDataset):
def __init__(self, data_path="traffic.csv", target="OT", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class InformerSequenceDataset(SequenceDataset):
@property
def n_tokens_time(self):
# Shape of the dates: depends on `timeenc` and `freq`
return self.dataset_train.n_tokens_time # data_stamp.shape[-1]
@property
def d_input(self):
return self.dataset_train.d_input
@property
def d_output(self):
return self.dataset_train.d_output
@property
def l_output(self):
return self.dataset_train.pred_len
def _get_data_filename(self, variant):
return self.variants[variant]
@staticmethod
def collate_fn(batch, resolution, **kwargs):
x, y, *z = zip(*batch)
x = torch.stack(x, dim=0)[:, ::resolution]
y = torch.stack(y, dim=0)
z = [torch.stack(e, dim=0)[:, ::resolution] for e in z]
return x, y, *z
def setup(self):
self.data_dir = self.data_dir or default_data_path / 'informer' / self._name_
self.dataset_train = self._dataset_cls(
root_path=self.data_dir,
flag="train",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_val = self._dataset_cls(
root_path=self.data_dir,
flag="val",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_test = self._dataset_cls(
root_path=self.data_dir,
flag="test",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
class ETTHour(InformerSequenceDataset):
_name_ = "etth"
_dataset_cls = _Dataset_ETT_hour
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
# 8.18.2022 - Changed the keys to 1, 2 from 0, 1
variants = {
1: "ETTh1.csv",
2: "ETTh2.csv",
}
class ETTMinute(InformerSequenceDataset):
_name_ = "ettm"
_dataset_cls = _Dataset_ETT_minute
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "t",
"cols": None,
}
# 8.18.2022 - Changed the keys to 1, 2 from 0, 1
variants = {
1: "ETTm1.csv",
2: "ETTm2.csv",
}
class Weather(InformerSequenceDataset):
_name_ = "weather"
_dataset_cls = _Dataset_Weather
init_defaults = {
"size": None,
"features": "S",
"target": "WetBulbCelsius",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "WTH.csv",
}
class ECL(InformerSequenceDataset):
_name_ = "ecl"
_dataset_cls = _Dataset_ECL
init_defaults = {
"size": None,
"features": "S",
"target": "MT_320",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "ECL.csv",
}
class ILI(InformerSequenceDataset):
_name_ = "ili"
_dataset_cls = _Dataset_ILI
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "national_illness.csv",
}
class Exchange(InformerSequenceDataset):
_name_ = "exchange"
_dataset_cls = _Dataset_Exchange
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "exchange_rate.csv",
}
class Traffic(InformerSequenceDataset):
_name_ = "traffic"
_dataset_cls = _Dataset_Traffic
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "traffic.csv",
}
|
spacetime-main
|
dataloaders/datasets/informer.py
|
from .args import initialize_args
from .configs import load_main_config, load_model_config
from .experiment import format_arg, seed_everything, initialize_experiment
|
spacetime-main
|
setup/__init__.py
|
import os
import random
import torch
import numpy as np
from os.path import join
def format_arg(arg_name, cutoff=2):
if arg_name is None:
return arg_name
arg_name = str(arg_name)
# Hardcode to handle backslash
name_splits = arg_name.split('/')
if len(name_splits) > 1:
return name_splits[-1]
# Abbreviate based on underscore
name_splits = arg_name.split('_')
if len(name_splits) > 1:
return ''.join([s[0] for s in name_splits])
else:
return arg_name[:cutoff]
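# e.g. format_arg('embedding_config') -> 'ec' (initials of the underscore
# splits), format_arg('embedding/repeat') -> 'repeat' (after the slash),
# format_arg('spacetime') -> 'sp' (default cutoff=2).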
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def initialize_experiment(args, experiment_name_id='',
best_train_metric=1e10,
best_val_metric=1e10):
# Experiment name
args.experiment_name = f'{experiment_name_id}-' if experiment_name_id != '' else ''
args.dataset_name = args.dataset if args.variant is None else f'{args.dataset}{args.variant}'
args.experiment_name += f'm={args.model}' # f'd={args.dataset_name}-m={args.model}'
    try:
        args.criterion_weights = '+'.join(args.criterion_weights)
    except TypeError:  # criterion_weights not provided on the command line
        args.criterion_weights = '1+1+1'
for arg in ['embedding_config', 'preprocess_config', 'encoder_config', 'decoder_config', 'output_config',
'n_blocks', 'n_kernels', 'n_heads', 'kernel_dim', 'kernel_init', 'norm_order', 'lag', 'horizon', 'features',
'data_transform', 'criterion_weights', 'loss', 'dropout', 'lr', 'optimizer', 'scheduler',
'weight_decay', 'batch_size', 'val_metric', 'max_epochs', 'early_stopping_epochs', 'replicate']:
try:
args.experiment_name += f'-{format_arg(arg)}={format_arg(getattr(args, arg), cutoff=None)}'
except:
pass
args.experiment_name += f'-se={args.seed}'
    args.experiment_name = (args.experiment_name
                            .replace('True', '1').replace('False', '0')
                            .replace('None', 'na').replace('normal', 'no')
                            .replace('xavier', 'xa').replace('identity', 'id')
                            .replace('avgpool', 'avgp'))
# Checkpointing
args.best_train_metric = best_train_metric
args.best_val_metric = best_val_metric
checkpoint_dir = join(args.checkpoint_dir, args.dataset_name)
if not os.path.isdir(checkpoint_dir):
os.makedirs(checkpoint_dir)
print(f'-> Created model checkpoint saving directory at {checkpoint_dir}!')
args.checkpoint_dir = checkpoint_dir
args.best_train_checkpoint_path = join(args.checkpoint_dir,
f'btrn-{args.experiment_name}.pth')
args.best_val_checkpoint_path = join(args.checkpoint_dir,
f'bval-{args.experiment_name}.pth')
# Logging
project_name = f'spacetime-d={args.dataset_name}-f={args.features}-horizon={args.horizon}'
if not args.no_wandb:
import wandb
run_name = args.experiment_name
wandb.init(config={},
entity=args.wandb_entity,
name=run_name,
project=project_name,
dir=args.log_dir)
wandb.config.update(args)
else:
wandb = None
# Local logging
args.log_dir = join(args.log_dir, project_name)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
print(f'-> Created logging directory at {args.log_dir}!')
log_id = args.experiment_name
args.log_results_path = join(args.log_dir, f'r-{log_id}.csv')
args.log_configs_path = join(args.log_dir, f'c-{log_id}.csv')
args.log_results_dict = {'epoch': [], 'split': []}
# Loss weights
args.criterion_weights = [float(w) for w in args.criterion_weights.split('+')]
return wandb
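# Illustrative result (hypothetical values): with model='spacetime' and
# seed=0, the loop above produces an experiment name roughly like
#   'm=spacetime-ec=repeat-...-lr=0.001-se=0'
# where each '-k=v' pair is abbreviated by format_arg; the name then keys
# both checkpoint paths ('btrn-...' / 'bval-...') and the local CSV logs.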
|
spacetime-main
|
setup/experiment.py
|
import argparse
def initialize_args():
parser = argparse.ArgumentParser(description='SpaceTime arguments')
# Model
parser.add_argument('--model', type=str, default='spacetime')
parser.add_argument('--embedding_config', type=str, default='embedding/repeat')
parser.add_argument('--preprocess_config', type=str, default='preprocess/default')
parser.add_argument('--encoder_config', type=str, default='encoder/default')
parser.add_argument('--decoder_config', type=str, default='decoder/default')
parser.add_argument('--output_config', type=str, default='output/default')
# Model config arguments
parser.add_argument('--n_blocks', type=int, default=None) # Only update encoder blocks
parser.add_argument('--n_kernels', type=int, default=None)
parser.add_argument('--n_heads', type=int, default=None)
parser.add_argument('--model_dim', type=int, default=None)
parser.add_argument('--input_dim', type=int, default=1,
help='Input dimensions. Updated based on dataset.')
parser.add_argument('--kernel_dim', type=int, default=None)
# parser.add_argument('--head_dim', type=int, default=None)
parser.add_argument('--activation', type=str, choices=['gelu', 'relu'])
parser.add_argument('--dropout', type=float, default=None)
parser.add_argument('--layernorm', action='store_true', default=None)
parser.add_argument('--norm_order', type=int, default=None)
# SSM
parser.add_argument('--kernel_init', type=str, default=None)
parser.add_argument('--skip_ssm', action='store_true', default=None)
# MLP
parser.add_argument('--mlp_n_layers', type=int, default=None)
parser.add_argument('--mlp_n_activations', type=int, default=None)
parser.add_argument('--mlp_preactivation', type=int, default=None)
parser.add_argument('--skip_mlp', action='store_true', default=None)
# Data
parser.add_argument('--dataset', type=str, default='etth1')
parser.add_argument('--dataset_type', type=str, default='')
parser.add_argument('--variant', type=int, default=None)
parser.add_argument('--trainer', type=str, default='default')
parser.add_argument('--loader', type=str, default='default')
parser.add_argument('--num_workers', type=int, default=2)
parser.add_argument('--data_dir', type=str, default='./data')
## Informer / time-series-specific
parser.add_argument('--features', type=str, default='S')
parser.add_argument('--no_scale', action='store_true', default=False)
parser.add_argument('--inverse', action='store_true', default=False)
parser.add_argument('--data_transform', type=str, default='mean',
choices=['mean', 'mean_input', 'last', 'standardize', 'none'])
# Prediction Task
    parser.add_argument('--lag', type=int, default=1,
                        help='Number of past time-steps included in the model input window.')
parser.add_argument('--horizon', type=int, default=1)
parser.add_argument('--loss', type=str, default='rmse',
choices=['rmse', 'mse', 'mae', 'rse', 'cross_entropy',
'informer_rmse', 'informer_mse', 'informer_mae'])
# Training
parser.add_argument('--criterion_weights', nargs='+') # Convert to float after setup.experiment.initialize_experiment
parser.add_argument('--optimizer', type=str, default='adamw')
parser.add_argument('--scheduler', type=str, default='timm_cosine',
choices=['none', 'plateau', 'timm_cosine'])
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_epochs', type=int, default=999)
parser.add_argument('--early_stopping_epochs', type=int, default=10)
parser.add_argument('--val_metric', type=str, default='rmse')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--weight_decay', type=float, default=0.0)
parser.add_argument('--momentum', type=float, default=0.0)
# Saving + logging
parser.add_argument('--log_epoch', type=int, default=10)
parser.add_argument('--no_wandb', action='store_true', default=False)
parser.add_argument('--wandb_entity', type=str, default='mzhang')
parser.add_argument('--log_dir', type=str, default='./logs')
parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints')
# Misc.
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--no_pin_memory', action='store_true', default=False)
parser.add_argument('--verbose', action='store_true', default=False)
parser.add_argument('--replicate', type=int, default=0)
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
return args
|
spacetime-main
|
setup/args.py
|
"""
Load default configs
"""
from .data import get_dataset_config, get_dataloader_config
from .optimizer import get_optimizer_config, get_scheduler_config
from .model import load_model_config
def load_main_config(args, config_dir='./configs'):
configs = {'dataset': get_dataset_config(args, config_dir),
'loader': get_dataloader_config(args, config_dir),
'optimizer': get_optimizer_config(args, config_dir),
'scheduler': get_scheduler_config(args, config_dir)}
return configs
|
spacetime-main
|
setup/configs/__init__.py
|
"""
Load and update model configs
"""
from os.path import join
from omegaconf import OmegaConf
# SpaceTime model
def load_model_config(config, config_dir='./configs/model', args=None):
for k in ['embedding_config', 'encoder_config',
'decoder_config', 'output_config']:
_config = OmegaConf.load(join(config_dir, f'{config[k]}.yaml'))
if k == 'encoder_config' or k == 'decoder_config':
for ix, block_config in enumerate(_config['blocks']):
# Load preprocess kernel configs
c_path = join(config_dir, f"{block_config['pre_config']}.yaml")
block_config['pre_config'] = OmegaConf.load(c_path)
# Load SSM kernel configs
c_path = join(config_dir, f"{block_config['ssm_config']}.yaml")
block_config['ssm_config'] = OmegaConf.load(c_path)
# Load MLP configs
c_path = join(config_dir, f"{block_config['mlp_config']}.yaml")
block_config['mlp_config'] = OmegaConf.load(c_path)
config[k] = _config
config = update_embedding_config_from_args(config, args)
config = update_block_config_from_args(config, args)
config.output_config = update_output_config_from_args(config.output_config, args)
config.output_config.input_dim = config.output_config.kwargs.input_dim
config.output_config.output_dim = config.output_config.kwargs.output_dim
return config
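# Assumed config layout (illustrative): configs/model/encoder/default.yaml
# lists per-block 'pre_config', 'ssm_config', and 'mlp_config' paths; the
# loop above replaces each path string in-place with its loaded OmegaConf
# object before the update_*_from_args passes run.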
def update_embedding_config_from_args(config, args):
n_heads, head_dim = update_n_heads(config.embedding_config, args)
if args.n_heads is not None:
config.embedding_config['kwargs']['n_heads'] = args.n_heads
elif config.embedding_config['kwargs']['n_heads'] is not None:
args.n_heads = config.embedding_config['kwargs']['n_heads']
else:
args.n_heads = args.model_dim // (args.input_dim * args.n_kernels)
config.embedding_config['kwargs']['n_heads'] = args.n_heads
if args.n_kernels is not None:
config.embedding_config['kwargs']['n_kernels'] = args.n_kernels
elif config.embedding_config['kwargs']['n_kernels'] is not None:
args.n_kernels = config.embedding_config['kwargs']['n_kernels']
else:
args.n_kernels = args.model_dim // (args.input_dim * args.n_heads)
config.embedding_config['kwargs']['n_kernels'] = args.n_kernels
if args.input_dim != 1:
embedding_dim = (n_heads * head_dim * args.n_kernels)
config.embedding_config['kwargs']['input_dim'] = args.input_dim
elif args.model_dim is not None:
embedding_dim = args.model_dim
    else:
args.model_dim = args.n_heads * args.n_kernels * head_dim
embedding_dim = args.model_dim
config.embedding_config['kwargs']['embedding_dim'] = embedding_dim
return config
def update_block_config_from_args(config, args):
# Update encoder only
# - Update both SSM and MLP configs, and also total number of blocks
# - For blocks, preserve first (which may be slightly special due to preprocessing)
# then add (args.n_blocks - 1) copies of an updated block
# Update first block preprocess_config
encoder_block = config.encoder_config['blocks'][0]
if args.input_dim > 1:
_config = encoder_block.pre_config
_config.kwargs.head_dim = args.input_dim
_config.kwargs.model_dim = (args.input_dim * _config.kwargs.n_heads *
_config.kwargs.n_kernels * _config.kwargs.kernel_repeat)
encoder_block.pre_config = update_preprocess_config_from_args(encoder_block.pre_config, args)
# if args.input_dim > 1: # Remember to comment out / git commit
_config = encoder_block.ssm_config
_config.kwargs.head_dim = args.input_dim
if args.model_dim is None:
_config.kwargs.model_dim = (args.input_dim * _config.kwargs.n_heads *
_config.kwargs.n_kernels * _config.kwargs.kernel_repeat)
else:
_config.kwargs.model_dim = args.model_dim
encoder_block.mlp_config = update_mlp_config_from_args(encoder_block.mlp_config, args,
input_dim=_config.kwargs.model_dim if args.input_dim > 1 else None)
# Update remaining blocks
encoder_block = config.encoder_config['blocks'][-1]
if encoder_block.pre_config.kwargs is not None:
encoder_block.pre_config = update_preprocess_config_from_args(encoder_block.pre_config, args)
encoder_block.ssm_config = update_ssm_config_from_args(encoder_block.ssm_config, args)
encoder_block.mlp_config = update_mlp_config_from_args(encoder_block.mlp_config, args)
n_blocks = len(config.encoder_config['blocks'])
if args.n_blocks is not None:
n_blocks = args.n_blocks - 1
else:
args.n_blocks = len(config.encoder_config['blocks']) + 1 # 1 decoder block for now
config.encoder_config['blocks'] = ([config.encoder_config['blocks'][0]] +
[encoder_block] * (n_blocks - 1))
# Update decoder block
config.decoder_config.blocks[0] = update_decoder_block(config.decoder_config.blocks[0], args)
return config
def update_decoder_block(decoder_block, args):
decoder_block.ssm_config.kwargs.lag = args.lag
decoder_block.ssm_config.kwargs.horizon = args.horizon
n_heads, head_dim = update_n_heads(decoder_block.ssm_config, args)
n_kernels = update_n_kernels(decoder_block.ssm_config, args, n_heads)
if args.model_dim is not None:
decoder_block.ssm_config.kwargs.model_dim = args.model_dim
decoder_block.ssm_config.kwargs.n_kernels = n_kernels
decoder_block.ssm_config.kwargs.n_heads = n_heads
decoder_block.ssm_config.kwargs.head_dim = head_dim
if args.norm_order is not None:
decoder_block.ssm_config.kwargs.norm_order = args.norm_order
return decoder_block
def update_preprocess_config_from_args(config, args):
if args.model_dim is not None:
model_dim = args.model_dim
else:
model_dim = config.kwargs.model_dim
kwargs = {
'model_dim': model_dim,
'head_dim': args.input_dim,
'kernel_repeat': model_dim // (config.kwargs.n_kernels *
config.kwargs.head_dim),
'seed': args.seed
}
for k, v in kwargs.items():
if v is not None:
config.kwargs[k] = v
else:
try:
setattr(args, k, config.kwargs[k])
except Exception as e:
print(e)
assert k not in config.kwargs
return config
def update_ssm_config_from_args(config, args):
if 'companion' in config.method or 'shift' in config.method:
kwargs = get_companion_ssm_kwargs_from_args(config, args)
else:
raise NotImplementedError('Still need to implement non-companion SSM')
for k, v in kwargs.items():
if v is not None:
config.kwargs[k] = v
else:
try:
setattr(args, k, config.kwargs[k])
except Exception as e:
print(e)
assert k not in config.kwargs
return config
def get_companion_ssm_kwargs_from_args(config, args):
n_heads, head_dim = update_n_heads(config, args)
n_kernels = update_n_kernels(config, args, n_heads)
kwargs = {
'model_dim': args.model_dim,
'n_kernels': n_kernels,
'kernel_dim': args.kernel_dim,
'n_heads': n_heads,
'head_dim': head_dim,
'kernel_init': args.kernel_init,
'skip_connection': args.skip_ssm,
'norm_order': args.norm_order
}
return kwargs
def update_n_heads(config, args):
if 'head_dim' in config.kwargs:
head_dim = args.input_dim if config.kwargs.head_dim != 1 else 1
else:
head_dim = args.input_dim
if config.kwargs.n_heads == 1:
n_heads = 1
elif args.n_heads is not None:
n_heads = args.n_heads
    else:
n_heads = config.kwargs.n_heads
return n_heads, head_dim
def update_n_kernels(config, args, n_heads):
model_dim = (args.model_dim if args.model_dim is not None
else config.kwargs.model_dim)
if n_heads == 1:
n_kernels = model_dim // config.kwargs.head_dim
elif args.n_kernels is not None:
n_kernels = args.n_kernels
    else:
n_kernels = model_dim // (config.kwargs.head_dim * n_heads)
try:
assert model_dim % (n_kernels * config.kwargs.head_dim * n_heads) == 0
except Exception as e:
print(e)
        print('model_dim:', model_dim)
        print('n_kernels:', n_kernels)
        print('config.kwargs.head_dim:', config.kwargs.head_dim)
        print('n_heads:', n_heads)
breakpoint()
raise e
return n_kernels
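# Worked example (assumed values): with model_dim=128, head_dim=1, n_heads=8
# and n_kernels unset, the fallback gives n_kernels = 128 // (1 * 8) = 16,
# and the assert checks 128 % (16 * 1 * 8) == 0, i.e. kernels, heads, and
# head dims exactly tile the model dimension.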
def update_mlp_config_from_args(config, args,
input_dims=True, output_dims=True,
input_dim=None, output_dim=None):
# Logic for handling input and output dimension update
if input_dims and input_dim is None:
input_dim = args.model_dim
elif not input_dims:
input_dim = None
if output_dims and output_dim is None:
output_dim = args.model_dim
elif not output_dims:
output_dim = None
if config.method == 'mlp':
kwargs = {
'input_dim': input_dim,
'output_dim': output_dim,
'activation': args.activation,
'dropout': args.dropout,
'layernorm': args.layernorm,
'n_layers': args.mlp_n_layers,
'n_activations': args.mlp_n_activations,
'pre_activation': args.mlp_preactivation,
'skip_connection': args.skip_mlp,
}
for k, v in kwargs.items():
if v is not None:
if not input_dims and k == 'skip_connection':
pass
elif not output_dims and k == 'skip_connection':
pass
else:
config.kwargs[k] = v
elif input_dims and output_dims:
setattr(args, k, config.kwargs[k])
return config
def update_output_config_from_args(config, args, update_output_dim=False,
output_dim=None):
if config.method == 'mlp':
config = update_mlp_config_from_args(config, args, input_dims=True,
output_dims=update_output_dim,
input_dim=args.model_dim,
output_dim=output_dim)
return config
|
spacetime-main
|
setup/configs/model.py
|
from os.path import join
from omegaconf import OmegaConf
def get_optimizer_config(args, config_dir='./configs'):
config = OmegaConf.load(
join(config_dir, 'optimizer', f'{args.optimizer}.yaml'))
if args.lr is not None:
config.lr = args.lr
if args.weight_decay is not None:
config.weight_decay = args.weight_decay
if args.optimizer == 'sgd' and args.momentum is not None:
config.momentum = args.momentum
return config
def get_scheduler_config(args, config_dir='./configs'):
config = OmegaConf.load(
join(config_dir, 'scheduler', f'{args.scheduler}.yaml'))
if (config.scheduler._name_ == 'plateau' and args.val_metric == 'acc'):
config.scheduler.mode = 'max'
return config
|
spacetime-main
|
setup/configs/optimizer.py
|
from os.path import join
from omegaconf import OmegaConf
from dataloaders import get_data_module
def get_dataset_config(args, config_dir='./configs'):
get_data_module(args) # Initialize args.dataset_type
fpath = join(config_dir, 'datasets', args.dataset_type, f'{args.dataset}.yaml')
config = OmegaConf.load(fpath)
config = update_dataset_config_from_args(config, args)
return config
def get_dataloader_config(args, config_dir='./configs'):
get_data_module(args) # Initialize args.dataset_type
fpath = join(config_dir, 'loader', f'{args.dataset_type}.yaml')
config = OmegaConf.load(fpath)
config.batch_size = args.batch_size
config.num_workers = args.num_workers
config.pin_memory = not args.no_pin_memory
return config
# ---------------------------------
# Update configs from argparse args
# ---------------------------------
def update_dataset_config_from_args(config, args):
if args.dataset_type == 'informer':
config.size = [args.lag, args.horizon, args.horizon]
config.features = args.features
config.variant = args.variant
config.scale = not args.no_scale
config.inverse = args.inverse
else:
pass
return config
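# Note (assumed semantics, mirroring the Informer datasets): size is
# [seq_len, label_len, pred_len], so the label window here is set equal to
# the forecast horizon.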
|
spacetime-main
|
setup/configs/data.py
|
"""
Logging utilities
"""
import rich.syntax
import rich.tree
from omegaconf import OmegaConf, DictConfig, ListConfig
def print_header(x, border='both'):
    if border in ('top', 'both'):
        print('-' * len(x))
    print(x)
    if border in ('bottom', 'both'):
        print('-' * len(x))
def print_args(args, return_dict=False, verbose=True):
attributes = [a for a in dir(args) if a[0] != '_']
    arg_dict = {}
if verbose: print('ARGPARSE ARGS')
for ix, attr in enumerate(attributes):
        fancy = '└──' if ix == len(attributes) - 1 else '├──'
if verbose: print(f'{fancy} {attr}: {getattr(args, attr)}')
arg_dict[attr] = getattr(args, attr)
if return_dict:
return arg_dict
# Control how tqdm progress bar looks
def type_of_script():
    try:
        ipy_str = str(type(get_ipython()))  # get_ipython only exists under IPython
        if 'zmqshell' in ipy_str:
            return 'jupyter'
        if 'terminal' in ipy_str:
            return 'ipython'
        return 'terminal'
    except NameError:
        return 'terminal'
def print_config(config: DictConfig,
resolve: bool = True,
name: str = 'CONFIG') -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
config (DictConfig): Configuration composed by Hydra.
        resolve (bool, optional): Whether to resolve reference fields of DictConfig.
        name (str, optional): Root label of the printed tree.
"""
style = "bright" # "dim"
tree = rich.tree.Tree(name, style=style, guide_style=style)
fields = config.keys()
for field in fields:
branch = tree.add(field, style=style, guide_style=style)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
elif isinstance(config_section, ListConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
rich.print(tree)
# with open("config_tree.txt", "w") as fp:
# rich.print(tree, file=fp)
|
spacetime-main
|
utils/logging.py
|
"""
Code from https://github.com/HazyResearch/state-spaces/blob/main/src/utils/config.py
"""
import rich.syntax
import rich.tree
from omegaconf import OmegaConf, DictConfig
from typing import Sequence, Mapping
def print_config(config: DictConfig,
resolve: bool = True,) -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
config (DictConfig): Configuration composed by Omegaconf
fields (Sequence[str], optional): Determines which main fields from config will
be printed and in what order.
resolve (bool, optional): Whether to resolve reference fields of DictConfig.
"""
style = "bright" # "dim"
tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)
fields = config.keys()
for field in fields:
branch = tree.add(field, style=style, guide_style=style)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
rich.print(tree)
def is_list(x):
return isinstance(x, Sequence) and not isinstance(x, str)
def is_dict(x):
return isinstance(x, Mapping)
def to_dict(x, recursive=True):
"""Convert Sequence or Mapping object to dict
lists get converted to {0: x[0], 1: x[1], ...}
"""
if is_list(x):
x = {i: v for i, v in enumerate(x)}
if is_dict(x):
if recursive:
return {k: to_dict(v, recursive=recursive) for k, v in x.items()}
else:
return dict(x)
else:
return x
def to_list(x, recursive=False):
"""Convert an object to list.
If Sequence (e.g. list, tuple, Listconfig): just return it
Special case: If non-recursive and not a list, wrap in list
"""
if is_list(x):
if recursive:
return [to_list(_x) for _x in x]
else:
return list(x)
else:
if recursive:
return x
else:
return [x]
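# Illustrative behavior (not from the source):
#   to_dict([10, 20])  -> {0: 10, 1: 20}
#   to_list((1, 2))    -> [1, 2]
#   to_list(5)         -> [5]   (non-recursive wraps scalars in a list)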
|
spacetime-main
|
utils/config.py
|
import copy
def update_args_from_checkpoint_name(args, fname):
_args = copy.deepcopy(args)
    fname = (fname.replace('=no-', '=normal-')
                  .replace('=xa-', '=xavier-')
                  .replace('.pth', '')
                  .replace('=tc-', '=timm_cosine-')
                  .replace('=ir-', '=informer_rmse-'))
all_args = []
for f in fname.split('-')[2:]:
k, v = f.split('=')
if k in all_args:
k += '_'
all_args.append(k)
try:
v = arg_type[k](v)
except Exception as e:
print(k, v, e)
if v != 'None':
if k in ['ec', 'pc', 'ec_', 'dc', 'oc']:
v = set_config_arg(v, arg_map[k])
setattr(_args, arg_map[k], v)
else:
setattr(_args, arg_map[k], None)
return _args
def set_config_arg(config_name, arg_map_val):
return f"{arg_map_val.split('_')[0]}/{config_name}"
arg_map = {'ns': 'n_shots',
'ec': 'embedding_config',
'pc': 'preprocess_config',
'ec_': 'encoder_config',
'dc': 'decoder_config',
'oc': 'output_config',
'nb': 'n_blocks',
'nk': 'n_kernels',
'nh': 'n_heads',
'md': 'model_dim',
'kd': 'kernel_dim',
'ki': 'kernel_init',
'no': 'norm_order',
'la': 'lag',
'ho': 'horizon',
'dt': 'data_transform',
'cw': 'criterion_weights',
'lo': 'loss',
'dr': 'dropout',
'lr': 'lr',
'op': 'optimizer',
'sc': 'scheduler',
'wd': 'weight_decay',
'bs': 'batch_size',
'vm': 'val_metric',
'me': 'max_epochs',
'ese': 'early_stopping_epochs',
're': 'replicate',
'se': 'seed'}
arg_type = {'ns': int,
'ec': str,
'pc': str,
'ec_': str,
'dc': str,
'oc': str,
'nb': int,
'nk': int,
'nh': int,
'md': int,
'kd': int,
'hd': int,
'ki': str,
'no': int,
'la': int,
'ho': int,
'dt': str,
'cw': str,
'lo': str,
'dr': float,
'lr': float,
'op': str,
'sc': str,
'wd': float,
'bs': int,
'vm': str,
'me': int,
'ese': int,
're': int,
'se': int}
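# Hedged example (hypothetical filename): for a checkpoint named
#   'bval-m=spacetime-nb=4-lr=0.001-se=0.pth'
# update_args_from_checkpoint_name strips '.pth', skips the first two
# '-'-separated fields, then applies arg_map/arg_type so that
# 'nb=4' -> args.n_blocks = 4, 'lr=0.001' -> args.lr = 0.001,
# 'se=0' -> args.seed = 0.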
|
spacetime-main
|
utils/checkpoint.py
|
spacetime-main
|
utils/__init__.py
|
|
spacetime-main
|
model/__init__.py
|
|
import torch.nn as nn
from einops import rearrange
from model.components import Activation, DropoutNd
def init_mlp(config):
if config['method'] == 'mlp':
return MLP(**config['kwargs'])
else:
return nn.Identity()
class MLP(nn.Module):
def __init__(self,
input_dim: int,
output_dim: int,
activation: str=None,
dropout: float=0.,
layernorm: bool=False,
n_layers: int=1,
n_activations: int=0,
pre_activation: bool=False,
input_shape: str='bld',
hidden_dim: int=None,
skip_connection: bool=False,
average_pool: str=None):
"""
Fully-connected network
"""
super().__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.input_shape = input_shape
self.activation = activation
self.dropout = dropout
self.layernorm = nn.LayerNorm(input_dim) if layernorm else nn.Identity()
self.n_layers = n_layers
self.n_activations = n_activations
self.pre_activation = pre_activation
self.skip_connection = skip_connection
self.average_pool = average_pool
self.initialize_layers()
def initialize_layers(self):
n_layers_to_init = self.n_layers
n_activations_to_init = self.n_activations
if self.hidden_dim is None:
if self.n_layers < 2:
self.hidden_dim = self.output_dim
else:
self.hidden_dim = self.input_dim
# Add layers
if self.n_activations > self.n_layers or self.pre_activation:
layers = [Activation(self.activation, inplace=True), self.init_dropout()]
n_activations_to_init -= 1
else:
layers = []
while n_layers_to_init > 0 or n_activations_to_init > 0:
if n_layers_to_init == self.n_layers:
layers.append(nn.Linear(self.input_dim, self.hidden_dim))
elif n_layers_to_init > 1:
layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))
elif n_layers_to_init == 1:
layers.append(nn.Linear(self.hidden_dim, self.output_dim))
if n_activations_to_init > 0:
layers.append(Activation(self.activation, inplace=True))
n_layers_to_init -= 1
n_activations_to_init -= 1
self.layers = nn.Sequential(*layers)
def init_dropout(self):
if self.dropout > 1: # Dropout hack for now, testing DropoutNd
return DropoutNd(p=self.dropout-1.)
elif self.dropout > 0:
return nn.Dropout(self.dropout)
else:
return nn.Identity()
def forward(self, x):
x = self.layernorm(x)
if self.input_shape == 'bdl':
x = rearrange(x, 'b d l -> b l d')
if self.skip_connection:
# Layernorm with skip connection
x = self.layers(x) + x
else:
x = self.layers(x)
if self.average_pool == 'l':
x = x.mean(dim=1, keepdim=True)
return x
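if __name__ == '__main__':
    # Minimal usage sketch (assumed shapes, not from the source): two linear
    # layers with one GELU in between, applied to (batch, length, dim) input.
    import torch
    mlp = MLP(input_dim=64, output_dim=64, activation='gelu',
              n_layers=2, n_activations=1)
    y = mlp(torch.randn(8, 100, 64))
    print(y.shape)  # torch.Size([8, 100, 64])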
|
spacetime-main
|
model/mlp.py
|
"""
SpaceTime Network
"""
import torch.nn as nn
from model.embedding import init_embedding
from model.block import Encoder, Decoder
from model.mlp import init_mlp
class SpaceTime(nn.Module):
def __init__(self,
embedding_config: dict,
encoder_config: dict,
decoder_config: dict,
output_config: dict,
inference_only: bool=False,
lag: int=1,
horizon: int=1):
super().__init__()
self.embedding_config = embedding_config
self.encoder_config = encoder_config
self.decoder_config = decoder_config
self.output_config = output_config
self.inference_only = inference_only
self.lag = lag
self.horizon = horizon
self.init_weights(embedding_config, encoder_config,
decoder_config, output_config)
# -----------------
# Initialize things
# -----------------
def init_weights(self,
embedding_config: dict,
encoder_config: dict,
decoder_config: dict,
output_config: dict):
self.embedding = self.init_embedding(embedding_config)
self.encoder = self.init_encoder(encoder_config)
self.decoder = self.init_decoder(decoder_config)
self.output = self.init_output(output_config)
def init_embedding(self, config):
return init_embedding(config)
def init_encoder(self, config):
self.encoder = Encoder(config)
# Allow access to first encoder SSM kernel_dim
self.kernel_dim = self.encoder.blocks[0].ssm.kernel_dim
return self.encoder
def init_decoder(self, config):
self.decoder = Decoder(config)
self.decoder.blocks.ssm.lag = self.lag
self.decoder.blocks.ssm.horizon = self.horizon
return self.decoder
def init_output(self, config):
return init_mlp(config)
# -------------
# Toggle things
# -------------
def set_inference_only(self, mode=False):
self.inference_only = mode
self.decoder.blocks.ssm.inference_only = mode
def set_closed_loop(self, mode=True):
self.decoder.blocks.ssm.closed_loop = mode
def set_train(self):
self.train()
def set_eval(self):
self.eval()
self.set_inference_only(mode=True)
def set_lag(self, lag: int):
self.decoder.blocks.ssm.lag = lag
def set_horizon(self, horizon: int):
self.decoder.blocks.ssm.horizon = horizon
# ------------
# Forward pass
# ------------
def forward(self, u):
self.set_closed_loop(True)
# Assume u.shape is (batch x len x dim),
# where len = lag + horizon
z = self.embedding(u)
z = self.encoder(z)
y_c, _ = self.decoder(z)
y_c = self.output(y_c) # y_c is closed-loop output
if not self.inference_only:
# Also compute outputs via open-loop
self.set_closed_loop(False)
y_o, z_u = self.decoder(z)
y_o = self.output(y_o) # y_o is "open-loop" output
# Prediction and "ground-truth" for next-time-step
# layer input (i.e., last-layer output)
z_u_pred, z_u_true = z_u
else:
y_o = None
z_u_pred, z_u_true = None, None
# Return (model outputs), (model last-layer next-step inputs)
return (y_c, y_o), (z_u_pred, z_u_true)
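# Illustrative forward contract (shapes assumed): for input u of shape
# (batch, lag + horizon, dim), forward() returns
#   (y_c, y_o), (z_u_pred, z_u_true)
# where y_c is the closed-loop forecast, y_o the open-loop output used for
# training (None when inference_only), and z_u_pred / z_u_true supervise the
# decoder's next-time-step layer inputs.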
|
spacetime-main
|
model/network.py
|
"""
Basic neural net components
OurModule from: https://github.com/HazyResearch/state-spaces/blob/main/src/models/sequence/ss/kernel.py (OptimModule)
Activation and DropoutND from: https://github.com/HazyResearch/state-spaces/blob/main/src/models/nn/components.py
"""
import torch
import torch.nn as nn
from einops import rearrange
class OurModule(nn.Module):
def __init__(self):
super().__init__()
def register(self, name, tensor, trainable=False, lr=None, wd=None):
"""Utility method: register a tensor as a buffer or trainable parameter"""
if trainable:
try:
self.register_parameter(name, nn.Parameter(tensor))
except KeyError:
delattr(self, name)
self.register_parameter(name, nn.Parameter(tensor))
else:
try:
self.register_buffer(name, tensor)
except KeyError:
delattr(self, name)
self.register_buffer(name, tensor)
optim = {}
if trainable and lr is not None: optim["lr"] = lr
if trainable and wd is not None: optim["weight_decay"] = wd
if len(optim) > 0: setattr(getattr(self, name), "_optim", optim)
def Activation(activation=None, size=None, dim=-1, inplace=False):
if activation in [ None, 'id', 'identity', 'linear' ]:
return nn.Identity(inplace)
    elif activation == 'tanh':
        return nn.Tanh()
elif activation == 'relu':
return nn.ReLU(inplace)
elif activation == 'gelu':
return nn.GELU()
elif activation in ['swish', 'silu']:
return nn.SiLU(inplace)
elif activation == 'glu':
return nn.GLU(dim=dim)
    elif activation == 'sigmoid':
        return nn.Sigmoid()
else:
raise NotImplementedError("hidden activation '{}' is not implemented".format(activation))
class DropoutNd(nn.Module):
def __init__(self, p: float = 0.5, tie=True, transposed=True):
"""
tie: tie dropout mask across sequence lengths (Dropout1d/2d/3d)
"""
super().__init__()
if p < 0 or p >= 1:
raise ValueError("dropout probability has to be in [0, 1), " "but got {}".format(p))
self.p = p
self.tie = tie
self.transposed = transposed
self.binomial = torch.distributions.binomial.Binomial(probs=1-self.p)
def forward(self, x):
""" x: (batch, lengths..., dim) """
if self.training:
if self.transposed: x = rearrange(x, 'b ... d -> b d ...')
mask_shape = x.shape[:2] + (1,)*(x.ndim-2) if self.tie else x.shape
mask = torch.rand(*mask_shape, device=x.device) < 1.-self.p
x = x * mask * (1.0/(1-self.p))
if self.transposed: x = rearrange(x, 'b d ... -> b ... d')
return x
return x
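if __name__ == '__main__':
    # Quick illustrative check of the Activation factory (not from the source):
    print(Activation('gelu'))                # GELU()
    print(Activation(None))                  # Identity()
    print(Activation('relu', inplace=True))  # ReLU(inplace=True)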
|
spacetime-main
|
model/components.py
|
"""
SpaceTime blocks, stacked into encoder and decoder of architecture
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.components import OurModule
from model.mlp import init_mlp
from model.ssm import init_ssm
from model.ssm.preprocess import init_preprocess_ssm as init_pre
class Block(OurModule):
"""
Standard encoder block
"""
def __init__(self,
input_dim: int,
pre_config: str=None,
ssm_config: str=None,
mlp_config: str=None,
skip_connection: bool=False,
skip_preprocess: bool=False):
super().__init__()
self.input_dim = input_dim
self.skip_connection = skip_connection
self.skip_preprocess = skip_preprocess
self.pre = init_pre(pre_config)
self.ssm = init_ssm(ssm_config)
self.mlp = init_mlp(mlp_config)
def forward(self, u):
"""
Input shape: B x L x D
"""
z = self.pre(u)
y = self.ssm(z)
y = self.mlp(y)
if self.skip_connection and self.skip_preprocess:
return y + u # Also skip preprocessing step
elif self.skip_connection:
return y + z
else:
return y
class ClosedLoopBlock(Block):
"""
Block with a closed-loop SSM.
In SpaceTime, we only consider using one ClosedLoopBlock
as the last-layer in a single-layer decoder.
However, other architectures can also be explored, e.g.,
having more "open" blocks on top of the ClosedLoopBlock
in a multi-layer decoder.
"""
def __init__(self, **kwargs):
kwargs['skip_connection'] = False
super().__init__(**kwargs)
def forward(self, u):
z = self.pre(u)
# Computes layer outputs and next-time-step layer inputs
y, u_next = self.ssm(z)
# Return both layer outputs and prediction + "ground-truth"
# for next-time-step layer inputs
return y, (u_next, u)
class Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.blocks = self.init_blocks(config)
def init_blocks(self, config):
blocks = []
for block in config['blocks']:
blocks.append(Block(**block))
return nn.Sequential(*blocks)
def forward(self, x):
return self.blocks(x)
class Decoder(nn.Module):
"""
In SpaceTime, we only consider using one ClosedLoopBlock
as the last-layer in a single-layer decoder.
However, other architectures can also be explored, e.g.,
having more "open" blocks on top of the ClosedLoopBlock
in a multi-layer decoder.
In future, can refactor this class to be more general
and support multiple layers. (p easy, just weirdness with
nn.Sequential and multiple outputs)
"""
def __init__(self, config):
super().__init__()
self.config = config
self.blocks = self.init_blocks(config)
def init_blocks(self, config):
return ClosedLoopBlock(**config['blocks'][0])
def forward(self, x):
return self.blocks(x) # y, (u_next, u)
|
spacetime-main
|
model/block.py
|
import math
import torch
import torch.nn.functional as F
import opt_einsum as oe
from einops import repeat, rearrange
from model.functional.krylov import krylov
from model.ssm.base import SSM
class CompanionSSM(SSM):
"""
Open-loop implementation of Companion SSM:
    -> y_k = C \sum_{i = 0}^{k - 1} A^{k - 1 - i} B u_i
where A is companion matrix
"""
def __init__(self, norm_order, **kwargs):
self.norm_order = norm_order
kwargs['kernel_repeat'] = 1
kwargs['kernel_weights'] = None
kwargs['kernel_train'] = True
# Set kwargs['n_heads'] as n_kernels for preprocessing kernels
# Set kwargs['head_dim'] to be original sample input dim
super().__init__(**kwargs)
def init_kernel_weights(self, kernel_init):
if kernel_init == 'normal':
kernel = torch.randn(self.n_kernels, self.kernel_dim)
elif kernel_init == 'xavier':
# Xavier-ish initialization
stdv = 1. / math.sqrt(self.kernel_dim)
kernel = torch.FloatTensor(self.n_kernels,
self.kernel_dim).uniform_(-stdv, stdv)
else:
raise NotImplementedError
return kernel
def init_weights(self):
super().init_weights() # Initializes skip connection
self._fp = (self.n_kernels, self.kernel_dim)
# Shift matrix initialization
self.shift_matrix = torch.zeros(self.n_kernels,
self.kernel_dim,
self.kernel_dim)
self.shift_matrix[:, 1:, :-1] = torch.eye(self.kernel_dim - 1)
self.p_padding = torch.zeros(*self._fp)
self.p_padding[:, -1] = 1.
# A matrix
a = self.init_kernel_weights(self.kernel_init)
self.register("a", a, trainable=True, lr=None, wd=None)
# B matrix
b = self.init_kernel_weights(self.kernel_init)
self.register("b", b, trainable=True, lr=None, wd=None)
# C matrix
c = self.init_kernel_weights(self.kernel_init)
self.register("c", c, trainable=True, lr=None, wd=None)
def norm(self, x, ord=1):
# x.shape is either (H x D) or (H x D x D)
x_norm = torch.linalg.norm(x, ord=ord, dim=-1, keepdim=True)
        # If norm(x) in batch is close to 0, don't normalize
        # (heuristic, but we normalize for stability)
try:
x = x / x_norm if torch.abs(x_norm).mean().item() > 1e-4 else x
except Exception as e:
print(e)
breakpoint()
# x = F.normalize(x, dim=1, p=ord, eps=1)
return x
def matrix_power(self, l, c, b, p):
# Construct companion matrix
A = self.shift_matrix.to(p.device) + (
oe.contract('h i, h j -> h j i',
self.p_padding.to(p.device), p)
)
# Use repeated squares to power A
g = krylov(l, A, b, c)
return g
def get_kernel(self, u, c=None, l=None):
l = u.shape[-1] if l is None else l
c = self.c if c is None else c
a = (self.norm(self.a, ord=self.norm_order)
if self.norm_order > 0 else self.a)
f = self.matrix_power(l, c, self.b, a).to(u.device)
return f
def forward(self, u):
return super().forward(u)
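if __name__ == '__main__':
    # Illustrative sketch (single kernel, assumed values): matrix_power builds
    # a companion matrix with ones on the subdiagonal and the (normalized)
    # state vector in the last column, i.e. A = shift + p e_last^T.
    d = 4
    shift = torch.zeros(d, d)
    shift[1:, :-1] = torch.eye(d - 1)
    p = torch.arange(1., d + 1)
    e_last = torch.zeros(d)
    e_last[-1] = 1.0
    A = shift + torch.outer(p, e_last)  # p appears in the last column
    print(A)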
|
spacetime-main
|
model/ssm/companion.py
|
from .companion import CompanionSSM
from .shift import ShiftSSM
from .closed_loop import ClosedLoopCompanionSSM, ClosedLoopShiftSSM
def init_ssm(config):
supported_methods = ['companion', 'closed_loop_companion',
'shift', 'closed_loop_shift']
if config['method'] == 'companion':
ssm = CompanionSSM
elif config['method'] == 'closed_loop_companion':
ssm = ClosedLoopCompanionSSM
elif config['method'] == 'shift':
ssm = ShiftSSM
elif config['method'] == 'closed_loop_shift':
ssm = ClosedLoopShiftSSM
else:
raise NotImplementedError(
f"SSM config method {config['method']} not implemented! Please choose from {supported_methods}")
return ssm(**config['kwargs'])
|
spacetime-main
|
model/ssm/__init__.py
|
import torch
import opt_einsum as oe
from einops import repeat, rearrange
from model.functional.krylov import krylov
from model.ssm.companion import CompanionSSM
class ShiftSSM(CompanionSSM):
"""
Open-loop implementation of Shift SSM:
    -> y_k = C \sum_{i = 0}^{k - 1} S^{k - 1 - i} B u_i
where S is shift matrix
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def init_weights(self):
super().init_weights() # Initializes skip connection, B, C matrices
# A column initialized in super().init_weights(), but now we zero-out
a = torch.zeros(self.n_kernels, self.kernel_dim)
self.register("a", a, trainable=False, lr=None, wd=None)
# B Matrix - make it not learnable by default
b = torch.zeros(self.n_kernels, self.kernel_dim)
b[:, 0] = 1.
self.register("b", b, trainable=False, lr=None, wd=None)
# C matrix
c = self.init_kernel_weights(self.kernel_init)
self.register("c", c, trainable=True, lr=None, wd=None)
def forward(self, u):
return super().forward(u)
|
spacetime-main
|
model/ssm/shift.py
|
import torch
import torch.nn as nn
import opt_einsum as oe
from einops import rearrange, repeat
from model.components import OurModule
class SSM(OurModule):
def __init__(self,
model_dim: int,
n_kernels: int, # Number of kernels / scales
kernel_dim: int,
kernel_repeat: int,
n_heads: int=None, # Number of heads per kernel
head_dim: int=1, # Dimension of each head
kernel_weights: torch.float=None,
kernel_init: str='normal',
kernel_train: bool=True,
skip_connection: bool=False,
seed: int=42):
super().__init__()
# At least one of these should be int
assert not (n_heads is None and head_dim is None)
self.model_dim = model_dim
self.n_kernels = n_kernels
self.kernel_dim = kernel_dim
self.kernel_repeat = kernel_repeat
self.head_dim, self.n_heads = self.init_heads(n_heads, head_dim)
self.kernel_weights = kernel_weights
self.kernel_init = kernel_init
self.kernel_train = kernel_train
self.skip_connection = skip_connection
self.seed = seed
self.generator = torch.Generator()
self.generator.manual_seed(seed)
self.init_weights()
def init_heads(self, n_heads: int, head_dim: int):
if head_dim is None:
self.head_dim = self.model_dim // (self.kernel_repeat *
self.n_kernels * n_heads)
self.n_heads = n_heads
else:
self.head_dim = head_dim
self.n_heads = self.model_dim // (self.kernel_repeat *
self.n_kernels * head_dim)
return self.head_dim, self.n_heads
def fft_conv(self, u_input: torch.tensor, v_kernel: torch.tensor):
# Convolve u with v in O(n log n) time with FFT (n = len(u))
L = u_input.shape[-1] # Assume u is input
u_f = torch.fft.rfft(u_input, n=2*L) # (B H L)
v_f = torch.fft.rfft(v_kernel[:, :L], n=2*L) # (H L)
y_f = oe.contract('b h l, h l -> b h l', u_f, v_f)
y = torch.fft.irfft(y_f, n=2*L)[..., :L] # (B H L)
return y
def init_weights(self):
        if self.kernel_weights is not None:
            # lr and wd as None sets them to match the model lr and weight_decay
            self.register('k', self.kernel_weights, trainable=True, lr=None, wd=None)
skip = torch.randn(self.model_dim)
self.register('skip', skip, trainable=True, lr=None, wd=None)
def get_kernel(self):
raise NotImplementedError
def forward(self, u):
u = rearrange(u, 'b l d -> b d l') # Assume u is B x L x D
# Repeat kernels across heads
if self.kernel_weights is None:
k = self.get_kernel(u)
k = repeat(k, 'nk kd -> (kr nk nh hd) kd',
kr=self.kernel_repeat, nh=self.n_heads, hd=self.head_dim)
else:
k = self.k
try:
y = self.fft_conv(u, k)
except Exception as e:
print(e)
breakpoint()
if self.skip_connection:
y = y + oe.contract('b d l, d -> b d l', u, self.skip)
y = rearrange(y, 'b d l -> b l d')
return y
class IdentitySSM(SSM):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def init_weights(self):
self.register('kernel', None, trainable=False)
def forward(self, u):
return u
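if __name__ == '__main__':
    # Sanity sketch (assumed, not from the source): fft_conv implements a
    # causal convolution, y[t] = sum_{s<=t} k[s] * u[t-s], via zero-padded FFT.
    B, H, L = 2, 3, 16
    u = torch.randn(B, H, L)
    k = torch.randn(H, L)
    u_f = torch.fft.rfft(u, n=2 * L)
    k_f = torch.fft.rfft(k, n=2 * L)
    y_fft = torch.fft.irfft(u_f * k_f, n=2 * L)[..., :L]
    y_direct = torch.zeros(B, H, L)
    for t in range(L):
        y_direct[..., t] = (u[..., :t + 1] * k[:, :t + 1].flip(-1)).sum(-1)
    print(torch.allclose(y_fft, y_direct, atol=1e-4))  # True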
|
spacetime-main
|
model/ssm/base.py
|