CoffeBank committed
Commit 076a7ff · 1 Parent(s): e4082c9
Files changed (2)
  1. binoculars/detector.py +2 -5
  2. model_utils.py +3 -4
binoculars/detector.py CHANGED
@@ -20,9 +20,6 @@ huggingface_config = {
 BINOCULARS_ACCURACY_THRESHOLD = 0.9015310749276843 # optimized for f1-score
 BINOCULARS_FPR_THRESHOLD = 0.8536432310785527 # optimized for low-fpr [chosen at 0.01%]
 
-DEVICE_1 = "cuda:0" if torch.cuda.is_available() else "cpu"
-DEVICE_2 = "cuda:1" if torch.cuda.device_count() > 1 else DEVICE_1
-
 
 class Binoculars(object):
     def __init__(self,
@@ -36,14 +33,14 @@ class Binoculars(object):
 
         self.change_mode(mode)
         self.observer_model = AutoModelForCausalLM.from_pretrained(observer_name_or_path,
-                                                                    device_map={"": DEVICE_1},
+                                                                    device_map={"": "cuda" if torch.cuda.is_available() else "cpu"},
                                                                     trust_remote_code=True,
                                                                     torch_dtype=torch.bfloat16 if use_bfloat16
                                                                     else torch.float32,
                                                                     token=huggingface_config["TOKEN"]
                                                                     )
         self.performer_model = AutoModelForCausalLM.from_pretrained(performer_name_or_path,
-                                                                    device_map={"": DEVICE_2},
+                                                                    device_map={"": "cuda" if torch.cuda.is_available() else "cpu"},
                                                                     trust_remote_code=True,
                                                                     torch_dtype=torch.bfloat16 if use_bfloat16
                                                                     else torch.float32,
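
This change drops the hard-coded cuda:0/cuda:1 split and inlines the same "cuda"-if-available-else-"cpu" expression for both models, so observer and performer are now placed on a single device. A minimal sketch of that placement pattern, with the repeated ternary factored into one constant (the DEVICE name and the load_single_device helper are illustrative, not part of this commit):

import torch
from transformers import AutoModelForCausalLM

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # illustrative constant, not in the commit

def load_single_device(name_or_path, token, use_bfloat16=True):
    # device_map={"": DEVICE} assigns every module of the model to that one device;
    # it does not shard across GPUs the way device_map="auto" can.
    return AutoModelForCausalLM.from_pretrained(
        name_or_path,
        device_map={"": DEVICE},
        trust_remote_code=True,
        torch_dtype=torch.bfloat16 if use_bfloat16 else torch.float32,
        token=token,
    )
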
model_utils.py CHANGED
@@ -7,7 +7,6 @@ from NN_classifier.simple_binary_classifier import Medium_Binary_Network
 from feature_extraction import extract_features
 import pandas as pd
 
-
 def load_model(model_dir='models/medium_binary_classifier'):
     model_path = os.path.join(model_dir, 'nn_model.pt')
     scaler_path = os.path.join(model_dir, 'scaler.joblib')
@@ -28,8 +27,8 @@ def load_model(model_dir='models/medium_binary_classifier'):
 
     input_size = scaler.n_features_in_
 
-    model = Medium_Binary_Network(input_size, hidden_sizes=[256, 192, 128, 64], dropout=0.3)
-    model.load_state_dict(torch.load(model_path))
+    model = Medium_Binary_Network(input_size, hidden_sizes=[256, 192, 128, 64], dropout=0.3).to("cuda" if torch.cuda.is_available() else "cpu")
+    model.load_state_dict(torch.load(model_path, map_location="cuda" if torch.cuda.is_available() else "cpu"))
     model.eval()
 
     if imputer is not None:
@@ -75,7 +74,7 @@ def classify_text(text, model, scaler, label_encoder, imputer=None, scores=None)
 
     features_scaled = scaler.transform(features)
 
-    features_tensor = torch.FloatTensor(features_scaled)
+    features_tensor = torch.FloatTensor(features_scaled).to("cuda" if torch.cuda.is_available() else "cpu")
 
     with torch.no_grad():
         outputs = model(features_tensor)
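
Taken together, the model_utils.py changes make the classifier, its checkpoint, and the input tensor agree on one device: .to(...) moves the network, map_location lets a GPU-saved checkpoint load on a CPU-only host, and the features tensor is moved before the forward pass. A runnable sketch of the same pattern, assuming a stand-in nn.Sequential and a demo checkpoint path in place of the project-specific Medium_Binary_Network and nn_model.pt:

import numpy as np
import torch
import torch.nn as nn

device = "cuda" if torch.cuda.is_available() else "cpu"

# Stand-in for Medium_Binary_Network; the real classifier and checkpoint are project-specific.
net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2)).to(device)
torch.save(net.state_dict(), "nn_model_demo.pt")

# map_location remaps GPU-saved tensors so the same checkpoint also loads on a CPU-only host.
net.load_state_dict(torch.load("nn_model_demo.pt", map_location=device))
net.eval()

# Mirrors the classify_text change: inputs must live on the same device as the model.
features_scaled = np.random.rand(1, 8).astype(np.float32)
features_tensor = torch.FloatTensor(features_scaled).to(device)
with torch.no_grad():
    outputs = net(features_tensor)
print(outputs.shape)  # torch.Size([1, 2])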