import asyncio
import csv
import difflib
import gym
import json
import logging
import numpy as np
import os
import pathlib
import pdb
import random
import sys
import time
import torch
import tqdm
from nle.nethack.actions import ACTIONS
from transformers import GenerationConfig
from transformers import StoppingCriteria

base_path = str(pathlib.Path().resolve())
PROJECT_PATH = os.path.join(base_path[: base_path.find("diff_history")], "diff_history")

# Make the vendored nle-language-wrapper importable regardless of which
# directory inside the repository this module is run from.
sys.path.insert(0, os.path.join(PROJECT_PATH, "external/nle-language-wrapper"))
from nle_language_wrapper import NLELanguageWrapper
from nle_language_wrapper.nle_language_obsv import NLELanguageObsv

# Bidirectional maps between NetHack action names (str() of nle's action
# enum members) and their integer indices in the NLE action space.
NH_ACTION_STR_TO_IDX = {str(ACTIONS[i]): i for i in range(len(ACTIONS))}
NH_ACTION_IDX_TO_STR = {v: k for (k, v) in NH_ACTION_STR_TO_IDX.items()}
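
# Round-trip example (sketch; `action_str` stands for any key of the map):
#   idx = NH_ACTION_STR_TO_IDX[action_str]
#   assert NH_ACTION_IDX_TO_STR[idx] == action_str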


class UnrollLengthCriteria(StoppingCriteria):
    """Stop generation once every sequence has emitted `stop_token_id` at
    least `unroll_length` times."""

    def __init__(self, unroll_length, stop_token_id, num_return_sequences):
        assert isinstance(unroll_length, int)
        self.unroll_length = unroll_length
        self.stop_token_id = stop_token_id
        self.counts_per_sequence = torch.zeros((num_return_sequences,))

    def __call__(
        self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
    ) -> bool:
        sequences_should_be_stopped = []
        for i in range(input_ids.shape[0]):
            if input_ids[i][-1] == self.stop_token_id:
                self.counts_per_sequence[i] += 1
                if self.counts_per_sequence[i] >= self.unroll_length:
                    sequences_should_be_stopped.append(True)
                    continue
            sequences_should_be_stopped.append(False)
        return all(sequences_should_be_stopped)
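
# Usage sketch: transformers' `generate` accepts criteria wrapped in a
# StoppingCriteriaList; `model`, `inputs`, and `delimiter_token_id` are
# placeholders, not names from this repo.
#   from transformers import StoppingCriteriaList
#   criteria = UnrollLengthCriteria(
#       unroll_length=4,
#       stop_token_id=delimiter_token_id,  # e.g. an end-of-step token id
#       num_return_sequences=1,
#   )
#   out = model.generate(**inputs, stopping_criteria=StoppingCriteriaList([criteria]))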


def get_diff(prompt_uno, prompt_dos, n=0):
    """Unified diff between two prompts with `n` lines of context, with the
    leading `--- file1` / `+++ file2` header lines dropped."""
    proc_prompt_uno = prompt_uno.strip().splitlines()
    proc_prompt_dos = prompt_dos.strip().splitlines()
    out = "\n".join(
        [
            line
            for i, line in enumerate(
                difflib.unified_diff(
                    proc_prompt_uno,
                    proc_prompt_dos,
                    n=n,
                    fromfile="file1",
                    tofile="file2",
                    lineterm="",
                )
            )
            if i > 1  # skip the two file-header lines
        ]
    )
    return out
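
# Example (sketch):
#   get_diff("hp: 10\nlevel: 1", "hp: 9\nlevel: 1")
# returns the header-free hunk:
#   @@ -1 +1 @@
#   -hp: 10
#   +hp: 9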


def set_seed_everywhere(seed):
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)


def log(stats, step, is_global=False, use_wandb=False):
    """Read out and reset a dict of metric trackers, then log the values.

    NOTE: assumes a module-level `record` file-logging helper is supplied by
    the importing module; each tracker must expose `.result()` and `.reset()`.
    """
    stats_values = {}
    prefix = "global/" if is_global else "local/"
    for k, v in stats.items():
        stats_values[prefix + k] = v.result()
        v.reset()
    logging.info(stats_values)
    if not is_global:
        record.log_to_file(**stats_values)
    if use_wandb:
        import wandb  # deferred so the dependency stays optional

        wandb.log(stats_values, step=step)


def pretty_print_ttyrec(observation):
    """Render an NLE observation's `tty_chars` grid (typically 24x80 ASCII
    codes) as a printable string; prints and returns it."""
    nrows, ncols = observation["tty_chars"].shape
    ob_as_array = np.array([chr(oo) for oo in observation["tty_chars"].flatten()])
    ob_as_array = ob_as_array.reshape(nrows, ncols)  # tty_chars default shape
    rows = []
    for row in range(nrows):
        ob_row = ob_as_array[row]
        ob_row_as_str = "".join([oo for oo in ob_row])
        rows += [ob_row_as_str]
    ob_as_str = "\n".join(rows)
    print(ob_as_str)
    return ob_as_str
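
# Usage sketch (assumes nle registers its gym environments and that
# `tty_chars` is among the env's observation keys):
#   env = gym.make("NetHackScore-v0")
#   obs = env.reset()
#   pretty_print_ttyrec(obs)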


def load_hf_lm_and_tokenizer(
    model_name_or_path,
    tokenizer_name_or_path=None,
    device_map="auto",
    load_in_8bit=False,
    load_in_half=False,
    gptq_model=False,
    use_fast_tokenizer=False,
    padding_side="left",
):
    from transformers import AutoModelForCausalLM, AutoTokenizer

    if not tokenizer_name_or_path:
        tokenizer_name_or_path = model_name_or_path
    if "longformer" in tokenizer_name_or_path:
        from transformers import LongformerTokenizerFast

        tokenizer = LongformerTokenizerFast.from_pretrained(tokenizer_name_or_path)
    else:
        tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_name_or_path, use_fast=use_fast_tokenizer
        )
    tokenizer.padding_side = padding_side
    # GPT-style tokenizers often ship without a pad token; fall back to EOS.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.pad_token_id = tokenizer.eos_token_id

    if gptq_model:
        from auto_gptq import AutoGPTQForCausalLM

        model_wrapper = AutoGPTQForCausalLM.from_quantized(
            model_name_or_path, device="cuda:0", use_triton=True
        )
        model = model_wrapper.model
    elif load_in_8bit:
        model = AutoModelForCausalLM.from_pretrained(
            model_name_or_path, device_map=device_map, load_in_8bit=True
        )
    else:
        if device_map:
            model = AutoModelForCausalLM.from_pretrained(
                model_name_or_path, device_map=device_map
            )
        else:
            model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
            if torch.cuda.is_available():
                model = model.cuda()
        if load_in_half:
            print("loading in half")
            model = model.half()

    model.eval()
    # Only move the model explicitly when it was not already placed by a
    # device map or a quantized loader (those cannot simply be .cuda()'d).
    if torch.cuda.is_available() and not (device_map or load_in_8bit or gptq_model):
        model = model.cuda()
    return model, tokenizer
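
# Usage sketch (model name is illustrative):
#   model, tokenizer = load_hf_lm_and_tokenizer("gpt2", load_in_half=True)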