Update handler.py

handler.py (changed: +6 -12)
@@ -1,21 +1,16 @@
 # handler.py
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
-from peft import PeftModel
 
-
-BASE_MODEL = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
-ADAPTER_PATH = "."
+MODEL_PATH = "."
 
 class EndpointHandler:
     def __init__(self, path=""):
-        print("Loading
-        self.tokenizer = AutoTokenizer.from_pretrained(
-
-
+        print("Loading merged model...")
+        self.tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
+        self.model = AutoModelForCausalLM.from_pretrained(
+            MODEL_PATH, torch_dtype=torch.float16, device_map="auto", trust_remote_code=True
         )
-        self.model = PeftModel.from_pretrained(base_model, ADAPTER_PATH)
-        self.model = self.model.merge_and_unload()
         self.model.eval()
         print("Model loaded successfully.")
 
@@ -32,5 +27,4 @@ class EndpointHandler:
             pad_token_id=self.tokenizer.eos_token_id,
             eos_token_id=self.tokenizer.eos_token_id,
         )
-
-        return {"generated_text": text}
+        return {"generated_text": self.tokenizer.decode(outputs[0], skip_special_tokens=True)}
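The second hunk shows only the tail of the request path (the generate kwargs and the return value), so the body of __call__ is not visible in this diff. For orientation, here is a sketch of the conventional Inference Endpoints shape those fragments imply; the "inputs"/"parameters" handling and the max_new_tokens default are assumptions, not the repo's exact code.

# Sketch of EndpointHandler.__call__ consistent with the visible fragments.
def __call__(self, data):
    prompt = data["inputs"]
    params = data.get("parameters", {})
    # Tokenize on the model's device; device_map="auto" placed the weights.
    inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
    with torch.no_grad():
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=params.get("max_new_tokens", 256),  # assumed default
            pad_token_id=self.tokenizer.eos_token_id,
            eos_token_id=self.tokenizer.eos_token_id,
        )
    return {"generated_text": self.tokenizer.decode(outputs[0], skip_special_tokens=True)}

A quick local smoke test before deploying: EndpointHandler()({"inputs": "Hello"}) should return a dict with a single generated_text string.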