-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmake_model.py
More file actions
41 lines (33 loc) · 1.26 KB
/
make_model.py
File metadata and controls
41 lines (33 loc) · 1.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import random
import dill
class MyModel:
    """Toy text-generation model served by Backprop.

    "Generates" text by appending randomly chosen words from a small
    fixed vocabulary to the caller-supplied prompt.
    """

    def __init__(self):
        # Fixed word pool the generator samples from.
        self.vocabulary = ["where", "what", "monkey", "backprop", "power", "is"]

    def generate_text(self, input_text, min_length):
        """Return ``input_text`` followed by ``min_length`` random words.

        Args:
            input_text: Prompt string placed at the start of the output.
            min_length: Number of random vocabulary words to append
                (0 yields just the prompt).

        Returns:
            A single space-joined string.
        """
        words = [input_text]
        words.extend(random.choice(self.vocabulary) for _ in range(min_length))
        return " ".join(words)

    def __call__(self, params, task="text-generation"):
        """Dispatch a Backprop request to the matching task handler.

        Args:
            params: Task parameters per Backprop's task specification;
                must contain "text", may contain "min_length" (default 5).
            task: Task name; only "text-generation" is supported.

        Returns:
            The generated string (JSON-serializable, as Backprop requires).

        Raises:
            ValueError: If ``task`` is unsupported or "text" is missing.
        """
        if task != "text-generation":
            raise ValueError("Unsupported task!")
        text = params.get("text")
        if text is None:
            # Fail fast with a clear message instead of letting a missing
            # "text" surface as an opaque TypeError inside str.join.
            raise ValueError("Missing required parameter: text")
        min_length = params.get("min_length", 5)
        return self.generate_text(text, min_length)
# Instantiate the model object that Backprop will serve.
model = MyModel()

# Simulate how Backprop invokes the model for the /text-generation
# endpoint: the API parameters arrive in `params` and the task is set
# to "text-generation".
demo_params = {"text": "Test", "min_length": 3}
print(model(demo_params, task="text-generation"))

# Enable recursive serialization so dill also captures dependencies.
dill.settings["recurse"] = True

# Pickle the model object into a file called model.bin.
with open("model.bin", "wb") as f:
    dill.dump(model, f)