-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathst_Openelm_3B.py
More file actions
89 lines (75 loc) · 2.89 KB
/
st_Openelm_3B.py
File metadata and controls
89 lines (75 loc) · 2.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import streamlit as st
import time
import sys
from gradio_client import Client
# Internal usage
import os
from time import sleep
# Seed session state once per browser session; st.session_state is a
# MutableMapping, so setdefault only assigns when the key is absent.
st.session_state.setdefault("hf_model", "OpenELM_3B_Demo")
# Chat transcript — persists across Streamlit reruns.
st.session_state.setdefault("messages", [])
@st.cache_resource
def create_client():
    """Create and cache the Gradio API client for the OpenELM 3B Space.

    Returns:
        gradio_client.Client: a client bound to the "Norod78/OpenELM_3B_Demo"
        Hugging Face Space.

    The Hugging Face token is read from the HF_TOKEN environment variable
    instead of being hard-coded in source (security fix: never commit
    credentials). The old placeholder remains as a fallback so the script
    still runs — and fails at the API layer — if the variable is unset.
    """
    hf_token = os.getenv("HF_TOKEN", "hf_xxxxxxxxxxxxxxxxxxxxxxxx")
    print(f"loading the API gradio client for {st.session_state.hf_model}")
    # st.cache_resource ensures this client is built once and reused
    # across reruns and sessions.
    return Client("Norod78/OpenELM_3B_Demo", hf_token=hf_token)
# FUNCTION TO LOG ALL CHAT MESSAGES INTO chathistoryOpenELM3B.txt
def writehistory(text):
    """Append one line of chat text to the persistent history log.

    Args:
        text: the already-formatted line to log (e.g. "user: ...").

    Fix: the original called f.close() inside the `with` block — redundant,
    since the context manager closes the file on exit. The two writes are
    also merged into one.
    """
    with open("chathistoryOpenELM3B.txt", "a", encoding="utf-8") as f:
        f.write(text + "\n")
# AVATARS — Streamlit chat avatars: a single emoji or an image path
# (emoji shortcodes are not supported).
av_us = "🧑💻" # './man.png' #"🦖" #A single emoji, e.g. "🧑💻", "🤖", "🦖". Shortcodes are not supported.
av_ass = "🤖" #'./robot.png'
# Set a default model
### START STREAMLIT UI
# Page header: logo image followed by a tagline; the order of these st.*
# calls determines the on-page layout.
st.image(
    "https://github.com/fabiomatricardi/ChatBOTMastery/raw/main/OpenELMlogo.png",
)
st.markdown("### *powered by Streamlit & Gradio_client*", unsafe_allow_html=True)
# st.subheader(f"Free ChatBot using {st.session_state.hf_model}")
st.markdown("---")
# Cached Gradio client — created once per process via @st.cache_resource.
client = create_client()
# Streamlit reruns the whole script on every interaction, so the stored
# transcript is replayed here each time. The avatar is chosen per role,
# which collapses the two duplicated `with` branches into one.
for entry in st.session_state.messages:
    role_avatar = av_us if entry["role"] == "user" else av_ass
    with st.chat_message(entry["role"], avatar=role_avatar):
        st.markdown(entry["content"])
# Accept user input
if myprompt := st.chat_input("What is an AI model?"):
    # Add user message to chat history so it survives the next rerun
    st.session_state.messages.append({"role": "user", "content": myprompt})
    # Display user message in chat message container
    with st.chat_message("user", avatar=av_us):
        st.markdown(myprompt)
    usertext = f"user: {myprompt}"
    writehistory(usertext)
    # Display assistant response in chat message container.
    # FIX: pass avatar=av_ass here — the original omitted it, so the live
    # reply showed the default avatar while the replayed transcript above
    # used the robot emoji, changing appearance after each rerun.
    with st.chat_message("assistant", avatar=av_ass):
        message_placeholder = st.empty()
        full_response = ""
        # Submit the prompt to the Gradio Space; iterating the returned job
        # yields the accumulated generated text at each streaming step.
        res = client.submit(
            message=myprompt,
            request=800, # max new tokens
            param_3=0.6, # temperature
            param_4=0.9, # top-p
            param_5=50, # top-k
            param_6=1.4, # repetition penalty
            api_name="/chat",
        )
        # Show a cursor (▌) while streaming, then the final text without it.
        for r in res:
            full_response = r
            message_placeholder.markdown(r + "▌")
        message_placeholder.markdown(full_response)
        asstext = f"assistant: {full_response}"
        writehistory(asstext)
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )