Hosting an LLM built using the SDK
In this example we will be using Streamlit. The Streamlit script should be in the same folder as this notebook. To inspect the Streamlit code, look under 'Supplementary Files'.
After testing our application we can set our configurations and start the deployment process.
We retrieve the information required for deployment from the region:
# Deployment configuration: pick an app prefix and a deployment setting
# from the region, then deploy the app files.
prefix_list = region.app_prefix_list
display(prefix_list.to_pandas())

# Choose the desired app prefix by index, e.g. prefix_list[index]
app_prefix = prefix_list[0].prefix

setting_list = region.app_deployment_setting_list
display(setting_list.to_pandas())

# Deployment setting key — ask your admin which one to use
deployment_setting_key = setting_list[1].key

prt.apps.deploy(
    deployment_setting_key=deployment_setting_key,  # deployment key (ask admin)
    prefix=app_prefix,                              # app host deployment prefix
    app_name='test',
    app_dir=None  # directory of files to deploy (None = current directory)
)
Supplementary Files
streamlit_app.py
# The below is official Streamlit + Langchain demo.
import streamlit as st
import practicuscore as prt
from langchain_practicus import ChatPracticus
# Secure the page behind Practicus authentication and set the titles.
prt.apps.secure_page(
    page_title="🦜🔗 Quickstart App"  # browser tab / page title
)
# On-page application title
st.title("🦜🔗 Quickstart App v1")
def generate_response(input_text, endpoint, api):
    """Send a prompt to a ChatPracticus model and display the reply.

    Args:
        input_text: The prompt text to send to the model.
        endpoint: The model endpoint URL.
        api: The API token (ask your admin if you do not have one).
    """
    chat_model = ChatPracticus(
        endpoint_url=endpoint,  # model endpoint URL
        api_token=api,          # API token for authentication
        model_id="model",
        verify_ssl=True,
    )
    # Invoke the model with the prompt and render its content in an info box.
    st.info(chat_model.invoke(input_text).content)
# Question form: collects the endpoint URL, API token, and prompt text,
# then queries the model when submitted.
with st.form("my_form"):
    endpoint = st.text_input('Enter your end point url:')
    api = st.text_input('Enter your api token:')
    text = st.text_area(
        "Enter text:",
        "Who is Einstein ?",
    )
    # Submit button; True only on the run where the form was submitted.
    submitted = st.form_submit_button("Submit")
    if submitted:
        generate_response(text, endpoint, api)
Previous: Build | Next: Stream > Sdk Streamlit Hosting