Trying to run Gemini on streamlit, help me regarding this code any help appriciated.
import vertexai
import streamlit as st
from vertexai.preview.generative_models import GenerationConfig, GenerativeModel, Part, Content, ChatSession
project = 'gold-hold-418319'
vertexai.init(project=project)
config = GenerationConfig(
temperature=0.4,
top_k=10
)
# Load model with config defined
model = GenerativeModel(
'gemini-pro',
generation_config=config,
)
chat = model.start_chat()
# Helper function
def llm_function(chat: ChatSession, query: str) -> None:
"""
Processes a user query and displays the response in the Streamlit app.
Keyword arguments:
chat -- ChatSession,
query -- text containing message.
"""
responses = chat.send_message(query)
output = responses.candidates[0].content.parts[0].text
with st.chat_message("model"):
st.markdown(output)
st.session_state.messages.append({
'role': 'user',
'content': query
})
st.session_state.messages.append({
'role': 'model',
'content': output
})
# Setting up title
st.title('Gemini Explorer')
# Initialize chat history
if 'messages' not in st.session_state:
st.session_state.messages = []
# Display and load chat history
for index, message in enumerate(st.session_state.messages):
content = Content(
role=message['role'],
parts=Part.from_text(message['content'])
)
if index != 0:
with st.chat_message(message['role']):
st.markdown(message['content'])
# chat.history.append(content)
# for initial message startup
if len(st.session_state.messages) == 0:
initial_prompt = "Introduce yourself as ReX, an assistant powered by Google Gemini. You use emojis to be interactive"
llm_function(chat, initial_prompt)
# To capture user input
query = st.chat_input('Gemini Explorer')
if query:
with st.chat_message('user'):
st.markdown(query)
llm_function(chat=chat, query=query)
I am trying it to run on streamlit but got this error Part is not iterable.
full error:
TypeError: argument of type 'Part' is not iterable
Traceback: File "/Users/vrajmalvi/miniconda3/envs/tensorflow/lib/python3.9/site-packages/streamlit/runtime/scriptrunner/script_runner.py", line 542, in _run_script exec(code, module.__dict__) File "/Users/vrajmalvi/Documents/GitHub/Radical_AI/gemini_explorer.py", line 74, in <module> llm_function(chat, initial_prompt) File "/Users/vrajmalvi/Documents/GitHub/Radical_AI/gemini_explorer.py", line 34, in llm_function output = responses.candidates[0].content.parts[0].text File "/Users/vrajmalvi/miniconda3/envs/tensorflow/lib/python3.9/site-packages/vertexai/generative_models/_generative_models.py", line 1612, in text if "text" not in self._raw_part:
Not sure how to solve any help would be appritiated.
tried updating libries as well.