home.py

import streamlit as st
from PIL import Image
import google.generativeai as genai

st.set_page_config(page_title="Gemini Pro with Streamlit", page_icon="♊")

st.write("Welcome to the Gemini Pro Dashboard. You can proceed by providing your Google API Key")

# Ask for the API key before anything else; stop the script until it is provided.
with st.expander("Provide Your Google API Key"):
    google_api_key = st.text_input("Google API Key", key="google_api_key", type="password")

if not google_api_key:
    st.info("Enter the Google API Key to continue")
    st.stop()

genai.configure(api_key=google_api_key)

st.title("Gemini Pro with Streamlit Dashboard")

with st.sidebar:
    option = st.selectbox("Choose Your Model", ("gemini-pro", "gemini-pro-vision"))

    # Recreate the chat session whenever the selected model changes.
    if "model" not in st.session_state or st.session_state.model != option:
        st.session_state.chat = genai.GenerativeModel(option).start_chat(history=[])
        st.session_state.model = option

    st.write("Adjust Your Parameter Here:")
    temperature = st.number_input("Temperature", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
    max_token = st.number_input("Maximum Output Token", min_value=0, value=100)
    gen_config = genai.types.GenerationConfig(max_output_tokens=max_token, temperature=temperature)

    st.divider()

    st.markdown("""<span><font size=1>Connect With Me</font></span>""", unsafe_allow_html=True)
    "[Linkedin](https://www.linkedin.com/in/cornellius-yudha-wijaya/)"
    "[GitHub](https://github.com/cornelliusyudhawijaya)"

    st.divider()

    upload_image = st.file_uploader("Upload Your Image Here", accept_multiple_files=False, type=["jpg", "png"])

    if upload_image:
        image = Image.open(upload_image)

    st.divider()

    if st.button("Clear Chat History"):
        # Reset the conversation to the initial greeting.
        st.session_state["messages"] = [{"role": "assistant", "content": "Hi there. Can I help you?"}]

if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "assistant", "content": "Hi there. Can I help you?"}]

# Replay the conversation stored in session state.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

if upload_image:
    # Image input is only supported by the vision model.
    if option == "gemini-pro":
        st.info("Please Switch to the Gemini Pro Vision")
        st.stop()

    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.chat_message("user").write(prompt)

        response = st.session_state.chat.send_message([prompt, image], stream=True, generation_config=gen_config)
        response.resolve()
        msg = response.text

        # gemini-pro-vision does not support multi-turn chat, so start a fresh session after each reply.
        st.session_state.chat = genai.GenerativeModel(option).start_chat(history=[])

        st.session_state.messages.append({"role": "assistant", "content": msg})
        st.image(image, width=300)
        st.chat_message("assistant").write(msg)
else:
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.chat_message("user").write(prompt)

        response = st.session_state.chat.send_message(prompt, stream=True, generation_config=gen_config)
        response.resolve()
        msg = response.text

        st.session_state.messages.append({"role": "assistant", "content": msg})
        st.chat_message("assistant").write(msg)