app.py
import streamlit as st
from langchain_ollama import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import (
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    AIMessagePromptTemplate,
    ChatPromptTemplate
)
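
# Streamlit front end for chatting with a local DeepSeek-R1 model served by
# Ollama; LangChain prompt templates assemble the running conversation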
# Custom CSS styling
st.markdown("""
<style>
    /* Existing styles */
    .main {
        background-color: #1a1a1a;
        color: #ffffff;
    }
    .sidebar .sidebar-content {
        background-color: #2d2d2d;
    }
    .stTextInput textarea {
        color: #ffffff !important;
    }
    /* New styles for the select box */
    .stSelectbox div[data-baseweb="select"] {
        color: white !important;
        background-color: #3d3d3d !important;
    }
    .stSelectbox svg {
        fill: white !important;
    }
    .stSelectbox option {
        background-color: #2d2d2d !important;
        color: white !important;
    }
    /* For dropdown menu items */
    div[role="listbox"] div {
        background-color: #2d2d2d !important;
        color: white !important;
    }
</style>
""", unsafe_allow_html=True)
st.title("🧠 DeepSeek Companion")
st.caption("🚀 Your AI Pair Programmer with Debugging Superpowers")
# Sidebar configuration
with st.sidebar:
    st.header("⚙️ Configuration")
    selected_model = st.selectbox(
        "Choose Model",
        # "3b" is not a published deepseek-r1 tag on Ollama; 7b is
        ["deepseek-r1:1.5b", "deepseek-r1:7b"],
        index=0
    )
    st.divider()
    st.markdown("### Model Capabilities")
    st.markdown("""
    - 🐍 Python Expert
    - 🐞 Debugging Assistant
    - 📝 Code Documentation
    - 💡 Solution Design
    """)
    st.divider()
    st.markdown("Built with [Ollama](https://ollama.ai/) | [LangChain](https://python.langchain.com/)")
# Initialize the chat engine
llm_engine = ChatOllama(
    model=selected_model,
    base_url="http://localhost:11434",
    temperature=0.3
)
# System prompt configuration
system_prompt = SystemMessagePromptTemplate.from_template(
"You are an expert AI coding assistant. Provide concise, correct solutions "
"with strategic print statements for debugging. Always respond in English."
)
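# The system prompt above is prepended to every request in build_prompt_chain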
# Session state management
if "message_log" not in st.session_state:
    st.session_state.message_log = [{"role": "ai", "content": "Hi! I'm DeepSeek. How can I help you code today? 💻"}]
# Chat container
chat_container = st.container()
# Display chat messages
with chat_container:
    for message in st.session_state.message_log:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
# Chat input and processing
user_query = st.chat_input("Type your coding question here...")
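# st.chat_input renders the input box and returns None until the user submits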
def generate_ai_response(prompt_chain):
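    # LCEL pipeline: prompt -> model -> plain string; invoke({}) takes no
    # inputs because the chat history is already baked into the templates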
    processing_pipeline = prompt_chain | llm_engine | StrOutputParser()
    return processing_pipeline.invoke({})
def build_prompt_chain():
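    # Rebuild the whole conversation as a prompt so the model sees full history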
    prompt_sequence = [system_prompt]
    for msg in st.session_state.message_log:
        # Escape literal braces so code in the history isn't parsed as
        # template variables by from_template
        content = msg["content"].replace("{", "{{").replace("}", "}}")
        if msg["role"] == "user":
            prompt_sequence.append(HumanMessagePromptTemplate.from_template(content))
        elif msg["role"] == "ai":
            prompt_sequence.append(AIMessagePromptTemplate.from_template(content))
    return ChatPromptTemplate.from_messages(prompt_sequence)
if user_query:
    # Add user message to log
    st.session_state.message_log.append({"role": "user", "content": user_query})

    # Generate AI response
    with st.spinner("🧠 Processing..."):
        prompt_chain = build_prompt_chain()
        ai_response = generate_ai_response(prompt_chain)

    # Add AI response to log
    st.session_state.message_log.append({"role": "ai", "content": ai_response})

    # Rerun to update chat display
    st.rerun()