1) Implement Cmd+Enter to apply (trigger translation); 2) split the single prompt into separate system and user prompts

This commit is contained in:
deng
2025-11-03 10:13:16 +08:00
parent 64c1049d7b
commit f1d75c9d2f

View File

@ -4,9 +4,8 @@
# date : 20250604
import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI
from utils import parse_config
@ -19,13 +18,14 @@ class TranslatorApp:
self._config = parse_config(config_path)
self._chain = self._prepare_chain()
def _prepare_chain(self) -> LLMChain:
def _prepare_chain(self) -> Runnable:
"""Prepare the chain for translation"""
template = (
'你是專業的翻譯人員,請判斷這段句子「{input_text}的語言是否為{source_lang},若非的'
'話則請回傳一模一樣的句子,若是的話則請將該句翻譯成{target_lang}並且符合{description}'
'(僅回傳翻譯結果即可)。'
system_template = (
'你是專業的翻譯人員,請判斷接下來句子的語言是否為{source_lang},若是的話則請將該句翻譯成{target_lang}'
'並且符合{description}(僅回傳翻譯結果即可),若非的話則請回傳一模一樣的句子。'
)
user_template = '{input_text}'
if self._config['app']['llm_mode'] == 'ollama':
llm = ChatOllama(
base_url=self._config['app']['ollama']['url'],
@ -45,10 +45,11 @@ class TranslatorApp:
)
else:
raise ValueError(f'Unsupported llm model: {self._config['app']['llm_mode']}')
prompt = PromptTemplate(
input_variables=['input_text', 'source_lang', 'target_lang', 'description'],
template=template
)
prompt = ChatPromptTemplate.from_messages([
('system', system_template),
('human', user_template)
])
return prompt | llm
def run(self) -> None:
@ -67,32 +68,31 @@ class TranslatorApp:
key='lang_choice',
horizontal=True
)
input_text = st.text_area(
label='輸入',
placeholder='請輸入文字',
key='input_text'
)
translate_button = st.button('翻譯')
output_container = st.empty()
if st.button('翻譯'):
if input_text or translate_button:
if not input_text.strip():
st.warning("請輸入要翻譯的文字")
return
else:
with st.spinner('翻譯中...'):
with st.spinner('翻譯中...'):
result = self._chain.stream({
'input_text': input_text,
'source_lang': self._config['app']['lang_directions'][direction]['source_lang'],
'target_lang': self._config['app']['lang_directions'][direction]['target_lang'],
'description': self._config['app']['lang_directions'][direction]['description']
})
result = self._chain.stream({
'input_text': input_text,
'source_lang': self._config['app']['lang_directions'][direction]['source_lang'],
'target_lang': self._config['app']['lang_directions'][direction]['target_lang'],
'description': self._config['app']['lang_directions'][direction]['description']
})
output_container.write_stream(
stream=result
)
output_container.write_stream(
stream=result
)
if __name__ == '__main__':