1) Implement Cmd+Enter to trigger translation; 2) split the prompt into a system prompt and a user prompt

This commit is contained in:
deng
2025-11-03 10:13:16 +08:00
parent 64c1049d7b
commit f1d75c9d2f

View File

@ -4,9 +4,8 @@
# date : 20250604 # date : 20250604
import streamlit as st import streamlit as st
from langchain.chains import LLMChain from langchain.prompts import ChatPromptTemplate
from langchain.prompts import PromptTemplate from langchain_core.runnables import Runnable
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
from utils import parse_config from utils import parse_config
@ -19,13 +18,14 @@ class TranslatorApp:
self._config = parse_config(config_path) self._config = parse_config(config_path)
self._chain = self._prepare_chain() self._chain = self._prepare_chain()
def _prepare_chain(self) -> LLMChain: def _prepare_chain(self) -> Runnable:
"""Prepare the chain for translation""" """Prepare the chain for translation"""
template = ( system_template = (
'你是專業的翻譯人員,請判斷這段句子「{input_text}的語言是否為{source_lang},若非的' '你是專業的翻譯人員,請判斷接下來句子的語言是否為{source_lang},若是的話則請將該句翻譯成{target_lang}'
'話則請回傳一模一樣的句子,若是的話則請將該句翻譯成{target_lang}並且符合{description}' '並且符合{description}(僅回傳翻譯結果即可),若非的話則請回傳一模一樣的句子。'
'(僅回傳翻譯結果即可)。'
) )
user_template = '{input_text}'
if self._config['app']['llm_mode'] == 'ollama': if self._config['app']['llm_mode'] == 'ollama':
llm = ChatOllama( llm = ChatOllama(
base_url=self._config['app']['ollama']['url'], base_url=self._config['app']['ollama']['url'],
@ -45,10 +45,11 @@ class TranslatorApp:
) )
else: else:
raise ValueError(f'Unsupported llm model: {self._config['app']['llm_mode']}') raise ValueError(f'Unsupported llm model: {self._config['app']['llm_mode']}')
prompt = PromptTemplate(
input_variables=['input_text', 'source_lang', 'target_lang', 'description'], prompt = ChatPromptTemplate.from_messages([
template=template ('system', system_template),
) ('human', user_template)
])
return prompt | llm return prompt | llm
def run(self) -> None: def run(self) -> None:
@ -67,32 +68,31 @@ class TranslatorApp:
key='lang_choice', key='lang_choice',
horizontal=True horizontal=True
) )
input_text = st.text_area( input_text = st.text_area(
label='輸入', label='輸入',
placeholder='請輸入文字', placeholder='請輸入文字',
key='input_text' key='input_text'
) )
translate_button = st.button('翻譯')
output_container = st.empty() output_container = st.empty()
if st.button('翻譯'):
if input_text or translate_button:
if not input_text.strip(): if not input_text.strip():
st.warning("請輸入要翻譯的文字") st.warning("請輸入要翻譯的文字")
return else:
with st.spinner('翻譯中...'):
with st.spinner('翻譯中...'): result = self._chain.stream({
'input_text': input_text,
'source_lang': self._config['app']['lang_directions'][direction]['source_lang'],
'target_lang': self._config['app']['lang_directions'][direction]['target_lang'],
'description': self._config['app']['lang_directions'][direction]['description']
})
result = self._chain.stream({ output_container.write_stream(
'input_text': input_text, stream=result
'source_lang': self._config['app']['lang_directions'][direction]['source_lang'], )
'target_lang': self._config['app']['lang_directions'][direction]['target_lang'],
'description': self._config['app']['lang_directions'][direction]['description']
})
output_container.write_stream(
stream=result
)
if __name__ == '__main__': if __name__ == '__main__':