Out of boredom I put together a command-line DeepSeek chat client. Replace the API key with your own as described in the source code (you need to apply for one on the official DeepSeek site), and feel free to modify it however you like.
The code is as follows:
# Install the required libraries (-i https://pypi.tuna.tsinghua.edu.cn/simple temporarily selects the Tsinghua mirror inside China to speed up downloads)
#
# pip install openai -i https://pypi.tuna.tsinghua.edu.cn/simple
# pip install colorama requests -i https://pypi.tuna.tsinghua.edu.cn/simple
#
# Aliyun mirror: https://mirrors.aliyun.com/pypi/simple/
# USTC mirror: https://pypi.mirrors.ustc.edu.cn/simple/
# (Note: this script itself only imports requests, json and colorama; the openai package is not actually used here.)
import requests
import json
from colorama import Fore, Back, Style, init
# Initialize colorama (handles terminal colors automatically on Windows and Linux/macOS)
init(autoreset=True)
# Replace with your DeepSeek API Key
API_KEY = "★★★ Replace with your own DeepSeek API Key, and keep the surrounding quotes! ★★★"
# DeepSeek API endpoint
API_URL = "https://api.deepseek.com/v1/chat/completions"
def call_deepseek_api_stream(messages, model, temperature):
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "stream": True,
    }
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    if response.status_code != 200:
        yield {"error": f"API request failed with status code {response.status_code}", "response": response.text}
        return
    for line in response.iter_lines():
        if line:
            decoded_line = line.decode('utf-8')
            if decoded_line.startswith("data:"):
                data = decoded_line[5:].strip()
                if data == "[DONE]":
                    break
                try:
                    chunk = json.loads(data)
                    if chunk.get("usage"):  # the final chunk carries the token usage statistics
                        yield {"usage": chunk["usage"]}
                    yield chunk
                except json.JSONDecodeError:
                    continue
def count_tokens(text):
    """Rough token estimate (the API's own usage numbers are authoritative)"""
    return len(text) // 4  # roughly 1 token ≈ 4 characters for English, ≈ 2 characters for Chinese
def get_temperature_recommendation():
    """Show recommended temperature values for different task types"""
    print("\nChoose a temperature value based on your task type (recommended ranges):")
    print("1. Code generation / math problems: 0.0")
    print("2. Data extraction / analysis: 1.0")
    print("3. General conversation: 1.3 (default)")
    print("4. Translation: 1.3")
    print("5. Creative writing / poetry: 1.5")
    while True:
        user_input = input("Enter a temperature value (0.0-2.0, press Enter for the default 1.3): ").strip()
        if not user_input:  # user pressed Enter directly
            return 1.3  # default value
        try:
            temp = float(user_input)
            if 0.0 <= temp <= 2.0:
                return temp
            print("Value out of range, please enter a number between 0.0 and 2.0")
        except ValueError:
            print("Please enter a valid number, or press Enter for the default")
def print_separator(title=None):
    """Print a separator line with an optional title"""
    sep = "=" * 60
    if title:
        print(f"\n{Fore.YELLOW}{sep}")
        print(f"{Fore.CYAN}{title.center(60)}")
        print(f"{Fore.YELLOW}{sep}{Style.RESET_ALL}")
    else:
        print(f"{Fore.YELLOW}{sep}{Style.RESET_ALL}")
def main():
    # Model selection
    print_separator("Model Selection")
    print(f"{Fore.GREEN}Please choose a model:{Style.RESET_ALL}")
    print(f"1. {Fore.BLUE}DeepSeek-V3{Style.RESET_ALL} (default)")
    print(f"2. {Fore.MAGENTA}DeepSeek-R1{Style.RESET_ALL} (shows its reasoning process)")
    model_choice = input("Enter a number to choose a model (1/2): ")
    if model_choice == "2":
        model = "deepseek-reasoner"
        show_reasoning = True
    else:
        model = "deepseek-chat"  # default V3 model
        show_reasoning = False
    # Get the temperature value (Enter accepts the default)
    temperature = get_temperature_recommendation()
    # Initialize the system prompt (defined outside the loop so it can be reused)
    system_prompt = "You are a helpful AI assistant."
    if show_reasoning:
        system_prompt += " Please show your reasoning step by step."
    # Outer loop, used to reset the conversation
    while True:
        # Initialize the conversation history (reset for every new conversation)
        conversation_history = [
            {"role": "system", "content": system_prompt}
        ]
        total_tokens = 0
        print_separator("Conversation Start")
        print(f"{Fore.GREEN}Current settings - model: {model}, temperature: {temperature}{Style.RESET_ALL}")
        print(f"Type '{Fore.RED}quit!{Style.RESET_ALL}' to exit the program")
        print(f"Type '{Fore.BLUE}new!{Style.RESET_ALL}' to start a new conversation\n")
        # Inner loop, handles a single conversation
        while True:
            user_input = input(f"{Fore.RED}You: {Style.RESET_ALL}")
            if user_input.lower() == "quit!":
                print_separator("Conversation End")
                print(f"{Fore.CYAN}Total tokens used: {total_tokens}{Style.RESET_ALL}")
                return  # exit the program completely
            if user_input.lower() == "new!":
                print_separator("New Conversation")
                break  # leave the inner loop and start a new conversation
            # Append the user message to the conversation history
            conversation_history.append({"role": "user", "content": user_input})
            user_tokens = count_tokens(user_input)
            total_tokens += user_tokens
            print(f"{Fore.BLUE}AI: {Style.RESET_ALL}", end="", flush=True)
            full_reply = ""
            reasoning_content = ""
            current_reasoning = ""
            current_tokens = 0
            is_reasoning_block = False
            # Stream the AI reply
            for chunk in call_deepseek_api_stream(conversation_history, model, temperature):
                if "error" in chunk:
                    print(f"\n{Fore.RED}Something went wrong: {chunk['error']}{Style.RESET_ALL}")
                    break
                elif "usage" in chunk:
                    current_tokens = chunk["usage"]["total_tokens"]
                    continue
                choices = chunk.get("choices") or [{}]  # guard against chunks with an empty choices list
                delta = choices[0].get("delta", {})
                # Handle the reasoning process
                if show_reasoning and "reasoning_content" in delta:
                    reasoning = delta["reasoning_content"]
                    if reasoning:
                        if not is_reasoning_block:
                            print(f"\n{Fore.CYAN}╔{'Reasoning':═^58}╗{Style.RESET_ALL}")
                            print(f"{Fore.CYAN}║ {Style.RESET_ALL}", end="")
                            is_reasoning_block = True
                        print(f"{Fore.CYAN}{reasoning}{Style.RESET_ALL}", end="", flush=True)
                        current_reasoning += reasoning
                        reasoning_content += reasoning
                # Handle the normal reply content
                content = delta.get("content", "")
                if content:
                    if is_reasoning_block:
                        print(f"\n{Fore.CYAN}╚{'':═^58}╝{Style.RESET_ALL}")
                        print(f"{Fore.GREEN}╔{'Answer':═^58}╗{Style.RESET_ALL}")
                        print(f"{Fore.GREEN}║ {Style.RESET_ALL}", end="")
                        is_reasoning_block = False
                    print(f"{Fore.GREEN}{content}{Style.RESET_ALL}", end="", flush=True)
                    full_reply += content
            # Close the current output box
            if is_reasoning_block:
                print(f"\n{Fore.CYAN}╚{'':═^58}╝{Style.RESET_ALL}")
            elif full_reply:
                print(f"\n{Fore.GREEN}╚{'':═^58}╝{Style.RESET_ALL}")
            # Append the AI reply to the conversation history
            if full_reply:
                if show_reasoning and reasoning_content:
                    assistant_reply = f"[Reasoning]\n{reasoning_content}\n[Answer]\n{full_reply}"
                else:
                    assistant_reply = full_reply
                conversation_history.append({
                    "role": "assistant",
                    "content": assistant_reply
                })
                reply_tokens = count_tokens(full_reply)
                if current_tokens == 0:
                    current_tokens = user_tokens + reply_tokens
                total_tokens += current_tokens - user_tokens
                print(f"\n{Fore.YELLOW}[This round: {current_tokens} tokens | total: {total_tokens} tokens]{Style.RESET_ALL}")
if __name__ == "__main__":
    main()
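
Since the post invites tinkering, here is one small optional tweak that is not part of the original script: reading the key from an environment variable instead of hardcoding it in the source. This is only a sketch; the variable name DEEPSEEK_API_KEY is my own assumption, not something the script or the API requires.

```python
# Optional sketch (not in the original script): load the API key from an
# environment variable instead of editing the source file. The variable name
# DEEPSEEK_API_KEY is an assumed convention - set it yourself first, e.g.
#   export DEEPSEEK_API_KEY="sk-..."   (Linux/macOS)
#   set DEEPSEEK_API_KEY=sk-...        (Windows cmd)
import os

API_KEY = os.environ.get("DEEPSEEK_API_KEY", "")
if not API_KEY:
    raise SystemExit("Please set the DEEPSEEK_API_KEY environment variable first.")
```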
![Image 1 - Personal command-line DeepSeek chat client: build your own assistant](http://www.oilmaxhydraulic.com.cn/wp-content/uploads/2025/07/d2b5ca33bd20250725131453-1024x501.png)
![Image 2 - Personal command-line DeepSeek chat client: build your own assistant](http://www.oilmaxhydraulic.com.cn/wp-content/uploads/2025/07/d2b5ca33bd20250725131520-1024x486.png)