# -*- coding:utf-8 -*-
if __name__ == '__main__':
    # When executed directly (see the demo at the bottom of the file), switch to
    # the parent directory before the project-local imports below.
    import os

    os.chdir("..")

import time
from typing import Dict, Any, Union

import requests

from tools.loglog import logger, simple_logger, log_err_e
from tools.new_mysql import MySQLUploader

m = MySQLUploader()

def get_openai_model(model_text: str):
    """Fuzzily map a free-form model string to a concrete OpenAI model name."""
    if "3.5" in model_text or "3.5-turbo" in model_text or "3.5turbo" in model_text:
        model = "gpt-3.5-turbo"
    elif "4o" in model_text or "gpt4o" in model_text:
        model = "gpt-4o"
    elif "4turbo" in model_text or "4-turbo" in model_text:
        model = "gpt-4-turbo"
    else:
        model = "gpt-4o"
    return model

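# Note: any model string that none of the branches above match (for example the
# "gpt-4.1" used by the __main__ demo below) falls back to "gpt-4o" when it is
# routed through get_answer_from_gpt.
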
def insert_ip_token(ip, demo_name, gpt_content, prompt_tokens, completion_tokens, total_tokens):
    """Record one GPT call (caller IP, project name, answer text and token usage) in the consumer_token table."""
    sql = "insert into consumer_token (ip,demo_name,gpt_content,prompt_tokens,completion_tokens,total_tokens) values (%s,%s,%s,%s,%s,%s)"
    m.execute_(sql, (ip, demo_name, str(gpt_content), prompt_tokens, completion_tokens, total_tokens))

def get_answer_from_gpt(question, real_ip="localhost", demo_name="无", model="gpt-4o", max_tokens=3500, temperature: float = 0,
                        json_resp: Union[Dict[Any, Any], bool] = False, n=1, check_fucn=None, sys_prompt=None):
    """Call the chat-completions proxy and return the answer text (a list of texts when n > 1), retrying up to 3 times."""
    model = get_openai_model(model)
    d2 = {"model": model, "messages": [], "max_tokens": max_tokens, "temperature": temperature, 'n': n}
    if sys_prompt:
        d2['messages'].append({"role": "system", "content": sys_prompt})
    d2['messages'].append({"role": "user", "content": question})

    # json_resp: True -> force a json_object response, False -> plain text,
    # anything else is taken as a ready-made response_format dict.
    if json_resp is True:
        d2["response_format"] = {"type": "json_object"}
    elif json_resp is False:
        pass
    else:
        d2["response_format"] = json_resp

    for num_count in range(3):
        try:
            response = requests.post('http://170.106.108.95/v1/chat/completions', json=d2)
            r_json = response.json()
            if r2 := r_json.get("choices", None):
                if n > 1:
                    gpt_res = []
                    for i in r2:
                        gpt_res.append(i["message"]["content"])
                else:
                    gpt_res = r2[0]["message"]["content"]

                gpt_content = str(gpt_res)
                prompt_tokens = r_json["usage"]["prompt_tokens"]
                completion_tokens = r_json["usage"]["completion_tokens"]
                total_tokens = r_json["usage"]["total_tokens"]
                insert_ip_token(real_ip, demo_name, gpt_content, prompt_tokens, completion_tokens, total_tokens)
                simple_logger.info(f"Question log:\n{question}\nAnswer log:\n{gpt_res}")

                if not check_fucn:
                    return gpt_res
                check_result = check_fucn(str(gpt_res))
                if check_result:
                    return gpt_res
                else:
                    raise Exception(f"Attempt {num_count + 1} of 3: GPT answer failed validation, check function: {check_fucn.__name__}")
            elif r_json.get("message") == "IP address blocked":
                print("IP address blocked")
                raise Exception("IP address blocked")
            else:
                print(f"Minor error: {question[:10]}")
                logger.error(response.text)
        except Exception as e:
            logger.info(f"Minor error, ignored: {e}")
            time.sleep(10)

    logger.critical("get_answer_from_gpt: fatal error, all 3 attempts failed")

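# Usage sketch (hypothetical caller, not part of the original module): a falsy
# check_fucn result makes get_answer_from_gpt retry, up to 3 attempts in total.
#
#   def looks_like_json(s: str) -> bool:
#       return s.strip().startswith("{")
#
#   answer = get_answer_from_gpt("Return a JSON greeting", json_resp=True,
#                                check_fucn=looks_like_json)
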
def get_article_gpt_pydantic(question, real_ip="localhost", demo_name="无", model="gpt-4.1", max_tokens=3500, temperature: float = 0, n=1,
                             check_fucn=None, sys_prompt=None):
    """
    Request an article from GPT as structured JSON (json_schema response format).

    :param question: question / prompt text
    :param real_ip: caller's real IP
    :param demo_name: project name
    :param model: model name (sent to the API as-is, not mapped by get_openai_model)
    :param max_tokens: maximum number of tokens
    :param temperature: sampling temperature
    :param n: number of completions to generate
    :param check_fucn: validation function (not used by this function)
    :param sys_prompt: system prompt
    :return: the full JSON response from the chat-completions endpoint
    """
    d2 = {
        "model": model,
        "messages": [],
        "max_tokens": max_tokens,
        "temperature": temperature,
        "n": n,
        "response_format": {
            "type": "json_schema",
            "json_schema": {
                "name": "Article",
                "schema": {
                    "$defs": {
                        "Candidate": {
                            "properties": {
                                "label": {"title": "Label", "type": "string"},
                                "text": {"title": "Text", "type": "string"},
                                "isRight": {"title": "Isright", "type": "integer"},
                            },
                            "required": ["label", "text", "isRight"],
                            "title": "Candidate",
                            "type": "object",
                        },
                        "DifficultSentence": {
                            "properties": {
                                "english": {"title": "English", "type": "string"},
                                "chinese": {"title": "Chinese", "type": "string"},
                            },
                            "required": ["english", "chinese"],
                            "title": "DifficultSentence",
                            "type": "object",
                        },
                        "Question": {
                            "properties": {
                                "trunk": {"title": "Trunk", "type": "string"},
                                "analysis": {"title": "Analysis", "type": "string"},
                                "candidates": {
                                    "items": {"$ref": "#/$defs/Candidate"},
                                    "title": "Candidates",
                                    "type": "array",
                                },
                            },
                            "required": ["trunk", "analysis", "candidates"],
                            "title": "Question",
                            "type": "object",
                        },
                    },
                    "properties": {
                        "difficultSentences": {
                            "items": {"$ref": "#/$defs/DifficultSentence"},
                            "title": "Difficultsentences",
                            "type": "array",
                        },
                        "usedMeanIds": {
                            "items": {"type": "integer"},
                            "title": "Usedmeanids",
                            "type": "array",
                        },
                        "questions": {
                            "items": {"$ref": "#/$defs/Question"},
                            "title": "Questions",
                            "type": "array",
                        },
                        "englishArticle": {"title": "Englisharticle", "type": "string"},
                        "chineseArticle": {"title": "Chinesearticle", "type": "string"},
                        "allWordAmount": {"title": "Allwordamount", "type": "integer"},
                    },
                    "required": [
                        "difficultSentences",
                        "usedMeanIds",
                        "questions",
                        "englishArticle",
                        "chineseArticle",
                        "allWordAmount",
                    ],
                    "title": "Article",
                    "type": "object",
                },
            },
        },
    }
    if sys_prompt:
        d2['messages'].append({"role": "system", "content": sys_prompt})
    d2['messages'].append({"role": "user", "content": question})

    for num_count in range(3):
        try:
            response = requests.post('http://170.106.108.95/v1/chat/completions', json=d2)
            r_json = response.json()
            simple_logger.info(f"Question log:\n{question}\nAnswer log:\n{r_json}")
            return r_json
        except requests.RequestException as e:
            logger.error(f"HTTP request error: {str(e)}")
            if num_count < 2:
                time.sleep(10)
            else:
                raise
        except Exception as e:
            log_err_e(e, "Other error")
            if num_count < 2:
                time.sleep(10)
            else:
                raise

    logger.critical("get_article_gpt_pydantic: fatal error, all 3 attempts failed")
    raise Exception("Failed to fetch the article: maximum number of retries reached")

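# Illustrative sketch (an assumption, not code from the original module): the
# hand-written json_schema above matches what Pydantic v2's model_json_schema()
# would emit for models shaped roughly like this:
#
#   from pydantic import BaseModel
#
#   class Candidate(BaseModel):
#       label: str
#       text: str
#       isRight: int
#
#   class DifficultSentence(BaseModel):
#       english: str
#       chinese: str
#
#   class Question(BaseModel):
#       trunk: str
#       analysis: str
#       candidates: list[Candidate]
#
#   class Article(BaseModel):
#       difficultSentences: list[DifficultSentence]
#       usedMeanIds: list[int]
#       questions: list[Question]
#       englishArticle: str
#       chineseArticle: str
#       allWordAmount: int
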
def parse_gpt_phon_to_tuplelist(text: str) -> list:
    """Parse the "***"-separated phonetic data returned by GPT into a list of 3-tuples."""
    result = []
    if not text:
        return []
    for i in text.split("\n"):
        ii = i.split("***")
        if len(ii) >= 3:
            result.append((ii[0].strip(), ii[1].strip(), ii[2].strip()))
    return result

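# Example of the expected input format (a hypothetical illustration; the prompt
# that produces this "***"-separated layout is not part of this file):
#   "apple***[ˈæpl]***苹果\nbanana***[bəˈnɑːnə]***香蕉"
# parses to [("apple", "[ˈæpl]", "苹果"), ("banana", "[bəˈnɑːnə]", "香蕉")].
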
if __name__ == '__main__':
    question = "hello"
    sys_prompt = "你是一个专业的英语老师,擅长根据用户提供的词汇生成对应的英语文章和中文翻译和4个配套选择题。"
    q = """下面我会为你提供两组数据,[单词组1]和[单词组2](里面包含词义id,英语单词,中文词义),优先使用[单词组1]内的单词,请根据这些单词的中文词义,生成一篇带中文翻译的考场英语文章,英语文章和中文翻译要有[标题]。注意这个单词有多个词义时,生成的英语文章一定要用提供的中文词义。并挑选一句复杂的句子和其中文翻译,放入difficultSentences。英语文章,放入"englishArticle"中。中文翻译,放入"chineseArticle"中。最终文中使用到的单词id放入"usedMeanIds"中。4个选择题,放入questions字段。questions结构下有4个选择题对象,其中trunk是[英语]问题文本,analysis是[中文]的问题分析,candidates是4个ABCD选项,内部有label是指选项序号A B C D ,text是[英语]选项文本,isRight是否正确答案1是正确0是错误。
要求:
1.必须用提供的这个词义的单词,其他单词使用常见、高中难度的的单词。文章整体难度适中,大约和中国的高中生,中国CET-6,雅思6分这样的难度标准。
2.优先保证文章语句通顺,意思不要太生硬。不要为了使用特定的单词,造成文章语义前后不搭,允许不使用个别词义。
3.文章中使用提供单词,一定要和提供单词的中文词义匹配,尤其是一词多义时,务必使用提供单词的词义。必须要用提供单词的词义。如果用到的词义与提供单词词义不一致,请不要使用这个单词。
4.生成的文章要求600词左右,可以用\\n\\n字符分段,一般5个段落左右。第一段是文章标题。
5.生成文章优先使用[单词组1]的词义,其次可以挑选使用[单词组2]的词义。允许不使用[单词组1]的个别单词,优先保证文章整体意思通顺连贯和故事完整。
提供[单词组1]:4238 penalty:惩罚, 刑罚;4591 bare:赤裸的, 无遮蔽的;4227 stable:畜舍, 马厩;4236 psychology:心理学;4245 offense:进攻, 攻势, 冒犯, 触怒, 过错;4237 innocent:清白的, 无辜的, 天真的;4228 refrigerator:冰箱, 冷库;4247 tissue:(动植物)组织;4250 awareness:察觉, 觉悟, 意识;4234 mode:方式, 模式;4224 neat:整洁, 利索;4225 statistics:统计;4251 random:任意的, 随机的;4201 laundry:洗衣房;4545 barrel:桶, 一桶之量;4249 recruit:招募, 新成员;4229 pregnant:怀孕的, 孕育的;4235 relevant:有关的, 相关联的;4252 incentive:刺激, 激励, 鼓励;4194 grave:坟墓, 墓穴;
提供[单词组2]:;
"""
    # Note: the demo call below sends the short `question` ("hello"); the long
    # article prompt in `q` is defined but not passed anywhere in this demo.
    resp = get_answer_from_gpt(question=question, temperature=0.9, sys_prompt=sys_prompt, model="gpt-4.1")
    print(type(resp))
    print(resp)