# -*- coding: UTF-8 -*-
"""Builds the Word study-plan document for the 鲍利提分 mini program;
the matching Apifox interface is 单词教学宝 - 词汇突击学案文档生成接口."""
import time
import re
import os
import math
import yaml
from random import randint, shuffle

from docx.shared import Pt, Inches, Cm, RGBColor
from docx.enum.text import WD_COLOR_INDEX

from make_docx_demo.data import *
from docx_base import Word, Table, hex_to_rgb, rgb_to_hex, ParagraphBase
from make_docx_demo.docx_other_func import time_use, qrcode_maker, get_weekday
from tools.loglog import logger, log_err_e
from make_docx_demo.word2pdf import convert_word_to_pdf
from make_docx_demo.get_standard_data import get_standard_data
from common.split_text import split_text_to_word_punctuation
from config.read_config import address

# Circled numbers ❶-⓴ used to label the strange-word cards in section_4_1.
num_dict = {1: "❶", 2: "❷", 3: "❸", 4: "❹", 5: "❺", 6: "❻", 7: "❼", 8: "❽", 9: "❾", 10: "❿",
            11: "⓫", 12: "⓬", 13: "⓭", 14: "⓮", 15: "⓯", 16: "⓰", 17: "⓱", 18: "⓲", 19: "⓳", 20: "⓴"}
@time_use
def header_maker(docx: Word, json_data):
    exercise_id = str(json_data.get("ExerciseId", "")).rjust(11, "0")
    exercise_title = json_data.get("ExerciseTitle", "")
    exercise_level = json_data['StudentInfo']['StudentStudy']['ReadingLevel']
    student_name = json_data.get("StudentInfo").get("StudentName", '')
    class_name = json_data.get("StudentInfo").get("ClassName", '').replace("词汇突击", "")
    t_date = time.strftime("%Y-%m-%d", time.localtime())
    t_weekday = get_weekday()
    t_time = time.strftime("%H:%M:%S", time.localtime())

    for i in range(1, len(docx.doc.sections) - 1):
        tb_header = docx.add_header_table(rows=1, cols=5, section_index=i, tb_name="页眉表格")
        tb_header.set_cell_text(0, 0, "鲍利提分", bold=True, size=16, color=(220, 220, 220), border=False, chinese_font_name="黑体")
        tb_header.set_cell_text(0, 1, f"{class_name}\n{student_name}", size=8, border=False, color=(220, 220, 220))
        tb_header.set_cell_text(0, 2, f"词汇训练\n{exercise_level}级", size=8, border=False, color=(220, 220, 220))
        tb_header.set_cell_text(0, 3, f"{exercise_id}", bold=True, size=24, border=False, color=(220, 220, 220))
        tb_header.set_cell_text(0, 4, f"{t_date}\n{t_weekday}\n{t_time}", size=8, border=False, color=(220, 220, 220))
        tb_header.set_tb_colum_width(width=[100, 70, 70, 150, 80])

    target_section = docx.doc.sections[-1]
    target_section.header.is_linked_to_previous = False
    for paragraph in target_section.header.paragraphs:
        paragraph.clear()
    target_section.header_distance = 0
    target_section.footer_distance = 280000


@time_use
def sub_title_maker(docx: Word, main_title, sub_title_name1, sub_title_name2='鲍利提分,你的智能教练'):
    p = docx.add_blank_paragraph()
    line_width = 200
    main_rect_x = line_width + 10
    main_rect_width = 150
    right_line_x = main_rect_x + main_rect_width + 10
    p.add_rectangle(main_title, x=main_rect_x, y=4, fill_color="000000", width=main_rect_width, height=48,
                    font_color="ffffff", font_size=18)
    p.add_rectangle("", x=0, y=50, boder_color="808080", width=line_width, height=2)
    p.add_rectangle("", x=right_line_x, y=50, boder_color="808080", width=line_width, height=2)
    p.add_rectangle(f"【{sub_title_name1}】", x=0, y=20, width=line_width, height=40, font_size=8, chinese_font="宋体")
    p.add_rectangle(sub_title_name2, x=right_line_x, y=20, width=line_width, height=40, font_color="808080", font_size=8, chinese_font="宋体")
    docx.add_blank_paragraph()
    docx.add_blank_paragraph()
    docx.add_blank_paragraph()


@time_use
def section_1(docx: Word, json_data, *args, **kwargs):
    exercise_id_int = json_data.get("ExerciseId", "")
    student_name = json_data.get("StudentInfo").get("StudentName", '')
    student_stage = json_data.get("StudentInfo").get("StudentStage")
    grade_name = {1: "小学", 2: "初中", 3: "高中"}.get(student_stage)
    t_date_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    totalVocabulary, readingAccuracy, readingLevel, readingSpeed = get_standard_data(student_stage)

    FirstVocabulary = json_data['StudentInfo']['StudentStudy']['FirstVocabulary']
    Vocabulary = json_data['StudentInfo']['StudentStudy']['Vocabulary']
    ReadingVolume = json_data['StudentInfo']['StudentStudy']['ReadingVolume']
    r6 = json_data['StudentInfo']['StudentStudy']['ReadingLevel']
    r7 = len([strange_words for exercise in json_data['WordAndArticleContents'] for strange_words in exercise['StrangeWords']])
    r8 = r6
    multi_article_difficulty = [article_obj['Score'] for article_obj in json_data['WordAndArticleContents'][0]['Articles']]
    difficulty_value = sum(multi_article_difficulty) // len(multi_article_difficulty) if multi_article_difficulty else 0
    InspirationalMessage = json_data.get('InspirationalMessage')

    # 开始版面 -------------------------------------------------
    docx.add_paragraph(text="鲍利提分个性化学案", size=20, align="center", bold=True)
    docx.add_paragraph(text="AI解码英语基因,智能重组高分密码", size=14, align="center")
    docx.add_blank_paragraph()

    docx.add_paragraph(text="学生基本情况", size=16, align="left", bold=True, dq=10, dh=5)
    t1 = Table(docx, 0, 3, border=True, tb_name="学生基本情况")
    t1.add_table_row_data_xml_fastly(["姓名", "年级", "初始词汇量"])
    t1.add_table_row_data_xml_fastly([student_name, grade_name, FirstVocabulary])
    t1.add_table_row_data_xml_fastly(["当前词汇量", "学段总词汇量", "累计阅读量"])
    t1.add_table_row_data_xml_fastly([Vocabulary, totalVocabulary, ReadingVolume])
    t1.set_all_border_fastly(xml=True)
    t1.set_ALIGN_VERTICAL()
    t1.set_row_height(row_height=20)
    docx.add_blank_paragraph()

    docx.add_paragraph(text="本次学案难度情况", size=16, align="left", bold=True, dq=10, dh=5)
    t3 = Table(docx, 0, 4, border=False, tb_name="本次学案难度情况")
    t3.add_table_row_data_xml_fastly(["指标", "生词数量", "阅读难度等级", "文章词汇难度值"])
    t3.add_table_row_data_xml_fastly(["本次内容", f"{r7}个", r8, difficulty_value])
    t3.set_all_border_fastly(xml=True)
    t3.set_ALIGN_VERTICAL()
    t3.set_row_height(row_height=20)
    docx.add_blank_paragraph()

    if InspirationalMessage:
        docx.add_paragraph(text="寄语", size=16, align="left", bold=True, dq=10, dh=5)
        t4 = Table(docx, 0, 1, border=False, tb_name="封面的寄语")
        t4.add_table_row_data_xml_fastly([InspirationalMessage])
        t4.set_all_border_fastly(xml=True)
        t4.set_ALIGN_VERTICAL()
        t4.set_row_height(row_height=50)
        t4.set_tb_colum_width(0, 500)
        docx.add_blank_paragraph()

    docx.add_paragraph(text="练习提醒Tips", size=16, align="left", bold=True, dq=10, dh=5)
    t5 = Table(docx, 0, 1, border=False, tb_name="本次学案难度情况")
    text = "请认真阅读,不可急于求成,要确保能够理解每一句话,不要满足于略知概要,不要跳读略读,不要猜答案,加油!"
    t5.add_table_row_data_xml_fastly([text])
    t5.set_all_border_fastly(xml=True)
    t5.set_ALIGN_VERTICAL()
    t5.set_row_height(row_height=50)
    t5.set_tb_colum_width(0, 500)

    docx.add_paragraph(text="多媒体辅助", size=16, align="left", bold=True, dq=10, dh=5)
    docx.add_paragraph(text="需要示范的学员,扫以下二维码获取音频、视频示范:", size=12, align="left", dq=5, dh=5)
    p = docx.add_blank_paragraph()
    img_io = qrcode_maker(full_url=f"{address}/link?type=exercise&id={exercise_id_int}&from=bltf")
    p.add_pic(img_io, width=2)
    img_io.close()

    docx.add_paragraph(text=f"生成时间: {t_date_time}", size=12, align="left", dq=10)
    docx.add_page_section()


@time_use
def section_4(docx: Word, json_data, *args, **kwargs):
    student_name = json_data.get("StudentInfo").get("StudentName", '')
    title_info = "\n".join(json_data.get("Title"))
    if title_info:
        docx.add_paragraph(f"{student_name} 同学:", align="center", bold=True, dq=5, dh=5)
        p1 = docx.add_blank_paragraph()
        p1.add_run_to_p(title_info, size=10)

    sub_title_maker(docx, "词汇精准学", "智能定制你的专属英语DNA图谱")
    tb = Table(docx, 1, 1, border=True, tb_name="词汇精准学")
    tb.set_tb_colum_width(0, 460)
    tb.set_cell_text(0, 0, "按顺序朗读生词表两遍。\n(1)用红笔在不会的单词序号上打星号,增加记忆。\n(2)朗读例句,不认识的部分参照译文理解。",
                     align="left", size=10, dq=10, dh=10)
    docx.add_blank_paragraph()


@time_use
def section_4_1(docx: Word, json_data, *args, **kwargs):
    def insert_content(row, col, data, qrcode_result: dict):
        cell_outside = tb_outside.get_cell(row, col, delete_default_para=True)
        tb_inside = Table(cell_outside, rows=5, cols=3, tb_name="内部内容")
        tb_inside.merge_cell(0, 0, 0, 2)
        tb_inside.merge_cell(1, 0, 1, 2)
        tb_inside.merge_cell(2, 0, 2, 2)
        tb_inside.merge_cell(3, 0, 3, 2)
        tb_inside.merge_cell(4, 0, 4, 2)

        num_calucate = 2 * row + 1 if col == 0 else 2 * row + 2
        p = ParagraphBase(tb_inside.get_cell_paragraph(0, 0, align="left"))
        p.add_run_to_p(num_dict[num_calucate], bold=True, size=22, font_name="MS Gothic")
        p.add_run_to_p(' ' + data[0], bold=True, size=20)

        tb_inside.set_cell_text(row=1, column=0, cell_text=data[1] + " " + data[2], border=False, size=10, align="left",
                                bk_color=(240, 240, 240))

        cell_p = tb_inside.get_cell_paragraph(2, 0, align="left")
        cell_p_1 = ParagraphBase(cell_p)
        cell_p_1.add_run_to_p(data[3], size=10, bold=True)
        cell_p_1.add_run_to_p(" " + data[4], size=8)

        cell_p = tb_inside.get_cell_paragraph(3, 0, align="left")
        cell_p_1 = ParagraphBase(cell_p)
        cell_p_1.add_run_to_p(data[5], size=10, bold=True)
        cell_p_1.add_run_to_p(" " + data[6], size=8)

        cell_p = tb_inside.get_cell_paragraph(4, 0, align="left")
        cell_p_1 = ParagraphBase(cell_p)
        cell_p_1.add_run_to_p(data[7], size=10, bold=True)
        cell_p_1.add_run_to_p(" " + data[8], size=8)

    properties_chinese_map = {"adj": "形容词", "n": "名词", "interj": "感叹词", "conj": "连词", "num": "数字",
                              "art": "冠词", "pron": "代词", "adv": "副词", "prep": "介词", "v": "动词"}
    strange_words_data = []
    strange_words = json_data.get('StrangeWords')
    qrcode_thread = []
    qrcode_result = {}

    for item in strange_words:
        spell = item['Spell']
        word_id = item['WordId']
        en = "" if not item.get("SymbolsEn", "") else item.get("SymbolsEn")
        am = "" if not item.get("SymbolsAm", "") else item.get("SymbolsAm")
        symbols_en = "英" + f'[{en}]'
        symbols_am = "美" + f'[{am}]'

        word_properties = " ".join([properties_chinese_map.get(i, "") for i in item['WordProperties']])
        word_meanings = item.get('Meaning', "")

        word_changes_list = []
        for idx, s in enumerate(item["WordChanges"], start=1):
            s_type, s_spell = s['Type'], s['Spell']
            if "原型" in s_type or "大小写" in s_type:
                continue
            tail = '\n' if idx != len(item["WordChanges"]) else ''
            word_changes_list.append(f"{s_spell} {s_type}{tail}")
        word_changes = "".join(word_changes_list)

        if item['Sentences']:
            sentences = item['Sentences'][0]['English'] + '\n' + item['Sentences'][0]['Chinese']
        else:
            sentences = ""

        single_word_tuple = (spell, symbols_en, symbols_am, word_properties, word_meanings,
                             "词汇变形", word_changes, "例句", sentences)
        strange_words_data.append(single_word_tuple)

    rows = math.ceil(len(strange_words_data) / 2)
    tb_outside = Table(docx, rows=rows, cols=2, tb_name="外层框架")
    tb_outside.set_tb_colum_width(width=[230, 230])

    for t in qrcode_thread:
        t.join()

    for row in range(rows):
        for col in range(2):
            try:
                data_item = strange_words_data.pop(0)
                insert_content(row, col, data_item, qrcode_result)
            except IndexError:
                break

    docx.add_page_section()


@time_use
def section_5(docx: Word, json_data, *args, **kwargs):
    copy_word_list = [i['Meaning'] for i in json_data.get('StrangeWords')]
    random_copy_word_list = copy_word_list * 3
    shuffle(random_copy_word_list)

    first_copy_word_list = copy_word_list.copy()
    copy_word_list_add_num = [f"{i} ({idx})" for idx, i in enumerate(first_copy_word_list, start=1)]
    shuffle(copy_word_list_add_num)

    total_copy_word_list = copy_word_list_add_num + random_copy_word_list

    sub_title_maker(docx, "单词高效记", "会读会写才算真学会")

    tb = Table(docx, 1, 1, tb_name="高效速记", border=True)
    tb.set_tb_colum_width(0, 460)
    text = ["请在横线上写下对应单词,每格写一遍,尽量默写,默写不出的,可查阅生词表;\n",
            "书写时保持工整;每写完一个单词小声念一遍词义与单词。\n"]
    cell_p = tb.get_cell_paragraph(0, 0, align="left")
    p = ParagraphBase(cell_p)
    p.add_run_to_p(" 高效速记\n", size=16, bold=True)
    for t in text:
        p.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
        p.add_run_to_p(t, size=10)
    docx.add_blank_paragraph()

    total_count = len(total_copy_word_list)
    half_count = int(total_count / 2)
    tb2 = Table(docx, half_count + 1, 4, tb_name="高效速记下面的单词")
    for row in range(total_count):
        data = total_copy_word_list[row]
        if row < half_count:
            tb2.set_cell_text(row, 0, data, size=9, align="right", border=False, dq=2.5, dh=2)
            tb2.set_cell_text(row, 1, str(row + 1) + "." + "_" * 20, size=9, align="left", border=False, dq=2.5, dh=2)
        else:
            tb2.set_cell_text(row - half_count, 2, data, size=9, align="right", border=False, dq=2.5, dh=2)
            tb2.set_cell_text(row - half_count, 3, str(row + 1) + "." + "_" * 20, size=9, align="left", border=False, dq=2.5, dh=2)
    tb2.set_tb_colum_width(width=[120, 110] * 2)
    docx.add_page_section()


@time_use
def section_6(docx: Word, json_data, *args, **kwargs):
    example_sentence = [f"{index}. {i['Sentences'][0]['English']} ({i['Spell']})"
                        for index, i in enumerate(json_data['StrangeWords'], start=1) if i['Sentences']]
    sub_title_maker(docx, "例句填填看", "记词义,练拼写,学得快")

    tb = Table(docx, 1, 1, tb_name="例句填填看", border=True)
    tb.set_tb_colum_width(0, 460)
    text = ["请在横线上写下单词在例句中的词义,若想不起来,可随时到例句答案表中查看。\n",
            "参阅过答案的例句,请在句前的“□”中标记问号,以便复习回顾。\n",
            "单词有多个意思的,应只填写适合语境的意思。\n",
            "例句中有不熟悉的单词,请用斜线划掉,以便拍照报告给我们。"]
    cell_p = tb.get_cell_paragraph(0, 0, align="left", dq=10, dh=10)
    p = ParagraphBase(cell_p)
    for t in text:
        p.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
        p.add_run_to_p(t, size=10)

    for i in example_sentence:
        p = docx.add_blank_paragraph(dq=4, dh=4)
        p.add_run_to_p("□ ", size=12, font_name="宋体")
        p.add_run_to_p(i + "___________")
    docx.add_page_section()


@time_use
def section_7(docx: Word, json_data, *args, **kwargs):
    def wanxing(index, article_single):
        article_id = article_single['Id']
        article_length = article_single['AllWordAmount']
        strange_words_ids = [i['MeanId'] for i in json_data['StrangeWords']]
        explanatory_words_ids = [i['MeaningId'] for i in article_single['ExplanatoryWords']]

        select_text = []
        for ques_index, candidates in enumerate(article_single['Questions'], start=1):
            single_select_text = ''
            for s in candidates['Candidates']:
                single_select_text += s['Label'] + '. '
                participle = s['Participle']
                if participle:
                    single_select_text += participle + ' \n'
                else:
                    text = s['Text']
                    single_select_text += text + ' \n'
            select_text.append(f"{ques_index}. {single_select_text}")
        all_select_text = "\n".join(select_text)

        article_main: str = article_single['English'] + "\n\n郑重提示:认真看完全文再看问题。\n\n" + all_select_text
        article_main_list = article_main.split(" ")
        explanatory_words = "\n\n".join(
            [f"{index}. {i['Spell']} [{i['SymbolsEn']}] [{i['SymbolsAm']}] {i['Meaning']}"
             for index, i in enumerate(article_single['ExplanatoryWords'], start=1)])

        sub_title_maker(docx, "真题强化练", "智能匹配难度,轻松提升阅读")
        tb = Table(docx, 1, 1, tb_name="真题强化练", border=True)
        tb.set_tb_colum_width(0, 460)
        text = ["练习中不认识的单词,尽量猜测词义,并用斜线划掉,以便拍照报告给我们。\n\n",
                "答题完毕后,可查字典,并注释在右侧批注区,不要在原文上注释。复习时不必通读全文,结合上下文能回忆起标记词的词义即可,想不起的再对照批注区。\n",
                "完形填空是优秀的测验题型,却不适合用于训练阅读能力和提升词汇量,所以建议在阅读能力(理解度、速度、难度)达标后再做完形填空题型练习。\n",
                "阅读能力达标的同学,按三遍法做完形填空,基本可以达到满分。三遍法要求如下:\n",
                "第一遍(理解):结合选项通读全文,以求理解文章主旨,但不动笔,以免形成成见。\n",
                "第二遍(填空):通读全文,从候选词中选出适宜项目,将完整的单词填入空格,使文章连贯。\n",
                "第三遍(核验):通读填空后的全文,确认上下文无矛盾之处。\n",
                "三遍通读均应记录起讫时间,并将速度纳入能力考核项目。能力合格者,考试中也应有充裕时间完成以上 3 遍通读。\n",
                "阅读计时从此处开始,请按顺序完成阅读,并注意记录时间。"]
        text2 = [f"全题长度(含问题及选项):{article_length}; 编号:{article_id};\n",
                 "第一遍(理解)开始时间:_________________ 第二遍(填空)开始时间:_________________\n",
                 "第三遍(核验)开始时间:_________________"]
        cell_p = tb.get_cell_paragraph(0, 0, align="left", dq=10, dh=10)
        p = ParagraphBase(cell_p)
        for t in text:
            p.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
            p.add_run_to_p(t, size=10)
        for t2 in text2:
            p.add_run_to_p(t2, size=10)
        docx.add_blank_paragraph()

        tb1 = Table(docx, 1, 3)
        tb1.set_tb_colum_width(width=[90, 370, 5])
        tb1_p = ParagraphBase(tb1.get_cell_paragraph(0, 0, align="left"))
        tb1_p.add_pic("make_docx_demo/static/lianxi1.jpg", width=2.5)
        tb1.set_cell_text(0, 1, f"篇幅(含问题选项):{article_length} 词 阅读开始时间:_____点_____分_____秒",
                          size=9.5, border=False, align="left")

        tb2 = Table(docx, rows=1, cols=2, border=True, tb_name="完形填空")
        tb2.set_tb_colum_width(width=[320, 140])
        tb2_p = ParagraphBase(tb2.get_cell_paragraph(0, 0, align="left"))
        for w in article_main_list:
            word = re.search(r"\[(\d+)]", w)
            if word:
                w = w[:w.find('[')]
                meaning_id = int(word.group(1))
                if meaning_id in strange_words_ids:
                    tb2_p.add_run_to_p(w + ' ', size=10.5, bold=True)
                elif meaning_id in explanatory_words_ids:
                    tb2_p.add_run_to_p(w + ' ', size=10.5, italic=True)
                else:
                    tb2_p.add_run_to_p(w + ' ', size=10.5)
            else:
                tb2_p.add_run_to_p(w + ' ', size=10.5)

        tb2.set_cell_text(0, 1, explanatory_words, size=10.5, font_color=(80, 80, 80), align="left")
        docx.add_blank_paragraph()

        tail_zhushi = """第一遍(理解)结束时间:__________用时:____秒 第二遍(填空)结束时间:__________用时:____秒
第三遍(核验)结束时间:__________用时:____秒 总计用时:____分____秒
"""
        docx.add_paragraph(tail_zhushi, size=10.5)
        docx.add_blank_paragraph()

    def reading(index, article_single):
        def single_yuedu(index, a):
            article_id = a['Id']
            article_length = a['AllWordAmount']
            strange_words_ids = set()
            explanatory_words_ids = set()
            bold_word = set()
            italics_word = set()
            italics_index_dict = {}

            for i in json_data['StrangeWords']:
                strange_words_ids.add(i['MeanId'])
                bold_word.add(i['Spell'])
                bold_word.update([change_word['Spell'] for change_word in i['WordChanges']])

            for italics_index, ii in enumerate(a['ExplanatoryWords'], start=1):
                explanatory_words_ids.add(ii['MeaningId'])
                italics_word.add(ii['Spell'])
                if 'WordChanges' in ii:
                    italics_word.update([change_word['Spell'] for change_word in ii['WordChanges']])
                    italics_index_dict.update({change_word['Spell']: f"[{italics_index}]" for change_word in ii['WordChanges']})
                italics_index_dict[ii['MeaningId']] = f"[{italics_index}]"
                italics_index_dict[ii['Spell']] = f"[{italics_index}]"

            select_text = []
            for ques_index, candidates in enumerate(a['Questions'], start=1):
                single_select_text = ''
                subject = candidates['Subject'] + '\n'
                for s in candidates['Candidates']:
                    single_select_text += s['Label'] + '. '
                    participle = s['Participle']
                    if participle:
                        single_select_text += participle + ' \n'
                    else:
                        text = s['Text']
                        single_select_text += text + ' \n'
                select_text.append(str(ques_index) + ". " + subject + single_select_text)
            all_select_text = "\n".join(select_text)

            article_main: str = a['English'] + "\n\n郑重提示:认真看完全文再看问题。\n" + all_select_text
            article_main_list = split_text_to_word_punctuation(article_main)
            explanatory_words = "\n\n".join(
                [f"{index}. {i['Spell']}\n [{i['SymbolsEn']}] [{i['SymbolsAm']}]\n {i['Meaning']}"
                 for index, i in enumerate(a['ExplanatoryWords'], start=1)])
            tb1 = Table(docx, 1, 3, tb_name="图片小标题")
            tb1.set_tb_colum_width(width=[90, 370, 5])
            tb1_p = ParagraphBase(tb1.get_cell_paragraph(0, 0, align="left"))
            tb1_p.add_pic(f"make_docx_demo/static/lianxi{index}.jpg", width=2.5)
            tb1.set_cell_text(0, 1, f"篇幅(含问题选项):{article_length} 词 阅读开始时间:_____点_____分_____秒",
                              size=9.5, border=False, align="left")

            tb2 = Table(docx, rows=1, cols=2, border=True, tb_name="阅读")
            tb2.set_tb_colum_width(width=[320, 140])
            tb2_p = ParagraphBase(tb2.get_cell_paragraph(0, 0, align="left"))
            for w in article_main_list:
                word = re.search(r"\[(\d+)]", w)
                if word:
                    w = w[:w.find('[')]
                    meaning_id = int(word.group(1))
                    if meaning_id in strange_words_ids:
                        tb2_p.add_run_to_p(w + ' ', size=10.5, bold=True)
                    elif meaning_id in explanatory_words_ids:
                        italics_index_str = italics_index_dict[meaning_id]
                        tb2_p.add_run_to_p(w + f'{italics_index_str} ', size=10.5, italic=True)
                    else:
                        tb2_p.add_run_to_p(w + ' ', size=10.5)
                else:
                    if w in bold_word:
                        tb2_p.add_run_to_p(w + ' ', size=10.5, bold=True)
                    elif w in italics_word:
                        italics_index_str = italics_index_dict[w]
                        tb2_p.add_run_to_p(w + f'{italics_index_str} ', size=10.5, italic=True)
                    else:
                        tb2_p.add_run_to_p(w + ' ', size=10.5)

            tb2.set_cell_text(0, 1, explanatory_words, size=10.5, font_color=(80, 80, 80), align="left",
                              centre=False, line_spacing=300)
            docx.add_blank_paragraph()

            tail_zhushi = """完成时间:_____点_____分_____秒,本篇用时:_____秒。"""
            docx.add_paragraph(tail_zhushi, size=10.5)
            docx.add_blank_paragraph()

        def top_header():
            sub_title_maker(docx, "阅读提升练", "智能匹配难度,轻松提升阅读", "鲍利提分, 高效学习专家")
            tb = Table(docx, 1, 1, tb_name="真题强化练", border=True)
            tb.set_tb_colum_width(0, 460)
            text = ["阅读中不认识的单词,尽量猜测词义,并用斜线划掉,以便拍照报告给我们。\n",
                    "读完全文后,可查字典,并抄在右侧批注区,不要在原文上注释。复习时不必通读全文,结合上下文能回忆起标记词的词义即可,想不起的再对照批注区。\n",
                    "阅读训练的目的是提高对英语词、句、篇的敏感度,答题只是检验学习成果的手段,所以切勿为了快速做题而跳读、略读。阅读速度是很重要的训练指标,请在确实理解词句的基础上尽量提高阅读速度。只要平时扎实阅读,考试中不会没有时间认真读题。\n",
                    "阅读计时从此处开始,请按顺序完成阅读,并注意记录时间。\n\n",
                    "生词划线示例:competitions she once attended. Incuding her years of experience"]
            cell_p = tb.get_cell_paragraph(0, 0, align="left", dq=10, dh=10)
            pp = ParagraphBase(cell_p)
            for index_t, t in enumerate(text):
                if index_t == len(text) - 1:
                    pp.add_run_to_p(t, size=12)
                    pp.add_rectangle('', x=115, y=170, width=55, height=25, boder_color='000000', shape_type='line')
                    pp.add_rectangle('', x=298, y=170, width=55, height=25, boder_color='000000', shape_type='line')
                else:
                    pp.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
                    pp.add_run_to_p(t, size=10)
            docx.add_blank_paragraph()

        # ---------------------开始单篇运行---------------------
        if index == 1:
            top_header()
        single_yuedu(index, article_single)

    def seven_to_five(index, article_single):
        article_id = article_single['Id']
        article_length = article_single['AllWordAmount']
        strange_words_ids = [i['MeanId'] for i in json_data['StrangeWords']]
        explanatory_words_ids = [i['MeaningId'] for i in article_single['ExplanatoryWords']]

        select_text = []
        for ques_index, s_candidates in enumerate(article_single['Candidates'], start=1):
            single_select_text = ''
            single_select_text += s_candidates['Label'] + '. '
            participle = s_candidates['Participle']
            if participle:
                single_select_text += participle
            else:
                text = s_candidates['Text']
                single_select_text += text
            select_text.append(f"{single_select_text}")
        all_select_text = "\n".join(select_text)

        article_main: str = article_single['English'] + "\n\n郑重提示:认真看完全文再看问题。\n\n" + all_select_text
        article_main_list = article_main.split(" ")
        explanatory_words = "\n\n".join(
            [f"{index}. {i['Spell']} [{i['SymbolsEn']}] [{i['SymbolsAm']}] {i['Meaning']}"
             for index, i in enumerate(article_single['ExplanatoryWords'], start=1)])

        sub_title_maker(docx, "阅读提升练", "智能匹配难度,轻松提升阅读", "鲍利提分, 高效学习专家")
        tb = Table(docx, 1, 1, tb_name="真题强化练", border=True)
        tb.set_tb_colum_width(0, 460)
        text = ["阅读中不认识的单词,尽量猜测词义,并用斜线划掉,以便拍照报告给我们。\n",
                "读完全文后,可查字典,并抄在右侧批注区,不要在原文上注释。复习时不必通读全文,结合上下文能回忆起标记词的词义即可,想不起的再对照批注区。\n",
                "7 选 5 题型是测试学生对文章理解程度的好题型,但因打破了文章的连贯性,故不是训练阅读能力的好素材。建议学生在阅读基本能力(理解度、速度、难度)达标后再开展 7 选 5 题型训练。若不能胜任本练习,请回到词汇与阅读训练,先打好基础。\n",
                "阅读计时从此处开始,请按顺序完成阅读,并注意记录时间。"]
        cell_p = tb.get_cell_paragraph(0, 0, align="left", dq=10, dh=10)
        p = ParagraphBase(cell_p)
        for t in text:
            p.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
            p.add_run_to_p(t, size=10)
        docx.add_blank_paragraph()

        tb1 = Table(docx, 1, 3, tb_name="图片小标题")
        tb1.set_tb_colum_width(width=[90, 370, 5])
        tb1_p = ParagraphBase(tb1.get_cell_paragraph(0, 0, align="left"))
        tb1_p.add_pic("make_docx_demo/static/lianxi1.jpg", width=2.5)
        tb1.set_cell_text(0, 1, f"篇幅(含问题选项):{article_length} 词 阅读开始时间:_____点_____分_____秒",
                          size=9.5, border=False, align="left")

        tb2 = Table(docx, rows=1, cols=2, border=True, tb_name="七选五")
        tb2.set_tb_colum_width(width=[320, 140])
        tb2_p = ParagraphBase(tb2.get_cell_paragraph(0, 0, align="left"))
        for w in article_main_list:
            word = re.search(r"\[(\d+)]", w)
            if word:
                w = w[:w.find('[')]
                meaning_id = int(word.group(1))
                if meaning_id in strange_words_ids:
                    tb2_p.add_run_to_p(w + ' ', size=10.5, bold=True)
                elif meaning_id in explanatory_words_ids:
                    tb2_p.add_run_to_p(w + ' ', size=10.5, italic=True)
                else:
                    tb2_p.add_run_to_p(w + ' ', size=10.5)
            else:
                tb2_p.add_run_to_p(w + ' ', size=10.5)

        tb2.set_cell_text(0, 1, explanatory_words, size=10.5, font_color=(80, 80, 80), align="left")
        docx.add_blank_paragraph()

    # 判断题型;根据题型选择----------------------------
    all_article_length = 0
    for index, article_single in enumerate(json_data['Articles'], start=1):
        article_type = article_single['Category']
        article_type_select = {1: reading, 2: wanxing, 3: seven_to_five}
        assert article_type in article_type_select
        article_type_select[article_type](index, article_single)

        article_length = article_single['AllWordAmount']
        all_article_length += article_length

    tail_zhushi = f"""阅读计时在此结束。
今日总计阅读量 {all_article_length} 词,用时________秒,整份学案共有_______个题目答对。"""
    docx.add_paragraph(tail_zhushi, size=10.5)
    docx.add_blank_paragraph()
    docx.add_page_section()


@time_use
def section_9(docx: Word, json_data, *args, **kwargs):
    def wanxing(index, article_count, article_single):
        chinese_article = article_single['Chinese']
        all_analysis = ''
        docx.add_paragraph("答案和解析", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        for ques_index, question_item in enumerate(article_single['Questions'], start=1):
            analysis = question_item['Analysis'].strip()
            abcd_label = ''
            candidates = question_item['Candidates']
            for abcd_selected in candidates:
                if abcd_selected['IsRight']:
                    abcd_label += abcd_selected['Label'].strip()
            all_analysis += f"{ques_index}.\n{abcd_label} {analysis}\n"
        docx.add_paragraph(all_analysis, size=9)

        docx.add_paragraph("全文参考译文", chinese_font_name="微软雅黑", dq=15, dh=5, bold=True)
        docx.add_paragraph(chinese_article, size=9, dq=5, dh=5, line_spacing=300)

    def reading(index, article_count, article_single):
        """index is supplied by the outer loop, starting at 1."""
        all_analysis = ''
        all_difficult_sentences = []
        chinese_article = article_single['Chinese']
        questions = article_single['Questions']
        for ques_index, question_item in enumerate(questions, start=1):
            analysis = question_item['Analysis'].strip("\n")
            abcd_label = ''
            candidates = question_item['Candidates']
            for abcd_selected in candidates:
                if abcd_selected['IsRight']:
                    abcd_label += abcd_selected['Label'].strip("\n")
            new_line = "" if ques_index == len(questions) else "\n"
            all_analysis += f"{ques_index}.{abcd_label} {analysis}{new_line}"
        if index != article_count:
            all_analysis += '\n'

        docx.add_paragraph(f"Passage {index}", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True, size=16)
        docx.add_paragraph("全文参考译文", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        docx.add_paragraph(chinese_article, size=9)
        docx.add_paragraph("答案和解析", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        docx.add_paragraph(all_analysis, size=9)

    def seven_to_five(index, article_count, article_single):
        chinese_article = article_single['Chinese']
        all_analysis = ''
        docx.add_paragraph("答案和解析", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        for q_index, question_item in enumerate(article_single['Questions'], start=1):
            analysis = question_item['Analysis']
            abcd_label = ''
            candidates = question_item['Candidates']
            for abcd_selected in candidates:
                if abcd_selected['IsRight']:
                    abcd_label += abcd_selected['Label']
            all_analysis += f"{q_index}.{abcd_label} {analysis}\n"
        docx.add_paragraph(all_analysis, size=9)

        docx.add_paragraph("全文参考译文", chinese_font_name="微软雅黑", dq=15, dh=5, bold=True)
        docx.add_paragraph("Passage 1", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        docx.add_paragraph(chinese_article, size=9, dq=5, dh=5, line_spacing=300)

    # 判断题型;根据题型选择----------------------------
    sub_title_maker(docx, "解题自主纠", "自主学习,逐步养成良好学习习惯", "鲍利提分,你的智能教练")
    articles = json_data['Articles']
    article_count = len(articles)
    for index, article_single in enumerate(articles, start=1):
        article_type = article_single['Category']
        article_type_select = {1: reading, 2: wanxing, 3: seven_to_five}
        assert article_type in article_type_select
        article_type_select[article_type](index, article_count, article_single)

    docx.add_docx_component("make_docx_demo/word_component/blank.docx")
    docx.add_page_section()


@time_use
def section_10(docx: Word, json_data, scanpage_format, *args, **kwargs):
    docx.add_paragraph("☆ 请写出词义,再对照筛查表批改。词义顺序可互换;答案意思相符即可,不要求一字不差。批改结果誊抄到筛查表。",
                       size=9, dq=2, dh=2)
    tb = Table(docx, 50, 4, tb_name="写出词义")
    tb.set_tb_colum_width(width=[110, 120, 110, 120])
    for row in range(50):
        tb.set_cell_text(row, 0, str(row + 1) + " " + "rich", size=8.5, dq=1, dh=1, border=False)
        tb.set_cell_text(row, 1, "□________________", size=10, dq=0, dh=0, border=False)
        tb.set_cell_text(row, 2, str(row + 51) + " " + "rich", size=8.5, dq=1, dh=1, border=False)
        tb.set_cell_text(row, 3, "□________________", size=10, dq=0, dh=0, border=False)
    tb.set_row_height(13)
    docx.add_page_break()

    docx.add_paragraph("☆ 请在需要加强学习的词义前方框中划线,两头各超出 1 毫米为宜(示例:□☑52.example);请保持本表整洁并交回。",
                       size=9, dq=2, dh=2)
    tb2 = Table(docx, 25, 8, tb_name="划线表")
    tb2.set_tb_colum_width(width=[57.5] * 8)
    docx.add_blank_paragraph(dq=5, dh=5)
    for row in range(25):
        tb2.set_cell_text(row, 0, "[01] Tuesday", size=8.5, border=False, dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 1, "星期二", size=8.5, border="right", dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 2, "[01] Tuesday", size=8.5, border=False, dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 3, "星期二", size=8.5, border="right", dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 4, "[01] Tuesday", size=8.5, border=False, dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 5, "星期二", size=8.5, border="right", dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 6, "[01] Tuesday", size=8.5, border=False, dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 7, "星期二", size=8.5, border=False, dq=1.2, dh=1.2)

    docx.set_page_column(5)
    docx.add_docx_component("make_docx_demo/word_component/component.docx")
    docx.end_page_column()
    if scanpage_format == 3:
        docx.add_page_section()


@time_use
def two_check_page(docx: Word, json_data, *args, **kwargs):
    def empty_filter_page(class_name, student_name, page_title, page_sub_title, t_datetime, word_data_list):
        page_sub_title = "词汇训练"
        if len(word_data_list) % 2 != 0:
            word_data_list.append("")

        tb = Table(docx, 1, 3, tb_name="头部三元素")
        tb.set_tb_colum_width(width=[40, 100, 100])
        tb.set_tb_colum_width(0, 100)
        tb.set_cell_text(0, 0, f"鲍利提分", border=False, size=16, bold=True, chinese_font_name="黑体")
        tb.set_cell_text(0, 1, f"{class_name}\n{student_name}", border=False, size=8, dh=2)
        tb.set_cell_text(0, 2, f"{page_title}\n{page_sub_title}", border=False, size=8, dh=2)

        docx.add_paragraph("请写出词义,再对照筛查表批改。词义顺序可互换;答案意思相符即可,不要求一字不差。批改结果誊抄到筛查表。", size=9)

        tb = Table(docx, rows=0, cols=4, tb_name="第一页筛查表")
        tb.set_all_border_fastly(xml=True, outside_side_border=True, outside_side_border_size=5)
        half_count = int(len(word_data_list) / 2)
        for index, row in enumerate(range(half_count)):
            first_word, second_word = word_data_list[row], word_data_list[row + half_count]
            cell3 = f"{index + 1 + half_count}. {second_word}" if second_word else ""
            cell4 = "□ ___________________________" if second_word else ""
            data = [f"{index + 1}. {first_word}", "□ ___________________________", cell3, cell4]
            tb.add_table_row_data_xml_fastly(data, font_size=[10.5, 9, 10.5, 9])
        tb.set_row_height(13.8)
        tb.set_table_width_xml([2124, 3257, 2140, 3257])

        blank_count = " " * 80
        p = docx.add_blank_paragraph(dq=5)
        p.add_run_to_p(f"{t_datetime} {page_title}-{page_sub_title}{blank_count}", size=8, chinese_font_name="仿宋", font_name="仿宋")
        docx.add_page_break()

    def filter_table_page(page_id, class_name, student_name, page_title, page_sub_title, t_datetime,
                          foot_description, foot_description2, article_type, word_data_list2):
        page_sub_title = "词汇训练"
        if len(word_data_list2) % 2 != 0:
            word_data_list2.append(["", ""])

        tb = Table(docx, 1, 5, tb_name="头部五元素")
        tb.set_tb_colum_width(width=[80, 100, 120, 150, 70])
        tb.set_cell_text(0, 0, f"鲍利提分", border=False, size=16, bold=True, chinese_font_name="黑体")
        tb.set_cell_text(0, 1, f"{class_name}\n{student_name}", border=False, size=8)
        tb.set_cell_text(0, 2, f"{page_id}", border=False, size=16, dh=2, bold=True, font_name="黑体")
        tb.set_cell_text(0, 3, f"{page_title}\n{page_sub_title}", border=False, size=8)

        p_cell = tb.get_cell_paragraph(0, 4)
        p = ParagraphBase(p_cell)
        io_image = qrcode_maker(f"{page_id}")
        p.add_pic(io_image, width=Inches(0.6))
        io_image.close()

        pp = docx.add_blank_paragraph()
        pp.add_run_to_p("下述词汇相应的词义未掌握的请划掉,并将整个页面拍照给我们,以便记录词汇掌握数据。示例:comfort 4. 舒适,安逸", size=9)
        pp.add_rectangle('', x=540, y=10, width=55, height=0, boder_color='000000', shape_type='line')

        tb = Table(docx, rows=0, cols=4, tb_name="第二页筛查表")  # 1234竖着放
        total_row = int(len(word_data_list2) / 2)
        for row in range(total_row):
            spell1, meaning1 = word_data_list2[row]
            spell2, meaning2 = word_data_list2[total_row + row]
            cell3 = f"{spell2}" if spell2 else ""
            cell4 = f"{total_row + row + 1}. {meaning2}" if meaning2 else ""
            data = [f"{spell1}", f"{row + 1}. {meaning1}", cell3, cell4]
            tb.add_table_row_data_xml_fastly(data, font_size=[10.5, 9, 10.5, 9], alignment=['right', 'left', 'right', 'left'])
        tb.set_row_height(13.8)
        tb.set_table_width_xml([2124, 3257, 2140, 3257])

        docx.add_paragraph(f"{t_datetime} {page_title}-{page_sub_title}{foot_description}", size=8,
                           chinese_font_name="仿宋", font_name="仿宋", dq=5)

    student_name = json_data.get("StudentInfo").get("StudentName", '')
    class_name = json_data.get("StudentInfo").get("ClassName", '').replace("词汇突击", "")
    t_datetime = time.strftime("%Y-%m-%d %H:%M", time.localtime())
    article_type = 1
    try:
        article_type = json_data['WordAndArticleContents'][0]['Articles'][0]['Category']
    except Exception as e:
        log_err_e(e, "学案类型不存在就取1,词汇突击里面只有阅读理解")

    # ---------------------------------------------------------------------------------
    # Note: empty_filter_page is defined but not called in this variant; only the
    # filter_table_page screening pages are emitted here.
    screening_scanPages = json_data['ScreeningScanPages']
    for index, page in enumerate(screening_scanPages, start=1):
        page_id = str(page['PageId']).rjust(11, "0")
        page_title = page['Title']
        page_sub_title = page['SubTitle']
        foot_description = page['FootDescription']
        foot_description2 = page['FootDescription2']

        word_data_list1 = []
        word_data_list2 = []
        for i in page['FilterTable']['Items']:
            word_data_list1.append(i['Spell'])
            word_data_list2.append([i['Spell'], i['Meaning']])

        filter_table_page(page_id, class_name, student_name, page_title, page_sub_title, t_datetime,
                          foot_description, foot_description2, article_type, word_data_list2)
        if index != len(screening_scanPages):
            # page break between screening pages; none after the last one
            docx.add_page_break()
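# Legacy variant of the screening pages, still invoked alongside two_check_page when
# scanpage_format == 1 (see start_make_word). Unlike two_check_page it can prepend an
# empty fill-in page when Config.AddEmptyFilterPage is set and pads the filter table
# to 100 items per page.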
def old_two_check_page(docx: Word, json_data, **kwargs):
    def empty_filter_page(class_name, student_name, page_title, page_sub_title, t_datetime, word_data_list):
        if len(word_data_list) % 2 != 0:
            word_data_list.append("")

        tb = Table(docx, 1, 3, tb_name="头部三元素")
        tb.set_tb_colum_width(width=[140, 100, 100])
        tb.set_cell_text(0, 0, f"鲍利提分", border=False, size=16, bold=True, chinese_font_name="黑体")
        tb.set_cell_text(0, 1, f"{class_name}\n{student_name}", border=False, size=8, dh=2)
        tb.set_cell_text(0, 2, f"{page_title}\n{page_sub_title}", border=False, size=8, dh=2)

        docx.add_paragraph("请写出词义,再对照筛查表批改。词义顺序可互换;答案意思相符即可,不要求一字不差。批改结果誊抄到筛查表。", size=9)

        tb = Table(docx, rows=0, cols=4, tb_name="第一页筛查表")
        tb.set_all_border_fastly(xml=True, outside_side_border=True, outside_side_border_size=5)
        half_count = int(len(word_data_list) / 2)
        for index, row in enumerate(range(half_count)):
            first_word, second_word = word_data_list[row], word_data_list[row + half_count]
            cell3 = f"{index + 1 + half_count}. {second_word}" if second_word else ""
            cell4 = "□ ___________________________" if second_word else ""
            data = [f"{index + 1}. {first_word}", "□ ___________________________", cell3, cell4]
            tb.add_table_row_data_xml_fastly(data, font_size=[10.5, 9, 10.5, 9])
        tb.set_row_height(13.8, first_row_h=6)
        tb.set_table_width_xml([2124, 3257, 2140, 3257])

        blank_count = " " * 80
        p = docx.add_blank_paragraph(dq=5)
        p.add_run_to_p(f"{t_datetime} {page_title}-{page_sub_title}{blank_count}", size=8, chinese_font_name="仿宋", font_name="仿宋")
        docx.add_page_break()

    def filter_table_page(page_id, class_name, student_name, page_title, page_sub_title, t_datetime,
                          foot_description, foot_description2, article_type, word_data_list2):
        if len(word_data_list2) % 2 != 0:
            word_data_list2.append(["", ""])

        tb = Table(docx, 1, 5, tb_name="头部五元素")
        tb.set_tb_colum_width(width=[80, 100, 120, 150, 70])
        tb.set_cell_text(0, 0, f"鲍利提分", border=False, size=16, bold=True, chinese_font_name="黑体")
        tb.set_cell_text(0, 1, f"{class_name}\n{student_name}", border=False, size=8)
        tb.set_cell_text(0, 2, f"{page_id}", border=False, size=16, dh=2, bold=True, font_name="黑体")
        tb.set_cell_text(0, 3, f"{page_title}\n{page_sub_title}", border=False, size=8)

        p_cell = tb.get_cell_paragraph(0, 4)
        p = ParagraphBase(p_cell)
        page_id = int(page_id)
        io_image = qrcode_maker(f"{page_id}")
        p.add_pic(io_image, width=Inches(0.6))
        io_image.close()

        pp = docx.add_blank_paragraph()
        p_base = ParagraphBase(pp)
        p_base.p.add_run_to_p("请在需要加强学习的词义前方框中划线,两头各超出1毫米为宜(示例:", size=9)
        p_base.p.add_pic("make_docx_demo/static/line_example.png", width=Inches(0.8))
        p_base.p.add_run_to_p(" );请保持本表整洁并交回。", size=9)

        tb = Table(docx, rows=0, cols=4, tb_name="第二页筛查表")  # 1234竖着放
        total_row = int(len(word_data_list2) / 2)
        for row in range(total_row):
            spell1, meaning1 = word_data_list2[row]
            spell2, meaning2 = word_data_list2[total_row + row]
            cell1 = f"{row + 1}. {spell1}" if spell1 else ""
            cell2 = f"□ {meaning1}" if meaning1 else ""
            cell3 = f"{total_row + row + 1}. {spell2}" if spell2 else ""
            cell4 = f"□ {meaning2}" if meaning2 else ""
            data = [cell1, cell2, cell3, cell4]
            tb.add_table_row_data_xml_fastly(data, font_size=[10.5, 9, 10.5, 9])
        tb.set_all_border_fastly(xml=True, outside_side_border=True, outside_side_border_size=5)
        tb.set_row_height(13.6, first_row_h=6)
        tb.set_table_width_xml([2124, 3257, 2140, 3257])

        if article_type == 1:
            docx.add_paragraph(f"{t_datetime} {page_title}-{page_sub_title}{foot_description}", size=8,
                               chinese_font_name="仿宋", font_name="仿宋", dq=5)
            docx.add_paragraph(foot_description2, align="right", size=8, chinese_font_name="仿宋")
        else:
            docx.add_paragraph(f"{t_datetime} {page_title}-{page_sub_title}{foot_description}", size=8,
                               chinese_font_name="仿宋", font_name="仿宋", dq=5)

    student_name = json_data.get("StudentInfo").get("StudentName", '')
    class_name = json_data.get("StudentInfo").get("ClassName", '')
    t_datetime = time.strftime("%Y-%m-%d %H:%M", time.localtime())
    article_type = json_data['WordAndArticleContents'][0]['Articles'][0]['Category']
    is_add_empty_filter_page = json_data['Config']['AddEmptyFilterPage']

    # ---------------------------------------------------------------------------------
    for index, page in enumerate(json_data['ScreeningScanPages'], start=1):
        page_id = str(page['PageId']).rjust(11, "0")
        if index >= 2:
            docx.add_page_break()
        page_title = page['Title']
        page_sub_title = page['SubTitle']
        foot_description = page['FootDescription']
        foot_description2 = page['FootDescription2']

        word_data_list1 = []
        word_data_list2 = []
        item_list: list = page['FilterTable']['Items']
        item_count = len(item_list)
        if item_count < 100:
            item_list.extend([{"Spell": "", "Meaning": ""} for _ in range(100 - item_count)])
        for i in page['FilterTable']['Items']:
            word_data_list1.append(i['Spell'])
            word_data_list2.append([i['Spell'], i['Meaning']])

        if is_add_empty_filter_page:
            empty_filter_page(class_name, student_name, page_title, page_sub_title, t_datetime, word_data_list1)
        filter_table_page(page_id, class_name, student_name, page_title, page_sub_title, t_datetime,
                          foot_description, foot_description2, article_type, word_data_list2)


@time_use
def other(docx, json_data, *args, **kwargs):
    sections = docx.doc.sections
    for section in sections[:-1]:
        section.top_margin = Inches(0.3)
        section.bottom_margin = Inches(0.4)
        section.left_margin = Inches(0.8)
        section.right_margin = Inches(0.8)
        section.footer_distance = 180000

    sections[-1].top_margin = Inches(0.1)
    sections[-1].bottom_margin = Inches(0.1)
    sections[-1].left_margin = Inches(0.5)
    sections[-1].right_margin = Inches(0.5)

    header_maker(docx, json_data)
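# Parameter semantics for start_make_word, as inferred from the call sites below:
#   document_format: 1 returns the generated .docx path; any other value converts the
#                    result with convert_word_to_pdf and returns the .pdf path.
#   scanpage_format: 1 appends the new and legacy screening pages (two_check_page +
#                    old_two_check_page), 2 appends section_10 only, and 3 appends
#                    section_10 followed by two_check_page.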
def start_make_word(json_data, document_format, scanpage_format):
    parent_path = "make_docx_demo/file_result/"
    if not os.path.exists(parent_path):
        os.makedirs(parent_path)
    try:
        exercise_id = json_data['ExerciseId']
        docx = Word(save_file_name=f"{parent_path}{exercise_id}.docx",
                    start_template_name="make_docx_demo/word_component/start_template.docx")
        section_1(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        section_4(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        # One pass of the word/article sections per exercise block.
        for exercise_json in json_data['WordAndArticleContents']:
            section_4_1(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)
            section_5(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)
            section_6(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)
            section_7(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)
            section_9(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)

        if scanpage_format == 1:
            two_check_page(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
            old_two_check_page(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        elif scanpage_format == 2:
            section_10(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        elif scanpage_format == 3:
            section_10(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
            two_check_page(docx=docx, json_data=json_data, scanpage_format=scanpage_format)

        other(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        docx.save_docx()

        if document_format == 1:
            return f"{parent_path}{exercise_id}.docx"
        else:
            convert_word_to_pdf(f"{parent_path}{exercise_id}")
            return f"{parent_path}{exercise_id}.pdf"
    except Exception as e:
        # On failure the error is logged and the function falls through, returning None.
        log_err_e(e)


if __name__ == '__main__':
    import os

    t = time.time()
    os.chdir("..")
    start_make_word(test_json1, 1, 1)
    print(time.time() - t)