# -*- coding: UTF-8 -*-
"""Builds the Word study-plan document for the 鲍利提分 mini-program; the corresponding apifox
endpoint is the 词汇突击 study-plan document generation API under 单词教学宝."""
import time
import re
import os
import math
import yaml
from random import randint, shuffle
from docx.shared import Pt, Inches, Cm, RGBColor
from docx.enum.text import WD_COLOR_INDEX
from make_docx_demo.data import *
from docx_base import Word, Table, hex_to_rgb, rgb_to_hex, ParagraphBase
from make_docx_demo.docx_other_func import time_use, qrcode_maker, get_weekday
from tools.loglog import logger, log_err_e
from make_docx_demo.word2pdf import convert_word_to_pdf
from make_docx_demo.get_standard_data import get_standard_data
from common.split_text import split_text_to_word_punctuation
from config.read_config import address

# Circled numbers used as bullets for the word cards in section_4_1.
num_dict = {1: "❶", 2: "❷", 3: "❸", 4: "❹", 5: "❺", 6: "❻", 7: "❼", 8: "❽", 9: "❾",
            10: "❿", 11: "⓫", 12: "⓬", 13: "⓭", 14: "⓮", 15: "⓯", 16: "⓰", 17: "⓱", 18: "⓲", 19: "⓳", 20: "⓴"}
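
# Rough shape of the incoming payload, inferred from the accesses made further down in this module
# (an illustrative sketch, not an authoritative schema):
#   json_data = {
#       "ExerciseId": ..., "ExerciseTitle": ..., "Title": [...], "InspirationalMessage": ...,
#       "StudentInfo": {"StudentName": ..., "ClassName": ..., "StudentStage": 1 | 2 | 3,
#                       "StudentStudy": {"ReadingLevel": ..., "FirstVocabulary": ...,
#                                        "Vocabulary": ..., "ReadingVolume": ...}},
#       "WordAndArticleContents": [{"StrangeWords": [...], "Articles": [...]}, ...],
#       "ScreeningScanPages": [...],
#       "Config": {"AddEmptyFilterPage": ...},
#   }
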
@time_use
def header_maker(docx: Word, json_data):
    exercise_id = str(json_data.get("ExerciseId", "")).rjust(11, "0")
    exercise_title = json_data.get("ExerciseTitle", "")
    exercise_level = json_data['StudentInfo']['StudentStudy']['ReadingLevel']
    student_name = json_data.get("StudentInfo").get("StudentName", '')
    class_name = json_data.get("StudentInfo").get("ClassName", '').replace("词汇突击", "")
    t_date = time.strftime("%Y-%m-%d", time.localtime())
    t_weekday = get_weekday()
    t_time = time.strftime("%H:%M:%S", time.localtime())

    # The middle sections get the full header table; the first (cover) and last sections are skipped.
    for i in range(1, len(docx.doc.sections) - 1):
        tb_header = docx.add_header_table(rows=1, cols=5, section_index=i, tb_name="页眉表格")
        tb_header.set_cell_text(0, 0, "鲍利提分", bold=True, size=16, color=(220, 220, 220), border=False, chinese_font_name="黑体")
        tb_header.set_cell_text(0, 1, f"{class_name}\n{student_name}", size=8, border=False, color=(220, 220, 220))
        tb_header.set_cell_text(0, 2, f"词汇训练\n{exercise_level}级", size=8, border=False, color=(220, 220, 220))
        tb_header.set_cell_text(0, 3, f"{exercise_id}", bold=True, size=24, border=False, color=(220, 220, 220))
        tb_header.set_cell_text(0, 4, f"{t_date}\n{t_weekday}\n{t_time}", size=8, border=False, color=(220, 220, 220))
        tb_header.set_tb_colum_width(width=[100, 70, 70, 150, 80])

    # The last section keeps an empty, unlinked header.
    target_section = docx.doc.sections[-1]
    target_section.header.is_linked_to_previous = False
    for paragraph in target_section.header.paragraphs:
        paragraph.clear()
    target_section.header_distance = 0
    target_section.footer_distance = 280000


@time_use
def sub_title_maker(docx: Word, main_title, sub_title_name1, sub_title_name2='鲍利提分,你的智能教练'):
    p = docx.add_blank_paragraph()
    line_width = 200
    main_rect_x = line_width + 10
    main_rect_width = 150
    right_line_x = main_rect_x + main_rect_width + 10
    p.add_rectangle(main_title, x=main_rect_x, y=4, fill_color="000000", width=main_rect_width, height=48, font_color="ffffff",
                    font_size=18)
    p.add_rectangle("", x=0, y=50, boder_color="808080", width=line_width, height=2)
    p.add_rectangle("", x=right_line_x, y=50, boder_color="808080", width=line_width, height=2)
    p.add_rectangle(f"【{sub_title_name1}】", x=0, y=20, width=line_width, height=40, font_size=8, chinese_font="宋体")
    p.add_rectangle(sub_title_name2, x=right_line_x, y=20, width=line_width, height=40, font_color="808080", font_size=8,
                    chinese_font="宋体")
    docx.add_blank_paragraph()
    docx.add_blank_paragraph()
    docx.add_blank_paragraph()


@time_use
def section_1(docx: Word, json_data, *args, **kwargs):
    exercise_id_int = json_data.get("ExerciseId", "")
    student_name = json_data.get("StudentInfo").get("StudentName", '')
    student_stage = json_data.get("StudentInfo").get("StudentStage")
    grade_name = {1: "小学", 2: "初中", 3: "高中"}.get(student_stage)
    t_date_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    totalVocabulary, readingAccuracy, readingLevel, readingSpeed = get_standard_data(student_stage)
    FirstVocabulary = json_data['StudentInfo']['StudentStudy']['FirstVocabulary']
    Vocabulary = json_data['StudentInfo']['StudentStudy']['Vocabulary']
    ReadingVolume = json_data['StudentInfo']['StudentStudy']['ReadingVolume']
    r6 = json_data['StudentInfo']['StudentStudy']['ReadingLevel']
    r7 = len([strange_words for exercise in json_data['WordAndArticleContents'] for strange_words in exercise['StrangeWords']])
    r8 = r6
    multi_article_difficulty = [article_obj['Score'] for article_obj in json_data['WordAndArticleContents'][0]['Articles']]
    difficulty_value = sum(multi_article_difficulty) // len(multi_article_difficulty) if multi_article_difficulty else 0
    InspirationalMessage = json_data.get('InspirationalMessage')

    # ---- cover layout starts here ----
    docx.add_paragraph(text="鲍利提分个性化学案", size=20, align="center", bold=True)
    docx.add_paragraph(text="AI解码英语基因,智能重组高分密码", size=14, align="center")
    docx.add_blank_paragraph()
    docx.add_paragraph(text="学生基本情况", size=16, align="left", bold=True, dq=10, dh=5)
    t1 = Table(docx, 0, 3, border=True, tb_name="学生基本情况")
    t1.add_table_row_data_xml_fastly(["姓名", "年级", "初始词汇量"])
    t1.add_table_row_data_xml_fastly([student_name, grade_name, FirstVocabulary])
    t1.add_table_row_data_xml_fastly(["当前词汇量", "学段总词汇量", "累计阅读量"])
    t1.add_table_row_data_xml_fastly([Vocabulary, totalVocabulary, ReadingVolume])
    t1.set_all_border_fastly(xml=True)
    t1.set_ALIGN_VERTICAL()
    t1.set_row_height(row_height=20)
    docx.add_blank_paragraph()

    docx.add_paragraph(text="本次学案难度情况", size=16, align="left", bold=True, dq=10, dh=5)
    t3 = Table(docx, 0, 4, border=False, tb_name="本次学案难度情况")
    t3.add_table_row_data_xml_fastly(["指标", "生词数量", "阅读难度等级", "文章词汇难度值"])
    t3.add_table_row_data_xml_fastly(["本次内容", f"{r7}个", r8, difficulty_value])
    t3.set_all_border_fastly(xml=True)
    t3.set_ALIGN_VERTICAL()
    t3.set_row_height(row_height=20)
    docx.add_blank_paragraph()

    if InspirationalMessage:
        docx.add_paragraph(text="寄语", size=16, align="left", bold=True, dq=10, dh=5)
        t4 = Table(docx, 0, 1, border=False, tb_name="封面的寄语")
        t4.add_table_row_data_xml_fastly([InspirationalMessage])
        t4.set_all_border_fastly(xml=True)
        t4.set_ALIGN_VERTICAL()
        t4.set_row_height(row_height=50)
        t4.set_tb_colum_width(0, 500)
        docx.add_blank_paragraph()

    docx.add_paragraph(text="练习提醒Tips", size=16, align="left", bold=True, dq=10, dh=5)
    t5 = Table(docx, 0, 1, border=False, tb_name="本次学案难度情况")
    text = "请认真阅读,不可急于求成,要确保能够理解每一句话,不要满足于略知概要,不要跳读略读,不要猜答案,加油!"
    t5.add_table_row_data_xml_fastly([text])
    t5.set_all_border_fastly(xml=True)
    t5.set_ALIGN_VERTICAL()
    t5.set_row_height(row_height=50)
    t5.set_tb_colum_width(0, 500)

    docx.add_paragraph(text="多媒体辅助", size=16, align="left", bold=True, dq=10, dh=5)
    docx.add_paragraph(text="需要示范的学员,扫以下二维码获取音频、视频示范:", size=12, align="left", dq=5, dh=5)
    p = docx.add_blank_paragraph()
    img_io = qrcode_maker(full_url=f"{address}/link?type=exercise&id={exercise_id_int}&from=bltf")
    p.add_pic(img_io, width=2)
    img_io.close()
    docx.add_paragraph(text=f"生成时间: {t_date_time}", size=12, align="left", dq=10)
    docx.add_page_section()


@time_use
def section_4(docx: Word, json_data, *args, **kwargs):
    student_name = json_data.get("StudentInfo").get("StudentName", '')
    title_info = "\n".join(json_data.get("Title"))
    if title_info:
        docx.add_paragraph(f"{student_name} 同学:", align="center", bold=True, dq=5, dh=5)
        p1 = docx.add_blank_paragraph()
        p1.add_run_to_p(title_info, size=10)
    sub_title_maker(docx, "词汇精准学", "智能定制你的专属英语DNA图谱")
    tb = Table(docx, 1, 1, border=True, tb_name="词汇精准学")
    tb.set_tb_colum_width(0, 460)
    tb.set_cell_text(0, 0, "按顺序朗读生词表两遍。\n(1)用红笔在不会的单词序号上打星号,增加记忆。\n(2)朗读例句,不认识的部分参照译文理解。",
                     align="left", size=10, dq=10, dh=10)
    docx.add_blank_paragraph()


@time_use
def section_4_1(docx: Word, json_data, *args, **kwargs):
    def insert_content(row, col, data, qrcode_result: dict):
        cell_outside = tb_outside.get_cell(row, col, delete_default_para=True)
        tb_inside = Table(cell_outside, rows=5, cols=3, tb_name="内部内容")
        tb_inside.merge_cell(0, 0, 0, 2)
        tb_inside.merge_cell(1, 0, 1, 2)
        tb_inside.merge_cell(2, 0, 2, 2)
        tb_inside.merge_cell(3, 0, 3, 2)
        tb_inside.merge_cell(4, 0, 4, 2)
        # Cards are numbered left-to-right, two per row.
        num_calucate = 2 * row + 1 if col == 0 else 2 * row + 2
        p = ParagraphBase(tb_inside.get_cell_paragraph(0, 0, align="left"))
        p.add_run_to_p(num_dict[num_calucate], bold=True, size=22, font_name="MS Gothic")
        p.add_run_to_p(' ' + data[0], bold=True, size=20)
        tb_inside.set_cell_text(row=1, column=0, cell_text=data[1] + " " + data[2], border=False, size=10, align="left",
                                bk_color=(240, 240, 240))
        cell_p = tb_inside.get_cell_paragraph(2, 0, align="left")
        cell_p_1 = ParagraphBase(cell_p)
        cell_p_1.add_run_to_p(data[3], size=10, bold=True)
        cell_p_1.add_run_to_p(" " + data[4], size=8)
        cell_p = tb_inside.get_cell_paragraph(3, 0, align="left")
        cell_p_1 = ParagraphBase(cell_p)
        cell_p_1.add_run_to_p(data[5], size=10, bold=True)
        cell_p_1.add_run_to_p(" " + data[6], size=8)
        cell_p = tb_inside.get_cell_paragraph(4, 0, align="left")
        cell_p_1 = ParagraphBase(cell_p)
        cell_p_1.add_run_to_p(data[7], size=10, bold=True)
        cell_p_1.add_run_to_p(" " + data[8], size=8)

    properties_chinese_map = {"adj": "形容词", "n": "名词", "interj": "感叹词", "conj": "连词", "num": "数字", "art": "冠词",
                              "pron": "代词", "adv": "副词", "prep": "介词", "v": "动词"}
    strange_words_data = []
    strange_words = json_data.get('StrangeWords')
    qrcode_thread = []
    qrcode_result = {}
    for item in strange_words:
        spell = item['Spell']
        word_id = item['WordId']
        en = "" if not item.get("SymbolsEn", "") else item.get("SymbolsEn")
        am = "" if not item.get("SymbolsAm", "") else item.get("SymbolsAm")
        symbols_en = "英" + f'[{en}]'
        symbols_am = "美" + f'[{am}]'
        word_properties = " ".join([properties_chinese_map.get(i, "") for i in item['WordProperties']])
        word_meanings = item.get('Meaning', "")
        word_changes_list = []
        for idx, s in enumerate(item["WordChanges"], start=1):
            s_type, s_spell = s['Type'], s['Spell']
            if "原型" in s_type or "大小写" in s_type:
                continue
            tail = '\n' if idx != len(item["WordChanges"]) else ''
            word_changes_list.append(f"{s_spell} {s_type}{tail}")
        word_changes = "".join(word_changes_list)
        if item['Sentences']:
            sentences = item['Sentences'][0]['English'] + '\n' + item['Sentences'][0]['Chinese']
        else:
            sentences = ""
        single_word_tuple = (spell, symbols_en, symbols_am, word_properties, word_meanings,
                             "词汇变形", word_changes, "例句", sentences)
        strange_words_data.append(single_word_tuple)

    rows = math.ceil(len(strange_words_data) / 2)
    tb_outside = Table(docx, rows=rows, cols=2, tb_name="外层框架")
    tb_outside.set_tb_colum_width(width=[230, 230])
    for t in qrcode_thread:
        t.join()
    for row in range(rows):
        for col in range(2):
            try:
                data_item = strange_words_data.pop(0)
                insert_content(row, col, data_item, qrcode_result)
            except IndexError:
                break
    docx.add_page_section()


@time_use
def section_5(docx: Word, json_data, *args, **kwargs):
    copy_word_list = [i['Meaning'] for i in json_data.get('StrangeWords')]
    random_copy_word_list = copy_word_list * 3
    shuffle(random_copy_word_list)
    first_copy_word_list = copy_word_list.copy()
    copy_word_list_add_num = [f"{i} ({idx})" for idx, i in enumerate(first_copy_word_list, start=1)]
    shuffle(copy_word_list_add_num)
    total_copy_word_list = copy_word_list_add_num + random_copy_word_list

    sub_title_maker(docx, "单词高效记", "会读会写才算真学会")
    tb = Table(docx, 1, 1, tb_name="高效速记", border=True)
    tb.set_tb_colum_width(0, 460)
    text = ["请在横线上写下对应单词,每格写一遍,尽量默写,默写不出的,可查阅生词表;\n",
            "书写时保持工整;每写完一个单词小声念一遍词义与单词。\n"]
    cell_p = tb.get_cell_paragraph(0, 0, align="left")
    p = ParagraphBase(cell_p)
    p.add_run_to_p(" 高效速记\n", size=16, bold=True)
    for t in text:
        p.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
        p.add_run_to_p(t, size=10)
    docx.add_blank_paragraph()

    total_count = len(total_copy_word_list)
    half_count = int(total_count / 2)
    tb2 = Table(docx, half_count + 1, 4, tb_name="高效速记下面的单词")
    for row in range(total_count):
        data = total_copy_word_list[row]
        if row < half_count:
            tb2.set_cell_text(row, 0, data, size=9, align="right", border=False, dq=2.5, dh=2)
            tb2.set_cell_text(row, 1, str(row + 1) + "." + "_" * 20, size=9, align="left", border=False, dq=2.5, dh=2)
        else:
            tb2.set_cell_text(row - half_count, 2, data, size=9, align="right", border=False, dq=2.5, dh=2)
            tb2.set_cell_text(row - half_count, 3, str(row + 1) + "." + "_" * 20, size=9, align="left", border=False, dq=2.5, dh=2)
    tb2.set_tb_colum_width(width=[120, 110] * 2)
    docx.add_page_section()


@time_use
def section_6(docx: Word, json_data, *args, **kwargs):
    example_sentence = [f"{index}. {i['Sentences'][0]['English']} ({i['Spell']})" for index, i in
                        enumerate(json_data['StrangeWords'], start=1) if i['Sentences']]
    sub_title_maker(docx, "例句填填看", "记词义,练拼写,学的快")
    tb = Table(docx, 1, 1, tb_name="例句填填看", border=True)
    tb.set_tb_colum_width(0, 460)
    text = ["请在横线上写下单词在例句中的词义,若想不起来,可随时到例句答案表中查看。\n",
            "参阅过答案的例句,请在句前的“□”中标记问号,以便复习回顾。\n",
            "单词有多个意思的,应只填写适合语境的意思。\n",
            "例句中有不熟悉的单词,请用斜线划掉,以便拍照报告给我们。"]
    cell_p = tb.get_cell_paragraph(0, 0, align="left", dq=10, dh=10)
    p = ParagraphBase(cell_p)
    for t in text:
        p.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
        p.add_run_to_p(t, size=10)
    for i in example_sentence:
        p = docx.add_blank_paragraph(dq=4, dh=4)
        p.add_run_to_p("□ ", size=12, font_name="宋体")
        p.add_run_to_p(i + "___________")
    docx.add_page_section()


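# Note on the article markup used by section_7 below: a token in the article text may end with a
# "[<meaning_id>]" tag (e.g. "comfort[123]", an illustrative value). The renderer strips the tag with
# the regex r"\[(\d+)]" and uses the id to decide the run style: bold for strange words, italic for
# explanatory (annotated) words, plain otherwise.
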
@time_use
def section_7(docx: Word, json_data, *args, **kwargs):
    def wanxing(index, article_single):
        article_id = article_single['Id']
        article_length = article_single['AllWordAmount']
        strange_words_ids = [i['MeanId'] for i in json_data['StrangeWords']]
        explanatory_words_ids = [i['MeaningId'] for i in article_single['ExplanatoryWords']]
        select_text = []
        for ques_index, candidates in enumerate(article_single['Questions'], start=1):
            single_select_text = ''
            for s in candidates['Candidates']:
                single_select_text += s['Label'] + '. '
                participle = s['Participle']
                if participle:
                    single_select_text += participle + ' \n'
                else:
                    text = s['Text']
                    single_select_text += text + ' \n'
            select_text.append(f"{ques_index}. {single_select_text}")
        all_select_text = "\n".join(select_text)
        article_main: str = article_single['English'] + "\n\n郑重提示:认真看完全文再看问题。\n\n" + all_select_text
        article_main_list = article_main.split(" ")
        explanatory_words = "\n\n".join(
            [f"{index}. {i['Spell']} [{i['SymbolsEn']}] [{i['SymbolsAm']}] {i['Meaning']}" for index, i in
             enumerate(article_single['ExplanatoryWords'], start=1)])

        sub_title_maker(docx, "真题强化练", "智能匹配难度,轻松提升阅读")
        tb = Table(docx, 1, 1, tb_name="真题强化练", border=True)
        tb.set_tb_colum_width(0, 460)
        text = ["练习中不认识的单词,尽量猜测词义,并用斜线划掉,以便拍照报告给我们。\n\n",
                "答题完毕后,可查字典,并注释在右侧批注区,不要在原文上注释。复习时不必通读全文,结合上下文能回忆起标记词的词义即可,想不起的再对照批注区。\n",
                "完形填空是优秀的测验题型,却不适合用于训练阅读能力和提升词汇量,所以建议在阅读能力(理解度、速度、难度)达标后再做完形填空题型练习。\n",
                "阅读能力达标的同学,按三遍法做完形填空,基本可以达到满分。三遍法要求如下:\n",
                "第一遍(理解):结合选项通读全文,以求理解文章主旨,但不动笔,以免形成成见。\n",
                "第二遍(填空):通读全文,从候选词中选出适宜项目,将完整的单词填入空格,使文章连贯。\n",
                "第三遍(核验):通读填空后的全文,确认上下文无矛盾之处。\n",
                "三遍通读均应记录起讫时间,并将速度纳入能力考核项目。能力合格者,考试中也应有充裕时间完成以上 3 遍通读。\n",
                "阅读计时从此处开始,请按顺序完成阅读,并注意记录时间。"]
        text2 = [f"全题长度(含问题及选项):{article_length}; 编号:{article_id};\n",
                 "第一遍(理解)开始时间:_________________ 第二遍(填空)开始时间:_________________\n",
                 "第三遍(核验)开始时间:_________________"]
        cell_p = tb.get_cell_paragraph(0, 0, align="left", dq=10, dh=10)
        p = ParagraphBase(cell_p)
        for t in text:
            p.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
            p.add_run_to_p(t, size=10)
        for t2 in text2:
            p.add_run_to_p(t2, size=10)
        docx.add_blank_paragraph()

        tb1 = Table(docx, 1, 3)
        tb1.set_tb_colum_width(width=[90, 370, 5])
        tb1_p = ParagraphBase(tb1.get_cell_paragraph(0, 0, align="left"))
        tb1_p.add_pic("make_docx_demo/static/lianxi1.jpg", width=2.5)
        tb1.set_cell_text(0, 1, f"篇幅(含问题选项):{article_length} 词 阅读开始时间:_____点_____分_____秒",
                          size=9.5, border=False, align="left")

        tb2 = Table(docx, rows=1, cols=2, border=True, tb_name="完形填空")
        tb2.set_tb_colum_width(width=[320, 140])
        tb2_p = ParagraphBase(tb2.get_cell_paragraph(0, 0, align="left"))
        for w in article_main_list:
            word = re.search(r"\[(\d+)]", w)
            if word:
                w = w[:w.find('[')]
                meaning_id = int(word.group(1))
                if meaning_id in strange_words_ids:
                    tb2_p.add_run_to_p(w + ' ', size=10.5, bold=True)
                elif meaning_id in explanatory_words_ids:
                    tb2_p.add_run_to_p(w + ' ', size=10.5, italic=True)
                else:
                    tb2_p.add_run_to_p(w + ' ', size=10.5)
            else:
                tb2_p.add_run_to_p(w + ' ', size=10.5)
        tb2.set_cell_text(0, 1, explanatory_words, size=10.5, font_color=(80, 80, 80), align="left")
        docx.add_blank_paragraph()
        tail_zhushi = """第一遍(理解)结束时间:__________用时:____秒
第二遍(填空)结束时间:__________用时:____秒
第三遍(核验)结束时间:__________用时:____秒
总计用时:____分____秒
"""
        docx.add_paragraph(tail_zhushi, size=10.5)
        docx.add_blank_paragraph()

    def reading(index, article_single):
        def single_yuedu(index, a):
            article_id = a['Id']
            article_length = a['AllWordAmount']
            strange_words_ids = set()
            explanatory_words_ids = set()
            bold_word = set()
            italics_word = set()
            italics_index_dict = {}
            for i in json_data['StrangeWords']:
                strange_words_ids.add(i['MeanId'])
                bold_word.add(i['Spell'])
                bold_word.update([change_word['Spell'] for change_word in i['WordChanges']])
            for italics_index, ii in enumerate(a['ExplanatoryWords'], start=1):
                explanatory_words_ids.add(ii['MeaningId'])
                italics_word.add(ii['Spell'])
                if 'WordChanges' in ii:
                    italics_word.update([change_word['Spell'] for change_word in ii['WordChanges']])
                    italics_index_dict.update({change_word['Spell']: f"[{italics_index}]" for change_word in ii['WordChanges']})
                italics_index_dict[ii['MeaningId']] = f"[{italics_index}]"
                italics_index_dict[ii['Spell']] = f"[{italics_index}]"

            select_text = []
            for ques_index, candidates in enumerate(a['Questions'], start=1):
                single_select_text = ''
                subject = candidates['Subject'] + '\n'
                for s in candidates['Candidates']:
                    single_select_text += s['Label'] + '. '
                    participle = s['Participle']
                    if participle:
                        single_select_text += participle + ' \n'
                    else:
                        text = s['Text']
                        single_select_text += text + ' \n'
                select_text.append(str(ques_index) + ". " + subject + single_select_text)
            all_select_text = "\n".join(select_text)
            article_main: str = a['English'] + "\n\n郑重提示:认真看完全文再看问题。\n" + all_select_text
            article_main_list = split_text_to_word_punctuation(article_main)
            explanatory_words = "\n\n".join(
                [f"{index}. {i['Spell']}\n [{i['SymbolsEn']}] [{i['SymbolsAm']}]\n {i['Meaning']}" for index, i in
                 enumerate(a['ExplanatoryWords'], start=1)])

            tb1 = Table(docx, 1, 3, tb_name="图片小标题")
            tb1.set_tb_colum_width(width=[90, 370, 5])
            tb1_p = ParagraphBase(tb1.get_cell_paragraph(0, 0, align="left"))
            tb1_p.add_pic(f"make_docx_demo/static/lianxi{index}.jpg", width=2.5)
            tb1.set_cell_text(0, 1, f"篇幅(含问题选项):{article_length} 词 阅读开始时间:_____点_____分_____秒",
                              size=9.5, border=False, align="left")

            tb2 = Table(docx, rows=1, cols=2, border=True, tb_name="阅读")
            tb2.set_tb_colum_width(width=[320, 140])
            tb2_p = ParagraphBase(tb2.get_cell_paragraph(0, 0, align="left"))
            for w in article_main_list:
                word = re.search(r"\[(\d+)]", w)
                if word:
                    w = w[:w.find('[')]
                    meaning_id = int(word.group(1))
                    if meaning_id in strange_words_ids:
                        tb2_p.add_run_to_p(w + ' ', size=10.5, bold=True)
                    elif meaning_id in explanatory_words_ids:
                        italics_index_str = italics_index_dict[meaning_id]
                        tb2_p.add_run_to_p(w + f'{italics_index_str} ', size=10.5, italic=True)
                    else:
                        tb2_p.add_run_to_p(w + ' ', size=10.5)
                else:
                    if w in bold_word:
                        tb2_p.add_run_to_p(w + ' ', size=10.5, bold=True)
                    elif w in italics_word:
                        italics_index_str = italics_index_dict[w]
                        tb2_p.add_run_to_p(w + f'{italics_index_str} ', size=10.5, italic=True)
                    else:
                        tb2_p.add_run_to_p(w + ' ', size=10.5)
            tb2.set_cell_text(0, 1, explanatory_words, size=10.5, font_color=(80, 80, 80), align="left", centre=False, line_spacing=300)
            docx.add_blank_paragraph()
            tail_zhushi = """完成时间:_____点_____分_____秒,本篇用时:_____秒。"""
            docx.add_paragraph(tail_zhushi, size=10.5)
            docx.add_blank_paragraph()

        def top_header():
            sub_title_maker(docx, "阅读提升练", "智能匹配难度,轻松提升阅读", "鲍利提分, 高效学习专家")
            tb = Table(docx, 1, 1, tb_name="真题强化练", border=True)
            tb.set_tb_colum_width(0, 460)
            text = ["阅读中不认识的单词,尽量猜测词义,并用斜线划掉,以便拍照报告给我们。\n",
                    "读完全文后,可查字典,并抄在右侧批注区,不要在原文上注释。复习时不必通读全文,结合上下文能回忆起标记词的词义即可,想不起的再对照批注区。\n",
                    "阅读训练的目的是提高对英语词、句、篇的敏感度,答题只是检验学习成果的手段,所以切勿为了快速做题而跳读、略读。阅读速度是很重要的训练指标,请在确实理解词句的基础上尽量提高阅读速度。只要平时扎实阅读,考试中不会没有时间认真读题。\n",
                    "阅读计时从此处开始,请按顺序完成阅读,并注意记录时间。\n\n",
                    "生词划线示例:competitions she once attended. Incuding her years of experience"
                    ]
            cell_p = tb.get_cell_paragraph(0, 0, align="left", dq=10, dh=10)
            pp = ParagraphBase(cell_p)
            for index_t, t in enumerate(text):
                if index_t == len(text) - 1:
                    pp.add_run_to_p(t, size=12)
                    pp.add_rectangle('', x=115, y=170, width=55, height=25, boder_color='000000', shape_type='line')
                    pp.add_rectangle('', x=298, y=170, width=55, height=25, boder_color='000000', shape_type='line')
                else:
                    pp.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
                    pp.add_run_to_p(t, size=10)
            docx.add_blank_paragraph()

        # ---- per-article rendering starts here ----
        if index == 1:
            top_header()
        single_yuedu(index, article_single)

    def seven_to_five(index, article_single):
        article_id = article_single['Id']
        article_length = article_single['AllWordAmount']
        strange_words_ids = [i['MeanId'] for i in json_data['StrangeWords']]
        explanatory_words_ids = [i['MeaningId'] for i in article_single['ExplanatoryWords']]
        select_text = []
        for ques_index, s_candidates in enumerate(article_single['Candidates'], start=1):
            single_select_text = ''
            single_select_text += s_candidates['Label'] + '. '
            participle = s_candidates['Participle']
            if participle:
                single_select_text += participle
            else:
                text = s_candidates['Text']
                single_select_text += text
            select_text.append(f"{single_select_text}")
        all_select_text = "\n".join(select_text)
        article_main: str = article_single['English'] + "\n\n郑重提示:认真看完全文再看问题。\n\n" + all_select_text
        article_main_list = article_main.split(" ")
        explanatory_words = "\n\n".join(
            [f"{index}. {i['Spell']} [{i['SymbolsEn']}] [{i['SymbolsAm']}] {i['Meaning']}" for index, i in
             enumerate(article_single['ExplanatoryWords'], start=1)])

        sub_title_maker(docx, "阅读提升练", "智能匹配难度,轻松提升阅读", "鲍利提分, 高效学习专家")
        tb = Table(docx, 1, 1, tb_name="真题强化练", border=True)
        tb.set_tb_colum_width(0, 460)
        text = ["阅读中不认识的单词,尽量猜测词义,并用斜线划掉,以便拍照报告给我们。\n",
                "读完全文后,可查字典,并抄在右侧批注区,不要在原文上注释。复习时不必通读全文,结合上下文能回忆起标记词的词义即可,想不起的再对照批注区。\n",
                "7 选 5 题型是测试学生对文章理解程度的好题型,但因打破了文章的连贯性,故不是训练阅读能力的好素材。建议学生在阅读基本能力(理解度、速度、难度)达标后再开展 7 选 5 题型训练。若不能胜任本练习,请回到词汇与阅读训练,先打好基础。\n",
                "阅读计时从此处开始,请按顺序完成阅读,并注意记录时间。"]
        cell_p = tb.get_cell_paragraph(0, 0, align="left", dq=10, dh=10)
        p = ParagraphBase(cell_p)
        for t in text:
            p.add_run_to_p("☆ ", size=10, font_name="MS Gothic")
            p.add_run_to_p(t, size=10)
        docx.add_blank_paragraph()

        tb1 = Table(docx, 1, 3, tb_name="图片小标题")
        tb1.set_tb_colum_width(width=[90, 370, 5])
        tb1_p = ParagraphBase(tb1.get_cell_paragraph(0, 0, align="left"))
        tb1_p.add_pic("make_docx_demo/static/lianxi1.jpg", width=2.5)
        tb1.set_cell_text(0, 1, f"篇幅(含问题选项):{article_length} 词 阅读开始时间:_____点_____分_____秒",
                          size=9.5, border=False, align="left")

        tb2 = Table(docx, rows=1, cols=2, border=True, tb_name="七选五")
        tb2.set_tb_colum_width(width=[320, 140])
        tb2_p = ParagraphBase(tb2.get_cell_paragraph(0, 0, align="left"))
        for w in article_main_list:
            word = re.search(r"\[(\d+)]", w)
            if word:
                w = w[:w.find('[')]
                meaning_id = int(word.group(1))
                if meaning_id in strange_words_ids:
                    tb2_p.add_run_to_p(w + ' ', size=10.5, bold=True)
                elif meaning_id in explanatory_words_ids:
                    tb2_p.add_run_to_p(w + ' ', size=10.5, italic=True)
                else:
                    tb2_p.add_run_to_p(w + ' ', size=10.5)
            else:
                tb2_p.add_run_to_p(w + ' ', size=10.5)
        tb2.set_cell_text(0, 1, explanatory_words, size=10.5, font_color=(80, 80, 80), align="left")
        docx.add_blank_paragraph()

    # Decide the question type and dispatch to the matching renderer.
    all_article_length = 0
    for index, article_single in enumerate(json_data['Articles'], start=1):
        article_type = article_single['Category']
        article_type_select = {1: reading, 2: wanxing, 3: seven_to_five}
        assert article_type in article_type_select
        article_type_select[article_type](index, article_single)
        article_length = article_single['AllWordAmount']
        all_article_length += article_length
    tail_zhushi = f"""阅读计时在此结束。
今日总计阅读量 {all_article_length} 词,用时________秒,整份学案共有_______个题目答对。"""
    docx.add_paragraph(tail_zhushi, size=10.5)
    docx.add_blank_paragraph()
    docx.add_page_section()


@time_use
def section_9(docx: Word, json_data, *args, **kwargs):
    def wanxing(index, article_count, article_single):
        chinese_article = article_single['Chinese']
        all_analysis = ''
        docx.add_paragraph("答案和解析", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        for ques_index, question_item in enumerate(article_single['Questions'], start=1):
            analysis = question_item['Analysis'].strip()
            abcd_label = ''
            candidates = question_item['Candidates']
            for abcd_selected in candidates:
                if abcd_selected['IsRight']:
                    abcd_label += abcd_selected['Label'].strip()
            all_analysis += f"{ques_index}.\n{abcd_label} {analysis}\n"
        docx.add_paragraph(all_analysis, size=9)
        docx.add_paragraph("全文参考译文", chinese_font_name="微软雅黑", dq=15, dh=5, bold=True)
        docx.add_paragraph(chinese_article, size=9, dq=5, dh=5, line_spacing=300)

    def reading(index, article_count, article_single):
        """index: passed in by the caller, starting from 1."""
        all_analysis = ''
        all_difficult_sentences = []
        chinese_article = article_single['Chinese']
        questions = article_single['Questions']
        for ques_index, question_item in enumerate(questions, start=1):
            analysis = question_item['Analysis'].strip("\n")
            abcd_label = ''
            candidates = question_item['Candidates']
            for abcd_selected in candidates:
                if abcd_selected['IsRight']:
                    abcd_label += abcd_selected['Label'].strip("\n")
            new_line = "" if ques_index == len(questions) else "\n"
            all_analysis += f"{ques_index}.{abcd_label} {analysis}{new_line}"
        if index != article_count:
            all_analysis += '\n'
        docx.add_paragraph(f"Passage {index}", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True, size=16)
        docx.add_paragraph("全文参考译文", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        docx.add_paragraph(chinese_article, size=9)
        docx.add_paragraph("答案和解析", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        docx.add_paragraph(all_analysis, size=9)

    def seven_to_five(index, article_count, article_single):
        chinese_article = article_single['Chinese']
        all_analysis = ''
        docx.add_paragraph("答案和解析", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        for q_index, question_item in enumerate(article_single['Questions'], start=1):
            analysis = question_item['Analysis']
            abcd_label = ''
            candidates = question_item['Candidates']
            for abcd_selected in candidates:
                if abcd_selected['IsRight']:
                    abcd_label += abcd_selected['Label']
            all_analysis += f"{q_index}.{abcd_label} {analysis}\n"
        docx.add_paragraph(all_analysis, size=9)
        docx.add_paragraph("全文参考译文", chinese_font_name="微软雅黑", dq=15, dh=5, bold=True)
        docx.add_paragraph("Passage 1", chinese_font_name="微软雅黑", dq=5, dh=5, bold=True)
        docx.add_paragraph(chinese_article, size=9, dq=5, dh=5, line_spacing=300)

    # Decide the question type and dispatch to the matching renderer.
    sub_title_maker(docx, "解题自主纠", "自主学习,逐步养成良好学习习惯", "鲍利提分,你的智能教练")
    articles = json_data['Articles']
    article_count = len(articles)
    for index, article_single in enumerate(articles, start=1):
        article_type = article_single['Category']
        article_type_select = {1: reading, 2: wanxing, 3: seven_to_five}
        assert article_type in article_type_select
        article_type_select[article_type](index, article_count, article_single)
    docx.add_docx_component("make_docx_demo/word_component/blank.docx")
    docx.add_page_section()


@time_use
def section_10(docx: Word, json_data, scanpage_format, *args, **kwargs):
    docx.add_paragraph("☆ 请写出词义,再对照筛查表批改。词义顺序可互换;答案意思相符即可,不要求一字不差。批改结果誊抄到筛查表。", size=9, dq=2,
                       dh=2)
    tb = Table(docx, 50, 4, tb_name="写出词义")
    tb.set_tb_colum_width(width=[110, 120, 110, 120])
    for row in range(50):
        tb.set_cell_text(row, 0, str(row + 1) + " " + "rich", size=8.5, dq=1, dh=1, border=False)
        tb.set_cell_text(row, 1, "□________________", size=10, dq=0, dh=0, border=False)
        tb.set_cell_text(row, 2, str(row + 51) + " " + "rich", size=8.5, dq=1, dh=1, border=False)
        tb.set_cell_text(row, 3, "□________________", size=10, dq=0, dh=0, border=False)
    tb.set_row_height(13)
    docx.add_page_break()

    docx.add_paragraph("☆ 请在需要加强学习的词义前方框中划线,两头各超出 1 毫米为宜(示例:□☑52.example);请保持本表整洁并交回。", size=9, dq=2,
                       dh=2)
    tb2 = Table(docx, 25, 8, tb_name="划线表")
    tb2.set_tb_colum_width(width=[57.5] * 8)
    docx.add_blank_paragraph(dq=5, dh=5)
    for row in range(25):
        tb2.set_cell_text(row, 0, "[01] Tuesday", size=8.5, border=False, dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 1, "星期二", size=8.5, border="right", dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 2, "[01] Tuesday", size=8.5, border=False, dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 3, "星期二", size=8.5, border="right", dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 4, "[01] Tuesday", size=8.5, border=False, dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 5, "星期二", size=8.5, border="right", dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 6, "[01] Tuesday", size=8.5, border=False, dq=1.2, dh=1.2)
        tb2.set_cell_text(row, 7, "星期二", size=8.5, border=False, dq=1.2, dh=1.2)
    docx.set_page_column(5)
    docx.add_docx_component("make_docx_demo/word_component/component.docx")
    docx.end_page_column()
    if scanpage_format == 3:
        docx.add_page_section()


@time_use
def two_check_page(docx: Word, json_data, *args, **kwargs):
    def empty_filter_page(class_name, student_name, page_title, page_sub_title, t_datetime, word_data_list):
        page_sub_title = "词汇训练"
        # Pad to an even count so the list splits cleanly into two columns.
        if len(word_data_list) % 2 != 0:
            word_data_list.append("")
        tb = Table(docx, 1, 3, tb_name="头部三元素")
        tb.set_tb_colum_width(width=[40, 100, 100])
        tb.set_tb_colum_width(0, 100)
        tb.set_cell_text(0, 0, f"鲍利提分", border=False, size=16, bold=True, chinese_font_name="黑体")
        tb.set_cell_text(0, 1, f"{class_name}\n{student_name}", border=False, size=8, dh=2)
        tb.set_cell_text(0, 2, f"{page_title}\n{page_sub_title}", border=False, size=8, dh=2)
        docx.add_paragraph("请写出词义,再对照筛查表批改。词义顺序可互换;答案意思相符即可,不要求一字不差。批改结果誊抄到筛查表。", size=9)
        tb = Table(docx, rows=0, cols=4, tb_name="第一页筛查表")
        tb.set_all_border_fastly(xml=True, outside_side_border=True, outside_side_border_size=5)
        half_count = int(len(word_data_list) / 2)
        for index, row in enumerate(range(half_count)):
            first_word, second_word = word_data_list[row], word_data_list[row + half_count]
            cell3 = f"{index + 1 + half_count}. {second_word}" if second_word else ""
            cell4 = "□ ___________________________" if second_word else ""
            data = [f"{index + 1}. {first_word}", "□ ___________________________", cell3, cell4]
            tb.add_table_row_data_xml_fastly(data, font_size=[10.5, 9, 10.5, 9])
        tb.set_row_height(13.8)
        tb.set_table_width_xml([2124, 3257, 2140, 3257])
        blank_count = " " * 80
        p = docx.add_blank_paragraph(dq=5)
        p.add_run_to_p(f"{t_datetime} {page_title}-{page_sub_title}{blank_count}", size=8, chinese_font_name="仿宋", font_name="仿宋")
        docx.add_page_break()

    def filter_table_page(page_id, class_name, student_name, page_title, page_sub_title, t_datetime,
                          foot_description, foot_description2, article_type, word_data_list2):
        page_sub_title = "词汇训练"
        if len(word_data_list2) % 2 != 0:
            word_data_list2.append(["", ""])
        tb = Table(docx, 1, 5, tb_name="头部五元素")
        tb.set_tb_colum_width(width=[80, 100, 120, 150, 70])
        tb.set_cell_text(0, 0, f"鲍利提分", border=False, size=16, bold=True, chinese_font_name="黑体")
        tb.set_cell_text(0, 1, f"{class_name}\n{student_name}", border=False, size=8)
        tb.set_cell_text(0, 2, f"{page_id}", border=False, size=16, dh=2, bold=True, font_name="黑体")
        tb.set_cell_text(0, 3, f"{page_title}\n{page_sub_title}", border=False, size=8)
        p_cell = tb.get_cell_paragraph(0, 4)
        p = ParagraphBase(p_cell)
        io_image = qrcode_maker(f"{page_id}")
        p.add_pic(io_image, width=Inches(0.6))
        io_image.close()
        pp = docx.add_blank_paragraph()
        pp.add_run_to_p("下述词汇相应的词义未掌握的请划掉,并将整个页面拍照给我们,以便记录词汇掌握数据。示例:comfort 4. 舒适,安逸", size=9)
        pp.add_rectangle('', x=540, y=10, width=55, height=0, boder_color='000000', shape_type='line')
        tb = Table(docx, rows=0, cols=4, tb_name="第二页筛查表")
        # Numbering runs down the left column first, then continues down the right column.
        total_row = int(len(word_data_list2) / 2)
        for row in range(total_row):
            spell1, meaning1 = word_data_list2[row]
            spell2, meaning2 = word_data_list2[total_row + row]
            cell3 = f"{spell2}" if spell2 else ""
            cell4 = f"{total_row + row + 1}. {meaning2}" if meaning2 else ""
            data = [f"{spell1}", f"{row + 1}. {meaning1}", cell3, cell4]
            tb.add_table_row_data_xml_fastly(data, font_size=[10.5, 9, 10.5, 9], alignment=['right', 'left', 'right', 'left'])
        tb.set_row_height(13.8)
        tb.set_table_width_xml([2124, 3257, 2140, 3257])
        docx.add_paragraph(f"{t_datetime} {page_title}-{page_sub_title}{foot_description}", size=8, chinese_font_name="仿宋",
                           font_name="仿宋", dq=5)

    student_name = json_data.get("StudentInfo").get("StudentName", '')
    class_name = json_data.get("StudentInfo").get("ClassName", '').replace("词汇突击", "")
    t_datetime = time.strftime("%Y-%m-%d %H:%M", time.localtime())
    article_type = 1
    try:
        article_type = json_data['WordAndArticleContents'][0]['Articles'][0]['Category']
    except Exception as e:
        log_err_e(e, "学案类型不存在就取1,词汇突击里面只有阅读理解")

    # ---------------------------------------------------------------------------------
    screening_scanPages = json_data['ScreeningScanPages']
    for index, page in enumerate(screening_scanPages, start=1):
        page_id = str(page['PageId']).rjust(11, "0")
        page_title = page['Title']
        page_sub_title = page['SubTitle']
        foot_description = page['FootDescription']
        foot_description2 = page['FootDescription2']
        word_data_list1 = []
        word_data_list2 = []
        for i in page['FilterTable']['Items']:
            word_data_list1.append(i['Spell'])
            word_data_list2.append([i['Spell'], i['Meaning']])
        filter_table_page(page_id, class_name, student_name, page_title, page_sub_title, t_datetime,
                          foot_description, foot_description2, article_type, word_data_list2)
        if index != len(screening_scanPages):
            pass
        docx.add_page_break()


def old_two_check_page(docx: Word, json_data, **kwargs):
    def empty_filter_page(class_name, student_name, page_title, page_sub_title, t_datetime, word_data_list):
        if len(word_data_list) % 2 != 0:
            word_data_list.append("")
        tb = Table(docx, 1, 3, tb_name="头部三元素")
        tb.set_tb_colum_width(width=[140, 100, 100])
        tb.set_cell_text(0, 0, f"鲍利提分", border=False, size=16, bold=True, chinese_font_name="黑体")
        tb.set_cell_text(0, 1, f"{class_name}\n{student_name}", border=False, size=8, dh=2)
        tb.set_cell_text(0, 2, f"{page_title}\n{page_sub_title}", border=False, size=8, dh=2)
        docx.add_paragraph("请写出词义,再对照筛查表批改。词义顺序可互换;答案意思相符即可,不要求一字不差。批改结果誊抄到筛查表。", size=9)
        tb = Table(docx, rows=0, cols=4, tb_name="第一页筛查表")
        tb.set_all_border_fastly(xml=True, outside_side_border=True, outside_side_border_size=5)
        half_count = int(len(word_data_list) / 2)
        for index, row in enumerate(range(half_count)):
            first_word, second_word = word_data_list[row], word_data_list[row + half_count]
            cell3 = f"{index + 1 + half_count}. {second_word}" if second_word else ""
            cell4 = "□ ___________________________" if second_word else ""
            data = [f"{index + 1}. {first_word}", "□ ___________________________", cell3, cell4]
            tb.add_table_row_data_xml_fastly(data, font_size=[10.5, 9, 10.5, 9])
        tb.set_row_height(13.8, first_row_h=6)
        tb.set_table_width_xml([2124, 3257, 2140, 3257])
        blank_count = " " * 80
        p = docx.add_blank_paragraph(dq=5)
        p.add_run_to_p(f"{t_datetime} {page_title}-{page_sub_title}{blank_count}", size=8, chinese_font_name="仿宋", font_name="仿宋")
        docx.add_page_break()

    def filter_table_page(page_id, class_name, student_name, page_title, page_sub_title, t_datetime,
                          foot_description, foot_description2, article_type, word_data_list2):
        if len(word_data_list2) % 2 != 0:
            word_data_list2.append(["", ""])
        tb = Table(docx, 1, 5, tb_name="头部五元素")
        tb.set_tb_colum_width(width=[80, 100, 120, 150, 70])
        tb.set_cell_text(0, 0, f"鲍利提分", border=False, size=16, bold=True, chinese_font_name="黑体")
        tb.set_cell_text(0, 1, f"{class_name}\n{student_name}", border=False, size=8)
        tb.set_cell_text(0, 2, f"{page_id}", border=False, size=16, dh=2, bold=True, font_name="黑体")
        tb.set_cell_text(0, 3, f"{page_title}\n{page_sub_title}", border=False, size=8)
        p_cell = tb.get_cell_paragraph(0, 4)
        p = ParagraphBase(p_cell)
        page_id = int(page_id)
        io_image = qrcode_maker(f"{page_id}")
        p.add_pic(io_image, width=Inches(0.6))
        io_image.close()
        pp = docx.add_blank_paragraph()
        p_base = ParagraphBase(pp)
        p_base.p.add_run_to_p("请在需要加强学习的词义前方框中划线,两头各超出1毫米为宜(示例:", size=9)
        p_base.p.add_pic("make_docx_demo/static/line_example.png", width=Inches(0.8))
        p_base.p.add_run_to_p(" );请保持本表整洁并交回。", size=9)
        tb = Table(docx, rows=0, cols=4, tb_name="第二页筛查表")
        # Numbering runs down the left column first, then continues down the right column.
        total_row = int(len(word_data_list2) / 2)
        for row in range(total_row):
            spell1, meaning1 = word_data_list2[row]
            spell2, meaning2 = word_data_list2[total_row + row]
            cell1 = f"{row + 1}. {spell1}" if spell1 else ""
            cell2 = f"□ {meaning1}" if meaning1 else ""
            cell3 = f"{total_row + row + 1}. {spell2}" if spell2 else ""
            cell4 = f"□ {meaning2}" if meaning2 else ""
            data = [cell1, cell2, cell3, cell4]
            tb.add_table_row_data_xml_fastly(data, font_size=[10.5, 9, 10.5, 9])
        tb.set_all_border_fastly(xml=True, outside_side_border=True, outside_side_border_size=5)
        tb.set_row_height(13.6, first_row_h=6)
        tb.set_table_width_xml([2124, 3257, 2140, 3257])
        if article_type == 1:
            docx.add_paragraph(f"{t_datetime} {page_title}-{page_sub_title}{foot_description}", size=8, chinese_font_name="仿宋",
                               font_name="仿宋", dq=5)
            docx.add_paragraph(foot_description2, align="right", size=8, chinese_font_name="仿宋")
        else:
            docx.add_paragraph(f"{t_datetime} {page_title}-{page_sub_title}{foot_description}", size=8, chinese_font_name="仿宋",
                               font_name="仿宋", dq=5)

    student_name = json_data.get("StudentInfo").get("StudentName", '')
    class_name = json_data.get("StudentInfo").get("ClassName", '')
    t_datetime = time.strftime("%Y-%m-%d %H:%M", time.localtime())
    article_type = json_data['WordAndArticleContents'][0]['Articles'][0]['Category']
    is_add_empty_filter_page = json_data['Config']['AddEmptyFilterPage']

    # ---------------------------------------------------------------------------------
    for index, page in enumerate(json_data['ScreeningScanPages'], start=1):
        page_id = str(page['PageId']).rjust(11, "0")
        if index >= 2:
            docx.add_page_break()
        page_title = page['Title']
        page_sub_title = page['SubTitle']
        foot_description = page['FootDescription']
        foot_description2 = page['FootDescription2']
        word_data_list1 = []
        word_data_list2 = []
        item_list: list = page['FilterTable']['Items']
        item_count = len(item_list)
        # Pad the item list out to 100 entries.
        if item_count < 100:
            item_list.extend([{"Spell": "", "Meaning": ""} for _ in range(100 - item_count)])
        for i in page['FilterTable']['Items']:
            word_data_list1.append(i['Spell'])
            word_data_list2.append([i['Spell'], i['Meaning']])
        if is_add_empty_filter_page:
            empty_filter_page(class_name, student_name, page_title, page_sub_title, t_datetime, word_data_list1)
        filter_table_page(page_id, class_name, student_name, page_title, page_sub_title, t_datetime,
                          foot_description, foot_description2, article_type, word_data_list2)


@time_use
def other(docx, json_data, *args, **kwargs):
    sections = docx.doc.sections
    for section in sections[:-1]:
        section.top_margin = Inches(0.3)
        section.bottom_margin = Inches(0.4)
        section.left_margin = Inches(0.8)
        section.right_margin = Inches(0.8)
        section.footer_distance = 180000
    sections[-1].top_margin = Inches(0.1)
    sections[-1].bottom_margin = Inches(0.1)
    sections[-1].left_margin = Inches(0.5)
    sections[-1].right_margin = Inches(0.5)
    header_maker(docx, json_data)


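# Assembly overview (as wired below in start_make_word): section_1 builds the cover page and
# section_4 the word-list instructions; then, for each exercise in WordAndArticleContents,
# section_4_1 (word cards), section_5 (spelling drill), section_6 (sentence fill-in), section_7
# (reading / cloze / 7-select-5 practice) and section_9 (answers and translations) are appended.
# scanpage_format chooses which screening/check pages follow; document_format == 1 returns the
# .docx path, otherwise the result is converted to PDF first.
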
def start_make_word(json_data, document_format, scanpage_format):
    parent_path = "make_docx_demo/file_result/"
    if not os.path.exists(parent_path):
        os.makedirs(parent_path)
    try:
        exercise_id = json_data['ExerciseId']
        docx = Word(save_file_name=f"{parent_path}{exercise_id}.docx",
                    start_template_name="make_docx_demo/word_component/start_template.docx")
        section_1(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        section_4(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        for exercise_json in json_data['WordAndArticleContents']:
            section_4_1(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)
            section_5(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)
            section_6(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)
            section_7(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)
            section_9(docx=docx, json_data=exercise_json, scanpage_format=scanpage_format)
        if scanpage_format == 1:
            two_check_page(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
            old_two_check_page(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        elif scanpage_format == 2:
            section_10(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        elif scanpage_format == 3:
            section_10(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
            two_check_page(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        other(docx=docx, json_data=json_data, scanpage_format=scanpage_format)
        docx.save_docx()
        if document_format == 1:
            return f"{parent_path}{exercise_id}.docx"
        else:
            convert_word_to_pdf(f"{parent_path}{exercise_id}")
            return f"{parent_path}{exercise_id}.pdf"
    except Exception as e:
        log_err_e(e)


if __name__ == '__main__':
    import os

    t = time.time()
    os.chdir("..")
    start_make_word(test_json1, 1, 1)
    print(time.time() - t)