mainspacy.py
# -*- coding: utf-8 -*-
"""Grammar highlighter powered by spaCy + benepar constituency parsing."""
import asyncio
import html
import re
from collections import Counter
from dataclasses import dataclass, field
from html.parser import HTMLParser
from string import Template
from typing import Any, Dict, List, Optional, Tuple
from urllib import error as urllib_error, request as urllib_request
from urllib.parse import urlparse, urlunparse

import benepar
import httpx
import spacy
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from pydantic import BaseModel, Field
from spacy.cli import download as spacy_download
from spacy.language import Language
from spacy.tokens import Span as SpacySpan, Token as SpacyToken

from style_config import SENTENCE_HELPER_ENABLED, STYLE_BLOCK

BENE_PAR_WARNING: Optional[str] = None
HAS_BENEPAR: bool = False  # Tracks whether benepar was successfully attached.


def _ensure_benepar_warning(message: str) -> None:
    """Record a warning once when benepar annotations are unavailable."""
    global BENE_PAR_WARNING
    if not BENE_PAR_WARNING:
        BENE_PAR_WARNING = message


def _load_spacy_pipeline(
    model_name: str = "en_core_web_sm", benepar_model: str = "benepar_en3"
) -> Language:
    global BENE_PAR_WARNING, HAS_BENEPAR
    BENE_PAR_WARNING = None
    HAS_BENEPAR = False
    try:
        nlp = spacy.load(model_name)
    except OSError:
        try:
            spacy_download(model_name)
            nlp = spacy.load(model_name)
        except Exception as exc:  # pragma: no cover - install helper
            raise RuntimeError(
                f"spaCy model '{model_name}' is required. Install via `python -m spacy download {model_name}`."
            ) from exc
    # Ensure sentence segmentation is available.
    pipe_names = set(nlp.pipe_names)
    if not ({"parser", "senter", "sentencizer"} & pipe_names):
        try:
            nlp.add_pipe("sentencizer")
        except Exception:
            pass  # If already present or unavailable, ignore.
    # Try to attach benepar.
    if "benepar" not in nlp.pipe_names:
        try:
            nlp.add_pipe("benepar", config={"model": benepar_model}, last=True)
            HAS_BENEPAR = True
        except ValueError:
            try:
                benepar.download(benepar_model)
                nlp.add_pipe("benepar", config={"model": benepar_model}, last=True)
                HAS_BENEPAR = True
            except Exception as exc:  # pragma: no cover - install helper
                HAS_BENEPAR = False
                BENE_PAR_WARNING = (
                    "Benepar model '{model}' unavailable ({err}). Falling back to dependency-based spans."
                ).format(model=benepar_model, err=exc)
        except Exception as exc:
            HAS_BENEPAR = False
            BENE_PAR_WARNING = (
                "Failed to attach benepar parser to spaCy pipeline. Falling back to dependency-based spans ({err})."
            ).format(err=exc)
    else:
        HAS_BENEPAR = True
    return nlp


try:
    NLP: Optional[Language] = _load_spacy_pipeline()
    NLP_LOAD_ERROR: Optional[Exception] = None
except Exception as exc:  # pragma: no cover - import-time diagnostics
    NLP = None
    NLP_LOAD_ERROR = exc


class AnalyzeRequest(BaseModel):
    text: str = Field(..., description="Raw English text to highlight")


class AnalyzeResponse(BaseModel):
    highlighted_html: str


@dataclass
class Token:
    text: str
    start: int
    end: int
    kind: str  # 'word' | 'space' | 'punct'


@dataclass
class Span:
    start_token: int
    end_token: int
    cls: str
    attrs: Optional[Dict[str, str]] = None


@dataclass
class SentenceSummary:
    subjects: List[str] = field(default_factory=list)
    predicates: List[str] = field(default_factory=list)
    objects: List[str] = field(default_factory=list)
    complements: List[str] = field(default_factory=list)
    clauses: List[str] = field(default_factory=list)
    clause_functions: List[str] = field(default_factory=list)
    connectors: List[str] = field(default_factory=list)
    residual_roles: List[str] = field(default_factory=list)
    sentence_length: int = 0


TOKEN_REGEX = re.compile(
    r"""
    (?:\s+)
    |(?:\d+(?:[\.,]\d+)*)
    |(?:\w+(?:[-']\w+)*)
    |(?:.)
    """,
    re.VERBOSE | re.UNICODE,
)
WORD_LIKE_RE = re.compile(r"\w+(?:[-']\w+)*\Z", re.UNICODE)
NUMBER_RE = re.compile(r"\d+(?:[\.,]\d+)*\Z", re.UNICODE)
PARAGRAPH_BREAK_RE = re.compile(r"(?:\r?\n[ \t]*){2,}")
SUBJECT_DEPS = {"nsubj", "nsubjpass", "csubj", "csubjpass"}
DIRECT_OBJECT_DEPS = {"dobj", "obj"}
INDIRECT_OBJECT_DEPS = {"iobj", "dative"}
COMPLEMENT_DEPS = {"attr", "oprd", "acomp", "ccomp", "xcomp"}
ADVERBIAL_DEPS = {"advmod", "npadvmod", "advcl", "obl", "prep", "pcomp"}
RELATIVE_PRONOUNS = {"which", "that", "who", "whom", "whose", "where", "when"}
SUBORDINATORS_TO_FUNCTION = {
    "when": "TIME",
    "while": "TIME",
    "after": "TIME",
    "before": "TIME",
    "until": "TIME",
    "as": "TIME",
    "once": "TIME",
    "since": "TIME",
    "because": "REASON",
    "now that": "REASON",
    "if": "CONDITION",
    "unless": "CONDITION",
    "provided": "CONDITION",
    "provided that": "CONDITION",
    "although": "CONCESSION",
    "though": "CONCESSION",
    "even though": "CONCESSION",
    "whereas": "CONCESSION",
    "so that": "RESULT",
    "so": "RESULT",
    "lest": "PURPOSE",
    "in order that": "PURPOSE",
}
FINITE_VERB_TAGS = {"VBD", "VBP", "VBZ"}
NONFINITE_VERB_TAGS = {"VBG", "VBN"}
FIXED_MULTIWORD_PHRASES: Tuple[Tuple[re.Pattern, str], ...] = tuple(
    (
        re.compile(pattern, re.IGNORECASE),
        label,
    )
    for pattern, label in [
        (r"\bas well as\b", "as well as"),
        (r"\brather than\b", "rather than"),
        (r"\bin addition to\b", "in addition to"),
        (r"\bin spite of\b", "in spite of"),
        (r"\baccording to\b", "according to"),
        (r"\bas soon as\b", "as soon as"),
    ]
)
CLAUSE_FUNCTION_LABELS = {
    "TIME": "时间",
    "REASON": "原因",
    "CONDITION": "条件",
    "CONCESSION": "让步",
    "RESULT": "结果",
    "PURPOSE": "目的",
}
RESIDUAL_DEP_LABELS = {
    "det": "限定词",
    "prep": "介词",
    "case": "介词标记",
    "cc": "并列连词",
    "mark": "从属连词",
    "poss": "所有格标记",
    "nummod": "数量修饰语",
    "aux": "助动词",
    "prt": "小品词",
}
RESIDUAL_POS_LABELS = {
    "ADJ": "形容词修饰语",
    "ADV": "副词",
    "NUM": "数词",
    "PRON": "代词",
}


def _classify_segment(seg: str) -> str:
    if not seg:
        return "punct"
    if seg.isspace():
        return "space"
    if NUMBER_RE.fullmatch(seg) or WORD_LIKE_RE.fullmatch(seg):
        return "word"
    return "punct"


def _append_fallback_tokens(text: str, start: int, end: int, tokens: List[Token]) -> None:
    for idx in range(start, end):
        ch = text[idx]
        if ch.isspace():
            kind = "space"
        elif ch.isalnum() or ch == "_":
            kind = "word"
        else:
            kind = "punct"
        tokens.append(Token(ch, idx, idx + 1, kind))


def tokenize_preserve(text: str) -> List[Token]:
    tokens: List[Token] = []
    if not text:
        return tokens
    last_end = 0
    for match in TOKEN_REGEX.finditer(text):
        if match.start() > last_end:
            _append_fallback_tokens(text, last_end, match.start(), tokens)
        seg = text[match.start() : match.end()]
        tokens.append(Token(seg, match.start(), match.end(), _classify_segment(seg)))
        last_end = match.end()
    if last_end < len(text):
        _append_fallback_tokens(text, last_end, len(text), tokens)
    if not tokens and text:
        tokens = [Token(text, 0, len(text), "word" if text[0].isalnum() else "punct")]
    return tokens
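
# Illustrative example (comments only, not executed): tokenize_preserve("It works!")
# yields character-exact tokens covering the whole input,
#   Token("It", 0, 2, "word"), Token(" ", 2, 3, "space"),
#   Token("works", 3, 8, "word"), Token("!", 8, 9, "punct"),
# so the original text can be reassembled byte-for-byte when rendering.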


def build_char_to_token_map(tokens: List[Token]) -> Dict[int, int]:
    mapping: Dict[int, int] = {}
    for idx, tok in enumerate(tokens):
        for pos in range(tok.start, tok.end):
            mapping[pos] = idx
    return mapping


def char_span_to_token_span(
    char_start: int, char_end: int, mapping: Dict[int, int]
) -> Tuple[int, int]:
    if char_end <= char_start:
        return -1, -1
    start_idx = mapping.get(char_start)
    end_idx = mapping.get(char_end - 1)
    if start_idx is None or end_idx is None:
        return -1, -1
    return start_idx, end_idx + 1
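
# Illustrative example (comments only): with the tokens for "It works!" above,
# char_span_to_token_span(3, 8, mapping) -> (2, 3), the half-open token range
# covering "works"; unmapped or empty character spans yield (-1, -1).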


def add_char_based_span(
    spans: List[Span],
    char_start: int,
    char_end: int,
    cls: str,
    mapping: Dict[int, int],
    attrs: Optional[Dict[str, str]] = None,
) -> None:
    s_tok, e_tok = char_span_to_token_span(char_start, char_end, mapping)
    if s_tok < 0 or e_tok < 0:
        return
    safe_attrs = None
    if attrs:
        # Drop empty values only; escaping happens exactly once, at render time
        # in open_span (escaping here as well would double-escape attributes).
        safe_attrs = {k: v for k, v in attrs.items() if v}
    spans.append(Span(start_token=s_tok, end_token=e_tok, cls=cls, attrs=safe_attrs))


def add_span(spans: List[Span], start_token: int, end_token: int, cls: str, attrs: Optional[Dict[str, str]] = None):
    if start_token < 0 or end_token < 0 or end_token <= start_token:
        return
    spans.append(Span(start_token=start_token, end_token=end_token, cls=cls, attrs=attrs))


def subtree_char_span(token: SpacyToken) -> Tuple[int, int]:
    subtree = list(token.subtree)
    if not subtree:
        return token.idx, token.idx + len(token.text)
    return subtree[0].idx, subtree[-1].idx + len(subtree[-1].text)


def _subtree_text(token: SpacyToken) -> str:
    span = token.doc[token.left_edge.i : token.right_edge.i + 1]
    return span.text


def _find_antecedent_word(sentence: SpacySpan, clause_start_char: int) -> Optional[str]:
    candidate = None
    for tok in sentence:
        if tok.idx >= clause_start_char:
            break
        if tok.pos_ in {"NOUN", "PROPN", "PRON"}:
            candidate = tok.text
    return candidate


def _is_nonfinite_clause(span: SpacySpan) -> bool:
    tags = {tok.tag_ for tok in span if tok.tag_}
    if tags & FINITE_VERB_TAGS:
        return False
    if "TO" in tags or tags & NONFINITE_VERB_TAGS:
        return True
    return False


def _classify_noun_clause(span: SpacySpan) -> Optional[str]:
    deps = {tok.dep_ for tok in span}
    if deps & {"csubj", "csubjpass"}:
        return "subject"
    if deps & {"ccomp", "xcomp"}:
        return "complement"
    if deps & {"dobj", "obj"}:
        return "object"
    return None
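
# Illustrative example (comments only, model-dependent): in "I know that she
# left", the embedded clause's verb typically carries dep_ == "ccomp", so
# _classify_noun_clause returns "complement"; a clause parsed as csubj would
# return "subject", and one parsed as dobj/obj returns "object".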


def _split_paragraph_ranges(text: str) -> List[Tuple[int, int]]:
    """Return inclusive paragraph ranges, keeping separators intact."""
    if not text:
        return [(0, 0)]
    ranges: List[Tuple[int, int]] = []
    start = 0
    for match in PARAGRAPH_BREAK_RE.finditer(text):
        ranges.append((start, match.start()))
        start = match.end()
    ranges.append((start, len(text)))
    # Ensure at least one range and sorted order.
    if not ranges:
        ranges = [(0, len(text))]
    return ranges
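
# Illustrative example (comments only): _split_paragraph_ranges("One.\n\nTwo.")
# -> [(0, 4), (6, 10)]; the blank-line separator matched by PARAGRAPH_BREAK_RE
# falls between the ranges, so each range covers exactly one paragraph.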


def _circled_number(value: int) -> str:
    """Return the circled number style for sentence numbering."""
    if value <= 0:
        return ""
    if value <= 20:
        return chr(ord("\u2460") + value - 1)
    if 21 <= value <= 35:
        return chr(ord("\u3251") + value - 21)
    if 36 <= value <= 50:
        return chr(ord("\u32B1") + value - 36)
    return f"({value})"
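
# Illustrative mapping (comments only): 1 -> "①" (U+2460), 21 -> "㉑" (U+3251),
# 36 -> "㊱" (U+32B1); values above 50 fall back to plain "(51)", "(52)", ...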


def annotate_constituents(
    sentence: SpacySpan,
    spans: List[Span],
    mapping: Dict[int, int],
    sentence_start_char: int,
    sentence_end_char: int,
    summary: Optional[SentenceSummary] = None,
) -> None:
    # If benepar is not attached, or a previous warning indicates fallback, skip.
    if not HAS_BENEPAR or BENE_PAR_WARNING:
        _ensure_benepar_warning(
            "Benepar component missing or unavailable. Using dependency-based spans."
        )
        return
    # If the extension is not present, skip.
    if not SpacySpan.has_extension("constituents"):
        _ensure_benepar_warning(
            "Benepar component missing from spaCy pipeline. Falling back to dependency spans."
        )
        return
    try:
        constituents = sentence._.constituents
    except Exception as exc:
        # Catch any error while accessing benepar results and fall back safely.
        _ensure_benepar_warning(
            f"Benepar constituency parse unavailable: {exc}. Falling back to dependency spans."
        )
        return
    seen_ranges = set()
    for const in constituents:
        label = getattr(const, "label_", None)
        if not label:
            continue
        start_char, end_char = const.start_char, const.end_char
        if start_char == sentence_start_char and end_char == sentence_end_char:
            continue  # Skip the span covering the entire sentence.
        key = (start_char, end_char, label)
        is_relative = False
        if label in {"PP", "ADVP"}:
            if key in seen_ranges:
                continue
            seen_ranges.add(key)
            add_char_based_span(spans, start_char, end_char, "role-adverbial", mapping)
            continue
        if label == "SBAR" and const:
            first_token = const[0]
            lowered = first_token.text.lower()
            if lowered in RELATIVE_PRONOUNS:
                antecedent = _find_antecedent_word(sentence, start_char)
                attrs = {"data-modifies": antecedent} if antecedent else None
                add_char_based_span(spans, start_char, end_char, "clause-relative", mapping, attrs)
                if summary:
                    summary.clauses.append("定语从句")
                is_relative = True
            else:
                function = SUBORDINATORS_TO_FUNCTION.get(lowered)
                attrs = {"data-function": function}
                add_char_based_span(spans, start_char, end_char, "clause-adverbial", mapping, attrs)
                if summary:
                    summary.clauses.append("状语从句")
                    if function:
                        summary.clause_functions.append(function)
            continue
        if label in {"S", "VP"}:
            if _is_nonfinite_clause(const):
                add_char_based_span(spans, start_char, end_char, "clause-nonfinite", mapping)
                if summary:
                    summary.clauses.append("非限定结构")
                continue
        if label == "S" and not is_relative:
            role = _classify_noun_clause(const)
            if role:
                attrs = {"data-clause-role": role}
                add_char_based_span(spans, start_char, end_char, "clause-noun", mapping, attrs)
                if summary:
                    summary.clauses.append(f"名词性从句({role})")


def _predicate_span_bounds(head: SpacyToken) -> Tuple[int, int]:
    """Return a character range covering predicate head + functional dependents."""
    tokens = [head]
    for child in head.children:
        if child.dep_ in {"aux", "auxpass", "prt", "cop", "neg"}:
            tokens.append(child)
    start_char = min(tok.idx for tok in tokens)
    end_char = max(tok.idx + len(tok.text) for tok in tokens)
    return start_char, end_char
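
# Illustrative example (comments only, model-dependent): in "She has not given
# up", the head "given" collects its aux ("has"), neg ("not"), and prt ("up")
# children, so the returned range covers "has not given up".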


def _predicate_heads(sentence: SpacySpan) -> List[SpacyToken]:
    """Collect predicate heads including coordinated verbs."""
    candidates: List[SpacyToken] = []
    for tok in sentence:
        if tok.pos_ not in {"VERB", "AUX"} and tok.tag_ not in FINITE_VERB_TAGS:
            continue
        if tok.dep_ == "ROOT":
            candidates.append(tok)
            continue
        if tok.dep_ == "conj" and tok.head.pos_ in {"VERB", "AUX"}:
            candidates.append(tok)
            continue
        if tok.dep_ in {"ccomp", "xcomp", "advcl", "acl", "relcl", "parataxis"}:
            candidates.append(tok)
    seen = set()
    ordered: List[SpacyToken] = []
    for tok in sorted(candidates, key=lambda t: t.i):
        if tok.i in seen:
            continue
        seen.add(tok.i)
        ordered.append(tok)
    return ordered
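
# Illustrative example (comments only, model-dependent): in "He stood up and
# walked away", "stood" is the ROOT and "walked" is conj on a verbal head, so
# _predicate_heads returns both, deduplicated and in document order.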


def _add_fixed_phrases(
    sentence: SpacySpan, mapping: Dict[int, int], spans: List[Span], summary: SentenceSummary
) -> None:
    base = sentence.start_char
    text = sentence.text
    for pattern, label in FIXED_MULTIWORD_PHRASES:
        for match in pattern.finditer(text):
            start_char = base + match.start()
            end_char = base + match.end()
            add_char_based_span(
                spans,
                start_char,
                end_char,
                "phrase-fixed",
                mapping,
                attrs={"data-phrase": label},
            )
            summary.connectors.append(label.lower())


def annotate_sentence(
    tokens: List[Token],
    sentence: SpacySpan,
    mapping: Dict[int, int],
) -> Tuple[List[Span], SentenceSummary]:
    spans: List[Span] = []
    summary = SentenceSummary(sentence_length=len(sentence))
    sent_bounds = char_span_to_token_span(sentence.start_char, sentence.end_char, mapping)
    sent_start_tok, sent_end_tok = sent_bounds

    def add_subtree(token: SpacyToken, cls: str):
        start_char, end_char = subtree_char_span(token)
        add_char_based_span(spans, start_char, end_char, cls, mapping)

    def add_token(token: SpacyToken, cls: str):
        add_char_based_span(spans, token.idx, token.idx + len(token.text), cls, mapping)

    for tok in sentence:
        if tok.dep_ in SUBJECT_DEPS:
            add_subtree(tok, "role-subject")
            summary.subjects.append(_subtree_text(tok))
    for head in _predicate_heads(sentence):
        start_char, end_char = _predicate_span_bounds(head)
        add_char_based_span(spans, start_char, end_char, "role-predicate", mapping)
        predicate_text = sentence.doc.text[start_char:end_char].strip()
        summary.predicates.append(predicate_text or head.text)
    for tok in sentence:
        if tok.dep_ in DIRECT_OBJECT_DEPS:
            add_subtree(tok, "role-object-do")
            summary.objects.append(_subtree_text(tok))
            break
    io_token = next((tok for tok in sentence if tok.dep_ in INDIRECT_OBJECT_DEPS), None)
    if io_token is None:
        # Fall back to "to/for" prepositional objects as indirect objects.
        for tok in sentence:
            if tok.dep_ == "pobj" and tok.head.dep_ == "prep" and tok.head.lemma_.lower() in {"to", "for"}:
                io_token = tok
                break
    if io_token:
        add_subtree(io_token, "role-object-io")
        summary.objects.append(_subtree_text(io_token))
    for tok in sentence:
        if tok.dep_ in COMPLEMENT_DEPS:
            add_subtree(tok, "role-complement")
            summary.complements.append(_subtree_text(tok))
            break
    for tok in sentence:
        lowered = tok.text.lower()
        if tok.dep_ in {"cc", "mark", "preconj"} or tok.pos_ in {"CCONJ", "SCONJ"}:
            add_token(tok, "role-connector")
            summary.connectors.append(lowered)
        if tok.dep_ == "det" or tok.pos_ == "DET":
            add_token(tok, "role-determiner")
        if tok.dep_ in {"amod", "poss", "compound", "nummod"}:
            add_token(tok, "role-modifier")
    adverbial_ranges = set()
    for tok in sentence:
        if tok.dep_ in ADVERBIAL_DEPS:
            adverbial_ranges.add(subtree_char_span(tok))
    for start_char, end_char in adverbial_ranges:
        add_char_based_span(spans, start_char, end_char, "role-adverbial", mapping)
    for tok in sentence:
        if tok.dep_ == "appos":
            add_subtree(tok, "role-apposition")
    if sent_start_tok >= 0 and sent_end_tok >= 0:
        # Match balanced "(...)" pairs as parentheticals.
        stack = []
        for idx in range(sent_start_tok, sent_end_tok):
            token = tokens[idx]
            if token.text == "(":
                stack.append(idx)
            elif token.text == ")" and stack:
                add_span(spans, stack.pop(), idx + 1, "role-parenthetical")
        # Comma-delimited segments containing a VBG verb are treated as
        # absolute constructions.
        comma_token_idxs = [
            i
            for i in range(sent_start_tok, sent_end_tok)
            if tokens[i].kind == "punct" and tokens[i].text == ","
        ]
        for idx, first_comma in enumerate(comma_token_idxs):
            if idx + 1 >= len(comma_token_idxs):
                break
            second_comma = comma_token_idxs[idx + 1]
            start_char = tokens[first_comma].start
            end_char = tokens[second_comma].end
            span = sentence.doc.char_span(start_char, end_char, alignment_mode="expand")
            if span and any(tok.tag_ == "VBG" for tok in span):
                add_span(spans, first_comma, second_comma + 1, "role-absolute")
    annotate_constituents(
        sentence,
        spans,
        mapping,
        sentence.start_char,
        sentence.end_char,
        summary,
    )
    _add_fixed_phrases(sentence, mapping, spans, summary)
    return spans, summary


def _label_residual_token(token: SpacyToken) -> Optional[str]:
    dep_label = RESIDUAL_DEP_LABELS.get(token.dep_)
    if dep_label:
        return dep_label
    return RESIDUAL_POS_LABELS.get(token.pos_)


def _collect_residual_roles(
    sentence: SpacySpan,
    tokens: List[Token],
    spans: List[Span],
    sent_bounds: Tuple[int, int],
    summary: SentenceSummary,
    mapping: Dict[int, int],
) -> None:
    sent_start, sent_end = sent_bounds
    if sent_start < 0 or sent_end < 0 or sent_start >= sent_end:
        return
    coverage = [False] * (sent_end - sent_start)
    for span in spans:
        lo = max(span.start_token, sent_start)
        hi = min(span.end_token, sent_end)
        for idx in range(lo, hi):
            coverage[idx - sent_start] = True
    doc = sentence.doc
    for offset, covered in enumerate(coverage):
        if covered:
            continue
        token = tokens[sent_start + offset]
        if token.kind != "word":
            continue
        span = doc.char_span(token.start, token.end, alignment_mode="expand")
        if not span or not span.text.strip():
            continue
        label = _label_residual_token(span[0])
        if label and label not in summary.residual_roles:
            summary.residual_roles.append(label)
        if label:
            add_char_based_span(
                spans,
                token.start,
                token.end,
                "role-residual",
                mapping,
                attrs={"data-role": label},
            )


def _classify_sentence_complexity(summary: SentenceSummary) -> Tuple[str, bool]:
    clause_count = len(summary.clauses)
    connector_count = len(summary.connectors)
    word_count = summary.sentence_length
    if clause_count >= 2:
        return "多重复杂句", True
    if clause_count == 1:
        return "主从复合句", True
    if connector_count >= 2:
        return "并列复合句", True
    if word_count >= 25:
        return "长句", True
    return "简单句", False
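
# Classification ladder (comments only): two or more clauses -> "多重复杂句"
# (multiple complex sentence); exactly one clause -> "主从复合句" (complex
# sentence); no clauses but >= 2 connectors -> "并列复合句" (compound
# sentence); >= 25 words -> "长句" (long sentence); otherwise "简单句"
# (simple sentence). The boolean marks the sentence as complex for data-complex.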


def _translate_clause_functions(functions: List[str]) -> List[str]:
    translated = []
    for item in functions:
        label = CLAUSE_FUNCTION_LABELS.get(item, item)
        if label not in translated:
            translated.append(label)
    return translated


def build_sentence_note(summary: SentenceSummary) -> Tuple[str, bool]:
    note_parts: List[str] = []
    clause_label = "无"
    if summary.clauses:
        counts = Counter(summary.clauses)
        clause_label = "、".join(
            f"{name}×{count}" if count > 1 else name for name, count in counts.items()
        )
    functions = _translate_clause_functions(summary.clause_functions)
    connectors = list(dict.fromkeys(summary.connectors))
    residual = summary.residual_roles
    subjects_seq = list(dict.fromkeys(summary.subjects))
    predicates_seq = list(dict.fromkeys(summary.predicates))
    objects_seq = list(dict.fromkeys(summary.objects))
    complements_seq = list(dict.fromkeys(summary.complements))
    subjects = "、".join(subjects_seq) if subjects_seq else "未识别"
    predicates = "、".join(predicates_seq) if predicates_seq else "未识别"
    objects = "、".join(objects_seq) if objects_seq else "无"
    complements = "、".join(complements_seq) if complements_seq else "无"
    note_parts.append(f"主语:{subjects}")
    note_parts.append(f"谓语:{predicates}")
    note_parts.append(f"宾语:{objects}")
    if complements != "无":
        note_parts.append(f"补语:{complements}")
    note_parts.append(f"从句:{clause_label}")
    if functions:
        note_parts.append(f"从句功能:{'、'.join(functions)}")
    connector_text = "、".join(connectors) if connectors else "未检测到典型连接词"
    note_parts.append(f"连接词:{connector_text}")
    if residual:
        note_parts.append(f"未高亮:{'、'.join(residual)}")
    complexity_label, is_complex = _classify_sentence_complexity(summary)
    note_parts.insert(0, f"句型:{complexity_label}")
    note_parts.append(f"词数:{summary.sentence_length}")
    return ";".join(note_parts), is_complex
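
# Illustrative example (comments only): for "She reads books" the note reads
# roughly "句型:简单句;主语:She;谓语:reads;宾语:books;从句:无;
# 连接词:未检测到典型连接词;词数:3" (sentence type: simple; subject /
# predicate / object; clauses: none; connectors: none detected; word count),
# and it is attached to the sentence-scope span as the data-note attribute.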


def render_with_spans(tokens: List[Token], spans: List[Span]) -> str:
    spans = sorted(spans, key=lambda s: (s.start_token, -s.end_token))
    out_parts: List[str] = []
    active_stack: List[Span] = []
    span_queue = list(spans)
    current_idx = 0

    def open_span(span: Span):
        attrs = ""
        if span.attrs:
            attrs = " " + " ".join(
                f"{k}='" + html.escape(v, quote=True) + "'" for k, v in span.attrs.items()
            )
        out_parts.append(f"<span class='{span.cls}'{attrs}>")

    def close_span():
        out_parts.append("</span>")

    while current_idx < len(tokens):
        opening = [sp for sp in span_queue if sp.start_token == current_idx]
        for sp in opening:
            open_span(sp)
            active_stack.append(sp)
            span_queue.remove(sp)
        token = tokens[current_idx]
        out_parts.append(html.escape(token.text))
        current_idx += 1
        while active_stack and active_stack[-1].end_token == current_idx:
            active_stack.pop()
            close_span()
    while active_stack:
        active_stack.pop()
        close_span()
    return "".join(out_parts)
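
# Illustrative example (comments only): tokens for "The cat" with spans
# Span(0, 3, "role-subject") and Span(0, 1, "role-determiner") render as
#   <span class='role-subject'><span class='role-determiner'>The</span> cat</span>
# Sorting by (start_token, -end_token) opens wider spans first, so spans that
# share a start token nest correctly in the emitted HTML.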


def _run_pipeline_without_benepar(text: str) -> "spacy.tokens.Doc":
    """Run the spaCy pipeline skipping benepar, for robust fallback."""
    assert NLP is not None
    doc = NLP.make_doc(text)
    for name, proc in NLP.pipeline:
        if name == "benepar":
            continue
        doc = proc(doc)
    return doc


def highlight_text_with_spacy(text: str, paragraph_meta: Optional[List[Dict[str, str]]] = None) -> str:
    if NLP is None:
        raise RuntimeError(f"spaCy pipeline unavailable: {NLP_LOAD_ERROR}")
    tokens = tokenize_preserve(text)
    if not tokens:
        return ""
    mapping = build_char_to_token_map(tokens)
    # Robust doc creation: if benepar raises during processing, skip it and
    # fall back to the dependency-only pipeline.
    try:
        doc = NLP(text)
    except Exception as exc:
        _ensure_benepar_warning(
            f"Benepar failed during processing: {exc}. Falling back to dependency-based spans."
        )
        doc = _run_pipeline_without_benepar(text)
    paragraph_ranges = _split_paragraph_ranges(text)
    paragraph_counters = [0 for _ in paragraph_ranges]
    paragraph_idx = 0
    paragraph_spans: List[Span] = []
    paragraph_attrs = paragraph_meta if paragraph_meta and len(paragraph_meta) == len(paragraph_ranges) else None
    for idx, (start, end) in enumerate(paragraph_ranges):
        attrs = None
        if paragraph_attrs:
            attrs = paragraph_attrs[idx] or None
        add_char_based_span(paragraph_spans, start, end, "paragraph-scope", mapping, attrs=attrs)
    spans: List[Span] = list(paragraph_spans)
    for sent in doc.sents:
        while paragraph_idx < len(paragraph_ranges) and paragraph_ranges[paragraph_idx][1] <= sent.start_char:
            paragraph_idx += 1
        current_idx = min(paragraph_idx, len(paragraph_ranges) - 1)
        paragraph_counters[current_idx] += 1
        sentence_label = _circled_number(paragraph_counters[current_idx])
        sentence_spans, summary = annotate_sentence(tokens, sent, mapping)
        sent_bounds = char_span_to_token_span(sent.start_char, sent.end_char, mapping)
        sent_start, sent_end = sent_bounds
        if sent_start >= 0 and sent_end >= 0:
            _collect_residual_roles(sent, tokens, sentence_spans, sent_bounds, summary, mapping)
            helper_note, is_complex = build_sentence_note(summary)
            attrs = {
                "data-sid": sentence_label,
                "data-note": helper_note,
                "data-complex": "1" if is_complex else "0",
            }
            sentence_spans.append(Span(start_token=sent_start, end_token=sent_end, cls="sentence-scope", attrs=attrs))
        spans.extend(sentence_spans)
    return render_with_spans(tokens, spans)


app = FastAPI(title="Grammar Highlight API (spaCy + benepar)")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.post("/analyze", response_model=AnalyzeResponse)
async def analyze(req: AnalyzeRequest):
    text = req.text
    if text is None or not text.strip():
        raise HTTPException(status_code=400, detail="Text is required")
    try:
        sanitized_fragment = highlight_text_with_spacy(text)
        helper_state = "on" if SENTENCE_HELPER_ENABLED else "off"
        return AnalyzeResponse(
            highlighted_html=f"{STYLE_BLOCK}<div class='analysis' data-helper='{helper_state}'>{sanitized_fragment}</div>"
        )
    except RuntimeError as exc:
        raise HTTPException(status_code=500, detail=str(exc)) from exc
    except Exception as exc:  # pragma: no cover - defensive
        raise HTTPException(status_code=500, detail=f"Analysis failed: {exc}") from exc


@app.get("/health")
async def health():
    status = "ok" if NLP is not None else "failed"
    detail = None if NLP is not None else str(NLP_LOAD_ERROR)
    payload = {"status": status}
    if detail:
        payload["detail"] = detail
    if BENE_PAR_WARNING:
        payload["warning"] = BENE_PAR_WARNING
    payload["benepar_attached"] = HAS_BENEPAR
    return payload


@app.get("/proxy", response_class=HTMLResponse)
async def proxy(url: Optional[str] = None, show_images: bool = False):
    if not url:
        return HTMLResponse(_render_proxy_page(show_images=show_images))
    try:
        normalized_url, title, page_text, images, code_blocks, paragraph_meta = await _fetch_remote_plaintext(url)
        highlighted_fragment = highlight_text_with_spacy(page_text, paragraph_meta=paragraph_meta or None)
        if code_blocks:
            highlighted_fragment = _inject_proxy_codeblocks(highlighted_fragment, code_blocks)
        image_notice = None
        if images:
            if show_images:
                highlighted_fragment = _inject_proxy_images(highlighted_fragment, images)
            else:
                highlighted_fragment = _strip_proxy_image_markers(highlighted_fragment)
                image_notice = (
                    f"检测到 {len(images)} 张正文图片,为提速默认隐藏。勾选“显示图片”后重新抓取即可加载原图。"
                )
        html_body = _render_proxy_page(
            url_value=normalized_url,
            message="分析完成,结果如下。",
            highlight_fragment=highlighted_fragment,
            source_url=normalized_url,
            source_title=title,
            show_images=show_images,
            image_notice=image_notice,
        )
        return HTMLResponse(html_body)
    except ValueError as exc:
        body = _render_proxy_page(url_value=url or "", message=str(exc), is_error=True, show_images=show_images)
        return HTMLResponse(body, status_code=400)
    except httpx.HTTPError as exc:
        # Provide a clearer message for common HTTP errors from the remote site.
        msg = None
        if isinstance(exc, httpx.HTTPStatusError) and exc.response is not None:
            status = exc.response.status_code
            if status == 403:
                msg = (
                    "抓取页面失败:目标站点返回 403 Forbidden(禁止访问)。"
                    "该网站很可能禁止自动抓取或代理访问,目前无法通过本工具获取正文,"
                    "可以尝试在浏览器中打开并手动复制需要的内容。"
                )
            else:
                msg = f"抓取页面失败:目标站点返回 HTTP {status}。"
        if msg is None:
            msg = f"抓取页面失败:{exc}"
        body = _render_proxy_page(
            url_value=url or "",
            message=msg,
            is_error=True,
            show_images=show_images,
        )
        return HTMLResponse(body, status_code=502)
    except Exception as exc:
        body = _render_proxy_page(
            url_value=url or "",
            message=f"代理分析失败:{exc}",
            is_error=True,
            show_images=show_images,
        )
        return HTMLResponse(body, status_code=500)


@app.get("/", response_class=HTMLResponse)
async def ui():
    return """<!DOCTYPE html>
<html lang="zh-CN">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1" />
  <title>Grammar Highlighter</title>
  <style>
    body { font-family: system-ui, -apple-system, sans-serif; margin: 2rem; line-height: 1.6; }
    textarea { width: 100%; min-height: 140px; font-size: 1rem; padding: 0.75rem; border: 1px solid #d0d7de; border-radius: 0.5rem; }
    button { margin-top: 0.75rem; padding: 0.6rem 1.4rem; font-size: 1rem; cursor: pointer; border: none; border-radius: 999px; background: #1f7a8c; color: #fff; }
    button + button { margin-left: 0.5rem; background: #6b7280; }
    button:disabled { opacity: 0.6; cursor: wait; }
    #result { margin-top: 1.5rem; border-top: 1px solid #e5e7eb; padding-top: 1rem; min-height: 2rem; }
    #status { margin-left: 0.75rem; color: #3b82f6; }
    .err { color: #b00020; }
    .muted { color: #6b7280; font-size: 0.9rem; }
    .tts-controls { margin-top: 0.75rem; display: flex; align-items: center; gap: 0.75rem; flex-wrap: wrap; }
    .tts-controls button { margin-top: 0; background: #f97316; }
    .tts-status { font-size: 0.95rem; color: #475569; }
  </style>
</head>
<body>
  <h1>Grammar Highlighter (spaCy + benepar)</h1>
  <textarea id="text" placeholder="Type the English text you want to analyze..."></textarea>
  <div>
    <button type="button" id="submit">Analyze</button>
    <button type="button" id="clear">清空输入</button>
    <span id="status"></span>
  </div>
  <div class="tts-controls">
    <button type="button" id="tts">朗读高亮文本</button>
    <button type="button" id="tts-selection">朗读选中文本</button>
    <span class="tts-status" id="tts-status"></span>
  </div>
  <div id="result"></div>
  <script>
    const btn = document.getElementById('submit');
    const btnClear = document.getElementById('clear');
    const textarea = document.getElementById('text');
    const statusEl = document.getElementById('status');
    const ttsBtn = document.getElementById('tts');
    const ttsSelectionBtn = document.getElementById('tts-selection');
    const ttsStatus = document.getElementById('tts-status');
    const result = document.getElementById('result');
    const TTS_ENDPOINT = 'http://141.140.15.30:8028/generate';
    let currentAudio = null;
    let queuedAudios = [];
    let streamingFinished = false;

    function resetUI() {
      result.innerHTML = '';
      statusEl.textContent = '';
      statusEl.classList.remove('err');
      ttsStatus.textContent = '';
      setTtsButtonsDisabled(false);
      resetAudioPlayback();
    }

    btn.addEventListener('click', async () => {
      resetUI();
      const value = textarea.value.trim();
      if (!value) {
        statusEl.textContent = '请输入要分析的英文文本。';
        statusEl.classList.add('err');
        return;
      }
      btn.disabled = true;
      statusEl.textContent = 'Analyzing ...';
      try {
        const response = await fetch('/analyze', {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({ text: value })
        });
        if (!response.ok) {
          const error = await response.json().catch(() => ({ detail: 'Request failed' }));
          throw new Error(error.detail || 'Request failed');
        }
        const data = await response.json();
        result.innerHTML = data.highlighted_html || '';
        statusEl.textContent = '';
      } catch (err) {
        statusEl.textContent = '错误:' + (err.message || 'Unknown error');
        statusEl.classList.add('err');
      } finally {
        btn.disabled = false;
      }
    });

    btnClear.addEventListener('click', () => {
      textarea.value = '';
      resetUI();
      textarea.focus();
    });

    function extractHighlightedText() {
      const highlightRoot = result.querySelector('.analysis');
      return highlightRoot ? highlightRoot.textContent.trim() : '';
    }

    function setTtsButtonsDisabled(disabled) {
      if (ttsBtn) {
        ttsBtn.disabled = disabled;
      }
      if (ttsSelectionBtn) {
        ttsSelectionBtn.disabled = disabled;
      }
    }

    function resetAudioPlayback() {
      queuedAudios = [];
      streamingFinished = false;
      if (currentAudio) {
        currentAudio.pause();
        currentAudio = null;
      }
    }

    function markStreamingFinished() {
      streamingFinished = true;
      if (!currentAudio && !queuedAudios.length) {
        ttsStatus.textContent = '播放完成';
      }
    }

    function playNextAudioChunk() {
      if (!queuedAudios.length) {
        currentAudio = null;
        if (streamingFinished) {
          ttsStatus.textContent = '播放完成';
        } else {
          ttsStatus.textContent = '等待更多语音...';
        }
        return;
      }
      const chunk = queuedAudios.shift();
      ttsStatus.textContent = '播放中...';
      currentAudio = new Audio('data:audio/wav;base64,' + chunk);
      currentAudio.onended = playNextAudioChunk;
      currentAudio.onerror = () => {
        ttsStatus.textContent = '播放失败';
        currentAudio = null;
      };
      currentAudio.play().catch(err => {
        ttsStatus.textContent = '自动播放被阻止:' + err.message;
        currentAudio = null;
      });
    }

    function enqueueAudioChunk(chunk) {
      queuedAudios.push(chunk);
      if (!currentAudio) {
        playNextAudioChunk();
      }
    }

    function parseTtsLine(line) {
      try {
        const parsed = JSON.parse(line);
        if (parsed && parsed.audio) {
          enqueueAudioChunk(parsed.audio);
          return true;
        }
      } catch (err) {
        console.warn('无法解析TTS响应行', err);
      }
      return false;
    }

    async function consumeTtsResponse(response) {
      let chunkCount = 0;
      const handleLine = rawLine => {
        const trimmed = rawLine.replace(/\\r/g, '').trim();
        if (!trimmed) return;
        if (parseTtsLine(trimmed)) {
          chunkCount += 1;
        }
      };
      if (response.body && response.body.getReader) {
        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        let buffer = '';
        while (true) {
          const { value, done } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          let newlineIndex;
          while ((newlineIndex = buffer.indexOf('\\n')) >= 0) {
            const line = buffer.slice(0, newlineIndex);
            buffer = buffer.slice(newlineIndex + 1);
            handleLine(line);
          }
        }
        buffer += decoder.decode();
        if (buffer) {
          handleLine(buffer);
        }
      } else {
        const payload = await response.text();
        payload.split('\\n').forEach(handleLine);
      }
      return chunkCount;
    }

    function getSelectedPageText() {
      const selection = window.getSelection ? window.getSelection() : null;
      return selection ? selection.toString().trim() : '';
    }

    async function streamTtsRequest(text) {
      const response = await fetch(TTS_ENDPOINT, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ text })
      });
      if (!response.ok) {
        throw new Error('接口响应错误');
      }
      const chunkCount = await consumeTtsResponse(response);
      if (!chunkCount) {
        throw new Error('接口未返回音频数据');
      }
      markStreamingFinished();
    }

    function createTtsRequest(textResolver, emptyMessage) {
      return async () => {
        const text = textResolver();
        if (!text) {
          ttsStatus.textContent = emptyMessage;
          return;
        }
        setTtsButtonsDisabled(true);
        ttsStatus.textContent = '请求语音...';
        resetAudioPlayback();
        try {
          await streamTtsRequest(text);
        } catch (err) {
          ttsStatus.textContent = 'TTS 出错:' + (err && err.message ? err.message : err);
          resetAudioPlayback();
        } finally {
          setTtsButtonsDisabled(false);
        }
      };
    }

    if (ttsBtn) {
      ttsBtn.addEventListener('click', createTtsRequest(extractHighlightedText, '请先生成高亮结果'));
    }
    if (ttsSelectionBtn) {
      ttsSelectionBtn.addEventListener('click', createTtsRequest(getSelectedPageText, '请先选择要朗读的文本'));
    }
  </script>
</body>
</html>"""
  1058. PROXY_PAGE_TEMPLATE = Template(
  1059. """<!DOCTYPE html>
  1060. <html lang=\"zh-CN\">
  1061. <head>
  1062. <meta charset=\"UTF-8\" />
  1063. <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />
  1064. <title>Grammar Proxy Highlighter</title>
  1065. <style>
  1066. body { font-family: system-ui, -apple-system, \"Segoe UI\", sans-serif; margin: 0 auto; max-width: 860px; padding: 1.5rem; line-height: 1.65; }
  1067. h1 { font-size: 1.45rem; margin-bottom: 1rem; }
  1068. form { display: flex; flex-wrap: wrap; gap: 0.5rem; margin-bottom: 0.75rem; }
  1069. input[type=\"url\"] { flex: 1 1 260px; padding: 0.65rem; font-size: 1rem; border-radius: 0.5rem; border: 1px solid #d0d7de; }
  1070. button { padding: 0.65rem 1.4rem; border: none; border-radius: 999px; background: #2563eb; color: #fff; font-size: 1rem; cursor: pointer; }
  1071. .show-images-toggle { display: inline-flex; align-items: center; gap: 0.35rem; font-size: 0.9rem; color: #475569; }
  1072. .show-images-toggle input { width: auto; }
  1073. .tts-controls { margin-top: 0.5rem; display: flex; align-items: center; flex-wrap: wrap; gap: 0.75rem; }
  1074. .tts-controls button { background: #f97316; }
  1075. .tts-status { font-size: 0.95rem; color: #475569; }
  1076. .status { margin-top: 0.25rem; font-size: 0.95rem; }
  1077. .status.err { color: #b00020; }
  1078. .status.ok { color: #059669; }
  1079. section.result { margin-top: 1.4rem; padding-top: 1rem; border-top: 1px solid #e5e7eb; }
  1080. section.result .source { font-size: 0.95rem; margin-bottom: 0.5rem; color: #475569; word-break: break-word; }
  1081. section.result .source a { color: inherit; text-decoration: underline; }
  1082. section.result img { display:block; margin:0.75rem auto; max-width:100%; height:auto; max-width:min(100%,800px); }
  1083. .image-hint { font-size:0.9rem; color:#6b7280; margin:0.5rem 0 0; }
  1084. .clear-floating { position: fixed; left: 0; right: 0; bottom: 0; padding: 0.55rem 1.5rem; border-radius: 0; border-top: 1px solid #e5e7eb; background: rgba(249,250,251,0.96); display: flex; justify-content: center; z-index: 40; }
  1085. .clear-floating button { padding: 0.55rem 1.8rem; border-radius: 999px; background: #6b7280; color: #fff; font-size: 0.95rem; }
  1086. .clear-floating button:hover { filter: brightness(1.05); }
  1087. @media (prefers-reduced-motion: reduce) { .clear-floating { scroll-behavior: auto; } }
  1088. @media (max-width: 640px) { body { padding-bottom: 3.2rem; } }
  1089. </style>
  1090. $style_block
  1091. </head>
  1092. <body>
  1093. <h1>网页代理高亮</h1>
  1094. <form method=\"get\" action=\"/proxy\" class=\"url-form\">
  1095. <input type=\"url\" name=\"url\" value=\"$url_value\" placeholder=\"https://example.com/article\" required />
  1096. <button type=\"submit\">抓取并高亮</button>
  1097. <label class=\"show-images-toggle\">
  1098. <input type=\"checkbox\" name=\"show_images\" value=\"1\" $show_images_checked />
  1099. <span>显示图片(默认关闭以提升速度)</span>
  1100. </label>
  1101. </form>
  1102. $status_block
  1103. <div class=\"tts-controls\">
  1104. <button type=\"button\" id=\"proxy-tts-btn\" disabled>朗读高亮文本</button>
  1105. <button type=\"button\" id=\"proxy-tts-selection\">朗读选中文本</button>
  1106. <span class=\"tts-status\" id=\"proxy-tts-status\"></span>
  1107. </div>
  1108. $result_block
  1109. <div class=\"clear-floating\">
  1110. <button type=\"button\" id=\"proxy-reset\">清空并重置</button>
  1111. </div>
  1112. <script>
  1113. (function() {
  1114. var resetBtn = document.getElementById('proxy-reset');
  1115. if (resetBtn) {
  1116. resetBtn.addEventListener('click', function() {
  1117. // 简单做法:回到无参数的 /proxy,相当于重置页面状态
  1118. window.location.href = '/proxy';
  1119. });
  1120. }
  1121. var ttsBtn = document.getElementById('proxy-tts-btn');
  1122. var ttsSelectionBtn = document.getElementById('proxy-tts-selection');
  1123. var ttsStatus = document.getElementById('proxy-tts-status');
  1124. var TTS_ENDPOINT = 'http://141.140.15.30:8028/generate';
  1125. var currentAudio = null;
  1126. var queuedAudios = [];
  1127. var streamingFinished = false;
  1128. function extractProxyText() {
  1129. var container = document.querySelector('section.result .analysis');
  1130. return container ? container.textContent.trim() : '';
  1131. }
  1132. function setTtsButtonsDisabled(disabled) {
  1133. if (ttsBtn) {
  1134. ttsBtn.disabled = disabled;
  1135. }
  1136. if (ttsSelectionBtn) {
  1137. ttsSelectionBtn.disabled = disabled;
  1138. }
  1139. }
  1140. function resetAudioPlayback() {
  1141. queuedAudios = [];
  1142. streamingFinished = false;
  1143. if (currentAudio) {
  1144. currentAudio.pause();
  1145. currentAudio = null;
  1146. }
  1147. }
  1148. function markStreamingFinished() {
  1149. streamingFinished = true;
  1150. if (!currentAudio && !queuedAudios.length) {
  1151. ttsStatus.textContent = '播放完成';
  1152. }
  1153. }
  1154. function playNextAudioChunk() {
  1155. if (!queuedAudios.length) {
  1156. currentAudio = null;
  1157. if (streamingFinished) {
  1158. ttsStatus.textContent = '播放完成';
  1159. } else {
  1160. ttsStatus.textContent = '等待更多语音...';
  1161. }
  1162. return;
  1163. }
  1164. var chunk = queuedAudios.shift();
  1165. ttsStatus.textContent = '播放中...';
  1166. currentAudio = new Audio('data:audio/wav;base64,' + chunk);
  1167. currentAudio.onended = playNextAudioChunk;
  1168. currentAudio.onerror = function() {
  1169. ttsStatus.textContent = '播放失败';
  1170. currentAudio = null;
  1171. };
  1172. currentAudio.play().catch(function(err) {
  1173. ttsStatus.textContent = '自动播放被阻止:' + err.message;
  1174. currentAudio = null;
  1175. });
  1176. }
  1177. function enqueueAudioChunk(chunk) {
  1178. queuedAudios.push(chunk);
  1179. if (!currentAudio) {
  1180. playNextAudioChunk();
  1181. }
  1182. }
  1183. function parseTtsLine(line) {
  1184. try {
  1185. var parsed = JSON.parse(line);
  1186. if (parsed && parsed.audio) {
  1187. enqueueAudioChunk(parsed.audio);
  1188. return true;
  1189. }
  1190. } catch (err) {
  1191. console.warn('无法解析TTS响应行', err);
  1192. }
  1193. return false;
  1194. }
  1195. async function consumeTtsResponse(response) {
  1196. var chunkCount = 0;
  1197. var handleLine = function(rawLine) {
  1198. var trimmed = rawLine.replace(/\\r/g, '').trim();
  1199. if (!trimmed) return;
  1200. if (parseTtsLine(trimmed)) {
  1201. chunkCount += 1;
  1202. }
  1203. };
  1204. if (response.body && response.body.getReader) {
  1205. var reader = response.body.getReader();
  1206. var decoder = new TextDecoder();
  1207. var buffer = '';
  1208. while (true) {
  1209. var readResult = await reader.read();
  1210. if (readResult.done) {
  1211. break;
  1212. }
  1213. buffer += decoder.decode(readResult.value, { stream: true });
  1214. var newlineIndex;
  1215. while ((newlineIndex = buffer.indexOf('\\n')) >= 0) {
  1216. var line = buffer.slice(0, newlineIndex);
  1217. buffer = buffer.slice(newlineIndex + 1);
  1218. handleLine(line);
  1219. }
  1220. }
  1221. buffer += decoder.decode();
  1222. if (buffer) {
  1223. handleLine(buffer);
  1224. }
  1225. } else {
  1226. var payload = await response.text();
  1227. payload.split('\\n').forEach(handleLine);
  1228. }
  1229. return chunkCount;
  1230. }
function getSelectedPageText() {
  var selection = window.getSelection ? window.getSelection() : null;
  return selection ? selection.toString().trim() : '';
}
async function streamTtsRequest(text) {
  var response = await fetch(TTS_ENDPOINT, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ text: text })
  });
  if (!response.ok) {
    throw new Error('接口响应错误');
  }
  var chunkCount = await consumeTtsResponse(response);
  if (!chunkCount) {
    throw new Error('接口未返回音频数据');
  }
  markStreamingFinished();
}
function createTtsRequest(textResolver, emptyMessage) {
  return async function() {
    var text = textResolver();
    if (!text) {
      ttsStatus.textContent = emptyMessage;
      return;
    }
    setTtsButtonsDisabled(true);
    ttsStatus.textContent = '请求语音...';
    resetAudioPlayback();
    try {
      await streamTtsRequest(text);
    } catch (err) {
      ttsStatus.textContent = 'TTS 出错:' + (err && err.message ? err.message : err);
      resetAudioPlayback();
    } finally {
      setTtsButtonsDisabled(false);
    }
  };
}
if (ttsBtn) {
  ttsBtn.addEventListener('click', createTtsRequest(extractProxyText, '暂无可朗读内容'));
  var hasText = !!extractProxyText();
  ttsBtn.disabled = !hasText;
  if (!hasText) {
    ttsStatus.textContent = '高亮完成后可朗读';
  }
}
if (ttsSelectionBtn) {
  ttsSelectionBtn.addEventListener('click', createTtsRequest(getSelectedPageText, '请先选择要朗读的文本'));
}
})();
</script>
</body>
</html>"""
)
ALLOWED_URL_SCHEMES = {"http", "https"}
MAX_REMOTE_HTML_BYTES = 1_000_000
REMOTE_FETCH_TIMEOUT = 10.0
REMOTE_FETCH_HEADERS = {
    # Use a browser-like user agent and common headers so that sites which
    # block generic HTTP clients are more likely to return normal content.
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/124.0.0.0 Safari/537.36"
    ),
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.9",
    # Let httpx / the underlying HTTP stack negotiate an encoding it can
    # actually decode. If we unconditionally advertise "br" but the runtime
    # does not have brotli support installed, some sites will respond with
    # brotli-compressed payloads that end up as mojibake or decoding errors.
    #
    # Most modern servers fall back to gzip or identity when the header is
    # absent, both of which httpx handles fine.
    # "Accept-Encoding": "gzip, deflate, br",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    # A few anti-bot setups check these request headers; keeping them close
    # to real desktop Chrome values slightly improves compatibility, even
    # though they are no guarantee against 403 responses.
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-User": "?1",
    "Sec-Fetch-Dest": "document",
}
SIMPLE_FETCH_HEADERS = {
    # Minimal browser-like headers for the fallback "simple request" path.
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/124.0.0.0 Safari/537.36"
    ),
    "Accept": "text/html,application/xhtml+xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Connection": "close",
}
def _inject_proxy_images(html_fragment: str, images: List[Dict[str, str]]) -> str:
    """Replace stable image placeholders with <img> tags in the highlighted HTML."""

    # Preserve simple width/height hints when they look safe. Most modern
    # pages rely on CSS for sizing, but explicit attributes can help keep
    # code snippets or diagrams close to their original scale.
    def _safe_dim(value: Optional[str]) -> Optional[str]:
        if not value:
            return None
        value = value.strip()
        if re.fullmatch(r"\d+(?:\.\d+)?(px|%)?", value):
            return value
        return None

    result = html_fragment
    for idx, img in enumerate(images):
        marker = img.get("marker") or f"__GHIMG_{idx}__"
        src = html.escape(img.get("src", "") or "", quote=True)
        if not src:
            continue
        alt = html.escape(img.get("alt", "") or "", quote=True)
        title = html.escape(img.get("title", "") or "", quote=True)
        attrs = [f"src='{src}'"]
        if alt:
            attrs.append(f"alt='{alt}'")
        if title:
            attrs.append(f"title='{title}'")
        width = _safe_dim(img.get("width"))
        height = _safe_dim(img.get("height"))
        if width:
            attrs.append(f"width='{html.escape(width, quote=True)}'")
        if height:
            attrs.append(f"height='{html.escape(height, quote=True)}'")
        img_tag = "<img " + " ".join(attrs) + " />"
        # Simple textual replacement is sufficient because placeholders
        # are emitted as plain word tokens without HTML meta characters.
        result = result.replace(marker, img_tag)
    return result
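# Example with hypothetical data: the fragment "<p>See __GHIMG_0__</p>" plus
# images=[{"marker": "__GHIMG_0__", "src": "/fig.png", "alt": "figure"}]
# renders as "<p>See <img src='/fig.png' alt='figure' /></p>".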
IMG_MARKER_RE = re.compile(r"__GHIMG_\d+__")


def _strip_proxy_image_markers(html_fragment: str) -> str:
    """Remove residual image placeholders when images are hidden."""
    if IMG_MARKER_RE.search(html_fragment) is None:
        return html_fragment
    return IMG_MARKER_RE.sub("", html_fragment)
def _inject_proxy_codeblocks(html_fragment: str, code_blocks: List[Dict[str, str]]) -> str:
    """Replace code placeholders with <pre><code> blocks, preserving formatting."""
    result = html_fragment
    for idx, block in enumerate(code_blocks):
        marker = block.get("marker") or f"__GHCODE_{idx}__"
        raw = block.get("text") or ""
        if not raw.strip():
            continue
        # Escape HTML but keep newlines so that <pre> preserves formatting.
        code_html = html.escape(raw, quote=False)
        pre_tag = f"<pre><code>{code_html}</code></pre>"
        result = result.replace(marker, pre_tag)
    return result
class SimpleHTMLStripper(HTMLParser):
    def __init__(self):
        super().__init__()
        # Accumulate visible text into paragraph-like blocks while skipping
        # navigation / sidebars / ads etc. We do this with a small HTML
        # structure-aware state machine instead of flattening everything.
        self._blocks: List[Dict[str, Any]] = []
        self._current_parts: List[str] = []
        # Track when we are inside potentially main-content containers
        # like <article> or <main>.
        self._article_depth = 0
        # Track whether we are inside a preformatted code block so that we
        # can preserve indentation and line breaks instead of collapsing
        # whitespace as normal text.
        self._in_pre = False
        self._in_code = False
        self._current_code_chunks: List[str] = []
        self._code_blocks: List[Dict[str, str]] = []
        # Stack of flags indicating which open tags should be skipped.
        # When any active flag is True, textual data is ignored.
        self._skip_stack: List[bool] = []
        self._skip_depth = 0
        self._title_chunks: List[str] = []
        self._in_title = False
        self._h1_chunks: List[str] = []
        self._h1_main_chunks: List[str] = []
        self._in_h1 = False
        # Collected inline images from the main content, in document order.
        # Each image is represented as a small dict with sanitized attributes.
        self._images: List[Dict[str, str]] = []
        # Active list containers (<ul>/<ol>) and current <li> nesting state.
        self._list_stack: List[Dict[str, Any]] = []
        self._list_item_stack: List[Dict[str, Any]] = []
    # Keywords commonly used in class/id attributes for non-article areas.
    _NOISE_KEYWORDS = {
        "sidebar",
        "side-bar",
        "aside",
        "nav",
        "menu",
        "breadcrumb",
        "breadcrumbs",
        "pagination",
        "pager",
        "comment",
        "comments",
        "reply",
        "advert",
        "ad-",
        "ads",
        "sponsor",
        "promo",
        "promotion",
        "related",
        "recommend",
        "share",
        "social",
        "subscribe",
        "signup",
        "login",
        "popup",
        "modal",
        "banner",
        "cookie",
        "notification",
        "toolbar",
        "footer",
        "header-bar",
    }
    # Tags whose textual content is almost never part of the main article.
    _ALWAYS_SKIP_TAGS = {
        "script",
        "style",
        "noscript",
        "nav",
        "aside",
        "footer",
        "form",
        "svg",
        "iframe",
        "button",
        "input",
        "textarea",
        "select",
        "option",
        "label",
    }
    # HTML void elements never receive a closing tag, so they must not take
    # part in the skip-stack bookkeeping in handle_starttag/handle_endtag;
    # pushing them would leave the stack permanently out of sync.
    _VOID_TAGS = {
        "area",
        "base",
        "br",
        "col",
        "embed",
        "hr",
        "img",
        "input",
        "link",
        "meta",
        "param",
        "source",
        "track",
        "wbr",
    }
    # Structural container tags where noise classes/roles are meaningful.
    # For purely inline tags we avoid applying aggressive noise heuristics
    # so that important inline text (e.g. spans in the first sentence) is
    # not accidentally dropped.
    _STRUCTURAL_NOISE_TAGS = {
        "div",
        "section",
        "aside",
        "nav",
        "header",
        "footer",
        "main",
        "article",
        "ul",
        "ol",
        "li",
    }
    # Block-level tags that naturally mark paragraph boundaries.
    _BLOCK_TAGS = {
        "p",
        "li",
        "blockquote",
        "h1",
        "h2",
        "h3",
        "h4",
        "h5",
        "h6",
        "pre",
        "table",
        "tr",
    }
    # Keywords for containers that are likely to hold the main article body.
    # Used to decide which regions count as "main content" for both text
    # and inline images.
    _CONTENT_KEYWORDS = {
        "content",
        "main-content",
        "article-body",
        "post-body",
        "post-content",
        "entry-content",
        "story-body",
        "blog-post",
        "markdown-body",
        "readable-content",
    }
    # Keywords on image-related class/id/src that usually indicate avatars,
    # logo icons, decorative banners, etc., which we want to drop from the
    # extracted main content.
    _IMAGE_NOISE_KEYWORDS = {
        "avatar",
        "author",
        "logo",
        "icon",
        "favicon",
        "badge",
        "banner",
        "thumb",
        "thumbnail",
        "profile",
        "cover",
        "background",
        "sprite",
        "emoji",
        "reaction",
    }
    _TEXT_NOISE_KEYWORDS = {
        "menu",
        "menus",
        "navigation",
        "nav",
        "目录",
        "目錄",
        "导航",
        "導航",
        "菜单",
        "菜單",
        "广告",
        "廣告",
        "ad",
        "ads",
        "sponsor",
        "sponsored",
        "上一篇",
        "下一篇",
        "返回顶部",
        "返回頂部",
        "分享",
        "分享至",
        "相关推荐",
        "相关阅读",
        "相關閱讀",
        "recommended",
        "related posts",
        "login",
        "signup",
    }
    _TEXT_NOISE_PREFIXES = (
        "目录",
        "目錄",
        "导航",
        "導航",
        "菜单",
        "菜單",
        "广告",
        "廣告",
        "上一篇",
        "下一篇",
        "上一页",
        "下一页",
        "返回目录",
        "返回目錄",
        "返回顶部",
        "返回頂部",
        "分享",
        "相关",
        "相關",
        "recommended",
        "login",
        "signup",
    )
    def _finish_paragraph(self) -> None:
        """Flush current buffered tokens into a paragraph block."""
        if not self._current_parts:
            return
        # For regular paragraphs we still collapse excessive internal
        # whitespace, but we keep logical breaks between paragraphs
        # themselves so that the downstream highlighter can reconstruct
        # paragraph structure.
        text = " ".join(self._current_parts)
        text = re.sub(r"\s+", " ", text).strip()
        self._current_parts = []
        if not text:
            return
        if self._looks_like_noise_paragraph(text):
            return
        block_kind = "paragraph"
        list_kind: Optional[str] = None
        list_depth = 0
        list_index: Optional[int] = None
        if self._list_item_stack:
            list_ctx = self._list_item_stack[-1]
            block_kind = "list-item"
            list_kind = list_ctx.get("list_type") or "ul"
            depth_value = list_ctx.get("depth", 1)
            try:
                depth_int = int(depth_value)
            except (TypeError, ValueError):
                depth_int = 1
            list_depth = min(max(depth_int, 1), 5)
            if list_kind == "ol":
                idx = list_ctx.get("index")
                if isinstance(idx, int):
                    list_index = idx
        self._blocks.append(
            {
                "text": text,
                "is_main": self._article_depth > 0,
                "kind": block_kind,
                "list_kind": list_kind,
                "list_depth": list_depth,
                "list_index": list_index,
            }
        )
    def _looks_like_noise_paragraph(self, text: str) -> bool:
        normalized = text.strip()
        if not normalized:
            return True
        lowered = normalized.lower()
        compact = re.sub(r"\s+", "", lowered)
        for prefix in self._TEXT_NOISE_PREFIXES:
            if lowered.startswith(prefix.lower()):
                if len(normalized) <= 80:
                    return True
        if len(normalized) <= 80:
            for keyword in self._TEXT_NOISE_KEYWORDS:
                if re.search(r"[a-z]", keyword):
                    # Match ASCII keywords on word boundaries so that short
                    # entries like "ad" or "nav" do not fire inside ordinary
                    # words such as "read" or "navigate".
                    if re.search(r"\b" + re.escape(keyword) + r"\b", lowered):
                        return True
                elif keyword in lowered or keyword in compact:
                    return True
        # Skip very short bullet-like crumbs that mostly consist of symbols.
        if len(normalized) <= 6 and sum(ch.isalnum() for ch in normalized) <= 1:
            return True
        return False
    @staticmethod
    def _parse_ordered_start(raw_value: Optional[str]) -> int:
        if raw_value is None:
            return 1
        value = raw_value.strip()
        if not value:
            return 1
        try:
            parsed = int(value)
            return parsed if parsed >= 1 else 1
        except ValueError:
            return 1
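    # Examples: _parse_ordered_start(" 3 ") -> 3, ("0") -> 1 (clamped to a
    # 1-based start), ("x") and (None) -> 1 (the HTML default).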
    def handle_starttag(self, tag, attrs):
        lowered = tag.lower()
        # Paragraph boundary before starting a new block element or <br>.
        if lowered in self._BLOCK_TAGS or lowered == "br":
            if self._skip_depth == 0:
                self._finish_paragraph()
        # Entering a <pre> region: treat it as a dedicated code block.
        if lowered == "pre" and self._skip_depth == 0:
            self._finish_paragraph()
            self._in_pre = True
            self._current_code_chunks = []
        # Decide whether this element should be skipped entirely.
        attr_dict = {k.lower(): (v or "") for k, v in attrs}
        role = attr_dict.get("role", "").lower()
        classes_ids = (attr_dict.get("class", "") + " " + attr_dict.get("id", "")).lower()
        is_noise_attr = False
        # Only treat class/id keywords as layout "noise" on structural
        # containers (div/section/nav/etc). Inline tags with "comment"
        # in their class (like mdspan-comment on Towards Data Science)
        # should not be discarded, otherwise we lose the first words
        # of sentences.
        if lowered in self._STRUCTURAL_NOISE_TAGS:
            is_noise_attr = any(key in classes_ids for key in self._NOISE_KEYWORDS)
        if role in {"navigation", "banner", "contentinfo", "complementary"}:
            is_noise_attr = True
        skip_this = lowered in self._ALWAYS_SKIP_TAGS or is_noise_attr
        # Void elements never produce a matching end tag, so pushing them
        # onto the skip stack would leave it permanently out of sync (a lone
        # <input> outside a <form> would otherwise suppress all later text).
        if lowered not in self._VOID_TAGS:
            if skip_this:
                self._skip_depth += 1
            self._skip_stack.append(skip_this)
        # Track when we are inside an article-like container; only count it
        # when not skipped.
        if self._skip_depth == 0 and lowered in {"article", "main", "section", "div"}:
            # Treat semantic containers and common "main content" classes as
            # part of the article area so that we keep their text and inline
            # media but still avoid sidebars / nav.
            if lowered in {"article", "main"} or any(
                key in classes_ids for key in self._CONTENT_KEYWORDS
            ) or role == "main":
                self._article_depth += 1
        if self._skip_depth == 0 and lowered in {"ul", "ol"}:
            start = 1
            if lowered == "ol":
                start = self._parse_ordered_start(attr_dict.get("start"))
            self._list_stack.append(
                {
                    "type": lowered,
                    "start": start,
                    "next_index": start,
                }
            )
        if lowered == "li" and self._skip_depth == 0:
            list_ctx = self._list_stack[-1] if self._list_stack else None
            depth = len(self._list_stack) if self._list_stack else 1
            list_type = list_ctx.get("type") if list_ctx else "ul"
            index = None
            if list_ctx and list_ctx["type"] == "ol":
                index = list_ctx["next_index"]
                list_ctx["next_index"] = index + 1
            li_value = attr_dict.get("value")
            if li_value and list_ctx and list_ctx["type"] == "ol":
                try:
                    value_idx = int(li_value)
                    index = value_idx
                    list_ctx["next_index"] = value_idx + 1
                except ValueError:
                    pass
            self._list_item_stack.append(
                {
                    "list_type": list_type,
                    "index": index,
                    "depth": depth,
                }
            )
        if lowered == "title" and self._skip_depth == 0:
            self._in_title = True
        if lowered == "h1" and self._skip_depth == 0:
            self._in_h1 = True
        if lowered == "code" and self._skip_depth == 0 and self._in_pre:
            # Nested <code> inside <pre>: keep track, but we don't need
            # separate buffering beyond the enclosing pre block.
            self._in_code = True
        # Inline image handling: only keep <img> elements that are inside the
        # main article content (tracked via _article_depth) and that do not
        # look like avatars / logos / decorative icons. We insert a stable
        # placeholder token into the text stream so that the /proxy renderer
        # can later replace it with a real <img> tag while preserving the
        # grammar highlighting.
        if lowered == "img" and self._skip_depth == 0 and self._article_depth > 0:
            src = attr_dict.get("src", "").strip()
            if src:
                alt = attr_dict.get("alt", "") or ""
                title = attr_dict.get("title", "") or ""
                width = (attr_dict.get("width") or "").strip()
                height = (attr_dict.get("height") or "").strip()
                img_classes_ids = classes_ids + " " + src.lower()
                if any(key in img_classes_ids for key in self._IMAGE_NOISE_KEYWORDS):
                    return
                marker = f"__GHIMG_{len(self._images)}__"
                img_info: Dict[str, str] = {
                    "marker": marker,
                    "src": src,
                    "alt": alt,
                    "title": title,
                }
                if width:
                    img_info["width"] = width
                if height:
                    img_info["height"] = height
                self._images.append(img_info)
                # Treat the image as an inline token within the current
                # paragraph. Paragraph finishing logic will ensure it
                # stays grouped with surrounding text.
                self._current_parts.append(marker)
    def handle_endtag(self, tag):
        lowered = tag.lower()
        if lowered in self._VOID_TAGS:
            # Void elements are never pushed in handle_starttag, so a
            # synthetic end event (e.g. from "<br/>") must not pop either.
            return
        if lowered == "code" and self._in_code:
            self._in_code = False
        if lowered == "pre" and self._in_pre:
            self._in_pre = False
            # Finalize the current code block into a single placeholder
            # token so that it passes through the grammar highlighter
            # untouched and can later be restored as a <pre><code> block.
            code_text = "".join(self._current_code_chunks)
            self._current_code_chunks = []
            if code_text.strip() and self._skip_depth == 0:
                marker = f"__GHCODE_{len(self._code_blocks)}__"
                self._code_blocks.append({"marker": marker, "text": code_text})
                # We append the marker to the paragraph parts so that
                # get_text() emits it in the right position.
                self._current_parts.append(marker)
        # Closing a block element ends the current paragraph.
        if lowered in self._BLOCK_TAGS and self._skip_depth == 0:
            self._finish_paragraph()
        if lowered == "li" and self._skip_depth == 0 and self._list_item_stack:
            self._list_item_stack.pop()
        if lowered in {"ul", "ol"} and self._skip_depth == 0 and self._list_stack:
            self._list_stack.pop()
        if lowered == "title":
            self._in_title = False
        if lowered == "h1":
            self._in_h1 = False
        if lowered in {"article", "main", "section"} and self._skip_depth == 0 and self._article_depth > 0:
            self._article_depth -= 1
        if self._skip_stack:
            skip_this = self._skip_stack.pop()
            if skip_this and self._skip_depth > 0:
                self._skip_depth -= 1
    def handle_data(self, data):
        if self._skip_depth > 0:
            return
        if self._in_pre or self._in_code:
            # Preserve code blocks exactly as they appear, including
            # newlines and indentation.
            self._current_code_chunks.append(data)
            return
        stripped = data.strip()
        if not stripped:
            return
        if self._in_title:
            self._title_chunks.append(stripped)
            return
        # Regular visible text.
        self._current_parts.append(stripped)
        if self._in_h1:
            self._h1_chunks.append(stripped)
            if self._article_depth > 0:
                self._h1_main_chunks.append(stripped)
    def get_text(self) -> str:
        # Flush any trailing paragraph.
        self._finish_paragraph()
        blocks = self._selected_blocks()
        if not blocks:
            return ""
        return "\n\n".join(block["text"] for block in blocks)

    def _selected_blocks(self) -> List[Dict[str, Any]]:
        if not self._blocks:
            return []
        main_blocks = [block for block in self._blocks if block.get("is_main")]
        return main_blocks if main_blocks else self._blocks

    def get_blocks(self) -> List[Dict[str, Any]]:
        blocks = self._selected_blocks()
        return [dict(block) for block in blocks]

    def get_title(self) -> str:
        # Prefer the <h1> heading (especially inside <article>/<main>) as
        # the primary title; fall back to <title>.
        if self._h1_main_chunks:
            raw = " ".join(self._h1_main_chunks)
        elif self._h1_chunks:
            raw = " ".join(self._h1_chunks)
        elif self._title_chunks:
            raw = " ".join(self._title_chunks)
        else:
            return ""
        return re.sub(r"\s+", " ", raw).strip()

    def get_images(self) -> List[Dict[str, str]]:
        """Return the list of captured inline images in document order."""
        return list(self._images)

    def get_code_blocks(self) -> List[Dict[str, str]]:
        """Return captured code blocks (from <pre>/<code>) in document order."""
        return list(self._code_blocks)
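# Usage sketch of the stripper on hypothetical markup:
#     stripper = SimpleHTMLStripper()
#     stripper.feed("<main><h1>Intro</h1><p>First paragraph.</p></main>")
#     stripper.get_title()  # -> "Intro"
#     stripper.get_text()   # -> "Intro\n\nFirst paragraph."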
def _normalize_target_url(raw_url: str) -> str:
    candidate = (raw_url or "").strip()
    if not candidate:
        raise ValueError("请输入要抓取的 URL。")
    parsed = urlparse(candidate if "://" in candidate else f"https://{candidate}")
    if parsed.scheme not in ALLOWED_URL_SCHEMES:
        raise ValueError("仅支持 http/https 协议链接。")
    if not parsed.netloc:
        raise ValueError("URL 缺少域名部分。")
    sanitized = parsed._replace(fragment="")
    return urlunparse(sanitized)
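# Examples (values follow from the rules above):
#     _normalize_target_url("example.com/a#top")  # -> "https://example.com/a"
#     _normalize_target_url("ftp://example.com")  # raises ValueError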
def _fallback_html_to_text(html_body: str) -> str:
    """Very simple HTML-to-text fallback used when structured extraction fails.

    This does not attempt to distinguish main content from navigation, but it
    guarantees we return *something* for pages whose structure confuses the
    SimpleHTMLStripper heuristics (e.g. some mirror sites).
    """
    # Drop script/style/noscript content outright.
    cleaned = re.sub(
        r"(?is)<(script|style|noscript)[^>]*>.*?</\1>",
        " ",
        html_body,
    )
    # Convert common block separators into newlines.
    cleaned = re.sub(r"(?i)<br\s*/?>", "\n", cleaned)
    cleaned = re.sub(r"(?i)</p\s*>", "\n\n", cleaned)
    cleaned = re.sub(r"(?i)</(div|section|article|li|h[1-6])\s*>", "\n\n", cleaned)
    # Remove all remaining tags.
    cleaned = re.sub(r"(?is)<[^>]+>", " ", cleaned)
    cleaned = html.unescape(cleaned)
    # Normalize whitespace but keep paragraph-level blank lines.
    cleaned = cleaned.replace("\r", "")
    # Collapse runs of spaces/tabs inside lines.
    cleaned = re.sub(r"[ \t\f\v]+", " ", cleaned)
    # Collapse 3+ blank lines into just 2.
    cleaned = re.sub(r"\n\s*\n\s*\n+", "\n\n", cleaned)
    return cleaned.strip()
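# Example: '<div>Hello <b>world</b><br>bye</div><script>x()</script>' yields
# "Hello world" and "bye" on separate lines (modulo residual spaces around
# the break), with the script body dropped entirely.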
def _build_paragraph_metadata(blocks: List[Dict[str, Any]]) -> List[Dict[str, str]]:
    """Convert stripped block info into span attributes for downstream rendering."""
    if not blocks:
        return []
    paragraph_meta: List[Dict[str, str]] = []
    for block in blocks:
        attrs: Dict[str, str] = {}
        if block.get("kind") == "list-item" and block.get("list_kind"):
            attrs["data-list-kind"] = str(block["list_kind"])
            depth = block.get("list_depth")
            if depth:
                attrs["data-list-depth"] = str(depth)
            if block.get("list_kind") == "ol" and block.get("list_index") is not None:
                attrs["data-list-index"] = str(block["list_index"])
        paragraph_meta.append(attrs)
    return paragraph_meta
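# Example: a block like {"kind": "list-item", "list_kind": "ol",
# "list_depth": 2, "list_index": 3} maps to {"data-list-kind": "ol",
# "data-list-depth": "2", "data-list-index": "3"}; plain paragraphs map to {}.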
def _decode_html_bytes(raw_content: bytes, encoding_hint: Optional[str]) -> str:
    encoding_candidates: List[str] = []
    if encoding_hint:
        encoding_candidates.append(encoding_hint)
    encoding_candidates.extend(["utf-8", "latin-1"])
    last_exc: Optional[Exception] = None
    for enc in encoding_candidates:
        try:
            html_body = raw_content.decode(enc, errors="replace")
            break
        except Exception as exc:  # pragma: no cover - defensive (unknown codec names)
            last_exc = exc
    else:  # pragma: no cover - extremely unlikely
        raise RuntimeError(f"无法解码远程页面内容: {last_exc}")
    if len(html_body) > MAX_REMOTE_HTML_BYTES:
        # The cap is nominally in bytes; applying it to the decoded string is
        # a close-enough guard against pathologically large pages.
        html_body = html_body[:MAX_REMOTE_HTML_BYTES]
    return html_body
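# Candidate-order sketch: with encoding_hint="gb2312" the loop tries gb2312,
# then utf-8, then latin-1. Because errors="replace" never raises, a later
# candidate is only reached when an earlier name is unknown to Python's codec
# registry (LookupError).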
async def _download_html_via_httpx(url: str) -> str:
    async with httpx.AsyncClient(timeout=REMOTE_FETCH_TIMEOUT, follow_redirects=True) as client:
        response = await client.get(url, headers=REMOTE_FETCH_HEADERS)
        # Surface HTTP errors before spending time decoding the body.
        response.raise_for_status()
        return _decode_html_bytes(response.content, response.encoding)
async def _download_html_via_stdlib(url: str) -> str:
    def _sync_fetch() -> Tuple[bytes, Optional[str]]:
        req = urllib_request.Request(url, headers=SIMPLE_FETCH_HEADERS)
        opener = urllib_request.build_opener(urllib_request.ProxyHandler({}))
        with opener.open(req, timeout=REMOTE_FETCH_TIMEOUT) as resp:
            data = resp.read(MAX_REMOTE_HTML_BYTES + 1)
            headers = getattr(resp, "headers", None)
            encoding_hint = None
            if headers is not None:
                get_charset = getattr(headers, "get_content_charset", None)
                if callable(get_charset):
                    encoding_hint = get_charset()
                if not encoding_hint:
                    content_type = headers.get("Content-Type", "")
                    match = re.search(r"charset=([\w-]+)", content_type or "", re.IGNORECASE)
                    if match:
                        encoding_hint = match.group(1)
            return data, encoding_hint

    raw_content, encoding_hint = await asyncio.to_thread(_sync_fetch)
    return _decode_html_bytes(raw_content, encoding_hint)
async def _download_html_with_fallback(url: str) -> str:
    first_exc: Optional[Exception] = None
    try:
        return await _download_html_via_httpx(url)
    except httpx.HTTPStatusError as exc:
        status = exc.response.status_code if exc.response is not None else None
        if status not in {401, 403, 407, 429, 451}:
            raise
        first_exc = exc
    except httpx.HTTPError as exc:
        first_exc = exc
    try:
        return await _download_html_via_stdlib(url)
    except (urllib_error.URLError, urllib_error.HTTPError, TimeoutError) as fallback_exc:
        if first_exc:
            raise first_exc from fallback_exc
        raise
async def _fetch_remote_plaintext(
    url: str,
) -> Tuple[str, str, str, List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]]:
    normalized = _normalize_target_url(url)
    html_body = await _download_html_with_fallback(normalized)
    stripper = SimpleHTMLStripper()
    stripper.feed(html_body)
    title = stripper.get_title() or normalized
    images = stripper.get_images()
    code_blocks = stripper.get_code_blocks()
    plain_text = stripper.get_text()
    block_info = stripper.get_blocks()
    if not plain_text:
        plain_text = _fallback_html_to_text(html_body)
        if not plain_text:
            raise ValueError("未能从该页面提取正文。")
        # Fallback text no longer contains structured placeholders, so any
        # collected media/code markers would be invalid.
        images = []
        code_blocks = []
        block_info = []
    paragraph_meta = _build_paragraph_metadata(block_info)
    return normalized, title, plain_text, images, code_blocks, paragraph_meta
def _render_proxy_page(
    *,
    url_value: str = "",
    message: Optional[str] = None,
    is_error: bool = False,
    highlight_fragment: Optional[str] = None,
    source_url: Optional[str] = None,
    source_title: Optional[str] = None,
    show_images: bool = False,
    image_notice: Optional[str] = None,
) -> str:
    helper_state = "on" if SENTENCE_HELPER_ENABLED else "off"
    status_block = ""
    if message:
        cls = "status err" if is_error else "status ok"
        status_block = f"<p class='{cls}'>{html.escape(message)}</p>"
    style_block = STYLE_BLOCK if highlight_fragment else ""
    result_block = ""
    if highlight_fragment and source_url:
        safe_url = html.escape(source_url, quote=True)
        safe_title = html.escape(source_title or source_url)
        image_hint = ""
        if image_notice:
            image_hint = f"<p class='image-hint'>{html.escape(image_notice)}</p>"
        result_block = (
            "<section class='result'>"
            f"<div class='source'>原页面:<a href='{safe_url}' target='_blank' rel='noopener'>{safe_title}</a></div>"
            f"<div class='analysis' data-helper='{helper_state}'>{highlight_fragment}</div>"
            f"{image_hint}"
            "</section>"
        )
    show_images_checked = "checked" if show_images else ""
    return PROXY_PAGE_TEMPLATE.substitute(
        style_block=style_block,
        url_value=html.escape(url_value or "", quote=True),
        status_block=status_block,
        result_block=result_block,
        show_images_checked=show_images_checked,
    )