Update web/core/scripture_normalizer.py
parent f75f16b03e
commit c43ff0a5da
@@ -1,37 +1,41 @@
 # core/scripture_normalizer.py
 from __future__ import annotations
 import re
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List, Tuple

-# --------------------------
+# -----------------------------
 # Canonical book abbreviations
-# --------------------------
+# -----------------------------
 BOOK_CANON: Dict[str, str] = {
     # OT
     "genesis": "Gen.", "gen": "Gen.", "genesisesis": "Gen.",
     "exodus": "Ex.", "ex": "Ex.",
     "leviticus": "Lev.", "lev": "Lev.",
     "numbers": "Num.", "num": "Num.", "nums": "Num.",
-    "deuteronomy": "Deut.", "deut": "Deut.", "deutronomy": "Deut.", "deu": "Deut.",
+    "deuteronomy": "Deut.", "deut": "Deut.", "deu": "Deut.", "deutronomy": "Deut.", "deut.": "Deut.",
     "joshua": "Josh.", "josh": "Josh.",
     "judges": "Judg.", "judg": "Judg.",
     "ruth": "Ruth",
-    "1 samuel": "1 Sam.", "i samuel": "1 Sam.", "1 sam": "1 Sam.", "1sam": "1 Sam.",
-    "2 samuel": "2 Sam.", "ii samuel": "2 Sam.", "2 sam": "2 Sam.", "2sam": "2 Sam.",
+    "1 samuel": "1 Sam.", "i samuel": "1 Sam.", "1 sam": "1 Sam.",
+    "2 samuel": "2 Sam.", "ii samuel": "2 Sam.", "2 sam": "2 Sam.",
     "1 kings": "1 Ki.", "i kings": "1 Ki.", "1 ki": "1 Ki.", "1kgs": "1 Ki.", "1 kgs": "1 Ki.",
     "2 kings": "2 Ki.", "ii kings": "2 Ki.", "2 ki": "2 Ki.", "2kgs": "2 Ki.", "2 kgs": "2 Ki.",
     "1 chronicles": "1 Chron.", "i chronicles": "1 Chron.", "1 chron": "1 Chron.",
     "2 chronicles": "2 Chron.", "ii chronicles": "2 Chron.", "2 chron": "2 Chron.",
+    # short “Ch/Chr” forms (plus numbered)
+    "ch": "Chron.", "chr": "Chron.",
+    "1 ch": "1 Chron.", "1 chr": "1 Chron.",
+    "2 ch": "2 Chron.", "2 chr": "2 Chron.",
     "ezra": "Ezra",
     "nehemiah": "Neh.", "neh": "Neh.",
     "esther": "Esth.", "esth": "Esth.",
     "job": "Job",
-    "psalms": "Ps.", "psalm": "Ps.", "ps": "Ps.", "pss": "Ps.",
+    "psalm": "Ps.", "psalms": "Ps.", "ps": "Ps.",
     "proverbs": "Prov.", "prov": "Prov.",
     "ecclesiastes": "Eccl.", "eccles": "Eccl.", "eccl": "Eccl.",
     "song of solomon": "Song", "song of songs": "Song", "song": "Song",
-    "isaiah": "Isa.", "isa": "Isa.", # IMPORTANT: direct alias so 'Isa.' never becomes '1 Sam.'
-    "jeremiah": "Jer.", "jer": "Jer.",
+    "isaiah": "Isa.", "isa": "Isa.",
+    "jeremiah": "Jer.", "jer": "Jer.", "jer.": "Jer.",
     "lamentations": "Lam.", "lam": "Lam.",
     "ezekiel": "Ezek.", "ezek": "Ezek.",
     "daniel": "Dan.", "dan": "Dan.",
@@ -49,22 +53,22 @@ BOOK_CANON: Dict[str, str] = {
     "malachi": "Mal.", "mal": "Mal.",
     # NT
     "matthew": "Matt.", "matt": "Matt.", "mt": "Matt.",
-    "mark": "Mark", "mk": "Mark", "mrk": "Mark", "mr": "Mark",
+    "mark": "Mark", "mk": "Mark.",
     "luke": "Luke", "lk": "Luke",
-    "john": "John", "jn": "John", "joh": "John",
+    "john": "John", "jn": "John",
     "acts": "Acts",
     "romans": "Rom.", "rom": "Rom.",
-    "1 corinthians": "1 Cor.", "i corinthians": "1 Cor.", "1 cor": "1 Cor.", "1co": "1 Cor.",
-    "2 corinthians": "2 Cor.", "ii corinthians": "2 Cor.", "2 cor": "2 Cor.", "2co": "2 Cor.",
+    "1 corinthians": "1 Cor.", "i corinthians": "1 Cor.", "1 cor": "1 Cor.", "1 cor.": "1 Cor.",
+    "2 corinthians": "2 Cor.", "ii corinthians": "2 Cor.", "2 cor": "2 Cor.", "2 cor.": "2 Cor.",
     "galatians": "Gal.", "gal": "Gal.",
-    "ephesians": "Eph.", "eph": "Eph.",
-    "philippians": "Phil.", "phil": "Phil.", "philippians 216": "Phil.", # import glitch seen
+    "ephesians": "Eph.", "eph": "Eph.", "eph.": "Eph.",
+    "philippians": "Phil.", "phil": "Phil.", "philippians 216": "Phil.",
     "colossians": "Col.", "col": "Col.",
     "1 thessalonians": "1 Thess.", "i thessalonians": "1 Thess.", "1 thess": "1 Thess.",
     "2 thessalonians": "2 Thess.", "ii thessalonians": "2 Thess.", "2 thess": "2 Thess.",
-    "1 timothy": "1 Tim.", "i timothy": "1 Tim.", "1 tim": "1 Tim.", "1ti": "1 Tim.",
-    "2 timothy": "2 Tim.", "ii timothy": "2 Tim.", "2 tim": "2 Tim.", "2ti": "2 Tim.",
-    "titus": "Titus", "tit": "Titus",
+    "1 timothy": "1 Tim.", "i timothy": "1 Tim.", "1 tim": "1 Tim.",
+    "2 timothy": "2 Tim.", "ii timothy": "2 Tim.", "2 tim": "2 Tim.",
+    "titus": "Titus",
     "philemon": "Philem.", "philem": "Philem.",
     "hebrews": "Heb.", "heb": "Heb.",
     "james": "Jas.", "jas": "Jas.",
@@ -74,151 +78,167 @@ BOOK_CANON: Dict[str, str] = {
     "2 john": "2 John", "ii john": "2 John",
     "3 john": "3 John", "iii john": "3 John",
     "jude": "Jude",
-    "revelation": "Rev.", "rev": "Rev.", "1 ch": "1 Chron.", "1 chr": "1 Chron.", "1 Ch": "1 Chron.", "1 Chr": "1 Chron.",
+    "revelation": "Rev.", "rev": "Rev.",
-    "2 ch": "2 Chron.", "2 chr": "2 Chron.", "2 Ch": "2 Chron.", "2 Chr": "2 Chron.",
-    "ch": "Chron.", "chr": "Chron.",
 }

-# Short bare tokens that must *never* be treated as “roman numeral + book”
+# For ultra-compact "1pe" / "2co" / "1ti" / "2th" etc.
-# (prevents 'Isa.' => '1 Sam.' and similar).
+NUMBERED_SHORT = {
-NEVER_PREFIX_NUMERAL = {"isa", "isaiah", "job", "joel", "amos", "nah", "hag", "mal", "rom", "gal", "eph", "tit", "heb", "jas", "jude"}
+    "sa": "Sam.", "sam": "Sam.",
+    "ki": "Ki.", "kgs": "Ki.", "kg": "Ki.",
+    "ch": "Chron.", "chr": "Chron.",
+    "co": "Cor.", "cor": "Cor.",
+    "th": "Thess.", "ths": "Thess.",
+    "ti": "Tim.", "tim": "Tim.",
+    "pe": "Pet.", "pet": "Pet.",
+    "jn": "John", "jo": "John", "john": "John",
+}

-def _canon_key(s: str) -> str:
+# strip cruft words like "Read", "chapter"
-    s = (s or "").strip().lower()
-    s = s.replace(".", " ")
-    s = re.sub(r"\s+", " ", s)
-    return s

-def _direct_lookup(key: str) -> Optional[str]:
-    """Try direct lookups with and without spaces (e.g., 'i samuel', '1co')."""
-    if key in BOOK_CANON:
-        return BOOK_CANON[key]
-    nospace = key.replace(" ", "")
-    if nospace in BOOK_CANON:
-        return BOOK_CANON[nospace]
-    return None

-# Split helpers
 CRUFT_RE = re.compile(r"\b(read|see|chap(?:ter)?|ch)\b\.?", re.I)
-def _clean_text(s: str) -> str:
-    s = s.replace("\xa0", " ").replace("\u2009", " ").replace("\u202f", " ")
-    s = CRUFT_RE.sub("", s)
-    s = s.replace("—", "-").replace("–", "-")
-    s = re.sub(r"\s+", " ", s)
-    return s.strip(" ;,.\t\r\n ")

-BOOK_PREFIX_RE = re.compile(
+# book-like prefix (optional leading number)
+BOOK_RE = re.compile(
     r"""
     ^\s*
-    (?:(?P<num>[1-3]|i{1,3})\s*)? # optional leading number (1/2/3 or i/ii/iii)
+    (?:(?P<num>[1-3]|i{1,3})\s*)? # 1/2/3 or i/ii/iii
-    (?P<book>[A-Za-z\.]+(?:\s+[A-Za-z\.]+){0,2}) # 1-3 words for the book
+    (?P<book>[A-Za-z\.]+(?:\s+[A-Za-z\.]+){0,2})
+    \s*
     """,
     re.X,
 )

-C_V_RE = re.compile(r"(?P<ch>\d+)(?::(?P<vs>[\d,\-\s]+))?")
+# chapter/verse piece
+C_V_RE = re.compile(
+    r"""
+    (?:
+        (?P<ch>\d+)
+        (?::(?P<vs>[\d,\-\u2013\u2014\s]+))?
+    )
+    """,
+    re.X,
+)

-def _canon_book(book_raw: str, num: Optional[str]) -> Optional[str]:
+def _clean_text(s: str) -> str:
+    s = s.replace("\xa0", " ").replace("\u2009", " ").replace("\u202f", " ")
+    s = CRUFT_RE.sub("", s)
+    s = s.replace("..", ".").replace("—", "-").replace("–", "-")
+    s = re.sub(r"\s+", " ", s)
+    return s.strip(" ;,.\t\r\n ")

+def _canon_book(book_raw: str, num: str | None = None) -> str | None:
     """
-    Decide the canonical book abbreviation.
+    Normalize a book token (with or without a numeric prefix).
-    Rules:
+    Handles tight forms like '1pe', '2co', '1ch', etc.
-    1) Always try direct alias matches first (with/without spaces).
-    2) If the *book* piece looks like a short token that could be misread as a roman
-       numeral + another book (e.g., 'Isa'), *never* do numeral heuristics.
-    3) Otherwise, allow '1/2/3 + co/cor/ti/tim/pet/peter/jn/john' style heuristics.
     """
-    key = _canon_key(book_raw)
+    if not book_raw and not num:
-    # Step 1: direct lookups
+        return None
-    direct = _direct_lookup(key)
-    if direct:
-        return direct

-    # Guard: tokens like 'isa' must not be split into 'i' + 'sa'
+    # Build a normalized key with number + book words
-    bare = key.replace(" ", "")
+    raw = ((num or "") + " " + (book_raw or "")).strip()
-    if bare in NEVER_PREFIX_NUMERAL:
+    key = raw.lower().replace(".", " ")
-        return _direct_lookup(bare)
+    key = re.sub(r"\s+", " ", key)

-    # Step 3: numeral + short token combos
+    # If it's like '1pe' (no space), insert a space
-    if num:
+    key = re.sub(r"^([1-3]|i{1,3})([a-z])", r"\1 \2", key)
-        n = {"i": "1", "ii": "2", "iii": "3"}.get(num.lower(), num)
-        short = key.replace(" ", "")
+    # Try exact phrase first (e.g., "1 peter", "2 cor")
-        # common short targets where number matters
+    if key in BOOK_CANON:
-        map_num = {
+        return BOOK_CANON[key]
-            "co": "Cor.", "cor": "Cor.",
-            "ti": "Tim.", "tim": "Tim.",
+    # Try without number (e.g., "peter", "cor")
-            "pe": "Pet.", "pet": "Pet.", "peter": "Pet.",
+    parts = key.split(" ", 1)
-            "jo": "John", "jn": "John", "joh": "John", "john": "John",
+    if len(parts) == 2 and parts[1] in BOOK_CANON and parts[0] in {"1","2","3","i","ii","iii"}:
-            "sa": "Sam.", "sam": "Sam.", "samuel": "Sam.",
+        canon_no_num = BOOK_CANON[parts[1]]
-            "ki": "Ki.", "kgs": "Ki.", "kings": "Ki.",
+        # replace leading number in canon if present (for John we keep "1 John")
-            "chronicles": "Chron.", "chron": "Chron.",
+        if canon_no_num.startswith(("1 ", "2 ", "3 ")):
-            "thessalonians": "Thess.", "thess": "Thess.",
+            # if canon is already numbered (like "1 John"), prefer that
-        }
+            return canon_no_num
-        if short in map_num:
+        lead = parts[0]
-            return f"{n} {map_num[short]}"
+        lead = {"i":"1","ii":"2","iii":"3"}.get(lead, lead)
+        return f"{lead} {canon_no_num}"

+    # Handle compact numbered shorts like '1 pe', '2 co', '1 ch', '2 th'
+    m = re.match(r"^(?P<n>[1-3]|i{1,3})\s*(?P<s>[a-z]{1,4})$", key)
+    if m:
+        n = m.group("n")
+        s = m.group("s")
+        n = {"i":"1","ii":"2","iii":"3"}.get(n, n)
+        if s in NUMBERED_SHORT:
+            return f"{n} {NUMBERED_SHORT[s]}"

+    # Lastly try pure book without number
+    if key in BOOK_CANON:
+        return BOOK_CANON[key]

-    # Fallback
     return None

-def _parse_segment(seg: str, last_book: Optional[str]) -> Tuple[Optional[str], Optional[str], bool]:
+def _parse_segment(seg: str, last_book: str | None) -> Tuple[str | None, str | None, bool]:
     """
-    Parse one semicolon-delimited piece.
+    Parse one semicolon-delimited segment.
-    Returns (book_canon, cv, preserve_original_if_unparsed).
+    Returns (book_canon, cv_string, preserve_original_if_unknown)
     """
-    raw = _clean_text(seg)
+    original = seg
-    if not raw:
+    s = _clean_text(seg)
+    if not s:
         return (None, None, False)

-    m = BOOK_PREFIX_RE.match(raw)
+    # Detect a leading number stuck to letters like "1co", "2pe"
-    book_part = None
+    m_tight = re.match(r"^\s*(?P<num>[1-3]|i{1,3})\s*(?P<letters>[a-z]{1,4})\b\.?", s, flags=re.I)
-    num = None
+    if m_tight:
-    rest = raw
+        num = m_tight.group("num")
+        letters = m_tight.group("letters")
+        canon = _canon_book(letters, num=num)
+        if canon:
+            rest = s[m_tight.end():].strip(",;: .")
+            book = canon
+        else:
+            # not recognized—keep whole piece verbatim
+            return (None, original.strip(), True)
+    else:
+        # General book matcher
+        m = BOOK_RE.match(s)
+        book = None
+        rest = s
+        if m:
+            num = m.group("num")
+            raw_book = (m.group("book") or "").strip()
+            canon = _canon_book(raw_book, num=num or None)
+            if canon:
+                book = canon
+                rest = s[m.end():].strip(",;: .")
+            else:
+                # Not a recognized book: if we already have a last_book, treat
+                # this as CV only; otherwise preserve the original piece.
+                if last_book:
+                    book = last_book
+                else:
+                    return (None, original.strip(), True)
+        else:
+            # No obvious book – inherit if we can, else preserve original
+            if last_book:
+                book = last_book
+            else:
+                return (None, original.strip(), True)

-    if m:
+    # Normalize chapter/verse part
-        # What the regex thinks is a numeric prefix and a book word blob:
-        num = (m.group("num") or "").strip() or None
-        book_text = (m.group("book") or "").strip()
-        rest = raw[m.end():].strip(" :,;.")

-        # Try to resolve the book using robust rules
-        book_part = _canon_book(book_text if not num else f"{num} {book_text}", num=num)

-        # If regex split out a bogus 'i' from something like "Isa.", fix by trying full token directly.
-        if not book_part:
-            whole = _canon_book(book_text, None)
-            if whole:
-                book_part = whole
-                rest = raw[m.end():].strip(" :,;.")

-    # no book found → inherit previous if we have verses; otherwise preserve as-is
-    if not book_part:
-        # If there's obvious chapter/verse, but no book, we cannot link → preserve as-is.
-        if C_V_RE.search(rest):
-            return (None, None, True)
-        # Or the whole thing might already be a known book alone:
-        whole = _canon_book(raw, None)
-        if whole:
-            return (whole, None, False)
-        return (None, None, True)

-    # normalize chapter/verse
     rest = rest.replace(" ", "")
+    rest = re.sub(r":\s+", ":", rest)
     if not rest:
-        return (book_part, None, False)
+        cv = None
+    else:
+        if C_V_RE.search(rest):
+            cv = rest.replace(" ", "").replace("–", "-").replace("—", "-")
+        else:
+            m2 = re.search(r"\d+(?::[\d,\-]+)?", rest)
+            cv = m2.group(0).replace(" ", "") if m2 else None

-    mcv = C_V_RE.search(rest)
+    return (book, cv, False)
-    if mcv:
-        cv = rest
-        cv = cv.replace(" ", "")
-        return (book_part, cv, False)

-    # not a recognizable cv → keep original piece untouched
-    return (None, None, True)

 def normalize_scripture_field(text: str) -> Tuple[str, List[str]]:
     """
-    Normalize a whole scripture_raw string.
+    Normalize a scripture_raw string.
     Returns (normalized_text, warnings).
-    Unknown/unparseable chunks are preserved verbatim.
+    Unknown segments are preserved as-is and reported in warnings.
     """
     warnings: List[str] = []
     if not text:
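Editor's note: the compact-reference handling that this hunk introduces in _canon_book (splitting "1pe" into "1 pe" and mapping the suffix through NUMBERED_SHORT) can be illustrated with a small standalone sketch. This is illustrative only and is not part of the commit; resolve_compact and NUMBERED_SHORT_SAMPLE are hypothetical names chosen so the snippet runs on its own (Python 3.10+ assumed for the "str | None" annotation).

# Illustrative sketch only, not part of the commit: mimics the compact-form
# resolution used by _canon_book above. NUMBERED_SHORT_SAMPLE is a small
# subset of the module's NUMBERED_SHORT table, reproduced here so this runs
# on its own.
from __future__ import annotations
import re

NUMBERED_SHORT_SAMPLE = {"pe": "Pet.", "co": "Cor.", "ti": "Tim.", "ch": "Chron."}

def resolve_compact(token: str) -> str | None:
    key = token.lower().strip()
    # "2co" -> "2 co": split a leading 1/2/3 (or roman i/ii/iii) from the letters
    key = re.sub(r"^([1-3]|i{1,3})([a-z])", r"\1 \2", key)
    m = re.match(r"^(?P<n>[1-3]|i{1,3})\s*(?P<s>[a-z]{1,4})$", key)
    if not m:
        return None
    n = {"i": "1", "ii": "2", "iii": "3"}.get(m.group("n"), m.group("n"))
    s = m.group("s")
    return f"{n} {NUMBERED_SHORT_SAMPLE[s]}" if s in NUMBERED_SHORT_SAMPLE else None

print(resolve_compact("1pe"))  # 1 Pet.
print(resolve_compact("2co"))  # 2 Cor.
print(resolve_compact("2ti"))  # 2 Tim.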
@@ -226,14 +246,16 @@ def normalize_scripture_field(text: str) -> Tuple[str, List[str]]:

     pieces = [p for p in re.split(r"\s*;\s*", text) if p and p.strip()]
     out: List[str] = []
-    last_book: Optional[str] = None
+    last_book: str | None = None

     for piece in pieces:
         book, cv, preserve = _parse_segment(piece, last_book)

         if preserve:
-            # Keep the original chunk
+            out.append(piece.strip())
-            out.append(_clean_text(piece))
+            warnings.append(f"Unrecognized segment kept as-is: '{piece.strip()}'")
+            continue

+        if not book and not cv:
             continue

         if book and not cv:
@@ -242,13 +264,15 @@ def normalize_scripture_field(text: str) -> Tuple[str, List[str]]:
             continue

         if not book and cv:
+            # Shouldn't really happen now; keep as-is
+            out.append(piece.strip())
             warnings.append(f"Missing book for '{piece.strip()}'")
             continue

-        if book and cv:
+        out.append(f"{book} {cv}")
-            out.append(f"{book} {cv}")
+        last_book = book
-            last_book = book

-    norm = "; ".join(s for s in (o.strip(" ;,") for o in out) if s)
+    norm = "; ".join(o.strip() for o in out if o.strip())
     norm = re.sub(r"\s+", " ", norm).strip(" ;,")

     return (norm, warnings)
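Editor's note: for orientation, a rough usage sketch of the updated normalize_scripture_field follows. It is not part of the commit; the import path core.scripture_normalizer is assumed from the file's header comment, and the expected values are inferred from the diff above rather than from running the module.

# Rough usage sketch (assumed import path, expected values inferred from the diff)
from core.scripture_normalizer import normalize_scripture_field

norm, warnings = normalize_scripture_field("Matthew 5:3; 6:9; 2pe 1:3")
# Expected roughly: norm == "Matt. 5:3; Matt. 6:9; 2 Pet. 1:3" and warnings == []
#   - "Matthew" resolves through BOOK_CANON,
#   - the bare "6:9" inherits the previous book via last_book,
#   - the tight "2pe" goes through the NUMBERED_SHORT path.
# Unrecognized segments are kept verbatim and reported in warnings instead.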