Update web/core/scripture_normalizer.py
hopefully final version fixing Peter
This commit is contained in:
parent 3207582d17
commit 1de4b84e2e
@@ -30,7 +30,7 @@ BOOK_CANON: Dict[str, str] = {
    "proverbs": "Prov.", "prov": "Prov.",
    "ecclesiastes": "Eccl.", "eccles": "Eccl.", "eccl": "Eccl.",
    "song of solomon": "Song", "song of songs": "Song", "song": "Song",
    "isaiah": "Isa.", "isa": "Isa.", # IMPORTANT: direct alias so 'Isa.' never becomes '1 Sam.'
    "isaiah": "Isa.", "isa": "Isa.",
    "jeremiah": "Jer.", "jer": "Jer.",
    "lamentations": "Lam.", "lam": "Lam.",
    "ezekiel": "Ezek.", "ezek": "Ezek.",
@@ -58,7 +58,7 @@ BOOK_CANON: Dict[str, str] = {
    "2 corinthians": "2 Cor.", "ii corinthians": "2 Cor.", "2 cor": "2 Cor.", "2co": "2 Cor.",
    "galatians": "Gal.", "gal": "Gal.",
    "ephesians": "Eph.", "eph": "Eph.",
    "philippians": "Phil.", "phil": "Phil.", "philippians 216": "Phil.", # import glitch seen
    "philippians": "Phil.", "phil": "Phil.", "philippians 216": "Phil.",
    "colossians": "Col.", "col": "Col.",
    "1 thessalonians": "1 Thess.", "i thessalonians": "1 Thess.", "1 thess": "1 Thess.",
    "2 thessalonians": "2 Thess.", "ii thessalonians": "2 Thess.", "2 thess": "2 Thess.",
@@ -68,19 +68,25 @@ BOOK_CANON: Dict[str, str] = {
    "philemon": "Philem.", "philem": "Philem.",
    "hebrews": "Heb.", "heb": "Heb.",
    "james": "Jas.", "jas": "Jas.",

    # Peter (expanded aliases)
    "1 peter": "1 Pet.", "i peter": "1 Pet.", "1 pet": "1 Pet.",
    "1pe": "1 Pet.", "1 pe": "1 Pet.", "1pet": "1 Pet.", "1 pet.": "1 Pet.", "1peter": "1 Pet.",

    "2 peter": "2 Pet.", "ii peter": "2 Pet.", "2 pet": "2 Pet.",
    "2pe": "2 Pet.", "2 pe": "2 Pet.", "2pet": "2 Pet.", "2 pet.": "2 Pet.", "2peter": "2 Pet.",

    "1 john": "1 John", "i john": "1 John",
    "2 john": "2 John", "ii john": "2 John",
    "3 john": "3 John", "iii john": "3 John",
    "jude": "Jude",
    "revelation": "Rev.", "rev": "Rev.", "1 ch": "1 Chron.", "1 chr": "1 Chron.", "1 Ch": "1 Chron.", "1 Chr": "1 Chron.",
    "revelation": "Rev.", "rev": "Rev.",
    # Chronicles short forms
    "1 ch": "1 Chron.", "1 chr": "1 Chron.", "1 Ch": "1 Chron.", "1 Chr": "1 Chron.",
    "2 ch": "2 Chron.", "2 chr": "2 Chron.", "2 Ch": "2 Chron.", "2 Chr": "2 Chron.",
    "ch": "Chron.", "chr": "Chron.",
}
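The extra Peter and Chronicles spellings only pay off because lookups are tried both as written and with spaces removed. A minimal self-contained sketch of that lookup pattern (the ALIASES excerpt and lookup helper below are illustrative, not the module's own code; the real path is _direct_lookup further down):

from typing import Optional

ALIASES = {  # excerpt of BOOK_CANON above
    "1 pet": "1 Pet.", "1pe": "1 Pet.", "1 pet.": "1 Pet.", "1peter": "1 Pet.",
    "2 pet": "2 Pet.", "2pe": "2 Pet.", "2 pet.": "2 Pet.", "2peter": "2 Pet.",
    "1 ch": "1 Chron.", "2 chr": "2 Chron.",
}

def lookup(key: str) -> Optional[str]:
    # try the key as written, then with spaces removed ("1 pe" -> "1pe")
    return ALIASES.get(key) or ALIASES.get(key.replace(" ", ""))

print(lookup("1pe"))     # -> 1 Pet.
print(lookup("2 pet."))  # -> 2 Pet.
print(lookup("2 chr"))   # -> 2 Chron.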

# Short bare tokens that must *never* be treated as “roman numeral + book”
# (prevents 'Isa.' => '1 Sam.' and similar).
NEVER_PREFIX_NUMERAL = {"isa", "isaiah", "job", "joel", "amos", "nah", "hag", "mal", "rom", "gal", "eph", "tit", "heb", "jas", "jude"}
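As a sanity check on this guard, the Isaiah cases should come out like this at the public entry point (expected values, not captured output; the import path assumes the file is importable as web.core.scripture_normalizer):

from web.core.scripture_normalizer import normalize_scripture_field

print(normalize_scripture_field("Isa. 53:5"))  # expected: ('Isa. 53:5', [])
print(normalize_scripture_field("isa 53:5"))   # expected: ('Isa. 53:5', []) -- never '1 Sam. 53:5'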

def _canon_key(s: str) -> str:
@@ -90,7 +96,6 @@ def _canon_key(s: str) -> str:
    return s

def _direct_lookup(key: str) -> Optional[str]:
    """Try direct lookups with and without spaces (e.g., 'i samuel', '1co')."""
    if key in BOOK_CANON:
        return BOOK_CANON[key]
    nospace = key.replace(" ", "")
@@ -98,7 +103,6 @@ def _direct_lookup(key: str) -> Optional[str]:
        return BOOK_CANON[nospace]
    return None

# Split helpers
CRUFT_RE = re.compile(r"\b(read|see|chap(?:ter)?|ch)\b\.?", re.I)
def _clean_text(s: str) -> str:
    s = s.replace("\xa0", " ").replace("\u2009", " ").replace("\u202f", " ")
@@ -107,42 +111,20 @@ def _clean_text(s: str) -> str:
    s = re.sub(r"\s+", " ", s)
    return s.strip(" ;,.\t\r\n ")
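CRUFT_RE is meant to drop reader phrasing like "see", "read", or "chap." before parsing; its call site sits outside the lines shown in this hunk, but the pattern itself can be exercised standalone:

import re

CRUFT_RE = re.compile(r"\b(read|see|chap(?:ter)?|ch)\b\.?", re.I)

print(CRUFT_RE.sub("", "see Phil. 2:16"))      # -> ' Phil. 2:16'
print(CRUFT_RE.sub("", "Read Isa. chap. 53"))  # -> ' Isa.  53'
# the leftover double spaces are what the re.sub(r"\s+", " ", s) line above collapses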

BOOK_PREFIX_RE = re.compile(
    r"""
    ^\s*
    (?:(?P<num>[1-3]|i{1,3})\s*)?                 # optional leading number (1/2/3 or i/ii/iii)
    (?P<book>[A-Za-z\.]+(?:\s+[A-Za-z\.]+){0,2})  # 1-3 words for the book
    """,
    re.X,
)

BOOK_PREFIX_RE = re.compile(r"^\s*(?:(?P<num>[1-3]|i{1,3})\s*)?(?P<book>[A-Za-z\.]+(?:\s+[A-Za-z\.]+){0,2})", re.X)
C_V_RE = re.compile(r"(?P<ch>\d+)(?::(?P<vs>[\d,\-\s]+))?")
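A quick standalone check of what the two patterns capture (patterns copied verbatim from above; only the Python re module is needed):

import re

BOOK_PREFIX_RE = re.compile(r"^\s*(?:(?P<num>[1-3]|i{1,3})\s*)?(?P<book>[A-Za-z\.]+(?:\s+[A-Za-z\.]+){0,2})", re.X)
C_V_RE = re.compile(r"(?P<ch>\d+)(?::(?P<vs>[\d,\-\s]+))?")

m = BOOK_PREFIX_RE.match("2 Pet. 1:3")
print(m.group("num"), m.group("book"))     # -> 2 Pet.

m = BOOK_PREFIX_RE.match("isa 53")
print(m.group("num"), m.group("book"))     # -> i sa   (the bogus split the guard above exists for)

print(C_V_RE.search("1:3-5").groupdict())  # -> {'ch': '1', 'vs': '3-5'}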

def _canon_book(book_raw: str, num: Optional[str]) -> Optional[str]:
    """
    Decide the canonical book abbreviation.
    Rules:
    1) Always try direct alias matches first (with/without spaces).
    2) If the *book* piece looks like a short token that could be misread as a roman
       numeral + another book (e.g., 'Isa'), *never* do numeral heuristics.
    3) Otherwise, allow '1/2/3 + co/cor/ti/tim/pet/peter/jn/john' style heuristics.
    """
    key = _canon_key(book_raw)
    # Step 1: direct lookups
    direct = _direct_lookup(key)
    if direct:
        return direct

    # Guard: tokens like 'isa' must not be split into 'i' + 'sa'
    bare = key.replace(" ", "")
    if bare in NEVER_PREFIX_NUMERAL:
        return _direct_lookup(bare)

    # Step 3: numeral + short token combos
    if num:
        n = {"i": "1", "ii": "2", "iii": "3"}.get(num.lower(), num)
        short = key.replace(" ", "")
        # common short targets where number matters
        map_num = {
            "co": "Cor.", "cor": "Cor.",
            "ti": "Tim.", "tim": "Tim.",
@@ -155,100 +137,64 @@ def _canon_book(book_raw: str, num: Optional[str]) -> Optional[str]:
        }
        if short in map_num:
            return f"{n} {map_num[short]}"

    # Fallback
    return None
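Illustrative expectations for the rules above (hedged: _canon_book is a private helper and _canon_key's body falls outside this hunk, so these assume it leaves already-lowercase keys untouched; import path as before):

from web.core.scripture_normalizer import _canon_book

print(_canon_book("1 pet", "1"))  # expected: '1 Pet.'  (rule 1: direct alias)
print(_canon_book("2 pe", "2"))   # expected: '2 Pet.'  (new alias from this commit)
print(_canon_book("i sa", "i"))   # expected: 'Isa.'    (no-space lookup undoes the 'i' + 'sa' split)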

def _parse_segment(seg: str, last_book: Optional[str]) -> Tuple[Optional[str], Optional[str], bool]:
    """
    Parse one semicolon-delimited piece.
    Returns (book_canon, cv, preserve_original_if_unparsed).
    """
    raw = _clean_text(seg)
    if not raw:
        return (None, None, False)

    m = BOOK_PREFIX_RE.match(raw)
    book_part = None
    num = None
    rest = raw

    if m:
        # What the regex thinks is a numeric prefix and a book word blob:
        num = (m.group("num") or "").strip() or None
        book_text = (m.group("book") or "").strip()
        rest = raw[m.end():].strip(" :,;.")

        # Try to resolve the book using robust rules
        book_part = _canon_book(book_text if not num else f"{num} {book_text}", num=num)

        # If regex split out a bogus 'i' from something like "Isa.", fix by trying full token directly.
        if not book_part:
            whole = _canon_book(book_text, None)
            if whole:
                book_part = whole
                rest = raw[m.end():].strip(" :,;.")

    # no book found → inherit previous if we have verses; otherwise preserve as-is
    if not book_part:
        # If there's obvious chapter/verse, but no book, we cannot link → preserve as-is.
        if C_V_RE.search(rest):
            return (None, None, True)
        # Or the whole thing might already be a known book alone:
        whole = _canon_book(raw, None)
        if whole:
            return (whole, None, False)
        return (None, None, True)

    # normalize chapter/verse
    rest = rest.replace(" ", "")
    if not rest:
        return (book_part, None, False)

    mcv = C_V_RE.search(rest)
    if mcv:
        cv = rest
        cv = cv.replace(" ", "")
        cv = rest.replace(" ", "")
        return (book_part, cv, False)

    # not a recognizable cv → keep original piece untouched
    return (None, None, True)
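Expected return tuples per the docstring contract (illustrative only; same import assumption as above, and _parse_segment is a private helper):

from web.core.scripture_normalizer import _parse_segment

print(_parse_segment("2 Pet. 1:3-5", None))  # expected: ('2 Pet.', '1:3-5', False)
print(_parse_segment("Jude", None))          # expected: ('Jude', None, False)
print(_parse_segment("3:16", None))          # expected: (None, None, True) -- verses with no book, caller preserves it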

def normalize_scripture_field(text: str) -> Tuple[str, List[str]]:
    """
    Normalize a whole scripture_raw string.
    Returns (normalized_text, warnings).
    Unknown/unparseable chunks are preserved verbatim.
    """
    warnings: List[str] = []
    if not text:
        return ("", warnings)

    pieces = [p for p in re.split(r"\s*;\s*", text) if p and p.strip()]
    out: List[str] = []
    last_book: Optional[str] = None

    for piece in pieces:
        book, cv, preserve = _parse_segment(piece, last_book)

        if preserve:
            # Keep the original chunk
            out.append(_clean_text(piece))
            continue

        if book and not cv:
            out.append(book)
            last_book = book
            continue

        if not book and cv:
            warnings.append(f"Missing book for '{piece.strip()}'")
            continue

        if book and cv:
            out.append(f"{book} {cv}")
            last_book = book

    norm = "; ".join(s for s in (o.strip(" ;,") for o in out) if s)
    norm = re.sub(r"\s+", " ", norm).strip(" ;,")
    return (norm, warnings)
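End-to-end, the Peter fix plus the preserve-unknown-chunks behaviour should combine like this (expected values, not captured output; assuming the import path used above):

from web.core.scripture_normalizer import normalize_scripture_field

norm, warnings = normalize_scripture_field("1pe 2:9; ii peter 1:3; Songs 2:1")
print(norm)      # expected: '1 Pet. 2:9; 2 Pet. 1:3; Songs 2:1'  (unknown 'Songs' chunk preserved verbatim)
print(warnings)  # expected: []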