Update web/core/views.py

Joshua Laymon 2025-08-22 03:23:50 +00:00
parent a32d58eec2
commit f64d41313e


@@ -329,95 +329,90 @@ def entry_delete(request, entry_id):
 @login_required
 @user_passes_test(is_admin)
 def import_wizard(request):
-    # Safety: expected header list (matches DB/order the importer expects)
-    _EXPECTED_HEADERS = [
+    EXPECTED = [
         "Subject", "Illustration", "Application", "Scripture", "Source",
         "Talk Title", "Talk Number", "Code", "Date", "Date Edited",
     ]
+    EXPECTED_NORM = [h.lower() for h in EXPECTED]
     if request.method == "POST":
         form = ImportForm(request.POST, request.FILES)
         if form.is_valid():
             try:
+                import io, re, csv as _csv
                 raw = form.cleaned_data["file"].read()
-                import io
-                import csv as _csv
-                # Decode once (BOM-safe)
-                text = raw.decode("utf-8-sig", errors="replace")
-                # Try to sniff a dialect; fall back to Excel-style CSV
+                text = raw.decode("utf-8-sig", errors="replace")  # BOM-safe
+                # Try to sniff; fall back to excel dialect
                 try:
                     first_line = text.splitlines()[0] if text else ""
                     dialect = _csv.Sniffer().sniff(first_line) if first_line else _csv.excel
                 except Exception:
                     dialect = _csv.excel
-                rdr = _csv.reader(io.StringIO(text), dialect)
-                rows = list(rdr)
+                rows = list(_csv.reader(io.StringIO(text), dialect))
                 if not rows:
                     raise ValueError("The CSV file appears to be empty.")
-                expected = _EXPECTED_HEADERS
-                expected_norm = [h.lower() for h in expected]
-                # Header cleaner: fixes r:"Talk Title", stray quotes, spaces, case
-                def _clean_header(s):
+                # --- header cleaning ---
+                # Handles: r."Talk Title", r:'Talk Title', r=Talk Title, r: Talk Title, etc.
+                _r_prefix = re.compile(r'^[rR]\s*[\.\:\=\-]\s*')
+                def clean_header_cell(s: str) -> str:
                     s = "" if s is None else str(s)
                     s = s.strip()
-                    if s.lower().startswith("r:") or s.lower().startswith("r="):
-                        s = s[2:].lstrip()
-                    if (len(s) >= 2) and (s[0] == s[-1]) and s[0] in ('"', "'"):
-                        s = s[1:-1]
+                    # strip balanced quotes
+                    if len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'"):
+                        s = s[1:-1].strip()
+                    # strip weird r.<sep> prefix
+                    s = _r_prefix.sub("", s)
+                    # final trim + lower for comparison
                     return s.strip().lower()
                 first = rows[0]
-                norm_first = [_clean_header(c) for c in first]
-                # If first row isn't our header but length matches, inject one
-                header_ok = (norm_first == expected_norm)
-                if not header_ok and len(first) == len(expected):
-                    rows.insert(0, expected)
-                elif not header_ok and len(first) != len(expected):
-                    # Try common alternate delimiters if column count is off
+                norm_first = [clean_header_cell(c) for c in first]
+                header_ok = (norm_first == EXPECTED_NORM)
+                # If first row isn't the header but length matches, inject our clean header
+                if not header_ok and len(first) == len(EXPECTED):
+                    rows.insert(0, EXPECTED)
+                elif not header_ok and len(first) != len(EXPECTED):
+                    # Try common alternate delimiters if the column count is off
                     for delim in (";", "\t"):
-                        rdr2 = _csv.reader(io.StringIO(text), delimiter=delim)
-                        test_rows = list(rdr2)
-                        if test_rows and len(test_rows[0]) == len(expected):
-                            rows = test_rows
+                        test = list(_csv.reader(io.StringIO(text), delimiter=delim))
+                        if test and len(test[0]) == len(EXPECTED):
+                            rows = test
                             first = rows[0]
-                            norm_first = [_clean_header(c) for c in first]
-                            header_ok = (norm_first == expected_norm)
+                            norm_first = [clean_header_cell(c) for c in first]
+                            header_ok = (norm_first == EXPECTED_NORM)
                             if not header_ok:
-                                rows.insert(0, expected)
+                                rows.insert(0, EXPECTED)
                             break
-                # Re-encode a sanitized CSV for the existing importer
+                # Re-encode a sanitized CSV to feed the existing importer
                 out = io.StringIO()
                 w = _csv.writer(out)
                 for r in rows:
                     w.writerow(r)
                 fixed_raw = out.getvalue().encode("utf-8")
-                # Keep utils in sync for importer variants that read EXPECTED_HEADERS
+                # Keep utils in sync for any helpers that read EXPECTED_HEADERS
                 from . import utils as core_utils
-                core_utils.EXPECTED_HEADERS = expected
-                # Hand off to the robust importer you already have
+                core_utils.EXPECTED_HEADERS = EXPECTED
                 report = import_csv_bytes(fixed_raw, dry_run=form.cleaned_data["dry_run"]) or {}
                 report["header_ok"] = header_ok
                 if not header_ok:
                     messages.warning(
                         request,
-                        "The first row didn't match the expected header; a clean header was injected automatically."
+                        "The first row didn't match the expected header; it was cleaned/normalized automatically."
                     )
-                return render(
-                    request,
-                    "import_result.html",
-                    {"report": report, "dry_run": form.cleaned_data["dry_run"]},
-                )
+                return render(request, "import_result.html",
+                              {"report": report, "dry_run": form.cleaned_data["dry_run"]})
             except Exception as e:
                 messages.error(request, f"Import failed: {e}")
         else:
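
For reference, a minimal standalone sketch of the header normalization this commit introduces. clean_header_cell() and _r_prefix are reproduced from the hunk above; the sample header cells are invented purely for illustration.

    import re

    _r_prefix = re.compile(r'^[rR]\s*[\.\:\=\-]\s*')

    def clean_header_cell(s: str) -> str:
        s = "" if s is None else str(s)
        s = s.strip()
        # strip balanced quotes
        if len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'"):
            s = s[1:-1].strip()
        # strip weird r.<sep> prefix
        s = _r_prefix.sub("", s)
        return s.strip().lower()

    # Each invented sample cell normalizes to "talk title", so it compares
    # equal to the corresponding entry in EXPECTED_NORM.
    for cell in ('"Talk Title"', "r: Talk Title", "r=Talk Title", "  TALK TITLE  "):
        assert clean_header_cell(cell) == "talk title"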
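Likewise, a small sketch of the alternate-delimiter fallback and the re-encode step, run outside the view. EXPECTED is copied from the hunk; the semicolon-separated sample upload and its values are made up for illustration.

    import csv, io

    EXPECTED = [
        "Subject", "Illustration", "Application", "Scripture", "Source",
        "Talk Title", "Talk Number", "Code", "Date", "Date Edited",
    ]

    # Invented sample upload: the right ten columns, but semicolon-separated.
    text = (
        "Subject;Illustration;Application;Scripture;Source;"
        "Talk Title;Talk Number;Code;Date;Date Edited\n"
        "Faith;An illustration;An application;John 3:16;A source;"
        "A talk;1;A1;2025-01-01;2025-01-02\n"
    )

    rows = list(csv.reader(io.StringIO(text)))  # comma dialect: one wide column
    if len(rows[0]) != len(EXPECTED):
        for delim in (";", "\t"):
            test = list(csv.reader(io.StringIO(text), delimiter=delim))
            if test and len(test[0]) == len(EXPECTED):
                rows = test
                break

    out = io.StringIO()
    csv.writer(out).writerows(rows)
    fixed_raw = out.getvalue().encode("utf-8")  # sanitized, comma-separated bytes
    # The view then hands fixed_raw to import_csv_bytes(fixed_raw, dry_run=...).
    assert len(rows[0]) == len(EXPECTED)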