Update web/templates/entry_view.html

This commit is contained in:
Joshua Laymon 2025-08-21 01:32:39 +00:00
parent 7a0023b55f
commit 72854a1d20

View File

@@ -281,99 +281,78 @@
<script>
(function(){
const btn = document.getElementById('ttsBtn');
if (!btn) return;
const ttsBtn = document.getElementById('ttsBtn');
if (!ttsBtn) return;
const ttsUrl = btn.dataset.ttsUrl || ""; // Present only for staff
let audioEl = null;
// Provided by the view for staff, or empty for non-staff
const TTS_URL = "{{ tts_url|default:'' }}";
// Build combined text (punctuation-safe) for browser TTS fallback
function buildCombinedText() {
async function playOpenAITTS() {
// fetch audio bytes from your Django endpoint
const r = await fetch(TTS_URL, {
credentials: 'same-origin',
cache: 'no-store'
});
if (!r.ok) {
const msg = await r.text().catch(()=> String(r.status));
throw new Error(`HTTP ${r.status}: ${msg.slice(0,200)}`);
}
const ct = (r.headers.get('content-type') || '').toLowerCase();
if (!ct.startsWith('audio/')) {
const preview = await r.text().catch(()=> '(non-audio response)');
throw new Error(`Unexpected content-type "${ct}". Preview: ${preview.slice(0,200)}`);
}
const blob = await r.blob();
const url = URL.createObjectURL(blob);
const audio = new Audio(url);
audio.preload = 'auto';
audio.autoplay = true;
audio.setAttribute('playsinline',''); // iOS/Safari
try {
await audio.play();
} finally {
audio.onended = () => URL.revokeObjectURL(url);
}
}
function buildCombinedText(){
const ill = (document.getElementById('illustration-text')?.innerText || '').trim();
const app = (document.getElementById('application-text')?.innerText || '').trim();
const punctIll = ill && /[.!?…—]$/.test(ill) ? ill : (ill ? ill + '.' : '');
return [punctIll, app].filter(Boolean).join(' ') || 'No text available for this illustration.';
const illP = ill && /[.!?…]$/.test(ill) ? ill : (ill ? ill + '.' : '');
return [illP, app].filter(Boolean).join(' ') || 'No text available for this illustration.';
}
// --- Staff path: stream OpenAI TTS from server and toggle on click ---
if (ttsUrl) {
btn.addEventListener('click', () => {
// Stop if already playing
if (audioEl && !audioEl.paused) {
audioEl.pause();
audioEl.currentTime = 0;
return;
}
if (!audioEl) {
audioEl = new Audio();
audioEl.addEventListener('ended', () => {
btn.classList.remove('playing');
});
}
// Cache-buster so we always pick up newly cached files
audioEl.src = ttsUrl + (ttsUrl.includes('?') ? '&' : '?') + 't=' + Date.now();
audioEl.play().then(() => {
btn.classList.add('playing');
}).catch(err => {
alert('TTS error: ' + err);
});
});
// Optional little notice the first time
if (!sessionStorage.getItem('tts_notice_shown')) {
sessionStorage.setItem('tts_notice_shown', '1');
// toast-style notice:
const notice = document.createElement('div');
notice.textContent = 'Using OpenAI TTS (gpt-4o-mini-tts, alloy)';
Object.assign(notice.style, {
position:'fixed', bottom:'18px', left:'50%', transform:'translateX(-50%)',
background:'#333', color:'#fff', padding:'8px 12px', borderRadius:'6px',
fontSize:'13px', zIndex:'99999'
});
document.body.appendChild(notice);
setTimeout(()=>notice.remove(), 2500);
}
return;
}
// --- Non-staff path: keep your browser TTS (Web Speech API) ---
function pickVoice() {
const voices = speechSynthesis.getVoices();
// Preference order (same as you had)
const prefs = [/Google .*English/i, /Microsoft .*Natural/i, /Siri/i, /Alex/i, /English/i];
for (const p of prefs) {
const v = voices.find(v => p.test(v.name) && v.lang.toLowerCase().startsWith('en'));
if (v) return v;
}
return voices.find(v => v.lang.toLowerCase().startsWith('en')) || voices[0] || null;
}
function speakBrowserTTS() {
function speakBrowserTTS(){
const text = buildCombinedText();
if (!window.speechSynthesis || !window.SpeechSynthesisUtterance) {
alert('Text-to-Speech not supported in this browser.');
return;
}
// If currently speaking—toggle off
if (speechSynthesis.speaking || speechSynthesis.pending) {
speechSynthesis.cancel();
return;
}
const start = () => {
const u = new SpeechSynthesisUtterance(text);
const v = pickVoice();
if (v) u.voice = v;
u.rate = 1.0; u.pitch = 1.0; u.volume = 1.0;
speechSynthesis.speak(u);
};
if (!speechSynthesis.getVoices().length) {
speechSynthesis.onvoiceschanged = () => start();
speechSynthesis.getVoices();
} else {
start();
if (!('speechSynthesis' in window) || !('SpeechSynthesisUtterance' in window)) {
throw new Error('Browser TTS not supported.');
}
window.speechSynthesis.cancel();
const u = new SpeechSynthesisUtterance(text);
u.rate = 1.0; u.pitch = 1.0; u.volume = 1.0;
speechSynthesis.speak(u);
}
btn.addEventListener('click', speakBrowserTTS);
ttsBtn.addEventListener('click', async () => {
try {
ttsBtn.disabled = true;
// Staff users get the OpenAI endpoint; others fall back
const usingOpenAI = Boolean(TTS_URL);
if (usingOpenAI) {
await playOpenAITTS();
alert('Speaking with: OpenAI (server TTS).');
} else {
speakBrowserTTS();
alert('Speaking with: Browser TTS.');
}
} catch (err) {
alert('TTS error: ' + (err && err.message ? err.message : String(err)));
} finally {
ttsBtn.disabled = false;
}
});
})();
</script>
{% endblock %}