diff --git a/application/external_apps/databaseseeder/artifacts/admin_settings.json b/application/external_apps/databaseseeder/artifacts/admin_settings.json index 897285cd..24c1c7ea 100644 --- a/application/external_apps/databaseseeder/artifacts/admin_settings.json +++ b/application/external_apps/databaseseeder/artifacts/admin_settings.json @@ -119,14 +119,17 @@ "video_indexer_endpoint": "https://api.videoindexer.ai", "video_indexer_location": "", "video_indexer_account_id": "", - "video_indexer_api_key": "", "video_indexer_resource_group": "", "video_indexer_subscription_id": "", "video_indexer_account_name": "", - "video_indexer_arm_api_version": "2021-11-10-preview", + "video_indexer_arm_api_version": "2025-04-01", "video_index_timeout": 600, "speech_service_endpoint": "https://eastus.api.cognitive.microsoft.com", "speech_service_location": "eastus", + "speech_service_subscription_id": "", + "speech_service_resource_group": "", + "speech_service_resource_name": "", + "speech_service_resource_id": "", "speech_service_locale": "en-US", "speech_service_key": "", "classification_banner_enabled": true, diff --git a/application/single_app/functions_authentication.py b/application/single_app/functions_authentication.py index 8bdf4b5c..a0ecde0a 100644 --- a/application/single_app/functions_authentication.py +++ b/application/single_app/functions_authentication.py @@ -385,7 +385,7 @@ def get_video_indexer_managed_identity_token(settings, video_id=None): rg = settings["video_indexer_resource_group"] sub = settings["video_indexer_subscription_id"] acct = settings["video_indexer_account_name"] - api_ver = settings.get("video_indexer_arm_api_version", "2021-11-10-preview") + api_ver = settings.get("video_indexer_arm_api_version", DEFAULT_VIDEO_INDEXER_ARM_API_VERSION) debug_print(f"[VIDEO INDEXER AUTH] Settings extracted - Subscription: {sub}, Resource Group: {rg}, Account: {acct}, API Version: {api_ver}") diff --git a/application/single_app/functions_documents.py 
b/application/single_app/functions_documents.py index 7bff48d8..2f2f46e3 100644 --- a/application/single_app/functions_documents.py +++ b/application/single_app/functions_documents.py @@ -6242,6 +6242,34 @@ def _get_speech_config(settings, endpoint: str, locale: str): print(f"[Debug] Speech config obtained successfully", flush=True) return speech_config + +def get_speech_synthesis_config(settings, endpoint: str, location: str): + """Get speech synthesis config for either key or managed identity auth.""" + auth_type = settings.get("speech_service_authentication_type") + + if auth_type == "managed_identity": + resource_id = (settings.get("speech_service_resource_id") or "").strip() + if not location: + raise ValueError("Speech service location is required for text-to-speech with managed identity.") + if not resource_id: + raise ValueError("Speech service resource ID is required for text-to-speech with managed identity.") + + credential = DefaultAzureCredential() + token = credential.get_token(cognitive_services_scope) + authorization_token = f"aad#{resource_id}#{token.token}" + speech_config = speechsdk.SpeechConfig(auth_token=authorization_token, region=location) + else: + key = (settings.get("speech_service_key") or "").strip() + if not endpoint: + raise ValueError("Speech service endpoint is required for text-to-speech.") + if not key: + raise ValueError("Speech service key is required for text-to-speech when using key authentication.") + + speech_config = speechsdk.SpeechConfig(endpoint=endpoint, subscription=key) + + print(f"[Debug] Speech synthesis config obtained successfully", flush=True) + return speech_config + def process_audio_document( document_id: str, user_id: str, diff --git a/application/single_app/functions_settings.py b/application/single_app/functions_settings.py index 324f82fc..8d09ee61 100644 --- a/application/single_app/functions_settings.py +++ b/application/single_app/functions_settings.py @@ -372,6 +372,10 @@ def 
get_settings(use_cosmos=False, include_source=False): # Audio file settings with Azure speech service "speech_service_endpoint": '', "speech_service_location": '', + "speech_service_subscription_id": '', + "speech_service_resource_group": '', + "speech_service_resource_name": '', + "speech_service_resource_id": '', "speech_service_locale": "en-US", "speech_service_key": "", "speech_service_authentication_type": "key", # 'key' or 'managed_identity' diff --git a/application/single_app/route_backend_chats.py b/application/single_app/route_backend_chats.py index 005fb67d..e16d7242 100644 --- a/application/single_app/route_backend_chats.py +++ b/application/single_app/route_backend_chats.py @@ -3922,7 +3922,7 @@ def get_tabular_result_coverage_summary(invocations): if total_matches is not None and returned_rows is not None: if returned_rows >= total_matches: coverage_summary['has_full_result_coverage'] = True - elif returned_rows < total_matches: + else: coverage_summary['has_partial_result_coverage'] = True distinct_count = parse_tabular_result_count(result_payload.get('distinct_count')) @@ -3930,7 +3930,7 @@ def get_tabular_result_coverage_summary(invocations): if distinct_count is not None and returned_values is not None: if returned_values >= distinct_count: coverage_summary['has_full_result_coverage'] = True - elif returned_values < distinct_count: + else: coverage_summary['has_partial_result_coverage'] = True if result_payload.get('full_rows_included') or result_payload.get('full_values_included'): diff --git a/application/single_app/route_backend_tts.py b/application/single_app/route_backend_tts.py index 11d14cc3..61830490 100644 --- a/application/single_app/route_backend_tts.py +++ b/application/single_app/route_backend_tts.py @@ -2,6 +2,8 @@ from config import * from functions_authentication import * +from functions_appinsights import log_event +from functions_documents import get_speech_synthesis_config from functions_settings import * from functions_debug 
import debug_print from swagger_wrapper import swagger_route, get_auth_security @@ -41,14 +43,26 @@ def synthesize_speech(): return jsonify({"error": "Text-to-speech is not enabled"}), 403 # Validate speech service configuration - speech_key = settings.get('speech_service_key', '') - speech_region = settings.get('speech_service_location', '') + speech_endpoint = (settings.get('speech_service_endpoint') or '').strip().rstrip('/') + speech_region = (settings.get('speech_service_location') or '').strip() + speech_auth_type = settings.get('speech_service_authentication_type', 'key') - if not speech_key or not speech_region: - debug_print("[TTS] Speech service not configured - missing key or region") + if not speech_endpoint: + debug_print("[TTS] Speech service not configured - missing endpoint") + return jsonify({"error": "Speech service not configured"}), 500 + + if speech_auth_type == 'key' and not (settings.get('speech_service_key') or '').strip(): + debug_print("[TTS] Speech service not configured - missing key for key authentication") + return jsonify({"error": "Speech service not configured"}), 500 + + if speech_auth_type == 'managed_identity' and not speech_region: + debug_print("[TTS] Speech service not configured - missing location for managed identity") return jsonify({"error": "Speech service not configured"}), 500 - debug_print(f"[TTS] Speech service configured - region: {speech_region}") + debug_print( + f"[TTS] Speech service configured - auth_type: {speech_auth_type}, " + f"endpoint: {speech_endpoint}, location: {speech_region or 'n/a'}" + ) # Parse request data data = request.get_json() @@ -71,10 +85,12 @@ def synthesize_speech(): debug_print(f"[TTS] Request params - voice: {voice}, speed: {speed}, text_length: {len(text)}") # Configure speech service - speech_config = speechsdk.SpeechConfig( - subscription=speech_key, - region=speech_region - ) + try: + speech_config = get_speech_synthesis_config(settings, speech_endpoint, speech_region) + except 
ValueError as config_error: + debug_print(f"[TTS] Speech service configuration invalid: {str(config_error)}") + return jsonify({"error": str(config_error)}), 500 + speech_config.speech_synthesis_voice_name = voice # Set output format to high quality diff --git a/application/single_app/route_frontend_admin_settings.py b/application/single_app/route_frontend_admin_settings.py index 94053752..129dfcde 100644 --- a/application/single_app/route_frontend_admin_settings.py +++ b/application/single_app/route_frontend_admin_settings.py @@ -367,6 +367,9 @@ def admin_settings(): 'admin_settings.html', app_settings=settings_for_template, settings=settings_for_template, + azure_environment=AZURE_ENVIRONMENT, + default_video_indexer_endpoint=video_indexer_endpoint, + default_video_indexer_arm_api_version=DEFAULT_VIDEO_INDEXER_ARM_API_VERSION, user_settings=user_settings, update_available=update_available, latest_version=latest_version, @@ -1325,12 +1328,16 @@ def is_valid_url(url): 'video_indexer_resource_group': form_data.get('video_indexer_resource_group', '').strip(), 'video_indexer_subscription_id': form_data.get('video_indexer_subscription_id', '').strip(), 'video_indexer_account_name': form_data.get('video_indexer_account_name', '').strip(), - 'video_indexer_arm_api_version': form_data.get('video_indexer_arm_api_version', '2024-01-01').strip(), + 'video_indexer_arm_api_version': form_data.get('video_indexer_arm_api_version', DEFAULT_VIDEO_INDEXER_ARM_API_VERSION).strip(), 'video_index_timeout': int(form_data.get('video_index_timeout', 600)), # Audio file settings with Azure speech service 'speech_service_endpoint': form_data.get('speech_service_endpoint', '').strip(), 'speech_service_location': form_data.get('speech_service_location', '').strip(), + 'speech_service_subscription_id': form_data.get('speech_service_subscription_id', '').strip(), + 'speech_service_resource_group': form_data.get('speech_service_resource_group', '').strip(), + 'speech_service_resource_name': 
form_data.get('speech_service_resource_name', '').strip(), + 'speech_service_resource_id': form_data.get('speech_service_resource_id', '').strip(), 'speech_service_locale': form_data.get('speech_service_locale', '').strip(), 'speech_service_authentication_type': form_data.get('speech_service_authentication_type', 'key'), 'speech_service_key': form_data.get('speech_service_key', '').strip(), diff --git a/application/single_app/static/js/admin/admin_settings.js b/application/single_app/static/js/admin/admin_settings.js index 896bf6b3..7861b801 100644 --- a/application/single_app/static/js/admin/admin_settings.js +++ b/application/single_app/static/js/admin/admin_settings.js @@ -1994,14 +1994,152 @@ function setupToggles() { } const speechAuthType = document.getElementById('speech_service_authentication_type'); + const speechEndpointInput = document.getElementById('speech_service_endpoint'); + const speechKeyContainer = document.getElementById('speech_service_key_container'); + const speechResourceIdContainer = document.getElementById('speech_service_resource_id_container'); + const speechResourceIdInput = document.getElementById('speech_service_resource_id'); + const speechSubscriptionInput = document.getElementById('speech_service_subscription_id'); + const speechResourceGroupInput = document.getElementById('speech_service_resource_group'); + const speechResourceNameInput = document.getElementById('speech_service_resource_name'); + const buildSpeechResourceIdButton = document.getElementById('build_speech_resource_id_btn'); + const speechResourceIdBuilderStatus = document.getElementById('speech_resource_id_builder_status'); + + function inferSpeechResourceNameFromEndpoint(endpointValue) { + const trimmedValue = (endpointValue || '').trim(); + if (!trimmedValue) { + return ''; + } + + try { + const parsedUrl = new URL(trimmedValue); + const hostName = parsedUrl.hostname.toLowerCase(); + const supportedSuffixes = [ + '.cognitiveservices.azure.com', + 
'.cognitiveservices.azure.us' + ]; + + for (const suffix of supportedSuffixes) { + if (hostName.endsWith(suffix)) { + const resourceName = hostName.slice(0, -suffix.length); + if (resourceName && !resourceName.includes('.')) { + return resourceName; + } + } + } + } catch (error) { + return ''; + } + + return ''; + } + + function setSpeechResourceIdBuilderStatus(message) { + if (speechResourceIdBuilderStatus) { + speechResourceIdBuilderStatus.textContent = message; + } + } + + function buildSpeechResourceIdFromFields() { + const subscriptionId = speechSubscriptionInput?.value?.trim() || ''; + const resourceGroup = speechResourceGroupInput?.value?.trim() || ''; + const resourceName = speechResourceNameInput?.value?.trim() || ''; + + if (!subscriptionId || !resourceGroup || !resourceName) { + return ''; + } + + return `/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.CognitiveServices/accounts/${resourceName}`; + } + + function syncSpeechResourceIdBuilder(force) { + if (!speechResourceIdInput) { + return ''; + } + + if (speechResourceNameInput && !speechResourceNameInput.value.trim()) { + const inferredResourceName = inferSpeechResourceNameFromEndpoint(speechEndpointInput?.value || ''); + if (inferredResourceName) { + speechResourceNameInput.value = inferredResourceName; + } + } + + const builtResourceId = buildSpeechResourceIdFromFields(); + const currentValue = speechResourceIdInput.value.trim(); + const previousGeneratedValue = speechResourceIdInput.dataset.generatedValue || ''; + const wasGenerated = speechResourceIdInput.dataset.generated === 'true' || currentValue === '' || currentValue === previousGeneratedValue; + + if (builtResourceId) { + speechResourceIdInput.dataset.generatedValue = builtResourceId; + if (force || wasGenerated) { + speechResourceIdInput.value = builtResourceId; + speechResourceIdInput.dataset.generated = 'true'; + } + setSpeechResourceIdBuilderStatus('Resource ID can be generated from the helper fields. 
You can still override it manually if needed.'); + return builtResourceId; + } + + const missingParts = []; + if (!speechSubscriptionInput?.value?.trim()) { + missingParts.push('Subscription ID'); + } + if (!speechResourceGroupInput?.value?.trim()) { + missingParts.push('Resource Group'); + } + if (!speechResourceNameInput?.value?.trim()) { + missingParts.push('Speech Resource Name'); + } + + speechResourceIdInput.dataset.generatedValue = ''; + if (speechResourceIdInput.dataset.generated === 'true' && !currentValue) { + speechResourceIdInput.dataset.generated = 'false'; + } + + setSpeechResourceIdBuilderStatus(`To auto-build the resource ID, provide: ${missingParts.join(', ')}.`); + return ''; + } + if (speechAuthType) { + const updateSpeechAuthFields = function () { + const usingKeyAuth = this.value === 'key'; + setSectionVisibility(speechKeyContainer, usingKeyAuth); + setSectionVisibility(speechResourceIdContainer, !usingKeyAuth); + }; + + updateSpeechAuthFields.call(speechAuthType); speechAuthType.addEventListener('change', function () { - document.getElementById('speech_service_key_container').style.display = - (this.value === 'key') ? 'block' : 'none'; + updateSpeechAuthFields.call(this); markFormAsModified(); }); } + if (speechResourceIdInput) { + syncSpeechResourceIdBuilder(false); + speechResourceIdInput.addEventListener('input', function () { + const builtResourceId = buildSpeechResourceIdFromFields(); + this.dataset.generated = builtResourceId && this.value.trim() === builtResourceId ? 
'true' : 'false'; + }); + } + + [speechEndpointInput, speechSubscriptionInput, speechResourceGroupInput, speechResourceNameInput].forEach((element) => { + if (!element) { + return; + } + + element.addEventListener('input', () => { + syncSpeechResourceIdBuilder(false); + markFormAsModified(); + }); + }); + + if (buildSpeechResourceIdButton) { + buildSpeechResourceIdButton.addEventListener('click', () => { + const builtResourceId = syncSpeechResourceIdBuilder(true); + if (builtResourceId) { + markFormAsModified(); + } + }); + } + const officeAuthType = document.getElementById('office_docs_authentication_type'); const connStrGroup = document.getElementById('office_docs_storage_conn_str_group'); const urlGroup = document.getElementById('office_docs_storage_url_group'); @@ -3434,29 +3572,104 @@ function togglePassword(btnId, inputId) { } } +function setSectionVisibility(element, visible) { + if (!element) { + return; + } + + element.classList.toggle('d-none', !visible); +} + // --- Video Indexer Settings toggle --- const videoSupportToggle = document.getElementById('enable_video_file_support'); -const videoIndexerDiv = document.getElementById('video_indexer_settings'); +const videoIndexerDiv = document.getElementById('video_indexer_settings'); +const videoIndexerCloudSelect = document.getElementById('video_indexer_cloud'); +const videoIndexerEndpointInput = document.getElementById('video_indexer_endpoint'); +const videoIndexerEndpointDisplay = document.getElementById('video_indexer_endpoint_display'); +const videoIndexerCustomEndpointGroup = document.getElementById('video_indexer_custom_endpoint_group'); +const videoIndexerCustomEndpointInput = document.getElementById('video_indexer_custom_endpoint'); +const videoIndexerCloudMismatchAlert = document.getElementById('video_indexer_cloud_mismatch_alert'); + +function updateVideoIndexerEndpointSelection() { + if (!videoIndexerCloudSelect || !videoIndexerEndpointInput) { + return; + } + + const selectedCloud = 
videoIndexerCloudSelect.value; + const publicEndpoint = videoIndexerCloudSelect.dataset.publicEndpoint || 'https://api.videoindexer.ai'; + const governmentEndpoint = videoIndexerCloudSelect.dataset.governmentEndpoint || 'https://api.videoindexer.ai.azure.us'; + const runtimeCloud = videoIndexerCloudSelect.dataset.runtimeCloud || 'public'; + + let endpointValue = publicEndpoint; + if (selectedCloud === 'usgovernment') { + endpointValue = governmentEndpoint; + } else if (selectedCloud === 'custom') { + endpointValue = videoIndexerCustomEndpointInput?.value?.trim() || ''; + } + + videoIndexerEndpointInput.value = endpointValue; + + if (videoIndexerEndpointDisplay) { + videoIndexerEndpointDisplay.value = endpointValue; + } + + setSectionVisibility(videoIndexerCustomEndpointGroup, selectedCloud === 'custom'); + setSectionVisibility(videoIndexerCloudMismatchAlert, selectedCloud !== runtimeCloud); + + if (typeof updateVideoIndexerModalInfo === 'function') { + updateVideoIndexerModalInfo(); + } +} + if (videoSupportToggle && videoIndexerDiv) { - // on load - videoIndexerDiv.style.display = videoSupportToggle.checked ? 'block' : 'none'; - // on change - videoSupportToggle.addEventListener('change', () => { - videoIndexerDiv.style.display = videoSupportToggle.checked ? 
'block' : 'none'; - markFormAsModified(); - }); + setSectionVisibility(videoIndexerDiv, videoSupportToggle.checked); + videoSupportToggle.addEventListener('change', () => { + setSectionVisibility(videoIndexerDiv, videoSupportToggle.checked); + markFormAsModified(); + }); +} + +if (videoIndexerCloudSelect) { + updateVideoIndexerEndpointSelection(); + videoIndexerCloudSelect.addEventListener('change', () => { + updateVideoIndexerEndpointSelection(); + markFormAsModified(); + }); +} + +if (videoIndexerCustomEndpointInput) { + videoIndexerCustomEndpointInput.addEventListener('input', () => { + updateVideoIndexerEndpointSelection(); + markFormAsModified(); + }); } // --- Speech Service Settings toggle --- -const audioSupportToggle = document.getElementById('enable_audio_file_support'); -const audioServiceDiv = document.getElementById('audio_service_settings'); -if (audioSupportToggle && audioServiceDiv) { - // initial visibility - audioServiceDiv.style.display = audioSupportToggle.checked ? 'block' : 'none'; - audioSupportToggle.addEventListener('change', () => { - audioServiceDiv.style.display = audioSupportToggle.checked ? 
'block' : 'none'; - markFormAsModified(); - }); +const audioSupportToggle = document.getElementById('enable_audio_file_support'); +const speechToTextToggle = document.getElementById('enable_speech_to_text_input'); +const textToSpeechToggle = document.getElementById('enable_text_to_speech'); +const audioServiceDiv = document.getElementById('audio_service_settings'); + +function areAnySpeechFeaturesEnabled() { + return [audioSupportToggle, speechToTextToggle, textToSpeechToggle].some((toggle) => Boolean(toggle?.checked)); +} + +function updateSpeechServiceSettingsVisibility() { + setSectionVisibility(audioServiceDiv, areAnySpeechFeaturesEnabled()); +} + +if (audioServiceDiv) { + updateSpeechServiceSettingsVisibility(); + [audioSupportToggle, speechToTextToggle, textToSpeechToggle].forEach((toggle) => { + if (!toggle) { + return; + } + + toggle.addEventListener('change', () => { + updateSpeechServiceSettingsVisibility(); + markFormAsModified(); + }); + }); } // Metadata Extraction UI @@ -3495,12 +3708,12 @@ function populateExtractionModels() { } if (extractToggle) { - // show/hide the model dropdown - extractModelDiv.style.display = extractToggle.checked ? 'block' : 'none'; - extractToggle.addEventListener('change', () => { + extractModelDiv.style.display = extractToggle.checked ? 'block' : 'none'; // show/hide the model dropdown - markFormAsModified(); - }); + extractToggle.addEventListener('change', () => { + extractModelDiv.style.display = extractToggle.checked ? 
'block' : 'none'; + markFormAsModified(); + }); } // Multi-Modal Vision UI @@ -3509,232 +3722,232 @@ const visionModelDiv = document.getElementById('multimodal_vision_model_settings const visionSelect = document.getElementById('multimodal_vision_model'); function populateVisionModels() { - if (!visionSelect) return; + if (!visionSelect) return; - // remember previously chosen value - const prev = visionSelect.getAttribute('data-prev') || ''; - - // clear out old options (except the placeholder) - visionSelect.innerHTML = ''; - - if (document.getElementById('enable_gpt_apim').checked) { - // use comma-separated APIM deployments - const text = document.getElementById('azure_apim_gpt_deployment').value || ''; - text.split(',') - .map(s => s.trim()) - .filter(s => s) - .forEach(d => { - const opt = new Option(d, d); - visionSelect.add(opt); + // remember previously chosen value + const prev = visionSelect.getAttribute('data-prev') || ''; + + // clear out old options (except the placeholder) + visionSelect.innerHTML = ''; + + if (document.getElementById('enable_gpt_apim').checked) { + // use comma-separated APIM deployments + const text = document.getElementById('azure_apim_gpt_deployment').value || ''; + text.split(',') + .map(s => s.trim()) + .filter(s => s) + .forEach(d => { + const opt = new Option(d, d); + visionSelect.add(opt); + }); + } else { + // use direct GPT selected deployments - filter for vision-capable models + (window.gptSelected || []).forEach(m => { + // Only include models with vision capabilities + // Vision-enabled models per Azure OpenAI docs: + // - o-series reasoning models (o1, o3, etc.) 
+ // - GPT-5 series + // - GPT-4.1 series + // - GPT-4.5 + // - GPT-4o series (gpt-4o, gpt-4o-mini) + // - GPT-4 vision models (gpt-4-vision, gpt-4-turbo-vision) + const modelNameLower = (m.modelName || '').toLowerCase(); + const isVisionCapable = + modelNameLower.includes('vision') || + modelNameLower.includes('gpt-4o') || + modelNameLower.includes('gpt-4.1') || + modelNameLower.includes('gpt-4.5') || + modelNameLower.includes('gpt-5') || + modelNameLower.match(/^o\d+/) || + modelNameLower.includes('o1-') || + modelNameLower.includes('o3-'); + + if (isVisionCapable) { + const label = `${m.deploymentName} (${m.modelName})`; + const opt = new Option(label, m.deploymentName); + visionSelect.add(opt); + } }); - } else { - // use direct GPT selected deployments - filter for vision-capable models - (window.gptSelected || []).forEach(m => { - // Only include models with vision capabilities - // Vision-enabled models per Azure OpenAI docs: - // - o-series reasoning models (o1, o3, etc.) - // - GPT-5 series - // - GPT-4.1 series - // - GPT-4.5 - // - GPT-4o series (gpt-4o, gpt-4o-mini) - // - GPT-4 vision models (gpt-4-vision, gpt-4-turbo-vision) - const modelNameLower = (m.modelName || '').toLowerCase(); - const isVisionCapable = - modelNameLower.includes('vision') || // gpt-4-vision, gpt-4-turbo-vision - modelNameLower.includes('gpt-4o') || // gpt-4o, gpt-4o-mini - modelNameLower.includes('gpt-4.1') || // gpt-4.1 series - modelNameLower.includes('gpt-4.5') || // gpt-4.5 - modelNameLower.includes('gpt-5') || // gpt-5 series - modelNameLower.match(/^o\d+/) || // o1, o3, etc. (o-series) - modelNameLower.includes('o1-') || // o1-preview, o1-mini - modelNameLower.includes('o3-'); // o3-mini, etc. 
- - if (isVisionCapable) { - const label = `${m.deploymentName} (${m.modelName})`; - const opt = new Option(label, m.deploymentName); - visionSelect.add(opt); - } - }); - } + } - // restore previous - if (prev) { - visionSelect.value = prev; - } + // restore previous + if (prev) { + visionSelect.value = prev; + } } if (visionToggle && visionModelDiv) { - // show/hide the model dropdown - visionModelDiv.style.display = visionToggle.checked ? 'block' : 'none'; - visionToggle.addEventListener('change', () => { + visionModelDiv.style.display = visionToggle.checked ? 'block' : 'none'; // show/hide the model dropdown - markFormAsModified(); - }); + visionToggle.addEventListener('change', () => { + visionModelDiv.style.display = visionToggle.checked ? 'block' : 'none'; + markFormAsModified(); + }); } // Listen for vision model selection changes if (visionSelect) { - visionSelect.addEventListener('change', () => { - // Update data-prev to remember the selection - visionSelect.setAttribute('data-prev', visionSelect.value); - markFormAsModified(); - }); + visionSelect.addEventListener('change', () => { + // Update data-prev to remember the selection + visionSelect.setAttribute('data-prev', visionSelect.value); + markFormAsModified(); + }); } -// when APIM‐toggle flips, repopulate +// when APIM-toggle flips, repopulate const apimToggle = document.getElementById('enable_gpt_apim'); if (apimToggle) { - apimToggle.addEventListener('change', () => { - populateExtractionModels(); - populateVisionModels(); - }); + apimToggle.addEventListener('change', () => { + populateExtractionModels(); + populateVisionModels(); + }); } // on load, stash previous & populate document.addEventListener('DOMContentLoaded', () => { - if (extractSelect) { - 
extractSelect.setAttribute('data-prev', extractSelect.value); + populateExtractionModels(); + } + if (visionSelect) { + visionSelect.setAttribute('data-prev', visionSelect.value); + populateVisionModels(); + } }); document.addEventListener('DOMContentLoaded', () => { - ['user','group','public'].forEach(type => { - const warnDiv = document.getElementById(`index-warning-${type}`); - const missingSpan = document.getElementById(`missing-fields-${type}`); - const fixBtn = document.getElementById(`fix-${type}-index-btn`); + ['user','group','public'].forEach(type => { + const warnDiv = document.getElementById(`index-warning-${type}`); + const missingSpan = document.getElementById(`missing-fields-${type}`); + const fixBtn = document.getElementById(`fix-${type}-index-btn`); - // 1) check for missing fields - fetch('/api/admin/settings/check_index_fields', { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - credentials: 'same-origin', - body: JSON.stringify({ indexType: type }) - }) - .then(r => { - if (!r.ok) { - return r.json().then(errorData => { - throw new Error(errorData.error || `HTTP ${r.status}: ${r.statusText}`); - }); - } - return r.json(); - }) - .then(response => { - if (response.autoFixed) { - // Fields were automatically fixed - console.log(`✅ Auto-fixed ${type} index: added ${response.fieldsAdded.length} field(s):`, response.fieldsAdded.join(', ')); - if (warnDiv) { - warnDiv.className = 'alert alert-success'; - missingSpan.textContent = `Automatically added ${response.fieldsAdded.length} field(s): ${response.fieldsAdded.join(', ')}`; - warnDiv.style.display = 'block'; - if (fixBtn) fixBtn.style.display = 'none'; - - // Hide success message after 5 seconds - setTimeout(() => { - warnDiv.style.display = 'none'; - }, 5000); - } - } else if (response.autoFixFailed) { - // Auto-fix failed, show manual button - console.warn(`Auto-fix failed for ${type} index:`, response.error); - missingSpan.textContent = response.missingFields.join(', ') 
+ ' (Auto-fix failed - please fix manually)'; - warnDiv.className = 'alert alert-warning'; - warnDiv.style.display = 'block'; - if (fixBtn) { - fixBtn.textContent = `Fix ${type} Index Fields`; - fixBtn.style.display = 'inline-block'; - } - } else if (response.missingFields && response.missingFields.length > 0) { - // Missing fields but auto-fix was disabled - missingSpan.textContent = response.missingFields.join(', '); - warnDiv.className = 'alert alert-warning'; - warnDiv.style.display = 'block'; - if (fixBtn) { - fixBtn.textContent = `Fix ${type} Index Fields`; - fixBtn.style.display = 'inline-block'; - } - } else if (response.indexExists) { - // Index exists and is complete - if (warnDiv) warnDiv.style.display = 'none'; - console.log(`${type} index is properly configured`); - } - }) - .catch(err => { - console.warn(`Checking ${type} index fields:`, err.message); + // 1) check for missing fields + fetch('/api/admin/settings/check_index_fields', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'same-origin', + body: JSON.stringify({ indexType: type }) + }) + .then(r => { + if (!r.ok) { + return r.json().then(errorData => { + throw new Error(errorData.error || `HTTP ${r.status}: ${r.statusText}`); + }); + } + return r.json(); + }) + .then(response => { + if (response.autoFixed) { + // Fields were automatically fixed + console.log(`✅ Auto-fixed ${type} index: added ${response.fieldsAdded.length} field(s):`, response.fieldsAdded.join(', ')); + if (warnDiv) { + warnDiv.className = 'alert alert-success'; + missingSpan.textContent = `Automatically added ${response.fieldsAdded.length} field(s): ${response.fieldsAdded.join(', ')}`; + warnDiv.style.display = 'block'; + if (fixBtn) fixBtn.style.display = 'none'; + + // Hide success message after 5 seconds + setTimeout(() => { + warnDiv.style.display = 'none'; + }, 5000); + } + } else if (response.autoFixFailed) { + // Auto-fix failed, show manual button + console.warn(`Auto-fix 
failed for ${type} index:`, response.error); + missingSpan.textContent = response.missingFields.join(', ') + ' (Auto-fix failed - please fix manually)'; + warnDiv.className = 'alert alert-warning'; + warnDiv.style.display = 'block'; + if (fixBtn) { + fixBtn.textContent = `Fix ${type} Index Fields`; + fixBtn.style.display = 'inline-block'; + } + } else if (response.missingFields && response.missingFields.length > 0) { + // Missing fields but auto-fix was disabled + missingSpan.textContent = response.missingFields.join(', '); + warnDiv.className = 'alert alert-warning'; + warnDiv.style.display = 'block'; + if (fixBtn) { + fixBtn.textContent = `Fix ${type} Index Fields`; + fixBtn.style.display = 'inline-block'; + } + } else if (response.indexExists) { + // Index exists and is complete + if (warnDiv) warnDiv.style.display = 'none'; + console.log(`${type} index is properly configured`); + } + }) + .catch(err => { + console.warn(`Checking ${type} index fields:`, err.message); - // Check if this is an index not found error - if (err.message.includes('does not exist yet') || err.message.includes('not found')) { - // Show a different message for missing index - if (warnDiv && missingSpan && fixBtn) { - missingSpan.textContent = `Index "${type}" does not exist yet`; - warnDiv.style.display = 'block'; - fixBtn.textContent = `Create ${type} Index`; - fixBtn.style.display = 'inline-block'; - fixBtn.dataset.action = 'create'; - } - } else if (err.message.includes('not configured')) { - // Azure AI Search not configured - if (warnDiv && missingSpan) { - missingSpan.textContent = 'Azure AI Search not configured'; - warnDiv.style.display = 'block'; - if (fixBtn) fixBtn.style.display = 'none'; - } - } else { - // Hide the warning div for other errors - if (warnDiv) warnDiv.style.display = 'none'; - } - }); + // Check if this is an index not found error + if (err.message.includes('does not exist yet') || err.message.includes('not found')) { + // Show a different message for missing 
index + if (warnDiv && missingSpan && fixBtn) { + missingSpan.textContent = `Index "${type}" does not exist yet`; + warnDiv.style.display = 'block'; + fixBtn.textContent = `Create ${type} Index`; + fixBtn.style.display = 'inline-block'; + fixBtn.dataset.action = 'create'; + } + } else if (err.message.includes('not configured')) { + // Azure AI Search not configured + if (warnDiv && missingSpan) { + missingSpan.textContent = 'Azure AI Search not configured'; + warnDiv.style.display = 'block'; + if (fixBtn) fixBtn.style.display = 'none'; + } + } else { + // Hide the warning div for other errors + if (warnDiv) warnDiv.style.display = 'none'; + } + }); - // 2) wire up the “fix” button - fixBtn.addEventListener('click', () => { - fixBtn.disabled = true; - const action = fixBtn.dataset.action || 'fix'; - const endpoint = action === 'create' ? '/api/admin/settings/create_index' : '/api/admin/settings/fix_index_fields'; - const actionText = action === 'create' ? 'Creating' : 'Fixing'; + // 2) wire up the fix button + fixBtn.addEventListener('click', () => { + fixBtn.disabled = true; + const action = fixBtn.dataset.action || 'fix'; + const endpoint = action === 'create' ? '/api/admin/settings/create_index' : '/api/admin/settings/fix_index_fields'; + const actionText = action === 'create' ? 
'Creating' : 'Fixing'; - fixBtn.textContent = `${actionText}...`; + fixBtn.textContent = `${actionText}...`; - fetch(endpoint, { - method: 'POST', - headers: { - 'Content-Type': 'application/json' - }, - credentials: 'same-origin', - body: JSON.stringify({ indexType: type }) - }) - .then(r => { - if (!r.ok) { - return r.json().then(errorData => { - throw new Error(errorData.error || `HTTP ${r.status}: ${r.statusText}`); + fetch(endpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + credentials: 'same-origin', + body: JSON.stringify({ indexType: type }) + }) + .then(r => { + if (!r.ok) { + return r.json().then(errorData => { + throw new Error(errorData.error || `HTTP ${r.status}: ${r.statusText}`); + }); + } + return r.json(); + }) + .then(resp => { + if (resp.status === 'success') { + alert(resp.message || `Successfully ${action === 'create' ? 'created' : 'fixed'} ${type} index!`); + window.location.reload(); + } else { + alert(`Failed to ${action} ${type} index: ${resp.error}`); + fixBtn.disabled = false; + fixBtn.textContent = `${action === 'create' ? 'Create' : 'Fix'} ${type} Index`; + } + }) + .catch(err => { + alert(`Error ${action === 'create' ? 'creating' : 'fixing'} ${type} index: ${err.message || err}`); + fixBtn.disabled = false; + fixBtn.textContent = `${action === 'create' ? 'Create' : 'Fix'} ${type} Index`; + }); }); - } - return r.json(); - }) - .then(resp => { - if (resp.status === 'success') { - alert(resp.message || `Successfully ${action === 'create' ? 'created' : 'fixed'} ${type} index!`); - window.location.reload(); - } else { - alert(`Failed to ${action} ${type} index: ${resp.error}`); - fixBtn.disabled = false; - fixBtn.textContent = `${action === 'create' ? 'Create' : 'Fix'} ${type} Index`; - } - }) - .catch(err => { - alert(`Error ${action === 'create' ? 'creating' : 'fixing'} ${type} index: ${err.message || err}`); - fixBtn.disabled = false; - fixBtn.textContent = `${action === 'create' ? 
'Create' : 'Fix'} ${type} Index`; }); - }); }); - }); togglePassword('toggle_gpt_key', 'azure_openai_gpt_key'); @@ -3756,7 +3969,6 @@ togglePassword('toggle_audio_files_key', 'audio_files_key'); togglePassword('toggle_office_conn_str', 'office_docs_storage_account_blob_endpoint'); togglePassword('toggle_video_conn_str', 'video_files_storage_account_url'); togglePassword('toggle_audio_conn_str', 'audio_files_storage_account_url'); -togglePassword('toggle_video_indexer_api_key', 'video_indexer_api_key'); togglePassword('toggle_speech_service_key', 'speech_service_key'); togglePassword('toggle_redis_key', 'redis_key'); togglePassword('toggle_azure_apim_redis_subscription_key', 'azure_apim_redis_subscription_key'); @@ -4050,6 +4262,9 @@ function calculateAvailableWalkthroughSteps() { const videoEnabled = document.getElementById('enable_video_file_support')?.checked || false; const audioEnabled = document.getElementById('enable_audio_file_support')?.checked || false; + const speechToTextEnabled = document.getElementById('enable_speech_to_text_input')?.checked || false; + const textToSpeechEnabled = document.getElementById('enable_text_to_speech')?.checked || false; + const speechFeaturesEnabled = audioEnabled || speechToTextEnabled || textToSpeechEnabled; const availableSteps = [1, 2, 3, 4]; // Base steps always available @@ -4060,10 +4275,10 @@ function calculateAvailableWalkthroughSteps() { if (videoEnabled) { availableSteps.push(8); // Video support } - - if (audioEnabled) { - availableSteps.push(9); // Audio support - } + } + + if (speechFeaturesEnabled) { + availableSteps.push(9); // Shared Speech Service } // Optional steps always available @@ -4123,8 +4338,10 @@ function findNextApplicableStep(currentStep) { case 9: // Audio support const audioEnabled = document.getElementById('enable_audio_file_support')?.checked || false; - if (!workspacesEnabled || !audioEnabled) { - // Skip this step if workspaces not enabled or audio not enabled + const speechToTextEnabled = 
document.getElementById('enable_speech_to_text_input')?.checked || false; + const textToSpeechEnabled = document.getElementById('enable_text_to_speech')?.checked || false; + if (!(audioEnabled || speechToTextEnabled || textToSpeechEnabled)) { + // Skip this step if no speech features are enabled nextStep++; continue; } @@ -4390,25 +4607,48 @@ function isStepComplete(stepNumber) { const videoEndpoint = document.getElementById('video_indexer_endpoint')?.value; const videoLocation = document.getElementById('video_indexer_location')?.value; const videoAccountId = document.getElementById('video_indexer_account_id')?.value; - - return videoLocation && videoAccountId && videoEndpoint; + const videoResourceGroup = document.getElementById('video_indexer_resource_group')?.value; + const videoSubscriptionId = document.getElementById('video_indexer_subscription_id')?.value; + const videoAccountName = document.getElementById('video_indexer_account_name')?.value; + + return Boolean( + videoLocation && + videoAccountId && + videoEndpoint && + videoResourceGroup && + videoSubscriptionId && + videoAccountName + ); case 9: // Audio support const audioEnabled = document.getElementById('enable_audio_file_support').checked || false; + const speechToTextEnabled = document.getElementById('enable_speech_to_text_input')?.checked || false; + const textToSpeechEnabled = document.getElementById('enable_text_to_speech')?.checked || false; + const speechFeaturesEnabled = audioEnabled || speechToTextEnabled || textToSpeechEnabled; - // If workspaces not enabled or audio not enabled, it's always complete - if (!workspacesEnabled || !audioEnabled) return true; + // If no speech features are enabled, it's always complete + if (!speechFeaturesEnabled) return true; // Otherwise check settings const speechEndpoint = document.getElementById('speech_service_endpoint')?.value; const authType = document.getElementById('speech_service_authentication_type').value; const key = 
document.getElementById('speech_service_key').value; - - if (!speechEndpoint || (authType === 'key' && !key)) { - return false; - } else { - return true; + const speechLocation = document.getElementById('speech_service_location')?.value; + const speechResourceId = document.getElementById('speech_service_resource_id')?.value; + + if (!speechEndpoint) { + return false; } + + if (authType === 'key') { + return Boolean(key); + } + + if (textToSpeechEnabled) { + return Boolean(speechLocation && speechResourceId); + } + + return true; case 10: // Content safety - always complete (optional) case 11: // User feedback and archiving - always complete (optional) @@ -4608,14 +4848,26 @@ function setupWalkthroughFieldListeners() { ], 8: [ // Video settings {selector: '#enable_video_file_support', event: 'change'}, + {selector: '#video_indexer_cloud', event: 'change'}, + {selector: '#video_indexer_custom_endpoint', event: 'input'}, {selector: '#video_indexer_location', event: 'input'}, {selector: '#video_indexer_account_id', event: 'input'}, - {selector: '#video_indexer_api_key', event: 'input'} + {selector: '#video_indexer_resource_group', event: 'input'}, + {selector: '#video_indexer_subscription_id', event: 'input'}, + {selector: '#video_indexer_account_name', event: 'input'} ], 9: [ // Audio settings {selector: '#enable_audio_file_support', event: 'change'}, + {selector: '#enable_speech_to_text_input', event: 'change'}, + {selector: '#enable_text_to_speech', event: 'change'}, {selector: '#speech_service_endpoint', event: 'input'}, - {selector: '#speech_service_key', event: 'input'} + {selector: '#speech_service_authentication_type', event: 'change'}, + {selector: '#speech_service_subscription_id', event: 'input'}, + {selector: '#speech_service_resource_group', event: 'input'}, + {selector: '#speech_service_resource_name', event: 'input'}, + {selector: '#speech_service_key', event: 'input'}, + {selector: '#speech_service_location', event: 'input'}, + {selector: 
'#speech_service_resource_id', event: 'input'} ] }; diff --git a/application/single_app/templates/_speech_service_info.html b/application/single_app/templates/_speech_service_info.html new file mode 100644 index 00000000..fad60130 --- /dev/null +++ b/application/single_app/templates/_speech_service_info.html @@ -0,0 +1,330 @@ + + + + \ No newline at end of file diff --git a/application/single_app/templates/_video_indexer_info.html b/application/single_app/templates/_video_indexer_info.html index 904ef900..cd806e30 100644 --- a/application/single_app/templates/_video_indexer_info.html +++ b/application/single_app/templates/_video_indexer_info.html @@ -12,12 +12,12 @@