Add Agent Config data asset system with ElevenLabs editor integration

Introduces UPS_AI_ConvAgent_AgentConfig_ElevenLabs data asset to encapsulate
full agent configuration (voice, LLM, prompt, language, emotions) with a
custom Detail Customization providing:
- Voice/TTS Model/LLM/Language pickers with Fetch buttons (ElevenLabs API)
- LLM latency hints in dropdown (~250ms, ~700ms, etc.)
- Create/Update/Fetch Agent buttons for REST API CRUD
- Auto-fetch on editor open, auto-select first voice for new assets
- Prompt fragment management (language, multilingual, emotion tool)
- Smart defaults: gemini-2.5-flash LLM, eleven_turbo_v2_5 TTS, English
- Speed range expanded to 0.7-1.95 (was 0.7-1.2)
- bAutoStartConversation + StartConversationWithSelectedAgent() on InteractionComponent

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
j.foucher 2026-03-01 20:51:05 +01:00
parent 8175375c28
commit 259a77f9f6
21 changed files with 2118 additions and 22 deletions

View File

@ -0,0 +1,3 @@
// Copyright ASTERION. All Rights Reserved.
#include "PS_AI_ConvAgent_AgentConfig_ElevenLabs.h"

View File

@ -1,6 +1,7 @@
// Copyright ASTERION. All Rights Reserved.
#include "PS_AI_ConvAgent_ElevenLabsComponent.h"
#include "PS_AI_ConvAgent_AgentConfig_ElevenLabs.h"
#include "PS_AI_ConvAgent_MicrophoneCaptureComponent.h"
#include "PS_AI_ConvAgent_PostureComponent.h"
#include "PS_AI_ConvAgent_InteractionSubsystem.h"
@ -266,7 +267,13 @@ void UPS_AI_ConvAgent_ElevenLabsComponent::StartConversation_Internal()
// Pass configuration to the proxy before connecting.
WebSocketProxy->TurnMode = TurnMode;
WebSocketProxy->Connect(AgentID);
// Resolve AgentID by priority: AgentConfig > component string > project default.
FString ResolvedAgentID = AgentID;
if (AgentConfig && !AgentConfig->AgentID.IsEmpty())
{
ResolvedAgentID = AgentConfig->AgentID;
}
WebSocketProxy->Connect(ResolvedAgentID);
}
void UPS_AI_ConvAgent_ElevenLabsComponent::EndConversation()

View File

@ -252,15 +252,17 @@ void UPS_AI_ConvAgent_InteractionComponent::SetSelectedAgent(UPS_AI_ConvAgent_El
}
// Network: auto-start conversation if the agent isn't connected yet.
if (!NewAgent->IsConnected() && !NewAgent->bNetIsConversing)
// Only when bAutoStartConversation is true — otherwise the user must
// call StartConversationWithSelectedAgent() explicitly (e.g. on key press).
if (bAutoStartConversation && !NewAgent->IsConnected() && !NewAgent->bNetIsConversing)
{
NewAgent->StartConversation();
}
// Ensure mic is capturing so we can route audio to the new agent.
if (MicComponent && !MicComponent->IsCapturing())
{
MicComponent->StartCapture();
// Ensure mic is capturing so we can route audio to the new agent.
if (MicComponent && !MicComponent->IsCapturing())
{
MicComponent->StartCapture();
}
}
// ── Posture: attach (eyes+head only — body tracking is enabled later
@ -351,6 +353,48 @@ void UPS_AI_ConvAgent_InteractionComponent::ClearSelection()
SetSelectedAgent(nullptr);
}
void UPS_AI_ConvAgent_InteractionComponent::StartConversationWithSelectedAgent()
{
UPS_AI_ConvAgent_ElevenLabsComponent* Agent = SelectedAgent.Get();
if (!Agent)
{
if (bDebug)
{
UE_LOG(LogPS_AI_ConvAgent_Select, Warning, TEXT("StartConversationWithSelectedAgent: no agent selected."));
}
return;
}
if (Agent->IsConnected() || Agent->bNetIsConversing)
{
if (bDebug)
{
UE_LOG(LogPS_AI_ConvAgent_Select, Log, TEXT("StartConversationWithSelectedAgent: agent already connected/conversing."));
}
return;
}
if (bDebug)
{
UE_LOG(LogPS_AI_ConvAgent_Select, Log, TEXT("StartConversationWithSelectedAgent: starting conversation with %s"),
Agent->GetOwner() ? *Agent->GetOwner()->GetName() : TEXT("(null)"));
}
Agent->StartConversation();
// Ensure mic is capturing so we can route audio to the agent.
if (MicComponent && !MicComponent->IsCapturing())
{
MicComponent->StartCapture();
}
// Start listening if auto-managed.
if (bAutoManageListening)
{
Agent->StartListening();
}
}
// ─────────────────────────────────────────────────────────────────────────────
// Posture helpers
// ─────────────────────────────────────────────────────────────────────────────

View File

@ -724,9 +724,9 @@ FString UPS_AI_ConvAgent_WebSocket_ElevenLabsProxy::BuildWebSocketURL(const FStr
return Settings->CustomWebSocketURL;
}
const FString ResolvedAgentID = AgentIDOverride.IsEmpty() ? Settings->AgentID : AgentIDOverride;
if (ResolvedAgentID.IsEmpty())
if (AgentIDOverride.IsEmpty())
{
UE_LOG(LogTemp, Error, TEXT("[PS_AI_ConvAgent] No AgentID provided. Set one via AgentConfig data asset or the AgentID property on the component."));
return FString();
}
@ -734,5 +734,5 @@ FString UPS_AI_ConvAgent_WebSocket_ElevenLabsProxy::BuildWebSocketURL(const FStr
// wss://api.elevenlabs.io/v1/convai/conversation?agent_id=<ID>
return FString::Printf(
TEXT("wss://api.elevenlabs.io/v1/convai/conversation?agent_id=%s"),
*ResolvedAgentID);
*AgentIDOverride);
}

View File

@ -23,13 +23,6 @@ public:
UPROPERTY(Config, EditAnywhere, Category = "PS AI ConvAgent|ElevenLabs API")
FString API_Key;
/**
* The default ElevenLabs Agent ID to use when none is specified
* on the component. Create agents at https://elevenlabs.io/app/conversational-ai
*/
UPROPERTY(Config, EditAnywhere, Category = "PS AI ConvAgent|ElevenLabs API")
FString AgentID;
/**
* Override the ElevenLabs WebSocket base URL. Leave empty to use the default:
* wss://api.elevenlabs.io/v1/convai/conversation

View File

@ -0,0 +1,234 @@
// Copyright ASTERION. All Rights Reserved.
#pragma once
#include "CoreMinimal.h"
#include "Engine/DataAsset.h"
#include "PS_AI_ConvAgent_AgentConfig_ElevenLabs.generated.h"
/**
* Reusable data asset that encapsulates a full ElevenLabs agent configuration:
* voice, LLM prompt, language, emotion tool, and API identity.
*
* Create ONE instance per agent in the Content Browser
* (right-click > Miscellaneous > PS AI ConvAgent Agent Config),
* then assign it on the PS AI ConvAgent ElevenLabs component.
*
* The editor Detail Customization provides:
* - Voice picker (fetches available voices from the ElevenLabs API)
* - Model picker (fetches TTS models from the ElevenLabs API)
* - LLM picker (dropdown with supported LLMs)
* - Language picker (dropdown with supported languages)
* - Create / Update / Fetch Agent buttons (REST API)
* - Pre-configured emotion tool prompt fragment
*
* At runtime, the ElevenLabsComponent reads AgentID from this asset
* to establish the WebSocket conversation.
*/
UCLASS(BlueprintType, Blueprintable,
DisplayName = "PS AI ConvAgent Agent Config (ElevenLabs)")
class PS_AI_CONVAGENT_API UPS_AI_ConvAgent_AgentConfig_ElevenLabs : public UPrimaryDataAsset
{
GENERATED_BODY()
public:
// ── Identity ─────────────────────────────────────────────────────────────
/** Agent ID assigned by ElevenLabs after Create/Sync.
* Populated automatically by the "Create Agent" editor action.
* This is the ID used to connect the WebSocket conversation.
* You can also paste an existing ID here, then use "Fetch Agent" to pull its config. */
UPROPERTY(EditAnywhere, BlueprintReadOnly, Category = "Identity",
meta = (ToolTip = "ElevenLabs Agent ID.\nPopulated when created/synced via API.\nPaste an existing ID + Fetch Agent to import."))
FString AgentID;
/** Human-readable name for this agent.
* Used as the agent name when creating on ElevenLabs. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Identity",
meta = (ToolTip = "Agent display name (visible on ElevenLabs dashboard)."))
FString AgentName;
// ── Voice ────────────────────────────────────────────────────────────────
/** Voice ID from ElevenLabs.
* Managed by the Voice picker dropdown — do not edit manually. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ToolTip = "ElevenLabs Voice ID.\nManaged by the Voice picker dropdown."))
FString VoiceID;
/** Display name of the selected voice (informational, not sent to API). */
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Voice",
meta = (ToolTip = "Name of the selected voice (display only)."))
FString VoiceName;
/** TTS model ID (e.g. "eleven_turbo_v2_5", "eleven_multilingual_v2").
* Managed by the Model picker dropdown. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ToolTip = "TTS model ID.\nManaged by the Model picker dropdown."))
FString TTSModelID = TEXT("eleven_turbo_v2_5");
/** TTS stability (0.0 - 1.0). Controls voice consistency. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ClampMin = "0.0", ClampMax = "1.0",
ToolTip = "Voice stability.\n0 = variable/expressive, 1 = consistent.\nDefault: 0.5"))
float Stability = 0.5f;
/** TTS similarity boost (0.0 - 1.0). Higher = closer to original voice. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ClampMin = "0.0", ClampMax = "1.0",
ToolTip = "Similarity boost.\n0 = less similar, 1 = more similar.\nDefault: 0.75"))
float SimilarityBoost = 0.75f;
/** TTS speed multiplier. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ClampMin = "0.7", ClampMax = "1.95",
ToolTip = "Speech speed multiplier.\nRange: 0.7-1.95.\nDefault: 1.0"))
float Speed = 1.0f;
/** LLM model used by the agent (e.g. "gpt-4o-mini", "claude-3-5-sonnet").
* Managed by the LLM picker dropdown.
* Leave empty for ElevenLabs default.
* NOTE(review): Category is "Voice" although this configures the LLM —
* confirm the Detail Customization expects it under "Voice". */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ToolTip = "LLM model.\nManaged by the LLM picker dropdown."))
FString LLMModel = TEXT("gemini-2.5-flash");
/** Agent language code (e.g. "en", "fr", "ja").
* Managed by the Language picker dropdown.
* Controls STT and TTS language selection on ElevenLabs. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ToolTip = "Language code.\nManaged by the Language picker dropdown."))
FString Language = TEXT("en");
/** Enable multilingual mode: the agent dynamically adapts to whatever
* language the user speaks in, switching seamlessly mid-conversation.
* Requires a multilingual TTS model (e.g. eleven_multilingual_v2 or eleven_turbo_v2_5).
* When enabled, the fixed language instruction (bAutoLanguageInstruction) is replaced
* by a multilingual prompt that tells the LLM to mirror the user's language.
* The Language field still serves as the default/fallback for STT. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ToolTip = "Allow the agent to switch languages dynamically.\nThe agent responds in whatever language the user speaks.\nRequires a multilingual TTS model (turbo_v2_5, multilingual_v2, flash_v2_5)."))
bool bMultilingual = false;
/** Prompt fragment appended when bMultilingual is true.
* Instructs the LLM to mirror the user's language. Editable for customization. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (MultiLine = "true", EditCondition = "bMultilingual",
ToolTip = "Prompt instructions for multilingual behavior.\nAppended when bMultilingual is true."))
FString MultilingualPromptFragment = TEXT(
"## Language\n"
"You are multilingual. ALWAYS respond in the same language the user is speaking. "
"If the user switches language mid-conversation, switch with them immediately. "
"Match the user's language exactly — do not default to English. "
"If the user has not spoken yet, use the language of your first message.");
/** Append a language instruction to the system prompt when not English.
* Ensures the LLM generates text in the correct language.
* The Language field controls STT/TTS, but NOT the LLM output language.
* Without this, the LLM may default to English for follow-up messages.
* Ignored when bMultilingual is true (multilingual prompt takes priority). */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (EditCondition = "!bMultilingual",
ToolTip = "Append a language instruction for non-English agents.\nThe Language field only controls STT/TTS, not the LLM output.\nIgnored when Multilingual is enabled."))
bool bAutoLanguageInstruction = true;
/** Prompt fragment appended when bAutoLanguageInstruction is true and language is not English.
* Use {Language} as a placeholder — it will be replaced by the language name (e.g. "French").
* Pre-filled with a standard instruction. Editable for customization. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (MultiLine = "true",
EditCondition = "bAutoLanguageInstruction && !bMultilingual",
ToolTip = "Prompt instruction for fixed-language mode.\n{Language} is replaced by the selected language name.\nAppended when language is not English."))
FString LanguagePromptFragment = TEXT(
"## Language\n"
"You MUST always respond in {Language}. "
"Never switch to any other language, "
"even for follow-up messages or when the user is silent.");
// ── Behavior ─────────────────────────────────────────────────────────────
/** Character-specific prompt describing THIS agent's personality and context.
* This is YOUR prompt — write what makes this character unique.
* The emotion tool instructions are appended automatically if enabled. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Behavior",
meta = (MultiLine = "true",
ToolTip = "Character-specific prompt.\nDescribe the character's personality, backstory, and behavior.\nEmotion tool instructions are appended automatically if enabled."))
FString CharacterPrompt;
/** First message the agent says when the conversation starts.
* Leave empty to let the agent wait for the user to speak first. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Behavior",
meta = (ToolTip = "Agent's opening message.\nLeave empty for no greeting."))
FString FirstMessage;
/** Disable the idle follow-up behavior where the agent automatically speaks again
* if the user remains silent after the greeting / last response.
* When enabled: sets turn_timeout to -1 (infinite wait) so the agent
* waits indefinitely for the user to speak first.
* NOTE(review): declared under the Behavior banner but Category is "Voice" —
* confirm this is intentional (the following two properties share it). */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ToolTip = "Prevent the agent from speaking again unprompted.\nSets turn_timeout to -1 (infinite wait).\nUseful when you want the player to initiate the conversation."))
bool bDisableIdleFollowUp = false;
/** Time (seconds) the agent waits for the user to speak before re-engaging.
* ElevenLabs API: conversation_config.turn.turn_timeout.
* Range: 1-30 seconds. Default: 7. -1 = wait indefinitely.
* Higher values make the agent more patient.
* When bDisableIdleFollowUp is true, this is overridden to -1 (infinite). */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (DisplayName = "Follow-up Timeout",
ClampMin = "-1.0", ClampMax = "30.0",
EditCondition = "!bDisableIdleFollowUp",
ToolTip = "Seconds before the agent speaks again if the user is silent.\nRange: 1-30. Default: 7. -1 = wait indefinitely.\nWhen 'Disable Idle Follow-up' is on, forced to -1 (infinite)."))
float TurnTimeout = 7.0f;
/** Maximum number of turns in a conversation. 0 = unlimited. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Voice",
meta = (ClampMin = "0",
ToolTip = "Max conversation turns.\n0 = unlimited."))
int32 MaxTurns = 0;
// ── Emotion Tool ─────────────────────────────────────────────────────────
/** Include the built-in "set_emotion" client tool in the agent configuration.
* Allows the LLM to set facial expressions (Joy, Sadness, Anger, etc.)
* that drive the FacialExpression component in real-time. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Emotion Tool",
meta = (ToolTip = "Include the set_emotion client tool.\nAllows the LLM to drive facial expressions."))
bool bIncludeEmotionTool = true;
/** System prompt fragment appended to CharacterPrompt when bIncludeEmotionTool is true.
* Pre-filled with the standard emotion instruction. Editable for customization. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Emotion Tool",
meta = (MultiLine = "true", EditCondition = "bIncludeEmotionTool",
ToolTip = "Prompt instructions for the emotion tool.\nAppended to CharacterPrompt when creating/updating the agent."))
FString EmotionToolPromptFragment = TEXT(
"## Facial Expressions\n"
"You have a set_emotion tool to control your facial expression. "
"Use it whenever the emotional context changes:\n"
"- Call set_emotion with emotion=\"joy\" when happy, laughing, or excited\n"
"- Call set_emotion with emotion=\"sadness\" when empathetic or discussing sad topics\n"
"- Call set_emotion with emotion=\"anger\" when frustrated or discussing injustice\n"
"- Call set_emotion with emotion=\"surprise\" when reacting to unexpected information\n"
"- Call set_emotion with emotion=\"fear\" when discussing scary or worrying topics\n"
"- Call set_emotion with emotion=\"disgust\" when reacting to unpleasant things\n"
"- Call set_emotion with emotion=\"neutral\" to return to a calm expression\n\n"
"Use intensity to match the strength of the emotion:\n"
"- \"low\" for subtle hints (slight smile, mild concern)\n"
"- \"medium\" for normal expression (default)\n"
"- \"high\" for strong reactions (big laugh, deep sadness, shock)\n\n"
"Always return to neutral when the emotional moment passes.");
// ── Dynamic Variables ────────────────────────────────────────────────────
/** Key-value pairs sent as dynamic_variables at conversation start.
* Referenced in the system prompt as {{variable_name}}. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Dynamic Variables",
meta = (ToolTip = "Dynamic variables available in the system prompt as {{key}}.\nSent at conversation start."))
TMap<FString, FString> DefaultDynamicVariables;
// ── Metadata (read-only, populated by API) ───────────────────────────────
/** Timestamp of last API sync (ISO 8601). */
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Metadata",
meta = (ToolTip = "When this asset was last synced with ElevenLabs."))
FString LastSyncTimestamp;
};

View File

@ -14,6 +14,7 @@
class UAudioComponent;
class USoundAttenuation;
class UPS_AI_ConvAgent_MicrophoneCaptureComponent;
class UPS_AI_ConvAgent_AgentConfig_ElevenLabs;
class APlayerController;
// ─────────────────────────────────────────────────────────────────────────────
@ -110,9 +111,17 @@ public:
// ── Configuration ─────────────────────────────────────────────────────────
/** ElevenLabs Agent ID used for this conversation. Leave empty to use the default from Project Settings > PS AI ConvAgent - ElevenLabs. */
/** Agent configuration data asset.
* When set, the AgentID is resolved from this asset at conversation start.
* Create one via Content Browser right-click > Miscellaneous > PS AI ConvAgent Agent Config. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "PS AI ConvAgent|ElevenLabs",
meta = (ToolTip = "ElevenLabs Agent ID. Leave empty to use the project default from Project Settings."))
meta = (ToolTip = "Agent configuration data asset.\nOverrides the AgentID string below when set."))
TObjectPtr<UPS_AI_ConvAgent_AgentConfig_ElevenLabs> AgentConfig;
/** ElevenLabs Agent ID used for this conversation. Leave empty to use the default from Project Settings > PS AI ConvAgent - ElevenLabs.
* Overridden by AgentConfig if set. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "PS AI ConvAgent|ElevenLabs",
meta = (ToolTip = "ElevenLabs Agent ID. Leave empty to use the project default from Project Settings.\nOverridden by AgentConfig if set."))
FString AgentID;
/** How turn-taking is managed between the user and the agent.\n- Server VAD (recommended): ElevenLabs automatically detects when the user stops speaking.\n- Client Controlled: You manually call StartListening/StopListening (push-to-talk with a key). */

View File

@ -114,6 +114,18 @@ public:
ToolTip = "Seconds to wait before the agent stops looking at the pawn.\n0 = immediate."))
float PostureDetachDelay = 0.0f;
// ── Conversation management ──────────────────────────────────────────────
/** Automatically start the WebSocket conversation when an agent is selected
* (enters range + view cone). When false, selecting an agent only manages
* posture and visual awareness the conversation must be started explicitly
* via StartConversationWithSelectedAgent() (e.g. on a key press).
* Set to false when you have multiple agents in a scene to prevent them
* all from greeting the player simultaneously. */
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "PS AI ConvAgent|Interaction",
meta = (ToolTip = "Auto-start the WebSocket when an agent is selected by proximity.\nSet to false to require explicit interaction (call StartConversationWithSelectedAgent).\nUseful with multiple agents to prevent simultaneous greetings."))
bool bAutoStartConversation = true;
// ── Listening management ─────────────────────────────────────────────────
/** Automatically call StartListening/StopListening on the agent's
@ -163,6 +175,14 @@ public:
UFUNCTION(BlueprintCallable, Category = "PS AI ConvAgent|Interaction")
void ForceSelectAgent(UPS_AI_ConvAgent_ElevenLabsComponent* Agent);
/** Start the WebSocket conversation with the currently selected agent.
* Use this when bAutoStartConversation is false and the player explicitly
* interacts (e.g. presses a key, enters a trigger zone).
* Does nothing if no agent is selected or the agent is already connected.
* Also starts mic capture and listening automatically. */
UFUNCTION(BlueprintCallable, Category = "PS AI ConvAgent|Interaction")
void StartConversationWithSelectedAgent();
/** Clear the current selection. Automatic selection resumes next tick. */
UFUNCTION(BlueprintCallable, Category = "PS AI ConvAgent|Interaction")
void ClearSelection();

View File

@ -22,5 +22,17 @@ public class PS_AI_ConvAgentEditor : ModuleRules
// Runtime module containing FAnimNode_PS_AI_ConvAgent_LipSync
"PS_AI_ConvAgent",
});
PrivateDependencyModuleNames.AddRange(new string[]
{
// Slate UI for Detail Customization
"Slate",
"SlateCore",
"PropertyEditor",
// HTTP requests for ElevenLabs API (voice list, agent CRUD)
"HTTP",
"Json",
"JsonUtilities",
});
}
}

View File

@ -1,16 +1,39 @@
// Copyright ASTERION. All Rights Reserved.
#include "Modules/ModuleManager.h"
#include "PropertyEditorModule.h"
#include "PS_AI_ConvAgent_AgentConfig_ElevenLabs.h"
#include "PS_AI_ConvAgent_AgentConfigCustomization_ElevenLabs.h"
/**
* Editor module for PS_AI_ConvAgent plugin.
* Provides AnimGraph node(s) for the PS AI ConvAgent Lip Sync system.
* Provides AnimGraph nodes, asset factories, and Detail Customizations.
*/
class FPS_AI_ConvAgentEditorModule : public IModuleInterface
{
public:
virtual void StartupModule() override {}
virtual void ShutdownModule() override {}
virtual void StartupModule() override
{
FPropertyEditorModule& PropertyModule =
FModuleManager::LoadModuleChecked<FPropertyEditorModule>("PropertyEditor");
PropertyModule.RegisterCustomClassLayout(
UPS_AI_ConvAgent_AgentConfig_ElevenLabs::StaticClass()->GetFName(),
FOnGetDetailCustomizationInstance::CreateStatic(
&FPS_AI_ConvAgent_AgentConfigCustomization_ElevenLabs::MakeInstance));
}
virtual void ShutdownModule() override
{
if (FModuleManager::Get().IsModuleLoaded("PropertyEditor"))
{
FPropertyEditorModule& PropertyModule =
FModuleManager::GetModuleChecked<FPropertyEditorModule>("PropertyEditor");
PropertyModule.UnregisterCustomClassLayout(
UPS_AI_ConvAgent_AgentConfig_ElevenLabs::StaticClass()->GetFName());
}
}
};
IMPLEMENT_MODULE(FPS_AI_ConvAgentEditorModule, PS_AI_ConvAgentEditor)

View File

@ -0,0 +1,95 @@
// Copyright ASTERION. All Rights Reserved.
#pragma once
#include "CoreMinimal.h"
#include "IDetailCustomization.h"
class IDetailLayoutBuilder;
// Forward declaration: FJsonObject lives in the Json module and is not pulled
// in by CoreMinimal/IDetailCustomization; TSharedPtr<FJsonObject> below only
// needs the type name, not the definition.
class FJsonObject;
/**
 * Detail Customization for UPS_AI_ConvAgent_AgentConfig_ElevenLabs data assets.
 *
 * Provides:
 * - Voice category: "Fetch Voices" button + STextComboBox dropdown
 * - Voice category: "Fetch Models" button + STextComboBox dropdown
 * - Behavior category: LLM picker with "Fetch" button + STextComboBox dropdown
 * - Behavior category: Language picker dropdown (static list, with a Fetch handler)
 * - Identity category: "Create Agent" / "Update Agent" / "Fetch Agent" buttons + status text
 * - Hidden properties: VoiceID, VoiceName, TTSModelID, LLMModel, Language (managed by dropdowns)
 */
class FPS_AI_ConvAgent_AgentConfigCustomization_ElevenLabs : public IDetailCustomization
{
public:
	/** Factory used by FPropertyEditorModule::RegisterCustomClassLayout. */
	static TSharedRef<IDetailCustomization> MakeInstance();
	virtual void CustomizeDetails(IDetailLayoutBuilder& DetailBuilder) override;
private:
	// ── Voice picker ─────────────────────────────────────────────────────────
	void OnFetchVoicesClicked();
	void OnVoiceSelected(TSharedPtr<FString> NewSelection, ESelectInfo::Type SelectInfo);
	// ── Model picker ─────────────────────────────────────────────────────────
	void OnFetchModelsClicked();
	void OnModelSelected(TSharedPtr<FString> NewSelection, ESelectInfo::Type SelectInfo);
	// ── LLM picker ──────────────────────────────────────────────────────────
	void OnFetchLLMsClicked();
	void OnLLMSelected(TSharedPtr<FString> NewSelection, ESelectInfo::Type SelectInfo);
	// ── Language picker ──────────────────────────────────────────────────────
	void OnFetchLanguagesClicked();
	void OnLanguageSelected(TSharedPtr<FString> NewSelection, ESelectInfo::Type SelectInfo);
	// ── Agent API ────────────────────────────────────────────────────────────
	void OnCreateAgentClicked();
	void OnUpdateAgentClicked();
	void OnFetchAgentClicked();
	// ── Helpers ──────────────────────────────────────────────────────────────
	FString GetAPIKey() const;
	TSharedPtr<FJsonObject> BuildAgentPayload() const;
	TSharedPtr<FJsonObject> BuildEmotionToolDefinition() const;
	/** Display a status message in the Identity category.
	 * Color: red for errors, green for success, blue/cyan for info. */
	void SetStatusText(const FString& Text);
	void SetStatusError(const FString& Text);
	void SetStatusSuccess(const FString& Text);
	/** Parse ElevenLabs API error JSON and return a human-readable message. */
	static FString ParseAPIError(int32 HttpCode, const FString& ResponseBody);
	/** Retrieve the data asset being edited (first selected object). */
	class UPS_AI_ConvAgent_AgentConfig_ElevenLabs* GetEditedAsset() const;
	// ── Cached state ─────────────────────────────────────────────────────────
	TArray<TWeakObjectPtr<UObject>> SelectedObjects;
	// Voice combo data
	TArray<TSharedPtr<FString>> VoiceDisplayNames;
	TArray<FString> VoiceIDs;
	TSharedPtr<class STextComboBox> VoiceComboBox;
	// Model combo data
	TArray<TSharedPtr<FString>> ModelDisplayNames;
	TArray<FString> ModelIDs;
	TSharedPtr<class STextComboBox> ModelComboBox;
	// LLM combo data
	TArray<TSharedPtr<FString>> LLMDisplayNames;
	TArray<FString> LLMModelIDs;
	TSharedPtr<class STextComboBox> LLMComboBox;
	// Language combo data
	TArray<TSharedPtr<FString>> LanguageDisplayNames;
	TArray<FString> LanguageCodes;
	TSharedPtr<class STextComboBox> LanguageComboBox;
	// Status feedback
	TSharedPtr<class STextBlock> StatusTextBlock;
	// Guard: prevents infinite auto-fetch loop when PostEditChange re-triggers CustomizeDetails.
	bool bAutoFetchDone = false;
};

View File

@ -0,0 +1,29 @@
// Copyright ASTERION. All Rights Reserved.
#include "PS_AI_ConvAgent_AgentConfigFactory_ElevenLabs.h"
#include "PS_AI_ConvAgent_AgentConfig_ElevenLabs.h"
#include "AssetTypeCategories.h"
UPS_AI_ConvAgent_AgentConfigFactory_ElevenLabs::UPS_AI_ConvAgent_AgentConfigFactory_ElevenLabs()
{
	// Expose the asset in the Content Browser "create new" menu and open the
	// editor immediately after creation.
	bCreateNew = true;
	bEditAfterNew = true;
	SupportedClass = UPS_AI_ConvAgent_AgentConfig_ElevenLabs::StaticClass();
}
UObject* UPS_AI_ConvAgent_AgentConfigFactory_ElevenLabs::FactoryCreateNew(
	UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags,
	UObject* Context, FFeedbackContext* Warn)
{
	// Guard against asset tools handing us an unrelated class: NewObject below
	// instantiates the caller-supplied Class, so it must be a config subclass.
	check(Class && Class->IsChildOf(UPS_AI_ConvAgent_AgentConfig_ElevenLabs::StaticClass()));
	return NewObject<UPS_AI_ConvAgent_AgentConfig_ElevenLabs>(InParent, Class, Name, Flags);
}
FText UPS_AI_ConvAgent_AgentConfigFactory_ElevenLabs::GetDisplayName() const
{
	// Label shown in the Content Browser's new-asset menu.
	const FString MenuLabel = TEXT("PS AI ConvAgent Agent Config (ElevenLabs)");
	return FText::FromString(MenuLabel);
}
uint32 UPS_AI_ConvAgent_AgentConfigFactory_ElevenLabs::GetMenuCategories() const
{
	// File the asset under "Miscellaneous" in the create-asset menu.
	return static_cast<uint32>(EAssetTypeCategories::Misc);
}

View File

@ -0,0 +1,27 @@
// Copyright ASTERION. All Rights Reserved.
#pragma once
#include "CoreMinimal.h"
#include "Factories/Factory.h"
#include "PS_AI_ConvAgent_AgentConfigFactory_ElevenLabs.generated.h"
/**
* Factory that lets users create PS_AI_ConvAgent_AgentConfig_ElevenLabs assets
* directly from the Content Browser (right-click > Miscellaneous).
*/
UCLASS()
class UPS_AI_ConvAgent_AgentConfigFactory_ElevenLabs : public UFactory
{
GENERATED_BODY()
public:
/** Sets SupportedClass and the create-new / edit-after-new flags. */
UPS_AI_ConvAgent_AgentConfigFactory_ElevenLabs();
/** Instantiates a new UPS_AI_ConvAgent_AgentConfig_ElevenLabs asset in InParent. */
virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent,
FName Name, EObjectFlags Flags, UObject* Context,
FFeedbackContext* Warn) override;
/** Display name shown in the Content Browser create menu. */
virtual FText GetDisplayName() const override;
/** Asset category for the create menu (Miscellaneous). */
virtual uint32 GetMenuCategories() const override;
};