From 2e96e3c7665e1a7d9ca7479e148351fd934b0f2b Mon Sep 17 00:00:00 2001 From: "j.foucher" Date: Thu, 5 Mar 2026 11:09:00 +0100 Subject: [PATCH] Smooth transitions, lip sync speech blend, body expression override mode, SendTextToSelectedAgent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Replace FInterpConstantTo with FInterpTo (exponential ease-out) in all 4 anim components - Apply SmoothStep to crossfade alphas for smooth animation transitions - Add SpeechBlendAlpha to LipSync: fades out mouth curves when not speaking, letting FacialExpression emotion curves (including mouth) show through - Remove LipSync AnimNode grace period zeroing to avoid overwriting emotion curves - Revert BodyExpression to override mode (additive broken with full-pose anims) - Default ExcludeBones = neck_01 to prevent Gaze/Posture conflicts - Fix mid-crossfade animation pop in BodyExpression SwitchToNewAnim - Add Neutral emotion fallback in Body and Facial expression components - Add SendTextToSelectedAgent convenience method on InteractionComponent - Add debug HUD display for BodyExpression component - Update CoreRedirects for Posture → Gaze rename Co-Authored-By: Claude Opus 4.6 --- Unreal/PS_AI_Agent/Config/DefaultEngine.ini | 17 +- ...nimNode_PS_AI_ConvAgent_BodyExpression.cpp | 140 ++++++++++----- .../AnimNode_PS_AI_ConvAgent_LipSync.cpp | 44 ++--- ...S_AI_ConvAgent_BodyExpressionComponent.cpp | 165 +++++++++++++++--- ...AI_ConvAgent_FacialExpressionComponent.cpp | 18 +- .../Private/PS_AI_ConvAgent_GazeComponent.cpp | 22 ++- .../PS_AI_ConvAgent_InteractionComponent.cpp | 31 ++++ .../PS_AI_ConvAgent_LipSyncComponent.cpp | 70 +++++++- .../AnimNode_PS_AI_ConvAgent_BodyExpression.h | 18 +- .../PS_AI_ConvAgent_BodyExpressionComponent.h | 11 ++ .../Public/PS_AI_ConvAgent_GazeComponent.h | 13 +- .../PS_AI_ConvAgent_InteractionComponent.h | 7 + .../Public/PS_AI_ConvAgent_LipSyncComponent.h | 28 +++ 13 files changed, 464 insertions(+), 120 
deletions(-) diff --git a/Unreal/PS_AI_Agent/Config/DefaultEngine.ini b/Unreal/PS_AI_Agent/Config/DefaultEngine.ini index ae771ec..5897085 100644 --- a/Unreal/PS_AI_Agent/Config/DefaultEngine.ini +++ b/Unreal/PS_AI_Agent/Config/DefaultEngine.ini @@ -90,7 +90,7 @@ FontDPI=72 ; ── Generic classes: ElevenLabs → PS_AI_ConvAgent ── +ClassRedirects=(OldName="ElevenLabsLipSyncComponent", NewName="PS_AI_ConvAgent_LipSyncComponent") +ClassRedirects=(OldName="ElevenLabsFacialExpressionComponent", NewName="PS_AI_ConvAgent_FacialExpressionComponent") -+ClassRedirects=(OldName="ElevenLabsPostureComponent", NewName="PS_AI_ConvAgent_PostureComponent") ++ClassRedirects=(OldName="ElevenLabsPostureComponent", NewName="PS_AI_ConvAgent_GazeComponent") +ClassRedirects=(OldName="ElevenLabsMicrophoneCaptureComponent", NewName="PS_AI_ConvAgent_MicrophoneCaptureComponent") +ClassRedirects=(OldName="ElevenLabsLipSyncPoseMap", NewName="PS_AI_ConvAgent_LipSyncPoseMap") +ClassRedirects=(OldName="ElevenLabsEmotionPoseMap", NewName="PS_AI_ConvAgent_EmotionPoseMap") @@ -98,7 +98,7 @@ FontDPI=72 ; ── Generic classes: PS_AI_Agent → PS_AI_ConvAgent (intermediate rename) ── +ClassRedirects=(OldName="PS_AI_Agent_LipSyncComponent", NewName="PS_AI_ConvAgent_LipSyncComponent") +ClassRedirects=(OldName="PS_AI_Agent_FacialExpressionComponent", NewName="PS_AI_ConvAgent_FacialExpressionComponent") -+ClassRedirects=(OldName="PS_AI_Agent_PostureComponent", NewName="PS_AI_ConvAgent_PostureComponent") ++ClassRedirects=(OldName="PS_AI_Agent_PostureComponent", NewName="PS_AI_ConvAgent_GazeComponent") +ClassRedirects=(OldName="PS_AI_Agent_MicrophoneCaptureComponent", NewName="PS_AI_ConvAgent_MicrophoneCaptureComponent") +ClassRedirects=(OldName="PS_AI_Agent_LipSyncPoseMap", NewName="PS_AI_ConvAgent_LipSyncPoseMap") +ClassRedirects=(OldName="PS_AI_Agent_EmotionPoseMap", NewName="PS_AI_ConvAgent_EmotionPoseMap") @@ -114,18 +114,23 @@ FontDPI=72 ; ── AnimNode structs ── 
+StructRedirects=(OldName="AnimNode_ElevenLabsLipSync", NewName="AnimNode_PS_AI_ConvAgent_LipSync") +StructRedirects=(OldName="AnimNode_ElevenLabsFacialExpression", NewName="AnimNode_PS_AI_ConvAgent_FacialExpression") -+StructRedirects=(OldName="AnimNode_ElevenLabsPosture", NewName="AnimNode_PS_AI_ConvAgent_Posture") ++StructRedirects=(OldName="AnimNode_ElevenLabsPosture", NewName="AnimNode_PS_AI_ConvAgent_Gaze") +StructRedirects=(OldName="AnimNode_PS_AI_Agent_LipSync", NewName="AnimNode_PS_AI_ConvAgent_LipSync") +StructRedirects=(OldName="AnimNode_PS_AI_Agent_FacialExpression", NewName="AnimNode_PS_AI_ConvAgent_FacialExpression") -+StructRedirects=(OldName="AnimNode_PS_AI_Agent_Posture", NewName="AnimNode_PS_AI_ConvAgent_Posture") ++StructRedirects=(OldName="AnimNode_PS_AI_Agent_Posture", NewName="AnimNode_PS_AI_ConvAgent_Gaze") ; ── AnimGraphNode classes ── +ClassRedirects=(OldName="AnimGraphNode_ElevenLabsLipSync", NewName="AnimGraphNode_PS_AI_ConvAgent_LipSync") +ClassRedirects=(OldName="AnimGraphNode_ElevenLabsFacialExpression", NewName="AnimGraphNode_PS_AI_ConvAgent_FacialExpression") -+ClassRedirects=(OldName="AnimGraphNode_ElevenLabsPosture", NewName="AnimGraphNode_PS_AI_ConvAgent_Posture") ++ClassRedirects=(OldName="AnimGraphNode_ElevenLabsPosture", NewName="AnimGraphNode_PS_AI_ConvAgent_Gaze") +ClassRedirects=(OldName="AnimGraphNode_PS_AI_Agent_LipSync", NewName="AnimGraphNode_PS_AI_ConvAgent_LipSync") +ClassRedirects=(OldName="AnimGraphNode_PS_AI_Agent_FacialExpression", NewName="AnimGraphNode_PS_AI_ConvAgent_FacialExpression") -+ClassRedirects=(OldName="AnimGraphNode_PS_AI_Agent_Posture", NewName="AnimGraphNode_PS_AI_ConvAgent_Posture") ++ClassRedirects=(OldName="AnimGraphNode_PS_AI_Agent_Posture", NewName="AnimGraphNode_PS_AI_ConvAgent_Gaze") + +; ── Posture → Gaze rename ── ++ClassRedirects=(OldName="PS_AI_ConvAgent_PostureComponent", NewName="PS_AI_ConvAgent_GazeComponent") ++StructRedirects=(OldName="AnimNode_PS_AI_ConvAgent_Posture", 
NewName="AnimNode_PS_AI_ConvAgent_Gaze") ++ClassRedirects=(OldName="AnimGraphNode_PS_AI_ConvAgent_Posture", NewName="AnimGraphNode_PS_AI_ConvAgent_Gaze") ; ── Factory classes ── +ClassRedirects=(OldName="ElevenLabsLipSyncPoseMapFactory", NewName="PS_AI_ConvAgent_LipSyncPoseMapFactory") diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/AnimNode_PS_AI_ConvAgent_BodyExpression.cpp b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/AnimNode_PS_AI_ConvAgent_BodyExpression.cpp index 5351aef..d7df488 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/AnimNode_PS_AI_ConvAgent_BodyExpression.cpp +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/AnimNode_PS_AI_ConvAgent_BodyExpression.cpp @@ -127,7 +127,10 @@ void FAnimNode_PS_AI_ConvAgent_BodyExpression::Evaluate_AnyThread(FPoseContext& const bool bHavePrevPose = bCrossfading && PrevPose.GetNumBones() > 0; - // ── Per-bone blend ────────────────────────────────────────────────────── + // ── Per-bone blend (override mode) ────────────────────────────────────── + // Lerp each bone from the upstream pose toward the expression pose, + // weighted by the bone mask and activation alpha. + // ExcludeBones (default: neck_01) prevents conflicts with Gaze/Posture. 
for (int32 CompactIdx = 0; CompactIdx < NumBones; ++CompactIdx) { if (BoneMask[CompactIdx] <= 0.0f) @@ -136,18 +139,19 @@ void FAnimNode_PS_AI_ConvAgent_BodyExpression::Evaluate_AnyThread(FPoseContext& const FCompactPoseBoneIndex BoneIdx(CompactIdx); const float W = BoneMask[CompactIdx] * FinalWeight; + // Compute the expression pose (handle crossfade between prev and active) + FTransform ExpressionPose; if (bHavePrevPose) { - // Crossfade between previous and active emotion poses - FTransform BlendedEmotion; - BlendedEmotion.Blend(PrevPose[BoneIdx], ActivePose[BoneIdx], CachedSnapshot.CrossfadeAlpha); - Output.Pose[BoneIdx].BlendWith(BlendedEmotion, W); + ExpressionPose.Blend(PrevPose[BoneIdx], ActivePose[BoneIdx], CachedSnapshot.CrossfadeAlpha); } else { - // Direct blend with active emotion pose - Output.Pose[BoneIdx].BlendWith(ActivePose[BoneIdx], W); + ExpressionPose = ActivePose[BoneIdx]; } + + // Override: replace (lerp) upstream toward expression pose + Output.Pose[BoneIdx].BlendWith(ExpressionPose, W); } } @@ -169,59 +173,111 @@ void FAnimNode_PS_AI_ConvAgent_BodyExpression::BuildBoneMask(const FBoneContaine BoneMask.SetNumZeroed(NumBones); bBoneMaskValid = false; - // Full body mode: all bones get weight 1.0 + const FReferenceSkeleton& RefSkel = RequiredBones.GetReferenceSkeleton(); + const TArray<FBoneIndexType>& BoneIndices = RequiredBones.GetBoneIndicesArray(); + + // ── Step 1: Build initial mask (include) ────────────────────────────── + if (!bUpperBodyOnly) { + // Full body mode: all bones get weight 1.0 for (int32 i = 0; i < NumBones; ++i) { BoneMask[i] = 1.0f; } - bBoneMaskValid = (NumBones > 0); - - UE_LOG(LogPS_AI_ConvAgent_BodyExprAnimNode, Log, - TEXT("Bone mask built: FULL BODY (%d bones)."), NumBones); - return; } - - // Upper body mode: only BlendRootBone descendants - const FReferenceSkeleton& RefSkel = RequiredBones.GetReferenceSkeleton(); - const int32 RootMeshIdx = RefSkel.FindBoneIndex(BlendRootBone); - - if (RootMeshIdx == INDEX_NONE) + else { -
UE_LOG(LogPS_AI_ConvAgent_BodyExprAnimNode, Warning, - TEXT("BlendRootBone '%s' not found in skeleton. Body expression disabled."), - *BlendRootBone.ToString()); - return; + // Upper body mode: only BlendRootBone descendants + const int32 RootMeshIdx = RefSkel.FindBoneIndex(BlendRootBone); + + if (RootMeshIdx == INDEX_NONE) + { + UE_LOG(LogPS_AI_ConvAgent_BodyExprAnimNode, Warning, + TEXT("BlendRootBone '%s' not found in skeleton. Body expression disabled."), + *BlendRootBone.ToString()); + return; + } + + for (int32 CompactIdx = 0; CompactIdx < NumBones; ++CompactIdx) + { + const int32 MeshIdx = static_cast<int32>(BoneIndices[CompactIdx]); + + // Walk up the parent chain: if BlendRootBone is an ancestor + // (or is this bone itself), mark it for blending. + int32 Current = MeshIdx; + while (Current != INDEX_NONE) + { + if (Current == RootMeshIdx) + { + BoneMask[CompactIdx] = 1.0f; + break; + } + Current = RefSkel.GetParentIndex(Current); + } + } + } - const TArray<FBoneIndexType>& BoneIndices = RequiredBones.GetBoneIndicesArray(); + // ── Step 2: Exclude bone subtrees ──────────────────────────────────── + // For each exclude bone, zero out it and all its descendants. + // This prevents conflicts with Gaze/Posture on neck/head bones. 
+ + int32 ExcludedCount = 0; + if (ExcludeBones.Num() > 0) + { + // Collect mesh indices for all exclude roots + TArray<int32> ExcludeRootIndices; + for (const FName& BoneName : ExcludeBones) + { + const int32 ExclIdx = RefSkel.FindBoneIndex(BoneName); + if (ExclIdx != INDEX_NONE) + { + ExcludeRootIndices.Add(ExclIdx); + } + else + { + UE_LOG(LogPS_AI_ConvAgent_BodyExprAnimNode, Warning, + TEXT("ExcludeBone '%s' not found in skeleton — ignored."), + *BoneName.ToString()); + } + } + + // Zero out any bone whose parent chain includes an exclude root + for (int32 CompactIdx = 0; CompactIdx < NumBones; ++CompactIdx) + { + if (BoneMask[CompactIdx] <= 0.0f) + continue; // Already excluded or not included + + const int32 MeshIdx = static_cast<int32>(BoneIndices[CompactIdx]); + int32 Current = MeshIdx; + while (Current != INDEX_NONE) + { + if (ExcludeRootIndices.Contains(Current)) + { + BoneMask[CompactIdx] = 0.0f; + ++ExcludedCount; + break; + } + Current = RefSkel.GetParentIndex(Current); + } + } + } + + // ── Count final active bones ───────────────────────────────────────── int32 MaskedCount = 0; - for (int32 CompactIdx = 0; CompactIdx < NumBones; ++CompactIdx) + for (int32 i = 0; i < NumBones; ++i) { - const int32 MeshIdx = static_cast<int32>(BoneIndices[CompactIdx]); - - // Walk up the parent chain: if BlendRootBone is an ancestor - // (or is this bone itself), mark it for blending. 
- int32 Current = MeshIdx; - while (Current != INDEX_NONE) - { - if (Current == RootMeshIdx) - { - BoneMask[CompactIdx] = 1.0f; - ++MaskedCount; - break; - } - Current = RefSkel.GetParentIndex(Current); - } + if (BoneMask[i] > 0.0f) + ++MaskedCount; } bBoneMaskValid = (MaskedCount > 0); UE_LOG(LogPS_AI_ConvAgent_BodyExprAnimNode, Log, - TEXT("Bone mask built: %d/%d bones from root '%s'."), - MaskedCount, NumBones, *BlendRootBone.ToString()); + TEXT("Bone mask built: %d/%d active bones (root='%s', excluded=%d from %d subtrees)."), + MaskedCount, NumBones, *BlendRootBone.ToString(), + ExcludedCount, ExcludeBones.Num()); } // ───────────────────────────────────────────────────────────────────────────── diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/AnimNode_PS_AI_ConvAgent_LipSync.cpp b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/AnimNode_PS_AI_ConvAgent_LipSync.cpp index 1b4c99f..0fafba8 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/AnimNode_PS_AI_ConvAgent_LipSync.cpp +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/AnimNode_PS_AI_ConvAgent_LipSync.cpp @@ -89,17 +89,15 @@ void FAnimNode_PS_AI_ConvAgent_LipSync::Evaluate_AnyThread(FPoseContext& Output) // ── Inject lip sync curves into the pose output ────────────────────── // - // IMPORTANT: Always write ALL curves that lip sync has ever touched, - // including at 0.0. If we skip near-zero curves, upstream animation - // values (idle expressions, breathing, etc.) leak through and cause - // visible pops when lip sync curves cross the threshold. + // While lip sync is producing curves: write them all (including 0s) + // and track every curve name in KnownCurveNames. // - // Strategy: - // - While lip sync is producing curves: write them all (including 0s) - // and track every curve name in KnownCurveNames. 
- // - After lip sync goes silent: keep writing 0s for a grace period - // (30 frames ≈ 0.5s) so the upstream can blend back in smoothly - // via the component's activation alpha, then release. + // When lip sync goes silent (CachedCurves empty): release immediately. + // The component's SpeechBlendAlpha already handles smooth fade-out, + // so by the time CachedCurves becomes empty, values have already + // decayed to near-zero. Releasing immediately allows the upstream + // FacialExpressionComponent's emotion curves (including mouth) to + // flow through without being overwritten by zeros. if (CachedCurves.Num() > 0) { @@ -113,7 +111,9 @@ void FAnimNode_PS_AI_ConvAgent_LipSync::Evaluate_AnyThread(FPoseContext& Output) } // Zero any known curves NOT in the current frame - // (e.g. a blendshape that was active last frame but decayed away) + // (e.g. a blendshape that was active last frame but decayed away). + // This prevents upstream animation values from leaking through + // during active speech when a curve temporarily goes to zero. for (const FName& Name : KnownCurveNames) { if (!CachedCurves.Contains(Name)) @@ -124,23 +124,11 @@ void FAnimNode_PS_AI_ConvAgent_LipSync::Evaluate_AnyThread(FPoseContext& Output) } else if (KnownCurveNames.Num() > 0) { - // Lip sync went silent — keep zeroing known curves for a grace - // period so upstream values don't pop in abruptly. - ++FramesSinceLastActive; - - if (FramesSinceLastActive < 30) - { - for (const FName& Name : KnownCurveNames) - { - Output.Curve.Set(Name, 0.0f); - } - } - else - { - // Grace period over — release curves, let upstream through - KnownCurveNames.Reset(); - FramesSinceLastActive = 0; - } + // Lip sync went silent — release immediately. + // The smooth fade-out was handled at the component level + // (SpeechBlendAlpha), so upstream emotion curves can take over. 
+ KnownCurveNames.Reset(); + FramesSinceLastActive = 0; } } diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_BodyExpressionComponent.cpp b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_BodyExpressionComponent.cpp index d7aa621..631247a 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_BodyExpressionComponent.cpp +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_BodyExpressionComponent.cpp @@ -4,6 +4,7 @@ #include "PS_AI_ConvAgent_ElevenLabsComponent.h" #include "PS_AI_ConvAgent_BodyPoseMap.h" #include "Animation/AnimSequence.h" +#include "Engine/Engine.h" DEFINE_LOG_CATEGORY_STATIC(LogPS_AI_ConvAgent_BodyExpr, Log, All); @@ -119,6 +120,12 @@ const TArray>* UPS_AI_ConvAgent_BodyExpressionComponen if (!BodyPoseMap) return nullptr; const FPS_AI_ConvAgent_BodyAnimList* AnimList = BodyPoseMap->BodyPoses.Find(Emotion); + + // Fallback to Neutral if the requested emotion has no entry in the data asset + if (!AnimList && Emotion != EPS_AI_ConvAgent_Emotion::Neutral) + { + AnimList = BodyPoseMap->BodyPoses.Find(EPS_AI_ConvAgent_Emotion::Neutral); + } if (!AnimList) return nullptr; if (!bSpeaking) @@ -178,25 +185,49 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::SwitchToNewAnim(UAnimSequence* Ne if (!bForce && NewAnim == ActiveAnim) return; if (!NewAnim) return; - // Current active becomes previous for crossfade - PrevAnim = ActiveAnim; - PrevPlaybackTime = ActivePlaybackTime; - - // New anim starts from the beginning - ActiveAnim = NewAnim; - ActivePlaybackTime = 0.0f; - - // Begin crossfade - CrossfadeAlpha = 0.0f; - - if (bDebug && DebugVerbosity >= 1) + if (CrossfadeAlpha < 1.0f && PrevAnim) { - UE_LOG(LogPS_AI_ConvAgent_BodyExpr, Log, - TEXT("Body anim switch: %s → %s (%s, %s)"), - PrevAnim ? *PrevAnim->GetName() : TEXT("(none)"), - *NewAnim->GetName(), - bIsSpeaking ? 
TEXT("speaking") : TEXT("idle"), - *UEnum::GetValueAsString(ActiveEmotion)); + // Mid-crossfade: a crossfade is already in progress. + // DON'T reset CrossfadeAlpha — just swap the target animation. + // This preserves PrevAnim's contribution and avoids a visual pop. + // + // Before: Blend(PrevAnim, OldActive, alpha) e.g. 70% Prev + 30% Active + // After: Blend(PrevAnim, NewAnim, alpha) e.g. 70% Prev + 30% New + // + // The crossfade continues naturally — New fades in, Prev fades out. + // Pop is only 30% * (New@0 - OldActive@t) instead of 70% * (Prev - Active). + ActiveAnim = NewAnim; + ActivePlaybackTime = 0.0f; + // CrossfadeAlpha stays where it is — continuity + + if (bDebug && DebugVerbosity >= 1) + { + UE_LOG(LogPS_AI_ConvAgent_BodyExpr, Log, + TEXT("Body anim switch (MID-CROSSFADE α=%.2f): target → %s (%s, %s)"), + CrossfadeAlpha, *NewAnim->GetName(), + bIsSpeaking ? TEXT("speaking") : TEXT("idle"), + *UEnum::GetValueAsString(ActiveEmotion)); + } + } + else + { + // No crossfade in progress — normal switch with full crossfade + PrevAnim = ActiveAnim; + PrevPlaybackTime = ActivePlaybackTime; + + ActiveAnim = NewAnim; + ActivePlaybackTime = 0.0f; + CrossfadeAlpha = 0.0f; + + if (bDebug && DebugVerbosity >= 1) + { + UE_LOG(LogPS_AI_ConvAgent_BodyExpr, Log, + TEXT("Body anim switch: %s → %s (%s, %s)"), + PrevAnim ? *PrevAnim->GetName() : TEXT("(none)"), + *NewAnim->GetName(), + bIsSpeaking ? TEXT("speaking") : TEXT("idle"), + *UEnum::GetValueAsString(ActiveEmotion)); + } } } @@ -221,6 +252,8 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::OnConversationConnected( { bActive = true; bIsSpeaking = false; + LastEventName = TEXT("Connected"); + LastEventWorldTime = GetWorld() ? 
GetWorld()->GetTimeSeconds() : 0.0f; // Start with an idle anim PickAndSwitchAnim(); @@ -237,6 +270,8 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::OnConversationDisconnected( { bActive = false; bIsSpeaking = false; + LastEventName = TEXT("Disconnected"); + LastEventWorldTime = GetWorld() ? GetWorld()->GetTimeSeconds() : 0.0f; if (bDebug) { @@ -252,6 +287,8 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::OnConversationDisconnected( void UPS_AI_ConvAgent_BodyExpressionComponent::OnSpeakingStarted() { bIsSpeaking = true; + LastEventName = TEXT("SpeakStart"); + LastEventWorldTime = GetWorld() ? GetWorld()->GetTimeSeconds() : 0.0f; // Crossfade from idle anim to a speaking anim PickAndSwitchAnim(); @@ -266,6 +303,8 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::OnSpeakingStarted() void UPS_AI_ConvAgent_BodyExpressionComponent::OnSpeakingStopped() { bIsSpeaking = false; + LastEventName = TEXT("SpeakStop"); + LastEventWorldTime = GetWorld() ? GetWorld()->GetTimeSeconds() : 0.0f; // Crossfade from speaking anim to an idle anim PickAndSwitchAnim(); @@ -280,6 +319,8 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::OnSpeakingStopped() void UPS_AI_ConvAgent_BodyExpressionComponent::OnInterrupted() { bIsSpeaking = false; + LastEventName = TEXT("Interrupted"); + LastEventWorldTime = GetWorld() ? GetWorld()->GetTimeSeconds() : 0.0f; // Crossfade to idle anim PickAndSwitchAnim(); @@ -303,6 +344,9 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::OnEmotionChanged( ActiveEmotion = Emotion; ActiveEmotionIntensity = Intensity; + LastEventName = FString::Printf(TEXT("Emotion:%s"), + *UEnum::GetDisplayValueAsText(Emotion).ToString()); + LastEventWorldTime = GetWorld() ? GetWorld()->GetTimeSeconds() : 0.0f; // Pick a new anim from the appropriate list for the new emotion PickAndSwitchAnim(); @@ -358,9 +402,12 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::TickComponent( const float TargetAlpha = bActive ? 
1.0f : 0.0f; if (!FMath::IsNearlyEqual(CurrentActiveAlpha, TargetAlpha, 0.001f)) { - const float BlendSpeed = 1.0f / FMath::Max(ActivationBlendDuration, 0.01f); - CurrentActiveAlpha = FMath::FInterpConstantTo( - CurrentActiveAlpha, TargetAlpha, DeltaTime, BlendSpeed); + // Exponential ease-out: fast start, gradual approach to target. + // Factor of 3 compensates for FInterpTo's exponential decay + // reaching ~95% in ActivationBlendDuration seconds. + const float InterpSpeed = 3.0f / FMath::Max(ActivationBlendDuration, 0.01f); + CurrentActiveAlpha = FMath::FInterpTo( + CurrentActiveAlpha, TargetAlpha, DeltaTime, InterpSpeed); } else { @@ -416,6 +463,8 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::TickComponent( if (NewAnim) { SwitchToNewAnim(NewAnim, true); + LastEventName = TEXT("AutoCycle"); + LastEventWorldTime = GetWorld() ? GetWorld()->GetTimeSeconds() : 0.0f; if (bDebug && DebugVerbosity >= 2) { @@ -449,8 +498,80 @@ void UPS_AI_ConvAgent_BodyExpressionComponent::TickComponent( CurrentSnapshot.PrevAnim = PrevAnim; CurrentSnapshot.ActiveTime = ActivePlaybackTime; CurrentSnapshot.PrevTime = PrevPlaybackTime; - CurrentSnapshot.CrossfadeAlpha = CrossfadeAlpha; + // Apply SmoothStep for ease-in-out crossfade (raw alpha is linear) + CurrentSnapshot.CrossfadeAlpha = FMath::SmoothStep(0.0f, 1.0f, CrossfadeAlpha); CurrentSnapshot.ActivationAlpha = CurrentActiveAlpha; CurrentSnapshot.BlendWeight = BlendWeight; } + + // ── On-screen debug HUD ─────────────────────────────────────────────── + if (bDebug && DebugVerbosity >= 1) + { + DrawDebugHUD(); + } +} + +// ───────────────────────────────────────────────────────────────────────────── +// On-screen debug display +// ───────────────────────────────────────────────────────────────────────────── + +void UPS_AI_ConvAgent_BodyExpressionComponent::DrawDebugHUD() const +{ + if (!GEngine) return; + + const float WorldTime = GetWorld() ? 
GetWorld()->GetTimeSeconds() : 0.0f; + const float EventAge = WorldTime - LastEventWorldTime; + + // Active anim name + FString ActiveName = ActiveAnim ? ActiveAnim->GetName() : TEXT("(none)"); + FString PrevName = PrevAnim ? PrevAnim->GetName() : TEXT("---"); + + // Smoothed crossfade for display + const float SmoothedCrossfade = FMath::SmoothStep(0.0f, 1.0f, CrossfadeAlpha); + + // State label + FString StateStr; + if (!bActive) + StateStr = TEXT("INACTIVE"); + else if (bIsSpeaking) + StateStr = TEXT("SPEAKING"); + else + StateStr = TEXT("IDLE"); + + // Event label with age + FString EventStr = LastEventName.IsEmpty() + ? TEXT("---") + : FString::Printf(TEXT("%s (%.1fs ago)"), *LastEventName, EventAge); + + // Use key offset to avoid colliding with other debug messages + // Keys 2000-2010 reserved for BodyExpression + const int32 BaseKey = 2000; + const float DisplayTime = 0.0f; // Refresh every frame + const FColor MainColor = FColor::Cyan; + const FColor WarnColor = FColor::Yellow; + + GEngine->AddOnScreenDebugMessage(BaseKey, DisplayTime, MainColor, + FString::Printf(TEXT("=== BODY EXPR: %s ==="), *StateStr)); + + GEngine->AddOnScreenDebugMessage(BaseKey + 1, DisplayTime, MainColor, + FString::Printf(TEXT(" ActivationAlpha: %.3f (target: %s)"), + CurrentActiveAlpha, bActive ? TEXT("1") : TEXT("0"))); + + GEngine->AddOnScreenDebugMessage(BaseKey + 2, DisplayTime, MainColor, + FString::Printf(TEXT(" Active: %s t=%.2f"), *ActiveName, ActivePlaybackTime)); + + GEngine->AddOnScreenDebugMessage(BaseKey + 3, DisplayTime, + CrossfadeAlpha < 1.0f ? 
WarnColor : MainColor, + FString::Printf(TEXT(" Crossfade: %.3f (smooth: %.3f) Prev: %s"), + CrossfadeAlpha, SmoothedCrossfade, *PrevName)); + + GEngine->AddOnScreenDebugMessage(BaseKey + 4, DisplayTime, MainColor, + FString::Printf(TEXT(" Emotion: %s (%s) Weight: %.2f"), + *UEnum::GetDisplayValueAsText(ActiveEmotion).ToString(), + *UEnum::GetDisplayValueAsText(ActiveEmotionIntensity).ToString(), + BlendWeight)); + + GEngine->AddOnScreenDebugMessage(BaseKey + 5, DisplayTime, + EventAge < 1.0f ? FColor::Green : MainColor, + FString::Printf(TEXT(" LastEvent: %s"), *EventStr)); } diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_FacialExpressionComponent.cpp b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_FacialExpressionComponent.cpp index b9cdf11..2b36d8c 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_FacialExpressionComponent.cpp +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_FacialExpressionComponent.cpp @@ -136,6 +136,12 @@ UAnimSequence* UPS_AI_ConvAgent_FacialExpressionComponent::FindAnimForEmotion( if (!EmotionPoseMap) return nullptr; const FPS_AI_ConvAgent_EmotionPoseSet* PoseSet = EmotionPoseMap->EmotionPoses.Find(Emotion); + + // Fallback to Neutral if the requested emotion has no entry in the data asset + if (!PoseSet && Emotion != EPS_AI_ConvAgent_Emotion::Neutral) + { + PoseSet = EmotionPoseMap->EmotionPoses.Find(EPS_AI_ConvAgent_Emotion::Neutral); + } if (!PoseSet) return nullptr; // Direct match @@ -282,9 +288,10 @@ void UPS_AI_ConvAgent_FacialExpressionComponent::TickComponent( const float TargetAlpha = bActive ? 
1.0f : 0.0f; if (!FMath::IsNearlyEqual(CurrentActiveAlpha, TargetAlpha, 0.001f)) { - const float BlendSpeed = 1.0f / FMath::Max(ActivationBlendDuration, 0.01f); - CurrentActiveAlpha = FMath::FInterpConstantTo( - CurrentActiveAlpha, TargetAlpha, DeltaTime, BlendSpeed); + // Exponential ease-out: fast start, gradual approach to target. + const float InterpSpeed = 3.0f / FMath::Max(ActivationBlendDuration, 0.01f); + CurrentActiveAlpha = FMath::FInterpTo( + CurrentActiveAlpha, TargetAlpha, DeltaTime, InterpSpeed); } else { @@ -356,13 +363,16 @@ void UPS_AI_ConvAgent_FacialExpressionComponent::TickComponent( for (const auto& P : ActiveCurves) AllCurves.Add(P.Key); for (const auto& P : PrevCurves) AllCurves.Add(P.Key); + // Apply SmoothStep for ease-in-out crossfade (raw alpha is linear) + const float SmoothedCrossfade = FMath::SmoothStep(0.0f, 1.0f, CrossfadeAlpha); + for (const FName& CurveName : AllCurves) { const float PrevVal = PrevCurves.Contains(CurveName) ? PrevCurves[CurveName] : 0.0f; const float ActiveVal = ActiveCurves.Contains(CurveName) ? ActiveCurves[CurveName] : 0.0f; - const float Blended = FMath::Lerp(PrevVal, ActiveVal, CrossfadeAlpha); + const float Blended = FMath::Lerp(PrevVal, ActiveVal, SmoothedCrossfade); // Always include the curve even at 0 — the AnimNode needs // to see it to block upstream values from popping through. 
diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_GazeComponent.cpp b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_GazeComponent.cpp index 336d620..ded02e0 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_GazeComponent.cpp +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_GazeComponent.cpp @@ -3,7 +3,9 @@ #include "PS_AI_ConvAgent_GazeComponent.h" #include "PS_AI_ConvAgent_ElevenLabsComponent.h" #include "Components/SkeletalMeshComponent.h" +#include "Camera/CameraComponent.h" #include "GameFramework/Actor.h" +#include "GameFramework/Pawn.h" #include "Math/UnrealMathUtility.h" #include "DrawDebugHelpers.h" @@ -38,7 +40,8 @@ static const FName TargetHeadBone(TEXT("head")); * bAutoTargetEyes = true: * 1. Try eye bones (FACIAL_L_Eye / FACIAL_R_Eye midpoint) on the target's Face mesh. * 2. Fallback to "head" bone on any skeletal mesh. - * 3. Fallback to TargetActor origin + (0, 0, FallbackEyeHeight). + * 3. Fallback to CameraComponent location (first-person pawn). + * 4. Fallback to TargetActor origin + (0, 0, FallbackEyeHeight) for non-camera actors. * * bAutoTargetEyes = false: * Always returns TargetActor origin + TargetOffset. @@ -76,7 +79,15 @@ static FVector ResolveTargetPosition(const AActor* Target, bool bAutoEyes, { } } - // No skeleton — use FallbackEyeHeight + // Fallback: CameraComponent — the canonical eye position for + // first-person pawns and any actor with an active camera. + if (const UCameraComponent* Cam = + const_cast<AActor*>(Target)->FindComponentByClass<UCameraComponent>()) + { + return Cam->GetComponentLocation(); + } + + // Final fallback: actor origin + height offset return Target->GetActorLocation() + FVector(0.0f, 0.0f, FallbackHeight); } @@ -335,9 +346,10 @@ void UPS_AI_ConvAgent_GazeComponent::TickComponent( const float TargetAlpha = bActive ? 
1.0f : 0.0f; if (!FMath::IsNearlyEqual(CurrentActiveAlpha, TargetAlpha, 0.001f)) { - const float BlendSpeed = 1.0f / FMath::Max(ActivationBlendDuration, 0.01f); - CurrentActiveAlpha = FMath::FInterpConstantTo( - CurrentActiveAlpha, TargetAlpha, SafeDeltaTime, BlendSpeed); + // Exponential ease-out: fast start, gradual approach to target. + const float InterpSpeed = 3.0f / FMath::Max(ActivationBlendDuration, 0.01f); + CurrentActiveAlpha = FMath::FInterpTo( + CurrentActiveAlpha, TargetAlpha, SafeDeltaTime, InterpSpeed); } else { diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_InteractionComponent.cpp b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_InteractionComponent.cpp index acc11d7..edff176 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_InteractionComponent.cpp +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_InteractionComponent.cpp @@ -476,6 +476,37 @@ void UPS_AI_ConvAgent_InteractionComponent::StartConversationWithSelectedAgent() } } +void UPS_AI_ConvAgent_InteractionComponent::SendTextToSelectedAgent(const FString& Text) +{ + UPS_AI_ConvAgent_ElevenLabsComponent* Agent = SelectedAgent.Get(); + if (!Agent) + { + if (bDebug) + { + UE_LOG(LogPS_AI_ConvAgent_Select, Warning, + TEXT("SendTextToSelectedAgent: no agent selected.")); + } + return; + } + + // Route through relay on clients (can't call Server RPCs on NPC actors). + if (GetOwnerRole() == ROLE_Authority || (GetWorld() && GetWorld()->GetNetMode() == NM_Standalone)) + { + Agent->SendTextMessage(Text); + } + else + { + ServerRelaySendText(Agent->GetOwner(), Text); + } + + if (bDebug) + { + UE_LOG(LogPS_AI_ConvAgent_Select, Log, + TEXT("SendTextToSelectedAgent: \"%s\" → %s"), + *Text, Agent->GetOwner() ? 
*Agent->GetOwner()->GetName() : TEXT("(null)")); + } +} + // ───────────────────────────────────────────────────────────────────────────── // Gaze helpers // ───────────────────────────────────────────────────────────────────────────── diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_LipSyncComponent.cpp b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_LipSyncComponent.cpp index 550829f..0d8c489 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_LipSyncComponent.cpp +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Private/PS_AI_ConvAgent_LipSyncComponent.cpp @@ -702,9 +702,10 @@ void UPS_AI_ConvAgent_LipSyncComponent::TickComponent(float DeltaTime, ELevelTic const float TargetAlpha = bActive ? 1.0f : 0.0f; if (!FMath::IsNearlyEqual(CurrentActiveAlpha, TargetAlpha, 0.001f)) { - const float BlendSpeed = 1.0f / FMath::Max(ActivationBlendDuration, 0.01f); - CurrentActiveAlpha = FMath::FInterpConstantTo( - CurrentActiveAlpha, TargetAlpha, DeltaTime, BlendSpeed); + // Exponential ease-out: fast start, gradual approach to target. + const float InterpSpeed = 3.0f / FMath::Max(ActivationBlendDuration, 0.01f); + CurrentActiveAlpha = FMath::FInterpTo( + CurrentActiveAlpha, TargetAlpha, DeltaTime, InterpSpeed); } else { @@ -712,6 +713,24 @@ void UPS_AI_ConvAgent_LipSyncComponent::TickComponent(float DeltaTime, ELevelTic } } + // ── Smooth speech blend ──────────────────────────────────────────────── + // Fades lip sync curves in/out when speech starts/stops. + // When not speaking, lip sync releases all mouth curves so emotion + // facial expression (from FacialExpressionComponent) controls the mouth. + { + const float TargetSpeech = bIsSpeaking ? 
1.0f : 0.0f; + if (!FMath::IsNearlyEqual(SpeechBlendAlpha, TargetSpeech, 0.001f)) + { + const float SpeechSpeed = 3.0f / FMath::Max(SpeechBlendDuration, 0.01f); + SpeechBlendAlpha = FMath::FInterpTo( + SpeechBlendAlpha, TargetSpeech, DeltaTime, SpeechSpeed); + } + else + { + SpeechBlendAlpha = TargetSpeech; + } + } + // ── Lazy binding: in packaged builds, BeginPlay may run before the ──────── // ElevenLabsComponent is fully initialized. Retry discovery until bound. if (!AgentComponent.IsValid()) @@ -858,6 +877,12 @@ void UPS_AI_ConvAgent_LipSyncComponent::TickComponent(float DeltaTime, ELevelTic { bVisemeTimelineActive = false; + // Fallback: mark as not speaking when timeline ends + if (bIsSpeaking && AudioEnvelopeValue < 0.01f) + { + bIsSpeaking = false; + } + if (AccumulatedText.Len() > 0 && bTextVisemesApplied && bFullTextReceived) { AccumulatedText.Reset(); @@ -964,6 +989,14 @@ void UPS_AI_ConvAgent_LipSyncComponent::TickComponent(float DeltaTime, ELevelTic TargetVisemes.FindOrAdd(FName("sil")) = 1.0f; PlaybackTimer = 0.0f; + // Fallback: if the queue has been dry and envelope is near zero, + // mark as not speaking. OnAgentStopped should have already done this, + // but this handles edge cases where the event doesn't fire. + if (bIsSpeaking && AudioEnvelopeValue < 0.01f) + { + bIsSpeaking = false; + } + if (AccumulatedText.Len() > 0 && bTextVisemesApplied && bFullTextReceived) { AccumulatedText.Reset(); @@ -1052,6 +1085,24 @@ void UPS_AI_ConvAgent_LipSyncComponent::TickComponent(float DeltaTime, ELevelTic } } + // ── Apply speech blend alpha ───────────────────────────────────────── + // When the agent is NOT speaking, fade out all lip sync curves so the + // FacialExpressionComponent's emotion curves (including mouth expressions) + // can flow through unobstructed. Without this, lip sync zeros on mouth + // curves (via the AnimNode → mh_arkit_mapping_pose) would overwrite the + // emotion mouth curves even during silence. 
+ if (SpeechBlendAlpha < 0.001f) + { + CurrentBlendshapes.Reset(); + } + else if (SpeechBlendAlpha < 0.999f) + { + for (auto& Pair : CurrentBlendshapes) + { + Pair.Value *= SpeechBlendAlpha; + } + } + // Auto-apply morph targets if a target mesh is set if (TargetMesh) { @@ -1090,6 +1141,10 @@ void UPS_AI_ConvAgent_LipSyncComponent::OnAgentStopped() VisemeTimeline.Reset(); VisemeTimelineCursor = 0.0f; TotalActiveFramesSeen = 0; + + // Speech ended — SpeechBlendAlpha will smoothly fade out lip sync curves, + // allowing FacialExpression emotion curves to take over the mouth. + bIsSpeaking = false; } void UPS_AI_ConvAgent_LipSyncComponent::ResetToNeutral() { @@ -1113,6 +1168,11 @@ void UPS_AI_ConvAgent_LipSyncComponent::ResetToNeutral() VisemeTimelineCursor = 0.0f; TotalActiveFramesSeen = 0; + // Speech interrupted — snap speech blend to 0 immediately + // so emotion curves take over the mouth right away. + bIsSpeaking = false; + SpeechBlendAlpha = 0.0f; + // Snap all visemes to silence immediately (no smoothing delay) for (const FName& Name : VisemeNames) { @@ -1136,6 +1196,10 @@ void UPS_AI_ConvAgent_LipSyncComponent::OnAudioChunkReceived(const TArray<uint8>& PCMData) { if (!SpectrumAnalyzer) return; + // Mark as speaking — audio is arriving from the agent. + // SpeechBlendAlpha will smoothly fade in lip sync curves. 
+ bIsSpeaking = true; + // Convert int16 PCM to float32 [-1, 1] const int16* Samples = reinterpret_cast<const int16*>(PCMData.GetData()); const int32 NumSamples = PCMData.Num() / sizeof(int16); diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/AnimNode_PS_AI_ConvAgent_BodyExpression.h b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/AnimNode_PS_AI_ConvAgent_BodyExpression.h index 9ee199d..8bff66e 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/AnimNode_PS_AI_ConvAgent_BodyExpression.h +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/AnimNode_PS_AI_ConvAgent_BodyExpression.h @@ -16,10 +16,13 @@ class UAnimSequence; * AnimSequences from the PS_AI_ConvAgent_BodyExpressionComponent and blends * them per-bone onto the upstream pose (idle, locomotion). * - * Two modes: - * - bUpperBodyOnly = true (default): only bones at and below BlendRootBone - * are blended; lower body passes through from the upstream pose. - * - bUpperBodyOnly = false: the emotion pose is applied to the entire skeleton. + * The node uses Override mode: it replaces (lerps) the upstream pose toward + * the expression pose. Use ExcludeBones (default: neck_01) to prevent + * conflicts with Gaze/Posture on neck/head bones. + * + * Region modes: + * - bUpperBodyOnly = true (default): only bones at and below BlendRootBone. + * - bUpperBodyOnly = false: full skeleton. * * Graph layout: * [Upstream body anims (idle, locomotion)] → [PS AI ConvAgent Body Expression] → [Output] @@ -50,6 +53,13 @@ struct PS_AI_CONVAGENT_API FAnimNode_PS_AI_ConvAgent_BodyExpression : public FAn ToolTip = "Root bone for upper body blend.\nAll descendants of this bone are blended with the emotion pose.\nDefault: spine_02 (arms, spine, neck, head).")) FName BlendRootBone = FName(TEXT("spine_02")); + /** Bones to EXCLUDE from body expression blending (each bone and its entire subtree). 
+ * Prevents conflicts with Gaze/Posture on neck/head bones. + * Default: neck_01 (excludes neck + head, leaving them to Gaze/Posture). */ + UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Settings", + meta = (ToolTip = "Exclude these bones and their subtrees from blending.\nDefault: neck_01 prevents conflicts with Gaze/Posture on neck/head.")) + TArray<FName> ExcludeBones = { FName(TEXT("neck_01")) }; + // ── FAnimNode_Base interface ────────────────────────────────────────────── virtual void Initialize_AnyThread(const FAnimationInitializeContext& Context) override; diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_BodyExpressionComponent.h b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_BodyExpressionComponent.h index 9e081a3..9c8962f 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_BodyExpressionComponent.h +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_BodyExpressionComponent.h @@ -232,4 +232,15 @@ private: /** Cached reference to the agent component on the same Actor. */ TWeakObjectPtr<UPS_AI_ConvAgent_ElevenLabsComponent> AgentComponent; + + // ── Debug event tracking ──────────────────────────────────────────────── + + /** Last event name (for on-screen debug display). */ + FString LastEventName; + + /** World time when the last event fired. */ + float LastEventWorldTime = 0.0f; + + /** Draw on-screen debug info (called from TickComponent when bDebug). 
*/ + void DrawDebugHUD() const; }; diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_GazeComponent.h b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_GazeComponent.h index b791e51..ee0de0e 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_GazeComponent.h +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_GazeComponent.h @@ -95,17 +95,18 @@ public: /** Automatically aim at the target's eye bones (MetaHuman FACIAL_L_Eye / FACIAL_R_Eye). * When enabled, TargetOffset is ignored and the agent looks at the midpoint * between the target pawn's eye bones. - * Fallback chain: eye bones → head bone → ActorOrigin + (0,0,FallbackEyeHeight). */ + * Fallback chain: eye bones → head bone → PawnViewLocation → FallbackEyeHeight. */ UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "PS AI ConvAgent|Gaze", - meta = (ToolTip = "Auto-target the pawn's eye bones for eye contact.\nFallback: eye bones > head bone > FallbackEyeHeight.")) + meta = (ToolTip = "Auto-target the pawn's eye bones for eye contact.\nFallback: eye bones > head bone > PawnViewLocation > FallbackEyeHeight.")) bool bAutoTargetEyes = true; - /** Height offset (cm) from the target actor's origin when no eye/head bones are found. - * Used as fallback when bAutoTargetEyes is true but the target has no skeleton - * (e.g. first-person pawn, simple actor). 160 ≈ eye height for a standing human. */ + /** Height offset (cm) from the target actor's origin when no eye/head bones are found + * AND the target is not a Pawn. For Pawns without skeleton, GetPawnViewLocation() + * is used instead (accounts for BaseEyeHeight automatically). + * Only applies to non-Pawn actors (props, triggers, etc.). 
*/ UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "PS AI ConvAgent|Gaze", meta = (EditCondition = "bAutoTargetEyes", ClampMin = "0", - ToolTip = "Height offset (cm) when no eye/head bones exist on the target.\n160 = standing human eye level.\nOnly used as last-resort fallback.")) + ToolTip = "Height offset (cm) for non-Pawn targets without skeleton.\nPawns use BaseEyeHeight automatically.\nOnly used as last-resort fallback for non-Pawn actors.")) float FallbackEyeHeight = 160.0f; /** Offset from the target actor's origin to aim at. diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_InteractionComponent.h b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_InteractionComponent.h index 19ce726..1e63c64 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_InteractionComponent.h +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_InteractionComponent.h @@ -184,6 +184,13 @@ public: UFUNCTION(BlueprintCallable, Category = "PS AI ConvAgent|Interaction") void StartConversationWithSelectedAgent(); + /** Send a text message to the currently selected agent. + * The agent responds with audio and text, just as if it heard the player speak. + * Handles network relay automatically (works on both server and client). + * Does nothing if no agent is selected or not connected. */ + UFUNCTION(BlueprintCallable, Category = "PS AI ConvAgent|Interaction") + void SendTextToSelectedAgent(const FString& Text); + /** Clear the current selection. Automatic selection resumes next tick. 
*/ UFUNCTION(BlueprintCallable, Category = "PS AI ConvAgent|Interaction") void ClearSelection(); diff --git a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_LipSyncComponent.h b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_LipSyncComponent.h index c12a394..85997b1 100644 --- a/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_LipSyncComponent.h +++ b/Unreal/PS_AI_Agent/Plugins/PS_AI_ConvAgent/Source/PS_AI_ConvAgent/Public/PS_AI_ConvAgent_LipSyncComponent.h @@ -89,6 +89,17 @@ public: ToolTip = "Smoothing speed for viseme transitions.\n35 = smooth and soft, 50 = balanced, 65 = sharp and responsive.")) float SmoothingSpeed = 50.0f; + // ── Speech Blend ──────────────────────────────────────────────────────── + + /** How long (seconds) to blend lip sync curves in/out when speech starts/stops. + * When the agent is NOT speaking, lip sync releases all mouth curves so the + * FacialExpressionComponent's emotion curves (including mouth) can show through. + * When the agent starts speaking, lip sync smoothly takes over mouth curves. */ + UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "PS AI ConvAgent|LipSync", + meta = (ClampMin = "0.05", ClampMax = "1.0", + ToolTip = "Blend duration when speech starts/stops.\nDuring silence, emotion facial curves control the mouth.\nDuring speech, lip sync takes over.")) + float SpeechBlendDuration = 0.15f; + // ── Emotion Expression Blend ───────────────────────────────────────────── /** How much facial emotion (from PS_AI_ConvAgent_FacialExpressionComponent) bleeds through @@ -163,6 +174,15 @@ public: UFUNCTION(BlueprintCallable, Category = "PS AI ConvAgent|LipSync") TMap<FName, float> GetCurrentBlendshapes() const { return CurrentBlendshapes; } + /** True when the agent is currently producing speech audio. + * When false, lip sync releases mouth curves to let emotion curves through. 
*/ + UFUNCTION(BlueprintPure, Category = "PS AI ConvAgent|LipSync") + bool IsSpeaking() const { return bIsSpeaking; } + + /** Current speech blend alpha (0 = silent/emotion mouth, 1 = lip sync mouth). + * Smooth transition between silence and speech states. */ + float GetSpeechBlendAlpha() const { return SpeechBlendAlpha; } + // ── UActorComponent overrides ───────────────────────────────────────────── virtual void BeginPlay() override; virtual void EndPlay(const EEndPlayReason::Type EndPlayReason) override; @@ -258,6 +278,14 @@ private: // Current blend alpha (0 = fully inactive/passthrough, 1 = fully active). float CurrentActiveAlpha = 1.0f; + // True when the agent is currently producing speech audio. + // Set true in OnAudioChunkReceived, false in OnAgentStopped/OnAgentInterrupted. + bool bIsSpeaking = false; + + // Smooth blend alpha for speech state: 0 = not speaking (emotion controls mouth), + // 1 = speaking (lip sync controls mouth). Interpolated each tick. + float SpeechBlendAlpha = 0.0f; + // MetaHuman mode: Face mesh has no morph targets, use animation curves instead. // Set automatically in BeginPlay when TargetMesh has 0 morph targets. bool bUseCurveMode = false;