阶段 5
This commit is contained in:
parent
ff9ee1291f
commit
e5851795c7
|
|
@ -0,0 +1,3 @@
|
|||
fileFormatVersion: 2
|
||||
guid: 2f6e8be9b1a348c388f73388a3821247
|
||||
timeCreated: 1774579341
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
using System;
|
||||
|
||||
namespace Network.NetworkApplication
{
    /// <summary>
    /// High-level lifecycle state of a network session, from raw transport
    /// connectivity through login and automatic reconnection.
    /// </summary>
    public enum ConnectionState
    {
        /// <summary>No transport link is established.</summary>
        Disconnected = 0,
        /// <summary>The transport is connected but the session has not logged in yet.</summary>
        TransportConnected = 1,
        /// <summary>A login attempt has started and its outcome is still pending.</summary>
        LoginPending = 2,
        /// <summary>Login succeeded; the session is fully established.</summary>
        LoggedIn = 3,
        /// <summary>The most recent login attempt failed.</summary>
        LoginFailed = 4,
        /// <summary>Liveness was lost (heartbeat timeout) while connected.</summary>
        TimedOut = 5,
        /// <summary>A reconnect is scheduled but its delay has not yet elapsed.</summary>
        ReconnectPending = 6,
        /// <summary>A reconnect attempt is in progress.</summary>
        Reconnecting = 7,
    }
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
fileFormatVersion: 2
|
||||
guid: 4db1cb66c3f181d43b233641c5c42b0d
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {instanceID: 0}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
using System;
|
||||
using System.Net;
|
||||
|
||||
namespace Network.NetworkApplication
{
    /// <summary>
    /// Immutable pairing of a remote peer address with the
    /// <see cref="SessionManager"/> that tracks that peer's lifecycle.
    /// </summary>
    public sealed class ManagedNetworkSession
    {
        /// <summary>Address of the remote peer this session belongs to.</summary>
        public IPEndPoint RemoteEndPoint { get; }

        /// <summary>Lifecycle state machine tracking this peer.</summary>
        public SessionManager SessionManager { get; }

        /// <summary>
        /// Creates a session wrapper for the given peer.
        /// </summary>
        /// <param name="remoteEndPoint">Remote peer address; must not be null.</param>
        /// <param name="sessionManager">State machine for the peer; must not be null.</param>
        /// <exception cref="ArgumentNullException">Either argument is null.</exception>
        public ManagedNetworkSession(IPEndPoint remoteEndPoint, SessionManager sessionManager)
        {
            if (remoteEndPoint == null)
            {
                throw new ArgumentNullException(nameof(remoteEndPoint));
            }

            if (sessionManager == null)
            {
                throw new ArgumentNullException(nameof(sessionManager));
            }

            RemoteEndPoint = remoteEndPoint;
            SessionManager = sessionManager;
        }
    }
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
fileFormatVersion: 2
|
||||
guid: 41dcf02596d3304418a07f265ae8f525
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {instanceID: 0}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
using System;
|
||||
using System.Net;
|
||||
|
||||
namespace Network.NetworkApplication
{
    /// <summary>
    /// A single session lifecycle event tagged with the peer it originated from,
    /// as published by <see cref="MultiSessionManager.LifecycleChanged"/>.
    /// </summary>
    public sealed class MultiSessionLifecycleEvent
    {
        /// <summary>Address of the peer whose session raised the event.</summary>
        public IPEndPoint RemoteEndPoint { get; }

        /// <summary>State machine of the peer whose session raised the event.</summary>
        public SessionManager SessionManager { get; }

        /// <summary>The underlying per-session lifecycle event.</summary>
        public SessionLifecycleEvent LifecycleEvent { get; }

        /// <summary>
        /// Creates a peer-tagged lifecycle event.
        /// </summary>
        /// <param name="remoteEndPoint">Originating peer address; must not be null.</param>
        /// <param name="sessionManager">Originating session state machine; must not be null.</param>
        /// <param name="lifecycleEvent">The raw session event; must not be null.</param>
        /// <exception cref="ArgumentNullException">Any argument is null.</exception>
        public MultiSessionLifecycleEvent(
            IPEndPoint remoteEndPoint,
            SessionManager sessionManager,
            SessionLifecycleEvent lifecycleEvent)
        {
            if (remoteEndPoint == null)
            {
                throw new ArgumentNullException(nameof(remoteEndPoint));
            }

            if (sessionManager == null)
            {
                throw new ArgumentNullException(nameof(sessionManager));
            }

            if (lifecycleEvent == null)
            {
                throw new ArgumentNullException(nameof(lifecycleEvent));
            }

            RemoteEndPoint = remoteEndPoint;
            SessionManager = sessionManager;
            LifecycleEvent = lifecycleEvent;
        }
    }
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
fileFormatVersion: 2
|
||||
guid: 82ee5b8b86486b04eab6d76cc6859cc5
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {instanceID: 0}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
||||
|
|
@ -0,0 +1,238 @@
|
|||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Linq;
|
||||
using System.Net;
|
||||
|
||||
namespace Network.NetworkApplication
{
    /// <summary>
    /// Thread-safe registry that maintains one <see cref="SessionManager"/> per remote
    /// end point and re-publishes each session's lifecycle events, tagged with the
    /// originating peer, through a single <see cref="LifecycleChanged"/> event.
    /// </summary>
    public sealed class MultiSessionManager
    {
        // Guards every access to the sessions dictionary.
        private readonly object gate = new();
        // Keyed by the normalized "address:port" string of the remote end point.
        private readonly Dictionary<string, SessionRegistration> sessions = new();
        // Policy applied to every session created by this manager.
        private readonly SessionReconnectPolicy reconnectPolicy;
        // Injectable clock forwarded to each session; defaults to DateTimeOffset.UtcNow.
        private readonly Func<DateTimeOffset> utcNowProvider;

        /// <summary>
        /// Creates a multi-session manager.
        /// </summary>
        /// <param name="reconnectPolicy">Timing policy for all sessions; defaults to <see cref="SessionReconnectPolicy.Default"/>.</param>
        /// <param name="utcNowProvider">Clock override for tests; defaults to the system UTC clock.</param>
        public MultiSessionManager(
            SessionReconnectPolicy reconnectPolicy = null,
            Func<DateTimeOffset> utcNowProvider = null)
        {
            this.reconnectPolicy = reconnectPolicy ?? SessionReconnectPolicy.Default;
            this.utcNowProvider = utcNowProvider ?? (() => DateTimeOffset.UtcNow);
        }

        /// <summary>Raised for every lifecycle event of every managed session.</summary>
        public event Action<MultiSessionLifecycleEvent> LifecycleChanged;

        /// <summary>Number of currently registered sessions.</summary>
        public int SessionCount
        {
            get
            {
                lock (gate)
                {
                    return sessions.Count;
                }
            }
        }

        /// <summary>Snapshot of all managed sessions (copied under the lock).</summary>
        public IReadOnlyList<ManagedNetworkSession> Sessions
        {
            get
            {
                lock (gate)
                {
                    return sessions.Values
                        .Select(registration => registration.Session)
                        .ToArray();
                }
            }
        }

        /// <summary>Returns the session for the peer, creating and registering one if absent.</summary>
        public ManagedNetworkSession GetOrCreateSession(IPEndPoint remoteEndPoint)
        {
            return GetOrCreateRegistration(remoteEndPoint).Session;
        }

        /// <summary>Looks up an existing session for the peer without creating one.</summary>
        public bool TryGetSession(IPEndPoint remoteEndPoint, out ManagedNetworkSession session)
        {
            var key = BuildKey(remoteEndPoint);

            lock (gate)
            {
                if (sessions.TryGetValue(key, out var registration))
                {
                    session = registration.Session;
                    return true;
                }
            }

            session = null;
            return false;
        }

        /// <summary>Looks up the state machine of an existing session without creating one.</summary>
        public bool TryGetSessionManager(IPEndPoint remoteEndPoint, out SessionManager sessionManager)
        {
            if (TryGetSession(remoteEndPoint, out var session))
            {
                sessionManager = session.SessionManager;
                return true;
            }

            sessionManager = null;
            return false;
        }

        /// <summary>
        /// Records inbound transport activity for the peer; a disconnected session
        /// is first promoted to TransportConnected before liveness is updated.
        /// </summary>
        public void ObserveTransportActivity(IPEndPoint remoteEndPoint)
        {
            var sessionManager = GetOrCreateSession(remoteEndPoint).SessionManager;

            if (sessionManager.State == ConnectionState.Disconnected)
            {
                sessionManager.NotifyTransportConnected();
            }

            sessionManager.NotifyInboundActivity();
        }

        /// <summary>Marks the peer's transport as connected (creates the session if absent).</summary>
        public void NotifyTransportConnected(IPEndPoint remoteEndPoint)
        {
            GetOrCreateSession(remoteEndPoint).SessionManager.NotifyTransportConnected();
        }

        /// <summary>Marks the peer's login as started (creates the session if absent).</summary>
        public void NotifyLoginStarted(IPEndPoint remoteEndPoint)
        {
            GetOrCreateSession(remoteEndPoint).SessionManager.NotifyLoginStarted();
        }

        /// <summary>Marks the peer's login as succeeded (creates the session if absent).</summary>
        public void NotifyLoginSucceeded(IPEndPoint remoteEndPoint)
        {
            GetOrCreateSession(remoteEndPoint).SessionManager.NotifyLoginSucceeded();
        }

        /// <summary>Marks the peer's login as failed with an optional reason.</summary>
        public void NotifyLoginFailed(IPEndPoint remoteEndPoint, string reason = null)
        {
            GetOrCreateSession(remoteEndPoint).SessionManager.NotifyLoginFailed(reason);
        }

        /// <summary>Records a heartbeat sent to the peer.</summary>
        public void NotifyHeartbeatSent(IPEndPoint remoteEndPoint)
        {
            GetOrCreateSession(remoteEndPoint).SessionManager.NotifyHeartbeatSent();
        }

        /// <summary>Records a heartbeat response received from the peer.</summary>
        public void NotifyHeartbeatReceived(IPEndPoint remoteEndPoint, long? serverTick = null)
        {
            GetOrCreateSession(remoteEndPoint).SessionManager.NotifyHeartbeatReceived(serverTick);
        }

        /// <summary>Records generic inbound activity (liveness) for the peer.</summary>
        public void NotifyInboundActivity(IPEndPoint remoteEndPoint)
        {
            GetOrCreateSession(remoteEndPoint).SessionManager.NotifyInboundActivity();
        }

        /// <summary>
        /// Removes the peer's session, notifying a final disconnect and then
        /// unsubscribing the forwarding handler. Returns false if not registered.
        /// </summary>
        public bool RemoveSession(IPEndPoint remoteEndPoint, string reason = null)
        {
            SessionRegistration registration;
            var key = BuildKey(remoteEndPoint);

            lock (gate)
            {
                if (!sessions.TryGetValue(key, out registration))
                {
                    return false;
                }

                sessions.Remove(key);
            }

            // Notify before unsubscribing so the Disconnected event still reaches listeners.
            registration.Session.SessionManager.NotifyTransportDisconnected(reason);
            registration.Session.SessionManager.LifecycleChanged -= registration.Handler;
            return true;
        }

        /// <summary>Removes every session, notifying each of the disconnect reason.</summary>
        public void RemoveAllSessions(string reason = null)
        {
            SessionRegistration[] registrations;

            lock (gate)
            {
                registrations = sessions.Values.ToArray();
                sessions.Clear();
            }

            foreach (var registration in registrations)
            {
                registration.Session.SessionManager.NotifyTransportDisconnected(reason);
                registration.Session.SessionManager.LifecycleChanged -= registration.Handler;
            }
        }

        /// <summary>
        /// Runs timeout/reconnect evaluation on every session. The snapshot is taken
        /// under the lock but Evaluate runs outside it, so event handlers cannot deadlock.
        /// </summary>
        public void UpdateLifecycle()
        {
            SessionManager[] activeSessions;

            lock (gate)
            {
                activeSessions = sessions.Values
                    .Select(registration => registration.Session.SessionManager)
                    .ToArray();
            }

            foreach (var session in activeSessions)
            {
                session.Evaluate();
            }
        }

        // Looks up or creates the registration (session + forwarding handler) for a peer.
        private SessionRegistration GetOrCreateRegistration(IPEndPoint remoteEndPoint)
        {
            var normalizedEndPoint = Normalize(remoteEndPoint);
            var key = normalizedEndPoint.ToString();

            lock (gate)
            {
                if (sessions.TryGetValue(key, out var registration))
                {
                    return registration;
                }

                var sessionManager = new SessionManager(reconnectPolicy, utcNowProvider);
                var session = new ManagedNetworkSession(normalizedEndPoint, sessionManager);
                // Forwards per-session events as peer-tagged multi-session events.
                Action<SessionLifecycleEvent> handler = lifecycleEvent =>
                    LifecycleChanged?.Invoke(new MultiSessionLifecycleEvent(session.RemoteEndPoint, session.SessionManager, lifecycleEvent));

                sessionManager.LifecycleChanged += handler;

                registration = new SessionRegistration(session, handler);
                sessions.Add(key, registration);
                return registration;
            }
        }

        // Builds the dictionary key for a peer from its normalized end point.
        private static string BuildKey(IPEndPoint remoteEndPoint)
        {
            return Normalize(remoteEndPoint).ToString();
        }

        // Defensive copy so callers cannot mutate the end point stored in the registry.
        private static IPEndPoint Normalize(IPEndPoint remoteEndPoint)
        {
            if (remoteEndPoint == null)
            {
                throw new ArgumentNullException(nameof(remoteEndPoint));
            }

            return new IPEndPoint(remoteEndPoint.Address, remoteEndPoint.Port);
        }

        // Pairs a session with the event handler that forwards its lifecycle events,
        // so the handler can be unsubscribed when the session is removed.
        private sealed class SessionRegistration
        {
            public SessionRegistration(ManagedNetworkSession session, Action<SessionLifecycleEvent> handler)
            {
                Session = session;
                Handler = handler;
            }

            public ManagedNetworkSession Session { get; }

            public Action<SessionLifecycleEvent> Handler { get; }
        }
    }
}
|
||||
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
fileFormatVersion: 2
|
||||
guid: 607451432d1e802409107e2a816b3587
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {instanceID: 0}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
namespace Network.NetworkApplication
{
    /// <summary>
    /// Kind of lifecycle event carried by a <see cref="SessionLifecycleEvent"/>.
    /// </summary>
    public enum SessionEventKind
    {
        /// <summary>Transport layer connected.</summary>
        TransportConnected = 0,
        /// <summary>A login attempt started.</summary>
        LoginStarted = 1,
        /// <summary>Login completed successfully.</summary>
        LoginSucceeded = 2,
        /// <summary>Login was rejected or failed.</summary>
        LoginFailed = 3,
        /// <summary>A heartbeat was sent to the peer.</summary>
        HeartbeatSent = 4,
        /// <summary>A heartbeat response was received from the peer.</summary>
        HeartbeatReceived = 5,
        /// <summary>Liveness was lost (heartbeat timeout).</summary>
        TimedOut = 6,
        /// <summary>A reconnect was scheduled for a later time.</summary>
        ReconnectScheduled = 7,
        /// <summary>A scheduled reconnect attempt began.</summary>
        ReconnectStarted = 8,
        /// <summary>The transport disconnected.</summary>
        Disconnected = 9,
    }
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
fileFormatVersion: 2
|
||||
guid: f9f3ec0c5a33d23478a9b1848bd05431
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {instanceID: 0}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
using System;
|
||||
|
||||
namespace Network.NetworkApplication
{
    /// <summary>
    /// Immutable record of a single session lifecycle event: what happened,
    /// the state transition it produced, when it occurred, and an optional reason.
    /// </summary>
    public sealed class SessionLifecycleEvent
    {
        /// <summary>What kind of event occurred.</summary>
        public SessionEventKind Kind { get; }

        /// <summary>State before the event (equals <see cref="CurrentState"/> for non-transitions).</summary>
        public ConnectionState PreviousState { get; }

        /// <summary>State after the event.</summary>
        public ConnectionState CurrentState { get; }

        /// <summary>UTC timestamp at which the event occurred.</summary>
        public DateTimeOffset OccurredAtUtc { get; }

        /// <summary>Optional human-readable reason (e.g. a failure cause); may be null.</summary>
        public string Reason { get; }

        /// <summary>
        /// Creates a lifecycle event record.
        /// </summary>
        /// <param name="kind">Kind of event.</param>
        /// <param name="previousState">State before the event.</param>
        /// <param name="currentState">State after the event.</param>
        /// <param name="occurredAtUtc">UTC timestamp of the event.</param>
        /// <param name="reason">Optional reason text; may be null.</param>
        public SessionLifecycleEvent(
            SessionEventKind kind,
            ConnectionState previousState,
            ConnectionState currentState,
            DateTimeOffset occurredAtUtc,
            string reason = null)
        {
            Kind = kind;
            PreviousState = previousState;
            CurrentState = currentState;
            OccurredAtUtc = occurredAtUtc;
            Reason = reason;
        }
    }
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
fileFormatVersion: 2
|
||||
guid: 549538bb90b911142b9e627bd38df213
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {instanceID: 0}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
||||
|
|
@ -0,0 +1,198 @@
|
|||
using System;
|
||||
|
||||
namespace Network.NetworkApplication
{
    /// <summary>
    /// State machine for a single network session: tracks connection/login state,
    /// heartbeat liveness, round-trip time, and reconnect scheduling, and raises
    /// <see cref="LifecycleChanged"/> for every notable event.
    /// NOTE(review): no internal locking is visible here — callers appear to be
    /// expected to serialize access (MultiSessionManager snapshots before calling in).
    /// </summary>
    public sealed class SessionManager
    {
        // Injectable clock so tests can control time; defaults to DateTimeOffset.UtcNow.
        private readonly Func<DateTimeOffset> utcNowProvider;
        // Last time any liveness signal (connect, login success, heartbeat, inbound data) was seen.
        private DateTimeOffset? lastLivenessUtc;
        // When the most recent heartbeat was sent; null until the first send (or after reconnect).
        private DateTimeOffset? lastHeartbeatSentUtc;
        // Scheduled reconnect time while in ReconnectPending; null otherwise.
        private DateTimeOffset? nextReconnectAtUtc;

        /// <summary>
        /// Creates a session manager starting in <see cref="ConnectionState.Disconnected"/>.
        /// </summary>
        /// <param name="reconnectPolicy">Timing policy; defaults to <see cref="SessionReconnectPolicy.Default"/>.</param>
        /// <param name="utcNowProvider">Clock override for tests; defaults to the system UTC clock.</param>
        public SessionManager(
            SessionReconnectPolicy reconnectPolicy = null,
            Func<DateTimeOffset> utcNowProvider = null)
        {
            ReconnectPolicy = reconnectPolicy ?? SessionReconnectPolicy.Default;
            this.utcNowProvider = utcNowProvider ?? (() => DateTimeOffset.UtcNow);
            State = ConnectionState.Disconnected;
        }

        /// <summary>Raised for every lifecycle event, including repeats within the same state.</summary>
        public event Action<SessionLifecycleEvent> LifecycleChanged;

        /// <summary>Current connection state.</summary>
        public ConnectionState State { get; private set; }

        /// <summary>Timing policy used for heartbeat and reconnect decisions.</summary>
        public SessionReconnectPolicy ReconnectPolicy { get; }

        /// <summary>UTC time of the most recent liveness signal, if any.</summary>
        public DateTimeOffset? LastLivenessUtc => lastLivenessUtc;

        /// <summary>UTC time the latest heartbeat was sent, if any.</summary>
        public DateTimeOffset? LastHeartbeatSentUtc => lastHeartbeatSentUtc;

        /// <summary>Scheduled reconnect time while in ReconnectPending, if any.</summary>
        public DateTimeOffset? NextReconnectAtUtc => nextReconnectAtUtc;

        /// <summary>Round-trip time measured from the last send/receive heartbeat pair, if any.</summary>
        public TimeSpan? LastRoundTripTime { get; private set; }

        /// <summary>Server tick carried by the last heartbeat response; overwritten on every receive.</summary>
        public long? LastServerTick { get; private set; }

        /// <summary>Reason attached to the most recent failure or disconnect, if any.</summary>
        public string LastFailureReason { get; private set; }

        /// <summary>Heartbeats are only sent once fully logged in.</summary>
        public bool CanSendHeartbeat => State == ConnectionState.LoggedIn;

        /// <summary>True when a scheduled reconnect's delay has elapsed.</summary>
        public bool IsReconnectDue
        {
            get
            {
                if (State != ConnectionState.ReconnectPending || nextReconnectAtUtc == null)
                {
                    return false;
                }

                return utcNowProvider() >= nextReconnectAtUtc.Value;
            }
        }

        /// <summary>
        /// True when logged in and either no heartbeat has been sent yet or the
        /// policy's heartbeat interval has elapsed since the last send.
        /// </summary>
        public bool IsHeartbeatDue
        {
            get
            {
                if (!CanSendHeartbeat)
                {
                    return false;
                }

                if (lastHeartbeatSentUtc == null)
                {
                    return true;
                }

                return utcNowProvider() - lastHeartbeatSentUtc.Value >= ReconnectPolicy.HeartbeatInterval;
            }
        }

        /// <summary>
        /// Records a fresh transport connection: resets heartbeat/reconnect/failure
        /// bookkeeping and transitions to TransportConnected.
        /// </summary>
        public void NotifyTransportConnected()
        {
            var now = utcNowProvider();
            lastLivenessUtc = now;
            lastHeartbeatSentUtc = null;
            nextReconnectAtUtc = null;
            LastFailureReason = null;
            TransitionTo(ConnectionState.TransportConnected, SessionEventKind.TransportConnected, now);
        }

        /// <summary>Transitions to LoginPending when a login attempt begins.</summary>
        public void NotifyLoginStarted()
        {
            TransitionTo(ConnectionState.LoginPending, SessionEventKind.LoginStarted, utcNowProvider());
        }

        /// <summary>Records a successful login: refreshes liveness and clears the failure reason.</summary>
        public void NotifyLoginSucceeded()
        {
            var now = utcNowProvider();
            lastLivenessUtc = now;
            LastFailureReason = null;
            TransitionTo(ConnectionState.LoggedIn, SessionEventKind.LoginSucceeded, now);
        }

        /// <summary>Records a failed login with an optional reason.</summary>
        public void NotifyLoginFailed(string reason = null)
        {
            LastFailureReason = reason;
            TransitionTo(ConnectionState.LoginFailed, SessionEventKind.LoginFailed, utcNowProvider(), reason);
        }

        /// <summary>Records an outbound heartbeat; raises HeartbeatSent without changing state.</summary>
        public void NotifyHeartbeatSent()
        {
            lastHeartbeatSentUtc = utcNowProvider();
            RaiseEvent(SessionEventKind.HeartbeatSent, State, State, lastHeartbeatSentUtc.Value);
        }

        /// <summary>
        /// Records a heartbeat response: refreshes liveness, stores the server tick,
        /// and derives round-trip time from the last send (if one was recorded).
        /// Raises HeartbeatReceived without changing state.
        /// </summary>
        public void NotifyHeartbeatReceived(long? serverTick = null)
        {
            var now = utcNowProvider();
            lastLivenessUtc = now;
            LastServerTick = serverTick;
            if (lastHeartbeatSentUtc.HasValue)
            {
                LastRoundTripTime = now - lastHeartbeatSentUtc.Value;
            }

            RaiseEvent(SessionEventKind.HeartbeatReceived, State, State, now);
        }

        /// <summary>Refreshes liveness for any inbound traffic; raises no event.</summary>
        public void NotifyInboundActivity()
        {
            lastLivenessUtc = utcNowProvider();
        }

        /// <summary>Records a transport disconnect and cancels any pending reconnect.</summary>
        public void NotifyTransportDisconnected(string reason = null)
        {
            LastFailureReason = reason;
            nextReconnectAtUtc = null;
            TransitionTo(ConnectionState.Disconnected, SessionEventKind.Disconnected, utcNowProvider(), reason);
        }

        /// <summary>
        /// Periodic driver: detects heartbeat timeouts (optionally scheduling a
        /// reconnect per policy) and promotes a due ReconnectPending to Reconnecting.
        /// </summary>
        public void Evaluate()
        {
            var now = utcNowProvider();

            if (ShouldTimeout(now))
            {
                LastFailureReason = "Heartbeat timeout";
                TransitionTo(ConnectionState.TimedOut, SessionEventKind.TimedOut, now, LastFailureReason);

                if (ReconnectPolicy.AutoReconnect)
                {
                    nextReconnectAtUtc = now + ReconnectPolicy.ReconnectDelay;
                    TransitionTo(ConnectionState.ReconnectPending, SessionEventKind.ReconnectScheduled, now, LastFailureReason);
                }

                return;
            }

            if (State == ConnectionState.ReconnectPending && nextReconnectAtUtc.HasValue && now >= nextReconnectAtUtc.Value)
            {
                TransitionTo(ConnectionState.Reconnecting, SessionEventKind.ReconnectStarted, now, LastFailureReason);
            }
        }

        // A timeout only applies while the session is live (connected / logging in /
        // logged in) and at least one liveness signal has been recorded.
        private bool ShouldTimeout(DateTimeOffset now)
        {
            if (State != ConnectionState.TransportConnected && State != ConnectionState.LoginPending && State != ConnectionState.LoggedIn)
            {
                return false;
            }

            if (!lastLivenessUtc.HasValue)
            {
                return false;
            }

            return now - lastLivenessUtc.Value >= ReconnectPolicy.HeartbeatTimeout;
        }

        // Changes State and raises the event; a same-state call still raises the
        // event (with previous == current) so listeners see every notification.
        private void TransitionTo(
            ConnectionState newState,
            SessionEventKind eventKind,
            DateTimeOffset occurredAtUtc,
            string reason = null)
        {
            if (State == newState)
            {
                RaiseEvent(eventKind, State, State, occurredAtUtc, reason);
                return;
            }

            var previousState = State;
            State = newState;
            RaiseEvent(eventKind, previousState, newState, occurredAtUtc, reason);
        }

        // Builds the event record and invokes LifecycleChanged (if anyone listens).
        private void RaiseEvent(
            SessionEventKind kind,
            ConnectionState previousState,
            ConnectionState currentState,
            DateTimeOffset occurredAtUtc,
            string reason = null)
        {
            LifecycleChanged?.Invoke(new SessionLifecycleEvent(kind, previousState, currentState, occurredAtUtc, reason));
        }
    }
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
fileFormatVersion: 2
|
||||
guid: ce1fd028b318b084e8dc5a3084f9e9c1
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {instanceID: 0}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
using System;
|
||||
|
||||
namespace Network.NetworkApplication
{
    /// <summary>
    /// Immutable timing policy for session heartbeats and automatic reconnection.
    /// </summary>
    public sealed class SessionReconnectPolicy
    {
        /// <summary>How often a heartbeat should be sent while logged in.</summary>
        public TimeSpan HeartbeatInterval { get; }

        /// <summary>How long without liveness before the session is considered timed out.</summary>
        public TimeSpan HeartbeatTimeout { get; }

        /// <summary>Delay between a timeout and the scheduled reconnect attempt.</summary>
        public TimeSpan ReconnectDelay { get; }

        /// <summary>Whether a reconnect is scheduled automatically after a timeout.</summary>
        public bool AutoReconnect { get; }

        /// <summary>
        /// Default policy: 2s heartbeat interval, 6s timeout, 1s reconnect delay,
        /// automatic reconnection enabled.
        /// </summary>
        public static SessionReconnectPolicy Default { get; } = new(
            heartbeatInterval: TimeSpan.FromSeconds(2),
            heartbeatTimeout: TimeSpan.FromSeconds(6),
            reconnectDelay: TimeSpan.FromSeconds(1),
            autoReconnect: true);

        /// <summary>
        /// Creates a policy with explicit timings.
        /// </summary>
        /// <param name="heartbeatInterval">Must be strictly positive.</param>
        /// <param name="heartbeatTimeout">Must be strictly positive.</param>
        /// <param name="reconnectDelay">Must be non-negative.</param>
        /// <param name="autoReconnect">Enables automatic reconnect scheduling after a timeout.</param>
        /// <exception cref="ArgumentOutOfRangeException">A timing argument violates its constraint.</exception>
        public SessionReconnectPolicy(
            TimeSpan heartbeatInterval,
            TimeSpan heartbeatTimeout,
            TimeSpan reconnectDelay,
            bool autoReconnect)
        {
            // Intervals must be strictly positive; only the reconnect delay may be zero.
            if (heartbeatInterval <= TimeSpan.Zero) throw new ArgumentOutOfRangeException(nameof(heartbeatInterval));
            if (heartbeatTimeout <= TimeSpan.Zero) throw new ArgumentOutOfRangeException(nameof(heartbeatTimeout));
            if (reconnectDelay < TimeSpan.Zero) throw new ArgumentOutOfRangeException(nameof(reconnectDelay));

            HeartbeatInterval = heartbeatInterval;
            HeartbeatTimeout = heartbeatTimeout;
            ReconnectDelay = reconnectDelay;
            AutoReconnect = autoReconnect;
        }
    }
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
fileFormatVersion: 2
|
||||
guid: 1dbefe0de75d3904ca75f2dbf73b61d1
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {instanceID: 0}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
using System;
|
||||
using System;
|
||||
using System.Threading.Tasks;
|
||||
using Network.NetworkTransport;
|
||||
|
||||
|
|
@ -6,9 +6,14 @@ namespace Network.NetworkApplication
|
|||
{
|
||||
public sealed class SharedNetworkRuntime
|
||||
{
|
||||
public SharedNetworkRuntime(ITransport transport, INetworkMessageDispatcher dispatcher)
|
||||
public SharedNetworkRuntime(
|
||||
ITransport transport,
|
||||
INetworkMessageDispatcher dispatcher,
|
||||
SessionReconnectPolicy reconnectPolicy = null,
|
||||
Func<DateTimeOffset> utcNowProvider = null)
|
||||
{
|
||||
Transport = transport ?? throw new ArgumentNullException(nameof(transport));
|
||||
SessionManager = new SessionManager(reconnectPolicy, utcNowProvider);
|
||||
MessageManager = new MessageManager(transport, dispatcher ?? throw new ArgumentNullException(nameof(dispatcher)));
|
||||
}
|
||||
|
||||
|
|
@ -16,19 +21,64 @@ namespace Network.NetworkApplication
|
|||
|
||||
public MessageManager MessageManager { get; }
|
||||
|
||||
public Task StartAsync()
|
||||
public SessionManager SessionManager { get; }
|
||||
|
||||
public event Action<SessionLifecycleEvent> LifecycleChanged
|
||||
{
|
||||
return Transport.StartAsync();
|
||||
add => SessionManager.LifecycleChanged += value;
|
||||
remove => SessionManager.LifecycleChanged -= value;
|
||||
}
|
||||
|
||||
public async Task StartAsync()
|
||||
{
|
||||
await Transport.StartAsync();
|
||||
SessionManager.NotifyTransportConnected();
|
||||
}
|
||||
|
||||
public void Stop()
|
||||
{
|
||||
Transport.Stop();
|
||||
SessionManager.NotifyTransportDisconnected("Transport stopped");
|
||||
}
|
||||
|
||||
public Task<int> DrainPendingMessagesAsync(int maxMessages = int.MaxValue)
|
||||
{
|
||||
return MessageManager.DrainPendingMessagesAsync(maxMessages);
|
||||
}
|
||||
|
||||
public void NotifyLoginStarted()
|
||||
{
|
||||
SessionManager.NotifyLoginStarted();
|
||||
}
|
||||
|
||||
public void NotifyLoginSucceeded()
|
||||
{
|
||||
SessionManager.NotifyLoginSucceeded();
|
||||
}
|
||||
|
||||
public void NotifyLoginFailed(string reason = null)
|
||||
{
|
||||
SessionManager.NotifyLoginFailed(reason);
|
||||
}
|
||||
|
||||
public void NotifyHeartbeatSent()
|
||||
{
|
||||
SessionManager.NotifyHeartbeatSent();
|
||||
}
|
||||
|
||||
public void NotifyHeartbeatReceived(long? serverTick = null)
|
||||
{
|
||||
SessionManager.NotifyHeartbeatReceived(serverTick);
|
||||
}
|
||||
|
||||
public void NotifyInboundActivity()
|
||||
{
|
||||
SessionManager.NotifyInboundActivity();
|
||||
}
|
||||
|
||||
public void UpdateLifecycle()
|
||||
{
|
||||
SessionManager.Evaluate();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,6 @@
|
|||
using System;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Net;
|
||||
using System.Threading.Tasks;
|
||||
using Network.NetworkApplication;
|
||||
using Network.NetworkTransport;
|
||||
|
|
@ -7,32 +9,101 @@ namespace Network.NetworkHost
|
|||
{
|
||||
public sealed class ServerNetworkHost
|
||||
{
|
||||
private readonly SharedNetworkRuntime runtime;
|
||||
private readonly ITransport transport;
|
||||
private readonly MessageManager messageManager;
|
||||
|
||||
public ServerNetworkHost(ITransport transport, INetworkMessageDispatcher dispatcher = null)
|
||||
public ServerNetworkHost(
|
||||
ITransport transport,
|
||||
INetworkMessageDispatcher dispatcher = null,
|
||||
SessionReconnectPolicy reconnectPolicy = null,
|
||||
Func<DateTimeOffset> utcNowProvider = null)
|
||||
{
|
||||
runtime = new SharedNetworkRuntime(
|
||||
transport ?? throw new ArgumentNullException(nameof(transport)),
|
||||
dispatcher ?? new ImmediateNetworkMessageDispatcher());
|
||||
this.transport = transport ?? throw new ArgumentNullException(nameof(transport));
|
||||
SessionCoordinator = new MultiSessionManager(reconnectPolicy, utcNowProvider);
|
||||
this.transport.OnReceive += HandleTransportReceive;
|
||||
messageManager = new MessageManager(this.transport, dispatcher ?? new ImmediateNetworkMessageDispatcher());
|
||||
}
|
||||
|
||||
public MessageManager MessageManager => runtime.MessageManager;
|
||||
public MessageManager MessageManager => messageManager;
|
||||
|
||||
public ITransport Transport => runtime.Transport;
|
||||
public ITransport Transport => transport;
|
||||
|
||||
// Server-side lifecycle entry point: inspect and control per-peer session state here.
|
||||
public MultiSessionManager SessionCoordinator { get; }
|
||||
|
||||
public IReadOnlyList<ManagedNetworkSession> ManagedSessions => SessionCoordinator.Sessions;
|
||||
|
||||
public event Action<MultiSessionLifecycleEvent> LifecycleChanged
|
||||
{
|
||||
add => SessionCoordinator.LifecycleChanged += value;
|
||||
remove => SessionCoordinator.LifecycleChanged -= value;
|
||||
}
|
||||
|
||||
public Task StartAsync()
|
||||
{
|
||||
return runtime.StartAsync();
|
||||
return transport.StartAsync();
|
||||
}
|
||||
|
||||
public void Stop()
|
||||
{
|
||||
runtime.Stop();
|
||||
transport.Stop();
|
||||
SessionCoordinator.RemoveAllSessions("Transport stopped");
|
||||
}
|
||||
|
||||
public Task<int> DrainPendingMessagesAsync(int maxMessages = int.MaxValue)
|
||||
{
|
||||
return runtime.DrainPendingMessagesAsync(maxMessages);
|
||||
return messageManager.DrainPendingMessagesAsync(maxMessages);
|
||||
}
|
||||
|
||||
public void UpdateLifecycle()
|
||||
{
|
||||
SessionCoordinator.UpdateLifecycle();
|
||||
}
|
||||
|
||||
public bool TryGetSession(IPEndPoint remoteEndPoint, out ManagedNetworkSession session)
|
||||
{
|
||||
return SessionCoordinator.TryGetSession(remoteEndPoint, out session);
|
||||
}
|
||||
|
||||
public void NotifyLoginStarted(IPEndPoint remoteEndPoint)
|
||||
{
|
||||
SessionCoordinator.NotifyLoginStarted(remoteEndPoint);
|
||||
}
|
||||
|
||||
public void NotifyLoginSucceeded(IPEndPoint remoteEndPoint)
|
||||
{
|
||||
SessionCoordinator.NotifyLoginSucceeded(remoteEndPoint);
|
||||
}
|
||||
|
||||
public void NotifyLoginFailed(IPEndPoint remoteEndPoint, string reason = null)
|
||||
{
|
||||
SessionCoordinator.NotifyLoginFailed(remoteEndPoint, reason);
|
||||
}
|
||||
|
||||
public void NotifyHeartbeatSent(IPEndPoint remoteEndPoint)
|
||||
{
|
||||
SessionCoordinator.NotifyHeartbeatSent(remoteEndPoint);
|
||||
}
|
||||
|
||||
public void NotifyHeartbeatReceived(IPEndPoint remoteEndPoint, long? serverTick = null)
|
||||
{
|
||||
SessionCoordinator.NotifyHeartbeatReceived(remoteEndPoint, serverTick);
|
||||
}
|
||||
|
||||
public void NotifyInboundActivity(IPEndPoint remoteEndPoint)
|
||||
{
|
||||
SessionCoordinator.NotifyInboundActivity(remoteEndPoint);
|
||||
}
|
||||
|
||||
public bool RemoveSession(IPEndPoint remoteEndPoint, string reason = null)
|
||||
{
|
||||
return SessionCoordinator.RemoveSession(remoteEndPoint, reason);
|
||||
}
|
||||
|
||||
private void HandleTransportReceive(byte[] _, IPEndPoint sender)
|
||||
{
|
||||
SessionCoordinator.ObserveTransportActivity(sender);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -29,6 +29,7 @@ public class NetworkManager : MonoBehaviour
|
|||
var transport = new KcpTransport("127.0.0.1", 8080);
|
||||
var dispatcher = new MainThreadNetworkDispatcher();
|
||||
_networkRuntime = new SharedNetworkRuntime(transport, dispatcher);
|
||||
_networkRuntime.LifecycleChanged += HandleLifecycleChanged;
|
||||
|
||||
var startTask = _networkRuntime.StartAsync();
|
||||
yield return new WaitUntil(() => startTask.IsCompleted);
|
||||
|
|
@ -50,6 +51,8 @@ public class NetworkManager : MonoBehaviour
|
|||
return;
|
||||
}
|
||||
|
||||
_networkRuntime.UpdateLifecycle();
|
||||
|
||||
if (!_networkDrainTask.IsCompleted)
|
||||
{
|
||||
return;
|
||||
|
|
@ -65,7 +68,12 @@ public class NetworkManager : MonoBehaviour
|
|||
|
||||
private void OnDestroy()
|
||||
{
|
||||
_networkRuntime?.Stop();
|
||||
if (_networkRuntime != null)
|
||||
{
|
||||
_networkRuntime.LifecycleChanged -= HandleLifecycleChanged;
|
||||
_networkRuntime.Stop();
|
||||
}
|
||||
|
||||
if (Instance == this)
|
||||
{
|
||||
Instance = null;
|
||||
|
|
@ -76,13 +84,16 @@ public class NetworkManager : MonoBehaviour
|
|||
{
|
||||
while (true)
|
||||
{
|
||||
if (_serverPoint != null)
|
||||
if (_networkRuntime != null
|
||||
&& _serverPoint != null
|
||||
&& _networkRuntime.SessionManager.IsHeartbeatDue)
|
||||
{
|
||||
var heartbeat = new Heartbeat();
|
||||
_networkRuntime.MessageManager.SendMessage(heartbeat, MessageType.Heartbeat, _serverPoint);
|
||||
_networkRuntime.NotifyHeartbeatSent();
|
||||
}
|
||||
|
||||
yield return new WaitForSeconds(2.0f);
|
||||
yield return new WaitForSeconds(0.25f);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -98,13 +109,16 @@ public class NetworkManager : MonoBehaviour
|
|||
private void HandleLoginResponse(byte[] data, IPEndPoint sender)
|
||||
{
|
||||
var response = LoginResponse.Parser.ParseFrom(data);
|
||||
_networkRuntime.NotifyInboundActivity();
|
||||
_serverPoint = sender;
|
||||
if (response.Result)
|
||||
{
|
||||
_networkRuntime.NotifyLoginSucceeded();
|
||||
MasterManager.Instance.InitPlayersState(response);
|
||||
}
|
||||
else
|
||||
{
|
||||
_networkRuntime.NotifyLoginFailed("UserId already exists");
|
||||
_wrongWindow.SetActive(true);
|
||||
Debug.LogError("UserId 已经存在");
|
||||
}
|
||||
|
|
@ -112,6 +126,7 @@ public class NetworkManager : MonoBehaviour
|
|||
|
||||
private void HandlePlayerState(byte[] data, IPEndPoint sender)
|
||||
{
|
||||
_networkRuntime.NotifyInboundActivity();
|
||||
var message = PlayerState.Parser.ParseFrom(data);
|
||||
MasterManager.Instance.MovePlayer(message.PlayerId, message);
|
||||
Debug.Log($"收到PlayerState::PlayerID={message.PlayerId},Position=" + message.Position.ToVector3().ToString());
|
||||
|
|
@ -120,6 +135,7 @@ public class NetworkManager : MonoBehaviour
|
|||
private void HandleHeartbeatResponse(byte[] data, IPEndPoint sender)
|
||||
{
|
||||
var response = HeartbeatResponse.Parser.ParseFrom(data);
|
||||
_networkRuntime.NotifyHeartbeatReceived(response.ServerTick);
|
||||
var player = MasterManager.Instance.GetCurrentPlayer();
|
||||
if (player != null)
|
||||
{
|
||||
|
|
@ -129,17 +145,24 @@ public class NetworkManager : MonoBehaviour
|
|||
|
||||
private void HandleLogoutRequest(byte[] data, IPEndPoint sender)
|
||||
{
|
||||
_networkRuntime.NotifyInboundActivity();
|
||||
var request = LogoutRequest.Parser.ParseFrom(data);
|
||||
MasterManager.Instance.UnregisterPlayer(request.PlayerId);
|
||||
}
|
||||
|
||||
private void HandlePlayerJoin(byte[] data, IPEndPoint sender)
|
||||
{
|
||||
_networkRuntime.NotifyInboundActivity();
|
||||
var playerJoin = PlayerJoin.Parser.ParseFrom(data);
|
||||
if (MasterManager.Instance.LocalPlayerId == playerJoin.PlayerId) return;
|
||||
MasterManager.Instance.RegisterRemotePlayer(playerJoin.PlayerId, playerJoin.Position.ToVector3());
|
||||
}
|
||||
|
||||
private void HandleLifecycleChanged(SessionLifecycleEvent lifecycleEvent)
|
||||
{
|
||||
Debug.Log($"[NetworkManager] Session {lifecycleEvent.PreviousState} -> {lifecycleEvent.CurrentState} ({lifecycleEvent.Kind}) {lifecycleEvent.Reason}");
|
||||
}
|
||||
|
||||
public void SendPlayerInput(string playerId, Vector3 input)
|
||||
{
|
||||
var message = new PlayerInput()
|
||||
|
|
@ -164,6 +187,7 @@ public class NetworkManager : MonoBehaviour
|
|||
PlayerId = playerId,
|
||||
Speed = speed
|
||||
};
|
||||
_networkRuntime.NotifyLoginStarted();
|
||||
_networkRuntime.MessageManager.SendMessage(request, MessageType.LoginRequest);
|
||||
Debug.Log($"Sent login request to player {playerId}");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,226 @@
|
|||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Net;
|
||||
using System.Threading.Tasks;
|
||||
using Google.Protobuf;
|
||||
using Network.Defines;
|
||||
using Network.NetworkApplication;
|
||||
using Network.NetworkHost;
|
||||
using Network.NetworkTransport;
|
||||
using NUnit.Framework;
|
||||
|
||||
namespace Tests.EditMode.Network
|
||||
{
|
||||
public class SessionLifecycleTests
|
||||
{
|
||||
[Test]
|
||||
public void SharedNetworkRuntime_StartAsync_TransitionsToTransportConnectedButNotLoggedIn()
|
||||
{
|
||||
var transport = new FakeTransport();
|
||||
var runtime = new SharedNetworkRuntime(transport, new ImmediateNetworkMessageDispatcher());
|
||||
|
||||
runtime.StartAsync().GetAwaiter().GetResult();
|
||||
|
||||
Assert.That(runtime.SessionManager.State, Is.EqualTo(ConnectionState.TransportConnected));
|
||||
Assert.That(runtime.SessionManager.CanSendHeartbeat, Is.False);
|
||||
}
|
||||
|
||||
[Test]
|
||||
public void LoginFailure_IsDistinctFromTransportConnectedState()
|
||||
{
|
||||
var transport = new FakeTransport();
|
||||
var runtime = new SharedNetworkRuntime(transport, new ImmediateNetworkMessageDispatcher());
|
||||
|
||||
runtime.StartAsync().GetAwaiter().GetResult();
|
||||
runtime.NotifyLoginStarted();
|
||||
runtime.NotifyLoginFailed("bad credentials");
|
||||
|
||||
Assert.That(runtime.SessionManager.State, Is.EqualTo(ConnectionState.LoginFailed));
|
||||
Assert.That(runtime.SessionManager.LastFailureReason, Is.EqualTo("bad credentials"));
|
||||
}
|
||||
|
||||
[Test]
|
||||
public void HeartbeatTimeout_SchedulesAndStartsReconnect()
|
||||
{
|
||||
var clock = new MutableClock(new DateTimeOffset(2026, 3, 27, 0, 0, 0, TimeSpan.Zero));
|
||||
var policy = new SessionReconnectPolicy(
|
||||
heartbeatInterval: TimeSpan.FromSeconds(2),
|
||||
heartbeatTimeout: TimeSpan.FromSeconds(5),
|
||||
reconnectDelay: TimeSpan.FromSeconds(3),
|
||||
autoReconnect: true);
|
||||
var transport = new FakeTransport();
|
||||
var runtime = new SharedNetworkRuntime(transport, new ImmediateNetworkMessageDispatcher(), policy, clock.UtcNow);
|
||||
var events = new List<SessionEventKind>();
|
||||
|
||||
runtime.LifecycleChanged += lifecycleEvent => events.Add(lifecycleEvent.Kind);
|
||||
runtime.StartAsync().GetAwaiter().GetResult();
|
||||
runtime.NotifyLoginStarted();
|
||||
runtime.NotifyLoginSucceeded();
|
||||
|
||||
clock.Advance(TimeSpan.FromSeconds(6));
|
||||
runtime.UpdateLifecycle();
|
||||
|
||||
Assert.That(runtime.SessionManager.State, Is.EqualTo(ConnectionState.ReconnectPending));
|
||||
Assert.That(events, Does.Contain(SessionEventKind.TimedOut));
|
||||
Assert.That(events, Does.Contain(SessionEventKind.ReconnectScheduled));
|
||||
|
||||
clock.Advance(TimeSpan.FromSeconds(3));
|
||||
runtime.UpdateLifecycle();
|
||||
|
||||
Assert.That(runtime.SessionManager.State, Is.EqualTo(ConnectionState.Reconnecting));
|
||||
Assert.That(events, Does.Contain(SessionEventKind.ReconnectStarted));
|
||||
}
|
||||
|
||||
[Test]
|
||||
public void HeartbeatResponse_UpdatesRttAndServerTick_WithoutChangingLoggedInState()
|
||||
{
|
||||
var clock = new MutableClock(new DateTimeOffset(2026, 3, 27, 0, 0, 0, TimeSpan.Zero));
|
||||
var transport = new FakeTransport();
|
||||
var runtime = new SharedNetworkRuntime(transport, new ImmediateNetworkMessageDispatcher(), utcNowProvider: clock.UtcNow);
|
||||
|
||||
runtime.StartAsync().GetAwaiter().GetResult();
|
||||
runtime.NotifyLoginStarted();
|
||||
runtime.NotifyLoginSucceeded();
|
||||
runtime.NotifyHeartbeatSent();
|
||||
|
||||
clock.Advance(TimeSpan.FromMilliseconds(120));
|
||||
runtime.NotifyHeartbeatReceived(321);
|
||||
|
||||
Assert.That(runtime.SessionManager.State, Is.EqualTo(ConnectionState.LoggedIn));
|
||||
Assert.That(runtime.SessionManager.LastRoundTripTime, Is.EqualTo(TimeSpan.FromMilliseconds(120)));
|
||||
Assert.That(runtime.SessionManager.LastServerTick, Is.EqualTo(321));
|
||||
}
|
||||
|
||||
[Test]
|
||||
public void ServerNetworkHost_TracksMultipleSessionsIndependently()
|
||||
{
|
||||
var clock = new MutableClock(new DateTimeOffset(2026, 3, 27, 0, 0, 0, TimeSpan.Zero));
|
||||
var policy = new SessionReconnectPolicy(
|
||||
heartbeatInterval: TimeSpan.FromSeconds(2),
|
||||
heartbeatTimeout: TimeSpan.FromSeconds(5),
|
||||
reconnectDelay: TimeSpan.FromSeconds(3),
|
||||
autoReconnect: true);
|
||||
var transport = new FakeTransport();
|
||||
var host = new ServerNetworkHost(transport, reconnectPolicy: policy, utcNowProvider: clock.UtcNow);
|
||||
var peerA = new IPEndPoint(IPAddress.Parse("127.0.0.1"), 5001);
|
||||
var peerB = new IPEndPoint(IPAddress.Parse("127.0.0.1"), 5002);
|
||||
|
||||
host.StartAsync().GetAwaiter().GetResult();
|
||||
transport.EmitReceive(CreateEnvelope(MessageType.Heartbeat), peerA);
|
||||
transport.EmitReceive(CreateEnvelope(MessageType.Heartbeat), peerB);
|
||||
host.NotifyLoginStarted(peerA);
|
||||
host.NotifyLoginSucceeded(peerA);
|
||||
host.NotifyLoginStarted(peerB);
|
||||
host.NotifyLoginSucceeded(peerB);
|
||||
|
||||
clock.Advance(TimeSpan.FromSeconds(6));
|
||||
host.NotifyHeartbeatReceived(peerB, 99);
|
||||
host.UpdateLifecycle();
|
||||
|
||||
Assert.That(host.ManagedSessions.Count, Is.EqualTo(2));
|
||||
Assert.That(host.TryGetSession(peerA, out var sessionA), Is.True);
|
||||
Assert.That(host.TryGetSession(peerB, out var sessionB), Is.True);
|
||||
Assert.That(sessionA.SessionManager.State, Is.EqualTo(ConnectionState.ReconnectPending));
|
||||
Assert.That(sessionB.SessionManager.State, Is.EqualTo(ConnectionState.LoggedIn));
|
||||
Assert.That(sessionB.SessionManager.LastServerTick, Is.EqualTo(99));
|
||||
}
|
||||
|
||||
[Test]
|
||||
public void ServerNetworkHost_RemoveSession_DoesNotDisturbOtherPeers()
|
||||
{
|
||||
var transport = new FakeTransport();
|
||||
var host = new ServerNetworkHost(transport);
|
||||
var peerA = new IPEndPoint(IPAddress.Parse("127.0.0.1"), 5001);
|
||||
var peerB = new IPEndPoint(IPAddress.Parse("127.0.0.1"), 5002);
|
||||
|
||||
host.StartAsync().GetAwaiter().GetResult();
|
||||
transport.EmitReceive(CreateEnvelope(MessageType.Heartbeat), peerA);
|
||||
transport.EmitReceive(CreateEnvelope(MessageType.Heartbeat), peerB);
|
||||
|
||||
var removed = host.RemoveSession(peerA, "peer closed");
|
||||
|
||||
Assert.That(removed, Is.True);
|
||||
Assert.That(host.ManagedSessions.Count, Is.EqualTo(1));
|
||||
Assert.That(host.TryGetSession(peerA, out _), Is.False);
|
||||
Assert.That(host.TryGetSession(peerB, out var sessionB), Is.True);
|
||||
Assert.That(sessionB.SessionManager.State, Is.EqualTo(ConnectionState.TransportConnected));
|
||||
}
|
||||
|
||||
[Test]
|
||||
public void ServerNetworkHost_LifecycleEventsIncludeRemotePeerIdentity()
|
||||
{
|
||||
var transport = new FakeTransport();
|
||||
var host = new ServerNetworkHost(transport);
|
||||
var peer = new IPEndPoint(IPAddress.Parse("127.0.0.1"), 5001);
|
||||
MultiSessionLifecycleEvent receivedEvent = null;
|
||||
|
||||
host.LifecycleChanged += lifecycleEvent => receivedEvent = lifecycleEvent;
|
||||
|
||||
host.StartAsync().GetAwaiter().GetResult();
|
||||
transport.EmitReceive(CreateEnvelope(MessageType.Heartbeat), peer);
|
||||
|
||||
Assert.That(receivedEvent, Is.Not.Null);
|
||||
Assert.That(receivedEvent.RemoteEndPoint, Is.EqualTo(peer));
|
||||
Assert.That(receivedEvent.LifecycleEvent.CurrentState, Is.EqualTo(ConnectionState.TransportConnected));
|
||||
}
|
||||
|
||||
private static byte[] CreateEnvelope(MessageType type)
|
||||
{
|
||||
return new Envelope
|
||||
{
|
||||
Type = (int)type
|
||||
}.ToByteArray();
|
||||
}
|
||||
|
||||
private sealed class MutableClock
|
||||
{
|
||||
public MutableClock(DateTimeOffset now)
|
||||
{
|
||||
Now = now;
|
||||
}
|
||||
|
||||
public DateTimeOffset Now { get; private set; }
|
||||
|
||||
public DateTimeOffset UtcNow()
|
||||
{
|
||||
return Now;
|
||||
}
|
||||
|
||||
public void Advance(TimeSpan delta)
|
||||
{
|
||||
Now = Now.Add(delta);
|
||||
}
|
||||
}
|
||||
|
||||
private sealed class FakeTransport : ITransport
|
||||
{
|
||||
public event Action<byte[], IPEndPoint> OnReceive;
|
||||
|
||||
public Task StartAsync()
|
||||
{
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
|
||||
public void Stop()
|
||||
{
|
||||
}
|
||||
|
||||
public void Send(byte[] data)
|
||||
{
|
||||
}
|
||||
|
||||
public void SendTo(byte[] data, IPEndPoint target)
|
||||
{
|
||||
}
|
||||
|
||||
public void SendToAll(byte[] data)
|
||||
{
|
||||
}
|
||||
|
||||
public void EmitReceive(byte[] data, IPEndPoint sender)
|
||||
{
|
||||
OnReceive?.Invoke(data, sender);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
fileFormatVersion: 2
|
||||
guid: 44395d472582e2c41a8c31b766751035
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {instanceID: 0}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
||||
|
|
@ -6,7 +6,8 @@
|
|||
- 阶段 2 已完成:`KcpTransport` 已落地,`NetworkManager` 默认使用 KCP 作为运行时可靠传输实现,相关编辑器测试已经覆盖默认会话、多远端隔离、广播与停止清理。
|
||||
- 阶段 3 已完成:遗留的 `ReliableUdpTransport` 兼容入口已经移除,项目中不再保留第二个“可靠 UDP”实现名义。
|
||||
- 阶段 4 已完成:网络线程与 Unity 主线程之间已经建立显式分发边界,传输回调不再直接执行业务 handler。
|
||||
- 阶段 5 及以后尚未开始:连接生命周期、QoS/同步优化、监控指标仍然是后续工作。
|
||||
- 阶段 5 已完成:共享会话生命周期、心跳职责边界、超时/重连状态已经从业务消息处理里分层出来,并扩展到服务端多会话管理。
|
||||
- 阶段 6 及以后尚未开始:QoS/同步优化、监控指标仍然是后续工作。
|
||||
|
||||
## 当前真实现状
|
||||
|
||||
|
|
@ -15,6 +16,7 @@
|
|||
- 共享的 `ITransport` / `KcpTransport`
|
||||
- 共享的 `MessageManager`
|
||||
- 共享的 `SharedNetworkRuntime`
|
||||
- 共享的 `SessionManager` / `MultiSessionManager` / `ConnectionState`
|
||||
- 由宿主注入的 dispatcher 策略,而不是在消息层内硬编码 Unity 主线程语义
|
||||
- `KcpTransport` 负责:
|
||||
- 客户端默认会话
|
||||
|
|
@ -25,13 +27,22 @@
|
|||
- 解析 `Envelope`
|
||||
- 根据 `MessageType` 查找 handler
|
||||
- 通过宿主注入的 dispatcher 执行消息分发,而不是在收包线程直接执行 handler
|
||||
- `SessionManager` 负责:
|
||||
- 维护 `Disconnected` / `TransportConnected` / `LoginPending` / `LoggedIn` / `LoginFailed` / `TimedOut` / `ReconnectPending` / `Reconnecting` 状态
|
||||
- 管理心跳发送窗口、心跳超时和重连调度
|
||||
- 记录 RTT、最近 liveness 时间和最近服务器时钟样本
|
||||
- `MultiSessionManager` 负责:
|
||||
- 按远端 `IPEndPoint` 维护多份 `SessionManager`
|
||||
- 让服务端按每个远端独立观察登录、超时、断线、重连状态
|
||||
- 提供按远端查询、枚举、移除会话的共享入口
|
||||
- `MainThreadNetworkDispatcher` 负责:
|
||||
- 维护线程安全 FIFO 队列
|
||||
- 在 Unity 主线程 drain 队列并执行 handler
|
||||
- `ServerNetworkHost` 当前可以作为非 Unity 宿主复用同一套网络核心,并使用非主线程 dispatcher 策略
|
||||
- `ServerNetworkHost` 当前可以作为非 Unity 宿主复用同一套网络核心,并通过 `MultiSessionManager` 暴露与客户端一致的生命周期状态词汇
|
||||
- `NetworkManager` 负责:
|
||||
- 在 `Update()` 中定期 drain 网络消息
|
||||
- 在 `Update()` 中定期 drain 网络消息并驱动 `SessionManager` 超时评估
|
||||
- 在主线程上触发游戏对象修改与 UI 相关逻辑
|
||||
- 仅在会话已登录且心跳到期时发送心跳
|
||||
- 当前业务链路仍然包括:
|
||||
- 登录 / 登出
|
||||
- 心跳 / 对时
|
||||
|
|
@ -39,7 +50,6 @@
|
|||
- `PlayerState` 下行
|
||||
- 本地预测 / 服务器校正
|
||||
- 当前尚未完成的关键架构问题:
|
||||
- 连接成功、登录成功、心跳超时、重连等状态尚未完全分层
|
||||
- 高频同步消息仍未做 QoS 拆分
|
||||
- 网络观测指标还不完整
|
||||
|
||||
|
|
@ -72,26 +82,31 @@
|
|||
|
||||
- 网络消息不会直接在非主线程操作 Unity 对象。
|
||||
|
||||
## 后续阶段
|
||||
|
||||
### 阶段 5:连接与心跳改造
|
||||
|
||||
1. 明确“连接成功”和“登录成功”是两个不同状态。
|
||||
2. 心跳只承担:
|
||||
已完成结果:
|
||||
|
||||
1. 已区分 `TransportConnected` 和 `LoggedIn` 两种状态,登录成功不再隐含为“网络已连接”的别名。
|
||||
2. 心跳当前只承担:
|
||||
- 存活检测
|
||||
- RTT / 时间同步
|
||||
3. 会话超时和断线重连逻辑放在 session manager,而不是业务消息处理里。
|
||||
- RTT 统计
|
||||
- 时间同步
|
||||
3. 会话超时、登录失败、重连调度当前由共享 `SessionManager` 管理,而不是散落在业务 handler 里。
|
||||
4. 服务端当前通过 `MultiSessionManager` 按每个远端地址独立管理 `SessionManager`,不再把所有远端压成一个 runtime 级状态。
|
||||
|
||||
交付标准:
|
||||
交付结论:
|
||||
|
||||
- 断线、超时、登录失败、重连等状态可以被明确区分。
|
||||
- 断线、超时、登录失败、重连等状态当前可以被明确区分和测试。
|
||||
- 服务端当前也可以独立观察玩家 A / B / C 各自的生命周期状态,而不是只看到一个总状态。
|
||||
|
||||
## 后续阶段
|
||||
|
||||
### 阶段 6:同步策略优化
|
||||
|
||||
1. 重新评估 `PlayerInput` 是否必须严格可靠。
|
||||
2. 重新评估 `PlayerState` 是否应使用可靠有序流。
|
||||
3. 调整客户端预测、回滚、纠正策略。
|
||||
4. 把对时逻辑从 `_sendInterval` 漂移中拆出来。
|
||||
4. 把对时逻辑从 `SessionManager` 的心跳窗口里进一步拆分成独立同步策略(如有必要)。
|
||||
|
||||
交付标准:
|
||||
|
||||
|
|
@ -112,6 +127,8 @@
|
|||
建议后续继续补齐以下模块:
|
||||
|
||||
- `Assets/Scripts/Network/NetworkTransport/KcpTransportConfig.cs`
|
||||
- `Assets/Scripts/Network/NetworkApplication/SessionMetrics.cs`
|
||||
- `Assets/Scripts/Network/NetworkApplication/MultiSessionManager.cs`
|
||||
|
||||
## 验收标准
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
"com.unity.ai.navigation": "1.1.6",
|
||||
"com.unity.collab-proxy": "2.7.1",
|
||||
"com.unity.feature.development": "1.0.1",
|
||||
"com.unity.ide.rider": "3.0.38",
|
||||
"com.unity.ide.rider": "3.0.39",
|
||||
"com.unity.ide.visualstudio": "2.0.22",
|
||||
"com.unity.ide.vscode": "1.2.5",
|
||||
"com.unity.render-pipelines.universal": "14.0.12",
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@
|
|||
}
|
||||
},
|
||||
"com.unity.ide.rider": {
|
||||
"version": "3.0.38",
|
||||
"version": "3.0.39",
|
||||
"depth": 0,
|
||||
"source": "registry",
|
||||
"dependencies": {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,2 @@
|
|||
schema: spec-driven
|
||||
created: 2026-03-27
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
## Context
|
||||
|
||||
The current networking stack already shares transport, message routing, and session lifecycle vocabulary between client and server hosts. However, the shared runtime still owns exactly one `SessionManager`, which matches the client model but does not match a server that needs to track many remote peers concurrently while preserving the same login, timeout, heartbeat, and reconnect semantics.
|
||||
|
||||
The main constraint is backward compatibility for the client host. The Unity client should keep its simple single-session composition, while the server host gains an explicit multi-session orchestration layer instead of embedding per-peer lifecycle logic into handlers or transport internals.
|
||||
|
||||
## Goals / Non-Goals
|
||||
|
||||
**Goals:**
|
||||
- Preserve `SessionManager` as the per-session finite state machine and lifecycle vocabulary owner.
|
||||
- Introduce a shared multi-session orchestration layer that can create, look up, evaluate, and remove many session managers keyed by remote identity.
|
||||
- Keep client composition simple by allowing the existing single-session runtime path to remain valid.
|
||||
- Make server-facing APIs explicit about per-session lookup, enumeration, lifecycle events, and cleanup triggers.
|
||||
|
||||
**Non-Goals:**
|
||||
- Redesign KCP transport session isolation or introduce a new wire protocol.
|
||||
- Change the meaning of existing connection states or heartbeat semantics.
|
||||
- Solve stage 6 QoS or synchronization policy work in the same change.
|
||||
- Move gameplay admission, authority, or player-object ownership into the shared lifecycle layer.
|
||||
|
||||
## Decisions
|
||||
|
||||
### Keep `SessionManager` as a per-session state machine
|
||||
`SessionManager` already models one connection lifecycle correctly. Replacing it with a collection-aware type would force client and server concerns into the same API surface. The change will keep `SessionManager` focused on a single session and add a higher-level multi-session coordinator for hosts that manage many peers.
|
||||
|
||||
Alternative considered: make `SessionManager` itself collection-aware.
|
||||
Rejected because it would either expose server-only concepts to the client or create a type with dual responsibilities that is harder to test and reason about.
|
||||
|
||||
### Add a keyed multi-session coordinator above transport callbacks
|
||||
The new orchestration layer should own a mapping from remote identity to per-session `SessionManager` instances. Transport delivery, login results, inbound activity, heartbeat updates, timeout evaluation, and reconnect bookkeeping should be routed through this keyed coordinator rather than being inferred in message handlers.
|
||||
|
||||
Alternative considered: keep session dictionaries inside `ServerNetworkHost` only.
|
||||
Rejected because it would fork lifecycle orchestration away from the shared core and make server behavior harder to test without the host adapter.
|
||||
|
||||
### Preserve separate host adapters for client and server
|
||||
The Unity client should continue composing a single-session runtime with its main-thread dispatcher. The server host should compose the shared transport and message layer with the new multi-session orchestration path, exposing per-session inspection and events without inheriting Unity-specific assumptions.
|
||||
|
||||
Alternative considered: replace `SharedNetworkRuntime` with one universal runtime abstraction.
|
||||
Rejected because the client and server have materially different composition shapes; forcing one runtime abstraction would hide important ownership boundaries.
|
||||
|
||||
## Risks / Trade-offs
|
||||
|
||||
- [More lifecycle objects] → Mitigation: keep `SessionManager` unchanged and centralize multi-session behavior in one coordinator with focused tests.
|
||||
- [Remote identity choice may leak transport details] → Mitigation: define a narrow session-key abstraction or standardize on the existing remote endpoint identity used by transport callbacks.
|
||||
- [Server cleanup bugs can leave stale sessions behind] → Mitigation: require explicit disconnect/removal scenarios and evaluation tests for session expiry.
|
||||
- [Client API drift during refactor] → Mitigation: keep the single-session runtime path as a first-class supported composition and verify it with regression tests.
|
||||
|
||||
## Migration Plan
|
||||
|
||||
1. Introduce the multi-session coordinator and per-session observation API in the shared network layer.
|
||||
2. Rewire `ServerNetworkHost` to use the coordinator for per-peer lifecycle bookkeeping.
|
||||
3. Leave the client host on the existing single-session runtime path, adding only compatibility glue if necessary.
|
||||
4. Add tests that cover client single-session regressions and server multi-session behavior side by side.
|
||||
|
||||
## Open Questions
|
||||
|
||||
- Whether the server session key should be the transport remote endpoint directly or a narrower shared abstraction.
|
||||
- Whether reconnect scheduling is meaningful for the server side, or should remain configurable per host/session policy.
|
||||
- How much session enumeration should be exposed publicly versus kept internal with event-based observation.
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
## Why
|
||||
|
||||
The current shared networking foundation exposes a single `SessionManager` lifecycle per runtime, which is sufficient for a client connected to one server but not for a server handling multiple remote peers concurrently. Extending the lifecycle model now prevents the server host from forking transport, login, timeout, and reconnect logic away from the shared network core.
|
||||
|
||||
## What Changes
|
||||
|
||||
- Introduce shared multi-session lifecycle orchestration for hosts that manage more than one remote peer at a time.
|
||||
- Preserve the current single-session client flow while adding a server-oriented session collection API keyed by remote identity.
|
||||
- Define how transport events, login results, heartbeat liveness, timeout detection, and reconnect policy are applied per managed session instead of only per runtime.
|
||||
- Clarify which responsibilities stay in the shared session orchestration layer versus host-specific admission, cleanup, and gameplay reactions.
|
||||
|
||||
## Capabilities
|
||||
|
||||
### New Capabilities
|
||||
- `multi-session-lifecycle`: Shared orchestration and observation of multiple concurrent network sessions, especially for server hosts that manage many remote peers.
|
||||
|
||||
### Modified Capabilities
|
||||
- `network-session-lifecycle`: The shared lifecycle vocabulary and heartbeat/reconnect rules must apply to each managed session, not only to a singleton runtime session.
|
||||
- `shared-network-foundation`: The shared runtime foundation must support both client-style single-session composition and server-style multi-session composition without introducing a protocol or transport fork.
|
||||
|
||||
## Impact
|
||||
|
||||
- Affected code: `SharedNetworkRuntime`, `SessionManager`, `ServerNetworkHost`, transport-to-session wiring, and lifecycle-related tests.
|
||||
- New APIs will likely introduce server-facing session lookup, enumeration, and per-session event observation.
|
||||
- Client-side runtime composition should remain compatible, but session orchestration responsibilities will be split more explicitly between single-session and multi-session hosts.
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
## ADDED Requirements
|
||||
|
||||
### Requirement: Multi-session hosts manage per-peer lifecycle state
|
||||
The shared networking core SHALL provide a multi-session lifecycle coordinator for hosts that manage multiple concurrent remote peers. The coordinator MUST maintain distinct per-session lifecycle state keyed by remote identity rather than collapsing all peers into one runtime-level state.
|
||||
|
||||
#### Scenario: Server tracks two peers independently
|
||||
- **WHEN** a server host accepts transport activity from two different remote peers
|
||||
- **THEN** the multi-session coordinator creates or resolves two distinct managed sessions
|
||||
- **THEN** lifecycle changes for one peer do not overwrite or hide the state of the other peer
|
||||
|
||||
### Requirement: Multi-session hosts can observe and evaluate each managed session
|
||||
The multi-session lifecycle coordinator SHALL expose per-session lookup or enumeration and MUST evaluate timeout, heartbeat, login, and reconnect rules for each managed session independently using the shared session lifecycle vocabulary.
|
||||
|
||||
#### Scenario: Timeout affects only one managed session
|
||||
- **WHEN** one managed session stops receiving liveness updates while another session continues receiving heartbeat or message activity
|
||||
- **THEN** the timed-out session transitions through timeout or reconnect states according to policy
|
||||
- **THEN** the active session remains in its current healthy state
|
||||
|
||||
#### Scenario: Host can inspect current managed sessions
|
||||
- **WHEN** server-side code needs to inspect the current connection state of connected peers
|
||||
- **THEN** it can look up or enumerate managed sessions through the multi-session coordinator
|
||||
- **THEN** each entry exposes the shared session lifecycle state for that specific peer
|
||||
|
||||
### Requirement: Session removal is explicit and does not corrupt remaining peers
|
||||
The multi-session lifecycle coordinator SHALL support explicit removal or disconnection handling for one managed session without resetting unrelated sessions that remain active.
|
||||
|
||||
#### Scenario: Disconnect removes one session only
|
||||
- **WHEN** one remote peer disconnects or is evicted by the host
|
||||
- **THEN** the coordinator updates or removes that peer's managed session
|
||||
- **THEN** other managed sessions remain queryable and keep their own lifecycle state
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
## MODIFIED Requirements
|
||||
|
||||
### Requirement: Session lifecycle distinguishes transport and login state
|
||||
The shared networking core SHALL expose an explicit session lifecycle model that distinguishes transport connectivity from login/authentication success. Hosts MUST be able to observe at least disconnected, transport-connected, login-pending, logged-in, login-failed, timed-out, and reconnecting lifecycle states for each managed session without inferring them from unrelated message handlers.
|
||||
|
||||
#### Scenario: Transport connect does not imply login success
|
||||
- **WHEN** the transport establishes a usable remote session but no login success message has been accepted yet
|
||||
- **THEN** the shared lifecycle reports a transport-connected or login-pending state for that managed session
|
||||
- **THEN** it does not report the session as logged in
|
||||
|
||||
#### Scenario: Login success advances lifecycle independently
|
||||
- **WHEN** the client or server session manager receives a successful login/authentication result for an active transport session
|
||||
- **THEN** the shared lifecycle transitions that managed session into the logged-in state
|
||||
- **THEN** hosts can react to that state change without conflating it with transport establishment
|
||||
|
||||
### Requirement: Timeout and reconnect are session-manager responsibilities
|
||||
The shared networking core SHALL manage timeout detection, disconnect transitions, and reconnect scheduling through session-manager components rather than implementing those decisions inside business message handlers. Hosts that manage multiple concurrent peers MUST apply these rules independently per managed session rather than collapsing timeout or reconnect state to the entire runtime.
|
||||
|
||||
#### Scenario: Timeout produces an observable reconnect transition
|
||||
- **WHEN** a reconnect-capable host has a session that times out
|
||||
- **THEN** the session manager emits a timeout-related lifecycle transition for that managed session
|
||||
- **THEN** it can subsequently move the session into a reconnecting or reconnect-pending state according to configured policy
|
||||
|
||||
#### Scenario: Login failure is distinct from transport disconnect
|
||||
- **WHEN** authentication or login fails while the transport session is still active
|
||||
- **THEN** the shared lifecycle reports a login-failed state for that managed session
|
||||
- **THEN** hosts can handle that failure separately from a transport disconnect or heartbeat timeout
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
## MODIFIED Requirements
|
||||
|
||||
### Requirement: Shared runtime owns host-agnostic session lifecycle orchestration
|
||||
The shared network foundation SHALL include host-agnostic session lifecycle orchestration alongside transport startup and message routing. Client and server hosts MUST be able to compose the shared foundation with session orchestration that consumes transport events, login results, and heartbeat signals without depending on Unity-specific runtime types, while supporting both single-session client composition and multi-session server composition.
|
||||
|
||||
#### Scenario: Client host composes runtime with single-session lifecycle manager
|
||||
- **WHEN** the Unity client constructs its shared networking runtime
|
||||
- **THEN** that runtime includes shared session lifecycle management for its single remote session in addition to transport and message routing
|
||||
- **THEN** Unity-specific code remains responsible only for reacting to lifecycle state changes and driving host behavior
|
||||
|
||||
#### Scenario: Server host composes shared foundation with multi-session orchestration
|
||||
- **WHEN** a non-Unity server host constructs the runtime networking stack for multiple remote peers
|
||||
- **THEN** it uses the shared transport and message-routing foundation together with shared multi-session lifecycle orchestration
|
||||
- **THEN** server-specific cleanup, admission, and gameplay reactions stay in the server host adapter rather than forking the shared lifecycle contract
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
## 1. Shared Multi-Session Lifecycle Core
|
||||
|
||||
- [x] 1.1 Introduce a shared multi-session lifecycle coordinator that owns per-session `SessionManager` instances keyed by remote identity.
|
||||
- [x] 1.2 Add APIs for per-session lookup, enumeration, lifecycle event observation, and explicit session removal without changing the existing session state vocabulary.
|
||||
- [x] 1.3 Route transport activity, login results, heartbeat updates, timeout evaluation, and reconnect bookkeeping through the coordinator on a per-session basis.
|
||||
|
||||
## 2. Host Composition
|
||||
|
||||
- [x] 2.1 Rework `ServerNetworkHost` to use the shared multi-session coordinator instead of exposing only one runtime-level `SessionManager`.
|
||||
- [x] 2.2 Preserve the current client-side single-session composition path so `NetworkManager` and `SharedNetworkRuntime` remain valid for one-server connectivity.
|
||||
- [x] 2.3 Define how remote identity is mapped into session keys and ensure session cleanup does not disturb unrelated peers.
|
||||
|
||||
## 3. Verification
|
||||
|
||||
- [x] 3.1 Add edit-mode tests that verify two or more server-side sessions can progress through login, timeout, and disconnect independently.
|
||||
- [x] 3.2 Add regression tests that confirm the client-side single-session lifecycle path still behaves as before.
|
||||
- [x] 3.3 Build the edit-mode test project and run the network-related test suite to confirm no lifecycle regressions remain.
|
||||
|
||||
## 4. Documentation
|
||||
|
||||
- [x] 4.1 Update `CodeX-TODO.md` to reflect that stage 5 lifecycle support now covers server-side multi-session management.
|
||||
- [x] 4.2 Document the new shared multi-session entry points and server-observable session states in change-related docs or code comments where needed.
|
||||
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
schema: spec-driven
|
||||
created: 2026-03-27
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
## Context
|
||||
|
||||
The project already shares transport, envelope parsing, and message dispatch between Unity client and non-Unity server hosts. Stage 4 established that transport callbacks do not execute gameplay handlers inline, but session lifecycle behavior is still fragmented: transport establishment, login success, heartbeat timeout, disconnect, and reconnect intent are not modeled as separate states, and heartbeat logic is still entangled with business flow. Stage 5 needs a single host-agnostic lifecycle layer before later QoS and sync work build on unstable assumptions.
|
||||
|
||||
## Goals / Non-Goals
|
||||
|
||||
**Goals:**
|
||||
- Introduce an explicit shared connection state model that distinguishes transport connection, authentication/login progress, steady-state session health, timeout, and reconnect intent.
|
||||
- Centralize heartbeat timing, timeout detection, and reconnect scheduling into a shared session manager instead of distributing them across handlers and host code.
|
||||
- Keep heartbeat infrastructure-focused: liveness detection, RTT measurement, and time sync only.
|
||||
- Allow Unity client and non-Unity server hosts to observe the same lifecycle events while keeping host-specific reactions outside the shared core.
|
||||
|
||||
**Non-Goals:**
|
||||
- Reworking message QoS or transport reliability semantics.
|
||||
- Changing the envelope protocol or replacing KCP.
|
||||
- Designing gameplay-specific reconnect UX, character resync, or rollback rules.
|
||||
- Adding final production telemetry; detailed metrics belong to a later stage.
|
||||
|
||||
## Decisions
|
||||
|
||||
### Use an explicit shared connection state enum and session manager
|
||||
A dedicated `SessionManager` backed by a `ConnectionState` model keeps lifecycle transitions in one place and makes transport-connected, login-pending, logged-in, timed-out, reconnecting, and login-failed states observable. This is better than inferring state from scattered booleans in handlers because later features such as reconnect backoff and QoS splitting need a stable state machine boundary.
|
||||
|
||||
Alternative considered: keep lifecycle flags in `NetworkManager` and server host adapters. Rejected because it would fork client/server behavior again right after the shared-network-foundation refactor.
|
||||
|
||||
### Treat heartbeat as infrastructure signals, not business state ownership
|
||||
Heartbeat messages feed the session manager with liveness timestamps, RTT samples, and time-sync data, but they do not themselves declare login success or trigger reconnect policy. This keeps message handlers narrow and prevents hidden coupling where missing a heartbeat implicitly mutates business session state in unrelated code.
|
||||
|
||||
Alternative considered: let heartbeat handlers directly disconnect or reconnect. Rejected because it recreates the current layering problem and makes timeout policy hard to test.
|
||||
|
||||
### Make shared runtime own lifecycle orchestration, hosts own reactions
|
||||
`SharedNetworkRuntime` should compose transport, message routing, and session lifecycle so both client and server use the same lifecycle rules. Unity `NetworkManager` and `ServerNetworkHost` consume lifecycle events and decide what to do next, such as UI updates, reconnect attempts, or server-side cleanup.
|
||||
|
||||
Alternative considered: introduce separate client/server session managers. Rejected because stage five explicitly aims to share the low-level network contract and keep host differences at the adapter layer.
|
||||
|
||||
### Represent reconnect as a scheduler policy, not an immediate side effect
|
||||
Reconnect should be modeled as a transition into a reconnect-pending or reconnecting state with policy-owned timing, instead of directly restarting transport from inside a timeout callback. That makes backoff, disable/enable behavior, and tests deterministic.
|
||||
|
||||
Alternative considered: reconnect immediately inside timeout detection. Rejected because it couples timer evaluation to transport startup side effects and makes repeated failures difficult to reason about.
|
||||
|
||||
## Risks / Trade-offs
|
||||
|
||||
- [Risk] Introducing a state machine can expose ambiguities in existing login/heartbeat handlers. -> Mitigation: define explicit transition sources and add tests for login success, login failure, heartbeat timeout, disconnect, and reconnect scheduling.
|
||||
- [Risk] Shared lifecycle ownership can blur the boundary between transport events and business authentication events. -> Mitigation: keep transport state inputs and login result inputs as separate session-manager APIs.
|
||||
- [Risk] Reconnect policy may require host-specific decisions later. -> Mitigation: keep policy configuration injectable and host reactions event-driven rather than hardcoding Unity-only behavior.
|
||||
- [Risk] Existing code may already assume that "connected" means "logged in". -> Mitigation: update host-facing APIs and TODO/documentation so callers consume explicit lifecycle states instead of legacy booleans.
|
||||
|
||||
## Migration Plan
|
||||
|
||||
1. Add shared lifecycle types (`ConnectionState`, session event model, heartbeat policy/session manager) without removing existing login/heartbeat flows yet.
|
||||
2. Route transport-connect, login-result, heartbeat-received, and timeout inputs through the new session manager.
|
||||
3. Update Unity client and server host adapters to observe lifecycle state changes from the shared runtime.
|
||||
4. Remove or simplify duplicated timeout/reconnect logic from message handlers and host code.
|
||||
5. Add edit mode tests that lock state transitions before beginning Stage 6 QoS work.
|
||||
|
||||
## Open Questions
|
||||
|
||||
- Whether the server host needs the exact same reconnect scheduling primitives as the client, or only the same state vocabulary.
|
||||
- Whether login failure should transition to `Disconnected` immediately after reporting failure, or remain in a stable `LoginFailed` state until the host decides the next action.
|
||||
- How much time-sync state should live in the session manager versus a separate clock-sync helper once Stage 6 begins.
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
## Why
|
||||
|
||||
Current networking distinguishes transport delivery from Unity threading, but it still does not distinguish transport connectivity, login success, heartbeat timeout, and reconnect flow as first-class session states. Stage 5 is needed now so client and server can share one coherent lifecycle model before QoS and sync optimization build on top of ambiguous state handling.
|
||||
|
||||
## What Changes
|
||||
|
||||
- Add a shared session lifecycle module that models disconnected, transport-connected, login-pending, logged-in, timeout, reconnecting, and login-failed states explicitly.
|
||||
- Add a heartbeat policy that is limited to liveness checks, RTT measurement, and time synchronization, without owning login or reconnect decisions directly.
|
||||
- Move timeout detection, disconnect transitions, and reconnect scheduling into a shared session manager instead of leaving them in message handlers or host-specific business code.
|
||||
- Expose lifecycle state changes and session events so Unity client host and non-Unity server host can react without forking the underlying network core.
|
||||
|
||||
## Capabilities
|
||||
|
||||
### New Capabilities
|
||||
- `network-session-lifecycle`: Shared connection, login, heartbeat, timeout, and reconnect state management for client and server hosts.
|
||||
|
||||
### Modified Capabilities
|
||||
- `shared-network-foundation`: The shared runtime now includes host-agnostic session lifecycle management in addition to transport and message routing.
|
||||
|
||||
## Impact
|
||||
|
||||
- Affected code: `SharedNetworkRuntime`, `MessageManager`, `NetworkManager`, `ServerNetworkHost`, login/heartbeat handlers, and new session manager/state types.
|
||||
- Affected behavior: login success no longer implies transport connect, heartbeat becomes a narrow infrastructure concern, and reconnect logic moves out of ad hoc business paths.
|
||||
- Affected tests: edit mode tests need coverage for lifecycle transitions, timeout handling, login failure, and reconnect scheduling across shared client/server runtime paths.
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
## ADDED Requirements
|
||||
|
||||
### Requirement: Session lifecycle distinguishes transport and login state
|
||||
The shared networking core SHALL expose an explicit session lifecycle model that distinguishes transport connectivity from login/authentication success. Hosts MUST be able to observe at least disconnected, transport-connected, login-pending, logged-in, login-failed, timed-out, and reconnecting lifecycle states without inferring them from unrelated message handlers.
|
||||
|
||||
#### Scenario: Transport connect does not imply login success
|
||||
- **WHEN** the transport establishes a usable remote session but no login success message has been accepted yet
|
||||
- **THEN** the shared lifecycle reports a transport-connected or login-pending state
|
||||
- **THEN** it does not report the session as logged in
|
||||
|
||||
#### Scenario: Login success advances lifecycle independently
|
||||
- **WHEN** the client or server session manager receives a successful login/authentication result for an active transport session
|
||||
- **THEN** the shared lifecycle transitions that session into the logged-in state
|
||||
- **THEN** hosts can react to that state change without conflating it with transport establishment
|
||||
|
||||
### Requirement: Heartbeat is limited to liveness, RTT, and time sync
|
||||
The shared session lifecycle SHALL treat heartbeat traffic as infrastructure input for liveness detection, round-trip-time measurement, and clock synchronization only. Heartbeat processing MUST NOT itself own login success, login failure, or reconnect policy decisions.
|
||||
|
||||
#### Scenario: Heartbeat updates liveness and RTT only
|
||||
- **WHEN** a heartbeat response is received for an active session
|
||||
- **THEN** the session manager updates last-seen or timeout bookkeeping and RTT or clock-sync data
|
||||
- **THEN** it does not mark the session logged in solely because the heartbeat succeeded
|
||||
|
||||
#### Scenario: Missing heartbeat triggers timeout state
|
||||
- **WHEN** the configured heartbeat timeout elapses without a required heartbeat or other liveness signal
|
||||
- **THEN** the session lifecycle transitions the session into a timed-out state
|
||||
- **THEN** reconnect handling is delegated to the lifecycle reconnect policy rather than hidden inside the heartbeat handler itself
|
||||
|
||||
### Requirement: Timeout and reconnect are session-manager responsibilities
|
||||
The shared networking core SHALL manage timeout detection, disconnect transitions, and reconnect scheduling through a session-manager component rather than implementing those decisions inside business message handlers.
|
||||
|
||||
#### Scenario: Timeout produces an observable reconnect transition
|
||||
- **WHEN** a reconnect-capable host has a session that times out
|
||||
- **THEN** the session manager emits a timeout-related lifecycle transition
|
||||
- **THEN** it can subsequently move the session into a reconnecting or reconnect-pending state according to configured policy
|
||||
|
||||
#### Scenario: Login failure is distinct from transport disconnect
|
||||
- **WHEN** authentication or login fails while the transport session is still active
|
||||
- **THEN** the shared lifecycle reports a login-failed state
|
||||
- **THEN** hosts can handle that failure separately from a transport disconnect or heartbeat timeout
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
## ADDED Requirements
|
||||
|
||||
### Requirement: Shared runtime owns host-agnostic session lifecycle orchestration
|
||||
The shared network foundation SHALL include host-agnostic session lifecycle orchestration alongside transport startup and message routing. Client and server hosts MUST be able to compose the same shared runtime with a session manager that consumes transport events, login results, and heartbeat signals without depending on Unity-specific runtime types.
|
||||
|
||||
#### Scenario: Client host composes runtime with lifecycle manager
|
||||
- **WHEN** the Unity client constructs its shared networking runtime
|
||||
- **THEN** that runtime includes shared session lifecycle management in addition to transport and message routing
|
||||
- **THEN** Unity-specific code remains responsible only for reacting to lifecycle state changes and driving host behavior
|
||||
|
||||
#### Scenario: Server host observes the same lifecycle vocabulary
|
||||
- **WHEN** a non-Unity server host composes the shared networking runtime
|
||||
- **THEN** it uses the same lifecycle state model and session-manager abstractions as the client-side shared runtime
|
||||
- **THEN** server-specific cleanup or admission behavior stays in the server host adapter rather than forking the shared core contract
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
## 1. Shared Lifecycle Model
|
||||
|
||||
- [x] 1.1 Add shared lifecycle types for connection state, session events, and heartbeat/reconnect policy configuration.
|
||||
- [x] 1.2 Implement a host-agnostic session manager that consumes transport-connected, login-result, heartbeat, timeout, and disconnect inputs.
|
||||
|
||||
## 2. Runtime Integration
|
||||
|
||||
- [x] 2.1 Extend `SharedNetworkRuntime` to compose the session manager alongside transport and message routing.
|
||||
- [x] 2.2 Update `NetworkManager` and `ServerNetworkHost` to observe explicit lifecycle state changes instead of inferring session health from ad hoc flags.
|
||||
|
||||
## 3. Heartbeat And Login Flow Cleanup
|
||||
|
||||
- [x] 3.1 Refactor login and heartbeat handlers so heartbeat only updates liveness, RTT, and time-sync state.
|
||||
- [x] 3.2 Remove timeout and reconnect decisions from business handlers and route them through session-manager policy APIs.
|
||||
|
||||
## 4. Verification And Documentation
|
||||
|
||||
- [x] 4.1 Add edit mode tests for transport-connected vs logged-in distinction, login failure, heartbeat timeout, and reconnect scheduling.
|
||||
- [x] 4.2 Update `CodeX-TODO.md` and related network docs to reflect the new lifecycle layering and stage-five completion criteria.
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
# multi-session-lifecycle Specification
|
||||
|
||||
## Purpose
|
||||
Define the shared orchestration model for hosts that manage multiple concurrent network sessions while preserving the existing per-session lifecycle vocabulary.
|
||||
|
||||
## Requirements
|
||||
### Requirement: Multi-session hosts manage per-peer lifecycle state
|
||||
The shared networking core SHALL provide a multi-session lifecycle coordinator for hosts that manage multiple concurrent remote peers. The coordinator MUST maintain distinct per-session lifecycle state keyed by remote identity rather than collapsing all peers into one runtime-level state.
|
||||
|
||||
#### Scenario: Server tracks two peers independently
|
||||
- **WHEN** a server host accepts transport activity from two different remote peers
|
||||
- **THEN** the multi-session coordinator creates or resolves two distinct managed sessions
|
||||
- **THEN** lifecycle changes for one peer do not overwrite or hide the state of the other peer
|
||||
|
||||
### Requirement: Multi-session hosts can observe and evaluate each managed session
|
||||
The multi-session lifecycle coordinator SHALL expose per-session lookup or enumeration and MUST evaluate timeout, heartbeat, login, and reconnect rules for each managed session independently using the shared session lifecycle vocabulary.
|
||||
|
||||
#### Scenario: Timeout affects only one managed session
|
||||
- **WHEN** one managed session stops receiving liveness updates while another session continues receiving heartbeat or message activity
|
||||
- **THEN** the timed-out session transitions through timeout or reconnect states according to policy
|
||||
- **THEN** the active session remains in its current healthy state
|
||||
|
||||
#### Scenario: Host can inspect current managed sessions
|
||||
- **WHEN** server-side code needs to inspect the current connection state of connected peers
|
||||
- **THEN** it can look up or enumerate managed sessions through the multi-session coordinator
|
||||
- **THEN** each entry exposes the shared session lifecycle state for that specific peer
|
||||
|
||||
### Requirement: Session removal is explicit and does not corrupt remaining peers
|
||||
The multi-session lifecycle coordinator SHALL support explicit removal or disconnection handling for one managed session without resetting unrelated sessions that remain active.
|
||||
|
||||
#### Scenario: Disconnect removes one session only
|
||||
- **WHEN** one remote peer disconnects or is evicted by the host
|
||||
- **THEN** the coordinator updates or removes that peer's managed session
|
||||
- **THEN** other managed sessions remain queryable and keep their own lifecycle state
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
# network-session-lifecycle Specification
|
||||
|
||||
## Purpose
|
||||
Define the shared session lifecycle model that separates transport connectivity, login state, heartbeat liveness, timeout detection, and reconnect scheduling for client and server hosts.
|
||||
|
||||
## Requirements
|
||||
### Requirement: Session lifecycle distinguishes transport and login state
|
||||
The shared networking core SHALL expose an explicit session lifecycle model that distinguishes transport connectivity from login/authentication success. Hosts MUST be able to observe at least disconnected, transport-connected, login-pending, logged-in, login-failed, timed-out, and reconnecting lifecycle states for each managed session without inferring them from unrelated message handlers.
|
||||
|
||||
#### Scenario: Transport connect does not imply login success
|
||||
- **WHEN** the transport establishes a usable remote session but no login success message has been accepted yet
|
||||
- **THEN** the shared lifecycle reports a transport-connected or login-pending state for that managed session
|
||||
- **THEN** it does not report the session as logged in
|
||||
|
||||
#### Scenario: Login success advances lifecycle independently
|
||||
- **WHEN** the client or server session manager receives a successful login/authentication result for an active transport session
|
||||
- **THEN** the shared lifecycle transitions that managed session into the logged-in state
|
||||
- **THEN** hosts can react to that state change without conflating it with transport establishment
|
||||
|
||||
### Requirement: Heartbeat is limited to liveness, RTT, and time sync
|
||||
The shared session lifecycle SHALL treat heartbeat traffic as infrastructure input for liveness detection, round-trip-time measurement, and clock synchronization only. Heartbeat processing MUST NOT itself own login success, login failure, or reconnect policy decisions.
|
||||
|
||||
#### Scenario: Heartbeat updates liveness and RTT only
|
||||
- **WHEN** a heartbeat response is received for an active session
|
||||
- **THEN** the session manager updates last-seen or timeout bookkeeping and RTT or clock-sync data
|
||||
- **THEN** it does not mark the session logged in solely because the heartbeat succeeded
|
||||
|
||||
#### Scenario: Missing heartbeat triggers timeout state
|
||||
- **WHEN** the configured heartbeat timeout elapses without a required heartbeat or other liveness signal
|
||||
- **THEN** the session lifecycle transitions the session into a timed-out state
|
||||
- **THEN** reconnect handling is delegated to the lifecycle reconnect policy rather than hidden inside the heartbeat handler itself
|
||||
|
||||
### Requirement: Timeout and reconnect are session-manager responsibilities
|
||||
The shared networking core SHALL manage timeout detection, disconnect transitions, and reconnect scheduling through session-manager components rather than implementing those decisions inside business message handlers. Hosts that manage multiple concurrent peers MUST apply these rules independently per managed session rather than collapsing timeout or reconnect state to the entire runtime.
|
||||
|
||||
#### Scenario: Timeout produces an observable reconnect transition
|
||||
- **WHEN** a reconnect-capable host has a session that times out
|
||||
- **THEN** the session manager emits a timeout-related lifecycle transition for that managed session
|
||||
- **THEN** it can subsequently move the session into a reconnecting or reconnect-pending state according to configured policy
|
||||
|
||||
#### Scenario: Login failure is distinct from transport disconnect
|
||||
- **WHEN** authentication or login fails while the transport session is still active
|
||||
- **THEN** the shared lifecycle reports a login-failed state for that managed session
|
||||
- **THEN** hosts can handle that failure separately from a transport disconnect or heartbeat timeout
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
# shared-network-foundation Specification
|
||||
|
||||
## Purpose
|
||||
Define the shared transport and message-routing foundation that both client and server hosts use without depending on Unity-specific runtime host classes.
|
||||
Define the shared transport, session-lifecycle, and message-routing foundation that both client and server hosts use without depending on Unity-specific runtime host classes.
|
||||
|
||||
## Requirements
|
||||
### Requirement: Shared network core is host-agnostic
|
||||
|
|
@ -37,3 +37,16 @@ The shared client/server foundation SHALL preserve the existing `ITransport` sen
|
|||
- **WHEN** a client host sends a business message through the shared core to a server host using the shared core
|
||||
- **THEN** the message is encoded using the same envelope contract on the client side
|
||||
- **THEN** the server host decodes and routes it through the shared message-routing layer without a host-specific protocol fork
|
||||
|
||||
### Requirement: Shared runtime owns host-agnostic session lifecycle orchestration
|
||||
The shared network foundation SHALL include host-agnostic session lifecycle orchestration alongside transport startup and message routing. Client and server hosts MUST be able to compose the shared foundation with session orchestration that consumes transport events, login results, and heartbeat signals without depending on Unity-specific runtime types, while supporting both single-session client composition and multi-session server composition.
|
||||
|
||||
#### Scenario: Client host composes runtime with single-session lifecycle manager
|
||||
- **WHEN** the Unity client constructs its shared networking runtime
|
||||
- **THEN** that runtime includes shared session lifecycle management for its single remote session in addition to transport and message routing
|
||||
- **THEN** Unity-specific code remains responsible only for reacting to lifecycle state changes and driving host behavior
|
||||
|
||||
#### Scenario: Server host composes shared foundation with multi-session orchestration
|
||||
- **WHEN** a non-Unity server host constructs the runtime networking stack for multiple remote peers
|
||||
- **THEN** it uses the shared transport and message-routing foundation together with shared multi-session lifecycle orchestration
|
||||
- **THEN** server-specific cleanup, admission, and gameplay reactions stay in the server host adapter rather than forking the shared lifecycle contract
|
||||
|
|
|
|||
Loading…
Reference in New Issue