调整嵌套类位置 + 制订阶段 5 task

This commit is contained in:
SepComet 2026-03-27 16:37:43 +08:00
parent f21a4c51d2
commit ca26ab8e38
11 changed files with 446 additions and 213 deletions

View File

@ -0,0 +1,221 @@
using System;
using System.Net;
using System.Runtime.InteropServices;
using kcp;
namespace Network.NetworkTransport
{
public partial class KcpTransport
{
    /// <summary>
    /// Per-remote-endpoint KCP session. Wraps one unmanaged <c>IKCPCB</c> control
    /// block, serializes every KCP call behind a private gate, and routes KCP
    /// output segments back through the owning transport's socket.
    /// </summary>
    private sealed unsafe class KcpSession : IDisposable
    {
        private readonly KcpTransport _owner;
        // Guards _kcp, _disposed and _nextUpdateAt; all KCP.* calls happen under this lock.
        private readonly object _gate = new();
        // Keeps this session reachable from the static output callback via the KCP user pointer.
        private readonly GCHandle _handle;
        private IKCPCB* _kcp;
        private bool _disposed;
        // Next ikcp_update deadline, as reported by ikcp_check (KCP millisecond clock).
        private uint _nextUpdateAt;

        /// <summary>
        /// Creates the KCP control block for <paramref name="conv"/>, wires the
        /// output callback, and applies the transport's default tuning
        /// (nodelay, window sizes, MTU).
        /// </summary>
        /// <exception cref="ArgumentNullException">Owner or endpoint is null.</exception>
        /// <exception cref="InvalidOperationException">The KCP control block could not be created.</exception>
        public KcpSession(KcpTransport owner, IPEndPoint remoteEndPoint, uint conv)
        {
            _owner = owner ?? throw new ArgumentNullException(nameof(owner));
            RemoteEndPoint = remoteEndPoint ?? throw new ArgumentNullException(nameof(remoteEndPoint));
            Conv = conv;
            LastActivityUtc = DateTime.UtcNow;
            _handle = GCHandle.Alloc(this);
            try
            {
                _kcp = KCP.ikcp_create(conv, (void*)GCHandle.ToIntPtr(_handle));
                if (_kcp == null)
                {
                    throw new InvalidOperationException("Failed to create KCP control block.");
                }
                KCP.ikcp_setoutput(_kcp, &OutputCallback);
                KCP.ikcp_nodelay(_kcp, DefaultNoDelay, DefaultInterval, DefaultResend, DefaultNc);
                KCP.ikcp_wndsize(_kcp, DefaultSendWindow, DefaultReceiveWindow);
                KCP.ikcp_setmtu(_kcp, DefaultMtu);
                _nextUpdateAt = GetCurrentTimeMilliseconds();
            }
            catch
            {
                // Construction failed part-way: release the native block (if any)
                // and free the pinned handle so neither leaks.
                if (_kcp != null)
                {
                    KCP.ikcp_release(_kcp);
                    _kcp = null;
                }
                _handle.Free();
                throw;
            }
        }

        /// <summary>KCP conversation id shared with the remote peer.</summary>
        public uint Conv { get; }

        /// <summary>Remote UDP endpoint this session talks to.</summary>
        public IPEndPoint RemoteEndPoint { get; }

        /// <summary>UTC timestamp of the last successful send, input, or receive.</summary>
        public DateTime LastActivityUtc { get; private set; }

        /// <summary>
        /// Queues <paramref name="payload"/> for reliable delivery through KCP.
        /// Empty payloads are ignored.
        /// </summary>
        /// <exception cref="ArgumentNullException">Payload is null.</exception>
        /// <exception cref="ObjectDisposedException">The session has been disposed.</exception>
        /// <exception cref="InvalidOperationException">ikcp_send reported an error.</exception>
        public void Send(byte[] payload)
        {
            if (payload == null)
            {
                throw new ArgumentNullException(nameof(payload));
            }
            lock (_gate)
            {
                ThrowIfDisposed();
                if (payload.Length == 0)
                {
                    return;
                }
                fixed (byte* buffer = payload)
                {
                    var result = KCP.ikcp_send(_kcp, buffer, payload.Length);
                    if (result < 0)
                    {
                        throw new InvalidOperationException($"KCP send failed with error code {result}.");
                    }
                }
                LastActivityUtc = DateTime.UtcNow;
                // Drive an immediate update instead of waiting for the next scheduled tick.
                UpdateNoLock(GetCurrentTimeMilliseconds());
            }
        }

        /// <summary>
        /// Feeds a raw inbound datagram into KCP. Input errors are logged and
        /// swallowed so malformed network data cannot tear down the session.
        /// </summary>
        /// <exception cref="ArgumentNullException">Datagram is null.</exception>
        /// <exception cref="ObjectDisposedException">The session has been disposed.</exception>
        public void Input(byte[] datagram)
        {
            if (datagram == null)
            {
                throw new ArgumentNullException(nameof(datagram));
            }
            if (datagram.Length == 0)
            {
                return;
            }
            lock (_gate)
            {
                ThrowIfDisposed();
                fixed (byte* buffer = datagram)
                {
                    var result = KCP.ikcp_input(_kcp, buffer, datagram.Length);
                    if (result < 0)
                    {
                        Console.WriteLine($"[KcpTransport] KCP input failed for {RemoteEndPoint}: {result}");
                        return;
                    }
                }
                LastActivityUtc = DateTime.UtcNow;
                UpdateNoLock(GetCurrentTimeMilliseconds());
            }
        }

        /// <summary>
        /// Attempts to dequeue one fully reassembled message from KCP.
        /// Returns false (with a null payload) when the session is disposed or
        /// no complete message is available.
        /// </summary>
        public bool TryReceive(out byte[] payload)
        {
            lock (_gate)
            {
                if (_disposed)
                {
                    payload = null;
                    return false;
                }
                var size = KCP.ikcp_peeksize(_kcp);
                if (size <= 0)
                {
                    payload = null;
                    return false;
                }
                payload = new byte[size];
                fixed (byte* buffer = payload)
                {
                    var result = KCP.ikcp_recv(_kcp, buffer, payload.Length);
                    if (result < 0)
                    {
                        payload = null;
                        return false;
                    }
                    if (result != payload.Length)
                    {
                        // ikcp_recv returned fewer bytes than peeked; trim to the actual size.
                        Array.Resize(ref payload, result);
                    }
                }
                LastActivityUtc = DateTime.UtcNow;
                return true;
            }
        }

        /// <summary>
        /// Runs ikcp_update only when the deadline cached from ikcp_check has
        /// passed. Safe to call on every transport tick; no-op after disposal.
        /// </summary>
        public void UpdateIfDue(uint current)
        {
            lock (_gate)
            {
                if (_disposed)
                {
                    return;
                }
                if (KCP._itimediff(current, _nextUpdateAt) < 0)
                {
                    return;
                }
                UpdateNoLock(current);
            }
        }

        /// <summary>
        /// Releases the native KCP control block and frees the pinned handle.
        /// Idempotent and safe to call concurrently with other session calls.
        /// </summary>
        public void Dispose()
        {
            lock (_gate)
            {
                if (_disposed)
                {
                    return;
                }
                _disposed = true;
                if (_kcp != null)
                {
                    KCP.ikcp_release(_kcp);
                    _kcp = null;
                }
                if (_handle.IsAllocated)
                {
                    _handle.Free();
                }
            }
        }

        // Runs ikcp_update and caches the next due time. Caller must hold _gate.
        private void UpdateNoLock(uint current)
        {
            KCP.ikcp_update(_kcp, current);
            _nextUpdateAt = KCP.ikcp_check(_kcp, current);
        }

        // Caller must hold _gate.
        private void ThrowIfDisposed()
        {
            if (_disposed || _kcp == null)
            {
                throw new ObjectDisposedException(nameof(KcpSession));
            }
        }

        // Forwards a KCP-produced segment to the owning transport's socket.
        private int SendRaw(byte* buffer, int length)
        {
            return _owner.SendDatagram(buffer, length, RemoteEndPoint);
        }

        // KCP output callback: recovers the session from the user pointer set at
        // ikcp_create time and sends the segment; -1 tells KCP the send failed.
        private static int OutputCallback(byte* buffer, int length, IKCPCB* kcp, void* user)
        {
            if (user == null)
            {
                return -1;
            }
            var handle = GCHandle.FromIntPtr((IntPtr)user);
            if (handle.Target is not KcpSession session)
            {
                return -1;
            }
            return session.SendRaw(buffer, length);
        }
    }
}
}

View File

@ -0,0 +1,3 @@
fileFormatVersion: 2
guid: 63fb533d620e4ac0bf15ba0a9a71331e
timeCreated: 1774593005

View File

@ -9,7 +9,7 @@ using kcp;
namespace Network.NetworkTransport
{
public class KcpTransport : ITransport
public partial class KcpTransport : ITransport
{
private const uint DefaultConv = 1;
private const int DefaultNoDelay = 1;
@ -127,7 +127,8 @@ namespace Network.NetworkTransport
if (!_isServer && !target.Equals(_defaultRemoteEndPoint))
{
throw new InvalidOperationException("Client mode only supports the configured default remote endpoint.");
throw new InvalidOperationException(
"Client mode only supports the configured default remote endpoint.");
}
var session = GetOrCreateSession(target, _defaultConv);
@ -318,215 +319,6 @@ namespace Network.NetworkTransport
}
}
// Per-remote-endpoint KCP session. Wraps one unmanaged IKCPCB control block,
// serializes every KCP call behind _gate, and releases native state via Dispose.
private unsafe sealed class KcpSession : IDisposable
{
private readonly KcpTransport _owner;
// Guards _kcp, _disposed and _nextUpdateAt; all KCP.* calls happen under this lock.
private readonly object _gate = new();
// Keeps this session reachable from the static output callback via the KCP user pointer.
private readonly GCHandle _handle;
private IKCPCB* _kcp;
private bool _disposed;
// Next ikcp_update deadline, as reported by ikcp_check.
private uint _nextUpdateAt;
// Creates the control block for conv, wires the output callback, and applies
// the transport's default tuning (nodelay, window sizes, MTU).
// NOTE(review): if ikcp_create or a later setup call fails here, the pinned
// GCHandle allocated above is never freed — confirm/fix at the new file location.
public KcpSession(KcpTransport owner, IPEndPoint remoteEndPoint, uint conv)
{
_owner = owner ?? throw new ArgumentNullException(nameof(owner));
RemoteEndPoint = remoteEndPoint ?? throw new ArgumentNullException(nameof(remoteEndPoint));
Conv = conv;
LastActivityUtc = DateTime.UtcNow;
_handle = GCHandle.Alloc(this);
_kcp = KCP.ikcp_create(conv, (void*)GCHandle.ToIntPtr(_handle));
KCP.ikcp_setoutput(_kcp, &OutputCallback);
KCP.ikcp_nodelay(_kcp, DefaultNoDelay, DefaultInterval, DefaultResend, DefaultNc);
KCP.ikcp_wndsize(_kcp, DefaultSendWindow, DefaultReceiveWindow);
KCP.ikcp_setmtu(_kcp, DefaultMtu);
_nextUpdateAt = GetCurrentTimeMilliseconds();
}
// KCP conversation id shared with the remote peer.
public uint Conv { get; }
// Remote UDP endpoint this session talks to.
public IPEndPoint RemoteEndPoint { get; }
// UTC timestamp of the last successful send, input, or receive.
public DateTime LastActivityUtc { get; private set; }
// Queues payload for reliable delivery through KCP; empty payloads are ignored.
// Throws ArgumentNullException, ObjectDisposedException, or
// InvalidOperationException when ikcp_send reports an error.
public void Send(byte[] payload)
{
if (payload == null)
{
throw new ArgumentNullException(nameof(payload));
}
lock (_gate)
{
ThrowIfDisposed();
if (payload.Length == 0)
{
return;
}
fixed (byte* buffer = payload)
{
var result = KCP.ikcp_send(_kcp, buffer, payload.Length);
if (result < 0)
{
throw new InvalidOperationException($"KCP send failed with error code {result}.");
}
}
LastActivityUtc = DateTime.UtcNow;
// Drive an immediate update instead of waiting for the next scheduled tick.
UpdateNoLock(GetCurrentTimeMilliseconds());
}
}
// Feeds a raw inbound datagram into KCP. Input errors are logged and swallowed
// so malformed network data cannot tear down the session.
public void Input(byte[] datagram)
{
if (datagram == null)
{
throw new ArgumentNullException(nameof(datagram));
}
if (datagram.Length == 0)
{
return;
}
lock (_gate)
{
ThrowIfDisposed();
fixed (byte* buffer = datagram)
{
var result = KCP.ikcp_input(_kcp, buffer, datagram.Length);
if (result < 0)
{
Console.WriteLine($"[KcpTransport] KCP input failed for {RemoteEndPoint}: {result}");
return;
}
}
LastActivityUtc = DateTime.UtcNow;
UpdateNoLock(GetCurrentTimeMilliseconds());
}
}
// Attempts to dequeue one fully reassembled message from KCP. Returns false
// (with a null payload) when disposed or no complete message is available.
public bool TryReceive(out byte[] payload)
{
lock (_gate)
{
if (_disposed)
{
payload = null;
return false;
}
var size = KCP.ikcp_peeksize(_kcp);
if (size <= 0)
{
payload = null;
return false;
}
payload = new byte[size];
fixed (byte* buffer = payload)
{
var result = KCP.ikcp_recv(_kcp, buffer, payload.Length);
if (result < 0)
{
payload = null;
return false;
}
if (result != payload.Length)
{
// ikcp_recv returned fewer bytes than peeked; trim to the actual size.
Array.Resize(ref payload, result);
}
}
LastActivityUtc = DateTime.UtcNow;
return true;
}
}
// Runs ikcp_update only when the deadline cached from ikcp_check has passed.
// Safe to call on every transport tick; no-op after disposal.
public void UpdateIfDue(uint current)
{
lock (_gate)
{
if (_disposed)
{
return;
}
if (KCP._itimediff(current, _nextUpdateAt) < 0)
{
return;
}
UpdateNoLock(current);
}
}
// Releases the native KCP control block and frees the pinned handle. Idempotent.
public void Dispose()
{
lock (_gate)
{
if (_disposed)
{
return;
}
_disposed = true;
if (_kcp != null)
{
KCP.ikcp_release(_kcp);
_kcp = null;
}
if (_handle.IsAllocated)
{
_handle.Free();
}
}
}
// Runs ikcp_update and caches the next due time. Caller must hold _gate.
private void UpdateNoLock(uint current)
{
KCP.ikcp_update(_kcp, current);
_nextUpdateAt = KCP.ikcp_check(_kcp, current);
}
// Caller must hold _gate.
private void ThrowIfDisposed()
{
if (_disposed || _kcp == null)
{
throw new ObjectDisposedException(nameof(KcpSession));
}
}
// Forwards a KCP-produced segment to the owning transport's socket.
private int SendRaw(byte* buffer, int length)
{
return _owner.SendDatagram(buffer, length, RemoteEndPoint);
}
// KCP output callback: recovers the session from the user pointer set at
// ikcp_create time and sends the segment; -1 tells KCP the send failed.
private static int OutputCallback(byte* buffer, int length, IKCPCB* kcp, void* user)
{
if (user == null)
{
return -1;
}
var handle = GCHandle.FromIntPtr((IntPtr)user);
if (handle.Target is not KcpSession session)
{
return -1;
}
return session.SendRaw(buffer, length);
}
}
}
}

View File

@ -0,0 +1,2 @@
schema: spec-driven
created: 2026-03-27

View File

@ -0,0 +1,70 @@
## Context
The networking stack now has a stable shared foundation: `KcpTransport` is the only reliable transport, message dispatch is host-injected, and session lifecycle is modeled explicitly for single-session clients and multi-session servers. However, `MessageManager` still sends every business message through one `ITransport`, `MovementComponent` still predicts and reconciles against authoritative state that arrives on the same reliable ordered lane as login and heartbeat traffic, and `SessionManager` still owns the last server-tick sample that prediction code reads indirectly through heartbeat handling.
Stage 6 needs a cross-cutting design because the current bottleneck is no longer transport correctness, but policy coupling. `PlayerInput` and `PlayerState` are high-frequency streams where newer data is usually more valuable than guaranteed delivery of older data. Keeping them on the same reliable ordered KCP lane as control-plane messages creates head-of-line blocking under packet loss or jitter. At the same time, time synchronization now serves prediction and reconciliation more than lifecycle ownership, so it should stop living inside the heartbeat/session state machine.
## Goals / Non-Goals
**Goals:**
- Introduce a host-agnostic delivery-policy layer that separates reliable control traffic from high-frequency gameplay synchronization traffic.
- Define latest-wins sequencing rules for `PlayerInput` and `PlayerState` so stale updates can be rejected deterministically.
- Extract clock-synchronization state from `SessionManager` into a dedicated sync-policy component that prediction and reconciliation code can consume directly.
- Preserve the existing client single-session composition and server multi-session composition while evolving shared networking behavior.
- Keep the envelope/message-type contract stable across the shared networking stack.
**Non-Goals:**
- Replace `KcpTransport` as the project's reliable control transport.
- Redesign login, logout, authentication, or reconnect semantics introduced in earlier stages.
- Deliver stage 7 metrics/logging work in the same change.
- Rewrite gameplay authority rules or build a full deterministic rollback system beyond the networking-facing prediction buffer changes needed here.
## Decisions
### Introduce delivery-policy routing above transport implementations
The shared runtime will add a policy-selection layer that resolves a delivery profile from `MessageType` before a message is sent or accepted. Reliable control messages continue to use the existing `ITransport` and `MessageManager` path, while high-frequency sync messages use a dedicated sync lane abstraction chosen by the host. This keeps transport choice centralized and prevents gameplay code from hard-coding which transport instance to call.
Alternative considered: add QoS flags or transport parameters to every `SendMessage` call.
Rejected because it spreads policy decisions across handlers and host code, making the routing contract harder to audit and easier to misuse.
### Model `PlayerInput` and `PlayerState` as sequenced latest-wins streams
The new sync strategy will treat `PlayerInput` and `PlayerState` as streams that carry monotonic ordering data, using the existing tick fields and allowing an explicit sequence field if the implementation needs one later. Receivers accept only the newest update for a given player/entity stream and drop older arrivals. This removes the main user-visible problem of reliable ordered delivery for movement: outdated packets blocking fresher state.
Alternative considered: keep both message types on reliable KCP and reduce send frequency.
Rejected because it preserves head-of-line blocking and only hides the symptom by lowering update density.
Alternative considered: send sync traffic unreliably without any ordering metadata.
Rejected because the receiver would have no deterministic way to reject stale state or reconcile prediction buffers safely.
### Extract clock sync into a dedicated strategy component
`SessionManager` should continue owning transport/login/liveness/timeout/reconnect semantics, but it should stop being the long-term owner of server-clock samples. A dedicated clock-sync component can consume server tick samples from heartbeat responses and authoritative gameplay updates, smooth them as needed, and expose the current estimate to prediction/reconciliation code without mutating lifecycle state. This matches the real ownership boundary: clock sync informs simulation alignment, not session health.
Alternative considered: keep `LastServerTick` inside `SessionManager` and let gameplay code keep reading it there.
Rejected because it couples sync tuning to lifecycle policy and makes later sampling changes look like session-state changes.
### Preserve explicit client and server host composition
The Unity client should keep composing a main-thread dispatcher, a single-session lifecycle path, and local prediction code, while the server host keeps explicit multi-session routing. The new sync abstractions should be shared, but host adapters remain responsible for how they drive ticking, buffering, and per-peer identity. This avoids forcing Unity frame-loop concerns or server peer-collection concerns into one universal runtime type.
Alternative considered: hide sync routing inside `KcpTransport` or `SessionManager`.
Rejected because both types already have narrower ownership boundaries, and embedding sync policy there would recreate the coupling earlier stages removed.
## Risks / Trade-offs
- [Two delivery lanes increase routing complexity] -> Mitigation: keep one central message-type-to-policy map and cover it with explicit routing tests.
- [Dropped input packets can momentarily reduce simulation fidelity] -> Mitigation: define latest-wins semantics around ticked input snapshots and allow the sender to keep publishing the newest state at a steady cadence.
- [Prediction corrections can become more visible if clock smoothing is noisy] -> Mitigation: isolate clock-sync state behind a dedicated component with deterministic tests for sample acceptance and smoothing behavior.
- [Client and server integration can drift if abstractions are too host-specific] -> Mitigation: keep the policy contracts in shared networking code and verify client single-session and server multi-session behavior in edit mode tests.
## Migration Plan
1. Introduce shared delivery-policy abstractions and a default policy map while leaving all traffic on the existing reliable path as a safe starting point.
2. Add the sync strategy lane and move `PlayerInput` and `PlayerState` routing onto it, while login/logout/heartbeat and other control traffic remain on KCP.
3. Move server-tick ownership out of `SessionManager` and into a dedicated clock-sync state object consumed by prediction/reconciliation code.
4. Update client reconciliation and server acceptance rules to use stale-drop/latest-wins semantics keyed by authoritative tick or sequence.
5. Add regression tests for routing, stale packet rejection, reconciliation buffer pruning, and clock-sync sampling. If rollback is needed, the policy map can route all message types back to the reliable KCP path without undoing earlier lifecycle work.
## Open Questions
- Should the first implementation of the sync lane use a dedicated `UdpClient`-backed transport, or should it start behind an abstract lane that can be backed by KCP tuning or raw UDP later?
- Do remote-player `PlayerState` updates need an explicit sync sequence separate from simulation tick for interpolation-heavy actors?
- Should the client send only the latest input snapshot each interval, or opportunistically bundle the newest few inputs to soften brief loss bursts without restoring head-of-line blocking?

View File

@ -0,0 +1,27 @@
## Why
The shared networking stack now has stable transport, dispatch, and session-lifecycle boundaries, but all gameplay traffic still rides the same reliable ordered KCP path. Stage 6 is needed now because high-frequency `PlayerInput` and `PlayerState` traffic can still suffer from head-of-line blocking, and clock synchronization is still coupled to heartbeat/session bookkeeping instead of being treated as a tunable sync policy.
## What Changes
- Add a shared high-frequency sync strategy layer that lets hosts assign delivery policies to gameplay synchronization messages instead of forcing `PlayerInput` and `PlayerState` through the same reliable ordered path as login and control traffic.
- Define latest-wins sequencing rules for high-frequency client input and authoritative player-state updates so stale packets can be discarded instead of blocking fresher movement data.
- Extract clock-synchronization sampling from `SessionManager` ownership into an explicit sync-policy component that can consume heartbeat or gameplay timing samples without changing lifecycle state semantics.
- Update client prediction and reconciliation flow so authoritative state correction is aligned with the new sync-message sequencing rules.
- Keep KCP as the only reliable transport for control-plane traffic such as login, logout, heartbeat/liveness, and other messages that still require guaranteed ordered delivery.
## Capabilities
### New Capabilities
- `network-sync-strategy`: Shared delivery-policy, sequencing, and reconciliation rules for high-frequency gameplay synchronization and independent clock-sync sampling.
### Modified Capabilities
- `kcp-transport`: Reliable KCP delivery remains the default control-plane path, but high-frequency `PlayerInput` and `PlayerState` are no longer required to stay on the same reliable ordered lane.
- `network-session-lifecycle`: Session lifecycle keeps heartbeat-focused liveness and timeout ownership, while clock-sync sampling moves to a separate sync strategy instead of living inside `SessionManager`.
- `shared-network-foundation`: The shared client/server runtime composes message routing with delivery-policy selection for reliable control traffic and high-frequency sync traffic without introducing Unity-specific dependencies.
## Impact
- Affected code: `MessageManager`, `SharedNetworkRuntime`, `NetworkManager`, `ServerNetworkHost`, transport composition around `ITransport`/future sync lanes, movement prediction/reconciliation code, and new sync-policy/state types.
- Affected behavior: login and other control traffic stay reliable, while `PlayerInput`/`PlayerState` follow latest-wins sequencing and stale-update dropping to reduce visible movement lag under packet loss or jitter.
- Affected tests: edit mode networking tests need explicit coverage for delivery-policy routing, stale packet rejection, prediction/correction behavior, and independent clock-sync sampling alongside existing lifecycle regressions.

View File

@ -0,0 +1,14 @@
## MODIFIED Requirements
### Requirement: KCP is the sole reliable transport implementation
The project SHALL expose `KcpTransport` as the only reliable `ITransport` implementation used by runtime networking paths. Reliable control-plane business messages, including login, logout, heartbeat, and other ordered session-management traffic, MUST continue to flow through KCP-backed sessions, while high-frequency `PlayerInput` and `PlayerState` synchronization MAY use a separate sync lane defined by the sync-strategy capability.
#### Scenario: Runtime networking uses KCP for reliable control delivery
- **WHEN** the application constructs the reliable transport used for login and session control traffic
- **THEN** that transport instance is `KcpTransport`
- **THEN** reliable control payloads are sent and received through KCP session state
#### Scenario: High-frequency sync is allowed to bypass reliable ordered delivery
- **WHEN** the runtime routes `PlayerInput` or `PlayerState` according to the high-frequency sync strategy
- **THEN** those messages are not forced to use the reliable ordered KCP lane
- **THEN** reliable KCP delivery remains available for control-plane traffic

View File

@ -0,0 +1,14 @@
## MODIFIED Requirements
### Requirement: Heartbeat is limited to liveness, RTT, and time sync
The shared session lifecycle SHALL treat heartbeat traffic as infrastructure input for liveness detection and round-trip-time measurement only. Clock-synchronization samples MUST be forwarded to a separate sync-strategy component rather than being owned by `SessionManager`, and heartbeat processing MUST NOT itself own login success, login failure, or reconnect policy decisions.
#### Scenario: Heartbeat updates liveness and RTT while forwarding clock samples
- **WHEN** a heartbeat response is received for an active session
- **THEN** the session manager updates last-seen or timeout bookkeeping and RTT data
- **THEN** any server-tick sample is forwarded to the clock-sync strategy without making heartbeat the owner of login state
#### Scenario: Missing heartbeat triggers timeout state
- **WHEN** the configured heartbeat timeout elapses without a required heartbeat or other liveness signal
- **THEN** the session lifecycle transitions the session into a timed-out state
- **THEN** reconnect handling is delegated to the lifecycle reconnect policy rather than hidden inside the heartbeat handler itself

View File

@ -0,0 +1,53 @@
# network-sync-strategy Specification
## Purpose
Define how client and server route high-frequency gameplay synchronization traffic, reject stale updates, reconcile authoritative state, and process clock-sync samples independently of session lifecycle.
## ADDED Requirements
### Requirement: Hosts assign delivery policies to synchronization message types
The shared networking core SHALL allow hosts to map business message types to delivery policies. `PlayerInput` and `PlayerState` MUST be assignable to a high-frequency sync policy that is independent from the reliable ordered control policy used by login and lifecycle traffic.
#### Scenario: High-frequency sync messages use a dedicated policy
- **WHEN** the client or server sends `PlayerInput` or `PlayerState`
- **THEN** the runtime resolves a high-frequency sync delivery policy for that message type
- **THEN** the message is sent through the sync lane configured for that policy instead of defaulting to reliable ordered delivery
#### Scenario: Control traffic keeps reliable delivery
- **WHEN** the runtime sends login, logout, heartbeat, or other session-management messages
- **THEN** the runtime resolves the reliable ordered control policy
- **THEN** those messages continue to use the reliable transport path
### Requirement: Sequenced sync receivers discard stale gameplay updates
The high-frequency sync strategy SHALL tag gameplay synchronization messages with monotonic sequencing information and MUST discard stale `PlayerInput` or `PlayerState` updates that arrive older than the last accepted update for the same peer or entity stream.
#### Scenario: Older player input is ignored
- **WHEN** the server receives a `PlayerInput` update with a tick or sequence older than the latest accepted input for that player
- **THEN** the server drops that stale input update
- **THEN** the newer accepted input remains authoritative for simulation
#### Scenario: Older player state does not rewind a client
- **WHEN** the client receives a `PlayerState` update with a tick or sequence older than the latest applied authoritative state for that player
- **THEN** the client ignores the stale state update
- **THEN** visible movement continues from the newer authoritative state without rewinding to older data
### Requirement: Authoritative correction prunes acknowledged prediction history
The client sync strategy SHALL reconcile local prediction against authoritative player-state updates by pruning acknowledged inputs at or before the authoritative tick and only reapplying newer pending inputs.
#### Scenario: Reconciliation removes already acknowledged inputs
- **WHEN** the client accepts an authoritative `PlayerState` update for tick `N`
- **THEN** locally buffered predicted inputs with tick less than or equal to `N` are removed from the replay buffer
- **THEN** only inputs newer than `N` remain eligible for re-simulation
### Requirement: Clock synchronization is a separate sync-policy concern
The shared networking core SHALL process server-tick or clock-synchronization samples through a dedicated sync-policy component rather than storing clock-sync ownership inside `SessionManager`.
#### Scenario: Heartbeat response contributes a clock sample without mutating lifecycle
- **WHEN** a heartbeat or gameplay sync message carries a server-tick sample
- **THEN** the runtime forwards that sample to the clock-sync strategy
- **THEN** session lifecycle state remains unchanged except for liveness or RTT bookkeeping
#### Scenario: Hosts can consume smoothed clock data for prediction
- **WHEN** prediction or reconciliation code needs the current server-time estimate
- **THEN** it reads that estimate from the clock-sync strategy or state object
- **THEN** it does not query `SessionManager` for authoritative clock ownership

View File

@ -0,0 +1,14 @@
## MODIFIED Requirements
### Requirement: Shared core preserves current transport and message contracts
The shared client/server foundation SHALL preserve the envelope-based business-message contract across client and server hosts while allowing delivery-policy selection behind the shared message-routing layer. Reliable control traffic MUST continue to use the existing `ITransport` contract, and high-frequency sync traffic MUST be composable through a host-agnostic sync strategy without introducing Unity-specific runtime types into the shared networking core.
#### Scenario: Shared hosts exchange the same envelope format across delivery lanes
- **WHEN** a client host sends a business message through either the reliable control path or the high-frequency sync path
- **THEN** the payload is encoded with the same shared envelope and message-type contract
- **THEN** the server host decodes and routes it through shared networking logic without a host-specific protocol fork
#### Scenario: Hosts compose delivery-policy selection without Unity dependencies
- **WHEN** a non-Unity server host constructs the runtime networking stack with reliable control traffic and a high-frequency sync lane
- **THEN** it uses shared delivery-policy abstractions without depending on Unity frame-loop types
- **THEN** the Unity client can use the same abstractions while still supplying its own host-specific dispatch behavior

View File

@ -0,0 +1,23 @@
## 1. Delivery Policy Infrastructure
- [ ] 1.1 Introduce shared delivery-policy abstractions and a default message-type map for reliable control traffic versus high-frequency sync traffic.
- [ ] 1.2 Extend `SharedNetworkRuntime`, `MessageManager`, and host composition points to route messages through the resolved policy without breaking the shared envelope contract.
- [ ] 1.3 Add the first sync-lane backend and any supporting transport adapter types needed to keep client single-session and server multi-session composition explicit.
## 2. High-Frequency Sync Routing
- [ ] 2.1 Route `PlayerInput` and `PlayerState` through the high-frequency sync policy while keeping login, logout, heartbeat, and other control messages on reliable KCP.
- [ ] 2.2 Implement monotonic ordering tracking for sync streams and reject stale `PlayerInput` / `PlayerState` updates on the receiving side.
- [ ] 2.3 Update server-side sync handling so each remote peer maintains independent latest-wins state instead of relying on reliable ordered delivery.
## 3. Clock Sync And Reconciliation
- [ ] 3.1 Introduce a dedicated clock-sync strategy/state object and move authoritative server-tick ownership out of `SessionManager`.
- [ ] 3.2 Refactor heartbeat and authoritative-state handlers so liveness/RTT updates stay in session lifecycle while clock samples flow through the sync strategy.
- [ ] 3.3 Update client prediction and reconciliation code to prune acknowledged inputs, ignore stale authoritative state, and replay only newer pending inputs.
## 4. Verification And Documentation
- [ ] 4.1 Add edit mode tests for delivery-policy routing, stale packet rejection, and clock-sync forwarding behavior.
- [ ] 4.2 Add regression tests covering client prediction buffer pruning and server multi-session sync isolation under delayed or out-of-order updates.
- [ ] 4.3 Update `CodeX-TODO.md` and related networking docs to reflect the stage 6 architecture and completion criteria.