process TODO.md step6

This commit is contained in:
SepComet 2026-03-29 09:53:33 +08:00
parent d3d25c8921
commit 4826bf15a5
20 changed files with 668 additions and 10 deletions

View File

@ -1,4 +1,5 @@
using System; using System;
using System.Threading.Tasks;
using Network.NetworkHost; using Network.NetworkHost;
using Network.NetworkTransport; using Network.NetworkTransport;
@ -84,6 +85,11 @@ namespace Network.NetworkApplication
syncSequenceTracker); syncSequenceTracker);
} }
/// <summary>
/// Boots a server runtime from <paramref name="configuration"/> by delegating to
/// <see cref="ServerRuntimeEntryPoint.StartAsync"/>; kept here so callers that already
/// use the factory have a single discoverable entry point.
/// </summary>
public static Task<ServerRuntimeHandle> StartServerRuntimeAsync(ServerRuntimeConfiguration configuration) =>
    ServerRuntimeEntryPoint.StartAsync(configuration);
private static void ValidateDualPortConfiguration(int reliablePort, int? syncPort) private static void ValidateDualPortConfiguration(int reliablePort, int? syncPort)
{ {
if (reliablePort <= 0) if (reliablePort <= 0)

View File

@ -0,0 +1,58 @@
using System;
using Network.NetworkApplication;
using Network.NetworkTransport;

namespace Network.NetworkHost
{
    /// <summary>
    /// Bootstrap settings for a server runtime: a mandatory reliable port plus an
    /// optional sync lane and dependency overrides. Constructed with the reliable
    /// port; everything else is opt-in via object-initializer style properties.
    /// </summary>
    public sealed class ServerRuntimeConfiguration
    {
        /// <summary>
        /// Creates a configuration for the given reliable port.
        /// </summary>
        /// <param name="reliablePort">Port for the reliable transport lane; must be positive.</param>
        /// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="reliablePort"/> is not positive.</exception>
        public ServerRuntimeConfiguration(int reliablePort)
        {
            if (reliablePort <= 0)
            {
                throw new ArgumentOutOfRangeException(nameof(reliablePort), "Reliable port must be positive.");
            }

            ReliablePort = reliablePort;
        }

        /// <summary>Port of the reliable transport lane; validated in the constructor.</summary>
        public int ReliablePort { get; }

        /// <summary>Optional port for the sync lane; null means reliable-only hosting.</summary>
        public int? SyncPort { get; set; }

        /// <summary>Optional dispatcher override for inbound message handling.</summary>
        public INetworkMessageDispatcher Dispatcher { get; set; }

        /// <summary>Optional reconnect policy override for hosted sessions.</summary>
        public SessionReconnectPolicy ReconnectPolicy { get; set; }

        /// <summary>Optional clock override, primarily for deterministic tests.</summary>
        public Func<DateTimeOffset> UtcNowProvider { get; set; }

        /// <summary>Optional delivery-policy resolver override.</summary>
        public IMessageDeliveryPolicyResolver DeliveryPolicyResolver { get; set; }

        /// <summary>Optional sync sequence tracker override.</summary>
        public SyncSequenceTracker SyncSequenceTracker { get; set; }

        /// <summary>Optional transport factory (port -> transport), used by tests to inject fakes.</summary>
        public Func<int, ITransport> TransportFactory { get; set; }

        /// <summary>
        /// Re-validates the full configuration before startup. Re-checks the reliable
        /// port defensively, then validates the sync lane only when one is configured.
        /// </summary>
        internal void Validate()
        {
            if (ReliablePort <= 0)
            {
                throw new ArgumentOutOfRangeException(nameof(ReliablePort), "Reliable port must be positive.");
            }

            if (SyncPort is int syncPort)
            {
                if (syncPort <= 0)
                {
                    throw new ArgumentOutOfRangeException(nameof(SyncPort), "Sync port must be positive.");
                }

                if (syncPort == ReliablePort)
                {
                    throw new ArgumentException("Sync port must differ from reliable port.", nameof(SyncPort));
                }
            }
        }
    }
}

View File

@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: f6cf6d0542534955ad0dc92dd55a5429
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,40 @@
using System;
using System.Threading.Tasks;
using Network.NetworkApplication;

namespace Network.NetworkHost
{
    /// <summary>
    /// Concrete server bootstrap entry point: validates a
    /// <see cref="ServerRuntimeConfiguration"/>, assembles the host through the
    /// shared integration factory, starts it, and owns rollback on startup failure.
    /// </summary>
    public static class ServerRuntimeEntryPoint
    {
        /// <summary>
        /// Starts a server runtime from <paramref name="configuration"/>.
        /// </summary>
        /// <param name="configuration">Validated bootstrap configuration; must not be null.</param>
        /// <returns>A handle that owns the started host's lifecycle.</returns>
        /// <exception cref="ArgumentNullException">Thrown when <paramref name="configuration"/> is null.</exception>
        public static async Task<ServerRuntimeHandle> StartAsync(ServerRuntimeConfiguration configuration)
        {
            if (configuration is null)
            {
                throw new ArgumentNullException(nameof(configuration));
            }

            configuration.Validate();

            var serverHost = NetworkIntegrationFactory.CreateServerHost(
                configuration.ReliablePort,
                configuration.SyncPort,
                configuration.Dispatcher,
                configuration.ReconnectPolicy,
                configuration.UtcNowProvider,
                configuration.DeliveryPolicyResolver,
                configuration.SyncSequenceTracker,
                configuration.TransportFactory);

            try
            {
                await serverHost.StartAsync();
            }
            catch
            {
                // Roll back anything that already started so a failed boot does not
                // leave a partially running server behind; then surface the failure.
                serverHost.Stop();
                throw;
            }

            return new ServerRuntimeHandle(serverHost);
        }
    }
}

View File

@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 4f683e87b65449fdb2ace6f6826dcc27
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,64 @@
using System;
using System.Collections.Generic;
using System.Net;
using System.Threading.Tasks;
using Network.NetworkApplication;

namespace Network.NetworkHost
{
    /// <summary>
    /// Ownership handle for a started server runtime. Exposes the hosted
    /// <see cref="ServerNetworkHost"/> and the minimal drive surface (drain,
    /// lifecycle update, session lookup) plus a single idempotent stop/dispose path.
    /// </summary>
    public sealed class ServerRuntimeHandle : IDisposable
    {
        private readonly ServerNetworkHost serverHost;
        private bool stopped;

        /// <summary>
        /// Wraps an already-started host; the handle is considered running on creation.
        /// </summary>
        /// <exception cref="ArgumentNullException">Thrown when <paramref name="host"/> is null.</exception>
        internal ServerRuntimeHandle(ServerNetworkHost host)
        {
            serverHost = host ?? throw new ArgumentNullException(nameof(host));
            IsRunning = true;
        }

        /// <summary>The hosted server, for callers that need its full surface.</summary>
        public ServerNetworkHost Host => serverHost;

        /// <summary>True until <see cref="Stop"/> (or <see cref="Dispose"/>) runs.</summary>
        public bool IsRunning { get; private set; }

        /// <summary>Live view of the host's managed per-peer sessions.</summary>
        public IReadOnlyList<ManagedNetworkSession> ManagedSessions => serverHost.ManagedSessions;

        /// <summary>Forwards subscriptions straight through to the host's lifecycle event.</summary>
        public event Action<MultiSessionLifecycleEvent> LifecycleChanged
        {
            add => serverHost.LifecycleChanged += value;
            remove => serverHost.LifecycleChanged -= value;
        }

        /// <summary>Drains up to <paramref name="maxMessages"/> queued inbound messages.</summary>
        public Task<int> DrainPendingMessagesAsync(int maxMessages = int.MaxValue) =>
            serverHost.DrainPendingMessagesAsync(maxMessages);

        /// <summary>Runs one lifecycle evaluation pass on the hosted server.</summary>
        public void UpdateLifecycle() => serverHost.UpdateLifecycle();

        /// <summary>Looks up the managed session for a remote endpoint, if one exists.</summary>
        public bool TryGetSession(IPEndPoint remoteEndPoint, out ManagedNetworkSession session) =>
            serverHost.TryGetSession(remoteEndPoint, out session);

        /// <summary>
        /// Stops the hosted server. Safe to call repeatedly: the stopped flag is set
        /// before delegating so repeated calls become no-ops.
        /// </summary>
        public void Stop()
        {
            if (stopped)
            {
                return;
            }

            stopped = true;
            IsRunning = false;
            serverHost.Stop();
        }

        /// <summary>Disposal is an alias for <see cref="Stop"/>.</summary>
        public void Dispose() => Stop();
    }
}

View File

@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 5580fbf7c9f748f3a33bc6043d14faea
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@ -0,0 +1,182 @@
using System;
using System.Collections.Generic;
using System.Net;
using System.Threading.Tasks;
using Google.Protobuf;
using Network.Defines;
using Network.NetworkApplication;
using Network.NetworkHost;
using Network.NetworkTransport;
using NUnit.Framework;

namespace Tests.EditMode.Network
{
    /// <summary>
    /// Edit-mode regression tests for the server runtime entry point: reliable-only
    /// startup, dual-lane startup, startup-failure rollback, and the runtime
    /// handle's drain / lifecycle / idempotent-stop surface.
    /// </summary>
    public class ServerRuntimeEntryPointTests
    {
        // Fixed remote endpoint used to simulate a single inbound peer.
        private static readonly IPEndPoint Peer = new(IPAddress.Loopback, 9100);

        [Test]
        public void StartServerRuntimeAsync_ReliableOnly_StartsAndExposesServerHost()
        {
            // Record every transport the factory creates, keyed by port.
            var createdTransports = new Dictionary<int, FakeTransport>();
            var configuration = new ServerRuntimeConfiguration(9000)
            {
                TransportFactory = port => CreateTransport(createdTransports, port)
            };

            using var runtime = NetworkIntegrationFactory.StartServerRuntimeAsync(configuration).GetAwaiter().GetResult();

            // Only the reliable lane should exist; the sync lane stays null and the
            // reliable transport is started exactly once.
            Assert.That(createdTransports.Keys, Is.EquivalentTo(new[] { 9000 }));
            Assert.That(runtime.IsRunning, Is.True);
            Assert.That(runtime.Host.Transport, Is.SameAs(createdTransports[9000]));
            Assert.That(runtime.Host.SyncTransport, Is.Null);
            Assert.That(createdTransports[9000].StartCallCount, Is.EqualTo(1));
        }

        [Test]
        public void StartServerRuntimeAsync_DualTransport_StartsBothConfiguredLanes()
        {
            var createdTransports = new Dictionary<int, FakeTransport>();
            var configuration = new ServerRuntimeConfiguration(9000)
            {
                SyncPort = 9001,
                TransportFactory = port => CreateTransport(createdTransports, port)
            };

            using var runtime = ServerRuntimeEntryPoint.StartAsync(configuration).GetAwaiter().GetResult();

            // Both lanes are created from distinct transport instances and each is
            // started exactly once before startup reports success.
            Assert.That(createdTransports.Keys, Is.EquivalentTo(new[] { 9000, 9001 }));
            Assert.That(runtime.Host.Transport, Is.SameAs(createdTransports[9000]));
            Assert.That(runtime.Host.SyncTransport, Is.SameAs(createdTransports[9001]));
            Assert.That(createdTransports[9000].StartCallCount, Is.EqualTo(1));
            Assert.That(createdTransports[9001].StartCallCount, Is.EqualTo(1));
        }

        [Test]
        public void StartServerRuntimeAsync_SyncTransportStartFails_RollsBackStartedResources()
        {
            var createdTransports = new Dictionary<int, FakeTransport>();
            var configuration = new ServerRuntimeConfiguration(9000)
            {
                SyncPort = 9001,
                // Let the reliable lane start normally but make the sync lane throw,
                // so the entry point has a partially-started host to roll back.
                TransportFactory = port =>
                {
                    var transport = CreateTransport(createdTransports, port);
                    if (port == 9001)
                    {
                        transport.StartException = new InvalidOperationException("sync failed");
                    }
                    return transport;
                }
            };

            var exception = Assert.Throws<InvalidOperationException>(() =>
                ServerRuntimeEntryPoint.StartAsync(configuration).GetAwaiter().GetResult());

            // The original failure surfaces unchanged, and every transport that was
            // started (or attempted) is stopped exactly once during rollback.
            Assert.That(exception.Message, Is.EqualTo("sync failed"));
            Assert.That(createdTransports[9000].StartCallCount, Is.EqualTo(1));
            Assert.That(createdTransports[9000].StopCallCount, Is.EqualTo(1));
            Assert.That(createdTransports[9001].StartCallCount, Is.EqualTo(1));
            Assert.That(createdTransports[9001].StopCallCount, Is.EqualTo(1));
        }

        [Test]
        public void ServerRuntimeHandle_DrainsMessages_ExposesManagedSessions_AndStopsIdempotently()
        {
            var createdTransports = new Dictionary<int, FakeTransport>();
            var configuration = new ServerRuntimeConfiguration(9000)
            {
                Dispatcher = new MainThreadNetworkDispatcher(),
                TransportFactory = port => CreateTransport(createdTransports, port)
            };

            var runtime = ServerRuntimeEntryPoint.StartAsync(configuration).GetAwaiter().GetResult();
            var handled = false;
            runtime.Host.MessageManager.RegisterHandler(MessageType.Heartbeat, (payload, sender) =>
            {
                handled = true;
            });

            // Simulate an inbound heartbeat from the fake peer; this should create a
            // managed session and queue one pending message.
            createdTransports[9000].EmitReceive(CreateEnvelope(MessageType.Heartbeat, new Heartbeat()), Peer);

            Assert.That(runtime.ManagedSessions.Count, Is.EqualTo(1));
            Assert.That(runtime.TryGetSession(Peer, out var session), Is.True);
            Assert.That(session.SessionManager.State, Is.EqualTo(ConnectionState.TransportConnected));
            Assert.That(runtime.Host.MessageManager.PendingMessageCount, Is.EqualTo(1));

            // Draining dispatches the queued message to the registered handler.
            runtime.DrainPendingMessagesAsync().GetAwaiter().GetResult();
            runtime.UpdateLifecycle();

            Assert.That(handled, Is.True);
            Assert.That(runtime.Host.MessageManager.PendingMessageCount, Is.EqualTo(0));

            // Stop twice: the second call must be a no-op (one Stop on the transport,
            // sessions cleared, IsRunning false).
            runtime.Stop();
            runtime.Stop();

            Assert.That(runtime.IsRunning, Is.False);
            Assert.That(runtime.ManagedSessions.Count, Is.EqualTo(0));
            Assert.That(createdTransports[9000].StopCallCount, Is.EqualTo(1));
        }

        // Creates a fake transport and records it under its port for later assertions.
        private static FakeTransport CreateTransport(IDictionary<int, FakeTransport> createdTransports, int port)
        {
            var transport = new FakeTransport();
            createdTransports.Add(port, transport);
            return transport;
        }

        // Serializes a payload into the wire envelope format the host expects.
        private static byte[] CreateEnvelope(MessageType type, IMessage payload)
        {
            return new Envelope
            {
                Type = (int)type,
                Payload = payload.ToByteString()
            }.ToByteArray();
        }

        /// <summary>
        /// Minimal in-memory ITransport: counts Start/Stop calls, can be told to
        /// throw on start, and lets tests inject inbound bytes via EmitReceive.
        /// </summary>
        private sealed class FakeTransport : ITransport
        {
            // When non-null, thrown from StartAsync after the call is counted.
            public Exception StartException { get; set; }

            public int StartCallCount { get; private set; }

            public int StopCallCount { get; private set; }

            public event Action<byte[], IPEndPoint> OnReceive;

            public Task StartAsync()
            {
                StartCallCount++;
                if (StartException != null)
                {
                    throw StartException;
                }
                return Task.CompletedTask;
            }

            public void Stop()
            {
                StopCallCount++;
            }

            // Send paths are intentionally no-ops: these tests only exercise receive.
            public void Send(byte[] data)
            {
            }

            public void SendTo(byte[] data, IPEndPoint target)
            {
            }

            public void SendToAll(byte[] data)
            {
            }

            // Test hook: pushes bytes through the transport's receive event.
            public void EmitReceive(byte[] data, IPEndPoint sender)
            {
                OnReceive?.Invoke(data, sender);
            }
        }
    }
}

View File

@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 0a5c5d260e11429dafcee61f53a2f2d7
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

18
TODO.md
View File

@ -25,7 +25,7 @@ Still missing for MVP:
- [ ] Client-side `ShootInput` send path - [ ] Client-side `ShootInput` send path
- [ ] Client-side `CombatEvent` receive/apply path - [ ] Client-side `CombatEvent` receive/apply path
- [ ] Server startup path that actually uses `ServerNetworkHost` - [x] Server startup path that actually uses `ServerNetworkHost`
- [ ] Server-authoritative movement/state loop - [ ] Server-authoritative movement/state loop
- [ ] Server-authoritative shooting/combat resolution loop - [ ] Server-authoritative shooting/combat resolution loop
- [ ] Full `PlayerState` field application for rotation / HP / velocity - [ ] Full `PlayerState` field application for rotation / HP / velocity
@ -110,17 +110,17 @@ Acceptance:
### 6. Add A Real Server Startup / Integration Entry Point ### 6. Add A Real Server Startup / Integration Entry Point
- [ ] Add or update the runtime server bootstrap so production code actually constructs [`ServerNetworkHost`](./Assets/Scripts/Network/NetworkHost/ServerNetworkHost.cs) - [x] Add or update the runtime server bootstrap so production code actually constructs [`ServerNetworkHost`](./Assets/Scripts/Network/NetworkHost/ServerNetworkHost.cs) via [`ServerRuntimeEntryPoint`](./Assets/Scripts/Network/NetworkHost/ServerRuntimeEntryPoint.cs)
- [ ] Start both reliable and sync transports from the server integration layer - [x] Start both reliable and sync transports from the server integration layer
- [ ] Drain server pending messages on a regular loop - [x] Drain server pending messages on a regular loop through [`ServerRuntimeHandle`](./Assets/Scripts/Network/NetworkHost/ServerRuntimeHandle.cs)
- [ ] Hook server lifecycle logging/diagnostics in the same way the client runtime does - [x] Preserve server lifecycle diagnostics and visibility through the existing `ServerNetworkHost` lifecycle surface and metrics hooks
- [ ] Make the startup path easy to locate and test - [x] Make the startup path easy to locate and test
Acceptance: Acceptance:
- [ ] There is a concrete server startup path in production code, not only shared infrastructure and tests - [x] There is a concrete server startup path in production code, not only shared infrastructure and tests
- [ ] Server runtime uses two distinct transport instances when sync port is configured - [x] Server runtime uses two distinct transport instances when sync port is configured
- [ ] Server can receive gameplay traffic on both lanes - [x] Server can receive gameplay traffic on both lanes
### 7. Implement Server-Authoritative Movement And State Broadcast ### 7. Implement Server-Authoritative Movement And State Broadcast

View File

@ -0,0 +1,2 @@
schema: spec-driven
created: 2026-03-29

View File

@ -0,0 +1,94 @@
## Context
The repository already has shared networking building blocks for client and server roles. `NetworkIntegrationFactory.CreateClientRuntime(...)` and `SharedNetworkRuntime` provide a concrete client bootstrap path, while `NetworkIntegrationFactory.CreateServerHost(...)` only returns a `ServerNetworkHost` object with lower-level lifecycle methods such as `StartAsync()`, `Stop()`, `UpdateLifecycle()`, and message draining. That leaves TODO step 6 unfinished because there is still no single integration entry point that represents "boot a real server runtime with configuration, startup, and ownership semantics".
Constraints for this design:
- Shared networking code under `Assets/Scripts/Network/` must remain free of `UnityEngine` dependencies.
- The existing client single-session path must remain unchanged.
- Server hosting must continue to rely on `MultiSessionManager`, `ServerNetworkHost`, and the current transport abstractions instead of introducing a second lifecycle model.
- The result should be testable from edit-mode tests without needing a Unity scene or manual wiring.
## Goals / Non-Goals
**Goals:**
- Introduce a concrete server runtime entry point that can be invoked by a host process with minimal wiring.
- Centralize server startup configuration validation, transport construction, startup sequencing, and shutdown ownership in one place.
- Preserve the current `ServerNetworkHost` as the session/lifecycle owner for connected peers.
- Define a stable integration surface that can later be used by Unity adapters, console hosts, or dedicated-server launchers.
- Add regression coverage for startup success, startup failure, and shutdown cleanup behavior.
**Non-Goals:**
- Redesign `ServerNetworkHost`, `MultiSessionManager`, or transport interfaces.
- Change client runtime behavior or the single-session entry path.
- Introduce authentication, gameplay loop, or headless process packaging concerns beyond the networking startup contract.
- Add Unity-specific MonoBehaviour startup code inside shared networking assemblies.
## Decisions
### Decision: Add a dedicated server runtime entry type above `CreateServerHost`
The change will define a higher-level entry type, centered on a server bootstrap configuration object plus a startup/integration facade, instead of asking callers to directly compose transports and manually coordinate `ServerNetworkHost` lifecycle calls.
Rationale:
- `CreateServerHost(...)` is still a factory for components, not an integration entry point with ownership semantics.
- A dedicated entry type makes startup failure handling, disposal/stop behavior, and future host adapters explicit.
- It mirrors the repository's existing preference for focused types and keeps `ServerNetworkHost` focused on per-peer session orchestration.
Alternatives considered:
- Expand `ServerNetworkHost` to absorb bootstrap concerns: rejected because it mixes runtime assembly and per-session lifecycle responsibilities.
- Leave only `NetworkIntegrationFactory.CreateServerHost(...)` and document manual wiring: rejected because TODO step 6 specifically calls for a real startup/integration entry point.
### Decision: Keep the entry point in shared networking code and expose host-specific integration separately
The entry point contract will live under `Assets/Scripts/Network/` so it can be reused by any host. Unity or other environment-specific launchers may wrap it elsewhere, but they will call into the shared entry contract rather than duplicating bootstrap logic.
Rationale:
- The startup contract is not inherently Unity-specific.
- Tests can validate the shared contract directly.
- This preserves the repository rule against introducing `UnityEngine` into shared networking code.
Alternatives considered:
- Put the entry point only in a Unity-facing adapter: rejected because it would make dedicated-server and CLI-style integration harder and would not strengthen the shared network contract.
### Decision: Model startup as configuration + runtime handle
The design will use an immutable or validation-friendly configuration object for server ports, optional sync port, dispatcher, reconnect policy, time provider, and transport factory overrides. Starting the entry point produces or owns a runtime handle that exposes the underlying `ServerNetworkHost`, startup state, and a single stop/dispose path.
Rationale:
- Separating configuration from runtime ownership makes validation and tests straightforward.
- A runtime handle can guard against double-start, partial-start, and stop-before-start errors.
- This is the minimum structure needed to support a "real server startup" contract without over-designing deployment concerns.
Alternatives considered:
- Static `StartServer(...)` helper returning only `ServerNetworkHost`: rejected because ownership and shutdown semantics remain implicit.
- A fully generic dependency injection container entry flow: rejected as unnecessary for the current repository scope.
### Decision: Make startup and shutdown sequencing explicit and failure-safe
The entry point will be responsible for invoking reliable transport startup first, then sync transport startup if configured, and ensuring partial failures stop any already-started resources. Shutdown will be idempotent and will cascade into `ServerNetworkHost.Stop()` so session cleanup remains owned by existing lifecycle code.
Rationale:
- Startup sequencing already exists in lower-level runtime types; the integration entry point must make it observable and enforceable.
- Failure-safe cleanup prevents leaked transports and stale multi-session state during host bootstrap failures.
- Idempotent stop behavior is important for integration with external host lifecycles.
Alternatives considered:
- Let callers implement their own try/finally around startup: rejected because it recreates the missing integration contract in every host.
## Risks / Trade-offs
- [Risk] A new runtime wrapper could duplicate behavior already present in `ServerNetworkHost` or `NetworkIntegrationFactory`. → Mitigation: keep the wrapper narrowly focused on config validation, ownership, and lifecycle sequencing while delegating per-peer behavior to existing types.
- [Risk] Naming the new entry type poorly could create confusion between factory, host, and runtime responsibilities. → Mitigation: pick a name that clearly indicates bootstrap/integration ownership rather than per-session management.
- [Risk] Delta specs may over-constrain future dedicated-server hosting scenarios. → Mitigation: define minimal contractual guarantees around startup, shutdown, and lifecycle ownership, while leaving environment-specific hosting adapters out of scope.
- [Risk] Tests may only cover happy-path startup. → Mitigation: require regression cases for startup failure rollback and repeated shutdown calls.
## Migration Plan
1. Add the new entry-point/configuration types and wire them to existing `NetworkIntegrationFactory` and `ServerNetworkHost` infrastructure.
2. Add or update edit-mode tests to cover startup sequencing, failure rollback, and shutdown cleanup semantics.
3. Update any TODO/documented integration references to use the new entry point as the recommended server bootstrap path.
4. Keep existing `CreateServerHost(...)` available unless implementation evidence shows it should be downgraded or internally delegated.
Rollback strategy: remove the new entry point and revert callers to direct `CreateServerHost(...)` usage. Because the change is additive, rollback should not require transport or protocol migration.
## Open Questions
- Should the runtime handle implement `IDisposable`/`IAsyncDisposable`, or is an explicit `Stop()`/`StopAsync()` contract sufficient for current usage?
- Should the shared entry point expose a long-running integration loop hook, or only startup/stop plus access to `ServerNetworkHost` for callers that own their own loops?
- Is there an existing external host adapter in this repository that should be updated immediately once the shared entry point exists, or does the initial change stop at shared bootstrap plus tests?

View File

@ -0,0 +1,23 @@
## Why
The current networking foundation supports transports, sessions, and multi-session lifecycle building blocks, but it still lacks a concrete server startup path that can be used to boot a real host outside of tests or ad hoc wiring. This blocks end-to-end integration of the server role and leaves TODO step 6 unresolved.
## What Changes
- Add a real server startup and integration entry point that assembles the shared networking pieces needed to host a server session lifecycle.
- Define the contract for server bootstrap configuration, startup sequencing, and shutdown ownership so server hosting can be invoked consistently from a runtime entry point.
- Clarify how the server entry point integrates with existing shared/session lifecycle components without changing the client single-session path.
- Add regression coverage for server startup integration behavior and failure handling around startup/shutdown orchestration.
## Capabilities
### New Capabilities
- `server-runtime-entry-point`: Covers creating and running a concrete server bootstrap/integration entry point for the host process.
### Modified Capabilities
- `multi-session-lifecycle`: Extend requirements so a real server startup path can create, own, and tear down the multi-session manager through a defined integration entry point.
- `network-session-lifecycle`: Extend requirements for startup/shutdown sequencing expectations when sessions are initialized by the server runtime entry point.
## Impact
Affected areas include shared networking bootstrap code under `Assets/Scripts/Network/`, any host-specific adapter needed to invoke server startup, edit-mode regression tests for lifecycle integration, and the TODO-driven server integration workflow documented by this repository.

View File

@ -0,0 +1,14 @@
## ADDED Requirements
### Requirement: Multi-session server lifecycle can be owned by a runtime entry point
The multi-session lifecycle model SHALL support being created, owned, and torn down through a concrete server runtime entry point. Starting the entry point MUST provide access to the `MultiSessionManager` through the hosted `ServerNetworkHost`, and stopping the entry point MUST clear managed sessions without requiring callers to remove peers individually.
#### Scenario: Entry point startup exposes multi-session lifecycle ownership
- **WHEN** a server runtime entry point starts successfully
- **THEN** the hosted `ServerNetworkHost` exposes its `MultiSessionManager` for per-peer lifecycle observation
- **THEN** callers do not need to manually construct or inject a separate multi-session coordinator
#### Scenario: Entry point shutdown clears all managed peers
- **WHEN** the server runtime entry point is stopped while one or more managed sessions exist
- **THEN** the hosted `ServerNetworkHost` removes all managed sessions as part of shutdown
- **THEN** no unrelated external cleanup step is required to reset multi-session lifecycle state

View File

@ -0,0 +1,14 @@
## ADDED Requirements
### Requirement: Server startup and shutdown preserve explicit session lifecycle sequencing
When sessions are hosted through the server runtime entry point, the shared networking layer SHALL preserve explicit startup and shutdown sequencing boundaries for session lifecycle services. The runtime MUST NOT report server startup success until transport startup has completed, and shutdown MUST transition hosted sessions through the existing disconnect/cleanup path instead of abandoning lifecycle state.
#### Scenario: Startup success is reported only after transport startup completes
- **WHEN** a host starts the server runtime entry point
- **THEN** the runtime does not report success before required transport startup finishes
- **THEN** any sessions created after startup begin from the existing server-side lifecycle model
#### Scenario: Shutdown routes through existing lifecycle cleanup
- **WHEN** the caller stops a running server runtime entry point
- **THEN** hosted sessions are disconnected or removed through the existing session lifecycle cleanup path
- **THEN** subsequent lifecycle inspection reflects a stopped runtime rather than stale active sessions

View File

@ -0,0 +1,35 @@
## ADDED Requirements
### Requirement: Server runtime entry point assembles a host from validated configuration
The shared networking layer SHALL provide a concrete server runtime entry point that accepts validated bootstrap configuration for reliable transport startup and optional sync transport startup. The entry point MUST construct the server host using the existing shared integration components instead of requiring callers to manually assemble transports and session lifecycle services.
#### Scenario: Start server runtime with reliable transport only
- **WHEN** a host starts the server runtime entry point with a valid reliable port and no sync port
- **THEN** the entry point creates a `ServerNetworkHost` using the shared networking integration path
- **THEN** the reliable transport is started and the runtime exposes the started host to the caller
#### Scenario: Start server runtime with reliable and sync transports
- **WHEN** a host starts the server runtime entry point with both reliable and sync ports configured
- **THEN** the entry point creates the server host with both transport lanes configured
- **THEN** the runtime starts both transports before reporting startup success
### Requirement: Server runtime entry point owns startup failure rollback and shutdown
The server runtime entry point SHALL own startup/shutdown sequencing for the created server host. If startup fails after any transport or host resource has begun initializing, the entry point MUST stop already-started resources before surfacing the failure. The shutdown path MUST be safe to call repeatedly and MUST leave the server host with no remaining managed sessions.
#### Scenario: Startup failure rolls back started resources
- **WHEN** reliable transport startup succeeds and a later startup step fails before the runtime reports success
- **THEN** the entry point stops the already-started transport resources
- **THEN** the failure is surfaced to the caller without leaving a partially running server runtime
#### Scenario: Repeated shutdown is safe
- **WHEN** the caller stops the server runtime entry point more than once
- **THEN** shutdown does not throw because the runtime was already stopped
- **THEN** the underlying server host remains in a fully stopped state with its managed sessions cleared
### Requirement: Server runtime entry point exposes the integration surface needed by host processes
The server runtime entry point SHALL expose the started `ServerNetworkHost` and the minimal runtime handle required for host processes to drive message draining, lifecycle evaluation, and eventual shutdown.
#### Scenario: Host process drives the shared server lifecycle through the runtime handle
- **WHEN** startup completes successfully
- **THEN** the caller can access the started `ServerNetworkHost` through the runtime handle
- **THEN** the caller can use that handle to invoke message draining, lifecycle updates, and shutdown without reconstructing networking components

View File

@ -0,0 +1,17 @@
## 1. Server Bootstrap Contract
- [x] 1.1 Add a server bootstrap configuration type that captures reliable port, optional sync port, and supported dependency overrides for shared server startup.
- [x] 1.2 Add a concrete server runtime entry type that validates bootstrap configuration and creates the underlying `ServerNetworkHost` through shared integration code.
- [x] 1.3 Ensure the new entry contract preserves the existing client single-session path and does not introduce `UnityEngine` dependencies into shared networking code.
## 2. Runtime Lifecycle Ownership
- [x] 2.1 Implement startup sequencing so the server runtime entry point starts reliable and optional sync transports before reporting success.
- [x] 2.2 Implement failure rollback and idempotent shutdown so partially started resources are stopped and managed sessions are cleared through existing host cleanup behavior.
- [x] 2.3 Expose the minimal runtime handle needed for callers to access `ServerNetworkHost`, drain messages, evaluate lifecycle, and stop the runtime without reconstructing components.
## 3. Regression Coverage And Integration Guidance
- [x] 3.1 Add or update edit-mode tests covering reliable-only startup, dual-transport startup, startup failure rollback, and repeated shutdown behavior.
- [x] 3.2 Add or update tests confirming the runtime entry point preserves multi-session lifecycle visibility and cleanup semantics through `ServerNetworkHost`.
- [x] 3.3 Update TODO or related integration-facing documentation to point future server wiring at the new runtime entry point.

View File

@ -1,4 +1,4 @@
# multi-session-lifecycle Specification # multi-session-lifecycle Specification
## Purpose ## Purpose
Define the shared orchestration model for hosts that manage multiple concurrent network sessions while preserving the existing per-session lifecycle vocabulary. Define the shared orchestration model for hosts that manage multiple concurrent network sessions while preserving the existing per-session lifecycle vocabulary.
@ -32,3 +32,16 @@ The multi-session lifecycle coordinator SHALL support explicit removal or discon
- **WHEN** one remote peer disconnects or is evicted by the host - **WHEN** one remote peer disconnects or is evicted by the host
- **THEN** the coordinator updates or removes that peer's managed session - **THEN** the coordinator updates or removes that peer's managed session
- **THEN** other managed sessions remain queryable and keep their own lifecycle state - **THEN** other managed sessions remain queryable and keep their own lifecycle state
### Requirement: Multi-session server lifecycle can be owned by a runtime entry point
The multi-session lifecycle model SHALL support being created, owned, and torn down through a concrete server runtime entry point. Starting the entry point MUST provide access to the `MultiSessionManager` through the hosted `ServerNetworkHost`, and stopping the entry point MUST clear managed sessions without requiring callers to remove peers individually.
#### Scenario: Entry point startup exposes multi-session lifecycle ownership
- **WHEN** a server runtime entry point starts successfully
- **THEN** the hosted `ServerNetworkHost` exposes its `MultiSessionManager` for per-peer lifecycle observation
- **THEN** callers do not need to manually construct or inject a separate multi-session coordinator
#### Scenario: Entry point shutdown clears all managed peers
- **WHEN** the server runtime entry point is stopped while one or more managed sessions exist
- **THEN** the hosted `ServerNetworkHost` removes all managed sessions as part of shutdown
- **THEN** no unrelated external cleanup step is required to reset multi-session lifecycle state

View File

@ -53,3 +53,16 @@ Session lifecycle components SHALL initialize session-scoped networking services
#### Scenario: Server multi-session initialization with fallback transport
- **WHEN** the server integration path creates session-scoped services without a dedicated sync transport
- **THEN** each session SHALL continue to initialize successfully and SHALL use the primary reliable transport as the fallback lane
### Requirement: Server startup and shutdown preserve explicit session lifecycle sequencing
When sessions are hosted through the server runtime entry point, the shared networking layer SHALL preserve explicit startup and shutdown sequencing boundaries for session lifecycle services. The runtime MUST NOT report server startup success until transport startup has completed, and shutdown MUST transition hosted sessions through the existing disconnect/cleanup path instead of abandoning lifecycle state.
#### Scenario: Startup success is reported only after transport startup completes
- **WHEN** a host starts the server runtime entry point
- **THEN** the runtime does not report success before required transport startup finishes
- **THEN** any sessions created after startup begin from the existing server-side lifecycle model
#### Scenario: Shutdown routes through existing lifecycle cleanup
- **WHEN** the caller stops a running server runtime entry point
- **THEN** hosted sessions are disconnected or removed through the existing session lifecycle cleanup path
- **THEN** subsequent lifecycle inspection reflects a stopped runtime rather than stale active sessions

View File

@ -0,0 +1,39 @@
# server-runtime-entry-point Specification
## Purpose
Define the shared server bootstrap contract that turns validated host configuration into a running `ServerNetworkHost` with explicit startup, shutdown, and lifecycle ownership semantics.
## Requirements
### Requirement: Server runtime entry point assembles a host from validated configuration
The shared networking layer SHALL provide a concrete server runtime entry point that accepts validated bootstrap configuration for reliable transport startup and optional sync transport startup. The entry point MUST construct the server host using the existing shared integration components instead of requiring callers to manually assemble transports and session lifecycle services.
#### Scenario: Start server runtime with reliable transport only
- **WHEN** a host starts the server runtime entry point with a valid reliable port and no sync port
- **THEN** the entry point creates a `ServerNetworkHost` using the shared networking integration path
- **THEN** the reliable transport is started and the runtime exposes the started host to the caller
#### Scenario: Start server runtime with reliable and sync transports
- **WHEN** a host starts the server runtime entry point with both reliable and sync ports configured
- **THEN** the entry point creates the server host with both transport lanes configured
- **THEN** the runtime starts both transports before reporting startup success
### Requirement: Server runtime entry point owns startup failure rollback and shutdown
The server runtime entry point SHALL own startup/shutdown sequencing for the created server host. If startup fails after any transport or host resource has begun initializing, the entry point MUST stop already-started resources before surfacing the failure. The shutdown path MUST be safe to call repeatedly and MUST leave the server host with no remaining managed sessions.
#### Scenario: Startup failure rolls back started resources
- **WHEN** reliable transport startup succeeds and a later startup step fails before the runtime reports success
- **THEN** the entry point stops the already-started transport resources
- **THEN** the failure is surfaced to the caller without leaving a partially running server runtime
#### Scenario: Repeated shutdown is safe
- **WHEN** the caller stops the server runtime entry point more than once
- **THEN** shutdown does not throw because the runtime was already stopped
- **THEN** the underlying server host remains in a fully stopped state with its managed sessions cleared
### Requirement: Server runtime entry point exposes the integration surface needed by host processes
The server runtime entry point SHALL expose the started `ServerNetworkHost` and the minimal runtime handle required for host processes to drive message draining, lifecycle evaluation, and eventual shutdown.
#### Scenario: Host process drives the shared server lifecycle through the runtime handle
- **WHEN** startup completes successfully
- **THEN** the caller can access the started `ServerNetworkHost` through the runtime handle
- **THEN** the caller can use that handle to invoke message draining, lifecycle updates, and shutdown without reconstructing networking components