
Commit f5441b0

Documentation Updates (apple#942)
* fix readme
* rename StashError
* rename ActorSystem files to ClusterSystem files; document init
* introduction
* document lifecycle watch
* cluster lifecycle image
* fix rename refactoring not having worked...
* [System] implement system.terminated
* wip on more cluster docs
* no more warnings except in NIO
1 parent d3296f6 commit f5441b0

File tree

72 files changed, +518 -376 lines

Binary files changed (contents not shown); one entry is -46.3 KB.

README.md

+48 -222 (large diff not rendered)

Samples/Sources/SampleDiningPhilosophers/boot.swift

+1 -1

@@ -43,7 +43,7 @@ typealias DefaultDistributedActorSystem = ClusterSystem
 let time = TimeAmount.seconds(20)

 switch CommandLine.arguments.dropFirst().first {
-case "dist":
+case "dist", "distributed":
     try! await DistributedDiningPhilosophers().run(for: time)
 default:
     try! await DiningPhilosophers().run(for: time)

Sources/ActorSingletonPlugin/ActorSingletonProxy.swift

+2 -2

@@ -68,7 +68,7 @@ internal class ActorSingletonProxy<Message: ActorMessage> {
     var behavior: _Behavior<Message> {
         .setup { context in
             if context.system.settings.enabled {
-                // Subscribe to `Cluster.Event` in order to update `targetNode`
+                // Subscribe to ``Cluster/Event`` in order to update `targetNode`
                 context.system.cluster.events.subscribe(
                     context.subReceive(_SubReceiveId(id: "clusterEvent-\(context.name)"), Cluster.Event.self) { event in
                         try self.receiveClusterEvent(context, event)

@@ -196,7 +196,7 @@ internal class ActorSingletonProxy<Message: ActorMessage> {
                 context.log.trace("Stashed message: \(message)", metadata: self.metadata(context))
             } catch {
                 switch error {
-                case StashError.full:
+                case _StashError.full:
                     // TODO: log this warning only "once in while" after buffer becomes full
                     context.log.warning("Buffer is full. Messages might start getting disposed.", metadata: self.metadata(context))
                     // Move the oldest message to dead letters to make room
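
The renamed `_StashError.full` branch above implements a drop-oldest overflow policy: when the stash buffer fills up, the oldest message is routed to dead letters to make room for the new one. Below is a small, illustrative Swift sketch of that policy as a standalone type; `BoundedStash` is a hypothetical name and is not the library's internal stash buffer.

/// Illustrative only: a bounded stash that evicts the oldest element when full,
/// mirroring the "move the oldest message to dead letters" strategy used by the proxy.
struct BoundedStash<Message> {
    private var buffer: [Message] = []
    let capacity: Int

    init(capacity: Int) {
        self.capacity = capacity
    }

    /// Stashes a message; returns the evicted (oldest) message if the buffer was full,
    /// so the caller can forward it to dead letters (or log it).
    mutating func stash(_ message: Message) -> Message? {
        var evicted: Message?
        if self.buffer.count >= self.capacity {
            evicted = self.buffer.removeFirst()
        }
        self.buffer.append(message)
        return evicted
    }

    /// Removes and returns all stashed messages, oldest first.
    mutating func unstashAll() -> [Message] {
        defer { self.buffer.removeAll() }
        return self.buffer
    }
}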

Sources/DistributedActors/ActorAddress.swift

+3 -1

@@ -17,6 +17,9 @@ import Distributed
 // ==== ----------------------------------------------------------------------------------------------------------------
 // MARK: ActorAddress

+/// The type of `ID` assigned to all distributed actors managed by the ``ClusterSystem``.
+public typealias ActorID = ActorAddress
+
 /// Uniquely identifies a DistributedActor within the cluster.
 ///
 /// It is assigned by the `ClusterSystem` at initialization time of a distributed actor,

@@ -50,7 +53,6 @@ import Distributed
 ///
 /// For example: `sact://[email protected]:7337/user/wallet/id-121242`.
 /// Note that the `ActorIncarnation` is not printed by default in the String representation of a path, yet may be inspected on demand.
-@available(macOS 10.15, *)
 public struct ActorAddress: @unchecked Sendable {
     /// Knowledge about a node being `local` is purely an optimization, and should not be relied on by actual code anywhere.
     /// It is on purpose not exposed to end-user code as well, and must remain so to not break the location transparency promises made by the runtime.
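
The new `ActorID` typealias is the `ID` type every distributed actor managed by the `ClusterSystem` receives at initialization. For orientation, a user-side actor might look like the sketch below; `Greeter` and its method are hypothetical names, and the `self.id` it interpolates is exactly such an `ActorID` (i.e. an `ActorAddress`).

import Distributed
import DistributedActors

// Hypothetical example actor; `Greeter` is not part of the library.
distributed actor Greeter {
    typealias ActorSystem = ClusterSystem

    distributed func greet(name: String) -> String {
        // `self.id` is the ActorID (an ActorAddress) assigned by the ClusterSystem.
        "Hello, \(name)! (from \(self.id))"
    }
}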

Sources/DistributedActors/Cluster/Cluster+Membership.swift

+2 -2

@@ -114,7 +114,7 @@ extension Cluster {
         self.members(withStatus: [status], reachability: reachability)
     }

-    /// Returns all members that are part of this membership, and have the any ``Cluster.MemberStatus`` that is part
+    /// Returns all members that are part of this membership, and have any ``Cluster/MemberStatus`` that is part
     /// of the `statuses` passed in and `reachability` status.
     ///
     /// - Parameters:

@@ -574,7 +574,7 @@ extension Cluster.Membership {
 // MARK: Applying Cluster.Event to Membership

 extension Cluster.Membership {
-    /// Applies any kind of `Cluster.Event` to the `Membership`, modifying it appropriately.
+    /// Applies any kind of ``Cluster/Event`` to the `Membership`, modifying it appropriately.
     /// This apply does not yield detailed information back about the type of change performed,
    /// and is useful as a catch-all to keep a `Membership` copy up-to-date, but without reacting on any specific transition.
    ///
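
As the doc comment notes, `apply` is a catch-all for keeping a local `Membership` copy in sync with the cluster. A minimal sketch of that usage follows; the `.empty` starting value and the exact throwing `apply(event:)` spelling are assumptions inferred from the doc comment, not verified against this revision.

import DistributedActors

// Local copy of the membership, updated as events arrive.
var membership: Cluster.Membership = .empty // assumed starting value

func onClusterEvent(_ event: Cluster.Event) {
    do {
        // Assumed spelling of the catch-all apply described above.
        try membership.apply(event: event)
    } catch {
        // A malformed or out-of-order event; keep the previous view and log it.
        print("Failed to apply \(event): \(error)")
    }
}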

Sources/DistributedActors/Cluster/ClusterControl.swift

+2 -2

@@ -35,7 +35,7 @@ public struct ClusterControl {
     ///
     /// Consider subscribing to `cluster.events` in order to react to membership changes dynamically, and never miss a change.
     ///
-    /// It is guaranteed that a `membershipSnapshot` is always at-least as up-to-date as an emitted `Cluster.Event`.
+    /// It is guaranteed that a `membershipSnapshot` is always at-least as up-to-date as an emitted ``Cluster/Event``.
     /// It may be "ahead" however, for example if a series of 3 events are published closely one after another,
     /// if one were to observe the `cluster.membershipSnapshot` when receiving the first event, it may already contain
     /// information related to the next two incoming events. For that reason is recommended to stick to one of the ways

@@ -115,7 +115,7 @@ public struct ClusterControl {
         self.ref.tell(.command(.downCommand(self.uniqueNode.node)))
     }

-    /// Mark *any* currently known member as `Cluster.MemberStatus.down`.
+    /// Mark *any* currently known member as ``Cluster/MemberStatus/down``.
     ///
     /// Beware that this API is not very precise and, if possible, the `down(Cluster.Member)` is preferred, as it indicates
     /// the downing intent of a *specific* actor system instance, rather than any system running on the given host-port pair.
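
For orientation, the control surface documented above hangs off `system.cluster`. The sketch below shows the shape of that usage; the async `ClusterSystem` initializer spelling, `join(node:)`, and the `Node(systemName:host:port:)` initializer are assumed spellings, and the name, host, and port values are hypothetical.

import DistributedActors

// (inside an async context)
let system = await ClusterSystem("Example")

// Ask this node to join another one (assumed `join(node:)` spelling; values hypothetical).
system.cluster.join(node: Node(systemName: "Example", host: "127.0.0.1", port: 8228))

// A point-in-time view of the membership; to never miss a change, subscribe to `cluster.events`.
let snapshot = system.cluster.membershipSnapshot

// Downing: per the doc comment, prefer the member-based variant over the host/port based one.
// system.cluster.down(member: someMember)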

Sources/DistributedActors/Cluster/ClusterEventStream.swift

+2 -2

@@ -15,7 +15,7 @@
 import Logging

 /// Specialized event stream behavior which takes into account emitting a snapshot event on first subscription,
-/// followed by a stream of `Cluster.Event`s.
+/// followed by a stream of ``Cluster/Event``s.
 ///
 /// This ensures that every subscriber to cluster events never misses any of the membership events, meaning
 /// it is possible for anyone to maintain a local up-to-date copy of `Membership` by applying all these events to that copy.

@@ -26,7 +26,7 @@ internal enum ClusterEventStream {

     // We maintain a snapshot i.e. the "latest version of the membership",
     // in order to eagerly publish it to anyone who subscribes immediately,
-    // followed by joining them to the subsequent `Cluster.Event` publishes.
+    // followed by joining them to the subsequent ``Cluster/Event`` publishes.
     //
     // Thanks to this, any subscriber immediately gets a pretty recent view of the membership,
     // followed by the usual updates via events. Since all events are published through this
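
The comments above describe a snapshot-then-updates pattern: a new subscriber first receives a snapshot reflecting the latest membership, then every subsequent event. The following is a generic, illustrative sketch of that pattern, not the library's internal `ClusterEventStream`; the type and member names are hypothetical.

import Foundation

// Illustrative pattern only: new subscribers get the latest snapshot first, then live events.
actor SnapshottingEventStream<Event: Sendable> {
    private var snapshot: Event?
    private var subscriptions: [UUID: AsyncStream<Event>.Continuation] = [:]

    /// Every new subscriber first receives the latest snapshot (if any), then all later events.
    func subscribe() -> AsyncStream<Event> {
        AsyncStream { continuation in
            if let snapshot = self.snapshot {
                continuation.yield(snapshot)
            }
            let id = UUID()
            self.subscriptions[id] = continuation
            continuation.onTermination = { [weak self] _ in
                Task { await self?.unsubscribe(id) }
            }
        }
    }

    /// Publishes `event` to all current subscribers and records `newSnapshot`
    /// as what future subscribers should see first.
    func publish(_ event: Event, snapshot newSnapshot: Event) {
        self.snapshot = newSnapshot
        for continuation in self.subscriptions.values {
            continuation.yield(event)
        }
    }

    private func unsubscribe(_ id: UUID) {
        self.subscriptions[id] = nil
    }
}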

Sources/DistributedActors/Cluster/ClusterShell.swift

+1 -1

@@ -313,7 +313,7 @@ internal class ClusterShell {
     case inbound(InboundMessage)
     /// Used to request making a change to the membership owned by the ClusterShell;
     /// Issued by downing or leader election and similar facilities. Thanks to centralizing the application of changes,
-    /// we can ensure that a `Cluster.Event` is signalled only once, and only when it is really needed.
+    /// we can ensure that a ``Cluster/Event`` is signalled only once, and only when it is really needed.
     /// E.g. signalling a down twice for whatever reason, needs not be notified two times to all subscribers of cluster events.
     ///
     /// If the passed in event applied to the current membership is an effective change, the change will be published using the `system.cluster.events`.

Sources/DistributedActors/Cluster/Downing/DowningSettings.swift

+1 -1

@@ -38,7 +38,7 @@ public enum OnDownActionStrategySettings {
     /// Take no (automatic) action upon noticing that this member is marked as [.down].
     ///
     /// When using this mode you should take special care to implement some form of shutting down of this node (!).
-    /// As a `Cluster.MemberStatus.down` node is effectively useless for the rest of the cluster -- i.e. other
+    /// As a ``Cluster/MemberStatus/down`` node is effectively useless for the rest of the cluster -- i.e. other
     /// members MUST refuse communication with this down node.
     case none
     /// Upon noticing that this member is marked as [.down], initiate a shutdown.
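
In practice this strategy is chosen through the cluster system's settings. The sketch below shows the intended shape; the `settings.onDownAction` property path, the `.gracefulShutdown(delay:)` case label, and the configuration-closure initializer are assumptions inferred from the enum above, not verified against this exact revision.

import DistributedActors

// (inside an async context)
let system = await ClusterSystem("Example") { settings in
    // Assumption: shut this node down a few seconds after the cluster marks it .down.
    settings.onDownAction = .gracefulShutdown(delay: .seconds(3))

    // Or take no automatic action -- then shutting the node down is your responsibility,
    // since other members will refuse to communicate with a .down node:
    // settings.onDownAction = .none
}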

Sources/DistributedActors/Cluster/Leadership.swift

+2 -2

@@ -39,7 +39,7 @@ import NIO // Future
 /// e.g. when a partition in the cluster occurs. This is usually beneficial to _liveness_
 ///
 /// ### Leadership Change Cluster Event
-/// If a new member is selected as leader, a ``Cluster.Event`` carrying ``Cluster.LeadershipChange`` will be emitted.
+/// If a new member is selected as leader, a ``Cluster/Event`` carrying ``Cluster/LeadershipChange`` will be emitted.
 /// Other actors may subscribe to `ClusterSystem.cluster.events` in order to receive and react to such changes,
 /// e.g. if an actor should only perform its duties if it is residing on the current leader node.
 public protocol LeaderElection {

@@ -315,7 +315,7 @@ extension Leadership {

 extension ClusterSystemSettings {
     public enum LeadershipSelectionSettings {
-        /// No automatic leader selection, you can write your own logic and issue a `Cluster.LeadershipChange` `Cluster.Event` to the `system.cluster.events` event stream.
+        /// No automatic leader selection, you can write your own logic and issue a `Cluster.LeadershipChange` ``Cluster/Event`` to the `system.cluster.events` event stream.
         case none
         /// All nodes get ordered by their node addresses and the "lowest" is always selected as a leader.
         case lowestReachable(minNumberOfMembers: Int)
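
These selection settings are picked when configuring the system. A minimal sketch follows; the enum and its cases come from the diff above, but the `autoLeaderElection` property name on `ClusterSystemSettings` and the configuration-closure initializer are assumptions.

import DistributedActors

// (inside an async context)
let system = await ClusterSystem("Example") { settings in
    // Assumed property name; picks the lowest-address reachable node as leader
    // once at least 3 members have joined.
    settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3)

    // Or disable automatic election and publish your own Cluster.LeadershipChange events:
    // settings.autoLeaderElection = .none
}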

Sources/DistributedActors/ClusterSystem.swift

+22 -12

@@ -456,6 +456,15 @@ public class ClusterSystem: DistributedActorSystem, @unchecked Sendable {
         }
     }

+    /// Suspends until the ``ClusterSystem`` is terminated by a call to ``shutdown``.
+    var terminated: Void {
+        get async throws {
+            try await Task.detached {
+                try Shutdown(receptacle: self.shutdownReceptacle).wait()
+            }.value
+        }
+    }
+
     /// Forcefully stops this actor system and all actors that live within it.
     /// This is an asynchronous operation and will be executed on a separate thread.
     ///

@@ -856,30 +865,31 @@ extension ClusterSystem {
     public func resolve<Act>(id address: ActorID, as actorType: Act.Type) throws -> Act?
         where Act: DistributedActor
     {
-        self.log.info("RESOLVE: \(address)")
+        self.log.trace("Resolve: \(address)")
         guard self.cluster.uniqueNode == address.uniqueNode else {
-            self.log.info("Resolved \(address) as remote, on node: \(address.uniqueNode)")
+            self.log.trace("Resolved \(address) as remote, on node: \(address.uniqueNode)")
             return nil
         }

         return self.namingLock.withLock {
             guard let managed = self._managedDistributedActors.get(identifiedBy: address) else {
-                log.info("Unknown reference on our UniqueNode", metadata: [
-                    "actor/identity": "\(address.detailedDescription)",
+                log.trace("Resolved as remote reference", metadata: [
+                    "actor/identity": "\(address)",
                 ])
                 // TODO(distributed): throw here, this should be a dead letter
                 return nil
             }

-            log.info("Resolved as local instance", metadata: [
-                "actor/identity": "\(address)",
-                "actor": "\(managed)",
-            ])
             if let resolved = managed as? Act {
-                log.info("Resolved \(address) as local")
+                log.trace("Resolved as local instance", metadata: [
+                    "actor/identity": "\(address)",
+                    "actor": "\(managed)",
+                ])
                 return resolved
             } else {
-                log.info("Resolved \(address) as remote")
+                log.trace("Resolved as remote reference", metadata: [
+                    "actor/identity": "\(address)",
+                ])
                 return nil
             }
         }

@@ -1175,9 +1185,9 @@ public enum ClusterSystemError: DistributedActorSystemError {
     case shuttingDown(String)
 }

-/// Error thrown when unable to resolve an ``ActorIdentity``.
+/// Error thrown when unable to resolve an ``ActorID``.
 ///
-/// Refer to ``ClusterSystem/resolve(_:as:)`` or the distributed actors Swift Evolution proposal for details.
+/// Refer to ``ClusterSystem/resolve(id:as:)`` or the distributed actors Swift Evolution proposal for details.
 public enum ResolveError: DistributedActorSystemError {
     case illegalIdentity(ClusterSystem.ActorID)
 }
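
The new `terminated` property lets callers suspend until `shutdown()` has completed, which is handy in a program's entrypoint. A minimal sketch, assuming `terminated` is reachable from user code (the diff declares it without an explicit access modifier) and assuming the async `ClusterSystem` initializer spelling:

import DistributedActors

@main
enum Boot {
    static func main() async throws {
        let system = await ClusterSystem("Example") // name is hypothetical

        // ... initialize distributed actors, join other nodes, etc. ...

        // Suspends here until some other code path calls `system.shutdown()`.
        try await system.terminated
    }
}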

Sources/DistributedActors/DeadLetters.swift

+4 -2

@@ -20,8 +20,10 @@ import Logging
 /// A "dead letter" is a message ("letter") that is impossible to deliver to its designated recipient.
 ///
 /// Often the reason for this is that the message was sent to given actor while it was still alive,
-/// yet once it arrived the destination node (or mailbox) the actor had already terminated, leaving the message to be dropped.
-/// Since such races can happen and point to problems in an actor based algorithm, such messages are not silently dropped,
+/// yet once it arrived at the destination node the actor had already terminated, leaving the message to be dropped.
+///
+/// Since such races can happen when a distributed remote actor is e.g. passivated before it receives a remote call,
+/// and can be tricky to diagnose otherwise, such messages are not silently dropped,
 /// but rather logged, with as much information as available (e.g. about the sender or source location of the initiating tell),
 /// such that when operating the system, bugs regarding undelivered messages can be spotted and fixed.
 ///
