From 36b4036d290340268f6462c705328b3391714a12 Mon Sep 17 00:00:00 2001
From: Ruben Bartelink
Date: Thu, 8 Jun 2023 18:15:22 +0100
Subject: [PATCH] Update to FsCodec 3rc10, Equinox 4rc10, Propulsion 3rc5 (#130)

---
 CHANGELOG.md | 4 +
 .../Domain.Tests/ExactlyOnceIngesterTests.fs | 4 +-
 .../Domain.Tests/PeriodsCarryingForward.fs | 4 +-
 equinox-patterns/Domain/Domain.fsproj | 8 +-
 .../Domain/ExactlyOnceIngester.fs | 28 ++--
 equinox-patterns/Domain/Infrastructure.fs | 16 +--
 equinox-patterns/Domain/ListEpoch.fs | 32 ++---
 equinox-patterns/Domain/ListIngester.fs | 10 +-
 equinox-patterns/Domain/ListSeries.fs | 28 ++--
 equinox-patterns/Domain/Period.fs | 56 ++++----
 .../Domain/{Config.fs => Store.fs} | 15 +--
 equinox-patterns/Domain/Types.fs | 24 ++--
 .../Domain.Tests/ContainerTests.fs | 2 +-
 .../Domain.Tests/Domain.Tests.fsproj | 2 +-
 .../Domain.Tests/FinalizationProcessTests.fs | 6 +-
 .../FinalizationTransactionTests.fs | 2 +-
 .../Domain.Tests/Infrastructure.fs | 14 +-
 .../Domain.Tests/MemoryStoreFixture.fs | 4 +-
 .../Domain.Tests/SerilogLogFixture.fs | 6 +-
 .../Domain.Tests/ShipmentTests.fs | 2 +-
 equinox-shipping/Domain/Container.fs | 32 ++---
 equinox-shipping/Domain/Domain.fsproj | 12 +-
 .../Domain/FinalizationProcess.fs | 16 +--
 .../Domain/FinalizationTransaction.fs | 58 ++++----
 equinox-shipping/Domain/Shipment.fs | 42 +++---
 .../Domain/{Config.fs => Store.fs} | 16 +--
 .../Domain/TransactionWatchdog.fs | 15 ++-
 equinox-shipping/Domain/Types.fs | 10 +-
 .../Watchdog.Integration/CosmosConnector.fs | 6 +-
 .../Watchdog.Integration/DynamoConnector.fs | 8 +-
 .../Watchdog.Integration/EsdbConnector.fs | 6 +-
 .../Watchdog.Integration/ReactorFixture.fs | 14 +-
 .../WatchdogIntegrationTests.fs | 24 ++--
 .../Watchdog.Lambda.Cdk.fsproj | 2 +-
 .../WatchdogLambdaStack.fs | 22 +--
 equinox-shipping/Watchdog.Lambda/Function.fs | 22 +--
 .../Watchdog.Lambda/Watchdog.Lambda.fsproj | 2 +-
 equinox-shipping/Watchdog/Args.fs | 17 +--
 equinox-shipping/Watchdog/Handler.fs | 42 +++---
 equinox-shipping/Watchdog/Infrastructure.fs | 12 +-
 equinox-shipping/Watchdog/Program.fs | 32 ++---
 equinox-shipping/Watchdog/SourceArgs.fs | 36 ++---
 equinox-shipping/Watchdog/SourceConfig.fs | 91 ++++++-------
 equinox-shipping/Watchdog/Watchdog.fsproj | 8 +-
 equinox-testbed/Infrastructure.fs | 10 +-
 equinox-testbed/Program.fs | 18 +--
 equinox-testbed/Services.fs | 42 +++---
 equinox-testbed/Storage.fs | 20 +--
 equinox-testbed/{Config.fs => Store.fs} | 17 ++-
 equinox-testbed/Testbed.fsproj | 12 +-
 equinox-web-csharp/Domain/Domain.csproj | 4 +-
 equinox-web-csharp/Domain/Infrastructure.cs | 2 +-
 equinox-web-csharp/Web/CosmosContext.cs | 2 +-
 equinox-web-csharp/Web/EquinoxContext.cs | 12 +-
 equinox-web-csharp/Web/Web.csproj | 8 +-
 equinox-web/Domain/Aggregate.fs | 34 ++---
 equinox-web/Domain/Domain.fsproj | 12 +-
 equinox-web/Domain/Infrastructure.fs | 4 +-
 equinox-web/Domain/{Config.fs => Store.fs} | 23 ++--
 equinox-web/Domain/Todo.fs | 62 ++++-----
 .../Web/Controllers/TodosController.fs | 16 +--
 equinox-web/Web/Program.fs | 4 +-
 equinox-web/Web/Startup.fs | 50 +++----
 equinox-web/Web/Web.fsproj | 6 +-
 feed-consumer/ApiClient.fs | 36 ++---
 feed-consumer/FeedConsumer.fsproj | 10 +-
 feed-consumer/Infrastructure.fs | 76 +++++------
 feed-consumer/Ingester.fs | 25 ++--
 feed-consumer/Program.fs | 20 +--
 feed-consumer/Types.fs | 8 +-
 feed-source/Domain.Tests/IngesterTests.fs | 8 +-
 feed-source/Domain/Domain.fsproj | 8 +-
 feed-source/Domain/{Config.fs => Store.fs} | 15 +--
 feed-source/Domain/TicketsEpoch.fs | 57 ++++----
 feed-source/Domain/TicketsIngester.fs | 22 +--
 feed-source/Domain/TicketsSeries.fs | 32 ++---
 feed-source/Domain/Types.fs | 30 ++---
 .../FeedApi/Controllers/TicketsController.fs | 24 ++--
 feed-source/FeedApi/FeedApi.fsproj | 2 +-
 feed-source/FeedApi/Infrastructure.fs | 14 +-
 feed-source/FeedApi/Program.fs | 22 +--
 feed-source/FeedApi/Startup.fs | 6 +-
 periodic-ingester/ApiClient.fs | 10 +-
 periodic-ingester/Infrastructure.fs | 72 +++++-----
 periodic-ingester/Ingester.fs | 17 ++-
 periodic-ingester/IngesterPrometheus.fs | 2 +-
 periodic-ingester/PeriodicIngester.fsproj | 8 +-
 periodic-ingester/Program.fs | 25 ++--
 periodic-ingester/Types.fs | 4 +-
 propulsion-archiver/Archiver.fsproj | 4 +-
 propulsion-archiver/Handler.fs | 4 +-
 propulsion-archiver/Infrastructure.fs | 22 +--
 propulsion-archiver/Program.fs | 34 ++---
 propulsion-consumer/Consumer.fsproj | 4 +-
 propulsion-consumer/Examples.fs | 125 +++++++++---------
 propulsion-consumer/Infrastructure.fs | 37 ++++--
 propulsion-consumer/Program.fs | 8 +-
 propulsion-cosmos-reactor/Contract.fs | 6 +-
 propulsion-cosmos-reactor/Infrastructure.fs | 49 ++++---
 propulsion-cosmos-reactor/Program.fs | 26 ++--
 propulsion-cosmos-reactor/Reactor.fs | 32 ++---
 propulsion-cosmos-reactor/Reactor.fsproj | 8 +-
 propulsion-cosmos-reactor/ReactorMetrics.fs | 10 +-
 .../{Config.fs => Store.fs} | 15 +--
 propulsion-cosmos-reactor/Todo.fs | 41 +++---
 propulsion-cosmos-reactor/TodoSummary.fs | 24 ++--
 .../DynamoStore.Cdk.fsproj | 6 +-
 propulsion-dynamostore-cdk/IndexerStack.fs | 16 +--
 propulsion-dynamostore-cdk/NotifierStack.fs | 16 +--
 propulsion-hotel/Domain.Tests/Arbitraries.fs | 2 +-
 .../Domain.Tests/GroupCheckoutFlow.fs | 6 +-
 propulsion-hotel/Domain/Domain.fsproj | 10 +-
 propulsion-hotel/Domain/GroupCheckout.fs | 54 ++++----
 propulsion-hotel/Domain/GuestStay.fs | 48 +++----
 .../Domain/{Config.fs => Store.fs} | 23 ++--
 propulsion-hotel/Domain/Types.fs | 10 +-
 .../Reactor.Integration/DynamoConnector.fs | 8 +-
 .../Reactor.Integration/MessageDbConnector.fs | 8 +-
 .../Reactor.Integration/ReactorFixture.fs | 12 +-
 .../ReactorIntegrationTests.fs | 20 +--
 .../Reactor.Integration/SerilogLogFixture.fs | 6 +-
 propulsion-hotel/Reactor/Args.fs | 6 +-
 .../Reactor/GroupCheckoutProcess.fs | 8 +-
 propulsion-hotel/Reactor/Handler.fs | 25 ++--
 propulsion-hotel/Reactor/Infrastructure.fs | 22 +--
 propulsion-hotel/Reactor/Program.fs | 26 ++--
 propulsion-hotel/Reactor/Reactor.fsproj | 8 +-
 propulsion-hotel/Reactor/SourceArgs.fs | 14 +-
 propulsion-hotel/Reactor/SourceConfig.fs | 58 ++++----
 propulsion-projector/Args.fs | 8 +-
 propulsion-projector/Handler.fs | 47 +++----
 propulsion-projector/Infrastructure.fs | 10 +-
 propulsion-projector/Program.fs | 30 ++---
 propulsion-projector/Projector.fsproj | 20 +--
 propulsion-projector/SourceArgs.fs | 42 +++---
 propulsion-projector/SourceConfig.fs | 103 +++++++--------
 propulsion-projector/{Config.fs => Store.fs} | 16 +--
 propulsion-pruner/Handler.fs | 8 +-
 propulsion-pruner/Infrastructure.fs | 24 ++--
 propulsion-pruner/Program.fs | 30 ++---
 propulsion-pruner/Pruner.fsproj | 4 +-
 propulsion-reactor/Args.fs | 8 +-
 propulsion-reactor/Contract.fs | 22 ++-
 propulsion-reactor/Handler.fs | 68 +++++-----
 propulsion-reactor/Infrastructure.fs | 50 ++++---
 propulsion-reactor/Ingester.fs | 60 ++++-----
 propulsion-reactor/Program.fs | 67 +++++-----
 propulsion-reactor/Reactor.fsproj | 16 +--
 propulsion-reactor/SourceArgs.fs | 68 +++++-----
 propulsion-reactor/SourceConfig.fs | 101 +++++++-------
 propulsion-reactor/{Config.fs => Store.fs} | 35 ++---
 propulsion-reactor/Todo.fs | 51 +++----
 propulsion-reactor/TodoSummary.fs | 32 ++---
 propulsion-summary-consumer/Infrastructure.fs | 39 ++++--
 propulsion-summary-consumer/Ingester.fs | 45 +++----
 propulsion-summary-consumer/Program.fs | 20 ++-
 .../{Config.fs => Store.fs} | 19 +--
 .../SummaryConsumer.fsproj | 8 +-
 propulsion-summary-consumer/TodoSummary.fs | 24 ++--
 propulsion-sync/Infrastructure.fs | 12 +-
 propulsion-sync/Program.fs | 80 +++++------
 propulsion-sync/Sync.fsproj | 6 +-
 .../Infrastructure.fs | 28 +---
 propulsion-tracking-consumer/Ingester.fs | 39 +++---
 propulsion-tracking-consumer/Program.fs | 16 +--
 propulsion-tracking-consumer/SkuSummary.fs | 32 ++---
 .../{Config.fs => Store.fs} | 11 +-
 .../TrackingConsumer.fsproj | 8 +-
 tests/Equinox.Templates.Tests/DotnetBuild.fs | 2 +-
 .../Equinox.Templates.Tests/Infrastructure.fs | 2 +-
 170 files changed, 1920 insertions(+), 1975 deletions(-)
 rename equinox-patterns/Domain/{Config.fs => Store.fs} (81%)
 rename equinox-shipping/Domain/{Config.fs => Store.fs} (90%)
 rename equinox-testbed/{Config.fs => Store.fs} (73%)
 rename equinox-web/Domain/{Config.fs => Store.fs} (87%)
 rename feed-source/Domain/{Config.fs => Store.fs} (80%)
 rename propulsion-cosmos-reactor/{Config.fs => Store.fs} (74%)
 rename propulsion-hotel/Domain/{Config.fs => Store.fs} (79%)
 rename propulsion-projector/{Config.fs => Store.fs} (84%)
 rename propulsion-reactor/{Config.fs => Store.fs} (68%)
 rename propulsion-summary-consumer/{Config.fs => Store.fs} (55%)
 rename propulsion-tracking-consumer/{Config.fs => Store.fs} (78%)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 71557c5a6..604ad7d5f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,10 @@ The `Unreleased` section name is replaced by the expected version of next release
 
 ### Added
 ### Changed
+
+- Target `Equinox` v `4.0.0-rc.9`, `Propulsion` v `3.0.0-rc.3` [#128](https://github.com/jet/dotnet-templates/pull/128)
+- `module Config` -> `module Store`/`module Factory` [#128](https://github.com/jet/dotnet-templates/pull/128)
+
 ### Removed
 ### Fixed
diff --git a/equinox-patterns/Domain.Tests/ExactlyOnceIngesterTests.fs b/equinox-patterns/Domain.Tests/ExactlyOnceIngesterTests.fs
index 2771c57ec..a67fcdc8d 100644
--- a/equinox-patterns/Domain.Tests/ExactlyOnceIngesterTests.fs
+++ b/equinox-patterns/Domain.Tests/ExactlyOnceIngesterTests.fs
@@ -11,7 +11,7 @@ let linger, maxItemsPerEpoch = System.TimeSpan.FromMilliseconds 1., 5
 
 let createSut =
     // While we use ~ 200ms when hitting Cosmos, there's no value in doing so in the context of these property based tests
-    ListIngester.Config.create_ linger maxItemsPerEpoch
+    ListIngester.Factory.create_ linger maxItemsPerEpoch
 
 type GuidStringN<[<Measure>] 'm> = GuidStringN of string<'m> with static member op_Explicit(GuidStringN x) = x
 let (|Ids|) = Array.map (function GuidStringN x -> x)
@@ -28,7 +28,7 @@ type Custom =
 let [<Property(Arbitrary = [| typeof<Custom> |])>] properties shouldUseSameSut (Gap gap) (initialEpochId, NonEmptyArray (Ids initialItems)) (NonEmptyArray (Ids items)) = async {
-    let store = Equinox.MemoryStore.VolatileStore() |> Config.Store.Memory
+    let store = Equinox.MemoryStore.VolatileStore() |> Store.Context.Memory
     let mutable nextEpochId = initialEpochId
    for _ in 1 .. gap do nextEpochId <- ExactlyOnceIngester.Internal.next nextEpochId
diff --git a/equinox-patterns/Domain.Tests/PeriodsCarryingForward.fs b/equinox-patterns/Domain.Tests/PeriodsCarryingForward.fs
index 8493aa489..8de28d3b6 100644
--- a/equinox-patterns/Domain.Tests/PeriodsCarryingForward.fs
+++ b/equinox-patterns/Domain.Tests/PeriodsCarryingForward.fs
@@ -9,8 +9,8 @@ open Xunit
 
 [<Fact>]
 let ``Happy path`` () =
-    let store = Equinox.MemoryStore.VolatileStore() |> Config.Store.Memory
-    let service = Config.create store
+    let store = Equinox.MemoryStore.VolatileStore() |> Store.Context.Memory
+    let service = Factory.create store
     let decide items _state =
         let apply = Array.truncate 2 items
         let overflow = Array.skip apply.Length items
diff --git a/equinox-patterns/Domain/Domain.fsproj b/equinox-patterns/Domain/Domain.fsproj
index 48b4ee60d..656290c63 100644
--- a/equinox-patterns/Domain/Domain.fsproj
+++ b/equinox-patterns/Domain/Domain.fsproj
@@ -7,7 +7,7 @@
-
+
@@ -16,9 +16,9 @@
-
-
-
+
+
+
diff --git a/equinox-patterns/Domain/ExactlyOnceIngester.fs b/equinox-patterns/Domain/ExactlyOnceIngester.fs
index bec8b97f8..ffc1301c2 100644
--- a/equinox-patterns/Domain/ExactlyOnceIngester.fs
+++ b/equinox-patterns/Domain/ExactlyOnceIngester.fs
@@ -7,32 +7,32 @@ module Patterns.Domain.ExactlyOnceIngester
 
 open FSharp.UMX // %
 
-type IngestResult<'req, 'res> = { accepted : 'res[]; closed : bool; residual : 'req[] }
+type IngestResult<'req, 'res> = { accepted: 'res[]; closed: bool; residual: 'req[] }
 
 module Internal =
 
     let unknown<[<Measure>] 'm> = UMX.tag -1
-    let next<[<Measure>] 'm> (value : int<'m>) = UMX.tag<'m>(UMX.untag value + 1)
+    let next<[<Measure>] 'm> (value: int<'m>) = UMX.tag<'m>(UMX.untag value + 1)
 
 /// Ensures any given item is only added to the series exactly once by virtue of the following protocol:
 /// 1. Caller obtains an origin epoch via ActiveIngestionEpochId, storing that alongside the source item
 /// 2. Caller deterministically obtains that origin epoch to supply to Ingest/TryIngest such that retries can be idempotent
 type Service<[<Measure>] 'id, 'req, 'res, 'outcome> internal
-    ( log : Serilog.ILogger,
-      readActiveEpoch : unit -> Async<int<'id>>,
-      markActiveEpoch : int<'id> -> Async<unit>,
-      ingest : int<'id> * 'req [] -> Async<IngestResult<'req, 'res>>,
-      mapResults : 'res [] -> 'outcome seq,
+    ( log: Serilog.ILogger,
+      readActiveEpoch: unit -> Async<int<'id>>,
+      markActiveEpoch: int<'id> -> Async<unit>,
+      ingest: int<'id> * 'req [] -> Async<IngestResult<'req, 'res>>,
+      mapResults: 'res [] -> 'outcome seq,
       linger) =
 
-    let uninitializedSentinel : int = %Internal.unknown
+    let uninitializedSentinel: int = %Internal.unknown
     let mutable currentEpochId_ = uninitializedSentinel
     let currentEpochId () = if currentEpochId_ <> uninitializedSentinel then Some %currentEpochId_ else None
 
-    let tryIngest (reqs : (int<'id> * 'req)[][]) =
+    let tryIngest (reqs: (int<'id> * 'req)[][]) =
         let rec aux ingestedItems items = async {
             let epochId = items |> Seq.map fst |> Seq.min
-            let epochItems, futureEpochItems = items |> Array.partition (fun (e, _ : 'req) -> e = epochId)
+            let epochItems, futureEpochItems = items |> Array.partition (fun (e, _: 'req) -> e = epochId)
             let! res = ingest (epochId, Array.map snd epochItems)
             let ingestedItemIds = Array.append ingestedItems res.accepted
             let logLevel =
@@ -56,7 +56,7 @@ type Service<[<Measure>] 'id, 'req, 'res, 'outcome> internal
 
     /// In the overall processing using an Ingester, we frequently have a Scheduler running N streams concurrently
     /// If each thread works in isolation, they'll conflict with each other as they feed the Items into the batch in epochs.Ingest
-    /// Instead, we enable concurrent requests to coalesce by having requests converge in this AsyncBatchingGate
+    /// Instead, we enable concurrent requests to coalesce by having requests converge in this Batcher
     /// This has the following critical effects:
     /// - Traffic to CosmosDB is naturally constrained to a single flight in progress
     ///   (BatchingGate does not release next batch for execution until current has succeeded or throws)
@@ -65,11 +65,11 @@ type Service<[<Measure>] 'id, 'req, 'res, 'outcome> internal
     ///   a) back-off, re-read and retry if there's a concurrent write Optimistic Concurrency Check failure when writing the stream
     ///   b) enter a prolonged period of retries if multiple concurrent writes trigger rate limiting and 429s from CosmosDB
     ///   c) readers will less frequently encounter sustained 429s on the batch
-    let batchedIngest = Equinox.Core.AsyncBatchingGate(tryIngest, linger)
+    let batchedIngest = Equinox.Core.Batching.Batcher(tryIngest, linger)
 
     /// Run the requests over a chain of epochs.
     /// Returns the subset that actually got handled this time around (i.e., exclusive of items that did not trigger writes per the idempotency rules).
-    member _.IngestMany(originEpoch, reqs) : Async<'outcome seq> = async {
+    member _.IngestMany(originEpoch, reqs): Async<'outcome seq> = async {
         if Array.isEmpty reqs then return Seq.empty else
 
         let! results = batchedIngest.Execute [| for x in reqs -> originEpoch, x |]
@@ -80,7 +80,7 @@ type Service<[<Measure>] 'id, 'req, 'res, 'outcome> internal
 
     /// The fact that any Ingest call for a given item (or set of items) always commences from the same origin is key to exactly once insertion guarantee.
     /// Caller should first store this alongside the item in order to deterministically be able to start from the same origin in idempotent retry cases.
     /// Uses cached values as epoch transitions are rare, and caller needs to deal with the inherent race condition in any case
-    member _.ActiveIngestionEpochId() : Async<int<'id>> =
+    member _.ActiveIngestionEpochId(): Async<int<'id>> =
         match currentEpochId () with
         | Some currentEpochId -> async { return currentEpochId }
         | None -> readActiveEpoch()
diff --git a/equinox-patterns/Domain/Infrastructure.fs b/equinox-patterns/Domain/Infrastructure.fs
index e29ac3876..603915faa 100644
--- a/equinox-patterns/Domain/Infrastructure.fs
+++ b/equinox-patterns/Domain/Infrastructure.fs
@@ -2,31 +2,31 @@ module Patterns.Domain.Infrastructure
 
 /// Buffers events accumulated from a series of decisions while evolving the presented `state` to reflect said proposed `Events`
-type Accumulator<'e, 's>(originState : 's, fold : 's -> seq<'e> -> 's) =
+type Accumulator<'e, 's>(originState: 's, fold: 's -> seq<'e> -> 's) =
     let mutable state = originState
     let pendingEvents = ResizeArray<'e>()
-    let (|Apply|) (xs : #seq<'e>) = state <- fold state xs; pendingEvents.AddRange xs
+    let (|Apply|) (xs: #seq<'e>) = state <- fold state xs; pendingEvents.AddRange xs
 
     /// Run an Async interpret function that does not yield a result
-    member _.Transact(interpret : 's -> Async<#seq<'e>>) : Async<unit> = async {
+    member _.Transact(interpret: 's -> Async<#seq<'e>>): Async<unit> = async {
         let! Apply = interpret state in return () }
 
     /// Run an Async decision function, buffering and applying any Events yielded
-    member _.Transact(decide : 's -> Async<'r * #seq<'e>>) : Async<'r> = async {
+    member _.Transact(decide: 's -> Async<'r * #seq<'e>>): Async<'r> = async {
         let! r, Apply = decide state in return r }
 
     /// Run a decision function, buffering and applying any Events yielded
-    member _.Transact(decide : 's -> 'r * #seq<'e>) : 'r =
+    member _.Transact(decide: 's -> 'r * #seq<'e>): 'r =
         let r, Apply = decide state in r
 
     /// Accumulated events based on the Decisions applied to date
-    member _.Events : 'e list =
+    member _.Events: 'e list =
         List.ofSeq pendingEvents
 
 //    /// Run a decision function that does not yield a result
-//    member x.Transact(interpret) : unit =
+//    member x.Transact(interpret): unit =
 //        x.Transact(fun state -> (), interpret state)
 
 //    /// Projects from the present state including accumulated events
-//    member _.Query(render : 's -> 'r) : 'r =
+//    member _.Query(render: 's -> 'r): 'r =
 //        render state
diff --git a/equinox-patterns/Domain/ListEpoch.fs b/equinox-patterns/Domain/ListEpoch.fs
index 5e3d23f94..cb1b4def3 100644
--- a/equinox-patterns/Domain/ListEpoch.fs
+++ b/equinox-patterns/Domain/ListEpoch.fs
@@ -8,11 +8,11 @@ let streamId = Equinox.StreamId.gen ListEpochId.toString
 module Events =
 
     type Event =
-        | Ingested of {| ids : ItemId[] |}
+        | Ingested of {| ids: ItemId[] |}
         | Closed
-        | Snapshotted of {| ids : ItemId[]; closed : bool |}
+        | Snapshotted of {| ids: ItemId[]; closed: bool |}
         interface TypeShape.UnionContract.IUnionContract
-    let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement
+    let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement
 
 module Fold =
 
@@ -22,7 +22,7 @@ module Fold =
         | Events.Ingested e -> Array.append e.ids ids, closed
         | Events.Closed -> (ids, true)
         | Events.Snapshotted e -> (e.ids, e.closed)
-    let fold : State -> Events.Event seq -> State = Seq.fold evolve
+    let fold: State -> Events.Event seq -> State = Seq.fold evolve
     let isOrigin = function Events.Snapshotted _ -> true | _ -> false
     let toSnapshot (ids, closed) = Events.Snapshotted {| ids = ids; closed = closed |}
 
@@ -41,35 +41,35 @@ let decide shouldClose candidateIds = function
             let ingestEvent = Events.Ingested {| ids = news |}
             news, if closing then [ ingestEvent ; Events.Closed ] else [ ingestEvent ]
         let _, closed = Fold.fold state events
-        let res : ExactlyOnceIngester.IngestResult<_, _> = { accepted = added; closed = closed; residual = [||] }
+        let res: ExactlyOnceIngester.IngestResult<_, _> = { accepted = added; closed = closed; residual = [||] }
         res, events
     | currentIds, true ->
         { accepted = [||]; closed = true; residual = candidateIds |> Array.except currentIds (*|> Array.distinct*) }, []
 
 // NOTE see feedSource for example of separating Service logic into Ingestion and Read Services in order to vary the folding and/or state held
 type Service internal
-    ( shouldClose : ItemId[] -> ItemId[] -> bool, // let outer layers decide whether ingestion should trigger closing of the batch
-      resolve : ListEpochId -> Equinox.Decider<Events.Event, Fold.State>) =
+    ( shouldClose: ItemId[] -> ItemId[] -> bool, // let outer layers decide whether ingestion should trigger closing of the batch
+      resolve: ListEpochId -> Equinox.Decider<Events.Event, Fold.State>) =
 
     /// Ingest the supplied items. Yields relevant elements of the post-state to enable generation of stats
     /// and facilitate deduplication of incoming items in order to avoid null store round-trips where possible
-    member _.Ingest(epochId, items) : Async<ExactlyOnceIngester.IngestResult<ItemId, ItemId>> =
+    member _.Ingest(epochId, items): Async<ExactlyOnceIngester.IngestResult<ItemId, ItemId>> =
         let decider = resolve epochId
         // NOTE decider which will initially transact against potentially stale cached state, which will trigger a
         // resync if another writer has gotten in before us. This is a conscious decision in this instance; the bulk
        // of writes are presumed to be coming from within this same process
-        decider.Transact(decide shouldClose items, load = Equinox.AllowStale)
+        decider.Transact(decide shouldClose items, load = Equinox.AnyCachedValue)
 
-    /// Returns all the items currently held in the stream (Not using AllowStale on the assumption this needs to see updates from other apps)
-    member _.Read epochId : Async<Fold.State> =
+    /// Returns all the items currently held in the stream (Not using AnyCachedValue on the assumption this needs to see updates from other apps)
+    member _.Read epochId: Async<Fold.State> =
         let decider = resolve epochId
-        decider.Query id
+        decider.Query(id, Equinox.AllowStale (System.TimeSpan.FromSeconds 1))
 
-module Config =
+module Factory =
 
     let private (|Category|) = function
-        | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store
-        | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
+        | Store.Context.Memory store -> Store.Memory.create Events.codec Fold.initial Fold.fold store
+        | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
     let create maxItemsPerEpoch (Category cat) =
         let shouldClose candidateItems currentItems = Array.length currentItems + Array.length candidateItems >= maxItemsPerEpoch
-        Service(shouldClose, streamId >> Config.resolveDecider cat Category)
+        Service(shouldClose, streamId >> Store.resolveDecider cat Category)
diff --git a/equinox-patterns/Domain/ListIngester.fs b/equinox-patterns/Domain/ListIngester.fs
index 9553af16f..98bbe5d33 100644
--- a/equinox-patterns/Domain/ListIngester.fs
+++ b/equinox-patterns/Domain/ListIngester.fs
@@ -1,22 +1,22 @@
 module Patterns.Domain.ListIngester
 
-type Service internal (ingester : ExactlyOnceIngester.Service<_, _, _, _>) =
+type Service internal (ingester: ExactlyOnceIngester.Service<_, _, _, _>) =
 
     /// Slot the item into the series of epochs.
     /// Returns items that actually got added (i.e. may be empty if it was an idempotent retry).
-    member _.IngestItems(originEpochId, items : ItemId[]) : Async<seq<ItemId>>=
+    member _.IngestItems(originEpochId, items: ItemId[]): Async<seq<ItemId>>=
         ingester.IngestMany(originEpochId, items)
 
     /// Efficiently determine a valid ingestion origin epoch
     member _.ActiveIngestionEpochId() =
         ingester.ActiveIngestionEpochId()
 
-module Config =
+module Factory =
 
     let create_ linger maxItemsPerEpoch store =
         let log = Serilog.Log.ForContext<Service>()
-        let series = ListSeries.Config.create store
-        let epochs = ListEpoch.Config.create maxItemsPerEpoch store
+        let series = ListSeries.Factory.create store
+        let epochs = ListEpoch.Factory.create maxItemsPerEpoch store
         let ingester = ExactlyOnceIngester.create log linger (series.ReadIngestionEpochId, series.MarkIngestionEpochId) (epochs.Ingest, Array.toSeq)
         Service(ingester)
     let create store =
diff --git a/equinox-patterns/Domain/ListSeries.fs b/equinox-patterns/Domain/ListSeries.fs
index 136956fb5..021a07edb 100644
--- a/equinox-patterns/Domain/ListSeries.fs
+++ b/equinox-patterns/Domain/ListSeries.fs
@@ -13,10 +13,10 @@ let streamId () = Equinox.StreamId.gen ListSeriesId.toString ListSeriesId.wellKnownId
 module Events =
 
     type Event =
-        | Started of {| epochId : ListEpochId |}
-        | Snapshotted of {| active : ListEpochId |}
+        | Started of {| epochId: ListEpochId |}
+        | Snapshotted of {| active: ListEpochId |}
         interface TypeShape.UnionContract.IUnionContract
-    let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement
+    let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement
 
 module Fold =
 
@@ -25,33 +25,33 @@ module Fold =
     let private evolve _state = function
         | Events.Started e -> Some e.epochId
         | Events.Snapshotted e -> Some e.active
-    let fold : State -> Events.Event seq -> State = Seq.fold evolve
+    let fold: State -> Events.Event seq -> State = Seq.fold evolve
     let isOrigin = function Events.Snapshotted _ -> true | _ -> false
     let toSnapshot s = Events.Snapshotted {| active = Option.get s |}
 
-let interpret epochId (state : Fold.State) =
+let interpret epochId (state: Fold.State) =
     [if state |> Option.forall (fun cur -> cur < epochId) && epochId >= ListEpochId.initial then
         yield Events.Started {| epochId = epochId |}]
 
-type Service internal (resolve : unit -> Equinox.Decider<Events.Event, Fold.State>) =
+type Service internal (resolve: unit -> Equinox.Decider<Events.Event, Fold.State>) =
 
     /// Determines the current active epoch
     /// Uses cached values as epoch transitions are rare, and caller needs to deal with the inherent race condition in any case
-    member _.ReadIngestionEpochId() : Async<ListEpochId> =
+    member _.ReadIngestionEpochId(): Async<ListEpochId> =
         let decider = resolve ()
         decider.Query(Option.defaultValue ListEpochId.initial)
 
     /// Mark specified `epochId` as live for the purposes of ingesting
     /// Writers are expected to react to having writes to an epoch denied (due to it being Closed) by anointing a successor via this
-    member _.MarkIngestionEpochId epochId : Async<unit> =
+    member _.MarkIngestionEpochId epochId: Async<unit> =
         let decider = resolve ()
-        decider.Transact(interpret epochId, load = Equinox.AllowStale)
+        decider.Transact(interpret epochId, load = Equinox.AnyCachedValue)
 
-module Config =
+module Factory =
 
     let private (|Category|) = function
-        | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store
-        | Config.Store.Cosmos (context, cache) ->
-            Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
-    let create (Category cat) = Service(streamId >> Config.resolveDecider cat Category)
+        | Store.Context.Memory store -> Store.Memory.create Events.codec Fold.initial Fold.fold store
+        | Store.Context.Cosmos (context, cache) ->
+            Store.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
+    let create (Category cat) = Service(streamId >> Store.resolveDecider cat Category)
diff --git a/equinox-patterns/Domain/Period.fs b/equinox-patterns/Domain/Period.fs
index ee626927d..657ab0fc6 100644
--- a/equinox-patterns/Domain/Period.fs
+++ b/equinox-patterns/Domain/Period.fs
@@ -11,23 +11,23 @@ let streamId = Equinox.StreamId.gen PeriodId.toString
 // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care
 module Events =
 
-    type ItemIds = { items : ItemId[] }
+    type ItemIds = { items: ItemId[] }
     type Balance = ItemIds
     type Event =
         | BroughtForward of Balance
         | Added of ItemIds
         | CarriedForward of Balance
         interface TypeShape.UnionContract.IUnionContract
-    let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement
+    let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement
 
 module Fold =
 
     type State =
         | Initial
-        | Open of items : OpenState
-        | Closed of items : ItemId[] * carryingForward : ItemId[]
+        | Open of items: OpenState
+        | Closed of items: ItemId[] * carryingForward: ItemId[]
     and OpenState = ItemId[]
-    let initial : State = Initial
+    let initial: State = Initial
     let (|Items|) = function Initial -> [||] | Open i | Closed (i, _) -> i
     open Events
     let evolve (Items items) = function
@@ -37,7 +37,7 @@ module Fold =
     let fold = Seq.fold evolve
 
     /// Handles one-time opening of the Period, if applicable
-    let maybeOpen (getIncomingBalance : unit -> Async<Balance>) state = async {
+    let maybeOpen (getIncomingBalance: unit -> Async<Balance>) state = async {
         match state with
         | Initial -> let! balance = getIncomingBalance ()
                      return [BroughtForward balance]
@@ -46,14 +46,14 @@ module Fold =
 
     /// Handles attempting to apply the request to the stream (assuming it's not already closed)
     /// The `decide` function can signal a need to close and/or split the request by emitting it as the residual
-    let tryIngest (decide : 'req -> State -> 'req * 'result * Event list) req = function
+    let tryIngest (decide: 'req -> State -> 'req * 'result * Event list) req = function
         | Initial -> failwith "Invalid tryIngest; stream not Open"
         | Open _ as s -> let residual, result, events = decide req s
                          (residual, Some result), events
         | Closed _ -> (req, None), []
 
     /// Yields or computes the Balance to be Carried forward and/or application of the event representing that decision
-    let maybeClose (decideCarryForward : 'residual -> OpenState -> Async<Balance option>) residual state = async {
+    let maybeClose (decideCarryForward: 'residual -> OpenState -> Async<Balance option>) residual state = async {
         match state with
         | Initial -> return failwith "Invalid maybeClose; stream not Open"
        | Open s -> let! cf = decideCarryForward residual s
@@ -64,23 +64,23 @@ module Fold =
 
 [<NoComparison; NoEquality>]
 type Rules<'request, 'result> =
    {  getIncomingBalance: unit -> Async<Events.Balance>
-      decideIngestion : 'request -> Fold.State -> 'request * 'result * Events.Event list
+      decideIngestion: 'request -> Fold.State -> 'request * 'result * Events.Event list
       decideCarryForward: 'request -> Fold.OpenState -> Async<Events.Balance option> }
 
 /// The result of the overall ingestion, consisting of
 type Result<'request, 'result> =
    {  /// residual of the request, in the event where it was not possible to ingest it completely
-      residual : 'request
+      residual: 'request
       /// The result of the decision (assuming processing took place)
-      result : 'result option
+      result: 'result option
       /// balance being carried forward in the event that the successor period has yet to have the BroughtForward event generated
-      carryForward : Events.Balance option }
+      carryForward: Events.Balance option }
 
 /// Decision function ensuring the high level rules of an Period are adhered to viz.
 /// 1. Streams must open with a BroughtForward event (obtained via Rules.getIncomingBalance if this is an uninitialized Period)
 /// 2. (If the Period has not closed) Rules.decide gets to map the request to events and a residual
 /// 3. Rules.decideCarryForward may trigger the closing of the Period based on the residual and/or the State by emitting Some balance
-let decideIngestWithCarryForward rules req s : Async<Result<'request, 'result> * Events.Event list> = async {
+let decideIngestWithCarryForward rules req s: Async<Result<'request, 'result> * Events.Event list> = async {
     let acc = Accumulator(s, Fold.fold)
     do! acc.Transact(Fold.maybeOpen rules.getIncomingBalance)
     let residual, result = acc.Transact(Fold.tryIngest rules.decideIngestion req)
@@ -89,17 +89,17 @@ let decideIngestWithCarryForward rules req s : Async<Result<'request, 'result> * Events.Event list> = async {
 }
 
 /// Manages Application of Requests to the Period's stream, including closing preceding periods as appropriate
-type Service internal (resolve : PeriodId -> Equinox.Decider<Events.Event, Fold.State>) =
+type Service internal (resolve: PeriodId -> Equinox.Decider<Events.Event, Fold.State>) =
 
     let calcBalance state =
-        let createEventsBalance items : Events.Balance = { items = items }
+        let createEventsBalance items: Events.Balance = { items = items }
         async { return createEventsBalance state }
     let genBalance state = async { let! bal = calcBalance state in return Some bal }
 
     /// Walks back as far as necessary to ensure any preceding Periods that are not yet Closed are, then closes the target if necessary
     /// Yields the accumulated balance to be carried forward into the next period
-    let rec close periodId : Async<Events.Balance> =
-        let rules : Rules<unit, unit> =
+    let rec close periodId: Async<Events.Balance> =
+        let rules: Rules<unit, unit> =
            {  getIncomingBalance = fun () -> close periodId
               decideIngestion = fun () _state -> (), (), []
               decideCarryForward = fun () -> genBalance } // always close
@@ -107,20 +107,20 @@ type Service internal (resolve : PeriodId -> Equinox.Decider<Events.Event, Fold.State>) =
 
-    let tryTransact periodId getIncoming (decide : 'request -> Fold.State -> 'request * 'result * Events.Event list) request shouldClose : Async<Result<'request, 'result>> =
-        let rules : Rules<'request, 'result> =
+    let tryTransact periodId getIncoming (decide: 'request -> Fold.State -> 'request * 'result * Events.Event list) request shouldClose: Async<Result<'request, 'result>> =
+        let rules: Rules<'request, 'result> =
            {  getIncomingBalance = getIncoming
              decideIngestion = fun request state -> let residual, result, events = decide request state in residual, result, events
             decideCarryForward = fun res state -> async { if shouldClose res then return! genBalance state else return None } } // also close, if we should
         let decider = resolve periodId
-        decider.TransactAsync(decideIngestWithCarryForward rules request, load = Equinox.AllowStale)
+        decider.TransactAsync(decideIngestWithCarryForward rules request, load = Equinox.AnyCachedValue)
 
     /// Runs the decision function on the specified Period, closing and bringing forward balances from preceding Periods if necessary
     /// Processing completes when `decide` yields None for the residual of the 'request
-    member _.Transact(periodId, decide : 'request -> Fold.State -> 'request option * 'result * Events.Event list, request : 'request) : Async<'result> =
+    member _.Transact(periodId, decide: 'request -> Fold.State -> 'request option * 'result * Events.Event list, request: 'request): Async<'result> =
         let rec aux periodId getIncoming req = async {
             let decide req state = decide (Option.get req) state
             match! tryTransact periodId getIncoming decide req Option.isSome with
@@ -133,17 +133,17 @@ type Service internal (resolve : PeriodId -> Equinox.Decider<Events.Event, Fold.State>) =
 
-module Config =
+module Factory =
 
     let private (|Category|) = function
-        | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store
-        | Config.Store.Cosmos (context, cache) ->
+        | Store.Context.Memory store -> Store.Memory.create Events.codec Fold.initial Fold.fold store
+        | Store.Context.Cosmos (context, cache) ->
             // Not using snapshots, on the basis that the writes are all coming from this process, so the cache will be sufficient
             // to make reads cheap enough, with the benefit of writes being cheaper as you're not paying to maintain the snapshot
-            Config.Cosmos.createUnoptimized Events.codecJe Fold.initial Fold.fold (context, cache)
-    let create (Category cat) = Service(streamId >> Config.resolveDecider cat Category)
+            Store.Cosmos.createUnoptimized Events.codecJe Fold.initial Fold.fold (context, cache)
+    let create (Category cat) = Service(streamId >> Store.resolveDecider cat Category)
diff --git a/equinox-patterns/Domain/Config.fs b/equinox-patterns/Domain/Store.fs
similarity index 81%
rename from equinox-patterns/Domain/Config.fs
rename to equinox-patterns/Domain/Store.fs
index 8dbc31f8a..32cdcd373 100644
--- a/equinox-patterns/Domain/Config.fs
+++ b/equinox-patterns/Domain/Store.fs
@@ -1,21 +1,20 @@
-module Patterns.Domain.Config
+module Patterns.Domain.Store
 
 let log = Serilog.Log.ForContext("isMetric", true)
 let resolveDecider cat = Equinox.Decider.resolve log cat
 
-module EventCodec =
+module Codec =
 
     open FsCodec.SystemTextJson
 
-    let private defaultOptions = Options.Create()
     let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> =
-        Codec.Create<'t>(options = defaultOptions)
+        Codec.Create<'t>() // options = Options.Default
     let genJsonElement<'t when 't :> TypeShape.UnionContract.IUnionContract> =
-        CodecJsonElement.Create<'t>(options = defaultOptions)
+        CodecJsonElement.Create<'t>() // options = Options.Default
 
 module Memory =
 
-    let create codec initial fold store : Equinox.Category<_, _, _> =
+    let create codec initial fold store: Equinox.Category<_, _, _> =
         Equinox.MemoryStore.MemoryStoreCategory(store, FsCodec.Deflate.EncodeUncompressed codec, fold, initial)
 
 module Cosmos =
@@ -33,6 +32,6 @@ module Cosmos =
         createCached codec initial fold accessStrategy (context, cache)
 
 [<NoComparison; NoEquality>]
-type Store<'t> =
+type Context<'t> =
     | Memory of Equinox.MemoryStore.VolatileStore<'t>
-    | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache
+    | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Cache
diff --git a/equinox-patterns/Domain/Types.fs b/equinox-patterns/Domain/Types.fs
index 3f7412b82..f73c2c57c 100644
--- a/equinox-patterns/Domain/Types.fs
+++ b/equinox-patterns/Domain/Types.fs
@@ -11,10 +11,10 @@ type PeriodId = int<periodId>
 and [<Measure>] periodId
 module PeriodId =
-    let parse : int -> PeriodId = UMX.tag
-    let tryPrev (value : PeriodId) : PeriodId option = match UMX.untag value with 0 -> None | x -> Some (UMX.tag(x - 1))
-    let next (value : PeriodId) : PeriodId = UMX.tag (UMX.untag value + 1)
-    let toString : PeriodId -> string = UMX.untag >> string
+    let parse: int -> PeriodId = UMX.tag
+    let tryPrev (value: PeriodId): PeriodId option = match UMX.untag value with 0 -> None | x -> Some (UMX.tag(x - 1))
+    let next (value: PeriodId): PeriodId = UMX.tag (UMX.untag value + 1)
+    let toString: PeriodId -> string = UMX.untag >> string
 
 /// Identifies an Epoch that holds a list of Items
 /// TODO replace `List` with a Domain term referencing the specific element being managed
@@ -22,9 +22,9 @@ type ListEpochId = int<listEpochId>
 and [<Measure>] listEpochId
 module ListEpochId =
-    let initial : ListEpochId = UMX.tag 0
-//  let value : ListEpochId -> int = UMX.untag
-    let toString : ListEpochId -> string = UMX.untag >> string
+    let initial: ListEpochId = UMX.tag 0
+//  let value: ListEpochId -> int = UMX.untag
+    let toString: ListEpochId -> string = UMX.untag >> string
 
 /// Identifies an Item stored within an Epoch
 /// TODO replace `Item` with a Domain term referencing the specific element being managed
@@ -32,8 +32,8 @@ type ItemId = string<itemId>
 and [<Measure>] itemId
 module ItemId =
-    let parse : string -> ItemId = UMX.tag
-    let toString : ItemId -> string = UMX.untag
+    let parse: string -> ItemId = UMX.tag
+    let toString: ItemId -> string = UMX.untag
 
 /// Identifies a group of chained Epochs
 /// TODO replace `List` with a Domain term referencing the specific element being managed
@@ -41,9 +41,9 @@ type [<Measure>] listSeriesId
 type ListSeriesId = string<listSeriesId>
 module ListSeriesId =
-    let wellKnownId : ListSeriesId = UMX.tag "0"
-    let toString : ListSeriesId -> string = UMX.untag
+    let wellKnownId: ListSeriesId = UMX.tag "0"
+    let toString: ListSeriesId -> string = UMX.untag
 
 module Guid =
-    let toStringN (g : Guid) = g.ToString "N"
+    let toStringN (g: Guid) = g.ToString "N"
diff --git a/equinox-shipping/Domain.Tests/ContainerTests.fs b/equinox-shipping/Domain.Tests/ContainerTests.fs
index 0448ff711..bd704fdf2 100644
--- a/equinox-shipping/Domain.Tests/ContainerTests.fs
+++ b/equinox-shipping/Domain.Tests/ContainerTests.fs
@@ -5,7 +5,7 @@ open Shipping.Domain.Container
 open FsCheck.Xunit
 open Swensen.Unquote
 
-let [<Property>] ``events roundtrip`` (x : Events.Event) =
+let [<Property>] ``events roundtrip`` (x: Events.Event) =
     let ee = Events.codec.Encode((), x)
     let e = FsCodec.Core.TimelineEvent.Create(0L, ee.EventType, ee.Data)
     let des = Events.codec.TryDecode e
diff --git a/equinox-shipping/Domain.Tests/Domain.Tests.fsproj b/equinox-shipping/Domain.Tests/Domain.Tests.fsproj
index 62dd66a49..ddfbbd451 100644
--- a/equinox-shipping/Domain.Tests/Domain.Tests.fsproj
+++ b/equinox-shipping/Domain.Tests/Domain.Tests.fsproj
@@ -20,7 +20,7 @@
-
+
diff --git a/equinox-shipping/Domain.Tests/FinalizationProcessTests.fs b/equinox-shipping/Domain.Tests/FinalizationProcessTests.fs
index d87bdf36a..dfc2d8390 100644
--- a/equinox-shipping/Domain.Tests/FinalizationProcessTests.fs
+++ b/equinox-shipping/Domain.Tests/FinalizationProcessTests.fs
@@ -29,9 +29,9 @@ type Properties(testOutput) =
            ( GuidStringN transId1, GuidStringN transId2,
              GuidStringN containerId1, GuidStringN containerId2,
              NonEmptyArray (Ids shipmentIds1), NonEmptyArray (Ids shipmentIds2), GuidStringN shipment3) = async {
         let buffer = EventAccumulator()
-        use _ = store.Committed.Subscribe(fun struct (c, sid, e) -> buffer.Record((c, sid, e)))
+        use _ = store.Committed.Subscribe buffer.Record
         let eventTypes = seq { for e in buffer.All() -> e.EventType }
-        let manager = FinalizationProcess.Config.create 16 store.Config
+        let manager = FinalizationProcess.Factory.create 16 store.Config
 
         (* First, run the happy path - should pass through all stages of the lifecycle *)
         let requestedShipmentIds = Array.append shipmentIds1 shipmentIds2
@@ -42,7 +42,7 @@ type Properties(testOutput) =
                 nameof(Container.Events.Finalized)] // Container
         test <@ res1 && set eventTypes = set expectedEvents @>
         let containerEvents =
-            buffer.Queue((Container.Category, Container.streamId containerId1 |> Equinox.StreamId.toString))
+            buffer.Queue(Container.Category, Container.streamId containerId1)
             |> Seq.chooseV (FsCodec.Deflate.EncodeUncompressed Container.Events.codec).TryDecode
             |> List.ofSeq
         test <@ match containerEvents with
diff --git a/equinox-shipping/Domain.Tests/FinalizationTransactionTests.fs b/equinox-shipping/Domain.Tests/FinalizationTransactionTests.fs
index 493442eb3..c6b86fc35 100644
--- a/equinox-shipping/Domain.Tests/FinalizationTransactionTests.fs
+++ b/equinox-shipping/Domain.Tests/FinalizationTransactionTests.fs
@@ -5,7 +5,7 @@ open Shipping.Domain.FinalizationTransaction
 open FsCheck.Xunit
 open Swensen.Unquote
 
-let [<Property>] ``events roundtrip`` (x : Events.Event) =
+let [<Property>] ``events roundtrip`` (x: Events.Event) =
     let ee = Events.codec.Encode((), x)
     let e = FsCodec.Core.TimelineEvent.Create(0L, ee.EventType, ee.Data)
     let des = Events.codec.TryDecode e
diff --git a/equinox-shipping/Domain.Tests/Infrastructure.fs b/equinox-shipping/Domain.Tests/Infrastructure.fs
index 4fde14c80..119ec5d62 100644
--- a/equinox-shipping/Domain.Tests/Infrastructure.fs
+++ b/equinox-shipping/Domain.Tests/Infrastructure.fs
@@ -4,19 +4,19 @@ module Shipping.Domain.Tests.Infrastructure
 open System.Collections.Concurrent
 
 type EventAccumulator<'E>() =
-    let messages = ConcurrentDictionary<struct (string * string), ConcurrentQueue<'E>>()
+    let messages = ConcurrentDictionary<FsCodec.StreamName, ConcurrentQueue<'E>>()
 
-    member _.Record(struct (categoryName, streamId, events : 'E array)) =
+    member _.Record(struct (streamName, events: 'E[])) =
         let initStreamQueue _ = ConcurrentQueue events
-        let appendToQueue _ (queue : ConcurrentQueue<'E>) = events |> Seq.iter queue.Enqueue; queue
-        messages.AddOrUpdate(struct (categoryName, streamId), initStreamQueue, appendToQueue) |> ignore
+        let appendToQueue _ (queue: ConcurrentQueue<'E>) = events |> Seq.iter queue.Enqueue; queue
+        messages.AddOrUpdate(streamName, initStreamQueue, appendToQueue) |> ignore
 
     member _.Queue(stream) =
         match messages.TryGetValue stream with
         | false, _ -> Seq.empty<'E>
         | true, xs -> xs :> _
+    member x.Queue(cat, sid) = x.Queue(FsCodec.StreamName.create cat (Equinox.Core.StreamId.toString sid))
+
     member _.All() = seq { for KeyValue (_, xs) in messages do yield! xs }
 
-    member _.Clear() =
-        messages.Clear()
+    member _.Clear() = messages.Clear()
diff --git a/equinox-shipping/Domain.Tests/MemoryStoreFixture.fs b/equinox-shipping/Domain.Tests/MemoryStoreFixture.fs
index c002231f6..7b7bece6d 100644
--- a/equinox-shipping/Domain.Tests/MemoryStoreFixture.fs
+++ b/equinox-shipping/Domain.Tests/MemoryStoreFixture.fs
@@ -6,8 +6,8 @@ open System
 
 /// Holds Equinox MemoryStore. Disposable to correctly manage unsubscription of logger at end of test
 type MemoryStoreFixture() =
     let store = Equinox.MemoryStore.VolatileStore<struct (int * ReadOnlyMemory<byte>)>()
-    let mutable disconnectLog : (unit -> unit) option = None
-    member val Config = Shipping.Domain.Config.Store.Memory store
+    let mutable disconnectLog: (unit -> unit) option = None
+    member val Config = Shipping.Domain.Store.Context.Memory store
     member _.Committed = store.Committed
     member _.TestOutput with set testOutput =
         if Option.isSome disconnectLog then invalidOp "Cannot connect more than one test output"
diff --git a/equinox-shipping/Domain.Tests/SerilogLogFixture.fs b/equinox-shipping/Domain.Tests/SerilogLogFixture.fs
index 45f66cf65..d899ab836 100644
--- a/equinox-shipping/Domain.Tests/SerilogLogFixture.fs
+++ b/equinox-shipping/Domain.Tests/SerilogLogFixture.fs
@@ -5,14 +5,14 @@ module Log =
     /// Allow logging to filter out emission of log messages whose information is also surfaced as metrics
     let isStoreMetrics e = Serilog.Filters.Matching.WithProperty("isMetric").Invoke e
 
-type XunitOutputSink(?messageSink : Xunit.Abstractions.IMessageSink, ?minLevel : Serilog.Events.LogEventLevel, ?templatePrefix) =
+type XunitOutputSink(?messageSink: Xunit.Abstractions.IMessageSink, ?minLevel: Serilog.Events.LogEventLevel, ?templatePrefix) =
     let minLevel = defaultArg minLevel Serilog.Events.LogEventLevel.Information
     let formatter =
         let baseTemplate = "{Timestamp:HH:mm:ss.fff} {Level:u1} " + Option.toObj templatePrefix + "{Message} {Properties}{NewLine}{Exception}"
         let template = if minLevel <= Serilog.Events.LogEventLevel.Debug then baseTemplate else baseTemplate.Replace("{Properties}", "")
         Serilog.Formatting.Display.MessageTemplateTextFormatter(template, null)
-    let mutable currentTestOutput : Xunit.Abstractions.ITestOutputHelper option = None
-    let writeSerilogEvent (logEvent : Serilog.Events.LogEvent) =
+    let mutable currentTestOutput: Xunit.Abstractions.ITestOutputHelper option = None
+    let writeSerilogEvent (logEvent: Serilog.Events.LogEvent) =
         logEvent.RemovePropertyIfPresent Equinox.CosmosStore.Core.Log.PropertyTag
         logEvent.RemovePropertyIfPresent Equinox.DynamoStore.Core.Log.PropertyTag
         logEvent.RemovePropertyIfPresent Equinox.EventStoreDb.Log.PropertyTag
diff --git a/equinox-shipping/Domain.Tests/ShipmentTests.fs b/equinox-shipping/Domain.Tests/ShipmentTests.fs
index 9b89e8d51..26b2444e7 100644
--- a/equinox-shipping/Domain.Tests/ShipmentTests.fs
+++ b/equinox-shipping/Domain.Tests/ShipmentTests.fs
@@ -5,7 +5,7 @@ open Shipping.Domain.Shipment
 open FsCheck.Xunit
 open Swensen.Unquote
 
-let [<Property>] ``events roundtrip`` (x : Events.Event) =
+let [<Property>] ``events roundtrip`` (x: Events.Event) =
     let ee = Events.codec.Encode((), x)
     let e = FsCodec.Core.TimelineEvent.Create(0L, ee.EventType, ee.Data)
     let des = Events.codec.TryDecode e
diff --git a/equinox-shipping/Domain/Container.fs b/equinox-shipping/Domain/Container.fs
index 5727da80e..9811f59ea 100644
--- a/equinox-shipping/Domain/Container.fs
+++ b/equinox-shipping/Domain/Container.fs
@@ -7,39 +7,39 @@ let streamId = Equinox.StreamId.gen ContainerId.toString
 module Events =
 
     type Event =
-        | Finalized of {| shipmentIds : ShipmentId array |}
-        | Snapshotted of {| shipmentIds : ShipmentId array |}
+        | Finalized of {| shipmentIds: ShipmentId[] |}
+        | Snapshotted of {| shipmentIds: ShipmentId[] |}
         interface TypeShape.UnionContract.IUnionContract
-    let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement
+    let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement
 
 module Fold =
 
-    type State = { shipmentIds : ShipmentId array }
+    type State = { shipmentIds: ShipmentId[] }
     let initial = { shipmentIds = Array.empty }
-    let evolve (_state : State) (event : Events.Event) : State =
+    let evolve (_state: State) (event: Events.Event): State =
         match event with
         | Events.Snapshotted snapshot -> { shipmentIds = snapshot.shipmentIds }
         | Events.Finalized event -> { shipmentIds = event.shipmentIds }
-    let fold : State -> Events.Event seq -> State = Seq.fold evolve
+    let fold: State -> Events.Event seq -> State = Seq.fold evolve
 
     let isOrigin = function Events.Snapshotted _ -> true | _ -> false
-    let toSnapshot (state : State) = Events.Snapshotted {| shipmentIds = state.shipmentIds |}
+    let toSnapshot (state: State) = Events.Snapshotted {| shipmentIds = state.shipmentIds |}
 
-let interpretFinalize shipmentIds (state : Fold.State): Events.Event list =
+let interpretFinalize shipmentIds (state: Fold.State): Events.Event list =
     [ if Array.isEmpty state.shipmentIds then yield Events.Finalized {| shipmentIds = shipmentIds |} ]
 
-type Service internal (resolve : ContainerId -> Equinox.Decider<Events.Event, Fold.State>) =
+type Service internal (resolve: ContainerId -> Equinox.Decider<Events.Event, Fold.State>) =
 
-    member _.Finalize(containerId, shipmentIds) : Async<unit> =
+    member _.Finalize(containerId, shipmentIds): Async<unit> =
         let decider = resolve containerId
         decider.Transact(interpretFinalize shipmentIds)
 
-module Config =
+module Factory =
 
     let private (|Category|) = function
-        | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store
-        | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
-        | Config.Store.Dynamo (context, cache) -> Config.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
-        | Config.Store.Esdb (context, cache) -> Config.Esdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache)
-    let create (Category cat) = Service(streamId >> Config.createDecider cat Category)
+        | Store.Context.Memory store -> Store.Memory.create Events.codec Fold.initial Fold.fold store
+        | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
+        | Store.Context.Dynamo (context, cache) -> Store.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
+        | Store.Context.Esdb (context, cache) -> Store.Esdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache)
+    let create (Category cat) = Service(streamId >> Store.createDecider cat Category)
diff --git a/equinox-shipping/Domain/Domain.fsproj b/equinox-shipping/Domain/Domain.fsproj
index b027ecac7..c29fbbf53 100644
--- a/equinox-shipping/Domain/Domain.fsproj
+++ b/equinox-shipping/Domain/Domain.fsproj
@@ -10,7 +10,7 @@
-
+
@@ -20,11 +20,11 @@
-
-
-
-
-
+
+
+
+
+
diff --git a/equinox-shipping/Domain/FinalizationProcess.fs b/equinox-shipping/Domain/FinalizationProcess.fs
index 917493ef6..6c43c0052 100644
--- a/equinox-shipping/Domain/FinalizationProcess.fs
+++ b/equinox-shipping/Domain/FinalizationProcess.fs
@@ -2,7 +2,7 @@ module Shipping.Domain.FinalizationProcess
 
 open FinalizationTransaction
 
-type internal Service internal (transactions : FinalizationTransaction.Service, containers : Container.Service, shipments : Shipment.Service) =
+type internal Service internal (transactions: FinalizationTransaction.Service, containers: Container.Service, shipments: Shipment.Service) =
 
     member internal _.TryReserveShipment(shipmentId, transactionId) =
         shipments.TryReserve(shipmentId, transactionId)
@@ -19,7 +19,7 @@ type internal Service internal (transactions : FinalizationTransaction.Service,
     member internal _.Step(transactionId, update) =
         transactions.Step(transactionId, update)
 
-type Manager internal (service : Service, maxDop) =
+type Manager internal (service: Service, maxDop) =
 
     let rec run transactionId update = async {
         let loop updateEvent = run transactionId (Some updateEvent)
@@ -56,21 +56,21 @@ type Manager internal (service : Service, maxDop) =
     }
 
     /// Used by watchdog service to drive processing to a conclusion where a given request was orphaned
-    member _.Pump(transactionId : TransactionId) =
+    member _.Pump(transactionId: TransactionId) =
         run transactionId None
 
     // Caller should generate the TransactionId via a deterministic hash of the shipmentIds in order to ensure idempotency (and sharing of fate) of identical requests
-    member _.TryFinalizeContainer(transactionId, containerId, shipmentIds) : Async<bool> =
+    member _.TryFinalizeContainer(transactionId, containerId, shipmentIds): Async<bool> =
         if Array.isEmpty shipmentIds then invalidArg "shipmentIds" "must not be empty"
         let initialRequest = Events.FinalizationRequested {| container = containerId; shipments = shipmentIds |}
         run transactionId (Some initialRequest)
 
-module Config =
+module Factory =
 
     let private createService store =
-        let transactions = Config.create store
-        let containers = Container.Config.create store
-        let shipments = Shipment.Config.create store
+        let transactions = Factory.create store
+        let containers = Container.Factory.create store
+        let shipments = Shipment.Factory.create store
         Service(transactions, containers, shipments)
 
     let create maxDop store =
         Manager(createService store, maxDop = maxDop)
diff --git a/equinox-shipping/Domain/FinalizationTransaction.fs b/equinox-shipping/Domain/FinalizationTransaction.fs
index 0befdca7c..3ae6ab893 100644
--- a/equinox-shipping/Domain/FinalizationTransaction.fs
+++ b/equinox-shipping/Domain/FinalizationTransaction.fs
@@ -8,40 +8,40 @@ let [<return: Struct>] (|StreamName|_|) = function FsCodec.StreamName.CategoryAn
 module Events =
 
     type Event =
-        | FinalizationRequested of {| container : ContainerId; shipments : ShipmentId array |}
+        | FinalizationRequested of {| container: ContainerId; shipments: ShipmentId[] |}
         | ReservationCompleted
         /// Signifies we're switching focus to relinquishing any assignments we completed.
         /// The list includes any items we could possibly have touched (including via idempotent retries)
-        | RevertCommenced of {| shipments : ShipmentId array |}
+        | RevertCommenced of {| shipments: ShipmentId[] |}
         | AssignmentCompleted
         /// Signifies all processing for the transaction has completed - the Watchdog looks for this event
         | Completed
-        | Snapshotted of {| state : State |}
+        | Snapshotted of {| state: State |}
         interface TypeShape.UnionContract.IUnionContract
    and // covered by autoUnionToJsonObject: [<JsonConverter(typeof<UnionConverter<State>>)>]
        State =
        | Initial
-        | Reserving of {| container : ContainerId; shipments : ShipmentId array |}
-        | Reverting of {| shipments : ShipmentId array |}
-        | Assigning of {| container : ContainerId; shipments : ShipmentId array |}
-        | Assigned of {| container : ContainerId; shipments : ShipmentId array |}
-        | Completed of {| success : bool |}
-    let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement
+        | Reserving of {| container: ContainerId; shipments: ShipmentId[] |}
+        | Reverting of {| shipments: ShipmentId[] |}
+        | Assigning of {| container: ContainerId; shipments: ShipmentId[] |}
+        | Assigned of {| container: ContainerId; shipments: ShipmentId[] |}
+        | Completed of {| success: bool |}
+    let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement
 
 module Reactions =
 
     /// Used by the Watchdog to infer whether a given event signifies that the processing has reached a terminal state
-    let isTerminalEvent (encoded : FsCodec.ITimelineEvent<_>) =
+    let isTerminalEvent (encoded: FsCodec.ITimelineEvent<_>) =
         encoded.EventType = nameof(Events.Completed)
 
 module Fold =
 
     type State = Events.State
-    let initial : State = State.Initial
+    let initial: State = State.Initial
 
     // The implementation trusts (does not spend time double checking) that events have passed an isValidTransition check
-    let evolve (state : State) (event : Events.Event) : State =
+    let evolve (state: State) (event: Events.Event): State =
         match state, event with
         | _, Events.FinalizationRequested e -> State.Reserving {| container = e.container; shipments = e.shipments |}
         | State.Reserving s, Events.ReservationCompleted -> State.Assigning {| container = s.container; shipments = s.shipments |}
@@ -52,7 +52,7 @@ module Fold =
         | _, Events.Snapshotted state -> state.state
         // this shouldn't happen, but, if we did produce invalid events, we'll just ignore them
         | state, _ -> state
-    let fold : State -> Events.Event seq -> State = Seq.fold evolve
+    let fold: State -> Events.Event seq -> State = Seq.fold evolve
 
     let isOrigin = function Events.Snapshotted _ -> true | _ -> false
     let toSnapshot state = Events.Snapshotted {| state = state |}
 
@@ -61,13 +61,13 @@ module Fold =
 module Flow =
 
     type Action =
-        | ReserveShipments of shipmentIds : ShipmentId array
-        | RevertReservations of shipmentIds : ShipmentId array
-        | AssignShipments of shipmentIds : ShipmentId array * containerId : ContainerId
-        | FinalizeContainer of containerId : ContainerId * shipmentIds : ShipmentId array
-        | Finish of success : bool
+        | ReserveShipments of shipmentIds: ShipmentId[]
+        | RevertReservations of shipmentIds: ShipmentId[]
+        | AssignShipments of shipmentIds: ShipmentId[] * containerId: ContainerId
+        | FinalizeContainer of containerId: ContainerId * shipmentIds: ShipmentId[]
+        | Finish of success: bool
 
-    let nextAction : Fold.State -> Action = function
+    let nextAction: Fold.State -> Action = function
         | Fold.State.Reserving s -> Action.ReserveShipments s.shipments
         | Fold.State.Reverting s -> Action.RevertReservations s.shipments
         | Fold.State.Assigning s -> Action.AssignShipments (s.shipments, s.container)
@@ -76,7 +76,7 @@ module Flow =
         // As all state transitions are driven by FinalizationProcess, we can rule this out
         | Fold.State.Initial as s -> failwith (sprintf "Cannot interpret state %A" s)
 
-    let isValidTransition (event : Events.Event) (state : Fold.State) =
+    let isValidTransition (event: Events.Event) (state: Fold.State) =
         match state, event with
         | Fold.State.Initial, Events.FinalizationRequested _
         | Fold.State.Reserving _, Events.RevertCommenced _
@@ -86,25 +86,25 @@ module Flow =
         | Fold.State.Assigned _, Events.Completed -> true
         | _ -> false
 
-    let decide (update : Events.Event option) (state : Fold.State) : Events.Event list =
+    let decide (update: Events.Event option) (state: Fold.State): Events.Event list =
         match update with
         | Some e when isValidTransition e state -> [ e ]
         | _ -> []
 
-type Service internal (resolve : TransactionId -> Equinox.Decider<Events.Event, Fold.State>) =
+type Service internal (resolve: TransactionId -> Equinox.Decider<Events.Event, Fold.State>) =
 
     /// (Optionally) idempotently applies an event representing progress achieved in some aspect of the workflow
     /// Yields a `Flow.Action` representing the next activity to be performed as implied by the workflow's State afterwards
     /// The workflow concludes when the action returned is `Action.Completed`
-    member _.Step(transactionId, maybeUpdate) : Async<Flow.Action> =
+    member _.Step(transactionId, maybeUpdate): Async<Flow.Action> =
         let decider = resolve transactionId
         decider.Transact(Flow.decide maybeUpdate, Flow.nextAction)
 
-module Config =
+module Factory =
 
     let private (|Category|) = function
-        | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store
-        | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
-        | Config.Store.Dynamo (context, cache) -> Config.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
-        | Config.Store.Esdb (context, cache) -> Config.Esdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache)
-    let create (Category cat) = Service(streamId >> Config.createDecider cat Category)
+        | Store.Context.Memory store -> Store.Memory.create Events.codec Fold.initial Fold.fold store
+        | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
+        | Store.Context.Dynamo (context, cache) -> Store.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
+        | Store.Context.Esdb (context, cache) -> Store.Esdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache)
+    let create (Category cat) = Service(streamId >> Store.createDecider cat Category)
diff --git a/equinox-shipping/Domain/Shipment.fs b/equinox-shipping/Domain/Shipment.fs
index bf3ae4f9e..7977778aa 100644
--- a/equinox-shipping/Domain/Shipment.fs
+++ b/equinox-shipping/Domain/Shipment.fs
@@ -7,62 +7,62 @@ let streamId = Equinox.StreamId.gen ShipmentId.toString
 module Events =
 
     type Event =
-        | Reserved of {| transaction : TransactionId |}
-        | Assigned of {| container : ContainerId |}
+        | Reserved of {| transaction: TransactionId |}
+        | Assigned of {| container: ContainerId |}
         | Revoked
-        | Snapshotted of {| reservation : TransactionId option; association : ContainerId option |}
+        | Snapshotted of {| reservation: TransactionId option; association: ContainerId option |}
         interface TypeShape.UnionContract.IUnionContract
-    let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement
+    let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement

 module Fold =

-    type State = { reservation : TransactionId option; association : ContainerId option }
-    let initial : State = { reservation = None; association = None }
+    type State = { reservation: TransactionId option; association: ContainerId option }
+    let initial: State = { reservation = None; association = None }
     let evolve (state: State) = function
         | Events.Reserved event -> { reservation = Some event.transaction; association = None }
         | Events.Revoked -> initial
         | Events.Assigned event -> { state with association = Some event.container }
         | Events.Snapshotted event -> { reservation = event.reservation; association = event.association }
-    let fold : State -> Events.Event seq -> State = Seq.fold evolve
+    let fold: State -> Events.Event seq -> State = Seq.fold evolve
     let isOrigin = function Events.Snapshotted _ -> true | _ -> false
-    let toSnapshot (state : State) = Events.Snapshotted {| reservation = state.reservation; association = state.association |}
+    let toSnapshot (state: State) = Events.Snapshotted {| reservation = state.reservation; association = state.association |}

-let decideReserve transactionId : Fold.State -> bool * Events.Event list = function
+let decideReserve transactionId: Fold.State -> bool * Events.Event list = function
    | { reservation = Some r } when r = transactionId -> true, []
    | { reservation = None } -> true, [ Events.Reserved {| transaction = transactionId |} ]
    | _ -> false, []

-let interpretRevoke transactionId : Fold.State -> Events.Event list = function
+let interpretRevoke transactionId: Fold.State -> Events.Event list = function
    | { reservation = Some r; association = None } when r = transactionId -> [ Events.Revoked ]
    | _ -> [] // Ignore if a) already revoked/never reserved b) not reserved for this transactionId

-let interpretAssign transactionId containerId : Fold.State -> Events.Event list = function
+let interpretAssign transactionId containerId: Fold.State -> Events.Event list = function
    | { reservation = Some r; association = None } when r = transactionId -> [ Events.Assigned {| container = containerId |} ]
    | _ -> [] // Ignore if a) this transaction was not the one reserving it or b) it's already been assigned

-type Service internal (resolve : ShipmentId -> Equinox.Decider<Events.Event, Fold.State>) =
+type Service internal (resolve: ShipmentId -> Equinox.Decider<Events.Event, Fold.State>) =

-    member _.TryReserve(shipmentId, transactionId) : Async<bool> =
+    member _.TryReserve(shipmentId, transactionId): Async<bool> =
         let decider = resolve shipmentId
         decider.Transact(decideReserve transactionId)

-    member _.Revoke(shipmentId, transactionId) : Async<unit> =
+    member _.Revoke(shipmentId, transactionId): Async<unit> =
         let decider = resolve shipmentId
         decider.Transact(interpretRevoke transactionId)

-    member _.Assign(shipmentId, containerId, transactionId) : Async<unit> =
+    member _.Assign(shipmentId, containerId, transactionId): Async<unit> =
         let decider = resolve shipmentId
         decider.Transact(interpretAssign transactionId containerId)

-module Config =
+module Factory =

     let private (|Category|) = function
-        | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store
-        | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
-        | Config.Store.Dynamo (context, cache) -> Config.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
-        | Config.Store.Esdb (context, cache) -> Config.Esdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache)
-    let create (Category cat) = Service(streamId >> Config.createDecider cat Category)
+        | Store.Context.Memory store -> Store.Memory.create Events.codec Fold.initial Fold.fold store
+        | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
+        | Store.Context.Dynamo (context, cache) -> Store.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
+        | Store.Context.Esdb (context, cache) -> Store.Esdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache)
+    let create (Category cat) = Service(streamId >> Store.createDecider cat Category)
diff --git a/equinox-shipping/Domain/Config.fs b/equinox-shipping/Domain/Store.fs
similarity index 90%
rename from equinox-shipping/Domain/Config.fs
rename to equinox-shipping/Domain/Store.fs
index 1bfcd6496..070a5597f 100644
--- a/equinox-shipping/Domain/Config.fs
+++ b/equinox-shipping/Domain/Store.fs
@@ -1,10 +1,10 @@
-module Shipping.Domain.Config
+module Shipping.Domain.Store

 /// Tag log entries so we can filter them out if logging to the console
 let log = Serilog.Log.ForContext("isMetric", true)
 let createDecider cat = Equinox.Decider.resolve log cat

-module EventCodec =
+module Codec =

     open FsCodec.SystemTextJson

@@ -16,7 +16,7 @@ module EventCodec =

 module Memory =

-    let create codec initial fold store : Equinox.Category<_, _, _> =
+    let create codec initial fold store: Equinox.Category<_, _, _> =
         Equinox.MemoryStore.MemoryStoreCategory(store, FsCodec.Deflate.EncodeUncompressed codec, fold, initial)

 let defaultCacheDuration = System.TimeSpan.FromMinutes 20.
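[Editor's note: the Config -> Store/Factory renames in the hunks above are mechanical, but the composition they enable is easiest to see end to end. A minimal usage sketch, assuming the Shipping.Domain namespace as patched; the VolatileStore type argument is left to inference at the point of use:

    open Shipping.Domain
    // Select a concrete store; the other Context cases (Cosmos/Dynamo/Esdb) slot in identically
    let context = Store.Context.Memory (Equinox.MemoryStore.VolatileStore())
    // Factory.create pattern-matches the Context case to build the matching Equinox.Category,
    // then composes streamId >> Store.createDecider into the Service constructor
    let shipments = Shipment.Factory.create context

The intent of the rename is that `Store` now names the storage wiring and `Factory` the aggregate-level composition, where both were previously overloaded onto `Config`.]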
@@ -48,7 +48,7 @@ module Dynamo =

 module Esdb =

     let private createCached codec initial fold accessStrategy (context, cache) =
-        let cacheStrategy = Equinox.EventStoreDb.CachingStrategy.SlidingWindow (cache, defaultCacheDuration)
+        let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, defaultCacheDuration)
         Equinox.EventStoreDb.EventStoreCategory(context, codec, fold, initial, cacheStrategy, ?access = accessStrategy)

     let createUnoptimized codec initial fold (context, cache) =
         createCached codec initial fold None (context, cache)
@@ -56,8 +56,8 @@ module Esdb =
         createCached codec initial fold (Some Equinox.EventStoreDb.AccessStrategy.LatestKnownEvent) (context, cache)

 []
-type Store<'t> =
+type Context<'t> =
     | Memory of Equinox.MemoryStore.VolatileStore<'t>
-    | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache
-    | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache
-    | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.Core.ICache
+    | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Cache
+    | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Cache
+    | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.Cache
diff --git a/equinox-shipping/Domain/TransactionWatchdog.fs b/equinox-shipping/Domain/TransactionWatchdog.fs
index 3db4af707..89ee010e6 100644
--- a/equinox-shipping/Domain/TransactionWatchdog.fs
+++ b/equinox-shipping/Domain/TransactionWatchdog.fs
@@ -9,14 +9,15 @@ open System
 module Events =

     type Categorization =
-        | NonTerminal of System.DateTimeOffset
+        | NonTerminal of DateTimeOffset
         | Terminal
     let createCategorizationCodec isTerminalEvent =
-        let tryDecode (encoded : FsCodec.ITimelineEvent<ReadOnlyMemory<byte>>) =
+        let tryDecode (encoded: FsCodec.ITimelineEvent<ReadOnlyMemory<byte>>) =
             ValueSome (if isTerminalEvent encoded then Terminal else NonTerminal encoded.Timestamp)
         let encode _ = failwith "Not Implemented"
-        let mapCausation _ = failwith "Not Implemented"
-        FsCodec.Codec.Create(encode, tryDecode, mapCausation)
+        let mapCausation () _ = failwith "Not Implemented"
+        // This is the only Create overload that exposes the Event info we need at present
+        FsCodec.Codec.Create(encode, tryDecode, mapCausation)

 module Fold =
@@ -28,7 +29,7 @@
             else state
         | Events.Terminal -> Completed
-    let fold : State -> Events.Categorization seq -> State = Seq.fold evolve
+    let fold: State -> Events.Categorization seq -> State = Seq.fold evolve

 type Status = Complete | Active | Stuck
 let toStatus cutoffTime = function
@@ -37,10 +38,10 @@ let toStatus cutoffTime = function
    | Fold.Active _ -> Active
    | Fold.Completed -> Complete

-let fold : Events.Categorization seq -> Fold.State =
+let fold: Events.Categorization seq -> Fold.State =
    Fold.fold Fold.initial

-let (|TransactionStatus|) (codec : #FsCodec.IEventCodec<_, _, _>) events : Fold.State =
+let (|TransactionStatus|) (codec: #FsCodec.IEventCodec<_, _, _>) events: Fold.State =
    events
    |> Seq.choose (codec.TryDecode >> function ValueSome x -> Some x | ValueNone -> None)
    |> fold
diff --git a/equinox-shipping/Domain/Types.fs b/equinox-shipping/Domain/Types.fs
index 989ca7459..d7b37fb32 100644
--- a/equinox-shipping/Domain/Types.fs
+++ b/equinox-shipping/Domain/Types.fs
@@ -5,21 +5,21 @@ open FSharp.UMX

 [<Measure>] type shipmentId
 type ShipmentId = string<shipmentId>
 module ShipmentId =
-    let toString (x : ShipmentId) : string = %x
+    let toString (x: ShipmentId): string = %x

 [<Measure>] type containerId
 type ContainerId = string<containerId>
 module ContainerId =
-    let toString (x : ContainerId) : string = %x
+    let toString (x: ContainerId): string = %x

 [<Measure>] type transactionId
 type TransactionId = string<transactionId>
 module TransactionId =
-    let toString (x : TransactionId) : string = %x
-    let parse (x : string) = %x
+    let toString (x: TransactionId): string = %x
+    let parse (x: string) = %x
     let (|Parse|) = parse

 module Guid =
-    let inline toStringN (x : System.Guid) = x.ToString "N"
+    let inline toStringN (x: System.Guid) = x.ToString "N"
     let generateStringN () = let g = System.Guid.NewGuid() in toStringN g
diff --git a/equinox-shipping/Watchdog.Integration/CosmosConnector.fs b/equinox-shipping/Watchdog.Integration/CosmosConnector.fs
index dfa3a80ef..cd0d3c94d 100644
--- a/equinox-shipping/Watchdog.Integration/CosmosConnector.fs
+++ b/equinox-shipping/Watchdog.Integration/CosmosConnector.fs
@@ -12,13 +12,13 @@ type CosmosConnector(connectionString, databaseId, containerId) =
     let leaseContainerId = containerId + "-aux"
     let connectLeases () = connector.CreateUninitialized(databaseId, leaseContainerId)

-    new (c : Shipping.Watchdog.SourceArgs.Configuration) = CosmosConnector(c.CosmosConnection, c.CosmosDatabase, c.CosmosContainer)
+    new (c: Shipping.Watchdog.SourceArgs.Configuration) = CosmosConnector(c.CosmosConnection, c.CosmosDatabase, c.CosmosContainer)
     new () = CosmosConnector(Shipping.Watchdog.SourceArgs.Configuration EnvVar.tryGet)

     member val DumpStats = Equinox.CosmosStore.Core.Log.InternalMetrics.dump
     member private _.ConnectStoreAndMonitored() = connector.ConnectStoreAndMonitored(databaseId, containerId)
     member _.ConnectLeases() =
-        let leases : Microsoft.Azure.Cosmos.Container = connectLeases()
+        let leases: Microsoft.Azure.Cosmos.Container = connectLeases()
         // Just as ConnectStoreAndMonitored references the global Logger, so do we -> see SerilogLogFixture, _dummy
         Serilog.Log.Information("ChangeFeed Leases Database {db} Container {container}", leases.Database.Id, leases.Id)
         leases
@@ -27,5 +27,5 @@ type CosmosConnector(connectionString, databaseId, containerId) =
         let storeCfg =
             let context = client |> CosmosStoreContext.create
             let cache = Equinox.Cache("Tests", sizeMb = 10)
-            Shipping.Domain.Config.Store.Cosmos (context, cache)
+            Shipping.Domain.Store.Context.Cosmos (context, cache)
         storeCfg, monitored
diff --git a/equinox-shipping/Watchdog.Integration/DynamoConnector.fs b/equinox-shipping/Watchdog.Integration/DynamoConnector.fs
index d5fd2cd93..b2d33fdda 100644
--- a/equinox-shipping/Watchdog.Integration/DynamoConnector.fs
+++ b/equinox-shipping/Watchdog.Integration/DynamoConnector.fs
@@ -2,14 +2,14 @@ namespace Shipping.Watchdog.Integration

 open Shipping.Infrastructure

-type DynamoConnector(connector : Equinox.DynamoStore.DynamoStoreConnector, table, indexTable) =
+type DynamoConnector(connector: Equinox.DynamoStore.DynamoStoreConnector, table, indexTable) =

     let client = connector.CreateClient()
     let storeClient = Equinox.DynamoStore.DynamoStoreClient(client, table)
     let storeContext = storeClient |> DynamoStoreContext.create
     let cache = Equinox.Cache("Tests", sizeMb = 10)

-    new (c : Shipping.Watchdog.SourceArgs.Configuration) =
+    new (c: Shipping.Watchdog.SourceArgs.Configuration) =
         let timeout, retries = System.TimeSpan.FromSeconds 5., 5
         let connector = match c.DynamoRegion with
                         | Some systemName -> Equinox.DynamoStore.DynamoStoreConnector(systemName, timeout, retries)
@@ -21,9 +21,9 @@ type DynamoConnector(connector : Equinox.DynamoStore.DynamoStoreConnector, table
     member val IndexClient = Equinox.DynamoStore.DynamoStoreClient(client, match indexTable with Some x -> x | None -> table + "-index")
     member val StoreContext =
storeContext member val StoreArgs = (storeContext, cache) - member val Store = Shipping.Domain.Config.Store.Dynamo (storeContext, cache) + member val Store = Shipping.Domain.Store.Context.Dynamo (storeContext, cache) /// Uses an in-memory checkpoint service; the real app will obviously need to store real checkpoints (see SourceArgs.Dynamo.Arguments.CreateCheckpointStore) member x.CreateCheckpointService(consumerGroupName) = let checkpointInterval = System.TimeSpan.FromHours 1. let store = Equinox.MemoryStore.VolatileStore() - Propulsion.Feed.ReaderCheckpoint.MemoryStore.create Shipping.Domain.Config.log (consumerGroupName, checkpointInterval) store + Propulsion.Feed.ReaderCheckpoint.MemoryStore.create Shipping.Domain.Store.log (consumerGroupName, checkpointInterval) store diff --git a/equinox-shipping/Watchdog.Integration/EsdbConnector.fs b/equinox-shipping/Watchdog.Integration/EsdbConnector.fs index 2c5fc8c08..7a6582ba6 100644 --- a/equinox-shipping/Watchdog.Integration/EsdbConnector.fs +++ b/equinox-shipping/Watchdog.Integration/EsdbConnector.fs @@ -15,7 +15,7 @@ type EsdbConnector(connection, credentials) = let storeContext = connection |> EventStoreContext.create let cache = Equinox.Cache("Tests", sizeMb = 10) - new (c : Shipping.Watchdog.SourceArgs.Configuration) = + new (c: Shipping.Watchdog.SourceArgs.Configuration) = EsdbConnector(c.MaybeEventStoreConnection |> Option.defaultValue "esdb://admin:changeit@localhost:2111,localhost:2112,localhost:2113?tls=true&tlsVerifyCert=false", c.MaybeEventStoreCredentials) new () = EsdbConnector(Shipping.Watchdog.SourceArgs.Configuration EnvVar.tryGet) @@ -24,9 +24,9 @@ type EsdbConnector(connection, credentials) = member val EventStoreClient = connection.ReadConnection member val StoreContext = storeContext member val StoreArgs = (storeContext, cache) - member val Store = Shipping.Domain.Config.Store.Esdb (storeContext, cache) + member val Store = Shipping.Domain.Store.Context.Esdb (storeContext, cache) /// Uses an in-memory checkpoint service; the real app will obviously need to store real checkpoints (see CheckpointStore.Config) member x.CreateCheckpointService(consumerGroupName) = let checkpointInterval = TimeSpan.FromHours 1. 
let store = Equinox.MemoryStore.VolatileStore() - Propulsion.Feed.ReaderCheckpoint.MemoryStore.create Shipping.Domain.Config.log (consumerGroupName, checkpointInterval) store + Propulsion.Feed.ReaderCheckpoint.MemoryStore.create Shipping.Domain.Store.log (consumerGroupName, checkpointInterval) store diff --git a/equinox-shipping/Watchdog.Integration/ReactorFixture.fs b/equinox-shipping/Watchdog.Integration/ReactorFixture.fs index ecb95f4be..6b90b11b1 100644 --- a/equinox-shipping/Watchdog.Integration/ReactorFixture.fs +++ b/equinox-shipping/Watchdog.Integration/ReactorFixture.fs @@ -13,21 +13,21 @@ type FixtureBase(messageSink, store, dumpStats, createSourceConfig) = let contextId = Shipping.Domain.Guid.generateStringN () let manager = let maxDop = 4 - Shipping.Domain.FinalizationProcess.Config.create maxDop store + Shipping.Domain.FinalizationProcess.Factory.create maxDop store let log = Serilog.Log.Logger let stats = Handler.Stats(log, statsInterval = TimeSpan.FromMinutes 1, stateInterval = TimeSpan.FromMinutes 2, verboseStore = true, logExternalStats = dumpStats) - let sink = Handler.Config.StartSink(log, stats, manager, processingTimeout = TimeSpan.FromSeconds 1., maxReadAhead = 1024, maxConcurrentStreams = 4, + let sink = Handler.Factory.StartSink(log, stats, 4, manager, processingTimeout = TimeSpan.FromSeconds 1., maxReadAhead = 1024, // Ensure batches are completed ASAP so waits in the tests are minimal wakeForResults = true) let source, awaitReactions = let consumerGroupName = $"ReactorFixture/{contextId}" let sourceConfig = createSourceConfig consumerGroupName - Handler.Config.StartSource(log, sink, sourceConfig) + Handler.Factory.StartSource(log, sink, sourceConfig) member val Store = store member val ProcessManager = manager - abstract member RunTimeout : TimeSpan with get + abstract member RunTimeout: TimeSpan with get default _.RunTimeout = TimeSpan.FromSeconds 1. member val Log = Serilog.Log.Logger // initialized by CaptureSerilogLog @@ -59,7 +59,7 @@ module MemoryReactor = new (messageSink) = let store = Equinox.MemoryStore.VolatileStore() let createSourceConfig _groupName = SourceConfig.Memory store - new Fixture(messageSink, Shipping.Domain.Config.Store.Memory store, createSourceConfig) + new Fixture(messageSink, Shipping.Domain.Store.Context.Memory store, createSourceConfig) override _.RunTimeout = TimeSpan.FromSeconds 0.1 member _.Wait() = base.Await(TimeSpan.MaxValue) // Propagation delay is not applicable for MemoryStore member val private Backoff = TimeSpan.FromMilliseconds 1 @@ -106,7 +106,7 @@ module DynamoReactor = new (messageSink) = let conn = DynamoConnector() let createSourceConfig consumerGroupName = - let loadMode = DynamoLoadModeConfig.Hydrate (conn.StoreContext, 4) + let loadMode = Propulsion.DynamoStore.WithData (4, conn.StoreContext) let checkpoints = conn.CreateCheckpointService(consumerGroupName) SourceConfig.Dynamo (conn.IndexClient, checkpoints, loadMode, startFromTail = true, batchSizeCutoff = 100, tailSleepInterval = tailSleepInterval, statsInterval = TimeSpan.FromSeconds 60.) 
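[Editor's note: the fixture change above swaps DynamoLoadModeConfig.Hydrate (context, streamsDop) for Propulsion.DynamoStore's load mode, with the argument order flipped. A sketch of the distinction, assuming Propulsion 3 rc naming; the IndexOnly case is an assumption, not exercised by this patch:

    // WithData re-reads event bodies from the main table with the stated parallelism;
    // IndexOnly (assumed name) feeds only the index records (stream names/event types),
    // for handlers that load whatever state they need themselves
    let chooseLoadMode (context: Equinox.DynamoStore.DynamoStoreContext) streamsDop withBodies =
        if withBodies then Propulsion.DynamoStore.WithData (streamsDop, context)
        else Propulsion.DynamoStore.IndexOnly
]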
@@ -137,7 +137,7 @@ module EsdbReactor =
         let conn = EsdbConnector()
         let createSourceConfig consumerGroupName =
             let checkpoints = conn.CreateCheckpointService(consumerGroupName)
-            SourceConfig.Esdb (conn.EventStoreClient, checkpoints, hydrateBodies = true, startFromTail = true, batchSize = 100,
+            SourceConfig.Esdb (conn.EventStoreClient, checkpoints, withData = true, startFromTail = true, batchSize = 100,
                                tailSleepInterval = tailSleepInterval, statsInterval = TimeSpan.FromSeconds 60.)
         new Fixture(messageSink, conn.Store, conn.DumpStats, createSourceConfig)
     override _.RunTimeout = TimeSpan.FromSeconds 0.1
diff --git a/equinox-shipping/Watchdog.Integration/WatchdogIntegrationTests.fs b/equinox-shipping/Watchdog.Integration/WatchdogIntegrationTests.fs
index 43b9d3635..0ee79ff12 100644
--- a/equinox-shipping/Watchdog.Integration/WatchdogIntegrationTests.fs
+++ b/equinox-shipping/Watchdog.Integration/WatchdogIntegrationTests.fs
@@ -5,7 +5,7 @@ open FsCheck.Xunit
 open Propulsion.Reactor.Internal // Async.timeoutAfter
 open System

-let run (log: Serilog.ILogger) (processManager : Shipping.Domain.FinalizationProcess.Manager) runTimeout check (NonEmptyArray batches) = async {
+let run (log: Serilog.ILogger) (processManager: Shipping.Domain.FinalizationProcess.Manager) runTimeout check (NonEmptyArray batches) = async {
     let counts = System.Collections.Generic.Stack()
     let mutable timeouts = 0
@@ -24,13 +24,13 @@
 }

 []
-type ReactorPropertiesBase(reactor : FixtureBase, testOutput) =
+type ReactorPropertiesBase(reactor: FixtureBase, testOutput) =
     let logSub = reactor.CaptureSerilogLog testOutput

-    abstract member DisposeAsync : unit -> Async<unit>
+    abstract member DisposeAsync: unit -> Async<unit>
     default _.DisposeAsync() = async.Zero ()
-    abstract member RunTimeout : TimeSpan with get
+    abstract member RunTimeout: TimeSpan with get
     default _.RunTimeout = TimeSpan.FromSeconds 1.

     // Abusing IDisposable rather than IAsyncDisposable as we want the output to accompany the test output
@@ -41,12 +41,12 @@ type ReactorPropertiesBase(reactor : FixtureBase, testOutput) =
             reactor.DumpStats()
             logSub.Dispose() }

-type MemoryProperties (reactor : MemoryReactor.Fixture, testOutput) =
+type MemoryProperties (reactor: MemoryReactor.Fixture, testOutput) =
     // Trigger logging of (Aggregate) Reactor stats after each Test/Property is run
     inherit ReactorPropertiesBase(reactor, testOutput)

    []
-    let run args : Async<unit> =
+    let run args: Async<unit> =
        run reactor.Log reactor.ProcessManager reactor.RunTimeout reactor.CheckReactions args

     override _.DisposeAsync() =
@@ -57,7 +57,7 @@ type MemoryProperties (reactor : MemoryReactor.Fixture, testOutput) =
     interface Xunit.IClassFixture<MemoryReactor.Fixture>

 []
-type CosmosProperties(reactor : CosmosReactor.Fixture, testOutput) =
+type CosmosProperties(reactor: CosmosReactor.Fixture, testOutput) =
     // Failsafe to emit the Remaining stats even in the case of a Test/Property failing (in success case, it's redundant)
     inherit ReactorPropertiesBase(reactor, testOutput)

@@ -67,7 +67,7 @@ type CosmosProperties(reactor : CosmosReactor.Fixture, testOutput) =
 #else
    []
 #endif
-    let run args : Async<unit> = async {
+    let run args: Async<unit> = async {
        do! run reactor.Log reactor.ProcessManager reactor.RunTimeout reactor.CheckReactions args
        // Dump the stats after each and every iteration of the test
        reactor.DumpStats() }
@@ -81,7 +81,7 @@ type CosmosProperties(reactor : CosmosReactor.Fixture, testOutput) =
        // (* TODO implement reactor.Wait() *) }

 []
-type DynamoProperties(reactor : DynamoReactor.Fixture, testOutput) =
+type DynamoProperties(reactor: DynamoReactor.Fixture, testOutput) =
     // Failsafe to emit the Remaining stats even in the case of a Test/Property failing (in success case, it's redundant)
     inherit ReactorPropertiesBase(reactor, testOutput)

@@ -91,7 +91,7 @@ type DynamoProperties(reactor : DynamoReactor.Fixture, testOutput) =
 #else
    []
 #endif
-    let run args : Async<unit> = async {
+    let run args: Async<unit> = async {
        do! run reactor.Log reactor.ProcessManager reactor.RunTimeout reactor.CheckReactions args
        // Dump the stats after each and every iteration of the test
        reactor.DumpStats() }
@@ -103,7 +103,7 @@ type DynamoProperties(reactor : DynamoReactor.Fixture, testOutput) =
        reactor.Wait()

 []
-type EsdbProperties(reactor : EsdbReactor.Fixture, testOutput) =
+type EsdbProperties(reactor: EsdbReactor.Fixture, testOutput) =
     // Failsafe to emit the Remaining stats even in the case of a Test/Property failing (in success case, it's redundant)
     inherit ReactorPropertiesBase(reactor, testOutput)

@@ -113,7 +113,7 @@ type EsdbProperties(reactor : EsdbReactor.Fixture, testOutput) =
 #else
    []
 #endif
-    let run args : Async<unit> = async {
+    let run args: Async<unit> = async {
        do! run reactor.Log reactor.ProcessManager reactor.RunTimeout reactor.CheckReactions args
        // Dump the stats after each and every iteration of the test
        reactor.DumpStats() }
diff --git a/equinox-shipping/Watchdog.Lambda.Cdk/Watchdog.Lambda.Cdk.fsproj b/equinox-shipping/Watchdog.Lambda.Cdk/Watchdog.Lambda.Cdk.fsproj
index 94a3cc1a0..54d9af31d 100644
--- a/equinox-shipping/Watchdog.Lambda.Cdk/Watchdog.Lambda.Cdk.fsproj
+++ b/equinox-shipping/Watchdog.Lambda.Cdk/Watchdog.Lambda.Cdk.fsproj
@@ -20,7 +20,7 @@
-
+
diff --git a/equinox-shipping/Watchdog.Lambda.Cdk/WatchdogLambdaStack.fs b/equinox-shipping/Watchdog.Lambda.Cdk/WatchdogLambdaStack.fs
index 447e8e6ce..153350646 100644
--- a/equinox-shipping/Watchdog.Lambda.Cdk/WatchdogLambdaStack.fs
+++ b/equinox-shipping/Watchdog.Lambda.Cdk/WatchdogLambdaStack.fs
@@ -7,27 +7,27 @@ open System

 type WatchdogLambdaStackProps
     (   // Source Sns FIFO Topic Arn, serviced by Propulsion.DynamoStore.Notifier
-        notifierFifoTopicArn : string,
+        notifierFifoTopicArn: string,
        // DynamoDB Index Table Name, written by Propulsion.DynamoStore.Indexer
-        indexTableName : string,
+        indexTableName: string,
        // DynamoDB Store Table Name
-        storeTableName : string,
+        storeTableName: string,
        // Path for published binaries for Watchdog.Lambda
-        lambdaCodePath : string,
+        lambdaCodePath: string,
        // Description to apply to the Lambda
-        lambdaDescription : string,
+        lambdaDescription: string,
        // Handler invocation path
-        lambdaHandler : string,
+        lambdaHandler: string,
        // Lambda memory allocation - default 128 MB
-        ?memorySize : int,
+        ?memorySize: int,
        // Lambda max batch size - default 10
-        ?batchSize : int,
+        ?batchSize: int,
        // Lambda timeout - default 3m
-        ?timeout : TimeSpan) =
+        ?timeout: TimeSpan) =
     inherit StackProps()
     member val NotifierFifoTopicArn = notifierFifoTopicArn
     member val StoreTableName = storeTableName
@@ -41,10 +41,10 @@ type WatchdogLambdaStackProps
     member val LambdaArchitecture = Architecture.ARM_64
     member val LambdaRuntime = Runtime.DOTNET_6

-type
WatchdogLambdaStack(scope, id, props : WatchdogLambdaStackProps) as stack = +type WatchdogLambdaStack(scope, id, props: WatchdogLambdaStackProps) as stack = inherit Stack(scope, id, props) - let props : DynamoStoreReactorLambdaProps = + let props: DynamoStoreReactorLambdaProps = { updatesSource = UpdatesTopic props.NotifierFifoTopicArn regionName = stack.Region; storeTableName = props.StoreTableName; indexTableName = props.IndexTableName memorySize = props.MemorySize; batchSize = props.BatchSize; timeout = props.Timeout diff --git a/equinox-shipping/Watchdog.Lambda/Function.fs b/equinox-shipping/Watchdog.Lambda/Function.fs index c6536de01..8cf62062f 100644 --- a/equinox-shipping/Watchdog.Lambda/Function.fs +++ b/equinox-shipping/Watchdog.Lambda/Function.fs @@ -25,22 +25,22 @@ type Configuration(appName, ?tryGet) = member val ConsumerGroupName = appName member val CacheName = appName -type Store internal (connector : DynamoStoreConnector, table, indexTable, cacheName, consumerGroupName) = +type Store internal (connector: DynamoStoreConnector, table, indexTable, cacheName, consumerGroupName) = let dynamo = connector.CreateClient() let indexClient = DynamoStoreClient(dynamo, indexTable) let client = DynamoStoreClient(dynamo, table) let context = DynamoStoreContext(client) let cache = Equinox.Cache(cacheName, sizeMb = 1) - let checkpoints = indexClient.CreateCheckpointService(consumerGroupName, cache, Config.log) + let checkpoints = indexClient.CreateCheckpointService(consumerGroupName, cache, Store.log) - new (c : Configuration, requestTimeout, retries) = + new (c: Configuration, requestTimeout, retries) = let conn = match c.DynamoRegion with | Some r -> DynamoStoreConnector(r, requestTimeout, retries) | None -> DynamoStoreConnector(c.DynamoServiceUrl, c.DynamoAccessKey, c.DynamoSecretKey, requestTimeout, retries) Store(conn, c.DynamoTable, c.DynamoIndexTable, c.CacheName, c.ConsumerGroupName) - member val Config = Config.Store.Dynamo (context, cache) + member val Config = Store.Context.Dynamo (context, cache) member val DumpMetrics = Equinox.DynamoStore.Core.Log.InternalMetrics.dump member x.CreateSource(trancheIds, sink) = let batchSizeCutoff = 100 @@ -48,24 +48,24 @@ type Store internal (connector : DynamoStoreConnector, table, indexTable, cacheN let tailSleepInterval = TimeSpan.FromMilliseconds 500. let statsInterval = TimeSpan.FromMinutes 1. let streamsDop = 2 - let loadMode = DynamoLoadModeConfig.Hydrate (context, streamsDop) - Handler.Config.CreateDynamoSource(Log.Logger, sink, (indexClient, checkpoints, loadMode, fromTail, batchSizeCutoff, tailSleepInterval, statsInterval), trancheIds) + let loadMode = Propulsion.DynamoStore.WithData (streamsDop, context) + Handler.Factory.CreateDynamoSource(Log.Logger, sink, (indexClient, checkpoints, loadMode, fromTail, batchSizeCutoff, tailSleepInterval, statsInterval), trancheIds) /// Wiring for Source and Sink running the Watchdog.Handler -type App(store : Store) = +type App(store: Store) = let stats = Handler.Stats(Log.Logger, TimeSpan.FromMinutes 1., TimeSpan.FromMinutes 2., verboseStore = false, logExternalStats = store.DumpMetrics) let processingTimeout = 10. 
|> TimeSpan.FromSeconds
     let sink =
         let manager =
             let processManagerMaxDop = 4
-            FinalizationProcess.Config.create processManagerMaxDop store.Config
+            FinalizationProcess.Factory.create processManagerMaxDop store.Config
         let maxReadAhead = 2
         let maxConcurrentStreams = 8
         // On paper, a 1m window should be fine, give the timeout for a single lifecycle
         // We use a higher value to reduce redundant work in the (edge) case of multiple deliveries due to rate limiting of readers
         let purgeInterval = TimeSpan.FromMinutes 5.
-        Handler.Config.StartSink(Log.Logger, stats, manager, processingTimeout, maxReadAhead, maxConcurrentStreams,
+        Handler.Factory.StartSink(Log.Logger, stats, maxConcurrentStreams, manager, processingTimeout, maxReadAhead,
                                  wakeForResults = true, purgeInterval = purgeInterval)

     member x.RunUntilCaughtUp(tranches, lambdaTimeout) =
@@ -76,7 +76,7 @@ type App(store : Store) =
 type Function() =

     do  // TOCONSIDER surface metrics from write activities to prometheus by wiring up Metrics Sink (for now we emit them to the log instead)
-        let removeMetrics (e : Serilog.Events.LogEvent) =
+        let removeMetrics (e: Serilog.Events.LogEvent) =
             e.RemovePropertyIfPresent(Equinox.DynamoStore.Core.Log.PropertyTag)
             e.RemovePropertyIfPresent(Propulsion.Streams.Log.PropertyTag)
             e.RemovePropertyIfPresent(Propulsion.Feed.Core.Log.PropertyTag)
@@ -92,7 +92,7 @@ type Function() =
     let app = App(store)

     /// Process for all tranches in the input batch; requeue any triggers that we've not yet fully completed the processing for
-    member _.Handle(event : SQSEvent, context : ILambdaContext) : System.Threading.Tasks.Task = task {
+    member _.Handle(event: SQSEvent, context: ILambdaContext): System.Threading.Tasks.Task = task {
         let req = Propulsion.DynamoStore.Lambda.SqsNotificationBatch.parse event
         let! updated = app.RunUntilCaughtUp(req.Tranches, context.RemainingTime)
         return Propulsion.DynamoStore.Lambda.SqsNotificationBatch.batchResponseWithFailuresForPositionsNotReached req updated }
diff --git a/equinox-shipping/Watchdog.Lambda/Watchdog.Lambda.fsproj b/equinox-shipping/Watchdog.Lambda/Watchdog.Lambda.fsproj
index 4fe2eb570..7c4c37479 100644
--- a/equinox-shipping/Watchdog.Lambda/Watchdog.Lambda.fsproj
+++ b/equinox-shipping/Watchdog.Lambda/Watchdog.Lambda.fsproj
@@ -16,7 +16,7 @@
-
+
diff --git a/equinox-shipping/Watchdog/Args.fs b/equinox-shipping/Watchdog/Args.fs
index a1eec993a..52176dd84 100644
--- a/equinox-shipping/Watchdog/Args.fs
+++ b/equinox-shipping/Watchdog/Args.fs
@@ -2,9 +2,10 @@ module Shipping.Infrastructure.Args

 open System

-module Config = Shipping.Domain.Config
-exception MissingArg of message : string with override this.Message = this.message
+module Store = Shipping.Domain.Store
+
+exception MissingArg of message: string with override this.Message = this.message
 let missingArg msg = raise (MissingArg msg)

 let [<Literal>] REGION = "EQUINOX_DYNAMO_REGION"
@@ -14,7 +15,7 @@ let [<Literal>] SECRET_KEY = "EQUINOX_DYNAMO_SECRET_ACCESS_KEY"
 let [<Literal>] TABLE = "EQUINOX_DYNAMO_TABLE"
 let [<Literal>] INDEX_TABLE = "EQUINOX_DYNAMO_TABLE_INDEX"

-type Configuration(tryGet : string -> string option) =
+type Configuration(tryGet: string -> string option) =
     member val tryGet = tryGet
     member _.get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}"
@@ -59,7 +60,7 @@ module Cosmos =
             | Retries _ -> "specify operation retries (default: 1)."
             | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 5)"

-    type Arguments(c : Configuration, p : ParseResults<Parameters>) =
+    type Arguments(c: Configuration, p: ParseResults<Parameters>) =
         let connection = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection)
         let discovery = Equinox.CosmosStore.Discovery.ConnectionString connection
         let mode = p.TryGetResult ConnectionMode
@@ -97,7 +98,7 @@ module Dynamo =
             | Retries _ -> "specify operation retries (default: 1)."
             | RetriesTimeoutS _ -> "specify max wait-time including retries in seconds (default: 5)"

-    type Arguments(c : Configuration, p : ParseResults<Parameters>) =
+    type Arguments(c: Configuration, p: ParseResults<Parameters>) =
         let conn =
             match p.TryGetResult RegionProfile |> Option.orElseWith (fun () -> c.DynamoRegion) with
             | Some systemName -> Choice1Of2 systemName
@@ -126,11 +127,11 @@ type []
 module TargetStoreArgs =

-    let connectTarget targetStore cache : Config.Store<_> =
+    let connectTarget targetStore cache: Store.Context<_> =
         match targetStore with
         | TargetStoreArgs.Cosmos a ->
             let context = a.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create
-            Config.Store.Cosmos (context, cache)
+            Store.Context.Cosmos (context, cache)
         | TargetStoreArgs.Dynamo a ->
             let context = a.Connect() |> DynamoStoreContext.create
-            Config.Store.Dynamo (context, cache)
+            Store.Context.Dynamo (context, cache)
diff --git a/equinox-shipping/Watchdog/Handler.fs b/equinox-shipping/Watchdog/Handler.fs
index 78f261e0b..989395c89 100644
--- a/equinox-shipping/Watchdog/Handler.fs
+++ b/equinox-shipping/Watchdog/Handler.fs
@@ -4,7 +4,7 @@ open Shipping.Infrastructure
 open System

 []
-type Outcome = Completed | Deferred | Resolved of successfully : bool
+type Outcome = Completed | Deferred | Resolved of successfully: bool

 /// Gathers stats based on the outcome of each Span processed, periodically including them in the Sink summaries
 type Stats(log, statsInterval, stateInterval, verboseStore, ?logExternalStats) =
@@ -36,47 +36,43 @@ type Stats(log, statsInterval, stateInterval, verboseStore, ?logExternalStats) =

 open Shipping.Domain

-let private isReactionStream = function
-    | FinalizationTransaction.Category -> true
-    | _ -> false
+let private reactionCategories = [| FinalizationTransaction.Category |]

 let handle
-        (processingTimeout : TimeSpan)
-        (driveTransaction : Shipping.Domain.TransactionId -> Async<bool>)
-        stream span ct = Propulsion.Internal.Async.startImmediateAsTask ct <| async {
+        (processingTimeout: TimeSpan)
+        (driveTransaction: Shipping.Domain.TransactionId -> Async<bool>)
+        stream events = async {
     let processingStuckCutoff = let now = DateTimeOffset.UtcNow in now.Add(-processingTimeout)
-    match stream, span with
+    match stream, events with
     | TransactionWatchdog.Finalization.MatchStatus (transId, state) ->
         match TransactionWatchdog.toStatus processingStuckCutoff state with
         | TransactionWatchdog.Complete ->
-            return struct (Propulsion.Streams.SpanResult.AllProcessed, Outcome.Completed)
+            return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Completed
        | TransactionWatchdog.Active ->
            // We don't want to be warming the data center for no purpose; visiting every second is not too expensive
            do! Async.Sleep 1000 // ms
-            return Propulsion.Streams.SpanResult.PartiallyProcessed 0, Outcome.Deferred
+            return Propulsion.Sinks.StreamResult.NoneProcessed, Outcome.Deferred
        | TransactionWatchdog.Stuck ->
            let!
success = driveTransaction transId - return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Resolved success + return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Resolved success | other -> return failwithf "Span from unexpected category %A" other } -type Config private () = +type Factory private () = - static member private StartSink(log : Serilog.ILogger, stats : Stats, - handle : System.Func>, - maxReadAhead : int, maxConcurrentStreams : int, ?wakeForResults, ?idleDelay, ?purgeInterval) = - Propulsion.Streams.Default.Config.Start(log, maxReadAhead, maxConcurrentStreams, handle, stats, stats.StatsInterval.Period, - ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) + static member private StartSink(log: Serilog.ILogger, stats, maxConcurrentStreams, handle, maxReadAhead, + ?wakeForResults, ?idleDelay, ?purgeInterval) = + Propulsion.Sinks.Factory.StartConcurrent(log, maxReadAhead, maxConcurrentStreams, handle, stats, + ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) - static member StartSink(log, stats, manager : FinalizationProcess.Manager, processingTimeout, - maxReadAhead, maxConcurrentStreams, ?wakeForResults, ?idleDelay, ?purgeInterval) = + static member StartSink(log, stats, maxConcurrentStreams, manager: FinalizationProcess.Manager, processingTimeout, + maxReadAhead, ?wakeForResults, ?idleDelay, ?purgeInterval) = let handle = handle processingTimeout manager.Pump - Config.StartSink(log, stats, handle, maxReadAhead, maxConcurrentStreams, + Factory.StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead, ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) static member StartSource(log, sink, sourceConfig) = - SourceConfig.start (log, Config.log) sink isReactionStream sourceConfig + SourceConfig.start (log, Store.log) sink reactionCategories sourceConfig static member CreateDynamoSource(log, sink, sourceArgs, trancheIds) = - SourceConfig.Dynamo.create (log, Config.log) sink isReactionStream sourceArgs (Some trancheIds) + SourceConfig.Dynamo.create (log, Store.log) sink reactionCategories sourceArgs (Some trancheIds) diff --git a/equinox-shipping/Watchdog/Infrastructure.fs b/equinox-shipping/Watchdog/Infrastructure.fs index 9b33296a2..26ac2f5e6 100644 --- a/equinox-shipping/Watchdog/Infrastructure.fs +++ b/equinox-shipping/Watchdog/Infrastructure.fs @@ -6,7 +6,7 @@ open System module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj type Equinox.CosmosStore.CosmosStoreConnector with @@ -41,7 +41,7 @@ type Equinox.CosmosStore.CosmosStoreConnector with module CosmosStoreContext = /// Create with default packing and querying policies. 
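[Editor's note: two shifts in the Handler rewrite above are worth calling out: reaction filtering moves from an isReactionStream predicate to a reactionCategories array that the sources consume, and handlers now return a Propulsion.Sinks.StreamResult * Outcome pair from a plain async, with the Task/CancellationToken plumbing handled by the library. A minimal handler skeleton under that surface (the body is illustrative only; Outcome is the union defined above):

    let handleStream (_stream: FsCodec.StreamName) (_events: Propulsion.Sinks.Event[]) = async {
        // report how much of the span was handled; NoneProcessed leaves it to be retried
        return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Completed }

Propulsion.Sinks.Factory.StartConcurrent accepts such a function directly, as the StartSink wiring above shows.]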
Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents = maxEvents) @@ -62,7 +62,7 @@ type Equinox.DynamoStore.DynamoStoreClient with type Equinox.DynamoStore.DynamoStoreContext with - member internal x.LogConfiguration(log : ILogger) = + member internal x.LogConfiguration(log: ILogger) = log.Information("DynamoStore Tip thresholds: {maxTipBytes}b {maxTipEvents}e Query Paging {queryMaxItems} items", x.TipOptions.MaxBytes, Option.toNullable x.TipOptions.MaxEvents, x.QueryOptions.MaxItems) @@ -76,19 +76,19 @@ type Amazon.DynamoDBv2.IAmazonDynamoDB with module DynamoStoreContext = /// Create with default packing and querying policies. Search for other `module DynamoStoreContext` impls for custom variations - let create (storeClient : Equinox.DynamoStore.DynamoStoreClient) = + let create (storeClient: Equinox.DynamoStore.DynamoStoreClient) = Equinox.DynamoStore.DynamoStoreContext(storeClient, queryMaxItems = 100) module EventStoreContext = - let create (storeConnection : Equinox.EventStoreDb.EventStoreConnection) = + let create (storeConnection: Equinox.EventStoreDb.EventStoreConnection) = Equinox.EventStoreDb.EventStoreContext(storeConnection, batchSize = 200) [] type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c diff --git a/equinox-shipping/Watchdog/Program.fs b/equinox-shipping/Watchdog/Program.fs index ed41e1d5d..c95c9ac39 100644 --- a/equinox-shipping/Watchdog/Program.fs +++ b/equinox-shipping/Watchdog/Program.fs @@ -4,7 +4,7 @@ open Serilog open Shipping.Infrastructure open System -module Config = Shipping.Domain.Config +module Store = Shipping.Domain.Store module Args = @@ -37,7 +37,7 @@ module Args = | Cosmos _ -> "specify CosmosDB parameters." | Dynamo _ -> "specify DynamoDB input parameters" | Esdb _ -> "specify EventStore DB input parameters" - and Arguments(c : SourceArgs.Configuration, p : ParseResults) = + and Arguments(c: SourceArgs.Configuration, p: ParseResults) = let processorName = p.GetResult ProcessorName let maxReadAhead = p.GetResult(MaxReadAhead, 16) let maxConcurrentProcessors = p.GetResult(MaxWriters, 8) @@ -53,7 +53,7 @@ module Args = processorName, maxReadAhead, maxConcurrentProcessors) (processorName, maxReadAhead, maxConcurrentProcessors) member val ProcessingTimeout = p.GetResult(TimeoutS, 10.) 
|> TimeSpan.FromSeconds
-    member val Store : Choice<SourceArgs.Cosmos.Arguments, SourceArgs.Dynamo.Arguments, SourceArgs.Esdb.Arguments> =
+    member val Store: Choice<SourceArgs.Cosmos.Arguments, SourceArgs.Dynamo.Arguments, SourceArgs.Esdb.Arguments> =
         match p.GetSubCommand() with
         | Cosmos a -> Choice1Of3 <| SourceArgs.Cosmos.Arguments(c, a)
         | Dynamo a -> Choice2Of3 <| SourceArgs.Dynamo.Arguments(c, a)
@@ -63,7 +63,7 @@ module Args =
         | Choice1Of3 s -> s.Verbose
         | Choice2Of3 s -> s.Verbose
         | Choice3Of3 s -> s.Verbose
-    member x.ConnectStoreAndSource(appName) : Config.Store<_> * (ILogger -> string -> SourceConfig) * (ILogger -> unit) =
+    member x.ConnectStoreAndSource(appName): Store.Context<_> * (ILogger -> string -> SourceConfig) * (ILogger -> unit) =
         let cache = Equinox.Cache (appName, sizeMb = x.CacheSizeMb)
         match x.Store with
         | Choice1Of3 a ->
@@ -73,49 +73,49 @@ module Args =
                 let checkpointConfig = CosmosFeedConfig.Persistent (groupName, startFromTail, maxItems, lagFrequency)
                 SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval)
             let context = client |> CosmosStoreContext.create
-            let store = Config.Store.Cosmos (context, cache)
+            let store = Store.Context.Cosmos (context, cache)
             store, buildSourceConfig, Equinox.CosmosStore.Core.Log.InternalMetrics.dump
         | Choice2Of3 a ->
             let context = a.Connect()
             let buildSourceConfig log groupName =
                 let indexStore, startFromTail, batchSizeCutoff, tailSleepInterval, streamsDop = a.MonitoringParams(log)
                 let checkpoints = a.CreateCheckpointStore(groupName, cache)
-                let load = DynamoLoadModeConfig.Hydrate (context, streamsDop)
+                let load = Propulsion.DynamoStore.WithData (streamsDop, context)
                 SourceConfig.Dynamo (indexStore, checkpoints, load, startFromTail, batchSizeCutoff, tailSleepInterval, x.StatsInterval)
-            let store = Config.Store.Dynamo (context, cache)
+            let store = Store.Context.Dynamo (context, cache)
             store, buildSourceConfig, Equinox.DynamoStore.Core.Log.InternalMetrics.dump
         | Choice3Of3 a ->
             let connection = a.Connect(Log.Logger, appName, EventStore.Client.NodePreference.Leader)
             let context = connection |> EventStoreContext.create
-            let store = Config.Store.Esdb (context, cache)
+            let store = Store.Context.Esdb (context, cache)
             let targetStore = a.ConnectTarget(cache)
             let buildSourceConfig log groupName =
                 let startFromTail, maxItems, tailSleepInterval = a.MonitoringParams(log)
                 let checkpoints = a.CreateCheckpointStore(groupName, targetStore)
-                let hydrateBodies = true
-                SourceConfig.Esdb (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval)
+                let withData = true
+                SourceConfig.Esdb (connection.ReadConnection, checkpoints, withData, startFromTail, maxItems, tailSleepInterval, x.StatsInterval)
             store, buildSourceConfig, Equinox.EventStoreDb.Log.InternalMetrics.dump

     /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args
-    let parse tryGetConfigValue argv : Arguments =
+    let parse tryGetConfigValue argv: Arguments =
         let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name
         let parser = ArgumentParser.Create<Parameters>(programName = programName)
         Arguments(SourceArgs.Configuration tryGetConfigValue, parser.ParseCommandLine argv)

let [<Literal>] AppName = "Watchdog"

-let build (args : Args.Arguments) =
+let build (args: Args.Arguments) =
     let consumerGroupName, maxReadAhead, maxConcurrentStreams = args.ProcessorParams()
     let store, buildSourceConfig, dumpMetrics = args.ConnectStoreAndSource(AppName)
     let log = Log.Logger
     let sink =
         let stats = Handler.Stats(log, args.StatsInterval, args.StateInterval, args.VerboseStore, dumpMetrics)
-        let manager =
Shipping.Domain.FinalizationProcess.Config.create args.ProcessManagerMaxDop store - Handler.Config.StartSink(log, stats, manager, args.ProcessingTimeout, maxReadAhead, maxConcurrentStreams, + let manager = Shipping.Domain.FinalizationProcess.Factory.create args.ProcessManagerMaxDop store + Handler.Factory.StartSink(log, stats, maxConcurrentStreams, manager, args.ProcessingTimeout, maxReadAhead, wakeForResults = args.WakeForResults, idleDelay = args.IdleDelay, purgeInterval = args.PurgeInterval) let source, _awaitReactions = let sourceConfig = buildSourceConfig log consumerGroupName - Handler.Config.StartSource(log, sink, sourceConfig) + Handler.Factory.StartSource(log, sink, sourceConfig) sink, source open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException @@ -125,7 +125,7 @@ let run args = [| Async.AwaitKeyboardInterruptAsTaskCanceledException() source.AwaitWithStopOnCancellation() sink.AwaitWithStopOnCancellation() - |] |> Async.Parallel |> Async.Ignore + |] |> Async.Parallel |> Async.Ignore [] let main argv = diff --git a/equinox-shipping/Watchdog/SourceArgs.fs b/equinox-shipping/Watchdog/SourceArgs.fs index 366aa9a0d..e29f57ea7 100644 --- a/equinox-shipping/Watchdog/SourceArgs.fs +++ b/equinox-shipping/Watchdog/SourceArgs.fs @@ -42,7 +42,7 @@ module Cosmos = | MaxItems _ -> "maximum item count to supply for the Change Feed query. Default: use response size limit" | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: 1" - type Arguments(c : Args.Configuration, p : ParseResults) = + type Arguments(c: Args.Configuration, p: ParseResults) = let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds @@ -58,8 +58,8 @@ module Cosmos = let lagFrequency = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes member _.Verbose = p.Contains Verbose member private _.ConnectLeases() = connector.CreateUninitialized(database, leaseContainerId) - member x.MonitoringParams(log : ILogger) = - let leases : Microsoft.Azure.Cosmos.Container = x.ConnectLeases() + member x.MonitoringParams(log: ILogger) = + let leases: Microsoft.Azure.Cosmos.Container = x.ConnectLeases() log.Information("ChangeFeed Leases Database {db} Container {container}. MaxItems limited to {maxItems}", leases.Database.Id, leases.Id, Option.toNullable maxItems) if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") @@ -101,7 +101,7 @@ module Dynamo = | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." | StreamsDop _ -> "parallelism when loading events from Store Feed Source. 
Default 4" - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let conn = match p.TryGetResult RegionProfile |> Option.orElseWith (fun () -> c.DynamoRegion) with | Some systemName -> Choice1Of2 systemName @@ -129,14 +129,14 @@ module Dynamo = member val Verbose = p.Contains Verbose member _.Connect() = connector.LogConfiguration() client.ConnectStore("Main", table) |> DynamoStoreContext.create - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("DynamoStoreSource BatchSizeCutoff {batchSizeCutoff} Hydrater parallelism {streamsDop}", batchSizeCutoff, streamsDop) let indexStoreClient = indexStoreClient.Value if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") indexStoreClient, fromTail, batchSizeCutoff, tailSleepInterval, streamsDop member _.CreateCheckpointStore(group, cache) = let indexTable = indexStoreClient.Value - indexTable.CreateCheckpointService(group, cache, Config.log) + indexTable.CreateCheckpointService(group, cache, Store.log) module Esdb = @@ -146,12 +146,12 @@ module Esdb = alternately one could use a SQL Server DB via Propulsion.SqlStreamStore For now, we store the Checkpoints in one of the above stores as this sample uses one for the read models anyway *) - let private createCheckpointStore (consumerGroup, checkpointInterval) : _ -> Propulsion.Feed.IFeedCheckpointStore = function - | Config.Store.Cosmos (context, cache) -> - Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Config.log (consumerGroup, checkpointInterval) (context, cache) - | Config.Store.Dynamo (context, cache) -> - Propulsion.Feed.ReaderCheckpoint.DynamoStore.create Config.log (consumerGroup, checkpointInterval) (context, cache) - | Config.Store.Memory _ | Config.Store.Esdb _ -> Args.missingArg "Unexpected store type" + let private createCheckpointStore (consumerGroup, checkpointInterval): _ -> Propulsion.Feed.IFeedCheckpointStore = function + | Store.Context.Cosmos (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Store.log (consumerGroup, checkpointInterval) (context, cache) + | Store.Context.Dynamo (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.DynamoStore.create Store.log (consumerGroup, checkpointInterval) (context, cache) + | Store.Context.Memory _ | Store.Context.Esdb _ -> Args.missingArg "Unexpected store type" type [] Parameters = | [] Verbose @@ -179,7 +179,7 @@ module Esdb = | Cosmos _ -> "CosmosDB Target Store parameters (also used for checkpoint storage)." | Dynamo _ -> "DynamoDB Target Store parameters (also used for checkpoint storage)." - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let startFromTail = p.Contains FromTail let maxItems = p.GetResult(MaxItems, 100) let tailSleepInterval = TimeSpan.FromSeconds 0.5 @@ -192,22 +192,22 @@ module Esdb = let checkpointInterval = TimeSpan.FromHours 1. 
member val Verbose = p.Contains Verbose - member _.Connect(log : ILogger, appName, nodePreference) : Equinox.EventStoreDb.EventStoreConnection = + member _.Connect(log: ILogger, appName, nodePreference): Equinox.EventStoreDb.EventStoreConnection = log.Information("EventStore {discovery}", connectionStringLoggable) let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] Equinox.EventStoreDb.EventStoreConnector(timeout, retries, tags = tags) .Establish(appName, discovery, Equinox.EventStoreDb.ConnectionStrategy.ClusterSingle nodePreference) - member private _.TargetStoreArgs : Args.TargetStoreArgs = + member private _.TargetStoreArgs: Args.TargetStoreArgs = match p.GetSubCommand() with | Cosmos cosmos -> Args.TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) | Dynamo dynamo -> Args.TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `esdb`" - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("EventStoreSource MaxItems {maxItems} ", maxItems) startFromTail, maxItems, tailSleepInterval - member x.ConnectTarget(cache) : Config.Store<_> = + member x.ConnectTarget(cache): Store.Context<_> = Args.TargetStoreArgs.connectTarget x.TargetStoreArgs cache - member _.CreateCheckpointStore(group, store) : Propulsion.Feed.IFeedCheckpointStore = + member _.CreateCheckpointStore(group, store): Propulsion.Feed.IFeedCheckpointStore = createCheckpointStore (group, checkpointInterval) store diff --git a/equinox-shipping/Watchdog/SourceConfig.fs b/equinox-shipping/Watchdog/SourceConfig.fs index 4e0252690..025ca3c71 100644 --- a/equinox-shipping/Watchdog/SourceConfig.fs +++ b/equinox-shipping/Watchdog/SourceConfig.fs @@ -5,48 +5,46 @@ open System.Threading.Tasks [] type SourceConfig = - | Memory of store : Equinox.MemoryStore.VolatileStore)> - | Cosmos of monitoredContainer : Microsoft.Azure.Cosmos.Container - * leasesContainer : Microsoft.Azure.Cosmos.Container - * checkpoints : CosmosFeedConfig - * tailSleepInterval : TimeSpan - | Dynamo of indexStore : Equinox.DynamoStore.DynamoStoreClient - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * loading : DynamoLoadModeConfig - * startFromTail : bool - * batchSizeCutoff : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan - | Esdb of client : EventStore.Client.EventStoreClient - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * hydrateBodies : bool - * startFromTail : bool - * batchSize : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan + | Memory of store: Equinox.MemoryStore.VolatileStore)> + | Cosmos of monitoredContainer: Microsoft.Azure.Cosmos.Container + * leasesContainer: Microsoft.Azure.Cosmos.Container + * checkpoints: CosmosFeedConfig + * tailSleepInterval: TimeSpan + | Dynamo of indexStore: Equinox.DynamoStore.DynamoStoreClient + * checkpoints: Propulsion.Feed.IFeedCheckpointStore + * loading: Propulsion.DynamoStore.EventLoadMode + * startFromTail: bool + * batchSizeCutoff: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan + | Esdb of client: EventStore.Client.EventStoreClient + * checkpoints: Propulsion.Feed.IFeedCheckpointStore + * withData: bool + * startFromTail: bool + * batchSize: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan and [] CosmosFeedConfig = - | Ephemeral of processorName : string - | Persistent of processorName : string * startFromTail : bool * maxItems : int option * lagFrequency : TimeSpan -and 
[] DynamoLoadModeConfig = - | Hydrate of monitoredContext : Equinox.DynamoStore.DynamoStoreContext * hydrationConcurrency : int + | Ephemeral of processorName: string + | Persistent of processorName: string * startFromTail: bool * maxItems: int option * lagFrequency: TimeSpan module SourceConfig = module Memory = open Propulsion.MemoryStore - let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter - (store : Equinox.MemoryStore.VolatileStore<_>) : Propulsion.Pipeline * (TimeSpan -> Task) option = - let source = MemoryStoreSource(log, store, categoryFilter, sink) + let start log (sink: Propulsion.Sinks.Sink) (categories: string[]) + (store: Equinox.MemoryStore.VolatileStore<_>): Propulsion.Pipeline * (TimeSpan -> Task) option = + let source = MemoryStoreSource(log, store, categories, sink) source.Start(), Some (fun _propagationDelay -> source.Monitor.AwaitCompletion(ignoreSubsequent = false)) module Cosmos = open Propulsion.CosmosStore - let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter - (monitoredContainer, leasesContainer, checkpointConfig, tailSleepInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = - let parseFeedDoc = EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter + let start log (sink: Propulsion.Sinks.Sink) categories + (monitoredContainer, leasesContainer, checkpointConfig, tailSleepInterval): Propulsion.Pipeline * (TimeSpan -> Task) option = + let parseFeedDoc = EquinoxSystemTextJsonParser.enumCategoryEvents categories let observer = CosmosStoreSource.CreateObserver(log, sink.StartIngester, Seq.collect parseFeedDoc) let source = match checkpointConfig with | Ephemeral processorName -> - let withStartTime1sAgo (x : Microsoft.Azure.Cosmos.ChangeFeedProcessorBuilder) = + let withStartTime1sAgo (x: Microsoft.Azure.Cosmos.ChangeFeedProcessorBuilder) = x.WithStartTime(let t = DateTime.UtcNow in t.AddSeconds -1.) let lagFrequency = TimeSpan.FromMinutes 1. 
                     CosmosStoreSource.Start(log, monitoredContainer, leasesContainer, processorName, observer,
@@ -59,39 +57,36 @@ module SourceConfig =
             source, None
     module Dynamo =
         open Propulsion.DynamoStore
-        let create (log, storeLog) (sink : Propulsion.Streams.Default.Sink) categoryFilter
-                   (indexStore, checkpoints, loadModeConfig, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) trancheIds =
-            let loadMode =
-                match loadModeConfig with
-                | Hydrate (monitoredContext, hydrationConcurrency) -> LoadMode.Hydrated (categoryFilter, hydrationConcurrency, monitoredContext)
+        let create (log, storeLog) (sink: Propulsion.Sinks.Sink) categories
+                   (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) trancheIds =
             DynamoStoreSource(
                 log, statsInterval, indexStore, batchSizeCutoff, tailSleepInterval,
-                checkpoints, sink, loadMode,
+                checkpoints, sink, loadMode, categories = categories,
                 startFromTail = startFromTail, storeLog = storeLog, ?trancheIds = trancheIds)
-        let start (log, storeLog) sink categoryFilter (indexStore, checkpoints, loadModeConfig, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval)
+        let start (log, storeLog) sink categories (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval)
             : Propulsion.Pipeline * (TimeSpan -> Task) option =
-            let source = create (log, storeLog) sink categoryFilter (indexStore, checkpoints, loadModeConfig, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) None
+            let source = create (log, storeLog) sink categories (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) None
            let source = source.Start()
            source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false))
     module Esdb =
         open Propulsion.EventStoreDb
-        let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter
-                  (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option =
+        let start log (sink: Propulsion.Sinks.Sink) categories
+                  (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval): Propulsion.Pipeline * (TimeSpan -> Task) option =
             let source =
                 EventStoreSource(
                     log, statsInterval, client, batchSize, tailSleepInterval,
-                    checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail)
+                    checkpoints, sink, categories, withData = withData, startFromTail = startFromTail)
             let source = source.Start()
             source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false))
-    let start (log, storeLog) sink categoryFilter : SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Task) option = function
+    let start (log, storeLog) sink categories: SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Task) option = function
         | SourceConfig.Memory volatileStore ->
-            Memory.start log sink categoryFilter volatileStore
+            Memory.start log sink categories volatileStore
         | SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) ->
-            Cosmos.start log sink categoryFilter (monitored, leases, checkpointConfig, tailSleepInterval)
-        | SourceConfig.Dynamo (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) ->
-            Dynamo.start (log, storeLog) sink categoryFilter (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval)
-        | SourceConfig.Esdb (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) ->
-            Esdb.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval)
+            Cosmos.start log sink categories (monitored, leases, checkpointConfig, tailSleepInterval)
+        | SourceConfig.Dynamo (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) ->
+            Dynamo.start (log, storeLog) sink categories (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval)
+        | SourceConfig.Esdb (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval) ->
+            Esdb.start log sink categories (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval)
diff --git a/equinox-shipping/Watchdog/Watchdog.fsproj b/equinox-shipping/Watchdog/Watchdog.fsproj
index 3fa1d4110..7a1a6540d 100644
--- a/equinox-shipping/Watchdog/Watchdog.fsproj
+++ b/equinox-shipping/Watchdog/Watchdog.fsproj
@@ -18,10 +18,10 @@
-
-
-
-
+
+
+
+
diff --git a/equinox-testbed/Infrastructure.fs b/equinox-testbed/Infrastructure.fs
index a06325355..d21c92765 100644
--- a/equinox-testbed/Infrastructure.fs
+++ b/equinox-testbed/Infrastructure.fs
@@ -6,26 +6,26 @@ open Serilog
 open System

 module Guid =
-    let inline toStringN (x : Guid) = x.ToString "N"
+    let inline toStringN (x: Guid) = x.ToString "N"

 /// ClientId strongly typed id; represented internally as a Guid; not used for storage so rendering is not significant
 type ClientId = Guid<clientId> and [<Measure>] clientId
-module ClientId = let toString (value : ClientId) : string = Guid.toStringN %value
+module ClientId = let toString (value: ClientId): string = Guid.toStringN %value

 /// SkuId strongly typed id; represented internally as a Guid
 // NB Perf is suboptimal as a key, see Equinox's samples/Store for expanded version
 type SkuId = Guid<skuId> and [<Measure>] skuId
-module SkuId = let toString (value : SkuId) : string = Guid.toStringN %value
+module SkuId = let toString (value: SkuId): string = Guid.toStringN %value

-module Config =
+module Store =

     let log = Serilog.Log.ForContext("isMetric", true)

 module EnvVar =

-    let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj
+    let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj

 type Equinox.CosmosStore.CosmosStoreConnector with
diff --git a/equinox-testbed/Program.fs b/equinox-testbed/Program.fs
index 80b4b0b52..7e83c7655 100644
--- a/equinox-testbed/Program.fs
+++ b/equinox-testbed/Program.fs
@@ -64,7 +64,7 @@ module Args =
 //#if cosmos
                 | Cosmos _ -> "Run transactions in-process against CosmosDb."
 //#endif
-    and TestArguments(c : Storage.Configuration, p : ParseResults) =
+    and TestArguments(c: Storage.Configuration, p: ParseResults) =
         let duration = p.GetResult(DurationM, 30.) |> TimeSpan.FromMinutes
         member val Options = p.GetResults Cached @ p.GetResults Unfolds
         member val Cache = p.Contains Cached
@@ -79,7 +79,7 @@ module Args =
             | [] -> TimeSpan.FromSeconds 10.|> Seq.singleton
            | intervals -> seq { for i in intervals -> TimeSpan.FromSeconds(float i) }
            |> fun intervals -> [| yield duration; yield! intervals |]
-        member x.ConfigureStore(log : ILogger, createStoreLog) =
+        member x.ConfigureStore(log: ILogger, createStoreLog) =
            match p.GetSubCommand() with
 //#if memoryStore || (!cosmos && !eventStore)
            | Memory _ ->
@@ -124,14 +124,14 @@ let createStoreLog verbose verboseConsole maybeSeqEndpoint =
 module LoadTest =
     open Microsoft.Extensions.DependencyInjection
-    let private runLoadTest log testsPerSecond duration errorCutoff reportingIntervals (clients : ClientId[]) runSingleTest =
+    let private runLoadTest log testsPerSecond duration errorCutoff reportingIntervals (clients: ClientId[]) runSingleTest =
         let mutable idx = -1L
         let selectClient () =
             let clientIndex = Interlocked.Increment(&idx) |> int
             clients.[clientIndex % clients.Length]
         let selectClient = async { return async { return selectClient() } }
         Equinox.Tools.TestHarness.Local.runLoadTest log reportingIntervals testsPerSecond errorCutoff duration selectClient runSingleTest
-    let private decorateWithLogger (domainLog : ILogger, verbose) (run: 't -> Async) =
+    let private decorateWithLogger (domainLog: ILogger, verbose) (run: 't -> Async) =
         let execute clientId =
             if not verbose then run clientId
             else async {
@@ -140,10 +140,10 @@ module LoadTest =
                 with e -> domainLog.Warning(e, "Test threw an exception"); e.Reraise () }
         execute
     let private createResultLog fileName = LoggerConfiguration().WriteTo.File(fileName).CreateLogger()
-    let run (log: ILogger) (verbose, verboseConsole, maybeSeq) reportFilename (a : Args.TestArguments) =
+    let run (log: ILogger) (verbose, verboseConsole, maybeSeq) reportFilename (a: Args.TestArguments) =
         let createStoreLog verboseStore = createStoreLog verboseStore verboseConsole maybeSeq
-        let _storeLog, storeConfig: ILogger * Config.Store = a.ConfigureStore(log, createStoreLog)
-        let runSingleTest : ClientId -> Async =
+        let _storeLog, storeConfig: ILogger * Store.Context = a.ConfigureStore(log, createStoreLog)
+        let runSingleTest: ClientId -> Async =
            let services = ServiceCollection()
            Services.register(services, storeConfig)
            let container = services.BuildServiceProvider()
@@ -163,11 +163,11 @@ module LoadTest =
        match storeConfig with
 //#if cosmos
-        | Config.Store.Cosmos _ ->
+        | Store.Context.Cosmos _ ->
            Equinox.CosmosStore.Core.Log.InternalMetrics.dump log
 //#endif
 //#if eventStore
-        | Config.Store.Esdb _ ->
+        | Store.Context.Esdb _ ->
            Equinox.EventStoreDb.Log.InternalMetrics.dump log
 //#endif
 //#if memory
diff --git a/equinox-testbed/Services.fs b/equinox-testbed/Services.fs
index 1dbe5898d..059aae3c7 100644
--- a/equinox-testbed/Services.fs
+++ b/equinox-testbed/Services.fs
@@ -22,7 +22,7 @@ module Domain =
             | Favorited of Favorited
             | Unfavorited of Unfavorited
             interface TypeShape.UnionContract.IUnionContract
-        let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJe
+        let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement

     module Fold =

@@ -30,38 +30,38 @@ module Domain =
         type private InternalState(input: State) =
            let dict = System.Collections.Generic.Dictionary<SkuId, Events.Favorited>()
-            let favorite (e : Events.Favorited) = dict[e.skuId] <- e
+            let favorite (e: Events.Favorited) = dict[e.skuId] <- e
            let favoriteAll (xs: Events.Favorited seq) = for x in xs do favorite x
            do favoriteAll input

            member _.ReplaceAllWith xs = dict.Clear(); favoriteAll xs
-            member _.Favorite(e : Events.Favorited) = favorite e
+            member _.Favorite(e: Events.Favorited) = favorite e
            member _.Unfavorite id = dict.Remove id |> ignore
            member _.AsState() = Seq.toArray dict.Values

-        let initial : State = [||]
+        let initial: State = [||]
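// (A store-independent sketch of exercising this Fold module once complete, using the fold
// defined just below; the date/SkuId values are hypothetical and field types are assumed
// from the event contract:)
//     let state  = Fold.fold Fold.initial [ Events.Favorited { date = d; skuId = sku } ]
//     let state' = Fold.fold state [ Events.Unfavorited { skuId = sku } ]   // back to [||]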
        let private evolve (s: InternalState) = function
            | Events.Snapshotted { net = net } -> s.ReplaceAllWith net
            | Events.Favorited e -> s.Favorite e
            | Events.Unfavorited { skuId = id } -> s.Unfavorite id
-        let fold (state: State) (events: seq<Events.Event>) : State =
+        let fold (state: State) (events: seq<Events.Event>): State =
            let s = InternalState state
            for e in events do evolve s e
            s.AsState()
        let isOrigin = function Events.Snapshotted _ -> true | _ -> false
        let toSnapshot state = Events.Snapshotted { net = state }

-    let private doesntHave skuId (state : Fold.State) = state |> Array.exists (fun x -> x.skuId = skuId) |> not
+    let private doesntHave skuId (state: Fold.State) = state |> Array.exists (fun x -> x.skuId = skuId) |> not

-    let favorite date skuIds (state : Fold.State) =
+    let favorite date skuIds (state: Fold.State) =
        [ for skuId in Seq.distinct skuIds do
            if state |> doesntHave skuId then
                yield Events.Favorited { date = date; skuId = skuId } ]

-    let unfavorite skuId (state : Fold.State) =
+    let unfavorite skuId (state: Fold.State) =
        if state |> doesntHave skuId then [] else
        [ Events.Unfavorited { skuId = skuId } ]

-    type Service internal (resolve : ClientId -> Equinox.Decider<Events.Event, Fold.State>) =
+    type Service internal (resolve: ClientId -> Equinox.Decider<Events.Event, Fold.State>) =

        member x.Favorite(clientId, skus) =
            let decider = resolve clientId
@@ -71,34 +71,34 @@ module Domain =
            let decider = resolve clientId
            decider.Transact(unfavorite sku)

-        member _.List(clientId) : Async =
+        member _.List(clientId): Async =
            let decider = resolve clientId
            decider.Query id

        let create cat =
-            streamId >> Config.createDecider cat Category |> Service
+            streamId >> Store.createDecider cat Category |> Service

-    module Config =
+    module Factory =

        let snapshot = Fold.isOrigin, Fold.toSnapshot
        let private (|Category|) = function
 //#if memoryStore || (!cosmos && !eventStore)
-            | Config.Store.Memory store ->
-                Config.Memory.create Events.codec Fold.initial Fold.fold store
+            | Store.Context.Memory store ->
+                Store.Memory.create Events.codec Fold.initial Fold.fold store
 //#endif
 //#if cosmos
-            | Config.Store.Cosmos (context, caching, unfolds) ->
+            | Store.Context.Cosmos (context, caching, unfolds) ->
                let accessStrategy = if unfolds then Equinox.CosmosStore.AccessStrategy.Snapshot snapshot else Equinox.CosmosStore.AccessStrategy.Unoptimized
-                Config.Cosmos.create Events.codecJe Fold.initial Fold.fold caching accessStrategy context
+                Store.Cosmos.create Events.codecJe Fold.initial Fold.fold caching accessStrategy context
 //#endif
 //#if eventStore
-            | Config.Store.Esdb (context, caching, unfolds) ->
+            | Store.Context.Esdb (context, caching, unfolds) ->
                let accessStrategy = if unfolds then Equinox.EventStoreDb.AccessStrategy.RollingSnapshots snapshot |> Some else None
-                Config.Esdb.create Events.codec Fold.initial Fold.fold caching accessStrategy context
+                Store.Esdb.create Events.codec Fold.initial Fold.fold caching accessStrategy context
 //#endif
-        let create (Category cat) = streamId >> Config.createDecider cat Category |> Service

 open Microsoft.Extensions.DependencyInjection

-let register (services : IServiceCollection, storageConfig) =
-    services.AddSingleton(Domain.Favorites.Config.create storageConfig) |> ignore
+let register (services: IServiceCollection, storageConfig) =
+    services.AddSingleton(Domain.Favorites.Factory.create storageConfig) |> ignore
diff --git a/equinox-testbed/Storage.fs b/equinox-testbed/Storage.fs
index 5faff203b..b8323d318 100644
---
a/equinox-testbed/Storage.fs +++ b/equinox-testbed/Storage.fs @@ -3,10 +3,10 @@ open Argu open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) -type Configuration(tryGet : string -> string option) = +type Configuration(tryGet: string -> string option) = let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" @@ -22,7 +22,7 @@ module MemoryStore = member p.Usage = p |> function | Verbose -> "Include low level Store logging." let config () = - Config.Store.Memory (Equinox.MemoryStore.VolatileStore()) + Store.Context.Memory (Equinox.MemoryStore.VolatileStore()) //#endif //#if cosmos @@ -47,7 +47,7 @@ module Cosmos = | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" | Database _ -> "specify a database name for store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" | Container _ -> "specify a container name for store. (optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds @@ -66,14 +66,14 @@ module Cosmos = open Equinox.CosmosStore let private createContext storeClient maxItems = CosmosStoreContext(storeClient, queryMaxItems = maxItems, tipMaxEvents = 256) - let config (cache, unfolds, maxItems) (info : Arguments) = + let config (cache, unfolds, maxItems) (info: Arguments) = let storeClient = info.Connect() |> Async.RunSynchronously let cacheStrategy = if cache then let c = Equinox.Cache("TestbedTemplate", sizeMb = 50) CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) else CachingStrategy.NoCaching - Config.Store.Cosmos (createContext storeClient maxItems, cacheStrategy, unfolds) + Store.Context.Cosmos (createContext storeClient maxItems, cacheStrategy, unfolds) //#endif //#if eventStore @@ -95,7 +95,7 @@ module EventStore = open Equinox.EventStoreDb - type Arguments(p : ParseResults) = + type Arguments(p: ParseResults) = member val ConnectionString = p.GetResult(ConnectionString) member val Retries = p.GetResult(Retries, 1) @@ -108,7 +108,7 @@ module EventStore = tags = ["M", Environment.MachineName; "I", Guid.NewGuid() |> string]) .Establish("TestbedTemplate", Discovery.ConnectionString connectionString, ConnectionStrategy.ClusterTwinPreferSlaveReads) let private createContext connection batchSize = EventStoreContext(connection, batchSize = batchSize) - let config (log: Serilog.ILogger, storeLog) (cache, unfolds, batchSize) (args : ParseResults) = + let config (log: Serilog.ILogger, storeLog) (cache, unfolds, batchSize) (args: ParseResults) = let a = Arguments args let timeout, retries as operationThrottling = a.Timeout, a.Retries log.Information("EventStore {connectionString} timeout: {timeout}s retries {retries}", @@ -117,7 +117,7 @@ module EventStore = let cacheStrategy = if cache then let c = Equinox.Cache("TestbedTemplate", sizeMb = 50) - CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) 
|> Some + Equinox.CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) |> Some else None - Config.Store.Esdb ((createContext conn batchSize), cacheStrategy, unfolds) + Store.Context.Esdb ((createContext conn batchSize), cacheStrategy, unfolds) //#endif diff --git a/equinox-testbed/Config.fs b/equinox-testbed/Store.fs similarity index 73% rename from equinox-testbed/Config.fs rename to equinox-testbed/Store.fs index ac07d5911..d595fad52 100644 --- a/equinox-testbed/Config.fs +++ b/equinox-testbed/Store.fs @@ -1,21 +1,20 @@ -module TestbedTemplate.Config +module TestbedTemplate.Store let log = Serilog.Log.ForContext("isMetric", true) let createDecider cat = Equinox.Decider.resolve log cat -module EventCodec = +module Codec = open FsCodec.SystemTextJson - let private defaultOptions = Options.Create() let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = - Codec.Create<'t>(options = defaultOptions) - let genJe<'t when 't :> TypeShape.UnionContract.IUnionContract> = - CodecJsonElement.Create<'t>(options = defaultOptions) + Codec.Create<'t>() // options = Options.Default + let genJsonElement<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>() // options = Options.Default module Memory = - let create _codec initial fold store : Equinox.Category<_, _, _> = + let create _codec initial fold store: Equinox.Category<_, _, _> = // While the actual prod codec can be used, the Box codec allows one to stub out the decoding on the basis that // nothing will be proved beyond what a complete roundtripping test per `module Aggregate` would already cover Equinox.MemoryStore.MemoryStoreCategory(store, FsCodec.Box.Codec.Create(), fold, initial) @@ -31,7 +30,7 @@ module Esdb = Equinox.EventStoreDb.EventStoreCategory(context, codec, fold, initial, ?caching = cacheStrategy, ?access = accessStrategy) [] -type Store = +type Context = //#if (memoryStore || (!cosmos && !eventStore)) | Memory of Equinox.MemoryStore.VolatileStore //#endif @@ -39,5 +38,5 @@ type Store = | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.CosmosStore.CachingStrategy * unfolds: bool //#endif //#if eventStore - | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.EventStoreDb.CachingStrategy option * unfolds: bool + | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.CachingStrategy option * unfolds: bool //#endif diff --git a/equinox-testbed/Testbed.fsproj b/equinox-testbed/Testbed.fsproj index c318e9d04..b50a384d3 100644 --- a/equinox-testbed/Testbed.fsproj +++ b/equinox-testbed/Testbed.fsproj @@ -8,7 +8,7 @@ - + @@ -17,11 +17,11 @@ - - - - - + + + + + diff --git a/equinox-web-csharp/Domain/Domain.csproj b/equinox-web-csharp/Domain/Domain.csproj index b05dc1f72..b906ddc70 100755 --- a/equinox-web-csharp/Domain/Domain.csproj +++ b/equinox-web-csharp/Domain/Domain.csproj @@ -5,8 +5,8 @@ - - + + diff --git a/equinox-web-csharp/Domain/Infrastructure.cs b/equinox-web-csharp/Domain/Infrastructure.cs index 115c7aa0f..4a88fe46b 100644 --- a/equinox-web-csharp/Domain/Infrastructure.cs +++ b/equinox-web-csharp/Domain/Infrastructure.cs @@ -8,7 +8,7 @@ public class SystemTextJsonUtf8Codec readonly TypeShape.UnionContract.IEncoder> _codec; public SystemTextJsonUtf8Codec(System.Text.Json.JsonSerializerOptions options) => - _codec = new FsCodec.SystemTextJson.Core.ReadOnlyMemoryEncoder(options); + _codec = new FsCodec.SystemTextJson.Core.ReadOnlyMemoryEncoder(new FsCodec.SystemTextJson.Serdes(options)); public ReadOnlyMemory Encode(T value) => _codec.Encode(value); diff --git 
a/equinox-web-csharp/Web/CosmosContext.cs b/equinox-web-csharp/Web/CosmosContext.cs index 2c4c09c2a..d51c7de8a 100644 --- a/equinox-web-csharp/Web/CosmosContext.cs +++ b/equinox-web-csharp/Web/CosmosContext.cs @@ -62,7 +62,7 @@ await FSharpAsync.StartAsTask( var cacheStrategy = _cache == null ? null - : CachingStrategy.NewSlidingWindow(_cache, TimeSpan.FromMinutes(20)); + : Equinox.CosmosStore.CachingStrategy.NewSlidingWindow(_cache, TimeSpan.FromMinutes(20)); var cat = new CosmosStoreCategory(_store, codec.ToJsonElementCodec(), FuncConvert.FromFunc(fold), initial, cacheStrategy, accessStrategy, compressUnfolds:FSharpOption.None); return args => cat.Resolve(handlerLog).Invoke(args.Item1, args.Item2); } diff --git a/equinox-web-csharp/Web/EquinoxContext.cs b/equinox-web-csharp/Web/EquinoxContext.cs index 4c872fa9c..d35a6a676 100644 --- a/equinox-web-csharp/Web/EquinoxContext.cs +++ b/equinox-web-csharp/Web/EquinoxContext.cs @@ -23,18 +23,10 @@ public static class EquinoxCodec { public static FsCodec.IEventCodec, Unit> Create( Func)> encode, - Func<(string, ReadOnlyMemory), FSharpValueOption> tryDecode) where TEvent: class => + Func, FSharpValueOption> tryDecode) where TEvent: class => - FsCodec.Codec.Create( - FuncConvert.FromFunc(encode), - FuncConvert.FromFunc(tryDecode)); + FsCodec.Codec.Create(encode, tryDecode); - public static FsCodec.IEventCodec, Unit> Create( - Func)> encode, - Func, FSharpValueOption> tryDecode) where TEvent : class => - - Create(encode, tb => tryDecode(tb.Item1, tb.Item2)); - public static FsCodec.IEventCodec, Unit> Create(JsonSerializerOptions options = null) where TEvent: TypeShape.UnionContract.IUnionContract => FsCodec.SystemTextJson.Codec.Create(options); } diff --git a/equinox-web-csharp/Web/Web.csproj b/equinox-web-csharp/Web/Web.csproj index 1ec63138f..df69cee67 100755 --- a/equinox-web-csharp/Web/Web.csproj +++ b/equinox-web-csharp/Web/Web.csproj @@ -5,10 +5,10 @@ - - - - + + + + diff --git a/equinox-web/Domain/Aggregate.fs b/equinox-web/Domain/Aggregate.fs index bf6229141..125c262e6 100644 --- a/equinox-web/Domain/Aggregate.fs +++ b/equinox-web/Domain/Aggregate.fs @@ -12,7 +12,7 @@ module Events = | Happened | Snapshotted of SnapshottedData interface TypeShape.UnionContract.IUnionContract - let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement + let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement module Fold = @@ -21,46 +21,46 @@ module Fold = let evolve s = function | Events.Happened -> { happened = true } | Events.Snapshotted e -> { happened = e.happened} - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve let isOrigin = function Events.Snapshotted _ -> true | _ -> false let toSnapshot state = Events.Snapshotted { happened = state.happened } -let interpretMarkDone (state : Fold.State) = +let interpretMarkDone (state: Fold.State) = if state.happened then [] else [Events.Happened] -type View = { sorted : bool } +type View = { sorted: bool } -type Service internal (resolve : string -> Equinox.Decider) = +type Service internal (resolve: string -> Equinox.Decider) = /// Read the present state // TOCONSIDER: you should probably be separating this out per CQRS and reading from a denormalized/cached set of projections - member _.Read clientId : Async = + member _.Read clientId: Async = let decider = resolve clientId decider.Query(fun s -> { sorted = s.happened }) /// Execute the specified command - member _.MarkDone(clientId, command) : Async = + 
member _.MarkDone(clientId): Async = let decider = resolve clientId decider.Transact(interpretMarkDone) -module Config = +module Factory = let private (|Category|) = function #if (memoryStore || (!cosmos && !dynamo && !eventStore)) - | Config.Store.Memory store -> - Config.Memory.create Events.codec Fold.initial Fold.fold store + | Store.Context.Memory store -> + Store.Memory.create Events.codec Fold.initial Fold.fold store #endif //#endif //#if cosmos - | Config.Store.Cosmos (context, cache) -> - Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + | Store.Context.Cosmos (context, cache) -> + Store.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif //#if dynamo - | Config.Store.Dynamo (context, cache) -> - Config.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + | Store.Context.Dynamo (context, cache) -> + Store.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif //#if eventStore - | Config.Store.Esdb (context, cache) -> - Config.Esdb.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + | Store.Context.Esdb (context, cache) -> + Store.Esdb.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif - let create (Category cat) = Service(streamId >> Config.resolveDecider cat Category) + let create (Category cat) = Service(streamId >> Store.resolveDecider cat Category) diff --git a/equinox-web/Domain/Domain.fsproj b/equinox-web/Domain/Domain.fsproj index 80423ba64..0713ccab4 100644 --- a/equinox-web/Domain/Domain.fsproj +++ b/equinox-web/Domain/Domain.fsproj @@ -7,7 +7,7 @@ - + @@ -17,11 +17,11 @@ - - - - - + + + + + diff --git a/equinox-web/Domain/Infrastructure.fs b/equinox-web/Domain/Infrastructure.fs index 3a95ea9e6..ed05a23e5 100644 --- a/equinox-web/Domain/Infrastructure.fs +++ b/equinox-web/Domain/Infrastructure.fs @@ -4,10 +4,10 @@ open FSharp.UMX // see https://github.com/fsprojects/FSharp.UMX - % operator and open System module Guid = - let inline toStringN (x : Guid) = x.ToString "N" + let inline toStringN (x: Guid) = x.ToString "N" /// ClientId strongly typed id; represented internally as a Guid; not used for storage so rendering is not significant type ClientId = Guid and [] clientId module ClientId = - let toString (value : ClientId) : string = Guid.toStringN %value + let toString (value: ClientId): string = Guid.toStringN %value diff --git a/equinox-web/Domain/Config.fs b/equinox-web/Domain/Store.fs similarity index 87% rename from equinox-web/Domain/Config.fs rename to equinox-web/Domain/Store.fs index 8c00043eb..625cfe6ca 100644 --- a/equinox-web/Domain/Config.fs +++ b/equinox-web/Domain/Store.fs @@ -1,22 +1,21 @@ -module TodoBackendTemplate.Config +module TodoBackendTemplate.Store let log = Serilog.Log.ForContext("isMetric", true) let resolveDecider cat = Equinox.Decider.resolve log cat -module EventCodec = +module Codec = open FsCodec.SystemTextJson - let private defaultOptions = Options.Create() let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = - Codec.Create<'t>(options = defaultOptions) + Codec.Create<'t>() // options = Options.Default let genJsonElement<'t when 't :> TypeShape.UnionContract.IUnionContract> = - CodecJsonElement.Create<'t>(options = defaultOptions) + 
CodecJsonElement.Create<'t>() // options = Options.Default #if (memoryStore || (!cosmos && !dynamo && !eventStore)) module Memory = - let create _codec initial fold store : Equinox.Category<_, _, _> = + let create _codec initial fold store: Equinox.Category<_, _, _> = // While the actual prod codec can be used, the Box codec allows one to stub out the decoding on the basis that // nothing will be proved beyond what a complete roundtripping test per `module Aggregate` would already cover Equinox.MemoryStore.MemoryStoreCategory(store, FsCodec.Box.Codec.Create(), fold, initial) @@ -58,24 +57,24 @@ module Dynamo = module Esdb = let createSnapshotted codec initial fold (isOrigin, toSnapshot) (context, cache) = - let cacheStrategy = Equinox.EventStoreDb.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) + let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) let accessStrategy = Equinox.EventStoreDb.AccessStrategy.RollingSnapshots (isOrigin, toSnapshot) Equinox.EventStoreDb.EventStoreCategory(context, codec, fold, initial, cacheStrategy, accessStrategy) //#endif [] #if (memoryStore || (!cosmos && !dynamo && !eventStore)) -type Store<'t> = +type Context<'t> = | Memory of Equinox.MemoryStore.VolatileStore<'t> #else -type Store = +type Context = #endif //#if cosmos - | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Cache //#endif //#if dynamo - | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache + | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Cache //#endif //#if eventStore - | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.Core.ICache + | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.Cache //#endif diff --git a/equinox-web/Domain/Todo.fs b/equinox-web/Domain/Todo.fs index 6cec2e51c..bd2526abc 100644 --- a/equinox-web/Domain/Todo.fs +++ b/equinox-web/Domain/Todo.fs @@ -7,10 +7,10 @@ let streamId = Equinox.StreamId.gen ClientId.toString // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = - type ItemData = { id : int; order : int; title : string; completed : bool } - type DeletedData = { id : int } - type ClearedData = { nextId : int } - type SnapshotData = { nextId : int; items : ItemData[] } + type ItemData = { id: int; order: int; title: string; completed: bool } + type DeletedData = { id: int } + type ClearedData = { nextId: int } + type SnapshotData = { nextId: int; items: ItemData[] } /// Events we keep in Todo-* streams type Event = | Added of ItemData @@ -22,13 +22,13 @@ module Events = /// For EventStore, AccessStrategy.RollingSnapshots embeds these events every `batchSize` events | Snapshotted of SnapshotData interface TypeShape.UnionContract.IUnionContract - let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement + let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement /// Types and mapping logic used maintain relevant State based on Events observed on the Todo List Stream module Fold = /// Present state of the Todo List as inferred from the Events we've seen to date - type State = { items : Events.ItemData list; nextId : int } + type State = { items: Events.ItemData list; nextId: int } /// State implied by the absence of any events on this stream let initial = { items = []; nextId = 0 } /// Compute State change implied by a given Event @@ -39,7 +39,7 @@ module Fold = | 
Events.Cleared e -> { nextId = e.nextId; items = [] } | Events.Snapshotted s -> { nextId = s.nextId; items = List.ofArray s.items } /// Folds a set of events from the store into a given `state` - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve /// Determines whether a given event represents a checkpoint that implies we don't need to see any preceding events let isOrigin = function Events.Cleared _ | Events.Snapshotted _ -> true | _ -> false /// Prepares an Event that encodes all relevant aspects of a State such that `evolve` can rehydrate a complete State from it @@ -48,51 +48,51 @@ module Fold = /// Properties that can be edited on a Todo List item type Props = { order: int; title: string; completed: bool } -let mkItem id (value : Props) : Events.ItemData = { id = id; order = value.order; title = value.title; completed = value.completed } +let mkItem id (value: Props): Events.ItemData = { id = id; order = value.order; title = value.title; completed = value.completed } -let decideAdd value (state : Fold.State) = +let decideAdd value (state: Fold.State) = [ Events.Added (mkItem state.nextId value) ] -let decideUpdate itemId value (state : Fold.State) = +let decideUpdate itemId value (state: Fold.State) = let proposed = mkItem itemId value match state.items |> List.tryFind (function { id = id } -> id = itemId) with | Some current when current <> proposed -> [ Events.Updated proposed ] | _ -> [] -let decideDelete id (state : Fold.State) = +let decideDelete id (state: Fold.State) = if state.items |> List.exists (fun x -> x.id = id) then [ Events.Deleted { id=id } ] else [] -let decideClear (state : Fold.State) = +let decideClear (state: Fold.State) = if state.items |> List.isEmpty then [] else [ Events.Cleared { nextId = state.nextId } ] /// A single Item in the Todo List type View = { id: int; order: int; title: string; completed: bool } -let private render (item: Events.ItemData) : View = +let private render (item: Events.ItemData): View = { id = item.id order = item.order title = item.title completed = item.completed } /// Defines operations that a Controller can perform on a Todo List -type Service internal (resolve : ClientId -> Equinox.Decider) = +type Service internal (resolve: ClientId -> Equinox.Decider) = (* READ *) /// List all open items - member _.List clientId : Async = + member _.List clientId: Async = let decider = resolve clientId decider.Query(fun x -> seq { for x in x.items -> render x }) /// Load details for a single specific item - member _.TryGet(clientId, id) : Async = + member _.TryGet(clientId, id): Async = let decider = resolve clientId decider.Query(fun x -> x.items |> List.tryFind (fun x -> x.id = id) |> Option.map render) (* WRITE *) /// Execute the specified (blind write) command - member _.Execute(clientId , command) : Async = + member _.Execute(clientId , command): Async = let decider = resolve clientId decider.Transact command @@ -112,40 +112,40 @@ type Service internal (resolve : ClientId -> Equinox.Decider = + member _.Clear(clientId): Async = let decider = resolve clientId decider.Transact decideClear (* WRITE-READ *) /// Create a new ToDo List item; response contains the generated `id` - member _.Create(clientId, template: Props) : Async = + member _.Create(clientId, template: Props): Async = let decider = resolve clientId decider.Transact(decideAdd template, fun s -> s.items |> List.head |> render) /// Update the specified item as referenced by the `item.id` - member 
_.Patch(clientId, id: int, value: Props) : Async = + member _.Patch(clientId, id: int, value: Props): Async = let decider = resolve clientId - let echoUpdated id (s : Fold.State) = s.items |> List.find (fun x -> x.id = id) + let echoUpdated id (s: Fold.State) = s.items |> List.find (fun x -> x.id = id) decider.Transact(decideUpdate id value, echoUpdated id >> render) -module Config = +module Factory = let private resolveCategory = function #if (memoryStore || (!cosmos && !dynamo && !eventStore)) - | Config.Store.Memory store -> - Config.Memory.create Events.codec Fold.initial Fold.fold store + | Store.Context.Memory store -> + Store.Memory.create Events.codec Fold.initial Fold.fold store #endif //#if cosmos - | Config.Store.Cosmos (context, cache) -> - Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + | Store.Context.Cosmos (context, cache) -> + Store.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif //#if dynamo - | Config.Store.Dynamo (context, cache) -> - Config.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + | Store.Context.Dynamo (context, cache) -> + Store.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif //#if eventStore - | Config.Store.Esdb (context, cache) -> - Config.Esdb.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + | Store.Context.Esdb (context, cache) -> + Store.Esdb.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif - let create store = Service(fun id -> Config.resolveDecider (resolveCategory store) Category (streamId id)) + let create store = Service(fun id -> Store.resolveDecider (resolveCategory store) Category (streamId id)) diff --git a/equinox-web/Web/Controllers/TodosController.fs b/equinox-web/Web/Controllers/TodosController.fs index d0fd33544..f9c84a731 100644 --- a/equinox-web/Web/Controllers/TodosController.fs +++ b/equinox-web/Web/Controllers/TodosController.fs @@ -24,40 +24,40 @@ type GetByIdArgsTemplate = { id: int } type TodosController(service: Todo.Service) = inherit ControllerBase() - let toProps (value : TodoView) : Todo.Props = { order = value.order; title = value.title; completed = value.completed } + let toProps (value: TodoView): Todo.Props = { order = value.order; title = value.title; completed = value.completed } - member private this.WithUri(x : Todo.View) : TodoView = + member private this.WithUri(x: Todo.View): TodoView = let url = this.Url.RouteUrl("GetTodo", { id=x.id }, this.Request.Scheme) // Supplying scheme is secret sauce for making it absolute as required by client { id = x.id; url = url; order = x.order; title = x.title; completed = x.completed } [] - member this.Get([]clientId : ClientId) = async { + member this.Get([]clientId: ClientId) = async { let! xs = service.List(clientId) return seq { for x in xs -> this.WithUri(x) } } [] - member this.Get([]clientId : ClientId, id) : Async = async { + member this.Get([]clientId: ClientId, id): Async = async { let! 
x = service.TryGet(clientId, id) return match x with None -> this.NotFound() :> _ | Some x -> ObjectResult(this.WithUri x) :> _ } [] - member this.Post([]clientId : ClientId, []value : TodoView) : Async = async { + member this.Post([]clientId: ClientId, []value: TodoView): Async = async { let! created = service.Create(clientId, toProps value) return this.WithUri created } [] - member this.Patch([]clientId : ClientId, id, []value : TodoView) : Async = async { + member this.Patch([]clientId: ClientId, id, []value: TodoView): Async = async { let! updated = service.Patch(clientId, id, toProps value) return this.WithUri updated } [] - member _.Delete([]clientId : ClientId, id): Async = + member _.Delete([]clientId: ClientId, id): Async = service.Delete(clientId, id) [] - member _.DeleteAll([]clientId : ClientId): Async = + member _.DeleteAll([]clientId: ClientId): Async = service.Clear(clientId) diff --git a/equinox-web/Web/Program.fs b/equinox-web/Web/Program.fs index eea2edf7c..99441af4d 100644 --- a/equinox-web/Web/Program.fs +++ b/equinox-web/Web/Program.fs @@ -8,7 +8,7 @@ open Serilog type Logging() = [] - static member Configure(c : LoggerConfiguration, appName) = + static member Configure(c: LoggerConfiguration, appName) = let customTags = ["app", appName] c .MinimumLevel.Debug() @@ -22,7 +22,7 @@ type Logging() = .Enrich.FromLogContext() .WriteTo.Console() -let createWebHostBuilder args : IWebHostBuilder = +let createWebHostBuilder args: IWebHostBuilder = WebHost .CreateDefaultBuilder(args) .UseSerilog() diff --git a/equinox-web/Web/Startup.fs b/equinox-web/Web/Startup.fs index 42d87f366..8fd057f5d 100644 --- a/equinox-web/Web/Startup.fs +++ b/equinox-web/Web/Startup.fs @@ -10,11 +10,11 @@ open System open TodoBackendTemplate /// Equinox store bindings -module Storage = +module Store = /// Specifies the store to be used, together with any relevant custom parameters [] - type Store = + type Context = //#if (memoryStore || (!cosmos && !dynamo && !eventStore)) | Memory //#endif @@ -25,7 +25,7 @@ module Storage = | Cosmos of mode: Microsoft.Azure.Cosmos.ConnectionMode * connectionStringWithUriAndKey: string * database: string * container: string * cacheMb: int //#endif //#if dynamo - | Dynamo of region : string * tableName: string * cacheMb: int + | Dynamo of region: string * tableName: string * cacheMb: int //#endif //#if (memoryStore || (!cosmos && !dynamo && !eventStore)) @@ -53,7 +53,7 @@ module Storage = module CosmosStoreContext = /// Create with default packing and querying policies. Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : CosmosStoreClient) = + let create (storeClient: CosmosStoreClient) = let maxEvents = 256 CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) @@ -69,7 +69,7 @@ module Storage = module DynamoStoreContext = /// Create with default packing and querying policies. 
Search for other `module DynamoStoreContext` impls for custom variations - let create (storeClient : DynamoStoreClient) = + let create (storeClient: DynamoStoreClient) = let maxEvents = 256 DynamoStoreContext(storeClient, tipMaxEvents = maxEvents) @@ -81,44 +81,44 @@ module Storage = /// Creates and/or connects to a specific store as dictated by the specified config let connect = function //#if (memoryStore || (!cosmos && !dynamo && !eventStore)) - | Store.Memory -> + | Context.Memory -> let store = Memory.connect() - Config.Store.Memory store + Store.Context.Memory store //#endif //#if eventStore - | Store.Esdb (connectionString, cache) -> + | Context.Esdb (connectionString, cache) -> let cache = Equinox.Cache("ES", sizeMb = cache) let conn = ES.connect connectionString - Config.Store.Esdb (conn, cache) + Store.Context.Esdb (conn, cache) //#endif //#if cosmos - | Store.Cosmos (mode, connectionString, database, container, cache) -> + | Context.Cosmos (mode, connectionString, database, container, cache) -> let cache = Equinox.Cache("Cosmos", sizeMb = cache) let retriesOn429Throttling = 1 // Number of retries before failing processing when provisioned RU/s limit in CosmosDb is breached let timeout = TimeSpan.FromSeconds 5. // Timeout applied per request to CosmosDb, including retry attempts let context = Cosmos.connect (mode, Equinox.CosmosStore.Discovery.ConnectionString connectionString, database, container) (timeout, retriesOn429Throttling, timeout) - Config.Store.Cosmos (context, cache) + Store.Context.Cosmos (context, cache) //#endif //#if dynamo - | Store.Dynamo (region, table, cache) -> + | Context.Dynamo (region, table, cache) -> let cache = Equinox.Cache("Dynamo", sizeMb = cache) let retries = 1 // Number of retries before failing processing when provisioned RU/s limit in CosmosDb is breached let timeout = TimeSpan.FromSeconds 5. // Timeout applied per request, including retry attempts let context = Dynamo.connect (region, table) (timeout, retries) - Config.Store.Dynamo (context, cache) + Store.Context.Dynamo (context, cache) //#endif /// Dependency Injection wiring for services using Equinox module Services = /// Registers the Equinox Store, Stream Resolver, Service Builder and the Service - let register (services : IServiceCollection, storeCfg) = - let store = Storage.connect storeCfg + let register (services: IServiceCollection, storeCfg) = + let store = Store.connect storeCfg //#if todos - services.AddSingleton(Todo.Config.create store) |> ignore + services.AddSingleton(Todo.Factory.create store) |> ignore //#endif //#if aggregate - services.AddSingleton(Aggregate.Config.create store) |> ignore + services.AddSingleton(Aggregate.Factory.create store) |> ignore //#else //services.AddSingleton(Thing.Config.create store) |> ignore //#endif @@ -127,7 +127,7 @@ module Services = type Startup() = // This method gets called by the runtime. Use this method to add services to the container. 
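// (A minimal sketch of consuming the registrations made by Services.register above; assumes
// the standard Microsoft.Extensions.DependencyInjection container, as the TodosController
// shown earlier does via constructor injection:)
//     let provider = services.BuildServiceProvider()
//     let todos = provider.GetRequiredService<Todo.Service>()
//     // todos.List clientId, todos.Create(clientId, props), etc.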
- member _.ConfigureServices(services: IServiceCollection) : unit = + member _.ConfigureServices(services: IServiceCollection): unit = services .AddMvc() .AddJsonOptions(fun options -> @@ -145,7 +145,7 @@ type Startup() = //#if eventStore // EVENTSTORE: See https://github.com/jet/equinox/blob/master/docker-compose.yml for the associated docker-compose configuration - let storeConfig = Storage.Store.Esdb ("esdb://admin:changeit@localhost:2111,localhost:2112,localhost:2113?tls=true&tlsVerifyCert=false", cacheMb) + let storeConfig = Store.Context.Esdb ("esdb://admin:changeit@localhost:2111,localhost:2112,localhost:2113?tls=true&tlsVerifyCert=false", cacheMb) //#endif //#if cosmos @@ -161,13 +161,13 @@ type Startup() = match read connectionVar, read databaseVar, read containerVar with | Some connection, Some database, Some container -> let connMode = Microsoft.Azure.Cosmos.ConnectionMode.Direct // Best perf - select one of the others iff using .NETCore on linux or encounter firewall issues - Storage.Store.Cosmos (connMode, connection, database, container, cacheMb) + Store.Context.Cosmos (connMode, connection, database, container, cacheMb) //#if cosmosSimulator | None, Some database, Some container -> // alternately, you can feed in this connection string in as a parameter externally and remove this special casing let wellKnownConnectionStringForCosmosDbSimulator = "AccountEndpoint=https://localhost:8081;AccountKey=C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==;" - Storage.Store.Cosmos (Microsoft.Azure.Cosmos.ConnectionMode.Direct, wellKnownConnectionStringForCosmosDbSimulator, database, container, cacheMb) + Store.Context.Cosmos (Microsoft.Azure.Cosmos.ConnectionMode.Direct, wellKnownConnectionStringForCosmosDbSimulator, database, container, cacheMb) //#endif | _ -> failwithf "Event Storage subsystem requires the following Environment Variables to be specified: %s, %s, %s" connectionVar databaseVar containerVar @@ -179,23 +179,23 @@ type Startup() = let read key = Environment.GetEnvironmentVariable key |> Option.ofObj match read regionVar, read tableVar with | Some region, Some table -> - Storage.Store.Dynamo (region, table, cacheMb) + Store.Context.Dynamo (region, table, cacheMb) | _ -> failwithf "Event Storage subsystem requires the following Environment Variables to be specified: %s, %s" regionVar tableVar //#endif #if (memoryStore && !cosmos && !dynamo && !eventStore) - let storeConfig = Storage.Store.Memory + let storeConfig = Store.Context.Memory #endif //#if (!memoryStore && !cosmos && !dynamo && !eventStore) - //let storeConfig = Storage.Store.Memory + //let storeConfig = Store.Context.Memory //#endif Services.register(services, storeConfig) // This method gets called by the runtime. Use this method to configure the HTTP request pipeline. 
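// (The Cosmos branch above reads its settings from environment variables; a hypothetical local
// configuration, reusing the variable names shown for the testbed earlier and the simulator
// connection string quoted above:)
//     EQUINOX_COSMOS_CONNECTION=AccountEndpoint=https://localhost:8081;AccountKey=C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==;
//     EQUINOX_COSMOS_DATABASE=templateDb
//     EQUINOX_COSMOS_CONTAINER=templateContainer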
- member _.Configure(app: IApplicationBuilder, env: IHostEnvironment) : unit = + member _.Configure(app: IApplicationBuilder, env: IHostEnvironment): unit = if env.IsDevelopment() then app.UseDeveloperExceptionPage() |> ignore else app.UseHsts() |> ignore diff --git a/equinox-web/Web/Web.fsproj b/equinox-web/Web/Web.fsproj index 1e1a879fc..640f45c08 100644 --- a/equinox-web/Web/Web.fsproj +++ b/equinox-web/Web/Web.fsproj @@ -11,12 +11,12 @@ - - + + - + diff --git a/feed-consumer/ApiClient.fs b/feed-consumer/ApiClient.fs index 21c3a6047..deb813c22 100644 --- a/feed-consumer/ApiClient.fs +++ b/feed-consumer/ApiClient.fs @@ -10,51 +10,51 @@ open FeedConsumerTemplate.Domain module TrancheId = - let toFcId (x : Propulsion.Feed.TrancheId) : FcId = %x - let ofFcId (x : FcId) : Propulsion.Feed.TrancheId = %x + let toFcId (x: Propulsion.Feed.TrancheId): FcId = %x + let ofFcId (x: FcId): Propulsion.Feed.TrancheId = %x type TicketsEpochId = int and [] ticketsEpochId [] -type TicketsTranchesDto = { activeEpochs : TrancheReferenceDto[] } - and TrancheReferenceDto = { fc : FcId; epochId : TicketsEpochId } +type TicketsTranchesDto = { activeEpochs: TrancheReferenceDto[] } + and TrancheReferenceDto = { fc: FcId; epochId: TicketsEpochId } (* Each Tranche response includes a checkpoint, which can be presented to Poll in order to resume consumption *) type TicketsCheckpoint = int64 and [] ticketsCheckpoint module TicketsCheckpoint = - let ofPosition (x : Propulsion.Feed.Position) : TicketsCheckpoint = %x - let toPosition (x : TicketsCheckpoint) : Propulsion.Feed.Position = %x - let toStreamIndex (x : TicketsCheckpoint) : int64 = %x + let ofPosition (x: Propulsion.Feed.Position): TicketsCheckpoint = %x + let toPosition (x: TicketsCheckpoint): Propulsion.Feed.Position = %x + let toStreamIndex (x: TicketsCheckpoint): int64 = %x -type ItemDto = { id : TicketId; payload : string } -type SliceDto = { closed : bool; tickets : ItemDto[]; position : TicketsCheckpoint; checkpoint : TicketsCheckpoint } +type ItemDto = { id: TicketId; payload: string } +type SliceDto = { closed: bool; tickets: ItemDto[]; position: TicketsCheckpoint; checkpoint: TicketsCheckpoint } type Session(client: HttpClient) = - member _.Send(req : HttpRequestMessage) : Async = + member _.Send(req: HttpRequestMessage): Async = client.Send2(req) type TicketsClient(session: Session) = let basePath = "api/tickets" - member _.ActiveFcs() : Async = async { + member _.ActiveFcs(): Async = async { let request = HttpReq.get () |> HttpReq.withPath basePath let! response = session.Send request let! body = response |> HttpRes.deserializeOkStj return [| for f in body.activeEpochs -> f.fc |] } - member _.ReadPage(fc : FcId, index : int) : Async = async { + member _.ReadPage(fc: FcId, index: int): Async = async { let request = HttpReq.post () |> HttpReq.withPathf "%s/%O/%d" basePath fc index let! response = session.Send request return! response |> HttpRes.deserializeOkStj } - member _.Poll(fc : FcId, checkpoint: TicketsCheckpoint) : Async = async { + member _.Poll(fc: FcId, checkpoint: TicketsCheckpoint): Async = async { let request = HttpReq.create () |> HttpReq.withPathf "%s/%O/slice/%O" basePath fc checkpoint let! response = session.Send request return! 
response |> HttpRes.deserializeOkStj @@ -70,17 +70,17 @@ type TicketsFeed(baseUri) = let tickets = Session(client).Tickets // TODO add retries - consumer loop will abort if this throws - member _.Poll(trancheId, pos, ct) = task { + member _.Poll(trancheId, pos) = async { let checkpoint = TicketsCheckpoint.ofPosition pos let! pg = tickets.Poll(TrancheId.toFcId trancheId, checkpoint) let baseIndex = TicketsCheckpoint.toStreamIndex pg.position - let map (x : ItemDto) : Ingester.PipelineEvent.Item = { id = x.id; payload = x.payload } + let map (x: ItemDto): Ingester.PipelineEvent.Item = { id = x.id; payload = x.payload } let items = pg.tickets |> Array.mapi (fun i x -> Ingester.PipelineEvent.ofIndexAndItem (baseIndex + int64 i) (map x)) - return ({ checkpoint = TicketsCheckpoint.toPosition pg.checkpoint; items = items; isTail = not pg.closed } : Propulsion.Feed.Page) + return ({ checkpoint = TicketsCheckpoint.toPosition pg.checkpoint; items = items; isTail = not pg.closed }: Propulsion.Feed.Page) } // TODO add retries - consumer loop will not commence if this emits an exception - member _.ReadTranches(ct) = task { - let! activeFcs = tickets.ActiveFcs() |> Async.startImmediateAsTask ct + member _.ReadTranches() = async { + let! activeFcs = tickets.ActiveFcs() return [| for f in activeFcs -> TrancheId.ofFcId f |] } diff --git a/feed-consumer/FeedConsumer.fsproj b/feed-consumer/FeedConsumer.fsproj index 08aaba268..60d00b33f 100644 --- a/feed-consumer/FeedConsumer.fsproj +++ b/feed-consumer/FeedConsumer.fsproj @@ -16,11 +16,11 @@ - - - - - + + + + + diff --git a/feed-consumer/Infrastructure.fs b/feed-consumer/Infrastructure.fs index 0404a2180..455b8e3cc 100644 --- a/feed-consumer/Infrastructure.fs +++ b/feed-consumer/Infrastructure.fs @@ -5,13 +5,13 @@ open Serilog open System open System.Text -module Config = +module Store = let log = Log.ForContext("isMetric", true) module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj module Log = @@ -36,7 +36,7 @@ type Equinox.CosmosStore.CosmosStoreConnector with module CosmosStoreContext = /// Create with default packing and querying policies. 
Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) @@ -46,19 +46,19 @@ module Sinks = let tags appName = ["app", appName] - let equinoxMetricsOnly tags (l : LoggerConfiguration) = + let equinoxMetricsOnly tags (l: LoggerConfiguration) = l.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.CosmosStore.Prometheus.LogSink(tags)) - let equinoxAndPropulsionConsumerMetrics tags group (l : LoggerConfiguration) = + let equinoxAndPropulsionConsumerMetrics tags group (l: LoggerConfiguration) = l |> equinoxMetricsOnly tags |> fun l -> l.WriteTo.Sink(Propulsion.Prometheus.LogSink(tags, group)) - let equinoxAndPropulsionFeedConsumerMetrics tags source (l : LoggerConfiguration) = + let equinoxAndPropulsionFeedConsumerMetrics tags source (l: LoggerConfiguration) = l |> equinoxAndPropulsionConsumerMetrics tags (Propulsion.Feed.SourceId.toString source) |> fun l -> l.WriteTo.Sink(Propulsion.Feed.Prometheus.LogSink(tags)) - let console (configuration : LoggerConfiguration) = + let console (configuration: LoggerConfiguration) = let t = "[{Timestamp:HH:mm:ss} {Level:u1}] {Message:lj} {Properties:j}{NewLine}{Exception}" configuration.WriteTo.Console(theme=Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code, outputTemplate=t) @@ -66,14 +66,14 @@ module Sinks = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c [] - static member private Sinks(configuration : LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = - let configure (a : Configuration.LoggerSinkConfiguration) : unit = + static member private Sinks(configuration: LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = + let configure (a: Configuration.LoggerSinkConfiguration): unit = a.Logger(configureMetricsSinks >> ignore) |> ignore // unconditionally feed all log events to the metrics sinks a.Logger(fun l -> // but filter what gets emitted to the console sink let l = match isMetric with None -> l | Some predicate -> l.Filter.ByExcluding(Func predicate) @@ -82,19 +82,19 @@ type Logging() = configuration.WriteTo.Async(bufferSize=65536, blockWhenFull=true, configure=Action<_> configure) [] - static member Sinks(configuration : LoggerConfiguration, configureMetricsSinks, verboseStore) = + static member Sinks(configuration: LoggerConfiguration, configureMetricsSinks, verboseStore) = configuration.Sinks(configureMetricsSinks, Sinks.console, ?isMetric = if verboseStore then None else Some Log.isStoreMetrics) type Async with - static member Sleep(t : TimeSpan) : Async = Async.Sleep(int t.TotalMilliseconds) + static member Sleep(t: TimeSpan): Async = Async.Sleep(int t.TotalMilliseconds) /// Re-raise an exception so that the current stacktrace is preserved - static member Raise(e : #exn) : Async<'T> = Async.FromContinuations (fun (_,ec,_) -> ec e) + static member Raise(e: #exn): Async<'T> = Async.FromContinuations (fun (_,ec,_) -> ec e) type StringBuilder with member sb.Appendf fmt = Printf.ksprintf (ignore << sb.Append) fmt member sb.Appendfn fmt = Printf.ksprintf (ignore << sb.AppendLine) fmt - 
static member inline Build(builder : StringBuilder -> unit) = + static member inline Build(builder: StringBuilder -> unit) = let instance = StringBuilder() // TOCONSIDER PooledStringBuilder.GetInstance() builder instance instance.ToString() @@ -110,7 +110,7 @@ module HttpReq = let inline create () = new HttpRequestMessage() /// Assigns a method to an HTTP request. - let inline withMethod (m : HttpMethod) (req : HttpRequestMessage) = + let inline withMethod (m: HttpMethod) (req: HttpRequestMessage) = req.Method <- m req @@ -121,12 +121,12 @@ module HttpReq = let inline post () = create () |> withMethod HttpMethod.Post /// Assigns a path to an HTTP request. - let inline withUri (u : Uri) (req : HttpRequestMessage) = + let inline withUri (u: Uri) (req: HttpRequestMessage) = req.RequestUri <- u req /// Assigns a path to an HTTP request. - let inline withPath (p : string) (req : HttpRequestMessage) = + let inline withPath (p: string) (req: HttpRequestMessage) = req |> withUri (Uri(p, UriKind.Relative)) /// Assigns a path to a Http request using printf-like formatting. @@ -151,7 +151,7 @@ type HttpClient with /// Drop-in replacement for HttpClient.SendAsync which addresses known timeout issues /// /// HttpRequestMessage to be submitted. - member client.Send2(msg : HttpRequestMessage) = async { + member client.Send2(msg: HttpRequestMessage) = async { let! ct = Async.CancellationToken try return! client.SendAsync(msg, ct) |> Async.AwaitTask // address https://github.com/dotnet/corefx/issues/20296 @@ -169,19 +169,19 @@ type InvalidHttpResponseException = inherit Exception // TODO: include headers - val private userMessage : string - val private requestMethod : string - val RequestUri : Uri - val RequestBody : string - val StatusCode : HttpStatusCode - val ReasonPhrase : string - val ResponseBody : string + val private userMessage: string + val private requestMethod: string + val RequestUri: Uri + val RequestBody: string + val StatusCode: HttpStatusCode + val ReasonPhrase: string + val ResponseBody: string member e.RequestMethod = HttpMethod(e.requestMethod) - private new (userMessage : string, requestMethod : HttpMethod, requestUri : Uri, requestBody : string, - statusCode : HttpStatusCode, reasonPhrase : string, responseBody : string, - ?innerException : exn) = + private new (userMessage: string, requestMethod: HttpMethod, requestUri: Uri, requestBody: string, + statusCode: HttpStatusCode, reasonPhrase: string, responseBody: string, + ?innerException: exn) = { inherit Exception(message = null, innerException = defaultArg innerException null) ; userMessage = userMessage ; requestMethod = string requestMethod ; RequestUri = requestUri ; RequestBody = requestBody ; @@ -196,13 +196,13 @@ type InvalidHttpResponseException = sb.Appendfn "ResponseBody=%s" (getBodyString e.ResponseBody)) interface ISerializable with - member e.GetObjectData(si : SerializationInfo, sc : StreamingContext) = - let add name (value:obj) = si.AddValue(name, value) + member e.GetObjectData(si: SerializationInfo, sc: StreamingContext) = + let add name (value: obj) = si.AddValue(name, value) base.GetObjectData(si, sc) ; add "userMessage" e.userMessage ; add "requestUri" e.RequestUri ; add "requestMethod" e.requestMethod ; add "requestBody" e.RequestBody add "statusCode" e.StatusCode ; add "reasonPhrase" e.ReasonPhrase ; add "responseBody" e.ResponseBody - new (si : SerializationInfo, sc : StreamingContext) = + new (si: SerializationInfo, sc: StreamingContext) = let get name = si.GetValue(name, typeof<'a>) :?> 'a { inherit 
Exception(si, sc) ; userMessage = get "userMessage" ; @@ -210,7 +210,7 @@ type InvalidHttpResponseException = StatusCode = get "statusCode" ; ReasonPhrase = get "reasonPhrase" ; ResponseBody = get "responseBody" } - static member Create(userMessage : string, response : HttpResponseMessage, ?innerException : exn) = async { + static member Create(userMessage: string, response: HttpResponseMessage, ?innerException: exn) = async { let request = response.RequestMessage let! responseBodyC = response.Content.ReadAsStringDiapered() |> Async.StartChild let! requestBody = request.Content.ReadAsStringDiapered() @@ -222,13 +222,13 @@ type InvalidHttpResponseException = ?innerException = innerException) } - static member Create(response : HttpResponseMessage, ?innerException : exn) = + static member Create(response: HttpResponseMessage, ?innerException: exn) = InvalidHttpResponseException.Create("HTTP request yielded unexpected response.", response, ?innerException = innerException) type HttpResponseMessage with /// Raises an InvalidHttpResponseException if the response status code does not match expected value. - member response.EnsureStatusCode(expectedStatusCode : HttpStatusCode) = async { + member response.EnsureStatusCode(expectedStatusCode: HttpStatusCode) = async { if response.StatusCode <> expectedStatusCode then let! exn = InvalidHttpResponseException.Create("Http request yielded unanticipated HTTP Result.", response) do raise exn @@ -236,7 +236,7 @@ type HttpResponseMessage with /// Asynchronously deserializes the json response content using the supplied `deserializer`, without validating the `StatusCode` /// The decoder routine to apply to the body content. Exceptions are wrapped in exceptions containing the offending content. - member response.InterpretContent<'Decoded>(deserializer : string -> 'Decoded) : Async<'Decoded> = async { + member response.InterpretContent<'Decoded>(deserializer: string -> 'Decoded): Async<'Decoded> = async { let! content = response.Content.ReadAsString() try return deserializer content with e -> @@ -247,16 +247,16 @@ type HttpResponseMessage with /// Asynchronously deserializes the json response content using the supplied `deserializer`, validating the `StatusCode` is `expectedStatusCode` /// check that status code matches supplied code or raise a InvalidHttpResponseException if it doesn't. /// The decoder routine to apply to the body content. Exceptions are wrapped in exceptions containing the offending content. - member response.Interpret<'Decoded>(expectedStatusCode : HttpStatusCode, deserializer : string -> 'Decoded) : Async<'Decoded> = async { + member response.Interpret<'Decoded>(expectedStatusCode: HttpStatusCode, deserializer: string -> 'Decoded): Async<'Decoded> = async { do! response.EnsureStatusCode expectedStatusCode return! 
response.InterpretContent deserializer } module HttpRes = - let private serdes = FsCodec.SystemTextJson.Options.Default |> FsCodec.SystemTextJson.Serdes + let private serdes = FsCodec.SystemTextJson.Serdes.Default /// Deserialize body using default System.Text.Json profile - throw with content details if StatusCode is unexpected or decoding fails - let deserializeExpectedStj<'t> expectedStatusCode (res : HttpResponseMessage) = + let deserializeExpectedStj<'t> expectedStatusCode (res: HttpResponseMessage) = res.Interpret(expectedStatusCode, serdes.Deserialize<'t>) /// Deserialize body using default System.Text.Json profile - throw with content details if StatusCode is not OK or decoding fails diff --git a/feed-consumer/Ingester.fs b/feed-consumer/Ingester.fs index 86d67098e..b47546136 100644 --- a/feed-consumer/Ingester.fs +++ b/feed-consumer/Ingester.fs @@ -4,7 +4,7 @@ open Propulsion.Internal open System open FeedConsumerTemplate.Domain -type Outcome = { added : int; notReady : int; dups : int } +type Outcome = { added: int; notReady: int; dups: int } /// Gathers stats based on the outcome of each Span processed for periodic emission type Stats(log, statsInterval, stateInterval) = @@ -28,21 +28,21 @@ type Stats(log, statsInterval, stateInterval) = module PipelineEvent = - type Item = { id : TicketId; payload : string } - let ofIndexAndItem index (item : Item) = + type Item = { id: TicketId; payload: string } + let ofIndexAndItem index (item: Item) = FsCodec.Core.TimelineEvent.Create( index, "eventType", Unchecked.defaultof<_>, context = item) let [<return: Struct>] (|ItemsForFc|_|) = function - | FsCodec.StreamName.CategoryAndIds (_,[|_ ; FcId.Parse fc|]), (s : Propulsion.Streams.StreamSpan) -> + | FsCodec.StreamName.CategoryAndIds (_,[|_ ; FcId.Parse fc|]), (s: Propulsion.Sinks.Event[]) -> ValueSome (fc, s |> Seq.map (fun e -> Unchecked.unbox e.Context)) | _ -> ValueNone -let handle maxDop stream span ct = Async.startImmediateAsTask ct <| async { - match stream, span with - | PipelineEvent.ItemsForFc (fc, items) -> +let handle maxDop stream events = async { + match stream, events with + | PipelineEvent.ItemsForFc (_fc, items) -> // Take chunks of max 1000 in order to make handler latency less 'lumpy' // What makes sense in terms of a good chunking size will vary depending on the workload in question let ticketIds = seq { for x in items -> x.id } |> Seq.truncate 1000 |> Seq.toArray @@ -50,14 +50,19 @@ let handle maxDop stream span ct = Async.startImmediateAsTask ct <| async { do! Async.Sleep(TimeSpan.FromSeconds 1.) return if i % 3 = 1 then Some 42 else None }) - let! results = Async.Parallel(maybeAccept, maxDegreeOfParallelism=maxDop) + let! results = Async.Parallel(maybeAccept, maxDegreeOfParallelism = maxDop) let ready = results |> Array.choose id let maybeAdd = ready |> Seq.mapi (fun i _x -> async { do! Async.Sleep(TimeSpan.FromSeconds 1.) return if i % 2 = 1 then Some 42 else None }) - let! added = Async.Parallel(maybeAdd, maxDegreeOfParallelism=maxDop) + let!
added = Async.Parallel(maybeAdd, maxDegreeOfParallelism = maxDop) let outcome = { added = Seq.length added; notReady = results.Length - ready.Length; dups = results.Length - ticketIds.Length } - return struct (Propulsion.Streams.SpanResult.PartiallyProcessed ticketIds.Length, outcome) + return Propulsion.Sinks.PartiallyProcessed ticketIds.Length, outcome | x -> return failwithf "Unexpected stream %O" x } + +type Factory private () = + + static member StartSink(log, stats, dop, handle, maxReadAhead) = + Propulsion.Sinks.Factory.StartConcurrent(log, maxReadAhead, dop, handle, stats) diff --git a/feed-consumer/Program.fs b/feed-consumer/Program.fs index d87aaaf84..9696de7a2 100644 --- a/feed-consumer/Program.fs +++ b/feed-consumer/Program.fs @@ -3,7 +3,7 @@ open Serilog open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = @@ -43,7 +43,7 @@ module Args = | FcsDop _ -> "maximum number of FCs to process in parallel. Default: 4" | TicketsDop _ -> "maximum number of Tickets to process in parallel (per FC). Default: 4" | Cosmos _ -> "Cosmos Store parameters." - and Arguments(c : Configuration, p : ParseResults) = + and Arguments(c: Configuration, p: ParseResults) = member val Verbose = p.Contains Parameters.Verbose member val GroupId = p.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) member val SourceId = p.GetResult(SourceId,"default") |> Propulsion.Feed.SourceId.parse @@ -55,7 +55,7 @@ module Args = member val StateInterval = TimeSpan.FromMinutes 5. member val CheckpointInterval = TimeSpan.FromHours 1. member val TailSleepInterval = TimeSpan.FromSeconds 1. - member val Cosmos : CosmosArguments = + member val Cosmos: CosmosArguments = match p.GetSubCommand() with | Cosmos cosmos -> CosmosArguments(c, cosmos) | _ -> missingArg "Must specify cosmos" @@ -78,7 +78,7 @@ module Args = | Timeout _ -> "specify operation timeout in seconds (default: 30)." | Retries _ -> "specify operation retries (default: 9)." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 30)" - and CosmosArguments(c : Configuration, p : ParseResults) = + and CosmosArguments(c: Configuration, p: ParseResults) = let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(Timeout, 30.) 
|> TimeSpan.FromSeconds @@ -98,22 +98,22 @@ module Args = let [<Literal>] AppName = "FeedConsumerTemplate" -let build (args : Args.Arguments) = +let build (args: Args.Arguments) = let cache = Equinox.Cache(AppName, sizeMb = 10) let context = args.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create let sink = - let handle = Ingester.handle args.TicketsDop let stats = Ingester.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - Propulsion.Streams.Default.Config.Start(Log.Logger, args.MaxReadAhead, args.FcsDop, handle, stats, args.StatsInterval) + let handle = Ingester.handle args.TicketsDop + Ingester.Factory.StartSink(Log.Logger, stats, args.FcsDop, handle, args.MaxReadAhead) let source = - let checkpoints = Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Config.log (args.GroupId, args.CheckpointInterval) (context, cache) + let checkpoints = Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Store.log (args.GroupId, args.CheckpointInterval) (context, cache) let feed = ApiClient.TicketsFeed args.BaseUri let source = Propulsion.Feed.FeedSource( Log.Logger, args.StatsInterval, args.SourceId, args.TailSleepInterval, - checkpoints, sink, feed.Poll) - source.Start(fun ct -> source.Pump(feed.ReadTranches, ct)) + checkpoints, sink) + source.Start(feed.ReadTranches, fun t p -> feed.Poll(t, p)) sink, source let run args = async { diff --git a/feed-consumer/Types.fs b/feed-consumer/Types.fs index d9a0f16cd..f36205037 100644 --- a/feed-consumer/Types.fs +++ b/feed-consumer/Types.fs @@ -5,13 +5,13 @@ open FSharp.UMX // see https://github.com/fsprojects/FSharp.UMX - % operator and type [<Measure>] fcId type FcId = string module FcId = - let toString (value : FcId) : string = %value - let parse (value : string) : FcId = let raw = value in % raw + let toString (value: FcId): string = %value + let parse (value: string): FcId = let raw = value in % raw let (|Parse|) = parse type [<Measure>] ticketId type TicketId = string module TicketId = - let toString (value : TicketId) : string = %value - let parse (value : string) : TicketId = let raw = value in % raw + let toString (value: TicketId): string = %value + let parse (value: string): TicketId = let raw = value in % raw let (|Parse|) = parse diff --git a/feed-source/Domain.Tests/IngesterTests.fs b/feed-source/Domain.Tests/IngesterTests.fs index e308eda9b..6042b2906 100644 --- a/feed-source/Domain.Tests/IngesterTests.fs +++ b/feed-source/Domain.Tests/IngesterTests.fs @@ -21,12 +21,12 @@ let genDefault<'t> = ArbMap.defaults |> ArbMap.generate<'t> type Custom = static member GuidStringN() = genDefault |> Gen.map (Guid.toStringN >> GuidStringN) |> Arb.fromGen - static member Item() = genDefault |> Gen.map (fun i -> { i with id = TicketId.genForTest () } : TicketsEpoch.Events.Item) |> Arb.fromGen + static member Item() = genDefault |> Gen.map (fun i -> { i with id = TicketId.genForTest () }: TicketsEpoch.Events.Item) |> Arb.fromGen [<Properties(Arbitrary = [| typeof<Custom> |])>] do() let [<Property>] properties shouldInitialize shouldUseSameSut (GuidStringN trancheId) initialItems items = async { - let store = Equinox.MemoryStore.VolatileStore() |> Config.Store.Memory + let store = Equinox.MemoryStore.VolatileStore() |> Store.Context.Memory // Initialize with some items let initialSut = createSut store trancheId @@ -49,7 +49,7 @@ let [<Property>] properties shouldInitialize shouldUseSameSut (Guid test <@ set initialResult + set result = set independentResult @> } let [<Property(Arbitrary = [| typeof<Custom> |])>] ``lookBack is limited`` (GuidStringN trancheId, genItem) = async { - let store = Equinox.MemoryStore.VolatileStore() |> Config.Store.Memory +
let store = Equinox.MemoryStore.VolatileStore() |> Store.Context.Memory // Initialize with more items than the lookBack accommodates let initialSut = createSut store trancheId let itemCount = @@ -57,7 +57,7 @@ let [|])>] ``lookBack is limi (lookBackLimit+1) * maxPickTicketsPerBatch // Add one more so we end up with an active batchId = lookBackLimit + 1 - let items = Array.init itemCount (fun _ -> genItem () |> fun x -> { x with id = TicketId.genForTest () } : TicketsEpoch.Events.Item) + let items = Array.init itemCount (fun _ -> genItem () |> fun x -> { x with id = TicketId.genForTest () }: TicketsEpoch.Events.Item) test <@ Array.distinct items = items @> let batch0 = Array.take maxPickTicketsPerBatch items let batchesInLookBack = Array.skip maxPickTicketsPerBatch items diff --git a/feed-source/Domain/Domain.fsproj b/feed-source/Domain/Domain.fsproj index 8208e461c..5b76533d7 100644 --- a/feed-source/Domain/Domain.fsproj +++ b/feed-source/Domain/Domain.fsproj @@ -6,7 +6,7 @@ - + @@ -14,9 +14,9 @@ - - - + + + diff --git a/feed-source/Domain/Config.fs b/feed-source/Domain/Store.fs similarity index 80% rename from feed-source/Domain/Config.fs rename to feed-source/Domain/Store.fs index b3cdd3d22..b74243f0c 100644 --- a/feed-source/Domain/Config.fs +++ b/feed-source/Domain/Store.fs @@ -1,19 +1,16 @@ -module FeedSourceTemplate.Domain.Config +module FeedSourceTemplate.Domain.Store let log = Serilog.Log.ForContext("isMetric", true) let createDecider cat = Equinox.Decider.resolve log cat -module EventCodec = +module Codec = - open FsCodec.SystemTextJson - - let private defaultOptions = Options.Create() let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = - CodecJsonElement.Create<'t>(options = defaultOptions) + FsCodec.SystemTextJson.CodecJsonElement.Create<'t>() // options = Options.Default module Memory = - let create codec initial fold store : Equinox.Category<_, _, _> = + let create codec initial fold store: Equinox.Category<_, _, _> = Equinox.MemoryStore.MemoryStoreCategory(store, codec, fold, initial) module Cosmos = @@ -31,6 +28,6 @@ module Cosmos = createCached codec initial fold accessStrategy (context, cache) [] -type Store<'t> = +type Context<'t> = | Memory of Equinox.MemoryStore.VolatileStore<'t> - | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Cache diff --git a/feed-source/Domain/TicketsEpoch.fs b/feed-source/Domain/TicketsEpoch.fs index d039cd3ff..1ff86ca47 100644 --- a/feed-source/Domain/TicketsEpoch.fs +++ b/feed-source/Domain/TicketsEpoch.fs @@ -12,17 +12,17 @@ let streamId = Equinox.StreamId.gen2 FcId.toString TicketsEpochId.toString [] module Events = - type Ingested = { items : Item[] } - and Item = { id : TicketId; payload : string } + type Ingested = { items: Item[] } + and Item = { id: TicketId; payload: string } type Event = | Ingested of Ingested | Closed - | Snapshotted of {| ids : TicketId[]; closed : bool |} + | Snapshotted of {| ids: TicketId[]; closed: bool |} interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.gen + let codec = Store.Codec.gen -let itemId (x : Events.Item) : TicketId = x.id -let (|ItemIds|) : Events.Item[] -> TicketId[] = Array.map itemId +let itemId (x: Events.Item): TicketId = x.id +let (|ItemIds|): Events.Item[] -> TicketId[] = Array.map itemId module Fold = @@ -33,16 +33,16 @@ module Fold = | Events.Closed -> (ids, true) | Events.Snapshotted e -> (e.ids, e.closed) - let fold : State -> Events.Event seq -> State = 
Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve let isOrigin = function Events.Snapshotted _ -> true | _ -> false let toSnapshot (ids, closed) = Events.Snapshotted {| ids = ids; closed = closed |} -let notAlreadyIn (ids : TicketId seq) = +let notAlreadyIn (ids: TicketId seq) = let ids = System.Collections.Generic.HashSet ids - fun (x : Events.Item) -> (not << ids.Contains) x.id + fun (x: Events.Item) -> (not << ids.Contains) x.id -type Result = { accepted : TicketId[]; residual : Events.Item[]; content : TicketId[]; closed : bool } +type Result = { accepted: TicketId[]; residual: Events.Item[]; content: TicketId[]; closed: bool } /// NOTE See eqxPatterns template ItemEpoch for a simplified decide function which does not split ingestion requests with a rigid capacity rule /// NOTE does not deduplicate (distinct) candidates and/or the residuals on the basis that the caller should guarantee that @@ -51,7 +51,7 @@ let decide capacity candidates (currentIds, closed as state) = | true, freshCandidates -> { accepted = [||]; residual = freshCandidates; content = currentIds; closed = closed }, [] | false, [||] -> { accepted = [||]; residual = [||]; content = currentIds; closed = closed }, [] | false, freshItems -> - // NOTE we in some cases end up triggering splitting of a request (or set of requests coalesced in the AsyncBatchingGate) + // NOTE in some cases we end up splitting a request (or a set of requests coalesced in the Batcher) // In some cases it might be better to be a little tolerant and not be rigid about limiting things as // - snapshots should compress well (no major incremental cost for a few more items) // - it's always good to avoid a second store roundtrip @@ -68,29 +68,30 @@ let decide capacity candidates (currentIds, closed as state) = { accepted = addedItemIds; residual = residualItems; content = currentIds; closed = closed }, events /// Service used for the write side; manages ingestion of items into the series of epochs -type IngestionService internal (capacity, resolve : struct (FcId * TicketsEpochId) -> Equinox.Decider<Events.Event, Fold.State>) = +type IngestionService internal (capacity, resolve: FcId * TicketsEpochId -> Equinox.Decider<Events.Event, Fold.State>) = /// Handles idempotent deduplicated insertion into the set of items held within the epoch - member _.Ingest(fcId, epochId, ticketIds) : Async<TicketId[]> = + member _.Ingest(fcId, epochId, ticketIds): Async<TicketId[]> = let decider = resolve (fcId, epochId) // Accept whatever data is in the cache on the basis that we are doing most of the writing so will more often than not // have the correct state already without a roundtrip. What if the data is actually stale? we'll end up needing to resync, // but we need to deal with that as a race condition anyway - decider.Transact(decide capacity ticketIds, Equinox.AllowStale) + decider.Transact(decide capacity ticketIds, Equinox.AnyCachedValue) /// Obtains a complete list of all the tickets in the specified fcId/epochId - member _.ReadTickets(fcId, epochId) : Async<TicketId[]> = + /// NOTE AnyCachedValue option assumes that it's safe to ignore writes from other nodes + member _.ReadTickets(fcId, epochId): Async<TicketId[]> = let decider = resolve (fcId, epochId) - decider.Query(fst, Equinox.AllowStale) + decider.Query(fst, Equinox.AnyCachedValue) -module Config = +module Factory = let private create_ capacity resolve = IngestionService(capacity, streamId >> resolve) let private (|Category|) = function - | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store - | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - let create capacity (Category cat) = Config.createDecider cat Category |> create_ capacity + | Store.Context.Memory store -> Store.Memory.create Events.codec Fold.initial Fold.fold store + | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + let create capacity (Category cat) = Store.createDecider cat Category |> create_ capacity /// Custom Fold and caching logic compared to the IngesterService /// - When reading, we want the full Items @@ -104,20 +105,20 @@ module Reader = | Events.Ingested e -> Array.append es e.items, closed | Events.Closed -> (es, true) | Events.Snapshotted _ -> state // there's nothing useful in the snapshot for us to take - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve - type StateDto = { closed : bool; tickets : Events.Item[] } + type StateDto = { closed: bool; tickets: Events.Item[] } type Service internal (resolve: FcId * TicketsEpochId -> Equinox.Decider<Events.Event, State>) = /// Returns all the items currently held in the stream - member _.Read(fcId, epochId) : Async<StateDto> = + member _.Read(fcId, epochId): Async<StateDto> = let decider = resolve (fcId, epochId) decider.Query(fun (items, closed) -> { closed = closed; tickets = items }) - module Config = + module Factory = let private (|Category|) = function - | Config.Store.Memory store -> Config.Memory.create Events.codec initial fold store - | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createUnoptimized Events.codec initial fold (context, cache) - let create (Category cat) = Service(streamId >> Config.createDecider cat Category) + | Store.Context.Memory store -> Store.Memory.create Events.codec initial fold store - | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createUnoptimized Events.codec initial fold (context, cache) + let create (Category cat) = Service(streamId >> Store.createDecider cat Category)
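(Aside, for orientation rather than as part of the patch: the Transact call above can be read as a three-step load/decide/sync loop. The sketch below merely restates the call with the steps spelled out; decider, decide, capacity and ticketIds are the names in scope in IngestionService, and the load-option semantics are as surfaced by the Equinox 4 RC API this patch adopts.)

    let ingestSketch (decider: Equinox.Decider<Events.Event, Fold.State>) capacity ticketIds = async {
        // 1. Load: Equinox.AnyCachedValue accepts whatever state the cache already holds (no store roundtrip)
        // 2. Decide: decide capacity ticketIds maps that state to a (result, events) pair
        // 3. Sync: the events are appended; a conflicting concurrent write triggers a reload and a re-run of decide
        return! decider.Transact(decide capacity ticketIds, Equinox.AnyCachedValue) }

diff --git a/feed-source/Domain/TicketsIngester.fs b/feed-source/Domain/TicketsIngester.fs index e0e0485e2..022f27974 100644 --- a/feed-source/Domain/TicketsIngester.fs +++ b/feed-source/Domain/TicketsIngester.fs @@ -23,7 +23,7 @@ type internal IdsCache() = /// Maintains active EpochId in a thread-safe manner while ingesting items into the chain of `epochs` indexed by the `series` /// Prior to first add, reads `lookBack` batches to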
seed the cache, in order to minimize the number of duplicated tickets we ingest -type ServiceForFc internal (log : Serilog.ILogger, fcId, epochs : TicketsEpoch.IngestionService, series : TicketsSeries.Service, lookBack, linger) = +type ServiceForFc internal (log: Serilog.ILogger, fcId, epochs: TicketsEpoch.IngestionService, series: TicketsSeries.Service, lookBack, linger) = // Maintains what we believe to be the currently open EpochId. // NOTE not valid/initialized until invocation of `previousTickets.Await()` has completed @@ -32,7 +32,7 @@ type ServiceForFc internal (log : Serilog.ILogger, fcId, epochs : TicketsEpoch.I let effectiveEpochId () = if activeEpochId = uninitializedSentinel then TicketsEpochId.initial else %activeEpochId // establish the pre-existing items from which the tickets cache will be seeded - let loadPreviousEpochs loadDop : Async<TicketId[][]> = async { + let loadPreviousEpochs loadDop: Async<TicketId[][]> = async { match! series.TryReadIngestionEpochId fcId with | None -> log.Information("No starting epoch registered for {fcId}", fcId) @@ -46,7 +46,7 @@ type ServiceForFc internal (log : Serilog.ILogger, fcId, epochs : TicketsEpoch.I return! Async.Parallel(seq { for epochId in (max 0 (%startingId - lookBack)) .. (%startingId - 1) -> readEpoch %epochId }, loadDop) } // Tickets cache - used to maintain a list of tickets that have already been ingested in order to avoid db round-trips - let previousTickets : AsyncCacheCell<IdsCache> = + let previousTickets: AsyncCacheCell<IdsCache> = let aux = async { let! batches = loadPreviousEpochs 4 return IdsCache.Create(Seq.concat batches) } @@ -85,7 +85,7 @@ type ServiceForFc internal (log : Serilog.ILogger, fcId, epochs : TicketsEpoch.I /// Within the processing for a given FC, we have a Scheduler running N streams concurrently /// If each thread works in isolation, they'll conflict with each other as they feed the ticket into the batch in epochs.Ingest - /// Instead, we enable concurrent requests to coalesce by having requests converge in this AsyncBatchingGate + /// Instead, we enable concurrent requests to coalesce by having requests converge in this Batcher /// This has the following critical effects: /// - Traffic to CosmosDB is naturally constrained to a single flight in progress /// (BatchingGate does not release next batch for execution until current has succeeded or throws) @@ -94,7 +94,7 @@ type ServiceForFc internal (log : Serilog.ILogger, fcId, epochs : TicketsEpoch.I /// a) back-off, re-read and retry if there's a concurrent write Optimistic Concurrency Check failure when writing the stream /// b) enter a prolonged period of retries if multiple concurrent writes trigger rate limiting and 429s from CosmosDB /// c) readers will less frequently encounter sustained 429s on the batch - let batchedIngest = AsyncBatchingGate(tryIngest, linger) + let batchedIngest = Equinox.Core.Batching.Batcher(tryIngest, linger) /// Upon startup, we initialize the Tickets cache from recent epochs; we want to kick that process off before our first ingest member _.Initialize() = async { @@ -102,13 +102,13 @@ let! ct = Async.CancellationToken return! previousTickets.Await(ct) |> Async.AwaitTask |> Async.Ignore }
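(Illustrative aside: the coalescing contract is easiest to see in isolation. The following minimal sketch assumes only the dispatch-function-plus-linger constructor and the Execute member that the code above exercises; the int[] payloads and the dispatch/batcher/caller names are placeholders, not template code.)

    let dispatch (reqs: int[][]) = async {
        // every request that lingered into the current batch shares this single roundtrip
        return reqs |> Array.concat |> Array.distinct }
    let batcher = Equinox.Core.Batching.Batcher(dispatch, System.TimeSpan.FromMilliseconds 5.)
    let caller (ids: int[]) = async {
        // concurrent callers converge here; each member of a batch receives the shared
        // result and picks out the subset that pertains to its own request
        let! all = batcher.Execute ids
        return all |> Array.filter (fun id -> ids |> Array.contains id) }

/// Attempts to feed the items into the sequence of epochs. Returns the subset that actually got fed in this time around. - member _.IngestMany(items : TicketsEpoch.Events.Item[]) : Async<TicketId[]> = async { + member _.IngestMany(items: TicketsEpoch.Events.Item[]): Async<TicketId[]> = async { let!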
results = batchedIngest.Execute items return System.Linq.Enumerable.Intersect(Seq.map TicketsEpoch.itemId items, results) |> Array.ofSeq } /// Attempts to feed the item into the sequence of batches. Returns true if the item actually got included into an Epoch this time around. - member _.TryIngest(item : TicketsEpoch.Events.Item) : Async<bool> = async { + member _.TryIngest(item: TicketsEpoch.Events.Item): Async<bool> = async { let! result = batchedIngest.Execute(Array.singleton item) return result |> Array.contains (TicketsEpoch.itemId item) } @@ -118,14 +118,14 @@ let private createFcService (epochs, lookBackLimit) series linger fcId = ServiceForFc(log, fcId, epochs, series, lookBack=lookBackLimit, linger=linger) /// Each ServiceForFc maintains significant state (set of tickets looking back through e.g. 100 epochs), which we obviously need to cache -type Service internal (createForFc : FcId -> ServiceForFc) = +type Service internal (createForFc: FcId -> ServiceForFc) = // It's important we don't risk >1 instance https://andrewlock.net/making-getoradd-on-concurrentdictionary-thread-safe-using-lazy/ // while it would be safe, there would be a risk of incurring the cost of multiple initialization loops let forFc = System.Collections.Concurrent.ConcurrentDictionary<FcId, Lazy<ServiceForFc>>() let build fcId = lazy createForFc fcId - member _.ForFc(fcId) : ServiceForFc = + member _.ForFc(fcId): ServiceForFc = forFc.GetOrAdd(fcId, build).Value type Config() = @@ -134,8 +134,8 @@ let remainingBatchCapacity _candidateItems currentItems = let l = Array.length currentItems max 0 (maxItemsPerEpoch - l) - let epochs = TicketsEpoch.Config.create remainingBatchCapacity store - let series = TicketsSeries.Config.create None store + let epochs = TicketsEpoch.Factory.create remainingBatchCapacity store + let series = TicketsSeries.Factory.create None store let createForFc = createFcService (epochs, lookBackLimit) series linger Service createForFc diff --git a/feed-source/Domain/TicketsSeries.fs b/feed-source/Domain/TicketsSeries.fs index 582b7aefe..5e53f4a3f 100644 --- a/feed-source/Domain/TicketsSeries.fs +++ b/feed-source/Domain/TicketsSeries.fs @@ -12,10 +12,10 @@ let streamId = Equinox.StreamId.gen TicketsSeriesId.toString module Events = type Event = - | Started of {| fcId : FcId; epochId : TicketsEpochId |} - | Snapshotted of {| active : Map<FcId, TicketsEpochId> |} + | Started of {| fcId: FcId; epochId: TicketsEpochId |} + | Snapshotted of {| active: Map<FcId, TicketsEpochId> |} interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.gen<Event> + let codec = Store.Codec.gen<Event> module Fold = @@ -25,44 +25,44 @@ module Fold = let evolve state = function | Events.Started e -> state |> Map.add e.fcId e.epochId | Events.Snapshotted e -> e.active - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve let isOrigin = function Events.Snapshotted _ -> true | _ -> false let toSnapshot s = Events.Snapshotted {| active = s |} -let readEpochId fcId (state : Fold.State) = +let readEpochId fcId (state: Fold.State) = state |> Map.tryFind fcId -let interpret (fcId, epochId) (state : Fold.State) = +let interpret (fcId, epochId) (state: Fold.State) = [if state |> readEpochId fcId |> Option.forall (fun cur -> cur < epochId) && epochId >= TicketsEpochId.initial then yield Events.Started {| fcId = fcId; epochId = epochId |}] -type EpochDto = { fc : FcId; ingestionEpochId : TicketsEpochId } +type EpochDto = { fc: FcId; ingestionEpochId: TicketsEpochId } module EpochDto = - let ofState (s : Fold.State) = 
seq { + let ofState (s: Fold.State) = seq { for x in s -> { fc = x.Key; ingestionEpochId = x.Value } } -type Service internal (seriesId, resolve : TicketsSeriesId -> Equinox.Decider) = +type Service internal (seriesId, resolve: TicketsSeriesId -> Equinox.Decider) = /// Exposes the set of tranches for which data is held, enabling a consumer to crawl the full dataset - member _.ReadIngestionEpochs() : Async = + member _.ReadIngestionEpochs(): Async = let decider = resolve seriesId decider.Query EpochDto.ofState /// Determines the current active epoch for the specified `fcId` - member _.TryReadIngestionEpochId fcId : Async = + member _.TryReadIngestionEpochId fcId: Async = let decider = resolve seriesId decider.Query(readEpochId fcId) /// Mark specified `epochId` as live for the purposes of ingesting TicketIds /// Writers are expected to react to having writes to an epoch denied (due to it being Closed) by anointing the successor via this - member _.MarkIngestionEpochId(fcId, epochId) : Async = + member _.MarkIngestionEpochId(fcId, epochId): Async = let decider = resolve seriesId decider.Transact(interpret (fcId, epochId)) -module Config = +module Factory = let private create_ seriesId resolve = // For now we have a single global sequence. This provides us an extension point should we ever need to reprocess @@ -70,6 +70,6 @@ module Config = let seriesId = defaultArg seriesId TicketsSeriesId.wellKnownId Service(seriesId, streamId >> resolve) let private (|Category|) = function - | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store - | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - let create seriesOverride (Category cat) = create_ seriesOverride (Config.createDecider cat Category) + | Store.Context.Memory store -> Store.Memory.create Events.codec Fold.initial Fold.fold store + | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + let create seriesOverride (Category cat) = create_ seriesOverride (Store.createDecider cat Category) diff --git a/feed-source/Domain/Types.fs b/feed-source/Domain/Types.fs index 53a4a41d4..c03f8bb49 100644 --- a/feed-source/Domain/Types.fs +++ b/feed-source/Domain/Types.fs @@ -4,20 +4,20 @@ open FSharp.UMX // see https://github.com/fsprojects/FSharp.UMX - % operator and module Guid = - let toStringN (g : System.Guid) = g.ToString "N" + let toStringN (g: System.Guid) = g.ToString "N" type [] fcId type FcId = string module FcId = - let toString (value : FcId) : string = %value - let parse (value : string) : FcId = let raw = value in % raw + let toString (value: FcId): string = %value + let parse (value: string): FcId = let raw = value in % raw let (|Parse|) = parse type [] ticketId type TicketId = string module TicketId = - let toString (value : TicketId) : string = %value - let parse (value : string) : TicketId = let raw = value in % raw + let toString (value: TicketId): string = %value + let parse (value: string): TicketId = let raw = value in % raw let (|Parse|) = parse let genForTest () = let g = System.Guid.NewGuid() in Guid.toStringN g |> parse @@ -28,33 +28,33 @@ type TicketsEpochId = int module TicketsEpochId = let unknown = -1 let initial = 0 - let value (value : TicketsEpochId) : int = %value - let next (value : TicketsEpochId) : TicketsEpochId = % (%value + 1) - let toString (value : TicketsEpochId) : 
string = string %value + let value (value: TicketsEpochId): int = %value + let next (value: TicketsEpochId): TicketsEpochId = % (%value + 1) + let toString (value: TicketsEpochId): string = string %value (* Identifies the series pointing to the active TicketsEpochId per FC *) type [<Measure>] ticketsSeriesId type TicketsSeriesId = string module TicketsSeriesId = - let wellKnownId : TicketsSeriesId = % "0" - let toString (value : TicketsSeriesId) : string = %value + let wellKnownId: TicketsSeriesId = % "0" + let toString (value: TicketsSeriesId): string = %value type [<Measure>] ticketsCheckpoint type TicketsCheckpoint = int64 module TicketsCheckpoint = - let initial : TicketsCheckpoint = %0L + let initial: TicketsCheckpoint = %0L - let ofEpochAndOffset (epoch : TicketsEpochId) offset : TicketsCheckpoint = + let ofEpochAndOffset (epoch: TicketsEpochId) offset: TicketsCheckpoint = int64 (TicketsEpochId.value epoch) * 1_000_000L + int64 offset |> UMX.tag - let ofEpochContent (epoch : TicketsEpochId) isClosed count : TicketsCheckpoint = + let ofEpochContent (epoch: TicketsEpochId) isClosed count: TicketsCheckpoint = let epoch, offset = if isClosed then TicketsEpochId.next epoch, 0 else epoch, count ofEpochAndOffset epoch offset - let toEpochAndOffset (value : TicketsCheckpoint) : TicketsEpochId * int = + let toEpochAndOffset (value: TicketsCheckpoint): TicketsEpochId * int = let d, r = System.Math.DivRem(%value, 1_000_000L) - (%int %d : TicketsEpochId), int r + (%int %d: TicketsEpochId), int r
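(Worked example of the packing arithmetic above, illustrative only: the epoch id occupies the millions, the position within the epoch the remainder, so a closed epoch naturally checkpoints at its successor's origin.)

    // ofEpochAndOffset 3 42      => 3L * 1_000_000L + 42L           = 3_000_042L
    // ofEpochContent 3 false 42  => ofEpochAndOffset 3 42           = 3_000_042L  (open epoch: offset = count)
    // ofEpochContent 3 true 42   => ofEpochAndOffset (next 3) 0     = 4_000_000L  (closed: successor's origin)
    // toEpochAndOffset 3_000_042L => Math.DivRem(3_000_042L, 1_000_000L) = (epoch 3, offset 42)

diff --git a/feed-source/FeedApi/Controllers/TicketsController.fs b/feed-source/FeedApi/Controllers/TicketsController.fs index 0c1512c30..75bac42f0 100644 --- a/feed-source/FeedApi/Controllers/TicketsController.fs +++ b/feed-source/FeedApi/Controllers/TicketsController.fs @@ -4,42 +4,42 @@ open Microsoft.AspNetCore.Mvc open FeedSourceTemplate.Domain -type TicketsTranchesDto = { activeEpochs : TrancheReferenceDto[] } - and TrancheReferenceDto = { fc : FcId; epochId : TicketsEpochId } +type TicketsTranchesDto = { activeEpochs: TrancheReferenceDto[] } + and TrancheReferenceDto = { fc: FcId; epochId: TicketsEpochId } -type SliceDto = { closed : bool; tickets : ItemDto[]; position : TicketsCheckpoint; checkpoint : TicketsCheckpoint } - and ItemDto = { id : TicketId; payload : string } +type SliceDto = { closed: bool; tickets: ItemDto[]; position: TicketsCheckpoint; checkpoint: TicketsCheckpoint } + and ItemDto = { id: TicketId; payload: string } module ItemDto = - let ofDto (x : TicketsEpoch.Events.Item) : ItemDto = + let ofDto (x: TicketsEpoch.Events.Item): ItemDto = { id = x.id; payload = x.payload } module Checkpoint = - let ofEpochAndOffset (epoch : TicketsEpochId) (offset : int) = + let ofEpochAndOffset (epoch: TicketsEpochId) (offset: int) = TicketsCheckpoint.ofEpochAndOffset epoch offset - let ofState (epochId : TicketsEpochId) (s : TicketsEpoch.Reader.StateDto) = + let ofState (epochId: TicketsEpochId) (s: TicketsEpoch.Reader.StateDto) = TicketsCheckpoint.ofEpochContent epochId s.closed s.tickets.Length [] -type TicketsController(tickets : TicketsIngester.Service, series : TicketsSeries.Service, epochs : TicketsEpoch.Reader.Service) = +type TicketsController(tickets: TicketsIngester.Service, series: TicketsSeries.Service, epochs: TicketsEpoch.Reader.Service) = inherit ControllerBase() [] - member _.Post(fc : FcId, ticket : TicketId, [] payload) = async { + member _.Post(fc: FcId, ticket: TicketId, [] payload) = async { let!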
_added = tickets.ForFc(fc).TryIngest({ id = ticket; payload = payload}) () } [] - member _.ListTranches() : Async = async { + member _.ListTranches(): Async = async { let! active = series.ReadIngestionEpochs() return { activeEpochs = [| for x in active -> { fc = x.fc; epochId = x.ingestionEpochId } |]} } [] - member _.ReadTranche(fcId : FcId, epoch : TicketsEpochId) : Async = async { + member _.ReadTranche(fcId: FcId, epoch: TicketsEpochId): Async = async { let! state = epochs.Read(fcId, epoch) // TOCONSIDER closed should control cache header let pos, checkpoint = Checkpoint.ofEpochAndOffset epoch 0, Checkpoint.ofState epoch state @@ -47,7 +47,7 @@ type TicketsController(tickets : TicketsIngester.Service, series : TicketsSeries } [] - member _.Poll(fcId : FcId, token : System.Nullable) : Async = async { + member _.Poll(fcId: FcId, token: System.Nullable): Async = async { let pos = if token.HasValue then token.Value else TicketsCheckpoint.initial let epochId, offset = TicketsCheckpoint.toEpochAndOffset pos let! state = epochs.Read(fcId, epochId) diff --git a/feed-source/FeedApi/FeedApi.fsproj b/feed-source/FeedApi/FeedApi.fsproj index 527b8fd55..195c0ae62 100644 --- a/feed-source/FeedApi/FeedApi.fsproj +++ b/feed-source/FeedApi/FeedApi.fsproj @@ -19,7 +19,7 @@ - + diff --git a/feed-source/FeedApi/Infrastructure.fs b/feed-source/FeedApi/Infrastructure.fs index 8a3f09eea..a2085361d 100644 --- a/feed-source/FeedApi/Infrastructure.fs +++ b/feed-source/FeedApi/Infrastructure.fs @@ -6,7 +6,7 @@ open System module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj module Log = @@ -34,11 +34,11 @@ module Sinks = let tags appName = ["app", appName] - let equinoxMetricsOnly tags (l : LoggerConfiguration) = + let equinoxMetricsOnly tags (l: LoggerConfiguration) = l.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.CosmosStore.Prometheus.LogSink(tags)) - let console (configuration : LoggerConfiguration) = + let console (configuration: LoggerConfiguration) = let t = "[{Timestamp:HH:mm:ss} {Level:u1}] {Message:lj} {Properties:j}{NewLine}{Exception}" configuration.WriteTo.Console(theme=Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code, outputTemplate=t) @@ -46,15 +46,15 @@ module Sinks = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() .MinimumLevel.Override("Microsoft.AspNetCore", Serilog.Events.LogEventLevel.Warning) |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c [] - static member private Sinks(configuration : LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = - let configure (a : Configuration.LoggerSinkConfiguration) : unit = + static member private Sinks(configuration: LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = + let configure (a: Configuration.LoggerSinkConfiguration): unit = a.Logger(configureMetricsSinks >> ignore) |> ignore // unconditionally feed all log events to the metrics sinks a.Logger(fun l -> // but filter what gets emitted to the console sink let l = match isMetric with None -> l | Some predicate -> l.Filter.ByExcluding(Func predicate) @@ -63,5 +63,5 @@ type Logging() = configuration.WriteTo.Async(bufferSize=65536, blockWhenFull=true, 
configure=Action<_> configure) [] - static member Sinks(configuration : LoggerConfiguration, configureMetricsSinks, verboseStore) = + static member Sinks(configuration: LoggerConfiguration, configureMetricsSinks, verboseStore) = configuration.Sinks(configureMetricsSinks, Sinks.console, ?isMetric = if verboseStore then None else Some Log.isStoreMetrics) diff --git a/feed-source/FeedApi/Program.fs b/feed-source/FeedApi/Program.fs index 4f8eb9739..4210ae59d 100644 --- a/feed-source/FeedApi/Program.fs +++ b/feed-source/FeedApi/Program.fs @@ -3,7 +3,7 @@ module FeedSourceTemplate.Program open Serilog open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = @@ -26,9 +26,9 @@ module Args = match a with | Verbose -> "request Verbose Logging. Default: off." | Cosmos _ -> "specify CosmosDB input parameters." - and Arguments(config : Configuration, p : ParseResults) = + and Arguments(config: Configuration, p: ParseResults) = member val Verbose = p.Contains Parameters.Verbose - member val Cosmos : CosmosArguments = + member val Cosmos: CosmosArguments = match p.GetSubCommand() with | Parameters.Cosmos cosmos -> CosmosArguments(config, cosmos) | _ -> missingArg "Must specify cosmos" @@ -51,7 +51,7 @@ module Args = | Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 9." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 30." - and CosmosArguments(c : Configuration, p : ParseResults) = + and CosmosArguments(c: Configuration, p: ParseResults) = let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds @@ -73,17 +73,17 @@ let [] AppName = "FeedSourceTemplate" open Microsoft.Extensions.DependencyInjection -let registerSingleton<'t when 't : not struct> (services : IServiceCollection) (s : 't) = +let registerSingleton<'t when 't : not struct> (services: IServiceCollection) (s: 't) = services.AddSingleton s |> ignore [] type AppDependenciesExtensions() = [] - static member AddTickets(services : IServiceCollection, store) : unit = Async.RunSynchronously <| async { + static member AddTickets(services: IServiceCollection, store): unit = Async.RunSynchronously <| async { - let ticketsSeries = Domain.TicketsSeries.Config.create None store - let ticketsEpochs = Domain.TicketsEpoch.Reader.Config.create store + let ticketsSeries = Domain.TicketsSeries.Factory.create None store + let ticketsEpochs = Domain.TicketsEpoch.Reader.Factory.create store let tickets = Domain.TicketsIngester.Config.Create store ticketsSeries |> registerSingleton services @@ -96,15 +96,15 @@ open Microsoft.Extensions.Hosting module CosmosStoreContext = /// Create with default packing and querying policies. 
Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) -let run (args : Args.Arguments) = +let run (args: Args.Arguments) = let cosmos = args.Cosmos let context = cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create let cache = Equinox.Cache(AppName, sizeMb = 2) - let store = FeedSourceTemplate.Domain.Config.Store.Cosmos (context, cache) + let store = FeedSourceTemplate.Domain.Store.Context.Cosmos (context, cache) Hosting.createHostBuilder() .ConfigureServices(fun s -> diff --git a/feed-source/FeedApi/Startup.fs b/feed-source/FeedApi/Startup.fs index 8420b0f9b..c563c4ba3 100644 --- a/feed-source/FeedApi/Startup.fs +++ b/feed-source/FeedApi/Startup.fs @@ -9,10 +9,10 @@ open Serilog type Startup() = - member this.ConfigureServices(services : IServiceCollection) = + member this.ConfigureServices(services: IServiceCollection) = services.AddControllers() |> ignore - member this.Configure(app : IApplicationBuilder, env : IWebHostEnvironment) = + member this.Configure(app: IApplicationBuilder, env: IWebHostEnvironment) = if env.IsDevelopment() then app.UseDeveloperExceptionPage() |> ignore @@ -28,7 +28,7 @@ type Startup() = module Hosting = - let createHostBuilder () : IHostBuilder = + let createHostBuilder (): IHostBuilder = Host.CreateDefaultBuilder() .UseSerilog() .ConfigureWebHostDefaults(fun webBuilder -> diff --git a/periodic-ingester/ApiClient.fs b/periodic-ingester/ApiClient.fs index 755529957..39fc78ec0 100644 --- a/periodic-ingester/ApiClient.fs +++ b/periodic-ingester/ApiClient.fs @@ -11,21 +11,21 @@ open System.Threading open PeriodicIngesterTemplate.Domain [] -type TicketsDto = { tickets : TicketDto[] } - and TicketDto = { id : TicketId; lastUpdated : DateTimeOffset; body : string; } +type TicketsDto = { tickets: TicketDto[] } + and TicketDto = { id: TicketId; lastUpdated: DateTimeOffset; body: string; } -type TicketsClient(client : HttpClient) = +type TicketsClient(client: HttpClient) = let basePath = "api/tickets" - member _.Crawl(ct : CancellationToken) : IAsyncEnumerable array)> = taskSeq { + member _.Crawl(ct: CancellationToken): IAsyncEnumerable[])> = taskSeq { let request = HttpReq.get () |> HttpReq.withPath basePath let ts = System.Diagnostics.Stopwatch.StartNew() let! response = client.Send2(request, ct) let! basePage = response |> HttpRes.deserializeOkStj yield struct (ts.Elapsed, [| for t in basePage.tickets -> - let data : Ingester.TicketData = { lastUpdated = t.lastUpdated; body = t.body } + let data: Ingester.TicketData = { lastUpdated = t.lastUpdated; body = t.body } Ingester.PipelineEvent.sourceItemOfTicketIdAndData (t.id, data) |]) } diff --git a/periodic-ingester/Infrastructure.fs b/periodic-ingester/Infrastructure.fs index fa17972c9..eda503192 100644 --- a/periodic-ingester/Infrastructure.fs +++ b/periodic-ingester/Infrastructure.fs @@ -9,7 +9,7 @@ open System.Threading.Tasks module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj module Log = @@ -34,7 +34,7 @@ type Equinox.CosmosStore.CosmosStoreConnector with module CosmosStoreContext = /// Create with default packing and querying policies. 
Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) @@ -44,19 +44,19 @@ module Sinks = let tags appName = ["app", appName] - let equinoxMetricsOnly tags (l : LoggerConfiguration) = + let equinoxMetricsOnly tags (l: LoggerConfiguration) = l.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.CosmosStore.Prometheus.LogSink(tags)) - let equinoxAndPropulsionConsumerMetrics tags group (l : LoggerConfiguration) = + let equinoxAndPropulsionConsumerMetrics tags group (l: LoggerConfiguration) = l |> equinoxMetricsOnly tags |> fun l -> l.WriteTo.Sink(Propulsion.Prometheus.LogSink(tags, group)) - let equinoxAndPropulsionFeedConsumerMetrics tags source (l : LoggerConfiguration) = + let equinoxAndPropulsionFeedConsumerMetrics tags source (l: LoggerConfiguration) = l |> equinoxAndPropulsionConsumerMetrics tags (Propulsion.Feed.SourceId.toString source) |> fun l -> l.WriteTo.Sink(Propulsion.Feed.Prometheus.LogSink(tags)) - let console (configuration : LoggerConfiguration) = + let console (configuration: LoggerConfiguration) = let t = "[{Timestamp:HH:mm:ss} {Level:u1}] {Message:lj} {Properties:j}{NewLine}{Exception}" configuration.WriteTo.Console(theme=Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code, outputTemplate=t) @@ -64,14 +64,14 @@ module Sinks = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c [] - static member private Sinks(configuration : LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = - let configure (a : Configuration.LoggerSinkConfiguration) : unit = + static member private Sinks(configuration: LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = + let configure (a: Configuration.LoggerSinkConfiguration): unit = a.Logger(configureMetricsSinks >> ignore) |> ignore // unconditionally feed all log events to the metrics sinks a.Logger(fun l -> // but filter what gets emitted to the console sink let l = match isMetric with None -> l | Some predicate -> l.Filter.ByExcluding(Func predicate) @@ -80,19 +80,19 @@ type Logging() = configuration.WriteTo.Async(bufferSize=65536, blockWhenFull=true, configure=Action<_> configure) [] - static member Sinks(configuration : LoggerConfiguration, configureMetricsSinks, verboseStore) = + static member Sinks(configuration: LoggerConfiguration, configureMetricsSinks, verboseStore) = configuration.Sinks(configureMetricsSinks, Sinks.console, ?isMetric = if verboseStore then None else Some Log.isStoreMetrics) type Async with - static member Sleep(t : TimeSpan) : Async = Async.Sleep(int t.TotalMilliseconds) + static member Sleep(t: TimeSpan): Async = Async.Sleep(int t.TotalMilliseconds) /// Re-raise an exception so that the current stacktrace is preserved - static member Raise(e : #exn) : Async<'T> = Async.FromContinuations (fun (_,ec,_) -> ec e) + static member Raise(e: #exn): Async<'T> = Async.FromContinuations (fun (_,ec,_) -> ec e) type StringBuilder with member sb.Appendf fmt = Printf.ksprintf (ignore << sb.Append) fmt member sb.Appendfn fmt = Printf.ksprintf (ignore << sb.AppendLine) fmt - 
static member inline Build(builder : StringBuilder -> unit) = + static member inline Build(builder: StringBuilder -> unit) = let instance = StringBuilder() // TOCONSIDER PooledStringBuilder.GetInstance() builder instance instance.ToString() @@ -108,7 +108,7 @@ module HttpReq = let inline create () = new HttpRequestMessage() /// Assigns a method to an HTTP request. - let inline withMethod (m : HttpMethod) (req : HttpRequestMessage) = + let inline withMethod (m: HttpMethod) (req: HttpRequestMessage) = req.Method <- m req @@ -119,12 +119,12 @@ module HttpReq = let inline post () = create () |> withMethod HttpMethod.Post /// Assigns a path to an HTTP request. - let inline withUri (u : Uri) (req : HttpRequestMessage) = + let inline withUri (u: Uri) (req: HttpRequestMessage) = req.RequestUri <- u req /// Assigns a path to an HTTP request. - let inline withPath (p : string) (req : HttpRequestMessage) = + let inline withPath (p: string) (req: HttpRequestMessage) = req |> withUri (Uri(p, UriKind.Relative)) /// Assigns a path to a Http request using printf-like formatting. @@ -149,7 +149,7 @@ type HttpClient with /// Drop-in replacement for HttpClient.SendAsync which addresses known timeout issues /// /// HttpRequestMessage to be submitted. - member client.Send2(msg : HttpRequestMessage, ct : CancellationToken) = task { + member client.Send2(msg: HttpRequestMessage, ct: CancellationToken) = task { try return! client.SendAsync(msg, ct) // address https://github.com/dotnet/corefx/issues/20296 with :? TaskCanceledException -> @@ -166,19 +166,19 @@ type InvalidHttpResponseException = inherit Exception // TODO: include headers - val private userMessage : string - val private requestMethod : string - val RequestUri : Uri - val RequestBody : string - val StatusCode : HttpStatusCode - val ReasonPhrase : string - val ResponseBody : string + val private userMessage: string + val private requestMethod: string + val RequestUri: Uri + val RequestBody: string + val StatusCode: HttpStatusCode + val ReasonPhrase: string + val ResponseBody: string member e.RequestMethod = HttpMethod(e.requestMethod) - private new (userMessage : string, requestMethod : HttpMethod, requestUri : Uri, requestBody : string, - statusCode : HttpStatusCode, reasonPhrase : string, responseBody : string, - ?innerException : exn) = + private new (userMessage: string, requestMethod: HttpMethod, requestUri: Uri, requestBody: string, + statusCode: HttpStatusCode, reasonPhrase: string, responseBody: string, + ?innerException: exn) = { inherit Exception(message = null, innerException = defaultArg innerException null) ; userMessage = userMessage ; requestMethod = string requestMethod ; RequestUri = requestUri ; RequestBody = requestBody ; @@ -193,13 +193,13 @@ type InvalidHttpResponseException = sb.Appendfn "ResponseBody=%s" (getBodyString e.ResponseBody)) interface ISerializable with - member e.GetObjectData(si : SerializationInfo, sc : StreamingContext) = + member e.GetObjectData(si: SerializationInfo, sc: StreamingContext) = let add name (value:obj) = si.AddValue(name, value) base.GetObjectData(si, sc) ; add "userMessage" e.userMessage ; add "requestUri" e.RequestUri ; add "requestMethod" e.requestMethod ; add "requestBody" e.RequestBody add "statusCode" e.StatusCode ; add "reasonPhrase" e.ReasonPhrase ; add "responseBody" e.ResponseBody - new (si : SerializationInfo, sc : StreamingContext) = + new (si: SerializationInfo, sc: StreamingContext) = let get name = si.GetValue(name, typeof<'a>) :?> 'a { inherit Exception(si, sc) ; userMessage = get 
"userMessage" ; @@ -207,7 +207,7 @@ type InvalidHttpResponseException = StatusCode = get "statusCode" ; ReasonPhrase = get "reasonPhrase" ; ResponseBody = get "responseBody" } - static member Create(userMessage : string, response : HttpResponseMessage, ?innerException : exn) = async { + static member Create(userMessage: string, response: HttpResponseMessage, ?innerException: exn) = async { let request = response.RequestMessage let! responseBodyC = response.Content.ReadAsStringDiapered() |> Async.StartChild let! requestBody = request.Content.ReadAsStringDiapered() @@ -219,13 +219,13 @@ type InvalidHttpResponseException = ?innerException = innerException) } - static member Create(response : HttpResponseMessage, ?innerException : exn) = + static member Create(response: HttpResponseMessage, ?innerException: exn) = InvalidHttpResponseException.Create("HTTP request yielded unexpected response.", response, ?innerException = innerException) type HttpResponseMessage with /// Raises an InvalidHttpResponseException if the response status code does not match expected value. - member response.EnsureStatusCode(expectedStatusCode : HttpStatusCode) = async { + member response.EnsureStatusCode(expectedStatusCode: HttpStatusCode) = async { if response.StatusCode <> expectedStatusCode then let! exn = InvalidHttpResponseException.Create("Http request yielded unanticipated HTTP Result.", response) do raise exn @@ -233,7 +233,7 @@ type HttpResponseMessage with /// Asynchronously deserializes the json response content using the supplied `deserializer`, without validating the `StatusCode` /// The decoder routine to apply to the body content. Exceptions are wrapped in exceptions containing the offending content. - member response.InterpretContent<'Decoded>(deserializer : string -> 'Decoded) : Task<'Decoded> = task { + member response.InterpretContent<'Decoded>(deserializer: string -> 'Decoded): Task<'Decoded> = task { let! content = response.Content.ReadAsString() try return deserializer content with e -> @@ -244,16 +244,16 @@ type HttpResponseMessage with /// Asynchronously deserializes the json response content using the supplied `deserializer`, validating the `StatusCode` is `expectedStatusCode` /// check that status code matches supplied code or raise a InvalidHttpResponseException if it doesn't. /// The decoder routine to apply to the body content. Exceptions are wrapped in exceptions containing the offending content. - member response.Interpret<'Decoded>(expectedStatusCode : HttpStatusCode, deserializer : string -> 'Decoded) : Task<'Decoded> = task { + member response.Interpret<'Decoded>(expectedStatusCode: HttpStatusCode, deserializer: string -> 'Decoded): Task<'Decoded> = task { do! response.EnsureStatusCode expectedStatusCode return! 
response.InterpretContent deserializer } module HttpRes = - let serdes = FsCodec.SystemTextJson.Options.Default |> FsCodec.SystemTextJson.Serdes + let serdes = FsCodec.SystemTextJson.Serdes.Default /// Deserialize body using default System.Text.Json profile - throw with content details if StatusCode is unexpected or decoding fails - let deserializeExpectedStj<'t> expectedStatusCode (res : HttpResponseMessage) = + let deserializeExpectedStj<'t> expectedStatusCode (res: HttpResponseMessage) = res.Interpret(expectedStatusCode, serdes.Deserialize<'t>) /// Deserialize body using default System.Text.Json profile - throw with content details if StatusCode is not OK or decoding fails diff --git a/periodic-ingester/Ingester.fs b/periodic-ingester/Ingester.fs index fd9d0f39c..dc622f36b 100644 --- a/periodic-ingester/Ingester.fs +++ b/periodic-ingester/Ingester.fs @@ -25,7 +25,7 @@ type Stats(log, statsInterval, stateInterval) = log.Information(" Changed {changed} Unchanged {skipped} Stale {stale}", changed, unchanged, stale) stale <- 0; unchanged <- 0; changed <- 0 -type TicketData = { lastUpdated : DateTimeOffset; body : string } +type TicketData = { lastUpdated: DateTimeOffset; body: string } module PipelineEvent = @@ -38,17 +38,22 @@ module PipelineEvent = (* Each item per stream is represented as an event; if multiple events have been found for a given stream, they are delivered together *) let private dummyEventData = let dummyEventType, noBody = "eventType", Unchecked.defaultof<_> in FsCodec.Core.EventData.Create(dummyEventType, noBody) - let sourceItemOfTicketIdAndData struct (id : TicketId, data : TicketData) : Propulsion.Feed.SourceItem = + let sourceItemOfTicketIdAndData struct (id: TicketId, data: TicketData): Propulsion.Feed.SourceItem = { streamName = streamName id; eventData = dummyEventData; context = box data } let [<return: Struct>] (|TicketEvents|_|) = function - | StreamName ticketId, (s : Propulsion.Streams.StreamSpan<_>) -> + | StreamName ticketId, (s: Propulsion.Sinks.Event[]) -> ValueSome (ticketId, s |> Seq.map (fun e -> Unchecked.unbox e.Context)) | _ -> ValueNone -let handle stream span ct = Propulsion.Internal.Async.startImmediateAsTask ct <| async { - match stream, span with +let handle stream events = async { + match stream, events with | PipelineEvent.TicketEvents (ticketId, items) -> // TODO : Ingest the data - return struct (Propulsion.Streams.SpanResult.AllProcessed, IngestionOutcome.Unchanged) + return Propulsion.Sinks.StreamResult.AllProcessed, IngestionOutcome.Unchanged | x -> return failwithf "Unexpected stream %O" x } + +type Factory private () = + + static member StartSink(log: Serilog.ILogger, stats, maxConcurrentStreams, handle, maxReadAhead) = + Propulsion.Sinks.Factory.StartConcurrent(log, maxReadAhead, maxConcurrentStreams, handle, stats)
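(Aside: the handler changes scattered through this file reduce to a compact shape. The sketch below is illustrative rather than template code; it assumes only the StartConcurrent overload and the async handler contract exercised above, and noopHandle, its unit outcome and the parameter names are placeholders.)

    let noopHandle (_stream: FsCodec.StreamName) (_events: Propulsion.Sinks.Event[]) = async {
        // StreamResult.AllProcessed declares the whole span of events handled; the
        // feed-consumer handler above instead returns PartiallyProcessed n to retain a tail
        return Propulsion.Sinks.StreamResult.AllProcessed, () }
    let startSink (log, stats, maxReadAhead, maxConcurrentStreams) =
        Propulsion.Sinks.Factory.StartConcurrent(log, maxReadAhead, maxConcurrentStreams, noopHandle, stats)

diff --git a/periodic-ingester/IngesterPrometheus.fs b/periodic-ingester/IngesterPrometheus.fs index 7948b0f07..8cdb93b0f 100644 --- a/periodic-ingester/IngesterPrometheus.fs +++ b/periodic-ingester/IngesterPrometheus.fs @@ -5,7 +5,7 @@ let baseDesc desc = "PeriodicIngesterTemplate: Ingester " + desc module private Counter = - let private make (config : Prometheus.CounterConfiguration) name desc = + let private make (config: Prometheus.CounterConfiguration) name desc = let ctr = Prometheus.Metrics.CreateCounter(name, desc, config) fun tagValues -> ctr.WithLabels(tagValues).Inc diff --git a/periodic-ingester/PeriodicIngester.fsproj b/periodic-ingester/PeriodicIngester.fsproj index 836390d4e..db83d8fea 100644 ---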
a/periodic-ingester/PeriodicIngester.fsproj +++ b/periodic-ingester/PeriodicIngester.fsproj @@ -17,11 +17,11 @@ - - + + - - + + diff --git a/periodic-ingester/Program.fs b/periodic-ingester/Program.fs index 5437cb90d..2a38b7b25 100644 --- a/periodic-ingester/Program.fs +++ b/periodic-ingester/Program.fs @@ -3,7 +3,7 @@ open Serilog open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = @@ -40,7 +40,7 @@ module Args = | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 8." | TicketsDop _ -> "maximum number of Tickets to process in parallel. Default: 4" | Feed _ -> "Feed parameters." - and Arguments(c : Configuration, p : ParseResults) = + and Arguments(c: Configuration, p: ParseResults) = member val GroupId = p.GetResult(GroupId, "default") member val Verbose = p.Contains Parameters.Verbose @@ -50,7 +50,7 @@ module Args = member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. member val CheckpointInterval = TimeSpan.FromHours 1. - member val Feed : FeedArguments = + member val Feed: FeedArguments = match p.GetSubCommand() with | Feed feed -> FeedArguments(c, feed) | _ -> missingArg "Must specify feed" @@ -63,11 +63,11 @@ module Args = | Group _ -> "specify Api Consumer Group Id. (optional if environment variable API_CONSUMER_GROUP specified)" | BaseUri _ -> "specify Api endpoint. (optional if environment variable API_BASE_URI specified)" | Cosmos _ -> "Cosmos Store parameters." - and FeedArguments(c : Configuration, p : ParseResults) = + and FeedArguments(c: Configuration, p: ParseResults) = member val SourceId = p.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) |> Propulsion.Feed.SourceId.parse member val BaseUri = p.TryGetResult BaseUri |> Option.defaultWith (fun () -> c.BaseUri) |> Uri member val RefreshInterval = TimeSpan.FromHours 1. - member val Cosmos : CosmosArguments = + member val Cosmos: CosmosArguments = match p.GetSubCommand() with | Cosmos cosmos -> CosmosArguments(c, cosmos) | _ -> missingArg "Must specify cosmos" @@ -90,7 +90,7 @@ module Args = | Timeout _ -> "specify operation timeout in seconds (default: 30)." | Retries _ -> "specify operation retries (default: 9)." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 30)" - and CosmosArguments(c : Configuration, p : ParseResults) = + and CosmosArguments(c: Configuration, p: ParseResults) = let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(Timeout, 30.) 
|> TimeSpan.FromSeconds @@ -110,27 +110,26 @@ module Args = let [] AppName = "PeriodicIngesterTemplate" -let build (args : Args.Arguments) = +let build (args: Args.Arguments) = let cache = Equinox.Cache(AppName, sizeMb = 10) let feed = args.Feed let context = feed.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create let sink = let stats = Ingester.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - Propulsion.Streams.Default.Config.Start(Log.Logger, args.MaxReadAhead, args.TicketsDop, Ingester.handle, stats, args.StatsInterval) + Ingester.Factory.StartSink(Log.Logger, stats, args.TicketsDop, Ingester.handle, args.MaxReadAhead) let source = let checkpoints = Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Log.Logger (args.GroupId, args.CheckpointInterval) (context, cache) let client = ApiClient.TicketsFeed feed.BaseUri let source = Propulsion.Feed.PeriodicSource( Log.Logger, args.StatsInterval, feed.SourceId, - client.Crawl, feed.RefreshInterval, checkpoints, - sink) - source.Start() + feed.RefreshInterval, checkpoints, sink) + source.Start(client.Crawl) sink, source // A typical app will likely have health checks etc, implying the wireup would be via `endpoints.MapMetrics()` and thus not use this ugly code directly -let startMetricsServer port : IDisposable = +let startMetricsServer port: IDisposable = let metricsServer = new Prometheus.KestrelMetricServer(port = port) let ms = metricsServer.Start() Log.Information("Prometheus /metrics endpoint on port {port}", port) @@ -140,7 +139,7 @@ let run args = async { let sink, source = build args use _ = source use _ = sink - use _metricsServer : IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj + use _metricsServer: IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj return! 
Async.Parallel [ source.AwaitWithStopOnCancellation(); sink.AwaitWithStopOnCancellation() ] |> Async.Ignore } [] diff --git a/periodic-ingester/Types.fs b/periodic-ingester/Types.fs index 1bb52bb21..68588c3c3 100644 --- a/periodic-ingester/Types.fs +++ b/periodic-ingester/Types.fs @@ -5,8 +5,8 @@ open FSharp.UMX // see https://github.com/fsprojects/FSharp.UMX - % operator and type [] ticketId type TicketId = string module TicketId = - let toString (value : TicketId) : string = %value - let parse (value : string) : TicketId = let raw = value in % raw + let toString (value: TicketId): string = %value + let parse (value: string): TicketId = let raw = value in % raw let (|Parse|) = parse [] diff --git a/propulsion-archiver/Archiver.fsproj b/propulsion-archiver/Archiver.fsproj index 89b98cec0..a81acbf92 100644 --- a/propulsion-archiver/Archiver.fsproj +++ b/propulsion-archiver/Archiver.fsproj @@ -14,8 +14,8 @@ - - + + diff --git a/propulsion-archiver/Handler.fs b/propulsion-archiver/Handler.fs index 7bfe56c93..2c0f8f549 100644 --- a/propulsion-archiver/Handler.fs +++ b/propulsion-archiver/Handler.fs @@ -1,7 +1,7 @@ module ArchiverTemplate.Handler type Stats(log, statsInterval, stateInterval) = - inherit Propulsion.Streams.Sync.Stats(log, statsInterval, stateInterval) + inherit Propulsion.Sync.Stats(log, statsInterval, stateInterval) override _.HandleOk(()) = () override _.HandleExn(log, exn) = @@ -18,7 +18,7 @@ let (|Archivable|NotArchivable|) = function | _ -> NotArchivable -let selectArchivable changeFeedDocument: Propulsion.Streams.StreamEvent<_> seq = seq { +let selectArchivable changeFeedDocument: Propulsion.Sinks.StreamEvent seq = seq { for struct (s, _e) as batch in Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter changeFeedDocument do let (FsCodec.StreamName.Category cat) = s match cat with diff --git a/propulsion-archiver/Infrastructure.fs b/propulsion-archiver/Infrastructure.fs index e35b9d530..f325e3bcd 100644 --- a/propulsion-archiver/Infrastructure.fs +++ b/propulsion-archiver/Infrastructure.fs @@ -4,13 +4,13 @@ module ArchiverTemplate.Infrastructure open Serilog open System -module Config = +module Store = let log = Log.ForContext("isMetric", true) module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj module Log = @@ -25,19 +25,19 @@ module Sinks = let tags appName = ["app", appName] - let equinoxMetricsOnly tags (l : LoggerConfiguration) = + let equinoxMetricsOnly tags (l: LoggerConfiguration) = l.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.CosmosStore.Prometheus.LogSink(tags)) - let equinoxAndPropulsionConsumerMetrics tags (l : LoggerConfiguration) = + let equinoxAndPropulsionConsumerMetrics tags (l: LoggerConfiguration) = l |> equinoxMetricsOnly tags |> fun l -> l.WriteTo.Sink(Propulsion.Prometheus.LogSink(tags)) - let equinoxAndPropulsionCosmosConsumerMetrics tags (l : LoggerConfiguration) = + let equinoxAndPropulsionCosmosConsumerMetrics tags (l: LoggerConfiguration) = l |> equinoxAndPropulsionConsumerMetrics tags |> fun l -> l.WriteTo.Sink(Propulsion.CosmosStore.Prometheus.LogSink(tags)) - let console verbose (configuration : LoggerConfiguration) = + let console verbose (configuration: LoggerConfiguration) = let t = "[{Timestamp:HH:mm:ss} {Level:u1}] {Message:lj} {Properties:j}{NewLine}{Exception}" let t = if verbose then t else 
t.Replace("{Properties}", "") configuration.WriteTo.Console(theme=Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code, outputTemplate=t) @@ -46,14 +46,14 @@ module Sinks = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c [] - static member private Sinks(configuration : LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = - let configure (a : Configuration.LoggerSinkConfiguration) : unit = + static member private Sinks(configuration: LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = + let configure (a: Configuration.LoggerSinkConfiguration): unit = a.Logger(configureMetricsSinks >> ignore) |> ignore // unconditionally feed all log events to the metrics sinks a.Logger(fun l -> // but filter what gets emitted to the console sink let l = match isMetric with None -> l | Some predicate -> l.Filter.ByExcluding(Func predicate) @@ -62,10 +62,10 @@ type Logging() = configuration.WriteTo.Async(bufferSize=65536, blockWhenFull=true, configure=Action<_> configure) [] - static member Configure(configuration : LoggerConfiguration, appName, verbose, (logSyncToConsole, minRu)) = + static member Configure(configuration: LoggerConfiguration, appName, verbose, (logSyncToConsole, minRu)) = configuration.Configure(verbose) |> fun c -> let ingesterLevel = if logSyncToConsole then Events.LogEventLevel.Debug else Events.LogEventLevel.Information - c.MinimumLevel.Override(typeof.FullName, ingesterLevel) + c.MinimumLevel.Override(typeof.FullName, ingesterLevel) |> fun c -> let generalLevel = if verbose then Events.LogEventLevel.Information else Events.LogEventLevel.Warning c.MinimumLevel.Override(typeof.FullName, generalLevel) |> fun c -> let isWriterB = Filters.Matching.FromSource().Invoke diff --git a/propulsion-archiver/Program.fs b/propulsion-archiver/Program.fs index 5e3a3cce2..b3b224e29 100644 --- a/propulsion-archiver/Program.fs +++ b/propulsion-archiver/Program.fs @@ -4,7 +4,7 @@ open Propulsion.CosmosStore open Serilog open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = @@ -40,7 +40,7 @@ module Args = | RuThreshold _ -> "minimum request charge required to log. Default: 0" | MaxKib _ -> "max KiB to submit to Sync operation. Default: 512" | SrcCosmos _ -> "Cosmos input parameters." - and Arguments(c : Configuration, p : ParseResults) = + and Arguments(c: Configuration, p: ParseResults) = member val Verbose = p.Contains Parameters.Verbose member val SyncLogging = p.Contains SyncVerbose, p.TryGetResult RuThreshold member val PrometheusPort = p.TryGetResult PrometheusPort @@ -50,15 +50,15 @@ module Args = member val MaxBytes = p.GetResult(MaxKib, 512) * 1024 member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. 
- member val Source : CosmosSourceArguments = + member val Source: CosmosSourceArguments = match p.GetSubCommand() with | SrcCosmos cosmos -> CosmosSourceArguments(c, cosmos) | _ -> missingArg "Must specify cosmos for SrcCosmos" member x.DestinationArchive = x.Source.Archive member x.MonitoringParams() = let srcC = x.Source - let leases : Microsoft.Azure.Cosmos.Container = - let dstC : CosmosSinkArguments = srcC.Archive + let leases: Microsoft.Azure.Cosmos.Container = + let dstC: CosmosSinkArguments = srcC.Archive match srcC.LeaseContainer, dstC.LeaseContainerId with | _, None -> srcC.ConnectLeases() | None, Some dc -> dstC.ConnectLeases dc @@ -103,7 +103,7 @@ module Args = | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 30." | DstCosmos _ -> "CosmosDb Sink parameters." - and CosmosSourceArguments(c : Configuration, p : ParseResults) = + and CosmosSourceArguments(c: Configuration, p: ParseResults) = let discovery = p.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult CosmosSourceParameters.ConnectionMode let timeout = p.GetResult(CosmosSourceParameters.Timeout, 5.) |> TimeSpan.FromSeconds @@ -116,7 +116,7 @@ module Args = member val FromTail = p.Contains CosmosSourceParameters.FromTail member val MaxItems = p.TryGetResult MaxItems - member val LagFrequency : TimeSpan = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes + member val LagFrequency: TimeSpan = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes member val LeaseContainer = p.TryGetResult CosmosSourceParameters.LeaseContainer member val Verbose = p.Contains Verbose member private _.ConnectLeases containerId = connector.CreateUninitialized(database, containerId) @@ -147,7 +147,7 @@ module Args = | Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 0." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." - and CosmosSinkArguments(c : Configuration, p : ParseResults) = + and CosmosSinkArguments(c: Configuration, p: ParseResults) = let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(CosmosSinkParameters.Timeout, 5.) |> TimeSpan.FromSeconds @@ -162,7 +162,7 @@ module Args = member _.ConnectLeases containerId = connector.CreateUninitialized(database, containerId) /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args - let parse tryGetConfigValue argv : Arguments = + let parse tryGetConfigValue argv: Arguments = let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name let parser = ArgumentParser.Create(programName=programName) Arguments(Configuration tryGetConfigValue, parser.ParseCommandLine argv) @@ -172,15 +172,15 @@ let [] AppName = "ArchiverTemplate" module CosmosStoreContext = /// Create with default packing and querying policies. 
Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = // while the default maxJsonBytes is 30000 - we are prepared to incur significant extra write RU charges in order to maximize packing let maxEvents, maxJsonBytes = 100_000, 100_000 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents, tipMaxJsonLength=maxJsonBytes) -let build (args : Args.Arguments, log) = +let build (args: Args.Arguments, log) = let archiverSink = let context = args.DestinationArchive.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create - let eventsContext = Equinox.CosmosStore.Core.EventsContext(context, Config.log) + let eventsContext = Equinox.CosmosStore.Core.EventsContext(context, Store.log) CosmosStoreSink.Start(log, args.MaxReadAhead, eventsContext, args.MaxWriters, args.StatsInterval, args.StateInterval, purgeInterval=TimeSpan.FromMinutes 10., maxBytes = args.MaxBytes) let source = @@ -191,7 +191,7 @@ let build (args : Args.Arguments, log) = archiverSink, source // A typical app will likely have health checks etc, implying the wireup would be via `endpoints.MapMetrics()` and thus not use this ugly code directly -let startMetricsServer port : IDisposable = +let startMetricsServer port: IDisposable = let metricsServer = new Prometheus.KestrelMetricServer(port = port) let ms = metricsServer.Start() Log.Information("Prometheus /metrics endpoint on port {port}", port) @@ -199,14 +199,14 @@ let startMetricsServer port : IDisposable = open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException -let run (args : Args.Arguments) = async { - let log = (Log.forGroup args.ProcessorName).ForContext() +let run (args: Args.Arguments) = async { + let log = (Log.forGroup args.ProcessorName).ForContext() let sink, source = build (args, log) - use _metricsServer : IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj + use _metricsServer: IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj return! 
[| Async.AwaitKeyboardInterruptAsTaskCanceledException() source.AwaitWithStopOnCancellation() sink.AwaitWithStopOnCancellation() - |] |> Async.Parallel |> Async.Ignore } + |] |> Async.Parallel |> Async.Ignore } [] let main argv = diff --git a/propulsion-consumer/Consumer.fsproj b/propulsion-consumer/Consumer.fsproj index 1781fc1cc..5ec261ac5 100644 --- a/propulsion-consumer/Consumer.fsproj +++ b/propulsion-consumer/Consumer.fsproj @@ -15,8 +15,8 @@ - - + + diff --git a/propulsion-consumer/Examples.fs b/propulsion-consumer/Examples.fs index d73c88da6..05fea9e92 100644 --- a/propulsion-consumer/Examples.fs +++ b/propulsion-consumer/Examples.fs @@ -8,18 +8,6 @@ open System.Collections.Concurrent open System.Collections.Generic open System.Threading -module EventCodec = - - /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to `log` citing the `stream` and content) - let tryDecode (codec : IEventCodec<_, _, _>) (log : ILogger) streamName (x : ITimelineEvent) = - match codec.TryDecode x with - | ValueNone -> - if log.IsEnabled Serilog.Events.LogEventLevel.Debug then - log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true) - .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName) - ValueNone - | x -> x - /// This more advanced sample shows processing >1 category of events, and maintaining derived state based on it // When processing streamwise, the handler is passed deduplicated spans of events per stream, with a guarantee of max 1 // in-flight request per stream, which allows one to avoid having to consider any explicit concurrency management @@ -34,11 +22,11 @@ module MultiStreams = let [] Category = "SavedForLater" - type Item = { skuId : SkuId; dateSaved : DateTimeOffset } + type Item = { skuId: SkuId; dateSaved: DateTimeOffset } - type Added = { skus : SkuId []; dateSaved : DateTimeOffset } - type Removed = { skus : SkuId [] } - type Merged = { items : Item [] } + type Added = { skus: SkuId []; dateSaved: DateTimeOffset } + type Removed = { skus: SkuId [] } + type Merged = { items: Item [] } type Event = /// Inclusion of another set of state in this one @@ -50,13 +38,24 @@ module MultiStreams = /// Clearing of the list | Cleared interface TypeShape.UnionContract.IUnionContract - let codec = EventCodec.gen - let tryDecode = EventCodec.tryDecode codec + + module Reactions = + + let dec = Streams.Codec.gen + let [] (|StreamName|_|) = function + | FsCodec.StreamName.CategoryAndId (Category, id) -> ValueSome id + | _ -> ValueNone + let [] (|Parse|_|) = function + | struct (StreamName id, _) & Streams.Decode dec events -> ValueSome (id, events) + | _ -> ValueNone // NB - these schemas reflect the actual storage formats and hence need to be versioned with care module Favorites = let [] Category = "Favorites" + let [] (|StreamName|_|) = function + | FsCodec.StreamName.CategoryAndId (Category, id) -> ValueSome id + | _ -> ValueNone type Favorited = { date: DateTimeOffset; skuId: SkuId } type Unfavorited = { skuId: SkuId } @@ -65,58 +64,60 @@ module MultiStreams = | Favorited of Favorited | Unfavorited of Unfavorited interface TypeShape.UnionContract.IUnionContract - let codec = EventCodec.gen - let tryDecode = EventCodec.tryDecode codec + + module Reactions = + + let dec = Streams.Codec.gen + let [] (|Parse|_|) = function + | struct (StreamName id, _) & Streams.Decode dec events -> ValueSome (id, events) + | _ -> ValueNone type Stat = Faves of 
int | Saves of int | OtherCategory of string * int // Maintains a denormalized view cache per stream (in-memory, unbounded). TODO: keep in a persistent store type InMemoryHandler() = - let log = Log.ForContext() // events are handled concurrently across streams. Only a single Handle call will be in progress at any time per stream let faves, saves = ConcurrentDictionary>(), ConcurrentDictionary() // The StreamProjector mechanism trims any events that have already been handled based on the in-memory state - let (|FavoritesEvents|SavedForLaterEvents|OtherCategory|) (streamName, span : Propulsion.Streams.StreamSpan) = - let decode tryDecode = span |> Seq.chooseV (tryDecode log streamName) |> Array.ofSeq - match streamName with - | StreamName.CategoryAndId (Favorites.Category, id) -> + let (|FavoritesEvents|SavedForLaterEvents|OtherCategory|) = function + | Favorites.Reactions.Parse (id, events) -> let s = match faves.TryGetValue id with true, value -> value | false, _ -> HashSet() - FavoritesEvents (id, s, decode Favorites.tryDecode) - | StreamName.CategoryAndId (SavedForLater.Category, id) -> + FavoritesEvents (id, s, events) + | SavedForLater.Reactions.Parse (id, events) -> let s = match saves.TryGetValue id with true, value -> value | false, _ -> [] - SavedForLaterEvents (id, s, decode SavedForLater.tryDecode) - | StreamName.CategoryAndId (categoryName, _) -> OtherCategory (categoryName, Seq.length span) + SavedForLaterEvents (id, s, events) + | StreamName.CategoryAndId (categoryName, _), events -> OtherCategory struct (categoryName, Array.length events) // each event is guaranteed to only be supplied once by virtue of having been passed through the Streams Scheduler - member _.Handle(streamName : StreamName, span : Propulsion.Streams.StreamSpan<_>, ct) = task { - match streamName, span with + member _.Handle(streamName: StreamName, events: Propulsion.Sinks.Event[]) = async { + match struct (streamName, events) with | OtherCategory (cat, count) -> - return struct (Propulsion.Streams.SpanResult.AllProcessed, OtherCategory (cat, count)) + return Propulsion.Sinks.StreamResult.AllProcessed, OtherCategory (cat, count) | FavoritesEvents (id, s, xs) -> - let folder (s : HashSet<_>) = function + let folder (s: HashSet<_>) = function | Favorites.Favorited e -> s.Add(e.skuId) |> ignore; s | Favorites.Unfavorited e -> s.Remove(e.skuId) |> ignore; s - faves.[id] <- Array.fold folder s xs - return Propulsion.Streams.SpanResult.AllProcessed, Faves xs.Length + faves[id] <- Array.fold folder s xs + return Propulsion.Sinks.StreamResult.AllProcessed, Faves xs.Length | SavedForLaterEvents (id, s, xs) -> - let remove (skus : SkuId seq) (s : _ list) = + let remove (skus: SkuId seq) (s: _ list) = let removing = (HashSet skus).Contains s |> List.where (not << removing) - let add skus (s : _ list) = + let add skus (s: _ list) = List.append (List.ofArray skus) s let folder s = function | SavedForLater.Cleared -> [] | SavedForLater.Added e -> add e.skus s | SavedForLater.Removed e -> remove e.skus s | SavedForLater.Merged e -> s |> remove [| for x in e.items -> x.skuId |] |> add [| for x in e.items -> x.skuId |] - saves.[id] <- (s, xs) ||> Array.fold folder - return Propulsion.Streams.SpanResult.AllProcessed, Saves xs.Length + saves[id] <- (s, xs) ||> Array.fold folder + return Propulsion.Sinks.StreamResult.AllProcessed, Saves xs.Length } // Dump stats relating to how much information is being held - note it's likely for requests to be in flight during the call - member
_.DumpState(log: ILogger) = log.Information(" Favorited {total}/{users}", faves.Values |> Seq.sumBy (fun x -> x.Count), faves.Count) log.Information(" SavedForLater {total}/{users}", saves.Values |> Seq.sumBy (fun x -> x.Length), saves.Count) @@ -143,16 +144,15 @@ module MultiStreams = log.Information(" Ignored Categories {ignoredCats}", Seq.truncate 5 otherCats.StatsDescending) otherCats.Clear() - let private parseStreamEvents(res : Confluent.Kafka.ConsumeResult<_, _>) : seq = + let private parseStreamEvents(res: Confluent.Kafka.ConsumeResult<_, _>): seq = Propulsion.Codec.NewtonsoftJson.RenderedSpan.parse res.Message.Value - let start (config : FsKafka.KafkaConsumerConfig, degreeOfParallelism : int) = + let start (config: FsKafka.KafkaConsumerConfig, degreeOfParallelism: int) = let log, handler = Log.ForContext(), InMemoryHandler() let stats = Stats(log, TimeSpan.FromSeconds 30., TimeSpan.FromMinutes 5.) - Propulsion.Kafka.StreamsConsumer.Start( - log, config, parseStreamEvents, (fun s ss ct -> handler.Handle(s, ss, ct)), degreeOfParallelism, - stats, TimeSpan.FromMinutes 10., - logExternalState=handler.DumpState) + Propulsion.Kafka.Factory.StartConcurrent( + log, config, parseStreamEvents, + degreeOfParallelism, (fun s ss -> handler.Handle(s, ss)), stats, logExternalState = handler.DumpState) /// When using parallel or batch processing, items are not grouped by stream but there are no constraints on the concurrency module MultiMessages = @@ -160,10 +160,9 @@ module MultiMessages = // We'll use the same event parsing logic, though it works a little differently open MultiStreams - type Message = Fave of Favorites.Event | Save of SavedForLater.Event | OtherCat of name : string * count : int | Unclassified of messageKey : string + type Message = Fave of Favorites.Event | Save of SavedForLater.Event | OtherCat of name: string * count: int | Unclassified of messageKey: string type Processor() = - let log = Log.ForContext() let mutable favorited, unfavorited, saved, removed, cleared = 0, 0, 0, 0, 0 let cats, keys = Stats.CatStats(), ConcurrentDictionary() @@ -171,22 +170,24 @@ module MultiMessages = // and waits for the work to complete before calling this // `ParallelScheduler` ensures that only one call to `logExternalStats` will take place at a time, but it's highly likely that the execution will // overlap with a call to `Handle` (which makes for a slight race condition between the capturing of the values in the log statement and the resetting) - member _.DumpStats(log : ILogger) = + member _.DumpStats(log: ILogger) = log.Information("Favorited {f} Unfavorited {u} Saved {s} Removed {r} Cleared {c} Keys {keyCount} Categories {@catCount}", favorited, unfavorited, saved, removed, cleared, keys.Count, Seq.truncate 5 cats.StatsDescending) favorited <- 0; unfavorited <- 0; saved <- 0; removed <- 0; cleared <- 0; cats.Clear(); keys.Clear() /// Handles various category / eventType / payload types as produced by Equinox.Tool - member private _.Interpret(streamName : StreamName, spanJson) : seq = seq { - let span = Propulsion.Codec.NewtonsoftJson.RenderedSpan.Parse spanJson - let decode tryDecode wrap = Propulsion.Codec.NewtonsoftJson.RenderedSpan.enum span |> Seq.chooseV (fun struct (_s, e) -> e |> tryDecode log streamName |> ValueOption.map wrap) - match streamName with - | StreamName.CategoryAndId (Favorites.Category, _) -> yield! decode Favorites.tryDecode Fave - | StreamName.CategoryAndId (SavedForLater.Category, _) -> yield! 
decode SavedForLater.tryDecode Save - | StreamName.CategoryAndId (otherCategoryName, _) -> yield OtherCat (otherCategoryName, Seq.length span.e) } + member private _.Interpret(streamName: StreamName, spanJson): seq = seq { + let raw = + Propulsion.Codec.NewtonsoftJson.RenderedSpan.Parse spanJson + |> Propulsion.Codec.NewtonsoftJson.RenderedSpan.enum + |> Seq.map ValueTuple.snd + match struct (streamName, Array.ofSeq raw) with + | Favorites.Reactions.Parse (_, events) -> yield! events |> Seq.map Fave + | SavedForLater.Reactions.Parse (_, events) -> yield! events |> Seq.map Save + | StreamName.CategoryAndId (otherCategoryName, _), events -> yield OtherCat (otherCategoryName, events.Length) } // NB can be called in parallel, so must be thread-safe - member x.Handle(streamName : StreamName, spanJson : string) = + member x.Handle(streamName: StreamName, spanJson: string) = for x in x.Interpret(streamName, spanJson) do match x with | Fave (Favorites.Favorited _) -> Interlocked.Increment &favorited |> ignore @@ -202,7 +203,7 @@ module MultiMessages = /// Starts a consumer that consumes a topic in streamed mode /// StreamingConsumer manages the parallelism, spreading individual messages out to Async tasks /// Optimal where each Message naturally lends itself to independent processing with no ordering constraints - static member Start(config : FsKafka.KafkaConsumerConfig, degreeOfParallelism : int) = + static member Start(config: FsKafka.KafkaConsumerConfig, degreeOfParallelism: int) = let log, processor = Log.ForContext(), Processor() let handleMessage (KeyValue (streamName, eventsSpan)) _ct = task { processor.Handle(StreamName.parse streamName, eventsSpan) } Propulsion.Kafka.ParallelConsumer.Start( @@ -212,9 +213,9 @@ module MultiMessages = type BatchesSync = /// Starts a consumer that consumes a topic in a batched mode, based on a source defined by `config` /// Processing runs as a single Async computation per batch, which can work well where parallelism is not relevant - static member Start(config : FsKafka.KafkaConsumerConfig) = + static member Start(config: FsKafka.KafkaConsumerConfig) = let log = Log.ForContext() - let handleBatch (msgs : Confluent.Kafka.ConsumeResult<_, _>[]) = async { + let handleBatch (msgs: Confluent.Kafka.ConsumeResult<_, _>[]) = async { let processor = Processor() for m in msgs do processor.Handle(StreamName.parse m.Message.Key, m.Message.Value) @@ -226,10 +227,10 @@ module MultiMessages = /// Processing fans out as parallel Async computations (limited to max `degreeOfParallelism` concurrent tasks) /// The messages in the batch emanate from a single partition and are all in sequence /// notably useful where there's an ability to share some processing cost across a batch of work by doing the processing in phases - static member Start(config : FsKafka.KafkaConsumerConfig, degreeOfParallelism : int) = + static member Start(config: FsKafka.KafkaConsumerConfig, degreeOfParallelism: int) = let log = Log.ForContext() let dop = new SemaphoreSlim(degreeOfParallelism) - let handleBatch (msgs : Confluent.Kafka.ConsumeResult<_, _>[]) = async { + let handleBatch (msgs: Confluent.Kafka.ConsumeResult<_, _>[]) = async { let processor = Processor() let!
_ = Async.Parallel(seq { for m in msgs -> async { processor.Handle(StreamName.parse m.Message.Key, m.Message.Value) } |> dop.Throttle }) processor.DumpStats log } diff --git a/propulsion-consumer/Infrastructure.fs b/propulsion-consumer/Infrastructure.fs index 1c0fdb4ee..ff9312659 100644 --- a/propulsion-consumer/Infrastructure.fs +++ b/propulsion-consumer/Infrastructure.fs @@ -5,20 +5,31 @@ open Serilog open System open System.Threading.Tasks -module EventCodec = +module Streams = - open FsCodec.SystemTextJson - - let private defaultOptions = Options.Create() - let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = - Codec.Create<'t>(options = defaultOptions) + let private renderBody (x: Propulsion.Sinks.EventBody) = System.Text.Encoding.UTF8.GetString(x.Span) + // Uses the supplied codec to decode the supplied event record (iff at LogEventLevel.Debug, failures are logged, citing `stream` and `.Data`) + let private tryDecode<'E> (codec: Propulsion.Sinks.Codec<'E>) (streamName: FsCodec.StreamName) event = + match codec.TryDecode event with + | ValueNone when Log.IsEnabled Serilog.Events.LogEventLevel.Debug -> + Log.ForContext("eventData", renderBody event.Data) + .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, event.EventType, streamName) + ValueNone + | x -> x + let (|Decode|) codec struct (stream, events: Propulsion.Sinks.Event[]): 'E[] = + events |> Propulsion.Internal.Array.chooseV (tryDecode codec stream) + + module Codec = + + let gen<'E when 'E :> TypeShape.UnionContract.IUnionContract> : Propulsion.Sinks.Codec<'E> = + FsCodec.SystemTextJson.Codec.Create<'E>() // options = Options.Default module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj type FSharp.Control.Async with - static member AwaitTaskCorrect (task : Task<'T>) : Async<'T> = + static member AwaitTaskCorrect (task: Task<'T>): Async<'T> = Async.FromContinuations <| fun (k, ek, _) -> task.ContinueWith (fun (t:Task<'T>) -> if t.IsFaulted then @@ -29,7 +40,7 @@ type FSharp.Control.Async with elif t.IsCompleted then k t.Result else ek(Exception "invalid Task state!")) |> ignore - static member AwaitTaskCorrect (task : Task) : Async = + static member AwaitTaskCorrect (task: Task): Async = Async.FromContinuations <| fun (k,ek,_) -> task.ContinueWith (fun (t:Task) -> if t.IsFaulted then @@ -44,19 +55,19 @@ type FSharp.Control.Async with type System.Threading.SemaphoreSlim with /// Wait for capacity to be available. Returns false if timeout elapsed before this was achieved - member semaphore.Await(timeout : TimeSpan) : Async = async { + member semaphore.Await(timeout: TimeSpan): Async = async { let! ct = Async.CancellationToken return! semaphore.WaitAsync(timeout, ct) |> Async.AwaitTaskCorrect } /// Wait indefinitely for capacity to be available on the semaphore - member semaphore.Await() : Async = async { + member semaphore.Await(): Async = async { let! ct = Async.CancellationToken return! semaphore.WaitAsync(ct) |> Async.AwaitTaskCorrect } /// Throttling wrapper that waits asynchronously until the semaphore has available capacity - member semaphore.Throttle(workflow : Async<'T>) : Async<'T> = async { + member semaphore.Throttle(workflow: Async<'T>): Async<'T> = async { do! semaphore.Await() try return!
workflow finally semaphore.Release() |> ignore @@ -66,7 +77,7 @@ type System.Threading.SemaphoreSlim with type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c diff --git a/propulsion-consumer/Program.fs b/propulsion-consumer/Program.fs index 77b97cfa3..66562c5c9 100644 --- a/propulsion-consumer/Program.fs +++ b/propulsion-consumer/Program.fs @@ -3,7 +3,7 @@ open Serilog open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = @@ -41,7 +41,7 @@ module Args = | MaxDop _ -> "maximum number of items to process in parallel. Default: 8" | Verbose _ -> "request verbose logging." - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) member val Group = p.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) @@ -52,14 +52,14 @@ module Args = member val Verbose = p.Contains Verbose /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args - let parse tryGetConfigValue argv : Arguments = + let parse tryGetConfigValue argv: Arguments = let programName = Reflection.Assembly.GetEntryAssembly().GetName().Name let parser = ArgumentParser.Create(programName = programName) Arguments(Configuration tryGetConfigValue, parser.ParseCommandLine argv) let [] AppName = "ConsumerTemplate" -let start (args : Args.Arguments) = +let start (args: Args.Arguments) = let c = FsKafka.KafkaConsumerConfig.Create( AppName, diff --git a/propulsion-cosmos-reactor/Contract.fs b/propulsion-cosmos-reactor/Contract.fs index 51b3ce953..bd412c96e 100644 --- a/propulsion-cosmos-reactor/Contract.fs +++ b/propulsion-cosmos-reactor/Contract.fs @@ -3,12 +3,12 @@ module ReactorTemplate.Contract /// A single Item in the list type ItemInfo = { id: int; order: int; title: string; completed: bool } -type SummaryInfo = { items : ItemInfo[] } +type SummaryInfo = { items: ItemInfo[] } -let render (item: Todo.Events.ItemData) : ItemInfo = +let render (item: Todo.Events.ItemData): ItemInfo = { id = item.id order = item.order title = item.title completed = item.completed } -let ofState (state : Todo.Fold.State) : SummaryInfo = +let ofState (state: Todo.Fold.State): SummaryInfo = { items = [| for x in state.items -> render x |]} diff --git a/propulsion-cosmos-reactor/Infrastructure.fs b/propulsion-cosmos-reactor/Infrastructure.fs index 398d58691..cc0c85b7b 100644 --- a/propulsion-cosmos-reactor/Infrastructure.fs +++ b/propulsion-cosmos-reactor/Infrastructure.fs @@ -6,31 +6,38 @@ open System module Guid = - let inline toStringN (x : Guid) = x.ToString "N" + let inline toStringN (x: Guid) = x.ToString "N" /// ClientId strongly typed id; represented internally as a Guid; not used for storage so rendering is not significant type ClientId = Guid and [] clientId module ClientId = - let toString (value : ClientId) : string = Guid.toStringN %value - let parse (value : string) : ClientId = let raw = Guid.Parse value in % raw + let toString (value: ClientId): string = 
Guid.toStringN %value + let parse (value: string): ClientId = let raw = Guid.Parse value in % raw let (|Parse|) = parse module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj -module EventCodec = +module Streams = - /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to `log` citing the `stream` and content) - let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = - match codec.TryDecode x with - | ValueNone -> - if Log.IsEnabled Serilog.Events.LogEventLevel.Debug then - Log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true) - .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName) + let private renderBody (x: Propulsion.Sinks.EventBody) = System.Text.Encoding.UTF8.GetString(x.Span) + // Uses the supplied codec to decode the supplied event record (iff at LogEventLevel.Debug, failures are logged, citing `stream` and `.Data`) + let private tryDecode<'E> (codec: Propulsion.Sinks.Codec<'E>) (streamName: FsCodec.StreamName) event = + match codec.TryDecode event with + | ValueNone when Log.IsEnabled Serilog.Events.LogEventLevel.Debug -> + Log.ForContext("eventData", renderBody event.Data) + .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, event.EventType, streamName) ValueNone | x -> x + let (|Decode|) codec struct (stream, events: Propulsion.Sinks.Event[]): 'E[] = + events |> Propulsion.Internal.Array.chooseV (tryDecode codec stream) + + module Codec = + + let gen<'E when 'E :> TypeShape.UnionContract.IUnionContract> : Propulsion.Sinks.Codec<'E> = + FsCodec.SystemTextJson.Codec.Create<'E>() // options = Options.Default module Log = @@ -68,7 +75,7 @@ module ConnectorExtensions = module CosmosStoreContext = /// Create with default packing and querying policies. 
Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) @@ -78,19 +85,19 @@ module Sinks = let tags appName = ["app", appName] - let equinoxMetricsOnly tags (l : LoggerConfiguration) = + let equinoxMetricsOnly tags (l: LoggerConfiguration) = l.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.CosmosStore.Prometheus.LogSink(tags)) - let equinoxAndPropulsionConsumerMetrics tags group (l : LoggerConfiguration) = + let equinoxAndPropulsionConsumerMetrics tags group (l: LoggerConfiguration) = l |> equinoxMetricsOnly tags |> fun l -> l.WriteTo.Sink(Propulsion.Prometheus.LogSink(tags, group)) - let equinoxAndPropulsionCosmosConsumerMetrics tags group (l : LoggerConfiguration) = + let equinoxAndPropulsionCosmosConsumerMetrics tags group (l: LoggerConfiguration) = l |> equinoxAndPropulsionConsumerMetrics tags group |> fun l -> l.WriteTo.Sink(Propulsion.CosmosStore.Prometheus.LogSink(tags)) - let console (configuration : LoggerConfiguration) = + let console (configuration: LoggerConfiguration) = let t = "[{Timestamp:HH:mm:ss} {Level:u1}] {Message:lj} {Properties:j}{NewLine}{Exception}" configuration.WriteTo.Console(theme=Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code, outputTemplate=t) @@ -98,14 +105,14 @@ module Sinks = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c [] - static member private Sinks(configuration : LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = - let configure (a : Configuration.LoggerSinkConfiguration) : unit = + static member private Sinks(configuration: LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = + let configure (a: Configuration.LoggerSinkConfiguration): unit = a.Logger(configureMetricsSinks >> ignore) |> ignore // unconditionally feed all log events to the metrics sinks a.Logger(fun l -> // but filter what gets emitted to the console sink let l = match isMetric with None -> l | Some predicate -> l.Filter.ByExcluding(Func predicate) @@ -114,5 +121,5 @@ type Logging() = configuration.WriteTo.Async(bufferSize=65536, blockWhenFull=true, configure=System.Action<_> configure) [] - static member Sinks(configuration : LoggerConfiguration, configureMetricsSinks, verboseStore) = + static member Sinks(configuration: LoggerConfiguration, configureMetricsSinks, verboseStore) = configuration.Sinks(configureMetricsSinks, Sinks.console, ?isMetric = if verboseStore then None else Some Log.isStoreMetrics) diff --git a/propulsion-cosmos-reactor/Program.fs b/propulsion-cosmos-reactor/Program.fs index aa55ab01e..5542ca088 100644 --- a/propulsion-cosmos-reactor/Program.fs +++ b/propulsion-cosmos-reactor/Program.fs @@ -3,7 +3,7 @@ module ReactorTemplate.Program open Serilog open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = @@ -33,7 +33,7 @@ module Args = | MaxReadAhead _ -> "maximum number of batches to let 
processing get ahead of completion. Default: 2." | MaxWriters _ -> "maximum number of concurrent streams on which to process at any time. Default: 8." | Cosmos _ -> "specify CosmosDB input parameters" - and Arguments(c : Configuration, p : ParseResults) = + and Arguments(c: Configuration, p: ParseResults) = let maxReadAhead = p.GetResult(MaxReadAhead, 2) let maxConcurrentProcessors = p.GetResult(MaxWriters, 8) member val Verbose = p.Contains Parameters.Verbose @@ -74,7 +74,7 @@ module Args = | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." | MaxItems _ -> "maximum item count to request from the feed. Default: unlimited." | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: 1" - and CosmosArguments(c : Configuration, p : ParseResults) = + and CosmosArguments(c: Configuration, p: ParseResults) = let discovery = p.TryGetResult CosmosParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds @@ -91,7 +91,7 @@ module Args = member _.Verbose = p.Contains Verbose member private _.ConnectLeases() = connector.CreateUninitialized(database, leaseContainerId) member x.MonitoringParams() = - let leases : Microsoft.Azure.Cosmos.Container = x.ConnectLeases() + let leases: Microsoft.Azure.Cosmos.Container = x.ConnectLeases() Log.Information("ChangeFeed Leases Database {db} Container {container}. MaxItems limited to {maxItems}", leases.Database.Id, leases.Id, Option.toNullable maxItems) if fromTail then Log.Warning("(If new projector group) Skipping projection of all existing events.") @@ -99,34 +99,34 @@ module Args = member x.ConnectStoreAndMonitored() = connector.ConnectStoreAndMonitored(database, containerId) /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args - let parse tryGetConfigValue argv : Arguments = + let parse tryGetConfigValue argv: Arguments = let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name let parser = ArgumentParser.Create(programName=programName) Arguments(Configuration tryGetConfigValue, parser.ParseCommandLine argv) let [] AppName = "ReactorTemplate" -let build (args : Args.Arguments) = +let build (args: Args.Arguments) = let processorName, maxReadAhead, maxConcurrentStreams = args.ProcessorParams() let client, monitored = args.Cosmos.ConnectStoreAndMonitored() let sink = let store = let context = client |> CosmosStoreContext.create let cache = Equinox.Cache(AppName, sizeMb = 10) - Config.Store.Cosmos (context, cache) + Store.Context.Cosmos (context, cache) let stats = Reactor.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - let handle = Reactor.Config.createHandler store - Propulsion.Streams.Default.Config.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, handle, stats, args.StatsInterval) + let handle = Reactor.Factory.createHandler store + Reactor.Factory.StartSink(Log.Logger, stats, maxConcurrentStreams, handle, maxReadAhead) let source = - let parseFeedDoc = Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents Reactor.categoryFilter + let parseFeedDoc = Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumCategoryEvents Reactor.reactionCategories let observer = Propulsion.CosmosStore.CosmosStoreSource.CreateObserver(Log.Logger, sink.StartIngester, Seq.collect parseFeedDoc) let leases, startFromTail, maxItems, 
lagFrequency = args.Cosmos.MonitoringParams() Propulsion.CosmosStore.CosmosStoreSource.Start(Log.Logger, monitored, leases, processorName, observer, - startFromTail = startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) + startFromTail = startFromTail, ?maxItems = maxItems, lagReportFreq = lagFrequency) sink, source // A typical app will likely have health checks etc, implying the wireup would be via `endpoints.MapMetrics()` and thus not use this ugly code directly -let startMetricsServer port : IDisposable = +let startMetricsServer port: IDisposable = let metricsServer = new Prometheus.KestrelMetricServer(port = port) let ms = metricsServer.Start() Log.Information("Prometheus /metrics endpoint on port {port}", port) @@ -136,7 +136,7 @@ open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException let run args = async { let sink, source = build args - use _metricsServer : IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj + use _metricsServer: IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj return! [| Async.AwaitKeyboardInterruptAsTaskCanceledException() source.AwaitWithStopOnCancellation() sink.AwaitWithStopOnCancellation() diff --git a/propulsion-cosmos-reactor/Reactor.fs b/propulsion-cosmos-reactor/Reactor.fs index ea3cb6e35..38c23e678 100644 --- a/propulsion-cosmos-reactor/Reactor.fs +++ b/propulsion-cosmos-reactor/Reactor.fs @@ -2,7 +2,7 @@ module ReactorTemplate.Reactor type Outcome = Metrics.Outcome -/// Gathers stats based on the outcome of each Span processed for emission, at intervals controlled by `StreamsConsumer` +/// Gathers stats based on the Outcome of each Span as it's processed, for periodic emission via DumpStats() type Stats(log, statsInterval, stateInterval) = inherit Propulsion.Streams.Stats(log, statsInterval, stateInterval) @@ -24,28 +24,30 @@ type Stats(log, statsInterval, stateInterval) = ok <- 0; skipped <- 0; na <- 0 // map from external contract to internal contract defined by the aggregate -let toSummaryEventData ( x : Contract.SummaryInfo) : TodoSummary.Events.SummaryData = +let toSummaryEventData (x: Contract.SummaryInfo): TodoSummary.Events.SummaryData = { items = [| for x in x.items -> { id = x.id; order = x.order; title = x.title; completed = x.completed } |] } -let categoryFilter = Todo.Reactions.categoryFilter +let reactionCategories = Todo.Reactions.categories -let handle - (sourceService : Todo.Service) - (summaryService : TodoSummary.Service) - stream (span : Propulsion.Streams.StreamSpan<_>) ct = Propulsion.Internal.Async.startImmediateAsTask ct <| async { - match stream, span with - | Todo.Reactions.Parse (clientId, events) when events |> Seq.exists Todo.Reactions.impliesStateChange -> +let handle (sourceService: Todo.Service) (summaryService: TodoSummary.Service) stream events = async { + match struct (stream, events) with + | Todo.Reactions.ImpliesStateChange clientId -> let! version', summary = sourceService.QueryWithVersion(clientId, Contract.ofState) match! 
summaryService.TryIngest(clientId, version', toSummaryEventData summary) with - | true -> return struct (Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Ok (1, span.Length - 1)) - | false -> return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Skipped span.Length - | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } + | true -> return Propulsion.Sinks.StreamResult.OverrideNextIndex version', Outcome.Ok (1, events.Length - 1) + | false -> return Propulsion.Sinks.StreamResult.OverrideNextIndex version', Outcome.Skipped events.Length + | _ -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.NotApplicable events.Length } -module Config = +module Factory = let createHandler store = - let srcService = Todo.Config.create store - let dstService = TodoSummary.Config.create store + let srcService = Todo.Factory.create store + let dstService = TodoSummary.Factory.create store handle srcService dstService + +type Factory private () = + + static member StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead) = + Propulsion.Sinks.Factory.StartConcurrent(log, maxReadAhead, maxConcurrentStreams, handle, stats) diff --git a/propulsion-cosmos-reactor/Reactor.fsproj b/propulsion-cosmos-reactor/Reactor.fsproj index f1a184cbd..4b6dabdac 100644 --- a/propulsion-cosmos-reactor/Reactor.fsproj +++ b/propulsion-cosmos-reactor/Reactor.fsproj @@ -9,7 +9,7 @@ - + @@ -20,10 +20,10 @@ - - + + - + diff --git a/propulsion-cosmos-reactor/ReactorMetrics.fs b/propulsion-cosmos-reactor/ReactorMetrics.fs index 5161f6682..d41ab35ad 100644 --- a/propulsion-cosmos-reactor/ReactorMetrics.fs +++ b/propulsion-cosmos-reactor/ReactorMetrics.fs @@ -5,9 +5,9 @@ let baseDesc desc = "ReactorTemplate: Reactor " + desc module private Counter = - let private make (config : Prometheus.CounterConfiguration) name desc = + let private make (config: Prometheus.CounterConfiguration) name desc = let ctr = Prometheus.Metrics.CreateCounter(name, desc, config) - fun tagValues (c : float) -> ctr.WithLabels(tagValues).Inc(c) + fun tagValues (c: float) -> ctr.WithLabels(tagValues).Inc(c) let create (tagNames, tagValues) stat desc = let config = Prometheus.CounterConfiguration(LabelNames = tagNames) @@ -18,12 +18,12 @@ let observeOutcomeStatus s = Counter.create ([| "status" |],[| s |]) [] type Outcome = /// Handler processed the span, with counts of used vs unused known event types - | Ok of used : int * unused : int + | Ok of used: int * unused: int /// Handler processed the span, but idempotency checks resulted in no writes being applied; includes count of decoded events - | Skipped of count : int + | Skipped of count: int /// Handler determined the events were not relevant to its duties and performed no actions /// e.g. 
wrong category, events that don't imply a state change - | NotApplicable of count : int + | NotApplicable of count: int let observeReactorOutcome = function | Outcome.Ok (used, unused) -> observeOutcomeStatus "ok" (float used) diff --git a/propulsion-cosmos-reactor/Config.fs b/propulsion-cosmos-reactor/Store.fs similarity index 74% rename from propulsion-cosmos-reactor/Config.fs rename to propulsion-cosmos-reactor/Store.fs index 8f4a9f1bf..dd05313ef 100644 --- a/propulsion-cosmos-reactor/Config.fs +++ b/propulsion-cosmos-reactor/Store.fs @@ -1,17 +1,14 @@ -module ReactorTemplate.Config +module ReactorTemplate.Store let log = Serilog.Log.ForContext("isMetric", true) let createDecider cat = Equinox.Decider.resolve log cat -module EventCodec = +module Codec = open FsCodec.SystemTextJson - let private defaultOptions = Options.Create() - let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = - Codec.Create<'t>(options = defaultOptions) - let genJe<'t when 't :> TypeShape.UnionContract.IUnionContract> = - CodecJsonElement.Create<'t>(options = defaultOptions) + let genJsonElement<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>() // options = Options.Default module Cosmos = @@ -28,5 +25,5 @@ module Cosmos = createCached codec initial fold accessStrategy (context, cache) [] -type Store = - | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache +type Context = + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Cache diff --git a/propulsion-cosmos-reactor/Todo.fs b/propulsion-cosmos-reactor/Todo.fs index 6e0b538c1..921113033 100644 --- a/propulsion-cosmos-reactor/Todo.fs +++ b/propulsion-cosmos-reactor/Todo.fs @@ -1,7 +1,5 @@ module ReactorTemplate.Todo -open Propulsion.Internal - let [] Category = "Todos" let streamId = Equinox.StreamId.gen ClientId.toString let [] (|StreamName|_|) = function FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> ValueSome clientId | _ -> ValueNone @@ -9,10 +7,10 @@ let [] (|StreamName|_|) = function FsCodec.StreamName.CategoryAn // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = - type ItemData = { id : int; order : int; title : string; completed : bool } - type DeletedData = { id : int } - type ClearedData = { nextId : int } - type SnapshotData = { nextId : int; items : ItemData[] } + type ItemData = { id: int; order: int; title: string; completed: bool } + type DeletedData = { id: int } + type ClearedData = { nextId: int } + type SnapshotData = { nextId: int; items: ItemData[] } type Event = | Added of ItemData | Updated of ItemData @@ -20,24 +18,25 @@ module Events = | Cleared of ClearedData | Snapshotted of SnapshotData interface TypeShape.UnionContract.IUnionContract - let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJe + let codec = Store.Codec.genJsonElement module Reactions = - let categoryFilter = function Category -> true | _ -> false - let (|Decode|) (stream, span : Propulsion.Streams.StreamSpan<_>) = - span |> Array.chooseV (EventCodec.tryDecode Events.codec stream) - let [] (|Parse|_|) = function - | (StreamName clientId, _) & Decode events -> ValueSome struct (clientId, events) - | _ -> ValueNone + let categories = [| Category |] + /// Allows us to skip producing summaries for events that we know won't result in an externally discernable change to the summary output - let private
impliesStateChange = function Events.Snapshotted _ -> false | _ -> true + + let dec = Streams.Codec.gen + let [] (|ImpliesStateChange|_|) = function + | struct (StreamName clientId, _) & Streams.Decode dec events when Array.exists impliesStateChange events -> ValueSome clientId + | _ -> ValueNone /// Types and mapping logic used to maintain relevant State based on Events observed on the Todo List Stream module Fold = /// Present state of the Todo List as inferred from the Events we've seen to date - type State = { items : Events.ItemData list; nextId : int } + type State = { items: Events.ItemData list; nextId: int } /// State implied by the absence of any events on this stream let initial = { items = []; nextId = 0 } /// Compute State change implied by a given Event @@ -48,24 +47,24 @@ module Fold = | Events.Cleared e -> { nextId = e.nextId; items = [] } | Events.Snapshotted s -> { nextId = s.nextId; items = List.ofArray s.items } /// Folds a set of events from the store into a given `state` - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve /// Determines whether a given event represents a checkpoint that implies we don't need to see any preceding events let isOrigin = function Events.Cleared _ | Events.Snapshotted _ -> true | _ -> false /// Prepares an Event that encodes all relevant aspects of a State such that `evolve` can rehydrate a complete State from it let toSnapshot state = Events.Snapshotted { nextId = state.nextId; items = Array.ofList state.items } /// Defines operations that a Controller or Projector can perform on a Todo List -type Service internal (resolve : ClientId -> Equinox.Decider) = +type Service internal (resolve: ClientId -> Equinox.Decider) = /// Load and render the state - member _.QueryWithVersion(clientId, render : Fold.State -> 'res) : Async = + member _.QueryWithVersion(clientId, render: Fold.State -> 'res): Async = let decider = resolve clientId // Establish the present state of the Stream, project from that (using QueryEx so we can determine the version in effect) decider.QueryEx(fun c -> c.Version, render c.State) -module Config = +module Factory = let private (|Category|) = function - | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - let create (Category cat) = Service(streamId >> Config.createDecider cat Category) + | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + let create (Category cat) = Service(streamId >> Store.createDecider cat Category)
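Worth noting in the Todo.fs hunk above: the aggregate now carries two codec flavours side by side. Events.codec (Store.Codec.genJsonElement) targets the JsonElement event bodies that Equinox.CosmosStore persists, while Reactions.dec (Streams.Codec.gen, from this template's Infrastructure module) targets the byte-based bodies that Propulsion delivers to the reactor. A minimal sketch of one union contract serving both sides, using the FsCodec.SystemTextJson API this patch moves to (the Event type here is illustrative, not from the patch):

    module ContractSketch =
        type Event =
            | Added of {| id: int |}
            interface TypeShape.UnionContract.IUnionContract
        // store side: JsonElement bodies, as Equinox.CosmosStore stores them
        let storeCodec = FsCodec.SystemTextJson.CodecJsonElement.Create<Event>()
        // reactor side: ReadOnlyMemory<byte> bodies, as Propulsion.Sinks delivers them
        let streamsCodec: Propulsion.Sinks.Codec<Event> = FsCodec.SystemTextJson.Codec.Create<Event>()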
diff --git a/propulsion-cosmos-reactor/TodoSummary.fs b/propulsion-cosmos-reactor/TodoSummary.fs index 7c6143c21..dd2c400e1 100644 --- a/propulsion-cosmos-reactor/TodoSummary.fs +++ b/propulsion-cosmos-reactor/TodoSummary.fs @@ -7,28 +7,28 @@ let streamId = Equinox.StreamId.gen ClientId.toString module Events = type ItemData = { id: int; order: int; title: string; completed: bool } - type SummaryData = { items : ItemData[] } - type IngestedData = { version : int64; value : SummaryData } + type SummaryData = { items: ItemData[] } + type IngestedData = { version: int64; value: SummaryData } type Event = | Ingested of IngestedData interface TypeShape.UnionContract.IUnionContract - let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJe + let codec = Store.Codec.genJsonElement module Fold = - type State = { version : int64; value : Events.SummaryData option } + type State = { version: int64; value: Events.SummaryData option } let initial = { version = -1L; value = None } let evolve _state = function | Events.Ingested e -> { version = e.version; value = Some e.value } - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve let toSnapshot state = Events.Ingested { version = state.version; value = state.value.Value } -let decide (version : int64, value : Events.SummaryData) (state : Fold.State) = +let decide (version: int64, value: Events.SummaryData) (state: Fold.State) = if state.version >= version then false, [] else true, [Events.Ingested { version = version; value = value }] type Item = { id: int; order: int; title: string; completed: bool } -let render : Fold.State -> Item[] = function +let render: Fold.State -> Item[] = function | { value = Some { items = xs} } -> [| for x in xs -> { id = x.id @@ -38,10 +38,10 @@ let render : Fold.State -> Item[] = function | _ -> [||] /// Defines the operations that the Read side of a Controller and/or the Reactor can perform on the 'aggregate' -type Service internal (resolve : ClientId -> Equinox.Decider) = +type Service internal (resolve: ClientId -> Equinox.Decider) = /// Returns false if the ingestion was rejected due to being an older version of the data than is presently being held - member _.TryIngest(clientId, version, value) : Async = + member _.TryIngest(clientId, version, value): Async = let decider = resolve clientId decider.Transact(decide (version, value)) @@ -49,8 +49,8 @@ type Service internal (resolve : ClientId -> Equinox.Decider Config.Cosmos.createRollingState Events.codecJe Fold.initial Fold.fold Fold.toSnapshot (context, cache) - let create (Category cat) = Service(streamId >> Config.createDecider cat Category) + | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createRollingState Events.codec Fold.initial Fold.fold Fold.toSnapshot (context, cache) + let create (Category cat) = Service(streamId >> Store.createDecider cat Category)
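The decide function in the TodoSummary.fs hunk above is the entire write side of the summary ingestion: a version-gated upsert, which is what makes redundant or out-of-order deliveries harmless. A rough walk-through of the intended behaviour (the values are invented purely for illustration; this snippet is not part of the patch):

    let current: Fold.State = { version = 3L; value = None }
    decide (2L, { items = [||] }) current // (false, []) - incoming summary is stale, so no write
    decide (4L, { items = [||] }) current // (true, [Events.Ingested ...]) - newer, so ingested

Service.TryIngest surfaces that boolean through decider.Transact, which in turn is what lets the Reactor handler earlier in this patch report Outcome.Skipped for idempotent re-deliveries.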
member val LambdaCodePath = lambdaCodePath -type IndexerStack(scope, id, props : IndexerStackProps) as stack = +type IndexerStack(scope, id, props: IndexerStackProps) as stack = inherit Stack(scope, id, props) - let props : DynamoStoreIndexerLambdaProps = + let props: DynamoStoreIndexerLambdaProps = { storeStreamArn = props.StoreStreamArn regionName = stack.Region; indexTableName = props.IndexTableName memorySize = props.MemorySize; batchSize = props.BatchSize; timeout = props.Timeout diff --git a/propulsion-dynamostore-cdk/NotifierStack.fs b/propulsion-dynamostore-cdk/NotifierStack.fs index 8b4026447..3fd90759e 100644 --- a/propulsion-dynamostore-cdk/NotifierStack.fs +++ b/propulsion-dynamostore-cdk/NotifierStack.fs @@ -6,20 +6,20 @@ open System type NotifierStackProps ( // DynamoDB Streams Source ARN (for Index Table) - indexStreamArn : string, + indexStreamArn: string, // Target SNS FIFO Topic Arn (Default: Create fresh topic) - updatesTopicArn : string option, + updatesTopicArn: string option, // Path for published binaries for Propulsion.DynamoStore.Notifier - lambdaCodePath : string, + lambdaCodePath: string, // Lambda memory allocation - default 128 MB - ?memorySize : int, + ?memorySize: int, // Lambda max batch size - default 10 - ?batchSize : int, + ?batchSize: int, // Lambda timeout - default 10s - ?timeout : TimeSpan) = + ?timeout: TimeSpan) = inherit StackProps() member val IndexStreamArn = indexStreamArn member val UpdatesTopicArn = updatesTopicArn @@ -28,10 +28,10 @@ type NotifierStackProps member val Timeout = defaultArg timeout (TimeSpan.FromSeconds 10) member val LambdaCodePath = lambdaCodePath -type NotifierStack(scope, id, props : NotifierStackProps) as stack = +type NotifierStack(scope, id, props: NotifierStackProps) as stack = inherit Stack(scope, id, props) - let props : DynamoStoreNotifierLambdaProps = + let props: DynamoStoreNotifierLambdaProps = { indexStreamArn = props.IndexStreamArn updatesTarget = match props.UpdatesTopicArn with Some ta -> UpdatesTarget.ExistingTopic ta | None -> UpdatesTarget.Default memorySize = props.MemorySize; batchSize = props.BatchSize; timeout = props.Timeout diff --git a/propulsion-hotel/Domain.Tests/Arbitraries.fs b/propulsion-hotel/Domain.Tests/Arbitraries.fs index b3f651acf..81ff168a7 100644 --- a/propulsion-hotel/Domain.Tests/Arbitraries.fs +++ b/propulsion-hotel/Domain.Tests/Arbitraries.fs @@ -7,7 +7,7 @@ open FsCheck.FSharp /// For unit tests, we only ever use the Domain Services wired to a MemoryStore, so we default Store to that type Generators = - static member MemoryStore = Gen.constant (Config.Store.Memory <| Equinox.MemoryStore.VolatileStore()) + static member MemoryStore = Gen.constant (Store.Context.Memory <| Equinox.MemoryStore.VolatileStore()) static member Store = Arb.fromGen Generators.MemoryStore [ |])>] do () diff --git a/propulsion-hotel/Domain.Tests/GroupCheckoutFlow.fs b/propulsion-hotel/Domain.Tests/GroupCheckoutFlow.fs index aaaeb2415..6fd773ffe 100644 --- a/propulsion-hotel/Domain.Tests/GroupCheckoutFlow.fs +++ b/propulsion-hotel/Domain.Tests/GroupCheckoutFlow.fs @@ -7,9 +7,9 @@ open Reactor open Swensen.Unquote [] -let ``Happy path including Reaction`` (store, groupCheckoutId, paymentId, stays : _ []) = async { - let staysService = GuestStay.Config.create store - let sut = GroupCheckout.Config.create store +let ``Happy path including Reaction`` (store, groupCheckoutId, paymentId, stays: _ []) = async { + let staysService = GuestStay.Factory.create store + let sut = GroupCheckout.Factory.create store
let processor = GroupCheckoutProcess.Service(staysService, sut, 2) let mutable charged = 0 for stayId, chargeId, PositiveInt amount in stays do diff --git a/propulsion-hotel/Domain/Domain.fsproj b/propulsion-hotel/Domain/Domain.fsproj index c0e59affc..509b42d6d 100644 --- a/propulsion-hotel/Domain/Domain.fsproj +++ b/propulsion-hotel/Domain/Domain.fsproj @@ -9,14 +9,14 @@ - - - - + + + + - + diff --git a/propulsion-hotel/Domain/GroupCheckout.fs b/propulsion-hotel/Domain/GroupCheckout.fs index 37ae04431..0ac5f1f14 100644 --- a/propulsion-hotel/Domain/GroupCheckout.fs +++ b/propulsion-hotel/Domain/GroupCheckout.fs @@ -8,30 +8,30 @@ let [] (|StreamName|_|) = function module Events = - type CheckoutResidual = { stay : GuestStayId; residual : decimal } + type CheckoutResidual = { stay: GuestStayId; residual: decimal } type Event = /// There may be more than one of these; each represents the user requesting the addition of a group of Stays into the group checkout /// NOTE in the case where >=1 of the nominated Stays has already been checked out, the pending stay will be taken off the list via /// a MergesFailed event rather than the typical StaysMerged outcome - | StaysSelected of {| at : DateTimeOffset; stays : GuestStayId[] |} + | StaysSelected of {| at: DateTimeOffset; stays: GuestStayId[] |} /// Represents the workflow's record of a) confirming checkout of the Stay has been completed and b) the balance to be paid, if any - | StaysMerged of {| residuals : CheckoutResidual[] |} + | StaysMerged of {| residuals: CheckoutResidual[] |} /// Indicates that it was not possible for the Selected stay to be transferred to the group as requested /// i.e. Guest was checked out via another group, or independently, prior to being able to grab it. - | MergesFailed of {| stays : GuestStayId[] |} + | MergesFailed of {| stays: GuestStayId[] |} /// Records payments for this group (a group cannot be confirmed until all outstanding charges for all Merged stays have been Paid) - | Paid of {| at : DateTimeOffset; paymentId : PaymentId; amount : decimal |} + | Paid of {| at: DateTimeOffset; paymentId: PaymentId; amount: decimal |} /// Records confirmation of completion of the group checkout. No further Stays can be Selected, nor should any balance be outstanding - | Confirmed of {| at : DateTimeOffset |} + | Confirmed of {| at: DateTimeOffset |} interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.gen + let codec = Store.Codec.gen module Fold = open Events [] - type State = { pending : GuestStayId[]; checkedOut : CheckoutResidual[]; failed : GuestStayId[]; balance : decimal; payments : PaymentId[]; completed : bool } + type State = { pending: GuestStayId[]; checkedOut: CheckoutResidual[]; failed: GuestStayId[]; balance: decimal; payments: PaymentId[]; completed: bool } let initial = { pending = [||]; checkedOut = [||]; failed = [||]; payments = [||]; balance = 0m; completed = false } @@ -52,22 +52,22 @@ module Fold = payments = [| yield!
state.payments; e.paymentId |] } | Confirmed _ -> { state with completed = true } - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve /// Manages the Workflow aspect, mapping Fold.State to an Action surfacing information relevant for reactions processing module Flow = type State = | MergeStays of GuestStayId[] - | Ready of balance : decimal + | Ready of balance: decimal | Finished - let nextAction : Fold.State -> State = function + let nextAction: Fold.State -> State = function | { completed = true } -> Finished | { pending = xs } when not (Array.isEmpty xs) -> MergeStays xs | { balance = bal } -> Ready bal - let decide handleAction (state : Fold.State) : Async<'R * Events.Event list> = + let decide handleAction (state: Fold.State): Async<'R * Events.Event list> = nextAction state |> handleAction module Decide = @@ -93,36 +93,36 @@ module Decide = | { payments = paymentIds } when paymentIds |> Array.contains paymentId -> [] | _ -> [ Events.Paid {| at = at; paymentId = paymentId; amount = amount |} ] -type Service internal (resolve : GroupCheckoutId -> Equinox.Decider) = +type Service internal (resolve: GroupCheckoutId -> Equinox.Decider) = - member _.Merge(id, stays, ?at) : Async= + member _.Merge(id, stays, ?at): Async= let decider = resolve id decider.Transact(Decide.add (defaultArg at DateTimeOffset.UtcNow) stays, Flow.nextAction) - member _.Pay(id, paymentId, amount, ?at) : Async = + member _.Pay(id, paymentId, amount, ?at): Async = let decider = resolve id decider.Transact(Decide.pay paymentId amount (defaultArg at DateTimeOffset.UtcNow)) - member _.Confirm(id, ?at) : Async= + member _.Confirm(id, ?at): Async= let decider = resolve id decider.Transact(Decide.confirm (defaultArg at DateTimeOffset.UtcNow)) /// Used by GroupCheckoutProcess to run any relevant Reaction activities - member _.React(id, handleReaction : Flow.State -> Async<'R * Events.Event list>) : Async<'R * int64> = + member _.React(id, handleReaction: Flow.State -> Async<'R * Events.Event list>): Async<'R * int64> = let decider = resolve id decider.TransactAsyncWithPostVersion(Flow.decide handleReaction) - member _.Read(groupCheckoutId) : Async= + member _.Read(groupCheckoutId): Async= let decider = resolve groupCheckoutId decider.Query(Flow.nextAction) -module Config = +module Factory = - let private (|StoreCat|) = function - | Config.Store.Memory store -> - Config.Memory.create Events.codec Fold.initial Fold.fold store - | Config.Store.Dynamo (context, cache) -> - Config.Dynamo.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache) - | Config.Store.Mdb (context, cache) -> - Config.Mdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache) - let create (StoreCat cat) = streamId >> Config.resolve cat Category |> Service + let private (|Category|) = function + | Store.Context.Memory store -> + Store.Memory.create Events.codec Fold.initial Fold.fold store + | Store.Context.Dynamo (context, cache) -> + Store.Dynamo.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache) + | Store.Context.Mdb (context, cache) -> + Store.Mdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache) + let create (Category cat) = streamId >> Store.resolve cat Category |> Service diff --git a/propulsion-hotel/Domain/GuestStay.fs b/propulsion-hotel/Domain/GuestStay.fs index 2b66de103..4d12647ec 100644 --- a/propulsion-hotel/Domain/GuestStay.fs +++ b/propulsion-hotel/Domain/GuestStay.fs @@ -7,17 +7,17 @@ module
Events = type Event = /// Notes time of checkin of the guest (does not affect whether charges can be levied against the stay) - | CheckedIn of {| at : DateTimeOffset |} + | CheckedIn of {| at: DateTimeOffset |} /// Notes addition of a charge against the stay - | Charged of {| chargeId : ChargeId; at : DateTimeOffset; amount : decimal |} + | Charged of {| chargeId: ChargeId; at: DateTimeOffset; amount: decimal |} /// Notes a payment against this stay - | Paid of {| paymentId : PaymentId; at : DateTimeOffset; amount : decimal |} + | Paid of {| paymentId: PaymentId; at: DateTimeOffset; amount: decimal |} /// Notes an ordinary checkout by the Guest (requires prior payment of all outstanding charges) - | CheckedOut of {| at : DateTimeOffset |} + | CheckedOut of {| at: DateTimeOffset |} /// Notes checkout is being effected via a GroupCheckout. Marks stay complete equivalent to typical CheckedOut event - | TransferredToGroup of {| at : DateTimeOffset; groupId : GroupCheckoutId; residualBalance : decimal |} + | TransferredToGroup of {| at: DateTimeOffset; groupId: GroupCheckoutId; residualBalance: decimal |} interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.gen + let codec = Store.Codec.gen module Fold = @@ -25,8 +25,8 @@ module Fold = type State = | Active of Balance | Closed - | TransferredToGroup of {| groupId : GroupCheckoutId; amount : decimal |} - and Balance = { balance : decimal; charges : ChargeId[]; payments : PaymentId[]; checkedInAt : DateTimeOffset option } + | TransferredToGroup of {| groupId: GroupCheckoutId; amount: decimal |} + and Balance = { balance: decimal; charges: ChargeId[]; payments: PaymentId[]; checkedInAt: DateTimeOffset option } let initial = Active { balance = 0m; charges = [||]; payments = [||]; checkedInAt = None } let evolve state event = @@ -39,7 +39,7 @@ module Fold = | Events.CheckedOut _ -> Closed | Events.TransferredToGroup e -> TransferredToGroup {| groupId = e.groupId; amount = e.residualBalance |} | Closed _ | TransferredToGroup _ -> invalidOp "No events allowed after CheckedOut/TransferredToGroup" - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve module Decide = @@ -65,21 +65,21 @@ module Decide = [] type CheckoutResult = Ok | AlreadyCheckedOut | BalanceOutstanding of decimal - let checkout at : State -> CheckoutResult * Events.Event list = function + let checkout at: State -> CheckoutResult * Events.Event list = function | Closed -> CheckoutResult.Ok, [] | TransferredToGroup _ -> CheckoutResult.AlreadyCheckedOut, [] | Active { balance = 0m } -> CheckoutResult.Ok, [ Events.CheckedOut {| at = at |} ] | Active { balance = residual } -> CheckoutResult.BalanceOutstanding residual, [] [] - type GroupCheckoutResult = Ok of residual : decimal | AlreadyCheckedOut - let groupCheckout at groupId : State -> GroupCheckoutResult * Events.Event list = function + type GroupCheckoutResult = Ok of residual: decimal | AlreadyCheckedOut + let groupCheckout at groupId: State -> GroupCheckoutResult * Events.Event list = function | Closed -> GroupCheckoutResult.AlreadyCheckedOut, [] | TransferredToGroup s when s.groupId = groupId -> GroupCheckoutResult.Ok s.amount, [] | TransferredToGroup _ -> GroupCheckoutResult.AlreadyCheckedOut, [] | Active { balance = residual } -> GroupCheckoutResult.Ok residual, [ Events.TransferredToGroup {| at = at; groupId = groupId; residualBalance = residual |} ] -type Service internal (resolve : GuestStayId -> Equinox.Decider) = +type
Service internal (resolve: GuestStayId -> Equinox.Decider) = member _.Charge(id, chargeId, amount) = let decider = resolve id @@ -89,22 +89,22 @@ type Service internal (resolve : GuestStayId -> Equinox.Decider = + member _.Checkout(id, at): Async = let decider = resolve id decider.Transact(Decide.checkout (defaultArg at DateTimeOffset.UtcNow)) // Driven exclusively by GroupCheckout - member _.GroupCheckout(id, groupId, ?at) : Async = + member _.GroupCheckout(id, groupId, ?at): Async = let decider = resolve id decider.Transact(Decide.groupCheckout (defaultArg at DateTimeOffset.UtcNow) groupId) -module Config = +module Factory = - let private (|StoreCat|) = function - | Config.Store.Memory store -> - Config.Memory.create Events.codec Fold.initial Fold.fold store - | Config.Store.Dynamo (context, cache) -> - Config.Dynamo.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache) - | Config.Store.Mdb (context, cache) -> - Config.Mdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache) - let create (StoreCat cat) = Service(streamId >> Config.resolve cat Category) + let private (|Category|) = function + | Store.Context.Memory store -> + Store.Memory.create Events.codec Fold.initial Fold.fold store + | Store.Context.Dynamo (context, cache) -> + Store.Dynamo.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache) + | Store.Context.Mdb (context, cache) -> + Store.Mdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache) + let create (Category cat) = Service(streamId >> Store.resolve cat Category) diff --git a/propulsion-hotel/Domain/Config.fs b/propulsion-hotel/Domain/Store.fs similarity index 79% rename from propulsion-hotel/Domain/Config.fs rename to propulsion-hotel/Domain/Store.fs index c92e181ef..2c8b83f0f 100644 --- a/propulsion-hotel/Domain/Config.fs +++ b/propulsion-hotel/Domain/Store.fs @@ -1,25 +1,18 @@ -module Domain.Config - -[] -type Store = - | Memory of Equinox.MemoryStore.VolatileStore)> - | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache - | Mdb of Equinox.MessageDb.MessageDbContext * Equinox.Core.ICache +module Domain.Store let log = Serilog.Log.ForContext("isMetric", true) let resolve cat = Equinox.Decider.resolve log cat -module EventCodec = +module Codec = open FsCodec.SystemTextJson - let private defaultOptions = Options.Create() let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = - Codec.Create<'t>(options = defaultOptions) + Codec.Create<'t>() // options = Options.Default module Memory = - let create codec initial fold store : Equinox.Category<_, _, _> = + let create codec initial fold store: Equinox.Category<_, _, _> = Equinox.MemoryStore.MemoryStoreCategory(store, FsCodec.Deflate.EncodeUncompressed codec, fold, initial) let defaultCacheDuration = System.TimeSpan.FromMinutes 20. 
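(For orientation: with Config.fs renamed to Store.fs, an aggregate binds its category via the relocated Store module exactly as GroupCheckout and GuestStay do above. A minimal sketch of the resulting shape, assuming a hypothetical Counter aggregate; only the Store.Codec/Store.Context/Store.Memory/Store.Dynamo/Store.Mdb combinators defined in this file are used:)

    // Hypothetical aggregate, shown only to illustrate the post-rename wiring
    module Domain.Counter

    module Events =
        type Event =
            | Incremented
            interface TypeShape.UnionContract.IUnionContract
        let codec = Store.Codec.gen<Event>

    module Fold =
        type State = int
        let initial = 0
        let evolve state = function Events.Incremented -> state + 1
        let fold: State -> Events.Event seq -> State = Seq.fold evolve

    module Factory =
        // Mirrors GroupCheckout.Factory above: map the Store.Context case to an Equinox.Category
        // (a create function would then resolve a Service via Store.resolve, as GroupCheckout.Factory.create does)
        let private (|Category|) = function
            | Store.Context.Memory store -> Store.Memory.create Events.codec Fold.initial Fold.fold store
            | Store.Context.Dynamo (context, cache) -> Store.Dynamo.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache)
            | Store.Context.Mdb (context, cache) -> Store.Mdb.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache)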
@@ -41,9 +34,15 @@ module Mdb = open Equinox.MessageDb let private create codec initial fold accessStrategy (context, cache) = - let cacheStrategy = CachingStrategy.SlidingWindow (cache, defaultCacheDuration) + let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, defaultCacheDuration) MessageDbCategory(context, codec, fold, initial, cacheStrategy, ?access = accessStrategy) let createUnoptimized codec initial fold (context, cache) = let accessStrategy = None create codec initial fold accessStrategy (context, cache) + +[] +type Context = + | Memory of Equinox.MemoryStore.VolatileStore)> + | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Cache + | Mdb of Equinox.MessageDb.MessageDbContext * Equinox.Cache diff --git a/propulsion-hotel/Domain/Types.fs b/propulsion-hotel/Domain/Types.fs index 00a0b69cd..dceea626d 100644 --- a/propulsion-hotel/Domain/Types.fs +++ b/propulsion-hotel/Domain/Types.fs @@ -4,18 +4,18 @@ open FSharp.UMX open System module Guid = - let toString (x : Guid) : string = x.ToString "N" + let toString (x: Guid): string = x.ToString "N" type GroupCheckoutId = Guid and [] groupCheckoutId module GroupCheckoutId = - let toString : GroupCheckoutId -> string = UMX.untag >> Guid.toString - let (|Parse|) : string -> GroupCheckoutId = Guid.Parse >> UMX.tag + let toString: GroupCheckoutId -> string = UMX.untag >> Guid.toString + let (|Parse|): string -> GroupCheckoutId = Guid.Parse >> UMX.tag type GuestStayId = Guid and [] guestStayId module GuestStayId = - let toString : GuestStayId -> string = UMX.untag >> Guid.toString + let toString: GuestStayId -> string = UMX.untag >> Guid.toString type ChargeId = Guid and [] chargeId @@ -31,5 +31,5 @@ module DeciderExtensions = type Equinox.Decider<'S, 'E> with - member x.TransactAsyncWithPostVersion(decide) : Async<'R * int64> = + member x.TransactAsyncWithPostVersion(decide): Async<'R * int64> = x.TransactExAsync((fun c -> decide c.State), (fun r c -> (r, c.Version))) diff --git a/propulsion-hotel/Reactor.Integration/DynamoConnector.fs b/propulsion-hotel/Reactor.Integration/DynamoConnector.fs index a34cddd44..d0de0e146 100644 --- a/propulsion-hotel/Reactor.Integration/DynamoConnector.fs +++ b/propulsion-hotel/Reactor.Integration/DynamoConnector.fs @@ -2,14 +2,14 @@ namespace Reactor.Integration open Infrastructure -type DynamoConnector(connector : Equinox.DynamoStore.DynamoStoreConnector, table, indexTable) = +type DynamoConnector(connector: Equinox.DynamoStore.DynamoStoreConnector, table, indexTable) = let client = connector.CreateClient() let storeClient = Equinox.DynamoStore.DynamoStoreClient(client, table) let storeContext = storeClient |> DynamoStoreContext.create let cache = Equinox.Cache("Tests", sizeMb = 10) - new (c : Reactor.SourceArgs.Configuration) = + new (c: Reactor.SourceArgs.Configuration) = let timeout, retries = System.TimeSpan.FromSeconds 5., 5 let connector = match c.DynamoRegion with | Some systemName -> Equinox.DynamoStore.DynamoStoreConnector(systemName, timeout, retries) @@ -19,9 +19,9 @@ type DynamoConnector(connector : Equinox.DynamoStore.DynamoStoreConnector, table member val IndexClient = Equinox.DynamoStore.DynamoStoreClient(client, match indexTable with Some x -> x | None -> table + "-index") member val DumpStats = Equinox.DynamoStore.Core.Log.InternalMetrics.dump - member val Store = Domain.Config.Store.Dynamo (storeContext, cache) + member val Store = Domain.Store.Context.Dynamo (storeContext, cache) /// Uses an in-memory checkpoint service; the real app will obviously need to store real 
checkpoints (see SourceArgs.Dynamo.Arguments.CreateCheckpointStore) member x.CreateCheckpointService(consumerGroupName) = let checkpointInterval = System.TimeSpan.FromHours 1. let store = Equinox.MemoryStore.VolatileStore() - Propulsion.Feed.ReaderCheckpoint.MemoryStore.create Domain.Config.log (consumerGroupName, checkpointInterval) store + Propulsion.Feed.ReaderCheckpoint.MemoryStore.create Domain.Store.log (consumerGroupName, checkpointInterval) store diff --git a/propulsion-hotel/Reactor.Integration/MessageDbConnector.fs b/propulsion-hotel/Reactor.Integration/MessageDbConnector.fs index 25688c838..4bfc98deb 100644 --- a/propulsion-hotel/Reactor.Integration/MessageDbConnector.fs +++ b/propulsion-hotel/Reactor.Integration/MessageDbConnector.fs @@ -2,20 +2,20 @@ namespace Reactor.Integration open Infrastructure -type MessageDbConnector(connectionString : string) = +type MessageDbConnector(connectionString: string) = let client = Equinox.MessageDb.MessageDbClient connectionString let context = Equinox.MessageDb.MessageDbContext client let cache = Equinox.Cache("Tests", sizeMb = 10) - new (c : Reactor.SourceArgs.Configuration) = MessageDbConnector(c.MdbConnectionString) + new (c: Reactor.SourceArgs.Configuration) = MessageDbConnector(c.MdbConnectionString) new () = MessageDbConnector(Reactor.SourceArgs.Configuration EnvVar.tryGet) member val ConnectionString = connectionString member val DumpStats = Equinox.MessageDb.Log.InternalMetrics.dump - member val Store = Domain.Config.Store.Mdb (context, cache) + member val Store = Domain.Store.Context.Mdb (context, cache) /// Uses an in-memory checkpoint service; the real app will obviously need to store real checkpoints (see SourceArgs.Mdb.Arguments.CreateCheckpointStore) member x.CreateCheckpointService(consumerGroupName) = let checkpointInterval = System.TimeSpan.FromHours 1. 
let store = Equinox.MemoryStore.VolatileStore() - Propulsion.Feed.ReaderCheckpoint.MemoryStore.create Domain.Config.log (consumerGroupName, checkpointInterval) store + Propulsion.Feed.ReaderCheckpoint.MemoryStore.create Domain.Store.log (consumerGroupName, checkpointInterval) store diff --git a/propulsion-hotel/Reactor.Integration/ReactorFixture.fs b/propulsion-hotel/Reactor.Integration/ReactorFixture.fs index cd5b749d1..041bf3e8f 100644 --- a/propulsion-hotel/Reactor.Integration/ReactorFixture.fs +++ b/propulsion-hotel/Reactor.Integration/ReactorFixture.fs @@ -18,13 +18,13 @@ type FixtureBase(messageSink, store, dumpStats, createSourceConfig) = let log = Serilog.Log.Logger let stats = Handler.Stats(log, statsInterval = TimeSpan.FromMinutes 1, stateInterval = TimeSpan.FromMinutes 2, logExternalStats = dumpStats) - let sink = Handler.Config.StartSink(log, stats, handler, maxReadAhead = 1024, maxConcurrentStreams = 4, - // Ensure batches are completed ASAP so waits in the tests are minimal - wakeForResults = true) + let sink = Handler.Factory.StartSink(log, stats, 4, handler, maxReadAhead = 1024, + // Ensure batches are completed ASAP so waits in the tests are minimal + wakeForResults = true) let source, awaitReactions = let consumerGroupName = $"ReactorFixture/{contextId}" let sourceConfig = createSourceConfig consumerGroupName - Handler.Config.StartSource(log, sink, sourceConfig) + Handler.Factory.StartSource(log, sink, sourceConfig) member val Store = store @@ -56,7 +56,7 @@ module MemoryReactor = new(messageSink) = let store = Equinox.MemoryStore.VolatileStore() let createSourceConfig _groupName = SourceConfig.Memory store - new Fixture(messageSink, Domain.Config.Store.Memory store, createSourceConfig) + new Fixture(messageSink, Domain.Store.Context.Memory store, createSourceConfig) // override _.RunTimeout = TimeSpan.FromSeconds 0.1 member _.Wait() = base.Await(TimeSpan.MaxValue) // Propagation delay is not applicable for MemoryStore member val private Backoff = TimeSpan.FromMilliseconds 1 @@ -75,7 +75,7 @@ module DynamoReactor = new (messageSink) = let conn = DynamoConnector() let createSourceConfig consumerGroupName = - let loadMode = DynamoLoadModeConfig.NoBodies + let loadMode = Propulsion.DynamoStore.IndexOnly let checkpoints = conn.CreateCheckpointService(consumerGroupName) SourceConfig.Dynamo (conn.IndexClient, checkpoints, loadMode, startFromTail = true, batchSizeCutoff = 100, tailSleepInterval = tailSleepInterval, statsInterval = TimeSpan.FromSeconds 60.) 
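(To make the fixture wiring concrete: a hedged sketch of standing up the MemoryStore-backed reactor end to end, using only the Handler.Stats/Handler.Factory members this patch introduces; the interval and concurrency values are illustrative, and logExternalStats is stubbed with ignore:)

    // Sketch: manual equivalent of MemoryReactor.Fixture (values illustrative)
    let startMemoryReactor (log: Serilog.ILogger) =
        let store = Equinox.MemoryStore.VolatileStore()                  // in-memory event store shared by Domain services and source
        let handle = Handler.create (Domain.Store.Context.Memory store)  // Domain services + reaction handler bound to the Memory case
        let stats = Handler.Stats(log, statsInterval = System.TimeSpan.FromMinutes 1,
                                  stateInterval = System.TimeSpan.FromMinutes 2, logExternalStats = ignore)
        let sink = Handler.Factory.StartSink(log, stats, 4, handle, maxReadAhead = 1024, wakeForResults = true)
        let source, awaitReactions = Handler.Factory.StartSource(log, sink, SourceConfig.Memory store)
        store, sink, source, awaitReactions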
diff --git a/propulsion-hotel/Reactor.Integration/ReactorIntegrationTests.fs b/propulsion-hotel/Reactor.Integration/ReactorIntegrationTests.fs index bbb591079..944bcedad 100644 --- a/propulsion-hotel/Reactor.Integration/ReactorIntegrationTests.fs +++ b/propulsion-hotel/Reactor.Integration/ReactorIntegrationTests.fs @@ -6,8 +6,8 @@ open FsCheck.Xunit open System let runCheckoutScenario store (paymentId, id, NonEmptyArray stays, payBefore) checkWithRetry = async { - let staysService = GuestStay.Config.create store - let checkoutService = GroupCheckout.Config.create store + let staysService = GuestStay.Factory.create store + let checkoutService = GroupCheckout.Factory.create store let mutable charged = 0 for stayId, chargeId, PositiveInt amount in stays do charged <- charged + amount @@ -42,10 +42,10 @@ let runCheckoutScenario store (paymentId, id, NonEmptyArray stays, payBefore) ch | GroupCheckout.Decide.BalanceOutstanding _ -> return false } [] -type ReactorPropertiesBase(reactor : FixtureBase, testOutput) = +type ReactorPropertiesBase(reactor: FixtureBase, testOutput) = let logSub = reactor.CaptureSerilogLog testOutput - abstract member DisposeAsync : unit -> Async + abstract member DisposeAsync: unit -> Async default _.DisposeAsync() = async.Zero () // Abusing IDisposable rather than IAsyncDisposable as we want the output to accompany the test output @@ -56,12 +56,12 @@ type ReactorPropertiesBase(reactor : FixtureBase, testOutput) = reactor.DumpStats() logSub.Dispose() } -type MemoryProperties (reactor : MemoryReactor.Fixture, testOutput) = +type MemoryProperties (reactor: MemoryReactor.Fixture, testOutput) = // Trigger logging of (Aggregate) Reactor stats after each Test/Property is run inherit ReactorPropertiesBase(reactor, testOutput) [] - let run args : Async = + let run args: Async = runCheckoutScenario reactor.Store args reactor.CheckReactions override _.DisposeAsync() = @@ -72,7 +72,7 @@ type MemoryProperties (reactor : MemoryReactor.Fixture, testOutput) = interface Xunit.IClassFixture [] -type DynamoProperties(reactor : DynamoReactor.Fixture, testOutput) = +type DynamoProperties(reactor: DynamoReactor.Fixture, testOutput) = // Failsafe to emit the Remaining stats even in the case of a Test/Property failing (in success case, it's redundant) inherit ReactorPropertiesBase(reactor, testOutput) @@ -82,7 +82,7 @@ type DynamoProperties(reactor : DynamoReactor.Fixture, testOutput) = #else [] #endif - let run args : Async = async { + let run args: Async = async { try return! runCheckoutScenario reactor.Store args reactor.CheckReactions // Dump the stats after each and every iteration of the test finally reactor.DumpStats() } @@ -94,7 +94,7 @@ type DynamoProperties(reactor : DynamoReactor.Fixture, testOutput) = reactor.Wait() [] -type MessageDbProperties(reactor : MessageDbReactor.Fixture, testOutput) = +type MessageDbProperties(reactor: MessageDbReactor.Fixture, testOutput) = // Failsafe to emit the Remaining stats even in the case of a Test/Property failing (in success case, it's redundant) inherit ReactorPropertiesBase(reactor, testOutput) @@ -104,7 +104,7 @@ type MessageDbProperties(reactor : MessageDbReactor.Fixture, testOutput) = #else [] #endif - let run args : Async = async { + let run args: Async = async { try return! 
runCheckoutScenario reactor.Store args reactor.CheckReactions // Dump the stats after each and every iteration of the test finally reactor.DumpStats() } diff --git a/propulsion-hotel/Reactor.Integration/SerilogLogFixture.fs b/propulsion-hotel/Reactor.Integration/SerilogLogFixture.fs index d8e10ad99..7ae1fb5d0 100644 --- a/propulsion-hotel/Reactor.Integration/SerilogLogFixture.fs +++ b/propulsion-hotel/Reactor.Integration/SerilogLogFixture.fs @@ -2,14 +2,14 @@ namespace Reactor.Integration open Infrastructure // isStoreMetrics -type XunitOutputSink(?messageSink : Xunit.Abstractions.IMessageSink, ?minLevel : Serilog.Events.LogEventLevel, ?templatePrefix) = +type XunitOutputSink(?messageSink: Xunit.Abstractions.IMessageSink, ?minLevel: Serilog.Events.LogEventLevel, ?templatePrefix) = let minLevel = defaultArg minLevel Serilog.Events.LogEventLevel.Information let formatter = let baseTemplate = "{Timestamp:HH:mm:ss.fff} {Level:u1} " + Option.toObj templatePrefix + "{Message:l} {Properties}{NewLine}{Exception}" let template = if minLevel <= Serilog.Events.LogEventLevel.Debug then baseTemplate else baseTemplate.Replace("{Properties}", "") Serilog.Formatting.Display.MessageTemplateTextFormatter(template, null) - let mutable currentTestOutput : Xunit.Abstractions.ITestOutputHelper option = None - let writeSerilogEvent (logEvent : Serilog.Events.LogEvent) = + let mutable currentTestOutput: Xunit.Abstractions.ITestOutputHelper option = None + let writeSerilogEvent (logEvent: Serilog.Events.LogEvent) = logEvent.RemovePropertyIfPresent Equinox.DynamoStore.Core.Log.PropertyTag logEvent.RemovePropertyIfPresent Equinox.MessageDb.Log.PropertyTag logEvent.RemovePropertyIfPresent Propulsion.Feed.Core.Log.PropertyTag diff --git a/propulsion-hotel/Reactor/Args.fs b/propulsion-hotel/Reactor/Args.fs index 9c571d0db..73f3cdd66 100644 --- a/propulsion-hotel/Reactor/Args.fs +++ b/propulsion-hotel/Reactor/Args.fs @@ -1,9 +1,7 @@ /// Commandline arguments and/or secrets loading specifications module Infrastructure.Args -module Config = Domain.Config - -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) module Configuration = @@ -23,7 +21,7 @@ module Configuration = let [] READ_CONN_STRING = "MDB_CONNECTION_STRING_READ" let [] SCHEMA = "MDB_SCHEMA" -type Configuration(tryGet : string -> string option) = +type Configuration(tryGet: string -> string option) = member val tryGet = tryGet member _.get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" diff --git a/propulsion-hotel/Reactor/GroupCheckoutProcess.fs b/propulsion-hotel/Reactor/GroupCheckoutProcess.fs index 5d6f96af6..a1919f8dd 100644 --- a/propulsion-hotel/Reactor/GroupCheckoutProcess.fs +++ b/propulsion-hotel/Reactor/GroupCheckoutProcess.fs @@ -4,10 +4,10 @@ open Domain open Infrastructure [] -type Outcome = Merged of ok : int * failed : int | Noop +type Outcome = Merged of ok: int * failed: int | Noop /// Handles Reactions associated with the group checkout process -type Service(guestStays : GuestStay.Service, groupCheckouts : GroupCheckout.Service, checkoutParallelism) = +type Service(guestStays: GuestStay.Service, groupCheckouts: GroupCheckout.Service, checkoutParallelism) = (* Alternate impl of attemptMerge + executeMergeStayAttempts_ that's easier to read let executeMergeStayAttempts_ groupCheckoutId stayIds = async { @@ -33,7 +33,7 
@@ type Service(guestStays : GuestStay.Service, groupCheckouts : GroupCheckout.Serv // Attempts to merge the specified stays into the specified Group Checkout // Maps the results of each individual merge attempt into 0, 1 or 2 events reflecting the progress achieved against the requested merges - let decideMerge groupCheckoutId stayIds : Async = async { + let decideMerge groupCheckoutId stayIds: Async = async { let! residuals, fails = executeMergeStayAttempts groupCheckoutId stayIds let events = [ match residuals with @@ -57,5 +57,5 @@ type Service(guestStays : GuestStay.Service, groupCheckouts : GroupCheckout.Serv /// Handles Reactions based on the state of the Group Checkout's workflow /// NOTE result includes the post-version of the stream after processing has concluded - member _.React(groupCheckoutId) : Async = + member _.React(groupCheckoutId): Async = groupCheckouts.React(groupCheckoutId, handleReaction groupCheckoutId) diff --git a/propulsion-hotel/Reactor/Handler.fs b/propulsion-hotel/Reactor/Handler.fs index f07b11098..be232445f 100644 --- a/propulsion-hotel/Reactor/Handler.fs +++ b/propulsion-hotel/Reactor/Handler.fs @@ -1,8 +1,5 @@ module Reactor.Handler -open Infrastructure -open Propulsion.Internal - type Outcome = GroupCheckoutProcess.Outcome /// Gathers stats based on the outcome of each Span processed, periodically including them in the Sink summaries @@ -40,7 +37,7 @@ let private reactionCategories = [| GroupCheckout.Category |] // NOTE while Propulsion supplies the handler with the full set of outstanding events since the last successful checkpoint, // the nature of the reaction processing we are performing here can also be reliant on state that's inferred based on events // prior to those that will have arrived on the feed. For that reason, the caller does not forward the `events` argument here. -let private handle (processor : GroupCheckoutProcess.Service) stream = async { +let private handle (processor: GroupCheckoutProcess.Service) stream _events = async { match stream with | GroupCheckout.StreamName groupCheckoutId -> let! outcome, ver' = processor.React(groupCheckoutId) @@ -54,26 +51,24 @@ let private handle (processor : GroupCheckoutProcess.Service) stream = async { // NOTE also that in some cases, the observed position on the stream can be beyond that which has been notified via // the change feed. 
In those cases, Propulsion will drop any incoming events that would represent duplication of processing, // (and not even invoke the Handler unless one or more of the feed events are beyond the write position) - return struct (Propulsion.Streams.SpanResult.OverrideWritePosition ver', outcome) + return Propulsion.Sinks.StreamResult.OverrideNextIndex ver', outcome | other -> return failwithf "Span from unexpected category %A" other } let private createService store = - let stays = GuestStay.Config.create store - let checkouts = GroupCheckout.Config.create store + let stays = GuestStay.Factory.create store + let checkouts = GroupCheckout.Factory.create store GroupCheckoutProcess.Service(stays, checkouts, checkoutParallelism = 5) let create store = createService store |> handle -type Config private () = +type Factory private () = - static member StartSink(log : Serilog.ILogger, stats : Stats, handle, - maxReadAhead : int, maxConcurrentStreams : int, ?wakeForResults, ?idleDelay, ?purgeInterval) = - let handle stream _events ct = Async.startImmediateAsTask ct (handle stream) - Propulsion.Streams.Default.Config.Start(log, maxReadAhead, maxConcurrentStreams, handle, - stats, stats.StatsInterval.Period, - ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) + static member StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead, + ?wakeForResults, ?idleDelay, ?purgeInterval) = + Propulsion.Sinks.Factory.StartConcurrent(log, maxReadAhead, maxConcurrentStreams, handle, stats, + ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) static member StartSource(log, sink, sourceConfig) = - SourceConfig.start (log, Config.log) sink reactionCategories sourceConfig + Infrastructure.SourceConfig.start (log, Store.log) sink reactionCategories sourceConfig diff --git a/propulsion-hotel/Reactor/Infrastructure.fs b/propulsion-hotel/Reactor/Infrastructure.fs index c78f1328e..0f12a668f 100644 --- a/propulsion-hotel/Reactor/Infrastructure.fs +++ b/propulsion-hotel/Reactor/Infrastructure.fs @@ -11,7 +11,7 @@ module Log = module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj module Choice = @@ -45,7 +45,7 @@ type Equinox.DynamoStore.DynamoStoreClient with type Equinox.DynamoStore.DynamoStoreContext with - member internal x.LogConfiguration(log : ILogger) = + member internal x.LogConfiguration(log: ILogger) = log.Information("DynamoStore Tip thresholds: {maxTipBytes}b {maxTipEvents}e Query Paging {queryMaxItems} items", x.TipOptions.MaxBytes, Option.toNullable x.TipOptions.MaxEvents, x.QueryOptions.MaxItems) @@ -59,7 +59,7 @@ type Amazon.DynamoDBv2.IAmazonDynamoDB with module DynamoStoreContext = /// Create with default packing and querying policies.
Search for other `module DynamoStoreContext` impls for custom variations - let create (storeClient : Equinox.DynamoStore.DynamoStoreClient) = + let create (storeClient: Equinox.DynamoStore.DynamoStoreClient) = Equinox.DynamoStore.DynamoStoreContext(storeClient, queryMaxItems = 100) /// Equinox and Propulsion provide metrics as properties in log emissions @@ -68,20 +68,20 @@ module Sinks = let tags appName = ["app", appName] - let private equinoxMetricsOnly tags (l : LoggerConfiguration) = + let private equinoxMetricsOnly tags (l: LoggerConfiguration) = l.WriteTo.Sink(Equinox.DynamoStore.Core.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.DynamoStore.Prometheus.LogSink(tags)) .WriteTo.Sink(Equinox.MessageDb.Log.InternalMetrics.Stats.LogSink()) - let private equinoxAndPropulsionMetrics tags group (l : LoggerConfiguration) = + let private equinoxAndPropulsionMetrics tags group (l: LoggerConfiguration) = l |> equinoxMetricsOnly tags |> fun l -> l.WriteTo.Sink(Propulsion.Prometheus.LogSink(tags, group)) - let equinoxAndPropulsionFeedMetrics tags group (l : LoggerConfiguration) = + let equinoxAndPropulsionFeedMetrics tags group (l: LoggerConfiguration) = l |> equinoxAndPropulsionMetrics tags group |> fun l -> l.WriteTo.Sink(Propulsion.Feed.Prometheus.LogSink(tags)) - let console (configuration : LoggerConfiguration) = + let console (configuration: LoggerConfiguration) = let t = "[{Timestamp:HH:mm:ss} {Level:u1}] {Message:lj} {Properties:j}{NewLine}{Exception}" configuration.WriteTo.Console(theme=Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code, outputTemplate=t) @@ -89,14 +89,14 @@ module Sinks = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c [] - static member private Sinks(configuration : LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = - let configure (a : Configuration.LoggerSinkConfiguration) : unit = + static member private Sinks(configuration: LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = + let configure (a: Configuration.LoggerSinkConfiguration): unit = a.Logger(configureMetricsSinks >> ignore) |> ignore // unconditionally feed all log events to the metrics sinks a.Logger(fun l -> // but filter what gets emitted to the console sink let l = match isMetric with None -> l | Some predicate -> l.Filter.ByExcluding(Func predicate) @@ -105,5 +105,5 @@ type Logging() = configuration.WriteTo.Async(bufferSize = 65536, blockWhenFull = true, configure = System.Action<_> configure) [] - static member Sinks(configuration : LoggerConfiguration, configureMetricsSinks, verboseStore) = + static member Sinks(configuration: LoggerConfiguration, configureMetricsSinks, verboseStore) = configuration.Sinks(configureMetricsSinks, Sinks.console, ?isMetric = if verboseStore then None else Some Log.isStoreMetrics) diff --git a/propulsion-hotel/Reactor/Program.fs b/propulsion-hotel/Reactor/Program.fs index 0a37dd085..0c985bc02 100644 --- a/propulsion-hotel/Reactor/Program.fs +++ b/propulsion-hotel/Reactor/Program.fs @@ -4,7 +4,7 @@ open Infrastructure open Serilog open System -module Config = Domain.Config +module Store = Domain.Store module Args = @@ -37,7 +37,7 @@ module Args = | Dynamo _ -> "specify DynamoDB input parameters" | Mdb _ -> "specify MessageDb input parameters" - and Arguments(c : 
SourceArgs.Configuration, p : ParseResults) = + and Arguments(c: SourceArgs.Configuration, p: ParseResults) = let maxReadAhead = p.GetResult(MaxReadAhead, 16) let maxConcurrentProcessors = p.GetResult(MaxWriters, 8) member val ProcessorName = p.GetResult ProcessorName @@ -52,12 +52,12 @@ module Args = member x.ProcessorParams() = Log.Information("Reacting... {processorName}, reading {maxReadAhead} ahead, {dop} streams", x.ProcessorName, maxReadAhead, maxConcurrentProcessors) (x.ProcessorName, maxReadAhead, maxConcurrentProcessors) - member val Store : Choice = + member val Store: Choice = match p.GetSubCommand() with | Dynamo a -> Choice1Of2 <| SourceArgs.Dynamo.Arguments(c, a) | Mdb a -> Choice2Of2 <| SourceArgs.Mdb.Arguments(c, a) | a -> Args.missingArg $"Unexpected Store subcommand %A{a}" - member x.ConnectStoreAndSource(appName) : Config.Store * (ILogger -> string -> SourceConfig) * (ILogger -> unit) = + member x.ConnectStoreAndSource(appName): Store.Context * (ILogger -> string -> SourceConfig) * (ILogger -> unit) = let cache = Equinox.Cache (appName, sizeMb = x.CacheSizeMb) match x.Store with | Choice1Of2 a -> @@ -65,9 +65,9 @@ module Args = let buildSourceConfig log groupName = let indexStore, startFromTail, batchSizeCutoff, tailSleepInterval = a.MonitoringParams(log) let checkpoints = a.CreateCheckpointStore(groupName, cache) - let load = DynamoLoadModeConfig.NoBodies + let load = Propulsion.DynamoStore.EventLoadMode.IndexOnly SourceConfig.Dynamo (indexStore, checkpoints, load, startFromTail, batchSizeCutoff, tailSleepInterval, x.StatsInterval) - let store = Config.Store.Dynamo (context, cache) + let store = Store.Context.Dynamo (context, cache) store, buildSourceConfig, Equinox.DynamoStore.Core.Log.InternalMetrics.dump | Choice2Of2 a -> let context = a.Connect() @@ -75,35 +75,35 @@ module Args = let connectionString, startFromTail, batchSize, tailSleepInterval = a.MonitoringParams(log) let checkpoints = a.CreateCheckpointStore(groupName) SourceConfig.Mdb (connectionString, checkpoints, startFromTail, batchSize, tailSleepInterval, x.StatsInterval) - let store = Config.Store.Mdb (context, cache) + let store = Store.Context.Mdb (context, cache) store, buildSourceConfig, Equinox.MessageDb.Log.InternalMetrics.dump /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args - let parse tryGetConfigValue argv : Arguments = + let parse tryGetConfigValue argv: Arguments = let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name let parser = ArgumentParser.Create(programName = programName) Arguments(SourceArgs.Configuration tryGetConfigValue, parser.ParseCommandLine argv) let [] AppName = "Reactor" -let build (args : Args.Arguments) = +let build (args: Args.Arguments) = let consumerGroupName, maxReadAhead, maxConcurrentStreams = args.ProcessorParams() let store, buildSourceConfig, dumpMetrics = args.ConnectStoreAndSource(AppName) let log = Log.Logger let sink = let stats = Handler.Stats(log, args.StatsInterval, args.StateInterval, dumpMetrics) let handle = Handler.create store - Handler.Config.StartSink(log, stats, handle, maxReadAhead, maxConcurrentStreams, + Handler.Factory.StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead, wakeForResults = args.WakeForResults, idleDelay = args.IdleDelay, purgeInterval = args.PurgeInterval) let source, _awaitReactions = let sourceConfig = buildSourceConfig log consumerGroupName - Handler.Config.StartSource(log, sink, sourceConfig) + Handler.Factory.StartSource(log, sink, 
sourceConfig) sink, source open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException // A typical app will likely have health checks etc, implying the wireup would be via `endpoints.MapMetrics()` and thus not use this ugly code directly -let startMetricsServer port : IDisposable = +let startMetricsServer port: IDisposable = let metricsServer = new Prometheus.KestrelMetricServer(port = port) let ms = metricsServer.Start() Log.Information("Prometheus /metrics endpoint on port {port}", port) @@ -113,7 +113,7 @@ let run args = async { let sink, source = build args use _ = source use _ = sink - use _metricsServer : IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj + use _metricsServer: IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj return! Async.Parallel [ Async.AwaitKeyboardInterruptAsTaskCanceledException() source.AwaitWithStopOnCancellation() sink.AwaitWithStopOnCancellation() ] |> Async.Ignore } diff --git a/propulsion-hotel/Reactor/Reactor.fsproj b/propulsion-hotel/Reactor/Reactor.fsproj index 25e1cea9c..14104f996 100644 --- a/propulsion-hotel/Reactor/Reactor.fsproj +++ b/propulsion-hotel/Reactor/Reactor.fsproj @@ -18,11 +18,11 @@ - + - - - + + + diff --git a/propulsion-hotel/Reactor/SourceArgs.fs b/propulsion-hotel/Reactor/SourceArgs.fs index e74db9afa..1cca9c325 100644 --- a/propulsion-hotel/Reactor/SourceArgs.fs +++ b/propulsion-hotel/Reactor/SourceArgs.fs @@ -5,7 +5,7 @@ open Infrastructure // Args etc open Serilog open System -module Config = Domain.Config +module Store = Domain.Store type Configuration(tryGet) = inherit Args.Configuration(tryGet) @@ -43,7 +43,7 @@ module Dynamo = | MaxItems _ -> "maximum events to load in a batch. Default: 100" | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." - type Arguments(c : Args.Configuration, p : ParseResults) = + type Arguments(c: Args.Configuration, p: ParseResults) = let conn = match p.TryGetResult RegionProfile |> Option.orElseWith (fun () -> c.DynamoRegion) with | Some systemName -> Choice1Of2 systemName @@ -70,14 +70,14 @@ module Dynamo = member _.Connect() = connector.LogConfiguration() client.ConnectStore("Main", table) |> DynamoStoreContext.create - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("DynamoStoreSource BatchSizeCutoff {batchSizeCutoff} No event hydration", batchSizeCutoff) let indexStoreClient = indexStoreClient.Value if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") indexStoreClient, fromTail, batchSizeCutoff, tailSleepInterval member _.CreateCheckpointStore(group, cache) = let indexTable = indexStoreClient.Value - indexTable.CreateCheckpointService(group, cache, Config.log) + indexTable.CreateCheckpointService(group, cache, Domain.Store.log) module Mdb = @@ -99,7 +99,7 @@ module Mdb = | BatchSize _ -> "maximum events to load in a batch. Default: 1000" | TailSleepIntervalMs _ -> "How long to sleep in ms once the consumer has hit the tail (default: 100ms)" | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." 
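(Pulling the MessageDb pieces together: a hedged sketch of how the Mdb Arguments defined just below feed a SourceConfig.Mdb, mirroring the Choice2Of2 branch of ConnectStoreAndSource in Program.fs above; the cache name and size are illustrative:)

    // Sketch: parsed Mdb arguments -> store context + source configuration (values illustrative)
    let buildMdb (a: SourceArgs.Mdb.Arguments) (log: Serilog.ILogger) groupName statsInterval =
        let context = a.Connect()                                          // MessageDbContext (write conn + optional read replica)
        let cache = Equinox.Cache("Reactor", sizeMb = 10)
        let store = Domain.Store.Context.Mdb (context, cache)              // store case consumed by the aggregate Factories
        let connectionString, startFromTail, batchSize, tailSleepInterval = a.MonitoringParams(log)
        let checkpoints = a.CreateCheckpointStore(groupName)               // Propulsion.Feed checkpoint store
        store, SourceConfig.Mdb (connectionString, checkpoints, startFromTail, batchSize, tailSleepInterval, statsInterval)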
- type Arguments(c : Args.Configuration, p : ParseResults) = + type Arguments(c: Args.Configuration, p: ParseResults) = let writeConnStr = p.TryGetResult ConnectionString |> Option.defaultWith (fun () -> c.MdbConnectionString) let readConnStr = p.TryGetResult ReadConnectionString |> Option.orElseWith (fun () -> c.MdbReadConnectionString) |> Option.defaultValue writeConnStr let checkpointConnStr = p.TryGetResult CheckpointConnectionString |> Option.defaultValue writeConnStr @@ -108,7 +108,7 @@ module Mdb = let batchSize = p.GetResult(BatchSize, 1000) let tailSleepInterval = p.GetResult(TailSleepIntervalMs, 100) |> TimeSpan.FromMilliseconds member _.Connect() = - let sanitize (cs : string) = Npgsql.NpgsqlConnectionStringBuilder(cs, Password = null) + let sanitize (cs: string) = Npgsql.NpgsqlConnectionStringBuilder(cs, Password = null) Log.Information("Npgsql checkpoint connection {connectionString}", sanitize checkpointConnStr) if writeConnStr = readConnStr then Log.Information("MessageDB connection {connectionString}", sanitize writeConnStr) @@ -117,7 +117,7 @@ module Mdb = Log.Information("MessageDB read connection {connectionString}", sanitize readConnStr) let client = Equinox.MessageDb.MessageDbClient(writeConnStr, readConnStr) Equinox.MessageDb.MessageDbContext(client, batchSize) - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("MessageDbSource batchSize {batchSize} Checkpoints schema {schema}", batchSize, schema) if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") readConnStr, fromTail, batchSize, tailSleepInterval diff --git a/propulsion-hotel/Reactor/SourceConfig.fs b/propulsion-hotel/Reactor/SourceConfig.fs index fce70fde6..1ad61cb1d 100644 --- a/propulsion-hotel/Reactor/SourceConfig.fs +++ b/propulsion-hotel/Reactor/SourceConfig.fs @@ -5,47 +5,40 @@ open System.Threading.Tasks [] type SourceConfig = - | Memory of store : Equinox.MemoryStore.VolatileStore)> - | Dynamo of indexStore : Equinox.DynamoStore.DynamoStoreClient - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * loading : DynamoLoadModeConfig - * startFromTail : bool - * batchSizeCutoff : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan - | Mdb of connectionString : string - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * startFromTail : bool - * batchSize : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan -and [] DynamoLoadModeConfig = - | Hydrate of monitoredContext : Equinox.DynamoStore.DynamoStoreContext * hydrationConcurrency : int - | NoBodies + | Memory of store: Equinox.MemoryStore.VolatileStore)> + | Dynamo of indexStore: Equinox.DynamoStore.DynamoStoreClient + * checkpoints: Propulsion.Feed.IFeedCheckpointStore + * loading: Propulsion.DynamoStore.EventLoadMode + * startFromTail: bool + * batchSizeCutoff: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan + | Mdb of connectionString: string + * checkpoints: Propulsion.Feed.IFeedCheckpointStore + * startFromTail: bool + * batchSize: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan module SourceConfig = module Memory = open Propulsion.MemoryStore - let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter - (store : Equinox.MemoryStore.VolatileStore<_>) : Propulsion.Pipeline * (TimeSpan -> Task) option = - let source = MemoryStoreSource(log, store, categoryFilter, sink) + let start log (sink: Propulsion.Sinks.Sink) (categories: string[]) + (store: 
Equinox.MemoryStore.VolatileStore<_>): Propulsion.Pipeline * (TimeSpan -> Task) option = + let source = MemoryStoreSource(log, store, categories, sink) source.Start(), Some (fun _propagationDelay -> source.Monitor.AwaitCompletion(ignoreSubsequent = false)) module Dynamo = open Propulsion.DynamoStore - let private create (log, storeLog) (sink : Propulsion.Streams.Default.Sink) categoryFilter - (indexStore, checkpoints, loadModeConfig, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) trancheIds = - let loadMode = - match loadModeConfig with - | Hydrate (monitoredContext, hydrationConcurrency) -> LoadMode.Hydrated (categoryFilter, hydrationConcurrency, monitoredContext) - | NoBodies -> LoadMode.WithoutEventBodies categoryFilter + let private create (log, storeLog) (sink: Propulsion.Sinks.Sink) categories + (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) trancheIds = DynamoStoreSource( log, statsInterval, indexStore, batchSizeCutoff, tailSleepInterval, - checkpoints, sink, loadMode, + checkpoints, sink, loadMode, categories = categories, startFromTail = startFromTail, storeLog = storeLog, ?trancheIds = trancheIds) - let start (log, storeLog) sink categoryFilter (indexStore, checkpoints, loadModeConfig, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) + let start (log, storeLog) sink categories (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = - let source = create (log, storeLog) sink categoryFilter (indexStore, checkpoints, loadModeConfig, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) None + let source = create (log, storeLog) sink categories (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) None let source = source.Start() source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) module Mdb = @@ -61,11 +54,10 @@ module SourceConfig = let source = source.Start() source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) - let start (log, storeLog) sink categories : SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Task) option = function + let start (log, storeLog) sink categories: SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Task) option = function | SourceConfig.Memory volatileStore -> - Memory.start log sink (fun c -> Array.contains c categories) volatileStore + Memory.start log sink categories volatileStore | SourceConfig.Dynamo (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) -> - let catFilter c = Array.contains c categories - Dynamo.start (log, storeLog) sink catFilter (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) + Dynamo.start (log, storeLog) sink categories (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) | SourceConfig.Mdb (connectionString, checkpoints, startFromTail, batchSize, tailSleepInterval, statsInterval) -> Mdb.start log sink categories (connectionString, checkpoints, startFromTail, batchSize, tailSleepInterval, statsInterval) diff --git a/propulsion-projector/Args.fs b/propulsion-projector/Args.fs index 356b976db..8cb08b6a7 100644 --- a/propulsion-projector/Args.fs +++ b/propulsion-projector/Args.fs @@ -3,7 +3,7 @@ module 
ProjectorTemplate.Args open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) let [] REGION = "EQUINOX_DYNAMO_REGION" @@ -13,7 +13,7 @@ let [] SECRET_KEY = "EQUINOX_DYNAMO_SECRET_ACCESS_KEY" let [] TABLE = "EQUINOX_DYNAMO_TABLE" let [] INDEX_TABLE = "EQUINOX_DYNAMO_TABLE_INDEX" -type Configuration(tryGet : string -> string option) = +type Configuration(tryGet: string -> string option) = member val tryGet = tryGet member _.get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" @@ -53,7 +53,7 @@ module Cosmos = | Retries _ -> "specify operation retries (default: 1)." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 5)" - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let connection = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) let discovery = Equinox.CosmosStore.Discovery.ConnectionString connection let mode = p.TryGetResult ConnectionMode @@ -91,7 +91,7 @@ module Dynamo = | Retries _ -> "specify operation retries (default: 1)." | RetriesTimeoutS _ -> "specify max wait-time including retries in seconds (default: 5)" - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let conn = match p.TryGetResult RegionProfile |> Option.orElseWith (fun () -> c.DynamoRegion) with | Some systemName -> Choice1Of2 systemName diff --git a/propulsion-projector/Handler.fs b/propulsion-projector/Handler.fs index 0cb70f7ce..928db66cc 100644 --- a/propulsion-projector/Handler.fs +++ b/propulsion-projector/Handler.fs @@ -4,27 +4,27 @@ open Propulsion.Internal //#if cosmos #if parallelOnly // Here we pass the items directly through to the handler without parsing them -let mapToStreamItems (x : System.Collections.Generic.IReadOnlyCollection<'a>) : seq<'a> = upcast x -let categoryFilter _ = true +let mapToStreamItems (x: System.Collections.Generic.IReadOnlyCollection<'a>): seq<'a> = upcast x +let categories = [||] // TODO add category names #else // cosmos && !parallelOnly #endif // !parallelOnly //#endif // cosmos #if kafka #if (cosmos && parallelOnly) // kafka && cosmos && parallelOnly -type ExampleOutput = { id : string } +type ExampleOutput = { id: string } let serdes = FsCodec.SystemTextJson.Options.Default |> FsCodec.SystemTextJson.Serdes -let render (doc : System.Text.Json.JsonDocument) = +let render (doc: System.Text.Json.JsonDocument) = let r = doc.RootElement - let gs (name : string) = let x = r.GetProperty name in x.GetString() + let gs (name: string) = let x = r.GetProperty name in x.GetString() let equinoxPartition, itemId = gs "p", gs "id" - equinoxPartition, serdes.Serialize { id = itemId } + struct (equinoxPartition, serdes.Serialize { id = itemId }) #else // kafka && !(cosmos && parallelOnly) // Each outcome from `handle` is passed to `HandleOk` or `HandleExn` by the scheduler, DumpStats is called at `statsInterval` // The incoming calls are all sequential - the logic does not need to consider concurrent incoming calls type ProductionStats(log, statsInterval, stateInterval) = - inherit Propulsion.Streams.Sync.Stats(log, statsInterval, stateInterval) + inherit Propulsion.Sync.Stats(log, statsInterval, stateInterval) // TODO consider whether it's warranted to log 
every time a message is produced given the stats will periodically emit counts override _.HandleOk(()) = @@ -39,15 +39,14 @@ type ProductionStats(log, statsInterval, stateInterval) = /// to preserve ordering at stream (key) level for messages produced to the topic) // TODO NOTE: The bulk of any manipulation should take place before events enter the scheduler, i.e. in program.fs // TODO NOTE: While filtering out entire categories is appropriate, you should not filter within a given stream (i.e., by event type) -let render (stream : FsCodec.StreamName) (span : Propulsion.Streams.Default.StreamSpan) ct = Async.startImmediateAsTask ct <| async { +let render (stream: FsCodec.StreamName) (events: Propulsion.Sinks.Event[]) = async { let value = - span + events |> Propulsion.Codec.NewtonsoftJson.RenderedSpan.ofStreamSpan stream |> Propulsion.Codec.NewtonsoftJson.Serdes.Serialize - return struct (FsCodec.StreamName.toString stream, value) } + return FsCodec.StreamName.toString stream, value } -let categoryFilter = function - | _ -> true // TODO filter categories to be rendered +let categories = [||] // TODO add category names to render #endif // kafka && !(cosmos && parallelOnly) #else // !kafka @@ -71,25 +70,21 @@ type Stats(log, statsInterval, stateInterval) = log.Information(" Total events processed {total}", totalCount) totalCount <- 0 -let categoryFilter = function - | "categoryA" - | _ -> true +let categories = [| "categoryA" |] -let handle _stream (span: Propulsion.Streams.StreamSpan<_>) _ct = task { +let handle _stream (events: Propulsion.Sinks.Event[]) = async { let r = System.Random() - let ms = r.Next(1, span.Length) + let ms = r.Next(1, events.Length) do! Async.Sleep ms - return struct (Propulsion.Streams.SpanResult.AllProcessed, span.Length) } + return Propulsion.Sinks.StreamResult.AllProcessed, events.Length } #endif // !kafka -type Config private () = +type Factory private () = - static member StartSink(log : Serilog.ILogger, stats, - handle : System.Func>, - maxReadAhead : int, maxConcurrentStreams : int, ?wakeForResults, ?idleDelay, ?purgeInterval) = - Propulsion.Streams.Default.Config.Start(log, maxReadAhead, maxConcurrentStreams, handle, stats, stats.StatsInterval.Period, - ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) + static member StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead, + ?wakeForResults, ?idleDelay, ?purgeInterval) = + Propulsion.Sinks.Factory.StartConcurrent(log, maxReadAhead, maxConcurrentStreams, handle, stats, + ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) static member StartSource(log, sink, sourceConfig) = - SourceConfig.start (log, Config.log) sink categoryFilter sourceConfig + SourceConfig.start (log, Store.log) sink categories sourceConfig diff --git a/propulsion-projector/Infrastructure.fs b/propulsion-projector/Infrastructure.fs index d6803f6c4..9326bef16 100644 --- a/propulsion-projector/Infrastructure.fs +++ b/propulsion-projector/Infrastructure.fs @@ -6,13 +6,13 @@ open System module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj // #if (cosmos || esdb || sss) module CosmosStoreContext = /// Create with default packing and querying policies. 
Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) @@ -78,7 +78,7 @@ type Equinox.DynamoStore.DynamoStoreClient with type Equinox.DynamoStore.DynamoStoreContext with - member internal x.LogConfiguration(log : ILogger) = + member internal x.LogConfiguration(log: ILogger) = log.Information("DynamoStore Tip thresholds: {maxTipBytes}b {maxTipEvents}e Query Paging {queryMaxItems} items", x.TipOptions.MaxBytes, Option.toNullable x.TipOptions.MaxEvents, x.QueryOptions.MaxItems) @@ -92,7 +92,7 @@ type Amazon.DynamoDBv2.IAmazonDynamoDB with module DynamoStoreContext = /// Create with default packing and querying policies. Search for other `module DynamoStoreContext` impls for custom variations - let create (storeClient : Equinox.DynamoStore.DynamoStoreClient) = + let create (storeClient: Equinox.DynamoStore.DynamoStoreClient) = Equinox.DynamoStore.DynamoStoreContext(storeClient, queryMaxItems = 100) // #endif @@ -100,7 +100,7 @@ module DynamoStoreContext = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c diff --git a/propulsion-projector/Program.fs b/propulsion-projector/Program.fs index d0c7c9bcb..63e595015 100644 --- a/propulsion-projector/Program.fs +++ b/propulsion-projector/Program.fs @@ -52,7 +52,7 @@ module Args = #if sss | SqlMs _ -> "specify SqlStreamStore input parameters." 
#endif - and Arguments(c : SourceArgs.Configuration, p : ParseResults) = + and Arguments(c: SourceArgs.Configuration, p: ParseResults) = let processorName = p.GetResult ProcessorName let maxReadAhead = p.GetResult(MaxReadAhead, 64) let maxConcurrentProcessors = p.GetResult(MaxWriters, 1024) @@ -83,7 +83,7 @@ module Args = #else member val Sink = () #endif - member x.ConnectSource(appName) : (ILogger -> string -> SourceConfig) * _ * (ILogger -> unit) = + member x.ConnectSource(appName): (ILogger -> string -> SourceConfig) * _ * (ILogger -> unit) = let cache = Equinox.Cache (appName, sizeMb = x.CacheSizeMb) match x.Store with | a -> @@ -100,7 +100,7 @@ module Args = let buildSourceConfig log groupName = let indexStore, startFromTail, batchSizeCutoff, tailSleepInterval, streamsDop = a.MonitoringParams(log) let checkpoints = a.CreateCheckpointStore(groupName, cache) - let load = DynamoLoadModeConfig.Hydrate (context, streamsDop) + let load = Propulsion.DynamoStore.WithData (streamsDop, context) SourceConfig.Dynamo (indexStore, checkpoints, load, startFromTail, batchSizeCutoff, tailSleepInterval, x.StatsInterval) buildSourceConfig, x.Sink, Equinox.DynamoStore.Core.Log.InternalMetrics.dump #endif @@ -110,8 +110,8 @@ module Args = let buildSourceConfig log groupName = let startFromTail, maxItems, tailSleepInterval = a.MonitoringParams(log) let checkpoints = a.CreateCheckpointStore(groupName, targetStore) - let hydrateBodies = true - SourceConfig.Esdb (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) + let withData = true + SourceConfig.Esdb (connection.ReadConnection, checkpoints, withData, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) buildSourceConfig, x.Sink, fun log -> Equinox.CosmosStore.Core.Log.InternalMetrics.dump log Equinox.DynamoStore.Core.Log.InternalMetrics.dump log @@ -121,8 +121,8 @@ module Args = let buildSourceConfig log groupName = let startFromTail, maxItems, tailSleepInterval = a.MonitoringParams(log) let checkpoints = a.CreateCheckpointStoreSql(groupName) - let hydrateBodies = true - SourceConfig.Sss (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) + let withData = true + SourceConfig.Sss (connection.ReadConnection, checkpoints, withData, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) buildSourceConfig, x.Sink, fun log -> Equinox.SqlStreamStore.Log.InternalMetrics.dump log Equinox.CosmosStore.Core.Log.InternalMetrics.dump log @@ -130,14 +130,14 @@ module Args = #endif #if kafka - and KafkaSinkArguments(c : SourceArgs.Configuration, p : ParseResults) = + and KafkaSinkArguments(c: SourceArgs.Configuration, p: ParseResults) = member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) member x.BuildTargetParams() = x.Broker, x.Topic #endif /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args - let parse tryGetConfigValue argv : Arguments = + let parse tryGetConfigValue argv: Arguments = let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name let parser = ArgumentParser.Create(programName=programName) Arguments(SourceArgs.Configuration tryGetConfigValue, parser.ParseCommandLine argv) @@ -146,7 +146,7 @@ let [] AppName = "ProjectorTemplate" open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException -let build (args : 
Args.Arguments) = +let build (args: Args.Arguments) = let consumerGroupName, maxReadAhead, maxConcurrentProcessors = args.ProcessorParams() let buildSourceConfig, target, dumpMetrics = args.ConnectSource(AppName) #if kafka // kafka @@ -156,18 +156,18 @@ let build (args : Args.Arguments) = let sink = Propulsion.Kafka.ParallelProducerSink.Start(maxReadAhead, maxConcurrentProcessors, Handler.render, producer, args.StatsInterval) #else // kafka && !parallelOnly let stats = Handler.ProductionStats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Kafka.StreamsProducerSink.Start(Log.Logger, maxReadAhead, maxConcurrentProcessors, Handler.render, producer, stats, statsInterval = args.StatsInterval) + let sink = Propulsion.Kafka.StreamsProducerSink.Start(Log.Logger, maxReadAhead, maxConcurrentProcessors, Handler.render, producer, stats) #endif // kafka && !parallelOnly #else // !kafka let stats = Handler.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Streams.Default.Config.Start(Log.Logger, maxReadAhead, maxConcurrentProcessors, Handler.handle, stats, args.StatsInterval) + let sink = Handler.Factory.StartSink(Log.Logger, stats, maxConcurrentProcessors, Handler.handle, maxReadAhead) #endif // !kafka #if (cosmos && parallelOnly) // Custom logic for establishing the source, as we're not projecting StreamEvents - TODO could probably be generalized let source = - let mapToStreamItems (x : System.Collections.Generic.IReadOnlyCollection<'a>) : seq<'a> = upcast x + let mapToStreamItems (x: System.Collections.Generic.IReadOnlyCollection<'a>): seq<'a> = upcast x let observer = Propulsion.CosmosStore.CosmosStoreSource.CreateObserver(Log.Logger, sink.StartIngester, Handler.mapToStreamItems) - match buildSourceConfig Log.Logger consumerGroupName with SourceConfig.Cosmos (monitoredContainer, leasesContainer, checkpoints, tailSleepInterval : TimeSpan) -> + match buildSourceConfig Log.Logger consumerGroupName with SourceConfig.Cosmos (monitoredContainer, leasesContainer, checkpoints, tailSleepInterval: TimeSpan) -> match checkpoints with | Ephemeral _ -> failwith "Unexpected" | Persistent (processorName, startFromTail, maxItems, lagFrequency) -> @@ -177,7 +177,7 @@ let build (args : Args.Arguments) = #else let source, _awaitReactions = let sourceConfig = buildSourceConfig Log.Logger consumerGroupName - Handler.Config.StartSource(Log.Logger, sink, sourceConfig) + Handler.Factory.StartSource(Log.Logger, sink, sourceConfig) #endif [| Async.AwaitKeyboardInterruptAsTaskCanceledException() source.AwaitWithStopOnCancellation() diff --git a/propulsion-projector/Projector.fsproj b/propulsion-projector/Projector.fsproj index 547be6e79..61e403f74 100644 --- a/propulsion-projector/Projector.fsproj +++ b/propulsion-projector/Projector.fsproj @@ -9,7 +9,7 @@ - + @@ -20,19 +20,19 @@ - - + + - + - + - - - - + + + + - + diff --git a/propulsion-projector/SourceArgs.fs b/propulsion-projector/SourceArgs.fs index 00256d267..616511486 100644 --- a/propulsion-projector/SourceArgs.fs +++ b/propulsion-projector/SourceArgs.fs @@ -57,7 +57,7 @@ module Cosmos = | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." | MaxItems _ -> "maximum item count to supply for the Change Feed query. Default: use response size limit" | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. 
Default: 1" - type Arguments(c : Args.Configuration, p : ParseResults) = + type Arguments(c: Args.Configuration, p: ParseResults) = let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds @@ -73,8 +73,8 @@ module Cosmos = let lagFrequency = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes member _.Verbose = p.Contains Verbose member private _.ConnectLeases() = connector.CreateUninitialized(database, leaseContainerId) - member x.MonitoringParams(log : ILogger) = - let leases : Microsoft.Azure.Cosmos.Container = x.ConnectLeases() + member x.MonitoringParams(log: ILogger) = + let leases: Microsoft.Azure.Cosmos.Container = x.ConnectLeases() log.Information("ChangeFeed Leases Database {db} Container {container}. MaxItems limited to {maxItems}", leases.Database.Id, leases.Id, Option.toNullable maxItems) if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") @@ -119,7 +119,7 @@ module Dynamo = | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." | StreamsDop _ -> "parallelism when loading events from Store Feed Source. Default 4" - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let conn = match p.TryGetResult RegionProfile |> Option.orElseWith (fun () -> c.DynamoRegion) with | Some systemName -> Choice1Of2 systemName @@ -147,14 +147,14 @@ module Dynamo = member val Verbose = p.Contains Verbose member _.Connect() = connector.LogConfiguration() client.ConnectStore("Main", table) |> DynamoStoreContext.create - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("DynamoStoreSource BatchSizeCutoff {batchSizeCutoff} Hydrater parallelism {streamsDop}", batchSizeCutoff, streamsDop) let indexStoreClient = indexStoreClient.Value if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") indexStoreClient, fromTail, batchSizeCutoff, tailSleepInterval, streamsDop member _.CreateCheckpointStore(group, cache) = let indexTable = indexStoreClient.Value - indexTable.CreateCheckpointService(group, cache, Config.log) + indexTable.CreateCheckpointService(group, cache, Store.log) // #endif // dynamo #if esdb @@ -166,11 +166,11 @@ module Esdb = alternately one could use a SQL Server DB via Propulsion.SqlStreamStore For now, we store the Checkpoints in one of the above stores as this sample uses one for the read models anyway *) - let private createCheckpointStore (consumerGroup, checkpointInterval) : _ -> Propulsion.Feed.IFeedCheckpointStore = function - | Config.Store.Cosmos (context, cache) -> - Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Config.log (consumerGroup, checkpointInterval) (context, cache) - | Config.Store.Dynamo (context, cache) -> - Propulsion.Feed.ReaderCheckpoint.DynamoStore.create Config.log (consumerGroup, checkpointInterval) (context, cache) + let private createCheckpointStore (consumerGroup, checkpointInterval): _ -> Propulsion.Feed.IFeedCheckpointStore = function + | Store.Context.Cosmos (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Store.log (consumerGroup, checkpointInterval) (context, cache) + | Store.Context.Dynamo (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.DynamoStore.create Store.log 
(consumerGroup, checkpointInterval) (context, cache) type [] Parameters = | [] Verbose @@ -198,7 +198,7 @@ module Esdb = | Cosmos _ -> "CosmosDB Target Store parameters (also used for checkpoint storage)." | Dynamo _ -> "DynamoDB Target Store parameters (also used for checkpoint storage)." - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let startFromTail = p.Contains FromTail let batchSize = p.GetResult(BatchSize, 100) let tailSleepInterval = TimeSpan.FromSeconds 0.5 @@ -211,24 +211,24 @@ module Esdb = let checkpointInterval = TimeSpan.FromHours 1. member val Verbose = p.Contains Verbose - member _.Connect(appName, nodePreference) : Equinox.EventStoreDb.EventStoreConnection = + member _.Connect(appName, nodePreference): Equinox.EventStoreDb.EventStoreConnection = Log.Information("EventStore {discovery}", connectionStringLoggable) let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] Equinox.EventStoreDb.EventStoreConnector(timeout, retries, tags = tags) .Establish(appName, discovery, Equinox.EventStoreDb.ConnectionStrategy.ClusterSingle nodePreference) - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("EventStoreSource BatchSize {batchSize} ", batchSize) startFromTail, batchSize, tailSleepInterval - member _.CreateCheckpointStore(group, store) : Propulsion.Feed.IFeedCheckpointStore = + member _.CreateCheckpointStore(group, store): Propulsion.Feed.IFeedCheckpointStore = createCheckpointStore (group, checkpointInterval) store - member x.ConnectTarget(cache) : Config.Store = + member x.ConnectTarget(cache): Store.Context = match p.GetSubCommand() with | Cosmos a -> let context = Args.Cosmos.Arguments(c, a).Connect() |> Async.RunSynchronously |> CosmosStoreContext.create - Config.Store.Cosmos (context, cache) + Store.Context.Cosmos (context, cache) | Dynamo a -> let context = Args.Dynamo.Arguments(c, a).Connect() |> DynamoStoreContext.create - Config.Store.Dynamo (context, cache) + Store.Context.Dynamo (context, cache) | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` checkpoint store when source is `esdb`" #endif // esdb @@ -258,7 +258,7 @@ module Sss = | CheckpointsConnection _ ->"Connection string for Checkpoints sql db. Optional if SQLSTREAMSTORE_CONNECTION_CHECKPOINTS specified. Default: same as `Connection`" | CheckpointsCredentials _ ->"Credentials string for Checkpoints sql db. (used as part of checkpoints connection string, but NOT logged). Default (when no `CheckpointsConnection`: use `Credentials. Default (when `CheckpointsConnection` specified): use SQLSTREAMSTORE_CREDENTIALS_CHECKPOINTS environment variable (or assume no credentials)" - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let startFromTail = p.Contains FromTail let tailSleepInterval = p.GetResult(Tail, 1.) |> TimeSpan.FromSeconds let checkpointEventInterval = TimeSpan.FromHours 1. 
// Ignored when storing to Propulsion.SqlStreamStore.ReaderCheckpoint @@ -284,10 +284,10 @@ module Sss = Log.Information("SqlStreamStore MsSql Connection {connectionString} Schema {schema} AutoCreate {autoCreate}", conn, schema, autoCreate) let rawStore = Equinox.SqlStreamStore.MsSql.Connector(sssConnectionString, schema, autoCreate=autoCreate).Connect() |> Async.RunSynchronously Equinox.SqlStreamStore.SqlStreamStoreConnection rawStore - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("SqlStreamStoreSource BatchSize {batchSize} ", batchSize) startFromTail, batchSize, tailSleepInterval - member x.CreateCheckpointStoreSql(groupName) : Propulsion.Feed.IFeedCheckpointStore = + member x.CreateCheckpointStoreSql(groupName): Propulsion.Feed.IFeedCheckpointStore = let connectionString = x.BuildCheckpointsConnectionString() Propulsion.SqlStreamStore.ReaderCheckpoint.Service(connectionString, groupName, checkpointEventInterval) diff --git a/propulsion-projector/SourceConfig.fs b/propulsion-projector/SourceConfig.fs index eab7beac7..251d0f5a2 100644 --- a/propulsion-projector/SourceConfig.fs +++ b/propulsion-projector/SourceConfig.fs @@ -6,60 +6,56 @@ open System.Threading.Tasks [] type SourceConfig = // #if (cosmos) - | Cosmos of monitoredContainer : Microsoft.Azure.Cosmos.Container - * leasesContainer : Microsoft.Azure.Cosmos.Container - * checkpoints : CosmosFeedConfig - * tailSleepInterval : TimeSpan + | Cosmos of monitoredContainer: Microsoft.Azure.Cosmos.Container + * leasesContainer: Microsoft.Azure.Cosmos.Container + * checkpoints: CosmosFeedConfig + * tailSleepInterval: TimeSpan // #endif #if dynamo - | Dynamo of indexStore : Equinox.DynamoStore.DynamoStoreClient - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * loading : DynamoLoadModeConfig - * startFromTail : bool - * batchSizeCutoff : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan + | Dynamo of indexStore: Equinox.DynamoStore.DynamoStoreClient + * checkpoints: Propulsion.Feed.IFeedCheckpointStore + * loading: Propulsion.DynamoStore.EventLoadMode + * startFromTail: bool + * batchSizeCutoff: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan #endif #if esdb - | Esdb of client : EventStore.Client.EventStoreClient - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * hydrateBodies : bool - * startFromTail : bool - * batchSize : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan + | Esdb of client: EventStore.Client.EventStoreClient + * checkpoints: Propulsion.Feed.IFeedCheckpointStore + * withData: bool + * startFromTail: bool + * batchSize: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan #endif #if sss - | Sss of client : SqlStreamStore.IStreamStore - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * hydrateBodies : bool - * startFromTail : bool - * batchSize : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan + | Sss of client: SqlStreamStore.IStreamStore + * checkpoints: Propulsion.Feed.IFeedCheckpointStore + * withData: bool + * startFromTail: bool + * batchSize: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan #endif // #if cosmos and [] CosmosFeedConfig = - | Ephemeral of processorName : string - | Persistent of processorName : string * startFromTail : bool * maxItems : int option * lagFrequency : TimeSpan + | Ephemeral of processorName: string + | Persistent of processorName: string * startFromTail: bool * maxItems: int option * lagFrequency: TimeSpan // #endif -#if 
dynamo -and [] DynamoLoadModeConfig = - | Hydrate of monitoredContext : Equinox.DynamoStore.DynamoStoreContext * hydrationConcurrency : int -#endif module SourceConfig = // #if cosmos module Cosmos = open Propulsion.CosmosStore - let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter - (monitoredContainer, leasesContainer, checkpointConfig, tailSleepInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = - let parseFeedDoc = EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter + let start log (sink: Propulsion.Sinks.Sink) categories + (monitoredContainer, leasesContainer, checkpointConfig, tailSleepInterval): Propulsion.Pipeline * (TimeSpan -> Task) option = + let parseFeedDoc = EquinoxSystemTextJsonParser.enumCategoryEvents categories let observer = CosmosStoreSource.CreateObserver(log, sink.StartIngester, Seq.collect parseFeedDoc) let source = match checkpointConfig with | Ephemeral processorName -> - let withStartTime1sAgo (x : Microsoft.Azure.Cosmos.ChangeFeedProcessorBuilder) = + let withStartTime1sAgo (x: Microsoft.Azure.Cosmos.ChangeFeedProcessorBuilder) = x.WithStartTime(let t = DateTime.UtcNow in t.AddSeconds -1.) let lagFrequency = TimeSpan.FromMinutes 1. CosmosStoreSource.Start(log, monitoredContainer, leasesContainer, processorName, observer, @@ -74,16 +70,13 @@ module SourceConfig = #if dynamo module Dynamo = open Propulsion.DynamoStore - let start (log, storeLog) (sink : Propulsion.Streams.Default.Sink) categoryFilter - (indexStore, checkpoints, loadModeConfig, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = - let loadMode = - match loadModeConfig with - | Hydrate (monitoredContext, hydrationConcurrency) -> LoadMode.Hydrated (categoryFilter, hydrationConcurrency, monitoredContext) + let start (log, storeLog) (sink: Propulsion.Sinks.Sink) categories + (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval): Propulsion.Pipeline * (TimeSpan -> Task) option = let source = DynamoStoreSource( log, statsInterval, indexStore, batchSizeCutoff, tailSleepInterval, - checkpoints, sink, loadMode, + checkpoints, sink, loadMode, categories = categories, startFromTail = startFromTail, storeLog = storeLog) let source = source.Start() source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) @@ -91,43 +84,43 @@ module SourceConfig = #if esdb module Esdb = open Propulsion.EventStoreDb - let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter - (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = + let start log (sink: Propulsion.Sinks.Sink) (categories: string[]) + (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval): Propulsion.Pipeline * (TimeSpan -> Task) option = let source = EventStoreSource( log, statsInterval, client, batchSize, tailSleepInterval, - checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) + checkpoints, sink, categories, withData = withData, startFromTail = startFromTail) let source = source.Start() source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) #endif #if sss module Sss = open Propulsion.SqlStreamStore - let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter - (client, checkpoints, hydrateBodies, 
startFromTail, batchSize, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = + let start log (sink: Propulsion.Sinks.Sink) (categories: string[]) + (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval): Propulsion.Pipeline * (TimeSpan -> Task) option = let source = SqlStreamStoreSource( log, statsInterval, client, batchSize, tailSleepInterval, - checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) + checkpoints, sink, categories, withData = withData, startFromTail = startFromTail) let source = source.Start() source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) #endif - let start (log, storeLog) sink categoryFilter : SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Task) option = function + let start (log, storeLog) sink categories: SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Task) option = function // #if cosmos | SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) -> - Cosmos.start log sink categoryFilter (monitored, leases, checkpointConfig, tailSleepInterval) + Cosmos.start log sink categories (monitored, leases, checkpointConfig, tailSleepInterval) // #endif #if dynamo - | SourceConfig.Dynamo (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) -> - Dynamo.start (log, storeLog) sink categoryFilter (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) + | SourceConfig.Dynamo (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) -> + Dynamo.start (log, storeLog) sink categories (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) #endif #if esdb - | SourceConfig.Esdb (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> - Esdb.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) + | SourceConfig.Esdb (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval) -> + Esdb.start log sink categories (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval) #endif #if sss - | SourceConfig.Sss (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> - Sss.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) + | SourceConfig.Sss (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval) -> + Sss.start log sink categories (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval) #endif diff --git a/propulsion-projector/Config.fs b/propulsion-projector/Store.fs similarity index 84% rename from propulsion-projector/Config.fs rename to propulsion-projector/Store.fs index 3954f8f89..a5d12c9c6 100644 --- a/propulsion-projector/Config.fs +++ b/propulsion-projector/Store.fs @@ -1,11 +1,11 @@ -module ProjectorTemplate.Config +module ProjectorTemplate.Store let log = Serilog.Log.ForContext("isMetric", true) // #if (cosmos || esdb || sss) module Cosmos = - let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = + let private createCached codec initial fold accessStrategy (context, cache): 
Equinox.Category<_, _, _> = let cacheStrategy = Equinox.CosmosStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) Equinox.CosmosStore.CosmosStoreCategory(context, codec, fold, initial, cacheStrategy, accessStrategy) @@ -20,7 +20,7 @@ module Cosmos = // #endif module Dynamo = - let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = + let private createCached codec initial fold accessStrategy (context, cache): Equinox.Category<_, _, _> = let cacheStrategy = Equinox.DynamoStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) Equinox.DynamoStore.DynamoStoreCategory(context, FsCodec.Deflate.EncodeUncompressed codec, fold, initial, cacheStrategy, accessStrategy) @@ -35,18 +35,18 @@ module Dynamo = module Esdb = let create codec initial fold (context, cache) = - let cacheStrategy = Equinox.EventStoreDb.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) + let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) Equinox.EventStoreDb.EventStoreCategory(context, codec, fold, initial, cacheStrategy) module Sss = let create codec initial fold (context, cache) = - let cacheStrategy = Equinox.SqlStreamStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) + let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) Equinox.SqlStreamStore.SqlStreamStoreCategory(context, codec, fold, initial, cacheStrategy) #if esdb [] -type Store = - | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache - | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache +type Context = + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Cache + | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Cache #endif diff --git a/propulsion-pruner/Handler.fs b/propulsion-pruner/Handler.fs index d6288bacb..a53d07b4f 100644 --- a/propulsion-pruner/Handler.fs +++ b/propulsion-pruner/Handler.fs @@ -4,8 +4,8 @@ open System // As we're not looking at the bodies of the events in the course of the shouldPrune decision, we remove them // from the Event immediately in order to avoid consuming lots of memory without purpose while they're queued -let removeDataAndMeta (x : FsCodec.ITimelineEvent) : FsCodec.ITimelineEvent<_> = - FsCodec.Core.TimelineEvent.Create(x.Index, x.EventType, Unchecked.defaultof, timestamp = x.Timestamp) +let removeDataAndMeta (x: Propulsion.Sinks.Event): FsCodec.ITimelineEvent<_> = + FsCodec.Core.TimelineEvent.Create(x.Index, x.EventType, Unchecked.defaultof, timestamp = x.Timestamp) let categoryFilter = function | "CategoryName" -> true @@ -16,7 +16,7 @@ let categoryFilter = function // 2. If transactional processing will benefit from being able to load the events using the provisioned capacity on the Primary // 3. All relevant systems are configured to be able to fall back to the Secondary where the head of a stream being read has been pruned // NOTE - DANGEROUS - events submitted to the CosmosPruner get removed from the supplied Context! -let shouldPrune category (age : TimeSpan) = +let shouldPrune category (age: TimeSpan) = match category, age.TotalDays with // TODO define pruning criteria | "CategoryName", age -> age > 30. 
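For orientation, a minimal sketch (illustrative only; `isPrunable` is a hypothetical helper, not part of the template) of how the age-based criteria above get applied per event, mirroring the way `selectPrunable` in the next hunk derives each event's age from a per-batch `asOf` instant:

    // Hypothetical helper: derive an event's age from its timestamp, then apply the pruning criteria
    let isPrunable (asOf: System.DateTimeOffset) category (e: FsCodec.ITimelineEvent<_>) =
        let age: System.TimeSpan = asOf - e.Timestamp
        shouldPrune category age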
@@ -24,7 +24,7 @@ let shouldPrune category (age : TimeSpan) = // Only relevant (copied to secondary container, meeting expiration criteria) events get fed into the CosmosPruner for removal // NOTE - DANGEROUS - events submitted to the CosmosPruner get removed from the supplied Context! -let selectPrunable changeFeedDocument : Propulsion.Streams.StreamEvent<_> seq = seq { +let selectPrunable changeFeedDocument: Propulsion.Streams.StreamEvent<_> seq = seq { let asOf = DateTimeOffset.UtcNow for s, e in Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter changeFeedDocument do let (FsCodec.StreamName.Category cat) = s diff --git a/propulsion-pruner/Infrastructure.fs b/propulsion-pruner/Infrastructure.fs index 29c1fe23d..fe348fcff 100644 --- a/propulsion-pruner/Infrastructure.fs +++ b/propulsion-pruner/Infrastructure.fs @@ -4,13 +4,13 @@ module PrunerTemplate.Infrastructure open Serilog open System -module Config = +module Store = let log = Log.ForContext("isMetric", true) module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj module Log = @@ -46,7 +46,7 @@ type Equinox.CosmosStore.CosmosStoreConnector with module CosmosStoreContext = /// Create with default packing and querying policies. Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) @@ -56,19 +56,19 @@ module Sinks = let tags appName = ["app", appName] - let equinoxMetricsOnly tags (l : LoggerConfiguration) = + let equinoxMetricsOnly tags (l: LoggerConfiguration) = l.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.CosmosStore.Prometheus.LogSink(tags)) - let equinoxAndPropulsionConsumerMetrics tags (l : LoggerConfiguration) = + let equinoxAndPropulsionConsumerMetrics tags (l: LoggerConfiguration) = l |> equinoxMetricsOnly tags |> fun l -> l.WriteTo.Sink(Propulsion.Prometheus.LogSink(tags)) - let equinoxAndPropulsionCosmosConsumerMetrics tags (l : LoggerConfiguration) = + let equinoxAndPropulsionCosmosConsumerMetrics tags (l: LoggerConfiguration) = l |> equinoxAndPropulsionConsumerMetrics tags |> fun l -> l.WriteTo.Sink(Propulsion.CosmosStore.Prometheus.LogSink(tags)) - let console verbose (configuration : LoggerConfiguration) = + let console verbose (configuration: LoggerConfiguration) = let t = "[{Timestamp:HH:mm:ss} {Level:u1}] {Message:lj} {Properties:j}{NewLine}{Exception}" let t = if verbose then t else t.Replace("{Properties:j}", "") configuration.WriteTo.Console(theme=Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code, outputTemplate=t) @@ -77,14 +77,14 @@ module Sinks = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c [] - static member private Sinks(configuration : LoggerConfiguration, configureMetricsSinks, configureConsoleSink, ?isMetric) = - let configure (a : Configuration.LoggerSinkConfiguration) : unit = + static member private Sinks(configuration: LoggerConfiguration, configureMetricsSinks, configureConsoleSink, 
?isMetric) = + let configure (a: Configuration.LoggerSinkConfiguration): unit = a.Logger(configureMetricsSinks >> ignore) |> ignore // unconditionally feed all log events to the metrics sinks a.Logger(fun l -> // but filter what gets emitted to the console sink let l = match isMetric with None -> l | Some predicate -> l.Filter.ByExcluding(Func predicate) @@ -93,10 +93,10 @@ type Logging() = configuration.WriteTo.Async(bufferSize=65536, blockWhenFull=true, configure=System.Action<_> configure) [] - static member Configure(configuration : LoggerConfiguration, appName, verbose, cfpVerbose) = + static member Configure(configuration: LoggerConfiguration, appName, verbose, cfpVerbose) = configuration.Configure(verbose) |> fun c -> let ingesterLevel = if cfpVerbose then Events.LogEventLevel.Debug else Events.LogEventLevel.Information - c.MinimumLevel.Override(typeof.FullName, ingesterLevel) + c.MinimumLevel.Override(typeof.FullName, ingesterLevel) |> fun c -> let generalLevel = if verbose then Events.LogEventLevel.Information else Events.LogEventLevel.Warning c.MinimumLevel.Override(typeof.FullName, generalLevel) |> fun c -> let isWriterB = Filters.Matching.FromSource().Invoke diff --git a/propulsion-pruner/Program.fs b/propulsion-pruner/Program.fs index f0a6dac30..6b9e6f880 100644 --- a/propulsion-pruner/Program.fs +++ b/propulsion-pruner/Program.fs @@ -4,7 +4,7 @@ open Propulsion.CosmosStore open Serilog open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = @@ -34,7 +34,7 @@ module Args = | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 8." | MaxWriters _ -> "maximum number of concurrent writes to target. Default: 4." | SrcCosmos _ -> "Cosmos Archive parameters." - and Arguments(c : Configuration, p : ParseResults) = + and Arguments(c: Configuration, p: ParseResults) = member val Verbose = p.Contains Parameters.Verbose member val PrometheusPort = p.TryGetResult PrometheusPort member val ProcessorName = p.GetResult ProcessorName @@ -42,15 +42,15 @@ module Args = member val MaxWriters = p.GetResult(MaxWriters, 4) member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. - member val Source : CosmosSourceArguments = + member val Source: CosmosSourceArguments = match p.GetSubCommand() with | SrcCosmos cosmos -> CosmosSourceArguments(c, cosmos) | _ -> missingArg "Must specify cosmos for Source" member x.DeletionTarget = x.Source.Target member x.MonitoringParams() = let srcC = x.Source - let leases : Microsoft.Azure.Cosmos.Container = - let dstC : CosmosSinkArguments = srcC.Target + let leases: Microsoft.Azure.Cosmos.Container = + let dstC: CosmosSinkArguments = srcC.Target match srcC.LeaseContainerId, dstC.LeaseContainerId with | None, None -> srcC.ConnectLeases(srcC.ContainerId + "-aux") | Some sc, None -> srcC.ConnectLeases(sc) @@ -96,7 +96,7 @@ module Args = | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 30." | DstCosmos _ -> "CosmosDb Pruning Target parameters." 
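For orientation, a minimal sketch of the composition root these parser cases feed (an assumed shape; the actual entry point is not shown in this hunk, but `Args.parse`, `run` and `MissingArg` are defined in this file, and `EnvVar.tryGet` in Infrastructure.fs):

    // Hypothetical entry point: parse command line + environment, then run the pruner pipeline
    [<EntryPoint>]
    let main argv =
        try let args = Args.parse EnvVar.tryGet argv
            run args |> Async.RunSynchronously
            0
        with MissingArg msg -> eprintfn "%s" msg; 1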
- and CosmosSourceArguments(c : Configuration, p : ParseResults) = + and CosmosSourceArguments(c: Configuration, p: ParseResults) = let discovery = p.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult CosmosSourceParameters.ConnectionMode let timeout = p.GetResult(CosmosSourceParameters.Timeout, 5.) |> TimeSpan.FromSeconds @@ -110,7 +110,7 @@ module Args = member val Verbose = p.Contains Verbose member val FromTail = p.Contains CosmosSourceParameters.FromTail member val MaxItems = p.TryGetResult MaxItems - member val LagFrequency : TimeSpan = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes + member val LagFrequency: TimeSpan = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes member val LeaseContainerId = p.TryGetResult CosmosSourceParameters.LeaseContainer member x.ConnectLeases containerId = connector.CreateUninitialized(x.DatabaseId, containerId) @@ -137,7 +137,7 @@ module Args = | Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 0." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." - and CosmosSinkArguments(c : Configuration, p : ParseResults) = + and CosmosSinkArguments(c: Configuration, p: ParseResults) = let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(CosmosSinkParameters.Timeout, 5.) |> TimeSpan.FromSeconds @@ -154,14 +154,14 @@ module Args = /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args - let parse tryGetConfigValue argv : Arguments = + let parse tryGetConfigValue argv: Arguments = let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name let parser = ArgumentParser.Create(programName=programName) Arguments(Configuration tryGetConfigValue, parser.ParseCommandLine argv) let [] AppName = "PrunerTemplate" -let build (args : Args.Arguments, log : ILogger) = +let build (args: Args.Arguments, log: ILogger) = let archive = args.Source // NOTE - DANGEROUS - events submitted to this sink get DELETED from the supplied Context! let deletingEventsSink = @@ -169,7 +169,7 @@ let build (args : Args.Arguments, log : ILogger) = if (target.DatabaseId, target.ContainerId) = (archive.DatabaseId, archive.ContainerId) then missingArg "Danger! 
Cannot prune a target based on itself" let context = target.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create - let eventsContext = Equinox.CosmosStore.Core.EventsContext(context, Config.log) + let eventsContext = Equinox.CosmosStore.Core.EventsContext(context, Store.log) CosmosStorePruner.Start(Log.Logger, args.MaxReadAhead, eventsContext, args.MaxWriters, args.StatsInterval, args.StateInterval) let source = let observer = CosmosStoreSource.CreateObserver(log.ForContext(), deletingEventsSink.StartIngester, Seq.collect Handler.selectPrunable) @@ -179,7 +179,7 @@ let build (args : Args.Arguments, log : ILogger) = deletingEventsSink, source // A typical app will likely have health checks etc., implying the wireup would be via `endpoints.MapMetrics()` and thus not use this ugly code directly -let startMetricsServer port : IDisposable = +let startMetricsServer port: IDisposable = let metricsServer = new Prometheus.KestrelMetricServer(port = port) let ms = metricsServer.Start() Log.Information("Prometheus /metrics endpoint on port {port}", port) @@ -187,10 +187,10 @@ let startMetricsServer port : IDisposable = open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException -let run (args : Args.Arguments) = async { - let log = (Log.forGroup args.ProcessorName).ForContext() +let run (args: Args.Arguments) = async { + let log = (Log.forGroup args.ProcessorName).ForContext() let sink, source = build (args, log) - use _metricsServer : IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj + use _metricsServer: IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj return! [| Async.AwaitKeyboardInterruptAsTaskCanceledException() source.AwaitWithStopOnCancellation() sink.AwaitWithStopOnCancellation() diff --git a/propulsion-pruner/Pruner.fsproj b/propulsion-pruner/Pruner.fsproj index 339f630b5..bbeff8558 100644 --- a/propulsion-pruner/Pruner.fsproj +++ b/propulsion-pruner/Pruner.fsproj @@ -14,9 +14,9 @@ - + - + diff --git a/propulsion-reactor/Args.fs b/propulsion-reactor/Args.fs index 104935349..89a79b79e 100644 --- a/propulsion-reactor/Args.fs +++ b/propulsion-reactor/Args.fs @@ -3,7 +3,7 @@ module ReactorTemplate.Args open System -exception MissingArg of message : string with override this.Message = this.message +exception MissingArg of message: string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) #if !(sourceKafka && blank && kafka) @@ -15,7 +15,7 @@ let [] TABLE = "EQUINOX_DYNAMO_TABLE" let [] INDEX_TABLE = "EQUINOX_DYNAMO_TABLE_INDEX" #endif -type Configuration(tryGet : string -> string option) = +type Configuration(tryGet: string -> string option) = member val tryGet = tryGet member _.get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" @@ -64,7 +64,7 @@ module Cosmos = | Retries _ -> "specify operation retries (default: 1)." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 5)" - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let connection = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) let discovery = Equinox.CosmosStore.Discovery.ConnectionString connection let mode = p.TryGetResult ConnectionMode @@ -102,7 +102,7 @@ module Dynamo = | Retries _ -> "specify operation retries (default: 1)."
| RetriesTimeoutS _ -> "specify max wait-time including retries in seconds (default: 5)" - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let conn = match p.TryGetResult RegionProfile |> Option.orElseWith (fun () -> c.DynamoRegion) with | Some systemName -> Choice1Of2 systemName diff --git a/propulsion-reactor/Contract.fs b/propulsion-reactor/Contract.fs index 26d65230d..f2af152ef 100644 --- a/propulsion-reactor/Contract.fs +++ b/propulsion-reactor/Contract.fs @@ -4,14 +4,14 @@ module ReactorTemplate.Contract /// A single Item in the list type ItemInfo = { id: int; order: int; title: string; completed: bool } -type SummaryInfo = { items : ItemInfo[] } +type SummaryInfo = { items: ItemInfo[] } -let render (item: Todo.Events.ItemData) : ItemInfo = +let render (item: Todo.Events.ItemData): ItemInfo = { id = item.id order = item.order title = item.title completed = item.completed } -let ofState (state : Todo.Fold.State) : SummaryInfo = +let ofState (state: Todo.Fold.State): SummaryInfo = { items = [| for x in state.items -> render x |]} //#endif @@ -20,22 +20,20 @@ let ofState (state : Todo.Fold.State) : SummaryInfo = module Input = let [] Category = "CategoryName" - type Value = { field : int } + let [] (|StreamName|_|) = function FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> ValueSome clientId | _ -> ValueNone + + type Value = { field: int } type Event = | EventA of Value | EventB of Value interface TypeShape.UnionContract.IUnionContract - let private codec : FsCodec.IEventCodec<_, _, _> = Config.EventCodec.withIndex + let private dec = Streams.Codec.genWithIndex - open Propulsion.Internal - let (|Decode|) (stream, span : Propulsion.Streams.StreamSpan<_>) = - span |> Array.chooseV (EventCodec.tryDecode codec stream) - let [] (|StreamName|_|) = function FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> ValueSome clientId | _ -> ValueNone let [] (|Parse|_|) = function - | (StreamName clientId, _) & Decode events -> ValueSome struct (clientId, events) + | struct (StreamName clientId, _) & Streams.Decode dec events -> ValueSome struct (clientId, events) | _ -> ValueNone -type Data = { value : int } +type Data = { value: int } type SummaryEvent = | EventA of Data | EventB of Data @@ -46,6 +44,6 @@ type SummaryEvent = | [] Summary of SummaryInfo interface TypeShape.UnionContract.IUnionContract #endif -let codec = Config.EventCodec.gen +let codec = Streams.Codec.gen let encode summary = codec.Encode((), summary) //#endif diff --git a/propulsion-reactor/Handler.fs b/propulsion-reactor/Handler.fs index 8893ee2f9..8675f214e 100644 --- a/propulsion-reactor/Handler.fs +++ b/propulsion-reactor/Handler.fs @@ -4,18 +4,18 @@ module ReactorTemplate.Handler [] type Outcome = /// Handler processed the span, with counts of used vs unused known event types - | Ok of used : int * unused : int + | Ok of used: int * unused: int /// Handler processed the span, but idempotency checks resulted in no writes being applied; includes count of decoded events - | Skipped of count : int + | Skipped of count: int /// Handler determined the events were not relevant to its duties and performed no decoding or processing - | NotApplicable of count : int + | NotApplicable of count: int -/// Gathers stats based on the outcome of each Span processed for emission, at intervals controlled by `StreamsConsumer` +/// Gathers stats based on the Outcome of each Span as it's processed, for periodic emission via DumpStats() type 
Stats(log, statsInterval, stateInterval, verboseStore, ?logExternalStats) = #if (blank || sourceKafka) inherit Propulsion.Streams.Stats(log, statsInterval, stateInterval) #else - inherit Propulsion.Streams.Sync.Stats(log, statsInterval, stateInterval) + inherit Propulsion.Sync.Stats(log, statsInterval, stateInterval) #endif let mutable ok, skipped, na = 0, 0, 0 @@ -46,14 +46,12 @@ let generate stream version summary = Propulsion.Codec.NewtonsoftJson.RenderedSummary.ofStreamEvent stream version event #if blank -let categoryFilter = function - | Contract.Input.Category -> true - | _ -> false +let categories = [| Contract.Input.Category |] let handle - (produceSummary : Propulsion.Codec.NewtonsoftJson.RenderedSummary -> Async) - stream span ct = Propulsion.Internal.Async.startImmediateAsTask ct <| async { - match stream, span with + (produceSummary: Propulsion.Codec.NewtonsoftJson.RenderedSummary -> Async) + stream events = async { + match struct (stream, events) with | Contract.Input.Parse (_clientId, events) -> for version, event in events do let summary = @@ -62,38 +60,34 @@ let handle | Contract.Input.EventB { field = x } -> Contract.EventB { value = x } let wrapped = generate stream version summary let! _ = produceSummary wrapped in () - return struct (Propulsion.Streams.SpanResult.AllProcessed, Outcome.Ok (events.Length, 0)) - | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } + return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Ok (events.Length, 0) + | _ -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.NotApplicable events.Length } #else -let categoryFilter = function - | Todo.Reactions.Category -> true - | _ -> false +let categories = Todo.Reactions.categories let handle - (service : Todo.Service) - (produceSummary : Propulsion.Codec.NewtonsoftJson.RenderedSummary -> Async) - stream span ct = Propulsion.Internal.Async.startImmediateAsTask ct <| async { - match stream, span with - | Todo.Reactions.Parse (clientId, events) -> - if events |> Seq.exists Todo.Reactions.impliesStateChange then - let! version', summary = service.QueryWithVersion(clientId, Contract.ofState) - let wrapped = generate stream version' (Contract.Summary summary) - let! _ = produceSummary wrapped - return struct (Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Ok (1, events.Length - 1)) - else - return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Skipped events.Length - | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } + (service: Todo.Service) + (produceSummary: Propulsion.Codec.NewtonsoftJson.RenderedSummary -> Async) + stream events = async { + match struct (stream, events) with + | Todo.Reactions.ImpliesStateChange (clientId, eventCount) -> + let! version', summary = service.QueryWithVersion(clientId, Contract.ofState) + let wrapped = generate stream version' (Contract.Summary summary) + let! 
_ = produceSummary wrapped + return Propulsion.Sinks.StreamResult.OverrideNextIndex version', Outcome.Ok (1, eventCount - 1) + | Todo.Reactions.NoStateChange eventCount -> + return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Skipped eventCount + | Todo.Reactions.NotApplicable eventCount -> + return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.NotApplicable eventCount } #endif -type Config private () = +type Factory private () = - static member StartSink(log : Serilog.ILogger, stats, - handle : System.Func>, - maxReadAhead : int, maxConcurrentStreams : int, ?wakeForResults, ?idleDelay, ?purgeInterval) = - Propulsion.Streams.Default.Config.Start(log, maxReadAhead, maxConcurrentStreams, handle, stats, stats.StatsInterval.Period, - ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) + static member StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead, + ?wakeForResults, ?idleDelay, ?purgeInterval) = + Propulsion.Sinks.Factory.StartConcurrent(log, maxReadAhead, maxConcurrentStreams, handle, stats, + ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) static member StartSource(log, sink, sourceConfig) = - SourceConfig.start (log, Config.log) sink categoryFilter sourceConfig + SourceConfig.start (log, Store.log) sink categories sourceConfig //#endif diff --git a/propulsion-reactor/Infrastructure.fs b/propulsion-reactor/Infrastructure.fs index 079a211ca..8c75fbc0e 100644 --- a/propulsion-reactor/Infrastructure.fs +++ b/propulsion-reactor/Infrastructure.fs @@ -10,33 +10,47 @@ open System // #if (kafka || !blank) module Guid = - let inline toStringN (x : Guid) = x.ToString "N" + let inline toStringN (x: Guid) = x.ToString "N" /// ClientId strongly typed id; represented internally as a Guid; not used for storage so rendering is not significant type ClientId = Guid and [] clientId module ClientId = - let toString (value : ClientId) : string = Guid.toStringN %value - let parse (value : string) : ClientId = let raw = Guid.Parse value in % raw + let toString (value: ClientId): string = Guid.toStringN %value + let parse (value: string): ClientId = let raw = Guid.Parse value in % raw let (|Parse|) = parse // #endif module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj // #if (kafka || !blank) -module EventCodec = - - /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to `log` citing the `stream` and content) - let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = - match codec.TryDecode x with - | ValueNone -> - if Log.IsEnabled Serilog.Events.LogEventLevel.Debug then - Log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true) - .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName) +module Streams = + + let private renderBody (x: Propulsion.Sinks.EventBody) = System.Text.Encoding.UTF8.GetString(x.Span) + // Uses the supplied codec to decode the supplied event record (iff at LogEventLevel.Debug, failures are logged, citing `stream` and `.Data`) + let private tryDecode<'E> (codec: Propulsion.Sinks.Codec<'E>) (streamName: FsCodec.StreamName) event = + match codec.TryDecode event with + | ValueNone when Log.IsEnabled Serilog.Events.LogEventLevel.Debug -> + 
Log.ForContext("eventData", renderBody event.Data) + .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, event.EventType, streamName) ValueNone | x -> x + let (|Decode|) codec struct (stream, events: Propulsion.Sinks.Event[]): 'E[] = + events |> Propulsion.Internal.Array.chooseV (tryDecode codec stream) + + module Codec = + + let gen<'E when 'E :> TypeShape.UnionContract.IUnionContract> : Propulsion.Sinks.Codec<'E> = + FsCodec.SystemTextJson.Codec.Create<'E>() // options = Options.Default + + let private withUpconverter<'c, 'e when 'c :> TypeShape.UnionContract.IUnionContract> up: Propulsion.Sinks.Codec<'e> = + let down (_: 'e) = failwith "Unexpected" + FsCodec.SystemTextJson.Codec.Create<'e, 'c, _>(up, down) // options = Options.Default + let genWithIndex<'c when 'c :> TypeShape.UnionContract.IUnionContract> : Propulsion.Sinks.Codec = + let up (raw: FsCodec.ITimelineEvent<_>) e = raw.Index, e + withUpconverter<'c, int64 * 'c> up // #endif type Equinox.CosmosStore.CosmosStoreConnector with @@ -72,7 +86,7 @@ type Equinox.CosmosStore.CosmosStoreConnector with module CosmosStoreContext = /// Create with default packing and querying policies. Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) @@ -80,7 +94,7 @@ module Dynamo = open Equinox.DynamoStore - let defaultCacheDuration = System.TimeSpan.FromMinutes 20. + let defaultCacheDuration = TimeSpan.FromMinutes 20. let private createCached codec initial fold accessStrategy (context, cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, defaultCacheDuration) DynamoStoreCategory(context, FsCodec.Deflate.EncodeTryDeflate codec, fold, initial, cacheStrategy, accessStrategy) @@ -108,7 +122,7 @@ type Equinox.DynamoStore.DynamoStoreClient with #endif type Equinox.DynamoStore.DynamoStoreContext with - member internal x.LogConfiguration(log : ILogger) = + member internal x.LogConfiguration(log: ILogger) = log.Information("DynamoStore Tip thresholds: {maxTipBytes}b {maxTipEvents}e Query Paging {queryMaxItems} items", x.TipOptions.MaxBytes, Option.toNullable x.TipOptions.MaxEvents, x.QueryOptions.MaxItems) @@ -122,14 +136,14 @@ type Amazon.DynamoDBv2.IAmazonDynamoDB with module DynamoStoreContext = /// Create with default packing and querying policies. 
Search for other `module DynamoStoreContext` impls for custom variations - let create (storeClient : Equinox.DynamoStore.DynamoStoreClient) = + let create (storeClient: Equinox.DynamoStore.DynamoStoreClient) = Equinox.DynamoStore.DynamoStoreContext(storeClient, queryMaxItems = 100) [] type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c diff --git a/propulsion-reactor/Ingester.fs b/propulsion-reactor/Ingester.fs index 3f24adfce..91fffbee4 100644 --- a/propulsion-reactor/Ingester.fs +++ b/propulsion-reactor/Ingester.fs @@ -3,14 +3,14 @@ module ReactorTemplate.Ingester [] type Outcome = /// Handler processed the span, with counts of used vs unused known event types - | Ok of used : int * unused : int + | Ok of used: int * unused: int /// Handler processed the span, but idempotency checks resulted in no writes being applied; includes count of decoded events - | Skipped of count : int + | Skipped of count: int /// Handler determined the events were not relevant to its duties and performed no actions /// e.g. wrong category, events that don't imply a state change - | NotApplicable of count : int + | NotApplicable of count: int -/// Gathers stats based on the outcome of each Span processed for emission, at intervals controlled by `StreamsConsumer` +/// Gathers stats based on the Outcome of each Span as it's processed, for periodic emission via DumpStats() type Stats(log, statsInterval, stateInterval, verboseStore, ?logExternalStats) = inherit Propulsion.Streams.Stats(log, statsInterval, stateInterval) @@ -39,51 +39,43 @@ type Stats(log, statsInterval, stateInterval, verboseStore, ?logExternalStats) = logExternalStats |> Option.iter (fun dumpTo -> dumpTo log) #if blank -let categoryFilter = function - | sn when sn = "Todos" -> true - | _ -> false +let [] Category = "Todos" +let reactionCategories = [| Category |] -let handle stream (span : Propulsion.Streams.StreamSpan<_>) ct = Propulsion.Internal.Async.startImmediateAsTask ct <| async { - match stream, span with - | FsCodec.StreamName.CategoryAndId ("Todos", id), _ -> +let handle stream (events: Propulsion.Sinks.Event[]) = async { + match stream, events with + | FsCodec.StreamName.CategoryAndId (Category, id), _ -> let ok = true // "TODO: add handler code" match ok with - | true -> return struct (Propulsion.Streams.SpanResult.AllProcessed, Outcome.Ok (1, span.Length - 1)) - | false -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Skipped span.Length - | _ -> return Propulsion.Streams.AllProcessed, Outcome.NotApplicable span.Length } + | true -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Ok (1, events.Length - 1) + | false -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Skipped events.Length + | _ -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.NotApplicable events.Length } #else // map from external contract to internal contract defined by the aggregate -let toSummaryEventData ( x : Contract.SummaryInfo) : TodoSummary.Events.SummaryData = +let toSummaryEventData (x: Contract.SummaryInfo): TodoSummary.Events.SummaryData = { items = [| for x in x.items -> { id = x.id; order = x.order; title = x.title; completed = x.completed } |] } -let categoryFilter = function - | Todo.Reactions.Category -> true - | _ -> false +let reactionCategories = Todo.Reactions.categories -let
handle - (sourceService : Todo.Service) - (summaryService : TodoSummary.Service) - stream (span : Propulsion.Streams.StreamSpan<_>) ct = Propulsion.Internal.Async.startImmediateAsTask ct <| async { - match stream, span with - | Todo.Reactions.Parse (clientId, events) when events |> Seq.exists Todo.Reactions.impliesStateChange -> +let handle (sourceService: Todo.Service) (summaryService: TodoSummary.Service) stream events = async { + match struct (stream, events) with + | Todo.Reactions.ImpliesStateChange (clientId, eventCount) -> let! version', summary = sourceService.QueryWithVersion(clientId, Contract.ofState) match! summaryService.TryIngest(clientId, version', toSummaryEventData summary) with - | true -> return struct (Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Ok (1, span.Length - 1)) - | false -> return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Skipped span.Length - | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } + | true -> return Propulsion.Sinks.StreamResult.OverrideNextIndex version', Outcome.Ok (1, eventCount - 1) + | false -> return Propulsion.Sinks.StreamResult.OverrideNextIndex version', Outcome.Skipped eventCount + | _ -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.NotApplicable events.Length } #endif -type Config private () = +type Factory private () = - static member StartSink(log : Serilog.ILogger, stats, - handle : System.Func>, - maxReadAhead : int, maxConcurrentStreams : int, ?wakeForResults, ?idleDelay, ?purgeInterval) = - Propulsion.Streams.Default.Config.Start(log, maxReadAhead, maxConcurrentStreams, handle, stats, stats.StatsInterval.Period, - ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) + static member StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead, + ?wakeForResults, ?idleDelay, ?purgeInterval) = + Propulsion.Sinks.Factory.StartConcurrent(log, maxReadAhead, maxConcurrentStreams, handle, stats, + ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) static member StartSource(log, sink, sourceConfig) = - SourceConfig.start (log, Config.log) sink categoryFilter sourceConfig + SourceConfig.start (log, Store.log) sink reactionCategories sourceConfig diff --git a/propulsion-reactor/Program.fs b/propulsion-reactor/Program.fs index 431e51f06..fd99351ab 100644 --- a/propulsion-reactor/Program.fs +++ b/propulsion-reactor/Program.fs @@ -67,7 +67,7 @@ module Args = | SqlMs _ -> "specify SqlStreamStore input parameters." 
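// A minimal sketch (not part of this patch; `Category`, `Outcome`, `stats` etc. stand in for the
// template's own definitions) of how the migrated pieces compose: a handler now receives the raw
// Propulsion.Sinks.Event[] for a stream and returns a StreamResult paired with an app-level Outcome,
// and the Factory members above wire it to a source with no System.Func/CancellationToken plumbing:
//
//     let handle stream (events: Propulsion.Sinks.Event[]) = async {
//         match stream, events with
//         | FsCodec.StreamName.CategoryAndId (Category, _), _ ->
//             return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Ok (1, events.Length - 1)
//         | _ -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.NotApplicable events.Length }
//     let sink = Factory.StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead)
//     let source, _awaitReactions = Factory.StartSource(log, sink, sourceConfig)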
#endif #endif - and Arguments(c : Configuration, p : ParseResults) = + and Arguments(c: Configuration, p: ParseResults) = let processorName = p.GetResult ProcessorName let maxReadAhead = p.GetResult(MaxReadAhead, 16) let maxConcurrentStreams = p.GetResult(MaxWriters, 8) @@ -82,7 +82,7 @@ module Args = processorName, maxReadAhead, maxConcurrentStreams) (processorName, maxReadAhead, maxConcurrentStreams) #if sourceKafka - member x.ConnectStoreAndSource(appName) : _ * _ * _ * (string -> FsKafka.KafkaConsumerConfig) * (ILogger -> unit) = + member x.ConnectStoreAndSource(appName): _ * _ * _ * (string -> FsKafka.KafkaConsumerConfig) * (ILogger -> unit) = let (Source.Kafka a) = x.Source let createConsumerConfig groupName = FsKafka.KafkaConsumerConfig.Create( @@ -98,7 +98,7 @@ module Args = Equinox.DynamoStore.Core.Log.InternalMetrics.dump log #endif #else - member x.ConnectStoreAndSource(appName) : Config.Store * _ * _ * (ILogger -> string -> SourceConfig) * (ILogger -> unit) = + member x.ConnectStoreAndSource(appName): Store.Context * _ * _ * (ILogger -> string -> SourceConfig) * (ILogger -> unit) = let cache = Equinox.Cache (appName, sizeMb = cacheSizeMb) match x.Source with | Source.Cosmos a -> @@ -108,7 +108,7 @@ module Args = let checkpointConfig = CosmosFeedConfig.Persistent (groupName, startFromTail, maxItems, lagFrequency) SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) let context = client |> CosmosStoreContext.create - let store = Config.Store.Cosmos (context, cache) + let store = Store.Context.Cosmos (context, cache) #if blank let targetStore = store #else @@ -120,9 +120,9 @@ module Args = let buildSourceConfig log groupName = let indexStore, startFromTail, batchSizeCutoff, tailSleepInterval, streamsDop = a.MonitoringParams(log) let checkpoints = a.CreateCheckpointStore(groupName, cache) - let load = DynamoLoadModeConfig.Hydrate (context, streamsDop) + let load = Propulsion.DynamoStore.WithData (streamsDop, context) SourceConfig.Dynamo (indexStore, checkpoints, load, startFromTail, batchSizeCutoff, tailSleepInterval, x.StatsInterval) - let store = Config.Store.Dynamo (context, cache) + let store = Store.Context.Dynamo (context, cache) #if blank let targetStore = store #else @@ -132,7 +132,7 @@ module Args = | Source.Esdb a -> let connection = a.Connect(appName, EventStore.Client.NodePreference.Leader) let context = EventStoreContext connection - let store = Config.Store.Esdb (context, cache) + let store = Store.Context.Esdb (context, cache) #if blank let targetStore = store #else @@ -141,8 +141,8 @@ module Args = let buildSourceConfig log groupName = let startFromTail, maxItems, tailSleepInterval = a.MonitoringParams(log) let checkpoints = a.CreateCheckpointStore(groupName, targetStore) - let hydrateBodies = true - SourceConfig.Esdb (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) + let withData = true + SourceConfig.Esdb (connection.ReadConnection, checkpoints, withData, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) store, targetStore, x.Sink, buildSourceConfig, fun log -> Equinox.EventStoreDb.Log.InternalMetrics.dump log Equinox.CosmosStore.Core.Log.InternalMetrics.dump log @@ -150,12 +150,12 @@ module Args = | Source.SqlMs a -> let connection = a.Connect() let context = SqlStreamStoreContext connection - let store = Config.Store.Sss (context, cache) + let store = Store.Context.Sss (context, cache) let buildSourceConfig log groupName = let startFromTail, maxItems, 
tailSleepInterval = a.MonitoringParams(log) let checkpoints = a.CreateCheckpointStoreSql(groupName) - let hydrateBodies = true - SourceConfig.Sss (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) + let withData = true + SourceConfig.Sss (connection.ReadConnection, checkpoints, withData, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) #if blank let targetStore = store #else @@ -169,11 +169,11 @@ module Args = #if (!kafka) member val Sink = () #if sourceKafka - member val Source : Source = match p.GetSubCommand() with + member val Source: Source = match p.GetSubCommand() with | Kafka p -> Source.Kafka <| SourceArgs.Kafka.Arguments(c, p) | p -> Args.missingArg $"Unexpected Source subcommand %A{p}" #else - member val Source : Source = match p.GetSubCommand() with + member val Source: Source = match p.GetSubCommand() with | Cosmos p -> Source.Cosmos <| SourceArgs.Cosmos.Arguments(c, p) | Dynamo p -> Source.Dynamo <| SourceArgs.Dynamo.Arguments(c, p) | Esdb p -> Source.Esdb <| SourceArgs.Esdb.Arguments(c, p) @@ -184,7 +184,7 @@ module Args = member val Sink = match p.GetSubCommand() with | Parameters.Kafka p -> KafkaSinkArguments(c, p) | p -> Args.missingArg $"Unexpected Sink subcommand %A{p}" - member x.Source : Source = x.Sink.Source + member x.Source: Source = x.Sink.Source and [] KafkaSinkParameters = | [] Broker of string @@ -210,7 +210,7 @@ module Args = | SqlMs _ -> "specify SqlStreamStore input parameters." #endif - and KafkaSinkArguments(c : Configuration, p : ParseResults) = + and KafkaSinkArguments(c: Configuration, p: ParseResults) = member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) member x.BuildTargetParams() = x.Broker, x.Topic @@ -219,7 +219,7 @@ module Args = | KafkaSinkParameters.Kafka p -> Source.Kafka <| SourceArgs.Kafka.Arguments(c, p) | p -> Args.missingArg $"Unexpected Source subcommand %A{p}" #else - member val Source : Source = match p.GetSubCommand() with + member val Source: Source = match p.GetSubCommand() with | Cosmos p -> Source.Cosmos <| SourceArgs.Cosmos.Arguments(c, p) | Dynamo p -> Source.Dynamo <| SourceArgs.Dynamo.Arguments(c, p) | Esdb p -> Source.Esdb <| SourceArgs.Esdb.Arguments(c, p) @@ -229,7 +229,7 @@ module Args = #endif /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args - let parse tryGetConfigValue argv : Arguments = + let parse tryGetConfigValue argv: Arguments = let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name let parser = ArgumentParser.Create(programName=programName) Arguments(Configuration tryGetConfigValue, parser.ParseCommandLine argv) @@ -238,7 +238,7 @@ let [] AppName = "ReactorTemplate" open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException -let build (args : Args.Arguments) = +let build (args: Args.Arguments) = let consumerGroupName, maxReadAhead, maxConcurrentStreams = args.ProcessorParams() #if sourceKafka let store, targetStore, sinkParams, createConsumerConfig, dumpMetrics = args.ConnectStoreAndSource(AppName) @@ -252,7 +252,7 @@ let build (args : Args.Arguments) = #if kafka let broker, topic = sinkParams.BuildTargetParams() let producer = Propulsion.Kafka.Producer(Log.Logger, AppName, broker, Confluent.Kafka.Acks.All, topic) - let produceSummary (x : Propulsion.Codec.NewtonsoftJson.RenderedSummary) = + let produceSummary (x: 
Propulsion.Codec.NewtonsoftJson.RenderedSummary) = producer.Produce(x.s, Propulsion.Codec.NewtonsoftJson.Serdes.Serialize x) |> Propulsion.Internal.Async.ofTask let dumpMetrics log = dumpMetrics log @@ -261,7 +261,7 @@ let build (args : Args.Arguments) = #if blank // kafka && blank let handle = Handler.handle produceSummary #else // kafka && !blank - let srcService = Todo.Config.create store + let srcService = Todo.Factory.create store let handle = Handler.handle srcService produceSummary #endif // kafka && !blank #else // !kafka (i.e., ingester) @@ -270,8 +270,8 @@ let build (args : Args.Arguments) = let handle = Ingester.handle let stats = Ingester.Stats(log, args.StatsInterval, args.StateInterval, args.VerboseStore, dumpMetrics) #else // !kafka && !blank - let srcService = Todo.Config.create store - let dstService = TodoSummary.Config.create targetStore + let srcService = Todo.Factory.create store + let dstService = TodoSummary.Factory.create targetStore let handle = Ingester.handle srcService dstService let stats = Ingester.Stats(log, args.StatsInterval, args.StateInterval, args.VerboseStore, dumpMetrics) #endif // blank @@ -280,33 +280,28 @@ let build (args : Args.Arguments) = (* ESTABLISH sink; AWAIT *) #if sourceKafka - let parseStreamEvents (res : Confluent.Kafka.ConsumeResult<_, _>) : seq> = + let parseStreamEvents (res: Confluent.Kafka.ConsumeResult<_, _>): seq> = Propulsion.Codec.NewtonsoftJson.RenderedSpan.parse res.Message.Value let consumerConfig = createConsumerConfig consumerGroupName - let pipeline = - Propulsion.Kafka.StreamsConsumer.Start - ( Log.Logger, consumerConfig, parseStreamEvents, handle, maxConcurrentStreams, - stats = stats, statsInterval = args.StateInterval) + let pipeline = Propulsion.Kafka.Factory.StartConcurrent(Log.Logger, consumerConfig, parseStreamEvents, maxConcurrentStreams, handle, stats) [| pipeline.AwaitWithStopOnCancellation() #else // !sourceKafka let sink = #if kafka // !sourceKafka && kafka #if blank // !sourceKafka && kafka && blank - Handler.Config.StartSink(log, stats, handle, maxReadAhead, maxConcurrentStreams, purgeInterval = args.PurgeInterval) + Handler.Factory.StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead, purgeInterval = args.PurgeInterval) #else // !sourceKafka && kafka && !blank - Propulsion.Streams.Sync.StreamsSync.Start( - Log.Logger, maxReadAhead, maxConcurrentStreams, handle, stats, args.StatsInterval, - Propulsion.Streams.Default.jsonSize, Propulsion.Streams.Default.eventSize) + Propulsion.Sinks.Factory.StartConcurrentChunked(Log.Logger, maxReadAhead, maxConcurrentStreams, handle, stats, purgeInterval = args.PurgeInterval) #endif // !sourceKafka && kafka && !blank #else // !sourceKafka && !kafka (i.e., ingester) - Ingester.Config.StartSink(log, stats, handle, maxReadAhead, maxConcurrentStreams, purgeInterval = args.PurgeInterval) + Ingester.Factory.StartSink(log, stats, maxConcurrentStreams, handle, maxReadAhead, purgeInterval = args.PurgeInterval) #endif // !sourceKafka && !kafka let source, _awaitReactions = let sourceConfig = buildSourceConfig log consumerGroupName #if kafka - Handler.Config.StartSource(log, sink, sourceConfig) + Handler.Factory.StartSource(log, sink, sourceConfig) #else - Ingester.Config.StartSource(log, sink, sourceConfig) + Ingester.Factory.StartSource(log, sink, sourceConfig) #endif [| source.AwaitWithStopOnCancellation() sink.AwaitWithStopOnCancellation() @@ -317,7 +312,7 @@ let build (args : Args.Arguments) = let main argv = try let args = Args.parse EnvVar.tryGet argv try Log.Logger 
<- LoggerConfiguration().Configure(verbose=args.Verbose).CreateLogger() - try build args |> Async.Parallel |> Async.Ignore |> Async.RunSynchronously; 0 + try build args |> Async.Parallel |> Async.Ignore |> Async.RunSynchronously; 0 with e when not (e :? Args.MissingArg) && not (e :? System.Threading.Tasks.TaskCanceledException) -> Log.Fatal(e, "Exiting"); 2 finally Log.CloseAndFlush() with Args.MissingArg msg -> eprintfn "%s" msg; 1 diff --git a/propulsion-reactor/Reactor.fsproj b/propulsion-reactor/Reactor.fsproj index 98561066d..12e9c3e03 100644 --- a/propulsion-reactor/Reactor.fsproj +++ b/propulsion-reactor/Reactor.fsproj @@ -11,7 +11,7 @@ - + @@ -35,14 +35,14 @@ - - - - - - + + + + + + - + diff --git a/propulsion-reactor/SourceArgs.fs b/propulsion-reactor/SourceArgs.fs index d929ae39c..c5664280b 100644 --- a/propulsion-reactor/SourceArgs.fs +++ b/propulsion-reactor/SourceArgs.fs @@ -28,14 +28,14 @@ type [] module TargetStoreArgs = - let connectTarget targetStore cache: Config.Store = + let connectTarget targetStore cache: Store.Context = match targetStore with | TargetStoreArgs.Cosmos a -> let context = a.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create - Config.Store.Cosmos (context, cache) + Store.Context.Cosmos (context, cache) | TargetStoreArgs.Dynamo a -> let context = a.Connect() |> DynamoStoreContext.create - Config.Store.Dynamo (context, cache) + Store.Context.Dynamo (context, cache) #endif #if sourceKafka @@ -59,7 +59,7 @@ module Kafka = | Cosmos _ -> "CosmosDb Sink parameters." | Dynamo _ -> "DynamoDb Sink parameters." - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) member val MaxInFlightBytes = p.GetResult(MaxInflightMb, 10.) * 1024. * 1024. |> int64 @@ -67,12 +67,12 @@ module Kafka = member x.BuildSourceParams() = x.Broker, x.Topic #if !(kafka && blank) - member private _.TargetStoreArgs : TargetStoreArgs = + member private _.TargetStoreArgs: TargetStoreArgs = match p.GetSubCommand() with | Cosmos cosmos -> TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) | Dynamo dynamo -> TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `kafka`" - member x.ConnectTarget(cache) : Config.Store = + member x.ConnectTarget(cache): Store.Context = TargetStoreArgs.connectTarget x.TargetStoreArgs cache #endif #else // !sourceKafka @@ -114,7 +114,7 @@ module Cosmos = | Dynamo _ -> "DynamoDb Sink parameters." #endif - type Arguments(c : Args.Configuration, p : ParseResults) = + type Arguments(c: Args.Configuration, p: ParseResults) = let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString let mode = p.TryGetResult ConnectionMode let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds @@ -130,8 +130,8 @@ module Cosmos = let lagFrequency = p.GetResult(LagFreqM, 1.)
|> TimeSpan.FromMinutes member _.Verbose = p.Contains Verbose member private _.ConnectLeases() = connector.CreateUninitialized(database, leaseContainerId) - member x.MonitoringParams(log : ILogger) = - let leases : Microsoft.Azure.Cosmos.Container = x.ConnectLeases() + member x.MonitoringParams(log: ILogger) = + let leases: Microsoft.Azure.Cosmos.Container = x.ConnectLeases() log.Information("ChangeFeed Leases Database {db} Container {container}. MaxItems limited to {maxItems}", leases.Database.Id, leases.Id, Option.toNullable maxItems) if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") @@ -139,12 +139,12 @@ module Cosmos = member x.ConnectStoreAndMonitored() = connector.ConnectStoreAndMonitored(database, containerId) #if !(kafka && blank) - member private _.TargetStoreArgs : TargetStoreArgs = + member private _.TargetStoreArgs: TargetStoreArgs = match p.GetSubCommand() with | Cosmos cosmos -> TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) | Dynamo dynamo -> TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `cosmos`" - member x.ConnectTarget(cache) : Config.Store = + member x.ConnectTarget(cache): Store.Context = TargetStoreArgs.connectTarget x.TargetStoreArgs cache #endif @@ -191,7 +191,7 @@ module Dynamo = | Dynamo _ -> "DynamoDb Sink parameters." #endif - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let conn = match p.TryGetResult RegionProfile |> Option.orElseWith (fun () -> c.DynamoRegion) with | Some systemName -> Choice1Of2 systemName @@ -219,21 +219,21 @@ module Dynamo = member val Verbose = p.Contains Verbose member _.Connect() = connector.LogConfiguration() client.ConnectStore("Main", table) |> DynamoStoreContext.create - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("DynamoStoreSource BatchSizeCutoff {batchSizeCutoff} Hydrater parallelism {streamsDop}", batchSizeCutoff, streamsDop) let indexStoreClient = indexStoreClient.Value if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") indexStoreClient, fromTail, batchSizeCutoff, tailSleepInterval, streamsDop member _.CreateCheckpointStore(group, cache) = let indexTable = indexStoreClient.Value - indexTable.CreateCheckpointService(group, cache, Config.log) + indexTable.CreateCheckpointService(group, cache, Store.log) #if !(kafka && blank) - member private _.TargetStoreArgs : TargetStoreArgs = + member private _.TargetStoreArgs: TargetStoreArgs = match p.GetSubCommand() with | Cosmos cosmos -> TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) | Dynamo dynamo -> TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `dynamo`" - member x.ConnectTarget(cache) : Config.Store = + member x.ConnectTarget(cache): Store.Context = TargetStoreArgs.connectTarget x.TargetStoreArgs cache #endif @@ -245,14 +245,14 @@ module Esdb = alternatively one could use a SQL Server DB via Propulsion.SqlStreamStore For now, we store the Checkpoints in one of the above stores as this sample uses one for the read models anyway *) - let private createCheckpointStore (consumerGroup, checkpointInterval) : _ -> Propulsion.Feed.IFeedCheckpointStore = function - | Config.Store.Cosmos (context, cache) -> - Propulsion.Feed.ReaderCheckpoint.CosmosStore.create
Config.log (consumerGroup, checkpointInterval) (context, cache) - | Config.Store.Dynamo (context, cache) -> - Propulsion.Feed.ReaderCheckpoint.DynamoStore.create Config.log (consumerGroup, checkpointInterval) (context, cache) + let private createCheckpointStore (consumerGroup, checkpointInterval): _ -> Propulsion.Feed.IFeedCheckpointStore = function + | Store.Context.Cosmos (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Store.log (consumerGroup, checkpointInterval) (context, cache) + | Store.Context.Dynamo (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.DynamoStore.create Store.log (consumerGroup, checkpointInterval) (context, cache) #if !(sourceKafka && kafka) - | Config.Store.Esdb _ - | Config.Store.Sss _ -> failwith "Unexpected store type" + | Store.Context.Esdb _ + | Store.Context.Sss _ -> failwith "Unexpected store type" #endif type [] Parameters = @@ -281,7 +281,7 @@ module Esdb = | Dynamo _ -> "DynamoDB Target Store parameters (also used for checkpoint storage)." #endif - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let startFromTail = p.Contains FromTail let batchSize = p.GetResult(BatchSize, 100) let tailSleepInterval = TimeSpan.FromSeconds 0.5 @@ -294,24 +294,24 @@ module Esdb = let checkpointInterval = TimeSpan.FromHours 1. member val Verbose = p.Contains Verbose - member _.Connect(appName, nodePreference) : Equinox.EventStoreDb.EventStoreConnection = + member _.Connect(appName, nodePreference): Equinox.EventStoreDb.EventStoreConnection = Log.Information("EventStore {discovery}", connectionStringLoggable) let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] Equinox.EventStoreDb.EventStoreConnector(timeout, retries, tags = tags) .Establish(appName, discovery, Equinox.EventStoreDb.ConnectionStrategy.ClusterSingle nodePreference) - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("EventStoreSource BatchSize {batchSize} ", batchSize) startFromTail, batchSize, tailSleepInterval - member _.CreateCheckpointStore(group, store) : Propulsion.Feed.IFeedCheckpointStore = + member _.CreateCheckpointStore(group, store): Propulsion.Feed.IFeedCheckpointStore = createCheckpointStore (group, checkpointInterval) store #if !(kafka && blank) - member private _.TargetStoreArgs : TargetStoreArgs = + member private _.TargetStoreArgs: TargetStoreArgs = match p.GetSubCommand() with | Cosmos cosmos -> TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) | Dynamo dynamo -> TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `esdb`" - member x.ConnectTarget(cache) : Config.Store = + member x.ConnectTarget(cache): Store.Context = TargetStoreArgs.connectTarget x.TargetStoreArgs cache #endif @@ -346,7 +346,7 @@ module Sss = | Dynamo _ -> "DynamoDB Target Store parameters" #endif - type Arguments(c : Configuration, p : ParseResults) = + type Arguments(c: Configuration, p: ParseResults) = let startFromTail = p.Contains FromTail let tailSleepInterval = p.GetResult(Tail, 1.) |> TimeSpan.FromSeconds let checkpointEventInterval = TimeSpan.FromHours 1. 
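// A usage sketch (hypothetical group/store values, mirroring the Arguments plumbing above): the
// private createCheckpointStore dispatch means a single call site obtains a Cosmos- or Dynamo-backed
// checkpoint store according to whichever Store.Context is in force, at the 1h checkpoint interval:
//
//     let checkpoints: Propulsion.Feed.IFeedCheckpointStore =
//         a.CreateCheckpointStore("consumerGroupName", targetStore) // dispatches per Store.Context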
// Ignored when storing to Propulsion.SqlStreamStore.ReaderCheckpoint @@ -371,19 +371,19 @@ module Sss = Log.Information("SqlStreamStore MsSql Connection {connectionString} Schema {schema} AutoCreate {autoCreate}", conn, schema, autoCreate) let rawStore = Equinox.SqlStreamStore.MsSql.Connector(sssConnectionString, schema, autoCreate=autoCreate).Connect() |> Async.RunSynchronously Equinox.SqlStreamStore.SqlStreamStoreConnection rawStore - member _.MonitoringParams(log : ILogger) = + member _.MonitoringParams(log: ILogger) = log.Information("SqlStreamStoreSource BatchSize {batchSize} ", batchSize) startFromTail, batchSize, tailSleepInterval - member x.CreateCheckpointStoreSql(groupName) : Propulsion.Feed.IFeedCheckpointStore = + member x.CreateCheckpointStoreSql(groupName): Propulsion.Feed.IFeedCheckpointStore = let connectionString = x.BuildCheckpointsConnectionString() Propulsion.SqlStreamStore.ReaderCheckpoint.Service(connectionString, groupName, checkpointEventInterval) #if !(kafka && blank) - member private _.TargetStoreArgs : TargetStoreArgs = + member private _.TargetStoreArgs: TargetStoreArgs = match p.GetSubCommand() with | Cosmos cosmos -> TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) | Dynamo dynamo -> TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `sss`" - member x.ConnectTarget(cache) : Config.Store = + member x.ConnectTarget(cache): Store.Context = TargetStoreArgs.connectTarget x.TargetStoreArgs cache #endif #endif diff --git a/propulsion-reactor/SourceConfig.fs b/propulsion-reactor/SourceConfig.fs index eb16760ff..7f2a80c5b 100644 --- a/propulsion-reactor/SourceConfig.fs +++ b/propulsion-reactor/SourceConfig.fs @@ -5,48 +5,46 @@ open System.Threading.Tasks [] type SourceConfig = - | Cosmos of monitoredContainer : Microsoft.Azure.Cosmos.Container - * leasesContainer : Microsoft.Azure.Cosmos.Container - * checkpoints : CosmosFeedConfig - * tailSleepInterval : TimeSpan - | Dynamo of indexStore : Equinox.DynamoStore.DynamoStoreClient - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * loading : DynamoLoadModeConfig - * startFromTail : bool - * batchSizeCutoff : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan - | Esdb of client : EventStore.Client.EventStoreClient - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * hydrateBodies : bool - * startFromTail : bool - * batchSize : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan - | Sss of client : SqlStreamStore.IStreamStore - * checkpoints : Propulsion.Feed.IFeedCheckpointStore - * hydrateBodies : bool - * startFromTail : bool - * batchSize : int - * tailSleepInterval : TimeSpan - * statsInterval : TimeSpan + | Cosmos of monitoredContainer: Microsoft.Azure.Cosmos.Container + * leasesContainer: Microsoft.Azure.Cosmos.Container + * checkpoints: CosmosFeedConfig + * tailSleepInterval: TimeSpan + | Dynamo of indexStore: Equinox.DynamoStore.DynamoStoreClient + * checkpoints: Propulsion.Feed.IFeedCheckpointStore + * loading: Propulsion.DynamoStore.EventLoadMode + * startFromTail: bool + * batchSizeCutoff: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan + | Esdb of client: EventStore.Client.EventStoreClient + * checkpoints: Propulsion.Feed.IFeedCheckpointStore + * withData: bool + * startFromTail: bool + * batchSize: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan + | Sss of client: SqlStreamStore.IStreamStore + * checkpoints: 
Propulsion.Feed.IFeedCheckpointStore + * withData: bool + * startFromTail: bool + * batchSize: int + * tailSleepInterval: TimeSpan + * statsInterval: TimeSpan and [] CosmosFeedConfig = - | Ephemeral of processorName : string - | Persistent of processorName : string * startFromTail : bool * maxItems : int option * lagFrequency : TimeSpan -and [] DynamoLoadModeConfig = - | Hydrate of monitoredContext : Equinox.DynamoStore.DynamoStoreContext * hydrationConcurrency : int + | Ephemeral of processorName: string + | Persistent of processorName: string * startFromTail: bool * maxItems: int option * lagFrequency: TimeSpan module SourceConfig = module Cosmos = open Propulsion.CosmosStore - let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter - (monitoredContainer, leasesContainer, checkpointConfig, tailSleepInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = - let parseFeedDoc = EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter + let start log (sink: Propulsion.Sinks.Sink) categories + (monitoredContainer, leasesContainer, checkpointConfig, tailSleepInterval): Propulsion.Pipeline * (TimeSpan -> Task) option = + let parseFeedDoc = EquinoxSystemTextJsonParser.enumCategoryEvents categories let observer = CosmosStoreSource.CreateObserver(log, sink.StartIngester, Seq.collect parseFeedDoc) let source = match checkpointConfig with | Ephemeral processorName -> - let withStartTime1sAgo (x : Microsoft.Azure.Cosmos.ChangeFeedProcessorBuilder) = + let withStartTime1sAgo (x: Microsoft.Azure.Cosmos.ChangeFeedProcessorBuilder) = x.WithStartTime(let t = DateTime.UtcNow in t.AddSeconds -1.) let lagFrequency = TimeSpan.FromMinutes 1. CosmosStoreSource.Start(log, monitoredContainer, leasesContainer, processorName, observer, @@ -59,48 +57,45 @@ module SourceConfig = source, None module Dynamo = open Propulsion.DynamoStore - let start (log, storeLog) (sink : Propulsion.Streams.Default.Sink) categoryFilter - (indexStore, checkpoints, loadModeConfig, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = - let loadMode = - match loadModeConfig with - | Hydrate (monitoredContext, hydrationConcurrency) -> LoadMode.Hydrated (categoryFilter, hydrationConcurrency, monitoredContext) + let start (log, storeLog) (sink: Propulsion.Sinks.Sink) categories + (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval): Propulsion.Pipeline * (TimeSpan -> Task) option = let source = DynamoStoreSource( log, statsInterval, indexStore, batchSizeCutoff, tailSleepInterval, - checkpoints, sink, loadMode, + checkpoints, sink, loadMode, categories = categories, startFromTail = startFromTail, storeLog = storeLog) let source = source.Start() source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) module Esdb = open Propulsion.EventStoreDb - let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter - (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = + let start log (sink: Propulsion.Sinks.Sink) categories + (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval): Propulsion.Pipeline * (TimeSpan -> Task) option = let source = EventStoreSource( log, statsInterval, client, batchSize, tailSleepInterval, - checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) + 
checkpoints, sink, categories, withData = withData, startFromTail = startFromTail) let source = source.Start() source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) module Sss = open Propulsion.SqlStreamStore - let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter - (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Task) option = + let start log (sink: Propulsion.Sinks.Sink) categories + (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval): Propulsion.Pipeline * (TimeSpan -> Task) option = let source = SqlStreamStoreSource( log, statsInterval, client, batchSize, tailSleepInterval, - checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) + checkpoints, sink, categories, withData = withData, startFromTail = startFromTail) let source = source.Start() source, Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) - let start (log, storeLog) sink categoryFilter : SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Task) option = function + let start (log, storeLog) sink categories: SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Task) option = function | SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) -> - Cosmos.start log sink categoryFilter (monitored, leases, checkpointConfig, tailSleepInterval) - | SourceConfig.Dynamo (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) -> - Dynamo.start (log, storeLog) sink categoryFilter (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) - | SourceConfig.Esdb (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> - Esdb.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) - | SourceConfig.Sss (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> - Sss.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) + Cosmos.start log sink categories (monitored, leases, checkpointConfig, tailSleepInterval) + | SourceConfig.Dynamo (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) -> + Dynamo.start (log, storeLog) sink categories (indexStore, checkpoints, loadMode, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) + | SourceConfig.Esdb (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval) -> + Esdb.start log sink categories (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval) + | SourceConfig.Sss (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval) -> + Sss.start log sink categories (client, checkpoints, withData, startFromTail, batchSize, tailSleepInterval, statsInterval) diff --git a/propulsion-reactor/Config.fs b/propulsion-reactor/Store.fs similarity index 68% rename from propulsion-reactor/Config.fs rename to propulsion-reactor/Store.fs index bbe338157..5e77a8076 100644 --- a/propulsion-reactor/Config.fs +++ b/propulsion-reactor/Store.fs @@ -1,27 +1,20 @@ -module ReactorTemplate.Config +module ReactorTemplate.Store let log = 
Serilog.Log.ForContext("isMetric", true) let createDecider cat = Equinox.Decider.resolve log cat -module EventCodec = +module Codec = open FsCodec.SystemTextJson - let private defaultOptions = Options.Create() let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = - Codec.Create<'t>(options = defaultOptions) - let genJe<'t when 't :> TypeShape.UnionContract.IUnionContract> = - CodecJsonElement.Create<'t>(options = defaultOptions) - let private withUpconverter<'c, 'e when 'c :> TypeShape.UnionContract.IUnionContract> up : FsCodec.IEventCodec<'e, _, _> = - let down (_ : 'e) = failwith "Unexpected" - Codec.Create<'e, 'c, _>(up, down, options = defaultOptions) - let withIndex<'c when 'c :> TypeShape.UnionContract.IUnionContract> : FsCodec.IEventCodec = - let up struct (raw : FsCodec.ITimelineEvent<_>, e) = raw.Index, e - withUpconverter<'c, int64 * 'c> up + Codec.Create<'t>() // options = Options.Default + let genJsonElement<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>() // options = Options.Default module Cosmos = - let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = + let private createCached codec initial fold accessStrategy (context, cache): Equinox.Category<_, _, _> = let cacheStrategy = Equinox.CosmosStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) Equinox.CosmosStore.CosmosStoreCategory(context, codec, fold, initial, cacheStrategy, accessStrategy) @@ -35,7 +28,7 @@ module Cosmos = module Dynamo = - let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = + let private createCached codec initial fold accessStrategy (context, cache): Equinox.Category<_, _, _> = let cacheStrategy = Equinox.DynamoStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) Equinox.DynamoStore.DynamoStoreCategory(context, FsCodec.Deflate.EncodeUncompressed codec, fold, initial, cacheStrategy, accessStrategy) @@ -51,21 +44,21 @@ module Dynamo = module Esdb = let create codec initial fold (context, cache) = - let cacheStrategy = Equinox.EventStoreDb.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) + let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) Equinox.EventStoreDb.EventStoreCategory(context, codec, fold, initial, cacheStrategy) module Sss = let create codec initial fold (context, cache) = - let cacheStrategy = Equinox.SqlStreamStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) + let cacheStrategy = Equinox.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
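// A recap-sketch of the consumption pattern for these helpers (the two lines below mirror Todo.fs
// later in this patch, so the names are the template's own, not invented): an aggregate generates
// its codecs once from the Events contract, then a Factory matches the Context to pick a category:
//
//     let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement
//     let create (Category cat) = Service(streamId >> Store.createDecider cat Category)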
Equinox.SqlStreamStore.SqlStreamStoreCategory(context, codec, fold, initial, cacheStrategy) #endif [] -type Store = - | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache - | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache +type Context = + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Cache + | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Cache #if !(sourceKafka && kafka) - | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.Core.ICache - | Sss of Equinox.SqlStreamStore.SqlStreamStoreContext * Equinox.Core.ICache + | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.Cache + | Sss of Equinox.SqlStreamStore.SqlStreamStoreContext * Equinox.Cache #endif diff --git a/propulsion-reactor/Todo.fs b/propulsion-reactor/Todo.fs index 659cc04db..4ecd5df6c 100644 --- a/propulsion-reactor/Todo.fs +++ b/propulsion-reactor/Todo.fs @@ -9,10 +9,10 @@ let [] (|StreamName|_|) = function FsCodec.StreamName.CategoryAn // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = - type ItemData = { id : int; order : int; title : string; completed : bool } - type DeletedData = { id : int } - type ClearedData = { nextId : int } - type SnapshotData = { nextId : int; items : ItemData[] } + type ItemData = { id: int; order: int; title: string; completed: bool } + type DeletedData = { id: int } + type ClearedData = { nextId: int } + type SnapshotData = { nextId: int; items: ItemData[] } type Event = | Added of ItemData | Updated of ItemData @@ -20,25 +20,30 @@ module Events = | Cleared of ClearedData | Snapshotted of SnapshotData interface TypeShape.UnionContract.IUnionContract - let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJe + let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement module Reactions = - let [] Category = Category - let (|Decode|) (stream, span : Propulsion.Streams.StreamSpan<_>) = - span |> Array.chooseV (EventCodec.tryDecode Events.codec stream) - let [] (|Parse|_|) = function - | (StreamName clientId, _) & Decode events -> ValueSome struct (clientId, events) - | _ -> ValueNone - + let categories = [| Category |] + /// Allows us to skip producing summaries for events that we know won't result in an externally discernable change to the summary output - let impliesStateChange = function Events.Snapshotted _ -> false | _ -> true + let private impliesStateChange = function Events.Snapshotted _ -> false | _ -> true + + let private dec = Streams.Codec.gen + let [] private (|Parse|_|) = function + | struct (StreamName clientId, _) & Streams.Decode dec events -> ValueSome struct (clientId, events) + | _ -> ValueNone + let (|ImpliesStateChange|NoStateChange|NotApplicable|) = function + | Parse (clientId, events) -> + if events |> Array.exists impliesStateChange then ImpliesStateChange (clientId, events.Length) + else NoStateChange events.Length + | _, events -> NotApplicable events.Length /// Types and mapping logic used maintain relevant State based on Events observed on the Todo List Stream module Fold = /// Present state of the Todo List as inferred from the Events we've seen to date - type State = { items : Events.ItemData list; nextId : int } + type State = { items: Events.ItemData list; nextId: int } /// State implied by the absence of any events on this stream let initial = { items = []; nextId = 0 } /// Compute State change implied by a 
given Event @@ -49,28 +54,28 @@ module Fold = | Events.Cleared e -> { nextId = e.nextId; items = [] } | Events.Snapshotted s -> { nextId = s.nextId; items = List.ofArray s.items } /// Folds a set of events from the store into a given `state` - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve /// Determines whether a given event represents a checkpoint that implies we don't need to see any preceding events let isOrigin = function Events.Cleared _ | Events.Snapshotted _ -> true | _ -> false /// Prepares an Event that encodes all relevant aspects of a State such that `evolve` can rehydrate a complete State from it let toSnapshot state = Events.Snapshotted { nextId = state.nextId; items = Array.ofList state.items } /// Defines operations that a Controller or Projector can perform on a Todo List -type Service internal (resolve : ClientId -> Equinox.Decider) = +type Service internal (resolve: ClientId -> Equinox.Decider) = /// Load and render the state - member _.QueryWithVersion(clientId, render : Fold.State -> 'res) : Async = + member _.QueryWithVersion(clientId, render: Fold.State -> 'res): Async = let decider = resolve clientId // Establish the present state of the Stream, project from that (using QueryEx so we can determine the version in effect) decider.QueryEx(fun c -> c.Version, render c.State) -module Config = +module Factory = let private (|Category|) = function - | Config.Store.Dynamo (context, cache) -> Config.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + | Store.Context.Dynamo (context, cache) -> Store.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) #if !(sourceKafka && kafka) - | Config.Store.Esdb (context, cache) -> Config.Esdb.create Events.codec Fold.initial Fold.fold (context, cache) - | Config.Store.Sss (context, cache) -> Config.Sss.create Events.codec Fold.initial Fold.fold (context, cache) + | Store.Context.Esdb (context, cache) -> Store.Esdb.create Events.codec Fold.initial Fold.fold (context, cache) + | Store.Context.Sss (context, cache) -> Store.Sss.create Events.codec Fold.initial Fold.fold (context, cache) #endif - let create (Category cat) = Service(streamId >> Config.createDecider cat Category) + let create (Category cat) = Service(streamId >> Store.createDecider cat Category) diff --git a/propulsion-reactor/TodoSummary.fs b/propulsion-reactor/TodoSummary.fs index 7788dce6b..3d4f23951 100644 --- a/propulsion-reactor/TodoSummary.fs +++ b/propulsion-reactor/TodoSummary.fs @@ -7,28 +7,28 @@ let streamId = Equinox.StreamId.gen ClientId.toString module Events = type ItemData = { id: int; order: int; title: string; completed: bool } - type SummaryData = { items : ItemData[] } - type IngestedData = { version : int64; value : SummaryData } + type SummaryData = { items: ItemData[] } + type IngestedData = { version: int64; value: SummaryData } type Event = | Ingested of IngestedData interface TypeShape.UnionContract.IUnionContract - let codec, codecJe =
Config.EventCodec.gen, Config.EventCodec.genJe + let codec, codecJe = Store.Codec.gen, Store.Codec.genJsonElement module Fold = - type State = { version : int64; value : Events.SummaryData option } + type State = { version: int64; value: Events.SummaryData option } let initial = { version = -1L; value = None } let evolve _state = function | Events.Ingested e -> { version = e.version; value = Some e.value } - let fold : State -> Events.Event seq -> State = Seq.fold evolve + let fold: State -> Events.Event seq -> State = Seq.fold evolve let toSnapshot state = Events.Ingested { version = state.version; value = state.value.Value } -let ingest version value (state : Fold.State) = +let ingest version value (state: Fold.State) = if state.version >= version then false, [] else true, [Events.Ingested { version = version; value = value }] type Item = { id: int; order: int; title: string; completed: bool } -let render : Fold.State -> Item[] = function +let render: Fold.State -> Item[] = function | { value = Some { items = xs} } -> [| for x in xs -> { id = x.id @@ -38,24 +38,24 @@ let render : Fold.State -> Item[] = function | _ -> [||] /// Defines the operations that the Read side of a Controller and/or the Ingester can perform on the 'aggregate' -type Service internal (resolve : ClientId -> Equinox.Decider) = +type Service internal (resolve: ClientId -> Equinox.Decider) = /// Returns false if the ingestion was rejected due to being an older version of the data than is presently being held - member _.TryIngest(clientId, version, value) : Async = + member _.TryIngest(clientId, version, value): Async = let decider = resolve clientId decider.Transact(ingest version value) - member _.Read(clientId) : Async = + member _.Read(clientId): Async = let decider = resolve clientId decider.Query render -module Config = +module Factory = let private (|Category|) = function - | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createRollingState Events.codecJe Fold.initial Fold.fold Fold.toSnapshot (context, cache) - | Config.Store.Dynamo (context, cache) -> Config.Dynamo.createRollingState Events.codec Fold.initial Fold.fold Fold.toSnapshot (context, cache) + | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createRollingState Events.codecJe Fold.initial Fold.fold Fold.toSnapshot (context, cache) + | Store.Context.Dynamo (context, cache) -> Store.Dynamo.createRollingState Events.codec Fold.initial Fold.fold Fold.toSnapshot (context, cache) #if !(sourceKafka && kafka) - | Config.Store.Esdb (context, cache) -> Config.Esdb.create Events.codec Fold.initial Fold.fold (context, cache) - | Config.Store.Sss (context, cache) -> Config.Sss.create Events.codec Fold.initial Fold.fold (context, cache) + | Store.Context.Esdb (context, cache) -> Store.Esdb.create Events.codec Fold.initial Fold.fold (context, cache) + | Store.Context.Sss (context, cache) -> Store.Sss.create Events.codec Fold.initial Fold.fold (context, cache) #endif - let create (Category cat) = Service(streamId >> Config.createDecider cat Category) + let create (Category cat) = Service(streamId >> Store.createDecider cat Category) diff --git a/propulsion-summary-consumer/Infrastructure.fs b/propulsion-summary-consumer/Infrastructure.fs index 1f83e5880..2c1b74c00 100644 --- a/propulsion-summary-consumer/Infrastructure.fs +++ b/propulsion-summary-consumer/Infrastructure.fs @@ -7,31 +7,42 @@ open System module Guid = - let inline toStringN (x : Guid) = x.ToString "N" + let inline toStringN (x: Guid) = x.ToString "N" /// ClientId strongly typed id; 
represented internally as a Guid; not used for storage so rendering is not significant type ClientId = Guid and [] clientId module ClientId = - let toString (value : ClientId) : string = Guid.toStringN %value - let parse (value : string) : ClientId = let raw = Guid.Parse value in % raw + let toString (value: ClientId): string = Guid.toStringN %value + let parse (value: string): ClientId = let raw = Guid.Parse value in % raw let (|Parse|) = parse module EnvVar = - let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj + let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj -module EventCodec = +module Streams = - /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to `log` citing the `stream` and content) - let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = - match codec.TryDecode x with - | ValueNone -> - if Log.IsEnabled Serilog.Events.LogEventLevel.Debug then - Log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true) - .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName) + let private renderBody (x: Propulsion.Sinks.EventBody) = System.Text.Encoding.UTF8.GetString(x.Span) + // Uses the supplied codec to decode the supplied event record (iff at LogEventLevel.Debug, failures are logged, citing `stream` and `.Data`) + let private tryDecode<'E> (codec: Propulsion.Sinks.Codec<'E>) (streamName: FsCodec.StreamName) event = + match codec.TryDecode event with + | ValueNone when Log.IsEnabled Serilog.Events.LogEventLevel.Debug -> + Log.ForContext("eventData", renderBody event.Data) + .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, event.EventType, streamName) ValueNone | x -> x + let [] (|DecodeNewest|_|) codec struct (stream, events: Propulsion.Sinks.Event[]): 'E voption = + events |> Seq.rev |> Propulsion.Internal.Seq.tryPickV (tryDecode codec stream) + + module Codec = + + let private withUpconverter<'c, 'e when 'c :> TypeShape.UnionContract.IUnionContract> up: Propulsion.Sinks.Codec<'e> = + let down (_: 'e) = failwith "Unexpected" + FsCodec.SystemTextJson.Codec.Create<'e, 'c, _>(up, down) // options = Options.Default + let genWithIndex<'c when 'c :> TypeShape.UnionContract.IUnionContract> : Propulsion.Sinks.Codec = + let up (raw: FsCodec.ITimelineEvent<_>) e = raw.Index, e + withUpconverter<'c, int64 * 'c> up type Equinox.CosmosStore.CosmosStoreConnector with @@ -51,7 +62,7 @@ type Equinox.CosmosStore.CosmosStoreConnector with module CosmosStoreContext = /// Create with default packing and querying policies. 
Search for other `module CosmosStoreContext` impls for custom variations - let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = + let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) @@ -59,7 +70,7 @@ module CosmosStoreContext = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, ?verbose) = + static member Configure(configuration: LoggerConfiguration, ?verbose) = configuration .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c diff --git a/propulsion-summary-consumer/Ingester.fs b/propulsion-summary-consumer/Ingester.fs index 0f96b1733..eaf902d3f 100644 --- a/propulsion-summary-consumer/Ingester.fs +++ b/propulsion-summary-consumer/Ingester.fs @@ -2,44 +2,41 @@ /// Due to this, we should ensure that writes only happen where the update is not redundant and/or a replay of a previous message module ConsumerTemplate.Ingester -open Propulsion.Internal - /// Defines the contract we share with the proReactor's published feed module Contract = let [] Category = "TodoSummary" + let [] (|StreamName|_|) = function + | FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> ValueSome clientId + | _ -> ValueNone /// A single Item in the list - type ItemInfo = { id : int; order : int; title : string; completed : bool } + type ItemInfo = { id: int; order: int; title: string; completed: bool } /// All data summarized for Summary Event Stream - type SummaryInfo = { items : ItemInfo[] } + type SummaryInfo = { items: ItemInfo[] } type Message = | [] Summary of SummaryInfo interface TypeShape.UnionContract.IUnionContract - type VersionAndMessage = int64*Message + // We also want the index (which is the Version of the Summary) whenever we're handling an event - let private codec : FsCodec.IEventCodec = Config.EventCodec.withIndex - let [] (|DecodeNewest|_|) (stream, span : Propulsion.Streams.StreamSpan<_>) : VersionAndMessage voption = - span |> Seq.rev |> Seq.tryPickV (EventCodec.tryDecode codec stream) - let [] (|StreamName|_|) = function - | FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> ValueSome clientId - | _ -> ValueNone - let [] (|MatchNewest|_|) = function - | (StreamName clientId, _) & DecodeNewest (version, update) -> ValueSome struct (clientId, version, update) + type VersionAndMessage = int64*Message + let private dec: Propulsion.Sinks.Codec = Streams.Codec.genWithIndex + let [] (|Parse|_|) = function + | struct (StreamName clientId, _) & Streams.DecodeNewest dec (version, update) -> ValueSome struct (clientId, version, update) | _ -> ValueNone [] type Outcome = /// Handler processed the span, with counts of used vs unused known event types - | Ok of used : int * unused : int + | Ok of used: int * unused: int /// Handler processed the span, but idempotency checks resulted in no writes being applied; includes count of decoded events - | Skipped of count : int + | Skipped of count: int /// Handler determined the events were not relevant to its duties and performed no decoding or processing - | NotApplicable of count : int + | NotApplicable of count: int -/// Gathers stats based on the outcome of each Span processed for emission, at intervals controlled by `StreamsConsumer` +/// Gathers stats based on the Outcome of each Span as it's processed, for periodic emission via DumpStats() type Stats(log, statsInterval, stateInterval) = inherit
-/// Gathers stats based on the outcome of each Span processed for emission, at intervals controlled by `StreamsConsumer`
+/// Gathers stats based on the Outcome of each Span as it's processed, for periodic emission via DumpStats()
 type Stats(log, statsInterval, stateInterval) =
     inherit Propulsion.Streams.Stats<Outcome>(log, statsInterval, stateInterval)

@@ -59,17 +56,17 @@ type Stats(log, statsInterval, stateInterval) =
         ok <- 0; skipped <- 0; na <- 0

 /// Map from external contract to internal contract defined by the aggregate
-let map : Contract.Message -> TodoSummary.Events.SummaryData = function
+let map: Contract.Message -> TodoSummary.Events.SummaryData = function
     | Contract.Summary x ->
         { items =
             [| for x in x.items ->
                 { id = x.id; order = x.order; title = x.title; completed = x.completed } |]}

 /// Ingest queued events per client - each time we handle all the incoming updates for a given stream as a single act
-let ingest (service : TodoSummary.Service) stream (span : Propulsion.Streams.StreamSpan<_>) ct = Async.startImmediateAsTask ct <| async {
-    match stream, span with
-    | Contract.MatchNewest (clientId, version, update) ->
+let ingest (service: TodoSummary.Service) stream (events: Propulsion.Sinks.Event[]) = async {
+    match struct (stream, events) with
+    | Contract.Parse (clientId, version, update) ->
         match! service.TryIngest(clientId, version, map update) with
-        | true -> return struct (Propulsion.Streams.SpanResult.AllProcessed, Outcome.Ok (1, span.Length - 1))
-        | false -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Skipped span.Length
-    | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length }
+        | true -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Ok (1, events.Length - 1)
+        | false -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Skipped events.Length
+    | _ -> return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.NotApplicable events.Length }
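
The handler above maps the service's idempotency outcome onto `Outcome`: a `true` from `TryIngest` reports one used event, anything else is `Skipped` or `NotApplicable`. A minimal restatement of the underlying version gate (the `State` record is shorthand for `TodoSummary.Fold.State` defined later in this patch):

    // true -> events to write (handler reports Ok); false -> redundant replay (handler reports Skipped)
    type State = { version: int64 }
    let ingestIfNewer version value (state: State) =
        if state.version >= version then false, []
        else true, [ {| version = version; value = value |} ]
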
diff --git a/propulsion-summary-consumer/Program.fs b/propulsion-summary-consumer/Program.fs
index a852fb761..2d375e765 100644
--- a/propulsion-summary-consumer/Program.fs
+++ b/propulsion-summary-consumer/Program.fs
@@ -3,7 +3,7 @@
 open Serilog
 open System

-exception MissingArg of message : string with override this.Message = this.message
+exception MissingArg of message: string with override this.Message = this.message
 let missingArg msg = raise (MissingArg msg)

 type Configuration(tryGet) =
@@ -43,7 +43,7 @@
             | MaxWriters _ -> "maximum number of items to process in parallel. Default: 8"
             | Verbose _ -> "request verbose logging."
             | Cosmos _ -> "specify CosmosDb input parameters"
-    and Arguments(c : Configuration, p : ParseResults<Parameters>) =
+    and Arguments(c: Configuration, p: ParseResults<Parameters>) =
         member val Cosmos = CosmosArguments(c, p.GetResult Cosmos)
         member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker)
         member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic)
@@ -73,7 +73,7 @@
             | Timeout _ -> "specify operation timeout in seconds. Default: 5."
             | Retries _ -> "specify operation retries. Default: 1."
             | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5."
-    and CosmosArguments(c : Configuration, p : ParseResults<CosmosParameters>) =
+    and CosmosArguments(c: Configuration, p: ParseResults<CosmosParameters>) =
         let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString
         let mode = p.TryGetResult ConnectionMode
         let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds
@@ -85,30 +85,28 @@ module Args =
         member _.Connect() = connector.ConnectStore("Main", database, container)

     /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args
-    let parse tryGetConfigValue argv : Arguments =
+    let parse tryGetConfigValue argv: Arguments =
         let programName = Reflection.Assembly.GetEntryAssembly().GetName().Name
         let parser = ArgumentParser.Create<Parameters>(programName = programName)
         Arguments(Configuration tryGetConfigValue, parser.ParseCommandLine argv)

 let [<Literal>] AppName = "ConsumerTemplate"

-let start (args : Args.Arguments) =
+let start (args: Args.Arguments) =
     let service =
         let store =
             let context = args.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create
             let cache = Equinox.Cache(AppName, sizeMb = 10)
-            Config.Store.Cosmos (context, cache)
-        TodoSummary.Config.create store
+            Store.Context.Cosmos (context, cache)
+        TodoSummary.Factory.create store
     let config =
         FsKafka.KafkaConsumerConfig.Create(
             AppName, args.Broker, [args.Topic], args.Group, Confluent.Kafka.AutoOffsetReset.Earliest,
             maxInFlightBytes = args.MaxInFlightBytes, ?statisticsInterval = args.LagFrequency)
-    let parseStreamSummaries(res : Confluent.Kafka.ConsumeResult<_, _>) : seq<Propulsion.Streams.Default.StreamEvent> =
+    let parseStreamSummaries(res: Confluent.Kafka.ConsumeResult<_, _>): seq<Propulsion.Sinks.StreamEvent> =
         Propulsion.Codec.NewtonsoftJson.RenderedSummary.parse res.Message.Value
     let stats = Ingester.Stats(Log.Logger, args.StatsInterval, args.StateInterval)
-    Propulsion.Kafka.StreamsConsumer.Start
-        ( Log.Logger, config, parseStreamSummaries, Ingester.ingest service, args.MaxConcurrentStreams,
-          stats, args.StateInterval)
+    Propulsion.Kafka.Factory.StartConcurrent(Log.Logger, config, parseStreamSummaries, args.MaxConcurrentStreams, Ingester.ingest service, stats)

 let run args = async {
     use consumer = start args
diff --git a/propulsion-summary-consumer/Config.fs b/propulsion-summary-consumer/Store.fs
similarity index 55%
rename from propulsion-summary-consumer/Config.fs
rename to propulsion-summary-consumer/Store.fs
index f46a7882e..4161fde83 100644
--- a/propulsion-summary-consumer/Config.fs
+++ b/propulsion-summary-consumer/Store.fs
@@ -1,21 +1,12 @@
-module ConsumerTemplate.Config
+module ConsumerTemplate.Store

 let log = Serilog.Log.ForContext("isMetric", true)
 let createDecider cat = Equinox.Decider.resolve log cat

-module EventCodec =
+module Codec =

-    open FsCodec.SystemTextJson
-
-    let private defaultOptions = Options.Create()
     let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> =
-        CodecJsonElement.Create<'t>(options = defaultOptions)
-    let private withUpconverter<'c, 'e when 'c :> TypeShape.UnionContract.IUnionContract> up : FsCodec.IEventCodec<'e, _, _> =
-        let down (_ : 'e) = failwith "Unexpected"
-        Codec.Create<'e, 'c, _>(up, down, options = defaultOptions)
-    let withIndex<'c when 'c :> TypeShape.UnionContract.IUnionContract> : FsCodec.IEventCodec<int64 * 'c, _, _> =
-        let up struct (raw : FsCodec.ITimelineEvent<_>, e) = raw.Index, e
-        withUpconverter<'c, int64 * 'c> up
+        FsCodec.SystemTextJson.CodecJsonElement.Create<'t>() // options = Options.Default

 module Cosmos =
@@ -28,5 +19,5 @@ module Cosmos =
         createCached codec initial fold accessStrategy (context, cache)

 [<NoComparison; NoEquality; RequireQualifiedAccess>]
-type Store =
-    | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache
+type Context =
+    | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Cache
diff --git a/propulsion-summary-consumer/SummaryConsumer.fsproj b/propulsion-summary-consumer/SummaryConsumer.fsproj
index 145b8e823..46591589e 100644
--- a/propulsion-summary-consumer/SummaryConsumer.fsproj
+++ b/propulsion-summary-consumer/SummaryConsumer.fsproj
@@ -9,7 +9,7 @@
-
+
@@ -17,9 +17,9 @@
-
-
-
+
+
+
diff --git a/propulsion-summary-consumer/TodoSummary.fs b/propulsion-summary-consumer/TodoSummary.fs
index 7de17fc61..6e8622756 100644
--- a/propulsion-summary-consumer/TodoSummary.fs
+++ b/propulsion-summary-consumer/TodoSummary.fs
@@ -7,28 +7,28 @@ let streamId = Equinox.StreamId.gen ClientId.toString
 module Events =

     type ItemData = { id: int; order: int; title: string; completed: bool }
-    type SummaryData = { items : ItemData[] }
-    type IngestedData = { version : int64; value : SummaryData }
+    type SummaryData = { items: ItemData[] }
+    type IngestedData = { version: int64; value: SummaryData }
     type Event =
         | Ingested of IngestedData
         interface TypeShape.UnionContract.IUnionContract
-    let codec = Config.EventCodec.gen<Event>
+    let codec = Store.Codec.gen<Event>

 module Fold =

-    type State = { version : int64; value : Events.SummaryData option }
+    type State = { version: int64; value: Events.SummaryData option }
     let initial = { version = -1L; value = None }
     let evolve _state = function
         | Events.Ingested e -> { version = e.version; value = Some e.value }
-    let fold : State -> Events.Event seq -> State = Seq.fold evolve
+    let fold: State -> Events.Event seq -> State = Seq.fold evolve
     let toSnapshot state = Events.Ingested { version = state.version; value = state.value.Value }

-let ingest version value (state : Fold.State) =
+let ingest version value (state: Fold.State) =
     if state.version >= version then false, []
     else true, [Events.Ingested { version = version; value = value }]

 type Item = { id: int; order: int; title: string; completed: bool }
-let render : Fold.State -> Item[] = function
+let render: Fold.State -> Item[] = function
     | { value = Some { items = xs} } ->
         [| for x in xs ->
             { id = x.id
@@ -38,9 +38,9 @@ let render : Fold.State -> Item[] = function
     | _ -> [||]

 /// Defines the operations that the Read side of a Controller and/or the Ingester can perform on the 'aggregate'
-type Service internal (resolve : ClientId -> Equinox.Decider<Events.Event, Fold.State>) =
+type Service internal (resolve: ClientId -> Equinox.Decider<Events.Event, Fold.State>) =

-    member _.TryIngest(clientId, version, value) : Async<bool> =
+    member _.TryIngest(clientId, version, value): Async<bool> =
         let decider = resolve clientId
         decider.Transact(ingest version value)

@@ -48,8 +48,8 @@ type Service internal (resolve : ClientId -> Equinox.Decider<Events.Event, Fold.State>) =
-module Config =
+module Factory =

     let private (|Category|) = function
-        | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createRollingState Events.codec Fold.initial Fold.fold Fold.toSnapshot (context, cache)
-    let create (Category cat) = Service(streamId >> Config.createDecider cat Category)
+        | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createRollingState Events.codec Fold.initial Fold.fold Fold.toSnapshot (context, cache)
+    let create (Category cat) = Service(streamId >> Store.createDecider cat Category)
diff --git a/propulsion-sync/Infrastructure.fs b/propulsion-sync/Infrastructure.fs
index d5b0bd06e..cd224cd7e 100644
--- a/propulsion-sync/Infrastructure.fs
+++ b/propulsion-sync/Infrastructure.fs
@@ -5,13 +5,13 @@ open Serilog
 open Serilog.Events
 open System

-module Config =
+module Store =

     let log = Log.ForContext("isMetric", true)

 module EnvVar =

-    let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj
+    let tryGet varName: string option = Environment.GetEnvironmentVariable varName |> Option.ofObj

 module Log =
@@ -47,7 +47,7 @@ type Equinox.CosmosStore.CosmosStoreConnector with
 module CosmosStoreContext =

     /// Create with default packing and querying policies. Search for other `module CosmosStoreContext` impls for custom variations
-    let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) =
+    let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) =
         let maxEvents = 256
         Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents)

@@ -55,17 +55,17 @@ module CosmosStoreContext =
 type Logging() =

     [<System.Runtime.CompilerServices.Extension>]
-    static member Configure(configuration : LoggerConfiguration, verbose, verboseStore, ?maybeSeqEndpoint) =
+    static member Configure(configuration: LoggerConfiguration, verbose, verboseStore, ?maybeSeqEndpoint) =
         configuration
             .Enrich.FromLogContext()
         |> fun c -> if verbose then c.MinimumLevel.Debug() else c
         |> fun c -> let ingesterLevel = if verboseStore then LogEventLevel.Debug else LogEventLevel.Information
-                    c.MinimumLevel.Override(typeof.FullName, ingesterLevel)
+                    c.MinimumLevel.Override(typeof.FullName, ingesterLevel)
         |> fun c -> let generalLevel = if verbose then LogEventLevel.Information else LogEventLevel.Warning
                     c.MinimumLevel.Override(typeof.FullName, generalLevel)
                      .MinimumLevel.Override(typeof.FullName, generalLevel)
         |> fun c -> let t = "[{Timestamp:HH:mm:ss} {Level:u1}] {Message:lj} {Properties:j}{NewLine}{Exception}"
-                    let configure (a : Configuration.LoggerSinkConfiguration) : unit =
+                    let configure (a: Configuration.LoggerSinkConfiguration): unit =
                         a.Logger(fun l ->
                             l.WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink())
                              .WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) |> ignore) |> ignore
diff --git a/propulsion-sync/Program.fs b/propulsion-sync/Program.fs
index f8af95fea..38f38b382 100644
--- a/propulsion-sync/Program.fs
+++ b/propulsion-sync/Program.fs
@@ -9,7 +9,7 @@ open Serilog
 open System
 open System.Threading

-exception MissingArg of message : string with override this.Message = this.message
+exception MissingArg of message: string with override this.Message = this.message
 let missingArg msg = raise (MissingArg msg)

 type Configuration(tryGet) =
@@ -60,7 +60,7 @@ module Args =
                 | SrcCosmos _ -> "Cosmos input parameters."
                 | SrcEs _ -> "EventStore input parameters."

-    and Arguments(c : Configuration, p : ParseResults<Parameters>) =
+    and Arguments(c: Configuration, p: ParseResults<Parameters>) =
         member val Verbose = p.Contains Parameters.Verbose
         member val VerboseStore = p.Contains VerboseStore
         member val MaybeSeqEndpoint = if p.Contains LocalSeq then Some "http://localhost:5341" else None
@@ -69,7 +69,7 @@ module Args =
         member val MaxWriters = p.GetResult(MaxWriters, 512)
         member val MaxConnections = p.GetResult(MaxConnections, 1)

-        member val Source : Choice<CosmosSourceArguments, EsSourceArguments> =
+        member val Source: Choice<CosmosSourceArguments, EsSourceArguments> =
             match p.GetSubCommand() with
             | SrcCosmos cosmos -> Choice1Of2 (CosmosSourceArguments(c, cosmos))
             | SrcEs es -> Choice2Of2 (EsSourceArguments(c, es))
@@ -78,7 +78,7 @@ module Args =
         member val StatsInterval = TimeSpan.FromMinutes 1.
         member val StateInterval = TimeSpan.FromMinutes 5.
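
All the Arguments types in this patch follow the same fallback convention: a command-line flag wins, otherwise the value is sourced from the environment via `Configuration`, otherwise parsing fails. A hedged sketch (the env var name comes from the Kafka help text further below; the member shapes are illustrative):

    type ExampleConfiguration(tryGet: string -> string option) =
        member _.Get key = tryGet key |> Option.defaultWith (fun () -> failwithf "Missing %s" key)
    // flag beats environment: PROPULSION_KAFKA_BROKER is only consulted when no broker flag is passed
    let broker (flag: string option) (c: ExampleConfiguration) =
        flag |> Option.defaultWith (fun () -> c.Get "PROPULSION_KAFKA_BROKER")
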
         member _.CategoryFilterFunction(?excludeLong, ?longOnly): string -> bool =
-            let isLong (streamName : string) =
+            let isLong (streamName: string) =
                 streamName.StartsWith "Inventory-" // Too long
                 || streamName.StartsWith "InventoryCount-" // No Longer used
                 || streamName.StartsWith "InventoryLog" // 5GB, causes lopsided partitions, unused
@@ -92,7 +92,7 @@ module Args =
                     "SkuFileUpload-534e4362c641461ca27e3d23547f0852"
                     "SkuFileUpload-778f1efeab214f5bab2860d1f802ef24"
                     "PurchaseOrder-5791" ]
-            let isCheckpoint (streamName : string) =
+            let isCheckpoint (streamName: string) =
                 streamName.EndsWith "_checkpoint"
                 || streamName.EndsWith "_checkpoints"
                 || streamName.StartsWith "#serial"
@@ -103,14 +103,14 @@ module Args =
             | [], good -> let white = Set.ofList good in Log.Warning("Only copying categories: {cats}", white); fun x -> white.Contains x
             | _, _ -> missingArg "BlackList and Whitelist are mutually exclusive; inclusions and exclusions cannot be mixed"

-        member x.Sink : Choice<CosmosSinkArguments, EsSinkArguments> =
+        member x.Sink: Choice<CosmosSinkArguments, EsSinkArguments> =
             match x.Source with
             | Choice1Of2 cosmos -> cosmos.Sink
             | Choice2Of2 es -> Choice1Of2 es.Sink
-        member x.SourceParams() : Choice<_, _*Equinox.CosmosStore.CosmosStoreContext*ReaderSpec> =
+        member x.SourceParams(): Choice<_, _*Equinox.CosmosStore.CosmosStoreContext*ReaderSpec> =
             match x.Source with
             | Choice1Of2 srcC ->
-                let leases : Microsoft.Azure.Cosmos.Container =
+                let leases: Microsoft.Azure.Cosmos.Container =
                     match srcC.Sink with
                     | Choice1Of2 dstC ->
                         match srcC.LeaseContainerId, dstC.LeaseContainerId with
@@ -169,7 +169,7 @@ module Args =
                 | DstEs _ -> "EventStore Sink parameters."
                 | DstCosmos _ -> "CosmosDb Sink parameters."

-    and CosmosSourceArguments(c : Configuration, p : ParseResults<CosmosSourceParameters>) =
+    and CosmosSourceArguments(c: Configuration, p: ParseResults<CosmosSourceParameters>) =
         let discovery = p.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString
         let mode = p.TryGetResult CosmosSourceParameters.ConnectionMode
         let timeout = p.GetResult(CosmosSourceParameters.Timeout, 5.) |> TimeSpan.FromSeconds
@@ -177,12 +177,12 @@ module Args =
         let maxRetryWaitTime = p.GetResult(CosmosSourceParameters.RetriesWaitTime, 5.) |> TimeSpan.FromSeconds
         let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode)
         let database = p.TryGetResult CosmosSourceParameters.Database |> Option.defaultWith (fun () -> c.CosmosDatabase)
-        member val ContainerId : string = p.GetResult CosmosSourceParameters.Container
+        member val ContainerId: string = p.GetResult CosmosSourceParameters.Container
         member x.MonitoredContainer() = connector.ConnectMonitored(database, x.ContainerId)

         member val FromTail = p.Contains CosmosSourceParameters.FromTail
         member val MaxItems = p.TryGetResult MaxItems
-        member val LagFrequency : TimeSpan = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes
+        member val LagFrequency: TimeSpan = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes
         member val LeaseContainerId = p.TryGetResult CosmosSourceParameters.LeaseContainer
         member private _.ConnectLeases containerId = connector.CreateUninitialized(database, containerId)
         member x.ConnectLeases() = match x.LeaseContainerId with
@@ -242,7 +242,7 @@ module Args =
                 | Cosmos _ -> "CosmosDb Sink parameters."
                 | Es _ -> "EventStore Sink parameters."
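
The two visible cases above belong to a four-way blacklist/whitelist match; this sketch restates the complete rule (the empty/empty and blacklist-only branches are inferred from the error message, hence hedged):

    let categoryFilter (black: string list) (white: string list): string -> bool =
        match black, white with
        | [], [] -> fun _ -> true // no filtering requested
        | bad, [] -> let blackSet = Set.ofList bad in fun x -> not (blackSet.Contains x)
        | [], good -> let whiteSet = Set.ofList good in fun x -> whiteSet.Contains x
        | _, _ -> failwith "BlackList and Whitelist are mutually exclusive; inclusions and exclusions cannot be mixed"
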
-    and EsSourceArguments(c : Configuration, p : ParseResults<EsSourceParameters>) =
+    and EsSourceArguments(c: Configuration, p: ParseResults<EsSourceParameters>) =
         member val Gorge = p.TryGetResult Gorge
         member val StreamReaders = p.GetResult(StreamReaders, 1)
         member val TailInterval = p.GetResult(Tail, 1.) |> TimeSpan.FromSeconds
@@ -273,7 +273,7 @@ module Args =
         member val Heartbeat = p.GetResult(EsSourceParameters.HeartbeatTimeout, 1.5) |> TimeSpan.FromSeconds
         member x.Connect(log: ILogger, storeLog: ILogger, appName, connectionStrategy) =
             let discovery = x.Discovery
-            let s (x : TimeSpan) = x.TotalSeconds
+            let s (x: TimeSpan) = x.TotalSeconds
             log.ForContext("host", x.Host).ForContext("port", x.Port)
                .Information("EventStore {discovery} heartbeat: {heartbeat}s Timeout: {timeout}s Retries {retries}",
                             discovery, s x.Heartbeat, s x.Timeout, x.Retries)
@@ -312,7 +312,7 @@ module Args =
 #if kafka
                 | Kafka _ -> "specify Kafka target for non-Synced categories. Default: None."
 #endif
-    and CosmosSinkArguments(c : Configuration, p : ParseResults<CosmosSinkParameters>) =
+    and CosmosSinkArguments(c: Configuration, p: ParseResults<CosmosSinkParameters>) =
         let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString
         let mode = p.GetResult(ConnectionMode, Microsoft.Azure.Cosmos.ConnectionMode.Direct)
         let timeout = p.GetResult(CosmosSinkParameters.Timeout, 5.) |> TimeSpan.FromSeconds
@@ -352,7 +352,7 @@ module Args =
                 | Timeout _ -> "specify operation timeout in seconds. Default: 20."
                 | Retries _ -> "specify operation retries. Default: 3."
                 | HeartbeatTimeout _ -> "specify heartbeat timeout in seconds. Default: 1.5."
-    and EsSinkArguments(c : Configuration, p : ParseResults<EsSinkParameters>) =
+    and EsSinkArguments(c: Configuration, p: ParseResults<EsSinkParameters>) =
         member x.Discovery =
             match x.Tcp, x.Port with
             | false, None -> Discovery.GossipDns x.Host
@@ -367,9 +367,9 @@ module Args =
         member val Retries = p.GetResult(Retries, 3)
         member val Timeout = p.GetResult(Timeout, 20.) |> TimeSpan.FromSeconds
         member val Heartbeat = p.GetResult(HeartbeatTimeout, 1.5) |> TimeSpan.FromSeconds
-        member x.Connect(log: ILogger, storeLog: ILogger, connectionStrategy, appName, connIndex) =
+        member x.Connect(log: ILogger, storeLog: ILogger, connectionStrategy, appName) =
             let discovery = x.Discovery
-            let s (x : TimeSpan) = x.TotalSeconds
+            let s (x: TimeSpan) = x.TotalSeconds
             log.ForContext("host", x.Host).ForContext("port", x.Port)
                .Information("EventStore {discovery} heartbeat: {heartbeat}s Timeout: {timeout}s Retries {retries}",
                             discovery, s x.Heartbeat, s x.Timeout, x.Retries)
@@ -387,7 +387,7 @@ module Args =
                 | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)"
                 | Topic _ -> "specify Kafka Topic Id. (optional if environment variable PROPULSION_KAFKA_TOPIC specified)."
                 | Producers _ -> "specify number of Kafka Producer instances to use. Default: 1."
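
Only the `GossipDns` case of the Tcp/Port to `Discovery` mapping is visible in the hunk above; the remaining branches below are assumptions extrapolated from the same pattern and should be checked against the full file:

    open System
    let discovery (tcp: bool) (port: int option) (host: string) =
        match tcp, port with
        | false, None -> Discovery.GossipDns host
        | false, Some p -> Discovery.GossipDnsCustomPort (host, p) // assumed constructor
        | true, p -> Discovery.Uri (UriBuilder("tcp", host, defaultArg p 1113).Uri) // assumed default port
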
-    and KafkaSinkArguments(c : Configuration, p : ParseResults<KafkaSinkParameters>) =
+    and KafkaSinkArguments(c: Configuration, p: ParseResults<KafkaSinkParameters>) =
         member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker)
         member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic)
         member val Producers = p.GetResult(Producers, 1)
@@ -395,7 +395,7 @@ module Args =
 #endif

     /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args
-    let parse tryGetConfigValue argv : Arguments =
+    let parse tryGetConfigValue argv: Arguments =
         let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name
         let parser = ArgumentParser.Create<Parameters>(programName = programName)
         Arguments(Configuration tryGetConfigValue, parser.ParseCommandLine argv)
@@ -441,17 +441,17 @@ module EventV0Parser =
         System.Text.Json.JsonSerializer.Deserialize<'T>(document.RootElement)

     /// We assume all Documents represent Events laid out as above
-    let parse (d : System.Text.Json.JsonDocument) : Propulsion.Streams.Default.StreamEvent =
+    let parse (d: System.Text.Json.JsonDocument): Propulsion.Sinks.StreamEvent =
         let e = d.Cast<EventV0>()
-        FsCodec.StreamName.parse e.s, e |> FsCodec.Core.TimelineEvent.Map ReadOnlyMemory
+        FsCodec.StreamName.parse e.s, e |> FsCodec.Core.TimelineEvent.Map(Func<_, _> ReadOnlyMemory)

-let transformV0 catFilter v0SchemaDocument : Propulsion.Streams.StreamEvent<_> seq = seq {
+let transformV0 catFilter v0SchemaDocument: Propulsion.Streams.StreamEvent<_> seq = seq {
     let parsed = EventV0Parser.parse v0SchemaDocument
     let struct (FsCodec.StreamName.Category cat, _) = parsed
     if catFilter cat then yield parsed }
 //#else
-let transformOrFilter catFilter changeFeedDocument : Propulsion.Streams.Default.StreamEvent seq = seq {
+let transformOrFilter catFilter changeFeedDocument: Propulsion.Sinks.StreamEvent seq = seq {
     for FsCodec.StreamName.Category cat, _ as x in Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents catFilter changeFeedDocument do
         // NB the `index` needs to be contiguous with existing events - IOW filtering needs to be at stream (and not event) level
         if catFilter cat then
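
Why `transformOrFilter` above filters by category rather than per event: a stream's event indexes must remain contiguous, so a stream is either copied wholesale or skipped entirely. A sketch restating that rule over an already-parsed batch (`keepCat` is an assumed predicate):

    let filterWholeStreams keepCat (batch: Propulsion.Sinks.StreamEvent seq) = seq {
        for struct (FsCodec.StreamName.Category cat, _) as x in batch do
            if keepCat cat then yield x } // drop whole streams, never individual events
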
@@ -473,18 +473,18 @@ module Checkpoints =
     let create groupName (context, cache) =
         let caching = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.)
         let cat = CosmosStoreCategory(context, codec, Checkpoint.Fold.fold, Checkpoint.Fold.initial, caching, access)
-        let resolve log = Equinox.Decider.resolve log cat
-        Checkpoint.CheckpointSeries(groupName, resolve)
+        let resolve log = Equinox.DeciderCore.Resolve(cat, log)
+        failwith "TODO Checkpoint.CheckpointSeries(groupName, resolve) // Propulsion 3rc5 has a very unfortunate mangle, and don't have time to figure out a hack-workaround"

 type Stats(log, statsInterval, stateInterval) =
-    inherit Propulsion.Streams.Sync.Stats<unit>(log, statsInterval, stateInterval)
+    inherit Propulsion.Sync.Stats<unit>(log, statsInterval, stateInterval)

     override _.HandleOk(()) = ()
     override _.HandleExn(log, exn) = log.Information(exn, "Unhandled")
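
This sync `Stats` carries no per-outcome counters, but the consumers' versions earlier in this patch do; a hedged composite sketch of that shape (`ExampleOutcome` is a local stand-in, with override names following the usage visible in this patch):

    type ExampleOutcome = Used of int | Skipped of int
    type ExampleStats(log: Serilog.ILogger, statsInterval, stateInterval) =
        inherit Propulsion.Streams.Stats<ExampleOutcome>(log, statsInterval, stateInterval)
        let mutable used, skipped = 0, 0
        override _.HandleOk res =
            match res with Used c -> used <- used + c | Skipped c -> skipped <- skipped + c
        override _.HandleExn(log, exn) = log.Information(exn, "Unhandled")
        override _.DumpStats() = // emitted every statsInterval, then counters reset
            base.DumpStats()
            if used <> 0 || skipped <> 0 then
                log.Information(" Used {used} Skipped {skipped}", used, skipped)
                used <- 0; skipped <- 0
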
 open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException

-let build (args : Args.Arguments, log) =
+let build (args: Args.Arguments, log) =
     let maybeDstCosmos, sink, streamFilter =
         match args.Sink with
         | Choice1Of2 cosmos ->
@@ -495,31 +495,31 @@ let build (args : Args.Arguments, log) =
             match cosmos.KafkaSink with
             | Some kafka ->
                 let broker, topic, producers = kafka.BuildTargetParams()
-                let render (stream: FsCodec.StreamName) (span: Propulsion.Streams.Default.StreamSpan) ct = Async.startImmediateAsTask ct <| async {
+                let render (stream: FsCodec.StreamName) (events: Propulsion.Sinks.Event[]) = async {
                     let value =
-                        span
+                        events
                         |> Propulsion.Codec.NewtonsoftJson.RenderedSpan.ofStreamSpan stream
                         |> Propulsion.Codec.NewtonsoftJson.Serdes.Serialize
-                    return struct (FsCodec.StreamName.toString stream, value) }
+                    return FsCodec.StreamName.toString stream, value }
-                let producer = Propulsion.Kafka.Producer(Log.Logger, AppName, broker, Confluent.Kafka.Acks.All, topic, degreeOfParallelism=producers)
+                let producer = Propulsion.Kafka.Producer(Log.Logger, AppName, broker, Confluent.Kafka.Acks.All, topic, degreeOfParallelism = producers)
                 let stats = Stats(Log.Logger, args.StatsInterval, args.StateInterval)
                 StreamsProducerSink.Start(
-                    Log.Logger, args.MaxReadAhead, args.MaxWriters, render, producer, stats, statsInterval = args.StatsInterval, maxBytes = maxBytes, maxEvents = maxEvents),
+                    Log.Logger, args.MaxReadAhead, args.MaxWriters, render, producer, stats, maxBytes = maxBytes, maxEvents = maxEvents),
                 args.CategoryFilterFunction(longOnly=true)
             | None ->
 #endif
             let context = CosmosStoreContext.create target
-            let eventsContext = Equinox.CosmosStore.Core.EventsContext(context, Config.log)
+            let eventsContext = Equinox.CosmosStore.Core.EventsContext(context, Store.log)
             Propulsion.CosmosStore.CosmosStoreSink.Start(log, args.MaxReadAhead, eventsContext, args.MaxWriters, args.StatsInterval, args.StateInterval),
             args.CategoryFilterFunction(excludeLong=true)
             Some target, sink, streamFilter
         | Choice2Of2 es ->
             let connect connIndex = async {
-                let lfc = Config.log.ForContext("ConnId", connIndex)
-                let! c = es.Connect(log, lfc, ConnectionStrategy.ClusterSingle NodePreference.Master, AppName, connIndex)
+                let lfc = Store.log.ForContext("ConnId", connIndex)
+                let! c = es.Connect(log, lfc, ConnectionStrategy.ClusterSingle NodePreference.Master, AppName)
                 return EventStoreContext(c, batchSize = Int32.MaxValue) }
             let targets = Array.init args.MaxConnections (string >> connect) |> Async.Parallel |> Async.RunSynchronously
-            let sink = EventStoreSink.Start(log, Config.log, args.MaxReadAhead, targets, args.MaxWriters, args.StatsInterval, args.StateInterval)
+            let sink = EventStoreSink.Start(log, Store.log, args.MaxReadAhead, targets, args.MaxWriters, args.StatsInterval, args.StateInterval)
             None, sink, args.CategoryFilterFunction()
     match args.SourceParams() with
     | Choice1Of2 (monitored, leases, processorName, startFromTail, maxItems, lagFrequency) ->
@@ -541,9 +541,9 @@ let build (args : Args.Arguments, log) =
         let cache = Equinox.Cache(AppName, sizeMb=1)
         let checkpoints = Checkpoints.Cosmos.create spec.groupName (checkpointsContext, cache)

-        let withNullData (e : FsCodec.ITimelineEvent<_>) : FsCodec.ITimelineEvent<_> =
+        let withNullData (e: FsCodec.ITimelineEvent<_>): FsCodec.ITimelineEvent<_> =
             FsCodec.Core.TimelineEvent.Create(e.Index, e.EventType, ReadOnlyMemory.Empty, e.Meta, timestamp=e.Timestamp) :> _
-        let tryMapEvent streamFilter (x : EventStore.ClientAPI.ResolvedEvent) =
+        let tryMapEvent streamFilter (x: EventStore.ClientAPI.ResolvedEvent) =
             match x.Event with
             | e when not e.IsJson
                 || e.EventStreamId.StartsWith "$"
                 || not (streamFilter e.EventStreamId) -> None
@@ -563,8 +563,8 @@ let build (args : Args.Arguments, log) =
                 args.MaxReadAhead, args.StatsInterval, ct)
         [ runPipeline CancellationToken.None |> Async.ofTask; sink.AwaitWithStopOnCancellation() ]

-let run (args : Args.Arguments) =
-    let log = (Log.forGroup args.ProcessorName).ForContext()
+let run (args: Args.Arguments) =
+    let log = (Log.forGroup args.ProcessorName).ForContext()
     build (args, log) |> Async.Parallel |> Async.Ignore

 [<EntryPoint>]
diff --git a/propulsion-sync/Sync.fsproj b/propulsion-sync/Sync.fsproj
index b962dca10..d63781ffc 100644
--- a/propulsion-sync/Sync.fsproj
+++ b/propulsion-sync/Sync.fsproj
@@ -14,10 +14,10 @@
-
-
+
+
-
+
diff --git a/propulsion-tracking-consumer/Infrastructure.fs b/propulsion-tracking-consumer/Infrastructure.fs
index 3de2c9c5d..d4c3c86ee 100644
--- a/propulsion-tracking-consumer/Infrastructure.fs
+++ b/propulsion-tracking-consumer/Infrastructure.fs
@@ -9,31 +9,13 @@ open System.Runtime.CompilerServices

 type SkuId = string<skuId> and [<Measure>] skuId
 module SkuId =
-    let toString (value : SkuId) : string = % value
-    let parse (value : string) : SkuId = let raw = value in % raw
+    let toString (value: SkuId): string = % value
+    let parse (value: string): SkuId = let raw = value in % raw
     let (|Parse|) = parse

 module EnvVar =

-    let tryGet varName : string option = System.Environment.GetEnvironmentVariable varName |> Option.ofObj
-
-module EventCodec =
-
-    /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to `log` citing the `stream` and content)
-    let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent<_>) =
-        match codec.TryDecode x with
-        | ValueNone ->
-            if Log.IsEnabled Serilog.Events.LogEventLevel.Debug then
-                Log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true)
-                    .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName)
-            ValueNone
-        | x -> x
-
-    open FsCodec.SystemTextJson
-
-    let private defaultOptions = Options.Create()
-    let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> =
-        CodecJsonElement.Create<'t>(options = defaultOptions)
+    let tryGet varName: string option = System.Environment.GetEnvironmentVariable varName |> Option.ofObj

 type Equinox.CosmosStore.CosmosStoreConnector with

@@ -53,7 +35,7 @@ type Equinox.CosmosStore.CosmosStoreConnector with
 module CosmosStoreContext =

     /// Create with default packing and querying policies. Search for other `module CosmosStoreContext` impls for custom variations
-    let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) =
+    let create (storeClient: Equinox.CosmosStore.CosmosStoreClient) =
         let maxEvents = 256
         Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents)

@@ -61,7 +43,7 @@ module CosmosStoreContext =
 type Logging() =

     [<System.Runtime.CompilerServices.Extension>]
-    static member Configure(configuration : LoggerConfiguration, ?verbose) =
+    static member Configure(configuration: LoggerConfiguration, ?verbose) =
         configuration
             .Enrich.FromLogContext()
         |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c
diff --git a/propulsion-tracking-consumer/Ingester.fs b/propulsion-tracking-consumer/Ingester.fs
index 31fc6e235..94f58161a 100644
--- a/propulsion-tracking-consumer/Ingester.fs
+++ b/propulsion-tracking-consumer/Ingester.fs
@@ -2,27 +2,22 @@
 /// Compared to the Ingester in the `proReactor` template, each event is potentially relevant
 module ConsumerTemplate.Ingester

-open Propulsion.Internal
-
 /// Defines the shape of input messages on the topic we're consuming
 module Contract =

-    type OrderInfo = { poNumber : string; reservedUnitQuantity : int }
+    type OrderInfo = { poNumber: string; reservedUnitQuantity: int }
     type Message =
-        { skuId : SkuId // primary key for the aggregate
-          locationId : string
-          messageIndex : int64
-          pickTicketId : string
-          purchaseOrderInfo : OrderInfo[] }
-    let serdes = FsCodec.SystemTextJson.Options.Default |> FsCodec.SystemTextJson.Serdes
-    let parse (utf8 : Propulsion.Streams.Default.EventBody) : Message =
-        // NB see https://github.com/jet/FsCodec for details of the default serialization profile (TL;DR only has an `OptionConverter`)
-        System.Text.Encoding.UTF8.GetString(utf8.Span)
-        |> serdes.Deserialize
-
-type Outcome = Completed of used : int * unused : int
-
-/// Gathers stats based on the outcome of each Span processed for emission at intervals controlled by `StreamsConsumer`
+        { skuId: SkuId // primary key for the aggregate
+          locationId: string
+          messageIndex: int64
+          pickTicketId: string
+          purchaseOrderInfo: OrderInfo[] }
+    let serdes = FsCodec.SystemTextJson.Serdes.Default
+    let parse (utf8: Propulsion.Sinks.EventBody): Message = serdes.Deserialize(utf8)
+
+type Outcome = Completed of used: int * unused: int
+
+/// Gathers stats based on the Outcome of each Span as it's processed, for periodic emission via DumpStats()
 type Stats(log, statsInterval, stateInterval) =
     inherit Propulsion.Streams.Stats<Outcome>(log, statsInterval, stateInterval)

@@ -41,13 +36,13 @@ type Stats(log, statsInterval, stateInterval) =
 /// Ingest queued events per sku - each time we handle all the incoming updates for a given stream as a single act
 let ingest
-        (service : SkuSummary.Service)
-        (FsCodec.StreamName.CategoryAndId (_, SkuId.Parse skuId)) (span : Propulsion.Streams.StreamSpan<_>) ct = Async.startImmediateAsTask ct <| async {
+        (service: SkuSummary.Service)
+        (FsCodec.StreamName.CategoryAndId (_, SkuId.Parse skuId)) (events: Propulsion.Sinks.Event[]) = async {
     let items =
-        [ for e in span do
+        [ for e in events do
             let x = Contract.parse e.Data
             for o in x.purchaseOrderInfo do
-                let x : SkuSummary.Events.ItemData =
+                let x: SkuSummary.Events.ItemData =
                     { locationId = x.locationId
                       messageIndex = x.messageIndex
                       picketTicketId = x.pickTicketId
@@ -55,4 +50,4 @@ let ingest
                       reservedQuantity = o.reservedUnitQuantity }
                 yield x ]
     let! used = service.Ingest(skuId, items)
-    return struct (Propulsion.Streams.SpanResult.AllProcessed, Outcome.Completed(used, items.Length - used)) }
+    return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Completed(used, items.Length - used) }
diff --git a/propulsion-tracking-consumer/Program.fs b/propulsion-tracking-consumer/Program.fs
index 5ff792def..3c3d674d6 100644
--- a/propulsion-tracking-consumer/Program.fs
+++ b/propulsion-tracking-consumer/Program.fs
@@ -3,7 +3,7 @@
 open Serilog
 open System

-exception MissingArg of message : string with override this.Message = this.message
+exception MissingArg of message: string with override this.Message = this.message
 let missingArg msg = raise (MissingArg msg)

 type Configuration(tryGet) =
@@ -41,7 +41,7 @@
             | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: off"
             | MaxWriters _ -> "maximum number of items to process in parallel. Default: 8"
             | Cosmos _ -> "specify CosmosDb input parameters"
-    and Arguments(c : Configuration, p : ParseResults<Parameters>) =
+    and Arguments(c: Configuration, p: ParseResults<Parameters>) =
         member val Verbose = p.Contains Verbose
         member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker)
         member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic)
@@ -69,7 +69,7 @@
             | Timeout _ -> "specify operation timeout in seconds (default: 5)."
             | Retries _ -> "specify operation retries (default: 1)."
             | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 5)"
-    and CosmosArguments(c : Configuration, p : ParseResults<CosmosParameters>) =
+    and CosmosArguments(c: Configuration, p: ParseResults<CosmosParameters>) =
         let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString
         let mode = p.TryGetResult ConnectionMode
         let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds
@@ -88,13 +88,13 @@ module Args =

 let [<Literal>] AppName = "ConsumerTemplate"

-let start (args : Args.Arguments) =
+let start (args: Args.Arguments) =
     let service =
         let store =
             let context = args.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create
             let cache = Equinox.Cache(AppName, sizeMb = 10)
-            Config.Store.Cosmos (context, cache)
-        SkuSummary.Config.create store
+            Store.Context.Cosmos (context, cache)
+        SkuSummary.Factory.create store
     let config =
         FsKafka.KafkaConsumerConfig.Create(
             AppName, args.Broker, [args.Topic], args.Group, Confluent.Kafka.AutoOffsetReset.Earliest,
@@ -106,9 +106,7 @@ let start (args : Args.Arguments) =
     // The StreamNameSequenceGenerator maintains an Index per stream with which the messages are tagged in order to be able to
     // represent them as a sequence of indexed messages per stream
     let sequencer = Propulsion.Kafka.StreamNameSequenceGenerator()
-    Propulsion.Kafka.StreamsConsumer.Start
-        ( Log.Logger, config, sequencer.ConsumeResultToStreamEvent(), Ingester.ingest service, args.MaxConcurrentStreams,
-          stats, args.StateInterval)
+    Propulsion.Kafka.Factory.StartConcurrent(Log.Logger, config, sequencer.ConsumeResultToStreamEvent(), args.MaxConcurrentStreams, Ingester.ingest service, stats)

 let run args = async {
     use consumer = start args
diff --git a/propulsion-tracking-consumer/SkuSummary.fs b/propulsion-tracking-consumer/SkuSummary.fs
index 1a054af5b..621ece6ef 100644
--- a/propulsion-tracking-consumer/SkuSummary.fs
+++ b/propulsion-tracking-consumer/SkuSummary.fs
@@ -7,24 +7,24 @@ let streamId = Equinox.StreamId.gen SkuId.toString
 module Events =

     type ItemData =
-        { locationId : string
-          messageIndex : int64
-          picketTicketId : string
-          poNumber : string
-          reservedQuantity : int }
+        { locationId: string
+          messageIndex: int64
+          picketTicketId: string
+          poNumber: string
+          reservedQuantity: int }
     type Event =
         | Ingested of ItemData
         | Snapshotted of ItemData[]
         interface TypeShape.UnionContract.IUnionContract
-    let codec = EventCodec.gen<Event>
+    let codec = Store.Codec.gen<Event>

 module Fold =

     type State = Events.ItemData list
     module State =
-        let equals (x : Events.ItemData) (y : Events.ItemData) =
+        let equals (x: Events.ItemData) (y: Events.ItemData) =
             x.locationId = y.locationId
-        let supersedes (x : Events.ItemData) (y : Events.ItemData) =
+        let supersedes (x: Events.ItemData) (y: Events.ItemData) =
             equals x y
             && y.messageIndex > x.messageIndex
             && y.reservedQuantity <> x.reservedQuantity
@@ -39,16 +39,16 @@ module Fold =
     let evolve state = function
         | Events.Ingested e -> e :: state
         | Events.Snapshotted items -> List.ofArray items
-    let fold : State -> Events.Event seq -> State = Seq.fold evolve
-    let toSnapshot (x : State) : Events.Event = Events.Snapshotted (Array.ofList x)
+    let fold: State -> Events.Event seq -> State = Seq.fold evolve
+    let toSnapshot (x: State): Events.Event = Events.Snapshotted (Array.ofList x)

-let ingest (updates : Events.ItemData list) (state : Fold.State) =
+let ingest (updates: Events.ItemData list) (state: Fold.State) =
     [for x in updates do
         if x |> Fold.State.isNewOrUpdated state then
             yield Events.Ingested x]

-type Service internal (resolve : SkuId -> Equinox.Decider<Events.Event, Fold.State>) =
+type Service internal (resolve: SkuId -> Equinox.Decider<Events.Event, Fold.State>) =

     /// count of items
-    member _.Ingest(skuId, items) : Async<int> =
+    member _.Ingest(skuId, items): Async<int> =
         let decider = resolve skuId
         let decide state =
             let events = ingest items state
@@ -59,8 +59,8 @@ type Service internal (resolve : SkuId -> Equinox.Decider<Events.Event, Fold.State>) =
-module Config =
+module Factory =

     let private (|Category|) = function
-        | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
-    let create (Category cat) = Service(streamId >> Config.createDecider cat Category)
+        | Store.Context.Cosmos (context, cache) -> Store.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache)
+    let create (Category cat) = Service(streamId >> Store.createDecider cat Category)
diff --git a/propulsion-tracking-consumer/Config.fs b/propulsion-tracking-consumer/Store.fs
similarity index 78%
rename from propulsion-tracking-consumer/Config.fs
rename to propulsion-tracking-consumer/Store.fs
index 3e1129098..faa923fdb 100644
--- a/propulsion-tracking-consumer/Config.fs
+++ b/propulsion-tracking-consumer/Store.fs
@@ -1,8 +1,13 @@
-module ConsumerTemplate.Config
+module ConsumerTemplate.Store

 let log = Serilog.Log.ForContext("isMetric", true)
 let createDecider cat = Equinox.Decider.resolve log cat

+module Codec =
+
+    let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> =
+        FsCodec.SystemTextJson.CodecJsonElement.Create<'t>() // options = Options.Default
+
 module Cosmos =

     let private createCached codec initial fold accessStrategy (context, cache) =
@@ -14,5 +19,5 @@ module Cosmos =
         createCached codec initial fold accessStrategy (context, cache)

 [<NoComparison; NoEquality; RequireQualifiedAccess>]
-type Store =
-    | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache
+type Context =
+    | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Cache
diff --git a/propulsion-tracking-consumer/TrackingConsumer.fsproj b/propulsion-tracking-consumer/TrackingConsumer.fsproj
index d204071a1..f3933d1ce 100644
--- a/propulsion-tracking-consumer/TrackingConsumer.fsproj
+++ b/propulsion-tracking-consumer/TrackingConsumer.fsproj
@@ -9,7 +9,7 @@
-
+
@@ -17,9 +17,9 @@
-
-
-
+
+
+
diff --git a/tests/Equinox.Templates.Tests/DotnetBuild.fs b/tests/Equinox.Templates.Tests/DotnetBuild.fs
index 91041c35d..ff760b038 100644
--- a/tests/Equinox.Templates.Tests/DotnetBuild.fs
+++ b/tests/Equinox.Templates.Tests/DotnetBuild.fs
@@ -36,7 +36,7 @@ type EqxWebs() as this =
 #endif
         do this.Add("eqxweb", ["--todos"; "--aggregate"; "--dynamo"])

-type DotnetBuild(output : ITestOutputHelper, folder : EquinoxTemplatesFixture) =
+type DotnetBuild(output: ITestOutputHelper, folder: EquinoxTemplatesFixture) =

     let run template args =
         output.WriteLine(sprintf "using %s" folder.PackagePath)
diff --git a/tests/Equinox.Templates.Tests/Infrastructure.fs b/tests/Equinox.Templates.Tests/Infrastructure.fs
index 2a10cc571..8cd5493f0 100644
--- a/tests/Equinox.Templates.Tests/Infrastructure.fs
+++ b/tests/Equinox.Templates.Tests/Infrastructure.fs
@@ -9,7 +9,7 @@ open System.Runtime.CompilerServices

 module Process =

-    let direct (evt : IEvent<_, _>) output = evt.AddHandler(DataReceivedEventHandler(fun _sender args -> output args.Data))
+    let direct (evt: IEvent<_, _>) output = evt.AddHandler(DataReceivedEventHandler(fun _sender args -> output args.Data))

     let run fileName args =
         let out, err = System.Text.StringBuilder(), System.Text.StringBuilder()
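
Taken together, the consumer changes in this patch converge on one handler shape: `(FsCodec.StreamName, Propulsion.Sinks.Event[]) -> Async<StreamResult * Outcome>`, wired via `Factory.StartConcurrent` in place of the old `StreamsConsumer.Start` overloads. A condensed, hedged sketch (`config`, `parseOne`, `maxDop` and `stats` stand for the values the templates build from Args; `Outcome.Completed` is the tracking consumer's type):

    let startExample (log: Serilog.ILogger) config parseOne maxDop stats =
        let handle (_stream: FsCodec.StreamName) (events: Propulsion.Sinks.Event[]) = async {
            // ... domain processing elided ...
            return Propulsion.Sinks.StreamResult.AllProcessed, Outcome.Completed(events.Length, 0) }
        Propulsion.Kafka.Factory.StartConcurrent(log, config, parseOne, maxDop, handle, stats)
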