Compare commits
No commits in common. "master" and "v1.0-rc9" have entirely different histories.
.github/workflows/test.yml (vendored, 29 changed lines)
@@ -1,29 +0,0 @@
-name: Run Unit Tests
-
-on:
-  push:
-    branches:
-      - "**"
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Set up JDK 21
-        uses: actions/setup-java@v4
-        with:
-          java-version: '21'
-          distribution: 'zulu'
-
-      - name: Setup Gradle
-        uses: gradle/gradle-build-action@v3
-
-      - name: Make gradlew executable
-        run: chmod +x ./gradlew
-
-      - name: Run unit tests
-        run: ./gradlew test --stacktrace
.idea/vcs.xml (generated, 2 changed lines)
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
   <component name="VcsDirectoryMappings">
-    <mapping directory="" vcs="Git" />
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
   </component>
 </project>
@@ -20,7 +20,6 @@ dependencies {
 
 
     implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:1.10.2")
-    implementation("io.github.microutils:kotlin-logging-jvm:2.0.11")
 
     //testImplementation(kotlin("test"))
     testImplementation("org.assertj:assertj-core:3.4.1")
@@ -1,2 +1 @@
 kotlin.code.style=official
-org.gradle.parallel=false
@@ -1,5 +0,0 @@
-package no.iktdev.eventi
-
-@Target(AnnotationTarget.CLASS)
-@Retention(AnnotationRetention.RUNTIME)
-annotation class ListenerOrder(val value: Int)
@@ -7,5 +7,5 @@ abstract class ListenerRegistryImplementation<T> {
         listeners.add(listener)
     }
 
-    open fun getListeners(): List<T> = listeners.toList()
+    fun getListeners(): List<T> = listeners.toList()
 }
@@ -1,14 +0,0 @@
-package no.iktdev.eventi
-
-import java.time.Clock
-import java.time.Instant
-
-object MyTime {
-    private val clock: Clock = Clock.systemUTC()
-
-    @JvmStatic
-    fun utcNow(): Instant =
-        Instant.now(clock)
-
-}
-
@@ -1,6 +1,5 @@
 package no.iktdev.eventi
 
-import com.google.gson.Gson
 import com.google.gson.GsonBuilder
 import com.google.gson.JsonDeserializationContext
 import com.google.gson.JsonDeserializer
@@ -16,17 +15,17 @@ import no.iktdev.eventi.models.store.PersistedTask
 import no.iktdev.eventi.models.store.TaskStatus
 import no.iktdev.eventi.tasks.TaskTypeRegistry
 import java.lang.reflect.Type
-import java.time.Instant
 import java.time.LocalDateTime
 import java.time.format.DateTimeFormatter
 
 object ZDS {
     val gson = WGson.gson
 
-    fun Event.toPersisted(id: Long, persistedAt: Instant = MyTime.utcNow()): PersistedEvent? {
+    fun Event.toPersisted(id: Long, persistedAt: LocalDateTime = LocalDateTime.now()): PersistedEvent? {
         val payloadJson = gson.toJson(this)
         val eventName = this::class.simpleName ?: run {
-            throw IllegalStateException("Missing class name for event: $this")
+            println("Missing class name for event: $this")
+            return null
         }
         return PersistedEvent(
             id = id,
@@ -44,15 +43,17 @@ object ZDS {
     fun PersistedEvent.toEvent(): Event? {
         val clazz = EventTypeRegistry.resolve(event)
             ?: run {
-                throw IllegalStateException("Missing class name for event: $this")
+                println("Missing class name for event: $this")
+                return null
             }
         return gson.fromJson(data, clazz)
     }
 
-    fun Task.toPersisted(id: Long, status: TaskStatus = TaskStatus.Pending, persistedAt: Instant = MyTime.utcNow()): PersistedTask? {
+    fun Task.toPersisted(id: Long, status: TaskStatus = TaskStatus.Pending, persistedAt: LocalDateTime = LocalDateTime.now()): PersistedTask? {
         val payloadJson = gson.toJson(this)
         val taskName = this::class.simpleName ?: run {
-            throw IllegalStateException("Missing class name for task: $this")
+            println("Missing class name for task: $this")
+            return null
         }
         return PersistedTask(
             id = id,
@@ -82,47 +83,26 @@ object ZDS {
 
 object WGson {
     val gson = GsonBuilder()
-        .registerTypeAdapter(Instant::class.java, InstantAdapter())
-        // if you still have LocalDateTime somewhere:
         .registerTypeAdapter(LocalDateTime::class.java, LocalDateTimeAdapter())
         .create()
 
-    fun toJson(data: Any?): String =
-        gson.toJson(data)
-
-    class InstantAdapter : JsonSerializer<Instant>, JsonDeserializer<Instant> {
-        override fun serialize(
-            src: Instant,
-            typeOfSrc: Type,
-            context: JsonSerializationContext
-        ): JsonElement =
-            JsonPrimitive(src.toString()) // ISO-8601, UTC
-
-        override fun deserialize(
-            json: JsonElement,
-            typeOfT: Type,
-            context: JsonDeserializationContext
-        ): Instant =
-            Instant.parse(json.asString)
+    fun toJson(data: Any?): String {
+        return gson.toJson(data)
     }
 
     class LocalDateTimeAdapter : JsonSerializer<LocalDateTime>, JsonDeserializer<LocalDateTime> {
         private val formatter = DateTimeFormatter.ISO_LOCAL_DATE_TIME
 
         override fun serialize(
-            src: LocalDateTime,
-            typeOfSrc: Type,
-            context: JsonSerializationContext
-        ): JsonElement =
-            JsonPrimitive(src.format(formatter))
+            src: LocalDateTime, typeOfSrc: Type, context: JsonSerializationContext
+        ): JsonElement {
+            return JsonPrimitive(src.format(formatter))
+        }
 
         override fun deserialize(
-            json: JsonElement,
-            typeOfT: Type,
-            context: JsonDeserializationContext
-        ): LocalDateTime =
-            LocalDateTime.parse(json.asString, formatter)
+            json: JsonElement, typeOfT: Type, context: JsonDeserializationContext
+        ): LocalDateTime {
+            return LocalDateTime.parse(json.asString, formatter)
+        }
     }
 
 }
@@ -0,0 +1,52 @@
+package no.iktdev.eventi.events
+
+import kotlinx.coroutines.delay
+import no.iktdev.eventi.ZDS.toEvent
+import no.iktdev.eventi.stores.EventStore
+import java.time.Duration
+import java.time.LocalDateTime
+import java.util.UUID
+import kotlin.collections.iterator
+
+abstract class AbstractEventPoller(
+    private val eventStore: EventStore,
+    private val dispatchQueue: SequenceDispatchQueue,
+    private val dispatcher: EventDispatcher
+) {
+    var lastSeenTime: LocalDateTime = LocalDateTime.MIN
+    open var backoff = Duration.ofSeconds(2)
+        protected set
+    private val maxBackoff = Duration.ofMinutes(1)
+
+
+    suspend fun start() {
+        while (true) {
+            pollOnce()
+        }
+    }
+
+    suspend fun pollOnce() {
+        val newPersisted = eventStore.getPersistedEventsAfter(lastSeenTime)
+
+        if (newPersisted.isEmpty()) {
+            delay(backoff.toMillis())
+            backoff = backoff.multipliedBy(2).coerceAtMost(maxBackoff)
+            return
+        }
+
+        backoff = Duration.ofSeconds(2)
+
+        val grouped = newPersisted.groupBy { it.referenceId }
+
+        for ((referenceId, _) in grouped) {
+            if (dispatchQueue.isProcessing(referenceId)) continue
+
+            val fullLog = eventStore.getPersistedEventsFor(referenceId)
+            val events = fullLog.mapNotNull { it.toEvent() }
+
+            dispatchQueue.dispatch(referenceId, events, dispatcher)
+            lastSeenTime = fullLog.maxOf { it.persistedAt }
+        }
+    }
+
+}
@@ -2,30 +2,25 @@ package no.iktdev.eventi.events
 
 import no.iktdev.eventi.models.DeleteEvent
 import no.iktdev.eventi.models.Event
-import no.iktdev.eventi.models.SignalEvent
 import no.iktdev.eventi.stores.EventStore
 import java.util.UUID
 
-open class EventDispatcher(val eventStore: EventStore) {
+class EventDispatcher(val eventStore: EventStore) {
 
-    open fun dispatch(referenceId: UUID, events: List<Event>) {
-        val derivedFromIds = events.mapNotNull { it.metadata.derivedFromId }.flatten().toSet()
+    fun dispatch(referenceId: UUID, events: List<Event>) {
+        val derivedFromIds = events.mapNotNull { it.metadata.derivedFromId }.toSet()
         val deletedEventIds = events.filterIsInstance<DeleteEvent>().map { it.deletedEventId }
         val candidates = events
-            .filterNot { it is SignalEvent }
             .filter { it.eventId !in derivedFromIds }
             .filter { it.eventId !in deletedEventIds }
 
-        val effectiveHistory = events
-            .filter { it.eventId !in deletedEventIds } // remove deleted events
-            .filterNot { it is DeleteEvent } // remove the delete event itself
-
 
         EventListenerRegistry.getListeners().forEach { listener ->
             for (candidate in candidates) {
-                val result = listener.onEvent(candidate, effectiveHistory)
+                val result = listener.onEvent(candidate, events)
                 if (result != null) {
 
                     eventStore.persist(result)
+                    return
                 }
             }
         }
@@ -1,11 +1,6 @@
 package no.iktdev.eventi.events
 
-import no.iktdev.eventi.ListenerOrder
 import no.iktdev.eventi.ListenerRegistryImplementation
 
-object EventListenerRegistry : ListenerRegistryImplementation<EventListener>() {
-    override fun getListeners(): List<EventListener> {
-        return super.getListeners()
-            .sortedBy { it::class.java.getAnnotation(ListenerOrder::class.java)?.value ?: Int.MAX_VALUE }
-    }
+object EventListenerRegistry: ListenerRegistryImplementation<EventListener>() {
 }
@@ -1,138 +0,0 @@
-package no.iktdev.eventi.events
-
-import kotlinx.coroutines.delay
-import mu.KotlinLogging
-import no.iktdev.eventi.MyTime
-import no.iktdev.eventi.ZDS.toEvent
-import no.iktdev.eventi.stores.EventStore
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-abstract class EventPollerImplementation(
-    private val eventStore: EventStore,
-    private val dispatchQueue: SequenceDispatchQueue,
-    private val dispatcher: EventDispatcher
-) {
-    private val log = KotlinLogging.logger {}
-
-    /**
-     * Per-reference watermark:
-     * - first = last seen persistedAt
-     * - second = last seen persistedId
-     */
-    protected val refWatermark = mutableMapOf<UUID, Pair<Instant, Long>>()
-
-    /**
-     * Global scan hint (timestamp only).
-     * Used to avoid scanning entire table every time.
-     */
-    var lastSeenTime: Instant = Instant.EPOCH
-
-    open var backoff = Duration.ofSeconds(2)
-        protected set
-    private val maxBackoff = Duration.ofMinutes(1)
-
-    open suspend fun start() {
-        log.info { "EventPoller starting with initial backoff=$backoff" }
-        while (true) {
-            try {
-                pollOnce()
-            } catch (e: Exception) {
-                log.error(e) { "Error in poller loop" }
-                delay(backoff.toMillis())
-                backoff = backoff.multipliedBy(2).coerceAtMost(maxBackoff)
-            }
-        }
-    }
-
-    suspend fun pollOnce() {
-        val pollStartedAt = MyTime.utcNow()
-        log.debug { "🔍 Polling for new events" }
-
-        // Determine global scan start
-        val minRefTs = refWatermark.values.minOfOrNull { it.first }
-        val scanFrom = when (minRefTs) {
-            null -> lastSeenTime
-            else -> maxOf(lastSeenTime, minRefTs)
-        }
-
-        val newPersisted = eventStore.getPersistedEventsAfter(scanFrom)
-
-        if (newPersisted.isEmpty()) {
-            log.debug { "😴 No new events found. Backing off for $backoff" }
-            delay(backoff.toMillis())
-            backoff = backoff.multipliedBy(2).coerceAtMost(maxBackoff)
-            return
-        }
-
-        // Reset backoff
-        backoff = Duration.ofSeconds(2)
-        log.debug { "📬 Found ${newPersisted.size} new events after $scanFrom" }
-
-        val grouped = newPersisted.groupBy { it.referenceId }
-        var anyProcessed = false
-
-        // Track highest persistedAt seen globally this round
-        val maxPersistedThisRound = newPersisted.maxOf { it.persistedAt }
-
-        for ((ref, eventsForRef) in grouped) {
-            val (refSeenAt, refSeenId) = refWatermark[ref] ?: (Instant.EPOCH to 0L)
-
-            // Filter new events using (timestamp, id) ordering
-            val newForRef = eventsForRef.filter { ev ->
-                ev.persistedAt > refSeenAt ||
-                        (ev.persistedAt == refSeenAt && ev.id > refSeenId)
-            }
-
-            if (newForRef.isEmpty()) {
-                log.debug { "🧊 No new events for $ref since ($refSeenAt, id=$refSeenId)" }
-                continue
-            }
-
-            // If ref is busy, skip dispatch
-            if (dispatchQueue.isProcessing(ref)) {
-                log.debug { "⏳ $ref is busy — deferring ${newForRef.size} events" }
-                continue
-            }
-
-            // Fetch full sequence for dispatch
-            val fullLog = eventStore.getPersistedEventsFor(ref)
-            val events = fullLog.mapNotNull { it.toEvent() }
-
-            log.debug { "🚀 Dispatching ${events.size} events for $ref" }
-            dispatchQueue.dispatch(ref, events, dispatcher)
-
-            // Update watermark for this reference
-            val maxEvent = newForRef.maxWith(
-                compareBy({ it.persistedAt }, { it.id })
-            )
-
-            val newWatermarkAt = minOf(pollStartedAt, maxEvent.persistedAt)
-            val newWatermarkId = maxEvent.id
-
-            refWatermark[ref] = newWatermarkAt to newWatermarkId
-            anyProcessed = true
-
-            log.debug { "⏩ Updated watermark for $ref → ($newWatermarkAt, id=$newWatermarkId)" }
-        }
-
-        // Update global scan hint
-        val newLastSeen = maxOf(
-            lastSeenTime,
-            maxPersistedThisRound.plusNanos(1)
-        )
-
-        if (anyProcessed) {
-            val minRef = refWatermark.values.minOfOrNull { it.first }
-            lastSeenTime = when (minRef) {
-                null -> newLastSeen
-                else -> maxOf(newLastSeen, minRef)
-            }
-            log.debug { "📉 Global scanFrom updated → $lastSeenTime (anyProcessed=true)" }
-        } else {
-            lastSeenTime = newLastSeen
-            log.debug { "🔁 No refs processed — advancing global scanFrom to $lastSeenTime" }
-        }
-    }
-}
@@ -6,12 +6,11 @@ import kotlinx.coroutines.Job
 import kotlinx.coroutines.SupervisorJob
 import kotlinx.coroutines.launch
 import kotlinx.coroutines.sync.Semaphore
-import mu.KotlinLogging
 import no.iktdev.eventi.models.Event
 import java.util.UUID
 import java.util.concurrent.ConcurrentHashMap
 
-open class SequenceDispatchQueue(
+class SequenceDispatchQueue(
     private val maxConcurrency: Int = 8,
     private val scope: CoroutineScope = CoroutineScope(Dispatchers.Default + SupervisorJob())
 ) {
@@ -22,38 +21,23 @@ open class SequenceDispatchQueue(
         return scope
     }
 
-    private val log = KotlinLogging.logger {}
-
-
-    open fun isProcessing(referenceId: UUID): Boolean = referenceId in active
-
-    open fun dispatch(referenceId: UUID, events: List<Event>, dispatcher: EventDispatcher): Job? {
-        if (!active.add(referenceId)) {
-            log.debug {"⚠️ Already processing $referenceId, skipping dispatch"}
-            return null
-        }
-        log.debug {"▶️ Starting dispatch for $referenceId with ${events.size} events"}
+    fun isProcessing(referenceId: UUID): Boolean = referenceId in active
+
+    fun dispatch(referenceId: UUID, events: List<Event>, dispatcher: EventDispatcher): Job? {
+        if (!active.add(referenceId)) return null // already processing
 
         return scope.launch {
             try {
-                log.debug {"⏳ Waiting for semaphore for $referenceId"}
                 semaphore.acquire()
-                log.debug {"🔓 Acquired semaphore for $referenceId"}
 
                 try {
                     dispatcher.dispatch(referenceId, events)
                 } catch (e: Exception) {
-                    log.error("Dispatch failed for $referenceId: ${e.message}")
-                    e.printStackTrace()
+                    println("Dispatch failed for $referenceId: ${e.message}")
                 } finally {
 
                     semaphore.release()
-                    log.debug {"✅ Released semaphore for $referenceId"}
                 }
             } finally {
                 active.remove(referenceId)
-                log.debug {"🏁 Finished dispatch for $referenceId"}
             }
         }
     }
@@ -4,7 +4,7 @@ import java.util.UUID
 
 @Suppress("UNCHECKED_CAST")
 abstract class Event {
-    lateinit var referenceId: UUID
+    var referenceId: UUID = UUID.randomUUID()
         protected set
     var eventId: UUID = UUID.randomUUID()
         private set
@@ -15,15 +15,12 @@ abstract class Event {
 
     fun producedFrom(task: Task): Event = self<Event>().apply {
         referenceId = task.referenceId
-        val derivedFromIds: MutableList<UUID> = mutableListOf()
-        task.metadata.derivedFromId?.let { derivedFromIds.addAll(it) }
-        derivedFromIds.add(task.taskId)
-        metadata = Metadata().derivedFromEventId(derivedFromIds.toSet())
+        metadata = Metadata(derivedFromId = task.taskId)
     }
 
-    fun derivedOf(vararg event: Event) = self<Event>().apply {
-        referenceId = event.first().referenceId
-        metadata = Metadata().derivedFromEventId(*event.map { it.eventId }.toTypedArray())
+    fun derivedOf(event: Event) = self<Event>().apply {
+        referenceId = event.referenceId
+        metadata = Metadata(derivedFromId = event.eventId)
     }
 
     fun newReferenceId() = self<Event>().apply {
@@ -39,10 +36,9 @@ inline fun <reified T> Event.requireAs(): T {
     return this as? T ?: throw IllegalArgumentException("Expected ${T::class.java.name}, got ${this::class.java.name}")
 }
 
-abstract class DeleteEvent(
-    open val deletedEventId: UUID
-) : Event()
+abstract class DeleteEvent: Event() {
+    open lateinit var deletedEventId: UUID
+}
 
-abstract class SignalEvent(): Event()
 
 
@@ -1,17 +1,9 @@
 package no.iktdev.eventi.models
 
-import no.iktdev.eventi.MyTime
-import java.time.Instant
+import java.time.LocalDateTime
 import java.util.UUID
 
-class Metadata {
-    val created: Instant = MyTime.utcNow()
-    var derivedFromId: Set<UUID>? = null
-        private set
-    fun derivedFromEventId(vararg id: UUID) = apply {
-        derivedFromId = id.toSet()
-    }
-    fun derivedFromEventId(ids: Set<UUID>) = apply {
-        derivedFromId = ids
-    }
-}
+open class Metadata(
+    val created: LocalDateTime = LocalDateTime.now(),
+    val derivedFromId: UUID? = null
+) {}
@@ -1,5 +1,6 @@
 package no.iktdev.eventi.models
 
+import java.time.LocalDateTime
 import java.util.UUID
 
 
@@ -19,11 +20,7 @@ abstract class Task {
 
     fun derivedOf(event: Event) = self<Task>().apply {
         referenceId = event.referenceId
-        metadata = Metadata().derivedFromEventId(event.eventId)
-    }
-
-    fun usingReferenceId(refId: UUID) = self<Task>().apply {
-        referenceId = refId
+        metadata = Metadata(derivedFromId = event.eventId)
     }
 }
 
@@ -1,6 +1,6 @@
 package no.iktdev.eventi.models.store
 
-import java.time.Instant
+import java.time.LocalDateTime
 import java.util.UUID
 
 data class PersistedEvent(
@@ -9,5 +9,5 @@ data class PersistedEvent(
     val eventId: UUID,
     val event: String,
     val data: String,
-    val persistedAt: Instant
+    val persistedAt: LocalDateTime
 )
@@ -1,6 +1,6 @@
 package no.iktdev.eventi.models.store
 
-import java.time.Instant
+import java.time.LocalDateTime
 import java.util.UUID
 
 data class PersistedTask(
@@ -13,14 +13,13 @@ data class PersistedTask(
     val claimed: Boolean,
     val claimedBy: String? = null,
     val consumed: Boolean,
-    val lastCheckIn: Instant? = null,
-    val persistedAt: Instant
+    val lastCheckIn: LocalDateTime? = null,
+    val persistedAt: LocalDateTime
 ) {}
 
 enum class TaskStatus {
     Pending,
     InProgress,
     Completed,
-    Failed,
-    Cancelled
+    Failed
 }
@@ -2,11 +2,11 @@ package no.iktdev.eventi.stores
 
 import no.iktdev.eventi.models.Event
 import no.iktdev.eventi.models.store.PersistedEvent
-import java.time.Instant
+import java.time.LocalDateTime
 import java.util.UUID
 
 interface EventStore {
-    fun getPersistedEventsAfter(timestamp: Instant): List<PersistedEvent>
+    fun getPersistedEventsAfter(timestamp: LocalDateTime): List<PersistedEvent>
     fun getPersistedEventsFor(referenceId: UUID): List<PersistedEvent>
     fun persist(event: Event)
 }
@@ -2,7 +2,6 @@ package no.iktdev.eventi.stores
 
 import no.iktdev.eventi.models.Task
 import no.iktdev.eventi.models.store.PersistedTask
-import no.iktdev.eventi.models.store.TaskStatus
 import java.time.Duration
 import java.util.UUID
 
@@ -10,12 +9,12 @@ interface TaskStore {
     fun persist(task: Task)
 
     fun findByTaskId(taskId: UUID): PersistedTask?
-    fun findByReferenceId(referenceId: UUID): List<PersistedTask>
+    fun findByEventId(eventId: UUID): List<PersistedTask>
     fun findUnclaimed(referenceId: UUID): List<PersistedTask>
 
     fun claim(taskId: UUID, workerId: String): Boolean
     fun heartbeat(taskId: UUID)
-    fun markConsumed(taskId: UUID, status: TaskStatus)
+    fun markConsumed(taskId: UUID)
     fun releaseExpiredTasks(timeout: Duration = Duration.ofMinutes(15))
 
     fun getPendingTasks(): List<PersistedTask>
@@ -1,78 +1,51 @@
 package no.iktdev.eventi.tasks
 
 import kotlinx.coroutines.delay
-import mu.KotlinLogging
 import no.iktdev.eventi.ZDS.toTask
 import no.iktdev.eventi.models.Task
 import no.iktdev.eventi.stores.TaskStore
 import java.time.Duration
 
-abstract class TaskPollerImplementation(
+abstract class AbstractTaskPoller(
     private val taskStore: TaskStore,
     private val reporterFactory: (Task) -> TaskReporter
 ) {
-    private val log = KotlinLogging.logger {}
 
 
     open var backoff = Duration.ofSeconds(2)
         protected set
     private val maxBackoff = Duration.ofMinutes(1)
 
-    open suspend fun start() {
-        log.info { "TaskPoller starting with initial backoff=$backoff" }
+    suspend fun start() {
         while (true) {
-            try {
             pollOnce()
-            } catch (e: Exception) {
-                e.printStackTrace()
-                delay(backoff.toMillis())
-                backoff = backoff.multipliedBy(2).coerceAtMost(maxBackoff)
-            }
         }
     }
 
     suspend fun pollOnce() {
-        log.debug { "Polling for pending tasks…" }
         val newPersistedTasks = taskStore.getPendingTasks()
 
         if (newPersistedTasks.isEmpty()) {
-            log.debug { "No pending tasks found. Backing off for $backoff" }
            delay(backoff.toMillis())
            backoff = backoff.multipliedBy(2).coerceAtMost(maxBackoff)
            return
        }
-        log.debug { "Found ${newPersistedTasks.size} persisted tasks" }
 
        val tasks = newPersistedTasks.mapNotNull { it.toTask() }
        var acceptedAny = false
 
        for (task in tasks) {
            val listener = TaskListenerRegistry.getListeners().firstOrNull { it.supports(task) && !it.isBusy } ?: continue
            val claimed = taskStore.claim(task.taskId, listener.getWorkerId())
-            if (!claimed) {
-                log.debug { "Task ${task.taskId} is already claimed by another worker" }
-                continue
-            }
-
-            log.debug { "Task ${task.taskId} claimed by ${listener.getWorkerId()}" }
+            if (!claimed) continue
 
            val reporter = reporterFactory(task)
-            val accepted = try {
-                listener.accept(task, reporter)
-            } catch (e: Exception) {
-                log.error("Error while processing task ${task.taskId} by listener ${listener.getWorkerId()}: ${e.message}")
-                e.printStackTrace()
-                false
-            }
+            val accepted = listener.accept(task, reporter)
            acceptedAny = acceptedAny || accepted
        }
 
        if (!acceptedAny) {
-            log.debug { "No tasks were accepted. Backing off for $backoff" }
            delay(backoff.toMillis())
            backoff = backoff.multipliedBy(2).coerceAtMost(maxBackoff)
        } else {
-            log.debug { "At least one task accepted. Resetting backoff." }
            backoff = Duration.ofSeconds(2)
        }
    }
@@ -3,18 +3,11 @@ package no.iktdev.eventi.tasks
 import kotlinx.coroutines.CoroutineScope
 import kotlinx.coroutines.Dispatchers
 import kotlinx.coroutines.Job
-import kotlinx.coroutines.SupervisorJob
-import kotlinx.coroutines.delay
-import kotlinx.coroutines.isActive
 import kotlinx.coroutines.launch
 import no.iktdev.eventi.models.Event
 import no.iktdev.eventi.models.Task
-import no.iktdev.eventi.models.store.TaskStatus
-import org.jetbrains.annotations.VisibleForTesting
 import java.util.UUID
 import kotlin.coroutines.cancellation.CancellationException
-import kotlin.time.Duration
-import kotlin.time.Duration.Companion.minutes
 
 /**
  * Abstract base class for handling tasks with asynchronous processing and reporting.
@@ -31,8 +24,7 @@ abstract class TaskListener(val taskType: TaskType = TaskType.CPU_INTENSIVE): Ta
     var reporter: TaskReporter? = null
         private set
     abstract fun getWorkerId(): String
-    var currentJob: Job? = null
-        protected set
+    protected var currentJob: Job? = null
     var currentTask: Task? = null
         private set
 
@@ -47,18 +39,6 @@ abstract class TaskListener(val taskType: TaskType = TaskType.CPU_INTENSIVE): Ta
         }
     }
 
-    private val heartbeatScope = CoroutineScope(SupervisorJob() + Dispatchers.Default)
-    @VisibleForTesting
-    internal var heartbeatRunner: Job? = null
-    fun withHeartbeatRunner(interval: Duration = 5.minutes, block: () -> Unit): Job {
-        return heartbeatScope.launch {
-            while (isActive) {
-                block()
-                delay(interval)
-            }
-        }.also { heartbeatRunner = it }
-    }
-
     override fun accept(task: Task, reporter: TaskReporter): Boolean {
         if (isBusy || !supports(task)) return false
         this.reporter = reporter
@@ -68,20 +48,13 @@ abstract class TaskListener(val taskType: TaskType = TaskType.CPU_INTENSIVE): Ta
         currentJob = getDispatcherForTask(task).launch {
             try {
                 val result = onTask(task)
+                reporter.markConsumed(task.taskId)
                 onComplete(task, result)
             } catch (e: CancellationException) {
-                // This is a real cancellation
-                onCancelled(task)
-                throw e // important: do not swallow cancellation
-
+                onCancelled()
             } catch (e: Exception) {
-                // This is an actual failure
                 onError(task, e)
-
             } finally {
-                heartbeatRunner?.cancel()
-                currentJob?.cancel()
-                heartbeatRunner = null
                 currentJob = null
                 currentTask = null
                 this@TaskListener.reporter = null
@@ -90,29 +63,24 @@ abstract class TaskListener(val taskType: TaskType = TaskType.CPU_INTENSIVE): Ta
         return true
     }
 
-    abstract fun createIncompleteStateTaskEvent(task: Task, status: TaskStatus, exception: Exception? = null): Event
-
     override fun onError(task: Task, exception: Exception) {
         reporter?.log(task.taskId, "Error processing task: ${exception.message}")
         exception.printStackTrace()
-        reporter?.markFailed(task.referenceId, task.taskId)
-        reporter!!.publishEvent(createIncompleteStateTaskEvent(task, TaskStatus.Failed, exception))
+        reporter?.markConsumed(task.taskId)
     }
 
     override fun onComplete(task: Task, result: Event?) {
-        reporter!!.markCompleted(task.taskId)
+        reporter!!.markConsumed(task.taskId)
         reporter!!.log(task.taskId, "Task completed successfully.")
         result?.let {
             reporter!!.publishEvent(result)
         }
     }
 
-    override fun onCancelled(task: Task) {
-        reporter!!.markCancelled(task.referenceId, task.taskId)
+    override fun onCancelled() {
         currentJob?.cancel()
-        heartbeatRunner?.cancel()
+        currentJob = null
         currentTask = null
-        reporter!!.publishEvent(createIncompleteStateTaskEvent(task, TaskStatus.Cancelled))
     }
 }
 
@@ -129,15 +97,13 @@ interface TaskListenerImplementation {
     suspend fun onTask(task: Task): Event?
     fun onComplete(task: Task, result: Event?)
     fun onError(task: Task, exception: Exception)
-    fun onCancelled(task: Task)
+    fun onCancelled()
 }
 
 interface TaskReporter {
     fun markClaimed(taskId: UUID, workerId: String)
     fun updateLastSeen(taskId: UUID)
-    fun markCompleted(taskId: UUID)
-    fun markFailed(referenceId: UUID, taskId: UUID)
-    fun markCancelled(referenceId: UUID, taskId: UUID)
+    fun markConsumed(taskId: UUID)
     fun updateProgress(taskId: UUID, progress: Int)
     fun log(taskId: UUID, message: String)
     fun publishEvent(event: Event)
@@ -1,11 +1,7 @@
 package no.iktdev.eventi.tasks
 
-import no.iktdev.eventi.ListenerOrder
 import no.iktdev.eventi.ListenerRegistryImplementation
 
 object TaskListenerRegistry: ListenerRegistryImplementation<TaskListener>() {
-    override fun getListeners(): List<TaskListener> {
-        return super.getListeners()
-            .sortedBy { it::class.java.getAnnotation(ListenerOrder::class.java)?.value ?: Int.MAX_VALUE }
-    }
 }
@@ -8,149 +8,96 @@ import no.iktdev.eventi.events.EventListenerRegistry
 import no.iktdev.eventi.events.EventTypeRegistry
 import no.iktdev.eventi.models.DeleteEvent
 import no.iktdev.eventi.models.Event
-import no.iktdev.eventi.models.SignalEvent
 import no.iktdev.eventi.testUtil.wipe
-import org.assertj.core.api.Assertions.assertThat
 import org.junit.jupiter.api.Assertions.assertEquals
 import org.junit.jupiter.api.Assertions.assertFalse
 import org.junit.jupiter.api.Assertions.assertNotNull
 import org.junit.jupiter.api.Assertions.assertTrue
 import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
 import org.junit.jupiter.api.Test
+import java.time.LocalDateTime
 import java.util.UUID
 
-@DisplayName(
-    """
-    EventDispatcher
-    When events are dispatched to listeners
-    If the events contain derived, deleted or new events
-    Then the dispatcher should handle filtering, replays and history correctly
-    """
-)
-class EventDispatcherTest : TestBase() {
+class EventDispatcherTest: TestBase() {
 
     val dispatcher = EventDispatcher(eventStore)
 
-    class DerivedEvent : Event()
-    class TriggerEvent : Event()
-    class OtherEvent : Event()
-    class DummyEvent : Event() {
+    class DerivedEvent(): Event()
+    class TriggerEvent(): Event() {
     }
+    class OtherEvent(): Event()
 
 
     @BeforeEach
     fun setup() {
         EventTypeRegistry.wipe()
         EventListenerRegistry.wipe()
+        // Verify that it is empty
 
-        EventTypeRegistry.register(
-            listOf(
+        EventTypeRegistry.register(listOf(
             DerivedEvent::class.java,
             TriggerEvent::class.java,
-            OtherEvent::class.java,
-            DummyEvent::class.java
-            )
-        )
+            OtherEvent::class.java
+        ))
     }
 
-    @Test
-    @DisplayName(
-        """
-        When a TriggerEvent is dispatched
-        If a listener produces one DerivedEvent
-        Then only one new event should be produced and the process should stop
-        """
-    )
-    fun shouldProduceOneEventAndStop() {
-        ProducingListener()
-
-        val trigger = TriggerEvent().newReferenceId()
+    @Test
+    fun `should produce one event and stop`() {
+        val listener = ProducingListener()
+
+        val trigger = TriggerEvent()
         dispatcher.dispatch(trigger.referenceId, listOf(trigger))
 
         val produced = eventStore.all().firstOrNull()
         assertNotNull(produced)
 
         val event = produced!!.toEvent()
-        assertThat(event!!.metadata.derivedFromId).hasSize(1)
-        assertThat(event.metadata.derivedFromId).contains(trigger.eventId)
+        assertEquals(trigger.eventId, event!!.metadata.derivedFromId)
         assertTrue(event is DerivedEvent)
     }
 
     @Test
-    @DisplayName(
-        """
-        When an event has already derived a DerivedEvent
-        If the dispatcher replays the history
-        Then the DerivedEvent should not be produced again
-        """
-    )
-    fun shouldSkipAlreadyDerivedEvents() {
-        ProducingListener()
-
-        val trigger = TriggerEvent().newReferenceId()
-        val derived = DerivedEvent().derivedOf(trigger).toPersisted(1L, MyTime.utcNow())
+    fun `should skip already derived events`() {
+        val listener = ProducingListener()
+
+        val trigger = TriggerEvent()
+        val derived = DerivedEvent().derivedOf(trigger).toPersisted(1L, LocalDateTime.now())
 
         eventStore.persist(derived!!.toEvent()!!) // simulate prior production
 
-        dispatcher.dispatch(trigger.referenceId, listOf(trigger, derived.toEvent()!!))
+        dispatcher.dispatch(trigger.referenceId, listOf(trigger, derived!!.toEvent()!!))
 
-        assertEquals(1, eventStore.all().size)
+        assertEquals(1, eventStore.all().size) // no new event produced
     }
 
     @Test
-    @DisplayName(
-        """
-        When multiple events are dispatched
-        If a listener receives an event
-        Then the full history should be delivered in the context
-        """
-    )
-    fun shouldPassFullContextToListener() {
+    fun `should pass full context to listener`() {
         val listener = ContextCapturingListener()
 
-        val e1 = TriggerEvent().newReferenceId()
-        val e2 = OtherEvent().newReferenceId()
+        val e1 = TriggerEvent()
+        val e2 = OtherEvent()
         dispatcher.dispatch(e1.referenceId, listOf(e1, e2))
 
         assertEquals(2, listener.context.size)
     }
 
     @Test
-    @DisplayName(
-        """
-        When a replay happens
-        If an event has already produced a DerivedEvent
-        Then the DerivedEvent should not be produced again
-        """
-    )
-    fun shouldBehaveDeterministicallyAcrossReplays() {
-        val referenceId = UUID.randomUUID()
-
-        ProducingListener()
-
-        val trigger = TriggerEvent().usingReferenceId(referenceId)
+    fun `should behave deterministically across replays`() {
+        val listener = ProducingListener()
+
+        val trigger = TriggerEvent()
         dispatcher.dispatch(trigger.referenceId, listOf(trigger))
         val replayContext = listOf(trigger) + eventStore.all().mapNotNull { it.toEvent() }
 
         dispatcher.dispatch(trigger.referenceId, replayContext)
 
-        assertEquals(1, eventStore.all().size)
+        assertEquals(1, eventStore.all().size) // no duplicate
     }
 
     @Test
-    @DisplayName(
-        """
-        When a DeleteEvent points at an earlier event
-        If the dispatcher filters candidates
-        Then deleted events should not be delivered as candidates
-        """
-    )
-    fun shouldNotDeliverDeletedEventsAsCandidates() {
-        val referenceId = UUID.randomUUID()
-
+    fun `should not deliver deleted events as candidates`() {
         val dispatcher = EventDispatcher(eventStore)
         val received = mutableListOf<Event>()
 
         object : EventListener() {
             override fun onEvent(event: Event, history: List<Event>): Event? {
                 received += event
@@ -158,10 +105,12 @@ class EventDispatcherTest : TestBase() {
             }
         }
         // Original event
-        val original = TriggerEvent().usingReferenceId(referenceId)
+        val original = TriggerEvent()
 
         // Delete event that points at the original
-        val deleted = object : DeleteEvent(original.eventId) {}.apply { newReferenceId() }
+        val deleted = object : DeleteEvent() {
+            override var deletedEventId = original.eventId
+        }
 
         // Dispatch with both events
         dispatcher.dispatch(original.referenceId, listOf(original, deleted))
@ -178,211 +127,37 @@ class EventDispatcherTest : TestBase() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
@DisplayName(
|
fun `should deliver DeleteEvent to listeners that react to it`() {
|
||||||
"""
|
|
||||||
Når en DeleteEvent dispatches alene
|
|
||||||
Hvis en lytter reagerer på DeleteEvent
|
|
||||||
Så skal DeleteEvent leveres som kandidat
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
fun shouldDeliverDeleteEventToListenersThatReactToIt() {
|
|
||||||
val received = mutableListOf<Event>()
|
val received = mutableListOf<Event>()
|
||||||
val referenceId = UUID.randomUUID()
|
val listener = object : EventListener() {
|
||||||
|
override fun onEvent(event: Event, context: List<Event>): Event? {
|
||||||
object : EventListener() {
|
|
||||||
override fun onEvent(event: Event, history: List<Event>): Event? {
|
|
||||||
if (event is DeleteEvent) received += event
|
if (event is DeleteEvent) received += event
|
||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
val deleted = object : DeleteEvent(UUID.randomUUID()) {}.apply { usingReferenceId(referenceId) }
|
val deleted = object : DeleteEvent() {
|
||||||
|
override var deletedEventId = UUID.randomUUID()
|
||||||
|
}
|
||||||
dispatcher.dispatch(deleted.referenceId, listOf(deleted))
|
dispatcher.dispatch(deleted.referenceId, listOf(deleted))
|
||||||
|
|
||||||
assertTrue(received.contains(deleted))
|
assertTrue(received.contains(deleted))
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName(
|
|
||||||
"""
|
|
||||||
Når en event har avledet en ny event
|
|
||||||
Hvis dispatcheren replays historikken
|
|
||||||
Så skal ikke original-eventen leveres som kandidat igjen
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
fun shouldNotRedeliverEventsThatHaveProducedDerivedEvents() {
|
|
||||||
ProducingListener()
|
|
||||||
|
|
||||||
val trigger = TriggerEvent().newReferenceId()
|
|
||||||
// Første dispatch: trigger produserer en DerivedEvent
|
|
||||||
dispatcher.dispatch(trigger.referenceId, listOf(trigger))
|
|
||||||
|
|
||||||
val produced = eventStore.all().mapNotNull { it.toEvent() }
|
|
||||||
assertEquals(1, produced.size)
|
|
||||||
val derived = produced.first()
|
|
||||||
assertTrue(derived is DerivedEvent)
|
|
||||||
|
|
||||||
// Replay: nå har vi både trigger og derived i konteksten
|
|
||||||
val replayContext = listOf(trigger, derived)
|
|
||||||
dispatcher.dispatch(trigger.referenceId, replayContext)
|
|
||||||
|
|
||||||
// Verifiser at ingen nye events ble produsert
|
|
||||||
assertEquals(1, eventStore.all().size) {
|
|
||||||
"TriggerEvent skal ikke leveres som kandidat igjen når den allerede har avledet en DerivedEvent"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName(
|
|
||||||
"""
|
|
||||||
Når en DeleteEvent slettet en tidligere event
|
|
||||||
Hvis dispatcheren bygger historikk
|
|
||||||
Så skal slettede events ikke være med i history
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
fun historyShouldExcludeDeletedEvents() {
|
|
||||||
val dispatcher = EventDispatcher(eventStore)
|
|
||||||
|
|
||||||
val original = TriggerEvent().newReferenceId()
|
|
||||||
val deleted = object : DeleteEvent(original.eventId) {}.apply { usingReferenceId(original.referenceId) }
|
|
||||||
|
|
||||||
var receivedHistory: List<Event> = emptyList()
|
|
||||||
|
|
||||||
object : EventListener() {
|
|
||||||
override fun onEvent(event: Event, history: List<Event>): Event? {
|
|
||||||
receivedHistory = history
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dispatcher.dispatch(original.referenceId, listOf(original, deleted))
|
|
||||||
|
|
||||||
assertFalse(receivedHistory.contains(original))
|
|
||||||
assertFalse(receivedHistory.contains(deleted))
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName(
|
|
||||||
"""
|
|
||||||
Når en DeleteEvent slettet en event
|
|
||||||
Hvis andre events fortsatt er gyldige
|
|
||||||
Så skal history kun inneholde de ikke-slettede events
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
fun historyShouldKeepNonDeletedEvents() {
|
|
||||||
val dispatcher = EventDispatcher(eventStore)
|
|
||||||
val referenceId = UUID.randomUUID()
|
|
||||||
val e1 = TriggerEvent().usingReferenceId(referenceId)
|
|
||||||
val e2 = OtherEvent().usingReferenceId(referenceId)
|
|
||||||
val deleted = object : DeleteEvent(e1.eventId) {}
|
|
||||||
|
|
||||||
var receivedHistory: List<Event> = emptyList()
|
|
||||||
|
|
||||||
object : EventListener() {
|
|
||||||
override fun onEvent(event: Event, history: List<Event>): Event? {
|
|
||||||
receivedHistory = history
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dispatcher.dispatch(e1.referenceId, listOf(e1, e2, deleted))
|
|
||||||
|
|
||||||
assertTrue(receivedHistory.contains(e2))
|
|
||||||
assertFalse(receivedHistory.contains(e1))
|
|
||||||
assertFalse(receivedHistory.contains(deleted))
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName(
|
|
||||||
"""
|
|
||||||
Når en DeleteEvent er kandidat
|
|
||||||
Hvis historikken kun inneholder slettede events
|
|
||||||
Så skal history være tom
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
fun deleteEventShouldBeDeliveredButHistoryEmpty() {
|
|
||||||
val dispatcher = EventDispatcher(eventStore)
|
|
||||||
|
|
||||||
val original = TriggerEvent().newReferenceId()
|
|
||||||
val deleted = object : DeleteEvent(original.eventId) {}.apply { newReferenceId() }
|
|
||||||
|
|
||||||
var receivedEvent: Event? = null
|
|
||||||
var receivedHistory: List<Event> = emptyList()
|
|
||||||
|
|
||||||
object : EventListener() {
|
|
||||||
override fun onEvent(event: Event, history: List<Event>): Event? {
|
|
||||||
receivedEvent = event
|
|
||||||
receivedHistory = history
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dispatcher.dispatch(original.referenceId, listOf(original, deleted))
|
|
||||||
|
|
||||||
assertTrue(receivedEvent is DeleteEvent)
|
|
||||||
assertTrue(receivedHistory.isEmpty())
|
|
||||||
}
|
|

    @Test
    @DisplayName(
        """
            Når en SignalEvent dispatches
            Hvis SignalEvent ikke skal være kandidat
            Så skal den ikke leveres til lyttere, men fortsatt være i historikken
        """
    )
    fun shouldNotDeliverSignalEventAsCandidate() {
        // Arrange
        class TestSignalEvent : SignalEvent()
        EventTypeRegistry.register(listOf(TestSignalEvent::class.java))

        val received = mutableListOf<Event>()
        var finalHistory: List<Event>? = null
        object : EventListener() {
            override fun onEvent(event: Event, history: List<Event>): Event? {
                received += event
                finalHistory = history
                return null
            }
        }

        val refId = UUID.randomUUID()
        val trigger = TriggerEvent().usingReferenceId(refId)
        val signal = TestSignalEvent().usingReferenceId(refId)

        // Act
        dispatcher.dispatch(trigger.referenceId, listOf(trigger, signal))

        // Assert
        // 1) TriggerEvent skal leveres
        assertTrue(received.any { it is TriggerEvent }) {
            "TriggerEvent skal leveres som kandidat"
        }

        // 2) SignalEvent skal IKKE leveres
        assertFalse(received.any { it is TestSignalEvent }) {
            "SignalEvent skal ikke leveres som kandidat"
        }

        assertNotNull(finalHistory)
        assertTrue(finalHistory!!.any { it is TestSignalEvent }) {
            "SignalEvent skal være i historikken selv om den ikke er kandidat"
        }
    }

    // --- Test helpers ---

    class ProducingListener : EventListener() {
-        override fun onEvent(event: Event, history: List<Event>): Event? {
+        override fun onEvent(event: Event, context: List<Event>): Event? {
            return if (event is TriggerEvent) DerivedEvent().derivedOf(event) else null
        }
    }

    class ContextCapturingListener : EventListener() {
        var context: List<Event> = emptyList()
-        override fun onEvent(event: Event, history: List<Event>): Event? {
-            this.context = history
+        override fun onEvent(event: Event, context: List<Event>): Event? {
+            this.context = context
            return null
        }
    }
@ -4,30 +4,24 @@ import no.iktdev.eventi.ZDS.toPersisted
import no.iktdev.eventi.models.Event
import no.iktdev.eventi.models.store.PersistedEvent
import no.iktdev.eventi.stores.EventStore
-import java.time.Instant
+import java.time.LocalDateTime
import java.util.UUID

class InMemoryEventStore : EventStore {
    private val persisted = mutableListOf<PersistedEvent>()
    private var nextId = 1L

-    override fun getPersistedEventsAfter(timestamp: Instant): List<PersistedEvent> =
+    override fun getPersistedEventsAfter(timestamp: LocalDateTime): List<PersistedEvent> =
        persisted.filter { it.persistedAt > timestamp }

    override fun getPersistedEventsFor(referenceId: UUID): List<PersistedEvent> =
        persisted.filter { it.referenceId == referenceId }

    override fun persist(event: Event) {
-        val persistedEvent = event.toPersisted(nextId++, MyTime.utcNow())
+        val persistedEvent = event.toPersisted(nextId++, LocalDateTime.now())
        persisted += persistedEvent!!
    }

-    fun persistAt(event: Event, persistedAt: Instant) {
-        val persistedEvent = event.toPersisted(nextId++, persistedAt)
-        persisted += persistedEvent!!
-    }

    fun all(): List<PersistedEvent> = persisted
    fun clear() { persisted.clear(); nextId = 1L }
}
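For orientation, a minimal usage sketch of the master-side store above. EchoEvent, newReferenceId(), usingReferenceId() and MyTime are taken from elsewhere in this compare; the wiring itself is an assumption, not library documentation.

// Sketch only (assumes the master-side, Instant-based API shown above).
fun inMemoryStoreSketch() {
    val store = InMemoryEventStore()
    val first = EchoEvent("hello").newReferenceId()

    store.persist(first) // assigns the next id and stamps persistedAt with MyTime.utcNow()

    // persistAt exists only on the master side; tests use it to pin persistedAt
    // so the poller's watermark can be driven deterministically.
    store.persistAt(EchoEvent("later").usingReferenceId(first.referenceId), MyTime.utcNow().plusSeconds(5))

    check(store.getPersistedEventsFor(first.referenceId).size == 2)
    check(store.getPersistedEventsAfter(MyTime.utcNow().minusSeconds(60)).isNotEmpty())
}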
@ -6,9 +6,8 @@ import no.iktdev.eventi.models.store.PersistedTask
import no.iktdev.eventi.models.store.TaskStatus
import no.iktdev.eventi.stores.TaskStore
import java.time.Duration
-import java.time.temporal.ChronoUnit
+import java.time.LocalDateTime
import java.util.UUID
-import kotlin.concurrent.atomics.AtomicReference

open class InMemoryTaskStore : TaskStore {
    private val tasks = mutableListOf<PersistedTask>()
@ -21,8 +20,8 @@ open class InMemoryTaskStore : TaskStore {

    override fun findByTaskId(taskId: UUID) = tasks.find { it.taskId == taskId }

-    override fun findByReferenceId(referenceId: UUID) =
-        tasks.filter { it.referenceId == referenceId }
+    override fun findByEventId(eventId: UUID) =
+        tasks.filter { it.data.contains(eventId.toString()) }

    override fun findUnclaimed(referenceId: UUID) =
        tasks.filter { it.referenceId == referenceId && !it.claimed && !it.consumed }
@ -30,22 +29,22 @@ open class InMemoryTaskStore : TaskStore {
    override fun claim(taskId: UUID, workerId: String): Boolean {
        val task = findByTaskId(taskId) ?: return false
        if (task.claimed && !isExpired(task)) return false
-        update(task.copy(claimed = true, claimedBy = workerId, lastCheckIn = MyTime.utcNow()))
+        update(task.copy(claimed = true, claimedBy = workerId, lastCheckIn = LocalDateTime.now()))
        return true
    }

    override fun heartbeat(taskId: UUID) {
        val task = findByTaskId(taskId) ?: return
-        update(task.copy(lastCheckIn = MyTime.utcNow()))
+        update(task.copy(lastCheckIn = LocalDateTime.now()))
    }

-    override fun markConsumed(taskId: UUID, status: TaskStatus) {
+    override fun markConsumed(taskId: UUID) {
        val task = findByTaskId(taskId) ?: return
-        update(task.copy(consumed = true, status = status))
+        update(task.copy(consumed = true, status = TaskStatus.Completed))
    }

    override fun releaseExpiredTasks(timeout: Duration) {
-        val now = MyTime.utcNow()
+        val now = LocalDateTime.now()
        tasks.filter {
            it.claimed && !it.consumed && it.lastCheckIn?.isBefore(now.minus(timeout)) == true
        }.forEach {
@ -60,8 +59,8 @@ open class InMemoryTaskStore : TaskStore {
    }

    private fun isExpired(task: PersistedTask): Boolean {
-        val now = MyTime.utcNow()
-        return task.lastCheckIn?.isBefore(now.minus(15, ChronoUnit.MINUTES)) == true
+        val now = LocalDateTime.now()
+        return task.lastCheckIn?.isBefore(now.minusMinutes(15)) == true
    }

    private fun serialize(data: Any?): String = data?.toString() ?: "{}"
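A short sketch of the task lifecycle the master-side InMemoryTaskStore above is written for. The method names and fields come from the diff itself; the order of calls is an assumption.

// Sketch only (master-side signatures: claim, heartbeat, markConsumed(taskId, status), releaseExpiredTasks).
fun taskLifecycleSketch(store: InMemoryTaskStore, taskId: UUID) {
    if (store.claim(taskId, workerId = "worker-1")) { // rejected if already claimed and not yet expired
        store.heartbeat(taskId)                        // refreshes lastCheckIn so the claim is kept alive
        store.markConsumed(taskId, TaskStatus.Completed)
    }
    store.releaseExpiredTasks(Duration.ofMinutes(15))  // frees claims whose lastCheckIn is older than the timeout
}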
@ -12,52 +12,40 @@ import org.junit.jupiter.api.Assertions.assertNull
import org.junit.jupiter.api.BeforeEach
import org.junit.jupiter.api.DisplayName
import org.junit.jupiter.api.Test
+import java.time.LocalDateTime

-@DisplayName("""
-    ZDS – Serialization/Deserialization System
-    Når Event- og Task-objekter persisteres og gjenopprettes
-    Hvis type-registrene er korrekt konfigurert
-    Så skal ZDS kunne serialisere og deserialisere objektene uten tap av data
-""")
class ZDSTest {

    @BeforeEach
    fun setup() {
        EventTypeRegistry.wipe()
        TaskTypeRegistry.wipe()

        // Verifiser at det er tomt
        assertNull(EventTypeRegistry.resolve("SomeEvent"))
    }

    @Test
-    @DisplayName("""
-        Når et Event-objekt persisteres via ZDS
-        Hvis typen er registrert i EventTypeRegistry
-        Så skal det kunne gjenopprettes som riktig Event-type med samme data
-    """)
+    @DisplayName("Test ZDS with Event object")
    fun scenario1() {
        EventTypeRegistry.register(EchoEvent::class.java)

-        val echo = EchoEvent("hello").newReferenceId()
+        val echo = EchoEvent("hello")
        val persisted = echo.toPersisted(id = 1L)

        val restored = persisted!!.toEvent()
        assert(restored is EchoEvent)
        assert((restored as EchoEvent).data == "hello")
    }

    data class TestTask(
        val data: String?
-    ) : Task()
+    ): Task()

    @Test
-    @DisplayName("""
-        Når et Task-objekt persisteres via ZDS
-        Hvis typen er registrert i TaskTypeRegistry
-        Så skal det kunne gjenopprettes som riktig Task-type med metadata intakt
-    """)
+    @DisplayName("Test ZDS with Task object")
    fun scenario2() {

        TaskTypeRegistry.register(TestTask::class.java)

        val task = TestTask("Potato")
@ -70,5 +58,7 @@ class ZDSTest {
        assert((restored as TestTask).data == "Potato")
        assert(restored.metadata.created == task.metadata.created)
        assert(restored.metadata.derivedFromId == task.metadata.derivedFromId)

    }

}
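The round trip ZDSTest exercises, condensed into one sketch. The assumption that EventTypeRegistry.resolve keys on the simple class name is mine; the diff only shows that resolve("SomeEvent") is null when nothing is registered.

// Sketch only: register -> persist -> restore, mirroring scenario1 above.
fun zdsRoundTripSketch() {
    EventTypeRegistry.register(EchoEvent::class.java)
    // Assumption: resolve() looks types up by simple class name, as setup() implies.
    checkNotNull(EventTypeRegistry.resolve("EchoEvent"))

    val persisted = EchoEvent("hello").newReferenceId().toPersisted(id = 1L)
    val restored = persisted!!.toEvent()

    check(restored is EchoEvent && restored.data == "hello")
}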
@ -0,0 +1,218 @@
|
|||||||
|
package no.iktdev.eventi.events
|
||||||
|
|
||||||
|
import kotlinx.coroutines.CompletableDeferred
|
||||||
|
import kotlinx.coroutines.Dispatchers
|
||||||
|
import kotlinx.coroutines.ExperimentalCoroutinesApi
|
||||||
|
import kotlinx.coroutines.Job
|
||||||
|
import kotlinx.coroutines.awaitAll
|
||||||
|
import kotlinx.coroutines.channels.Channel
|
||||||
|
import kotlinx.coroutines.isActive
|
||||||
|
import kotlinx.coroutines.job
|
||||||
|
import kotlinx.coroutines.launch
|
||||||
|
import kotlinx.coroutines.runBlocking
|
||||||
|
import kotlinx.coroutines.test.advanceUntilIdle
|
||||||
|
import kotlinx.coroutines.test.runTest
|
||||||
|
import kotlinx.coroutines.withContext
|
||||||
|
import kotlinx.coroutines.withTimeout
|
||||||
|
import no.iktdev.eventi.EventDispatcherTest
|
||||||
|
import no.iktdev.eventi.EventDispatcherTest.DerivedEvent
|
||||||
|
import no.iktdev.eventi.EventDispatcherTest.OtherEvent
|
||||||
|
import no.iktdev.eventi.EventDispatcherTest.TriggerEvent
|
||||||
|
import no.iktdev.eventi.TestBase
|
||||||
|
import no.iktdev.eventi.models.Event
|
||||||
|
import no.iktdev.eventi.stores.EventStore
|
||||||
|
import no.iktdev.eventi.testUtil.wipe
|
||||||
|
import org.junit.jupiter.api.Assertions.assertEquals
|
||||||
|
import org.junit.jupiter.api.Assertions.assertFalse
|
||||||
|
import org.junit.jupiter.api.Assertions.assertTrue
|
||||||
|
import org.junit.jupiter.api.BeforeEach
|
||||||
|
import org.junit.jupiter.api.Test
|
||||||
|
import java.time.Duration
|
||||||
|
import java.time.LocalDateTime
|
||||||
|
import java.util.UUID
|
||||||
|
import java.util.concurrent.ConcurrentHashMap
|
||||||
|
|
||||||
|
class AbstractEventPollerTest : TestBase() {
|
||||||
|
val dispatcher = EventDispatcher(eventStore)
|
||||||
|
val queue = SequenceDispatchQueue(maxConcurrency = 8)
|
||||||
|
|
||||||
|
val poller = object : AbstractEventPoller(eventStore, queue, dispatcher) {}
|
||||||
|
|
||||||
|
@BeforeEach
|
||||||
|
fun setup() {
|
||||||
|
EventTypeRegistry.wipe()
|
||||||
|
EventListenerRegistry.wipe()
|
||||||
|
eventStore.clear()
|
||||||
|
// Verifiser at det er tomt
|
||||||
|
|
||||||
|
EventTypeRegistry.register(listOf(
|
||||||
|
DerivedEvent::class.java,
|
||||||
|
TriggerEvent::class.java,
|
||||||
|
OtherEvent::class.java
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
fun `pollOnce should dispatch all new referenceIds and update lastSeenTime`() = runTest {
|
||||||
|
val dispatched = ConcurrentHashMap.newKeySet<UUID>()
|
||||||
|
val completionMap = mutableMapOf<UUID, CompletableDeferred<Unit>>()
|
||||||
|
|
||||||
|
EventListenerRegistry.registerListener(object : EventListener() {
|
||||||
|
override fun onEvent(event: Event, context: List<Event>): Event? {
|
||||||
|
dispatched += event.referenceId
|
||||||
|
completionMap[event.referenceId]?.complete(Unit)
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
val referenceIds = (1..10).map { UUID.randomUUID() }
|
||||||
|
|
||||||
|
referenceIds.forEach { refId ->
|
||||||
|
val e = EventDispatcherTest.TriggerEvent().usingReferenceId(refId)
|
||||||
|
eventStore.persist(e) // persistedAt settes automatisk her
|
||||||
|
completionMap[refId] = CompletableDeferred()
|
||||||
|
}
|
||||||
|
|
||||||
|
poller.pollOnce()
|
||||||
|
|
||||||
|
completionMap.values.awaitAll()
|
||||||
|
|
||||||
|
assertEquals(referenceIds.toSet(), dispatched)
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
fun `pollOnce should increase backoff when no events and reset when events arrive`() = runTest {
|
||||||
|
val testPoller = object : AbstractEventPoller(eventStore, queue, dispatcher) {
|
||||||
|
fun currentBackoff(): Duration = backoff
|
||||||
|
}
|
||||||
|
|
||||||
|
testPoller.pollOnce()
|
||||||
|
val afterFirst = testPoller.currentBackoff()
|
||||||
|
|
||||||
|
testPoller.pollOnce()
|
||||||
|
val afterSecond = testPoller.currentBackoff()
|
||||||
|
|
||||||
|
assertTrue(afterSecond > afterFirst)
|
||||||
|
|
||||||
|
val e = TriggerEvent().usingReferenceId(UUID.randomUUID())
|
||||||
|
eventStore.persist(e)
|
||||||
|
|
||||||
|
testPoller.pollOnce()
|
||||||
|
val afterReset = testPoller.currentBackoff()
|
||||||
|
|
||||||
|
assertEquals(Duration.ofSeconds(2), afterReset)
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
fun `pollOnce should group and dispatch exactly 3 events for one referenceId`() = runTest {
|
||||||
|
val refId = UUID.randomUUID()
|
||||||
|
val received = mutableListOf<Event>()
|
||||||
|
val done = CompletableDeferred<Unit>()
|
||||||
|
|
||||||
|
// Wipe alt før test
|
||||||
|
EventTypeRegistry.wipe()
|
||||||
|
EventListenerRegistry.wipe()
|
||||||
|
eventStore.clear() // sørg for at InMemoryEventStore støtter dette
|
||||||
|
|
||||||
|
EventTypeRegistry.register(listOf(TriggerEvent::class.java))
|
||||||
|
|
||||||
|
object : EventListener() {
|
||||||
|
override fun onEvent(event: Event, context: List<Event>): Event? {
|
||||||
|
received += event
|
||||||
|
if (received.size == 3) done.complete(Unit)
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
repeat(3) {
|
||||||
|
eventStore.persist(TriggerEvent().usingReferenceId(refId))
|
||||||
|
}
|
||||||
|
|
||||||
|
poller.pollOnce()
|
||||||
|
|
||||||
|
done.await()
|
||||||
|
|
||||||
|
assertEquals(3, received.size)
|
||||||
|
assertTrue(received.all { it.referenceId == refId })
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Test
|
||||||
|
fun `pollOnce should ignore events before lastSeenTime`() = runTest {
|
||||||
|
val refId = UUID.randomUUID()
|
||||||
|
val ignored = TriggerEvent().usingReferenceId(refId)
|
||||||
|
|
||||||
|
val testPoller = object : AbstractEventPoller(eventStore, queue, dispatcher) {
|
||||||
|
init {
|
||||||
|
lastSeenTime = LocalDateTime.now().plusSeconds(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
eventStore.persist(ignored)
|
||||||
|
|
||||||
|
testPoller.pollOnce()
|
||||||
|
|
||||||
|
assertFalse(queue.isProcessing(refId))
|
||||||
|
}
|
||||||
|
|
||||||
|
@OptIn(ExperimentalCoroutinesApi::class)
|
||||||
|
@Test
|
||||||
|
fun `poller handles manually injected duplicate event`() = runTest {
|
||||||
|
EventTypeRegistry.register(listOf(MarcoEvent::class.java, EchoEvent::class.java))
|
||||||
|
val channel = Channel<Event>(Channel.UNLIMITED)
|
||||||
|
val handled = mutableListOf<Event>()
|
||||||
|
|
||||||
|
|
||||||
|
// Setup
|
||||||
|
object : EventListener() {
|
||||||
|
|
||||||
|
override fun onEvent(event: Event, context: List<Event>): Event? {
|
||||||
|
if (event !is EchoEvent)
|
||||||
|
return null
|
||||||
|
handled += event
|
||||||
|
channel.trySend(event)
|
||||||
|
return MarcoEvent(true).derivedOf(event)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
val poller = object : AbstractEventPoller(eventStore, queue, dispatcher) {
|
||||||
|
}
|
||||||
|
|
||||||
|
// Original event
|
||||||
|
val original = EchoEvent(data = "Hello")
|
||||||
|
eventStore.persist(original)
|
||||||
|
|
||||||
|
// Act
|
||||||
|
poller.pollOnce()
|
||||||
|
withContext(Dispatchers.Default.limitedParallelism(1)) {
|
||||||
|
withTimeout(Duration.ofMinutes(1).toMillis()) {
|
||||||
|
repeat(1) { channel.receive() }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Manual replay with new eventId, same referenceId
|
||||||
|
val duplicateEvent = EchoEvent("Test me").usingReferenceId(original.referenceId)
|
||||||
|
|
||||||
|
eventStore.persist(duplicateEvent)
|
||||||
|
|
||||||
|
// Act
|
||||||
|
poller.pollOnce()
|
||||||
|
|
||||||
|
withContext(Dispatchers.Default.limitedParallelism(1)) {
|
||||||
|
withTimeout(Duration.ofMinutes(1).toMillis()) {
|
||||||
|
repeat(1) { channel.receive() }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
assertEquals(2, handled.size)
|
||||||
|
assertTrue(handled.any { it.eventId == original.eventId })
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
@ -1,65 +0,0 @@
package no.iktdev.eventi.events

import no.iktdev.eventi.ListenerOrder
import no.iktdev.eventi.models.Event
import no.iktdev.eventi.testUtil.wipe
import org.assertj.core.api.Assertions.assertThat
import org.junit.jupiter.api.Assertions.*
import org.junit.jupiter.api.BeforeEach
import org.junit.jupiter.api.DisplayName
import org.junit.jupiter.api.Test

@DisplayName("""
    EventListenerRegistry
    Når lyttere registreres med og uten @ListenerOrder
    Hvis registry sorterer dem etter annotasjonen
    Så skal rekkefølgen være deterministisk og korrekt
""")
class EventListenerRegistryTest {

    @ListenerOrder(1)
    class MockTest1 : EventListener() {
        override fun onEvent(event: Event, history: List<Event>): Event? = null
    }

    @ListenerOrder(2)
    class MockTest2 : EventListener() {
        override fun onEvent(event: Event, history: List<Event>): Event? = null
    }

    @ListenerOrder(3)
    class MockTest3 : EventListener() {
        override fun onEvent(event: Event, history: List<Event>): Event? = null
    }

    class MockTestRandom : EventListener() {
        override fun onEvent(event: Event, history: List<Event>): Event? = null
    }

    @BeforeEach
    fun clear() {
        EventListenerRegistry.wipe()
    }

    @Test
    @DisplayName("""
        Når flere lyttere registreres i vilkårlig rekkefølge
        Hvis noen har @ListenerOrder og andre ikke
        Så skal registry returnere dem sortert etter order, og usorterte sist
    """)
    fun validateOrder() {
        MockTestRandom()
        MockTest1()
        MockTest2()
        MockTest3()

        val listeners = EventListenerRegistry.getListeners()

        assertThat(listeners.map { it::class.simpleName }).containsExactly(
            MockTest1::class.simpleName, // @ListenerOrder(1)
            MockTest2::class.simpleName, // @ListenerOrder(2)
            MockTest3::class.simpleName, // @ListenerOrder(3)
            MockTestRandom::class.simpleName // no annotation → goes last
        )
    }
}
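One plausible way the ordering asserted above could be produced; this is an illustration of the @ListenerOrder contract, not the registry's actual implementation.

// Sketch only: sort listeners by @ListenerOrder, unannotated listeners last.
fun sortByListenerOrder(listeners: List<EventListener>): List<EventListener> =
    listeners.sortedBy { listener ->
        // @ListenerOrder has RUNTIME retention, so its value is readable via reflection.
        listener::class.java.getAnnotation(ListenerOrder::class.java)?.value ?: Int.MAX_VALUE
    }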
@ -1,242 +0,0 @@
|
|||||||
package no.iktdev.eventi.events
|
|
||||||
|
|
||||||
import kotlinx.coroutines.CompletableDeferred
|
|
||||||
import kotlinx.coroutines.CoroutineDispatcher
|
|
||||||
import kotlinx.coroutines.CoroutineScope
|
|
||||||
import kotlinx.coroutines.ExperimentalCoroutinesApi
|
|
||||||
import kotlinx.coroutines.SupervisorJob
|
|
||||||
import kotlinx.coroutines.channels.Channel
|
|
||||||
import kotlinx.coroutines.test.StandardTestDispatcher
|
|
||||||
import kotlinx.coroutines.test.runTest
|
|
||||||
import kotlinx.coroutines.withContext
|
|
||||||
import kotlinx.coroutines.withTimeout
|
|
||||||
import kotlinx.coroutines.awaitAll
|
|
||||||
import no.iktdev.eventi.EventDispatcherTest.DerivedEvent
|
|
||||||
import no.iktdev.eventi.EventDispatcherTest.OtherEvent
|
|
||||||
import no.iktdev.eventi.EventDispatcherTest.TriggerEvent
|
|
||||||
import no.iktdev.eventi.MyTime
|
|
||||||
import no.iktdev.eventi.TestBase
|
|
||||||
import no.iktdev.eventi.models.Event
|
|
||||||
import no.iktdev.eventi.testUtil.TestSequenceDispatchQueue
|
|
||||||
import no.iktdev.eventi.testUtil.wipe
|
|
||||||
import org.junit.jupiter.api.Assertions.assertEquals
|
|
||||||
import org.junit.jupiter.api.Assertions.assertFalse
|
|
||||||
import org.junit.jupiter.api.Assertions.assertTrue
|
|
||||||
import org.junit.jupiter.api.BeforeEach
|
|
||||||
import org.junit.jupiter.api.DisplayName
|
|
||||||
import org.junit.jupiter.api.Test
|
|
||||||
import java.time.Duration
|
|
||||||
import java.util.UUID
|
|
||||||
import java.util.concurrent.ConcurrentHashMap
|
|
||||||
|
|
||||||
@DisplayName("""
|
|
||||||
EventPollerImplementation
|
|
||||||
Når polleren leser nye events fra EventStore og samarbeider med SequenceDispatchQueue
|
|
||||||
Hvis nye events ankommer, køen er travel, eller duplikater dukker opp
|
|
||||||
Så skal polleren dispatch'e riktig, oppdatere lastSeenTime og unngå duplikater
|
|
||||||
""")
|
|
||||||
class EventPollerImplementationTest : TestBase() {
|
|
||||||
|
|
||||||
private val dispatcher = EventDispatcher(eventStore)
|
|
||||||
|
|
||||||
@BeforeEach
|
|
||||||
fun setup() {
|
|
||||||
EventTypeRegistry.wipe()
|
|
||||||
EventListenerRegistry.wipe()
|
|
||||||
eventStore.clear()
|
|
||||||
|
|
||||||
EventTypeRegistry.register(
|
|
||||||
listOf(
|
|
||||||
DerivedEvent::class.java,
|
|
||||||
TriggerEvent::class.java,
|
|
||||||
OtherEvent::class.java
|
|
||||||
)
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren finner nye referenceId-er med events
|
|
||||||
Hvis pollOnce kjøres
|
|
||||||
Så skal alle referenceId-er dispatch'es og lastSeenTime oppdateres
|
|
||||||
""")
|
|
||||||
fun pollOnceDispatchesAllNewReferenceIdsAndUpdatesLastSeenTime() = runTest {
|
|
||||||
val testDispatcher = StandardTestDispatcher(testScheduler)
|
|
||||||
val queue = TestSequenceDispatchQueue(maxConcurrency = 8, dispatcher = testDispatcher)
|
|
||||||
val poller = object : EventPollerImplementation(eventStore, queue, dispatcher) {}
|
|
||||||
|
|
||||||
val dispatched = ConcurrentHashMap.newKeySet<UUID>()
|
|
||||||
val completionMap = mutableMapOf<UUID, CompletableDeferred<Unit>>()
|
|
||||||
|
|
||||||
EventListenerRegistry.registerListener(
|
|
||||||
object : EventListener() {
|
|
||||||
override fun onEvent(event: Event, history: List<Event>): Event? {
|
|
||||||
dispatched += event.referenceId
|
|
||||||
completionMap[event.referenceId]?.complete(Unit)
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
val referenceIds = (1..10).map { UUID.randomUUID() }
|
|
||||||
|
|
||||||
referenceIds.forEach { refId ->
|
|
||||||
val e = TriggerEvent().usingReferenceId(refId)
|
|
||||||
eventStore.persist(e)
|
|
||||||
completionMap[refId] = CompletableDeferred()
|
|
||||||
}
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
completionMap.values.awaitAll()
|
|
||||||
|
|
||||||
assertEquals(referenceIds.toSet(), dispatched)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren ikke finner nye events
|
|
||||||
Hvis pollOnce kjøres flere ganger
|
|
||||||
Så skal backoff øke, og resettes når nye events ankommer
|
|
||||||
""")
|
|
||||||
fun pollOnceIncreasesBackoffWhenNoEventsAndResetsWhenEventsArrive() = runTest {
|
|
||||||
val testDispatcher = StandardTestDispatcher(testScheduler)
|
|
||||||
val queue = TestSequenceDispatchQueue(maxConcurrency = 8, dispatcher = testDispatcher)
|
|
||||||
|
|
||||||
val testPoller = object : EventPollerImplementation(eventStore, queue, dispatcher) {
|
|
||||||
fun currentBackoff(): Duration = backoff
|
|
||||||
}
|
|
||||||
|
|
||||||
testPoller.pollOnce()
|
|
||||||
val afterFirst = testPoller.currentBackoff()
|
|
||||||
|
|
||||||
testPoller.pollOnce()
|
|
||||||
val afterSecond = testPoller.currentBackoff()
|
|
||||||
|
|
||||||
assertTrue(afterSecond > afterFirst)
|
|
||||||
|
|
||||||
val e = TriggerEvent().usingReferenceId(UUID.randomUUID())
|
|
||||||
eventStore.persist(e)
|
|
||||||
|
|
||||||
testPoller.pollOnce()
|
|
||||||
val afterReset = testPoller.currentBackoff()
|
|
||||||
|
|
||||||
assertEquals(Duration.ofSeconds(2), afterReset)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når flere events med samme referenceId ligger i EventStore
|
|
||||||
Hvis pollOnce kjøres
|
|
||||||
Så skal polleren gruppere og dispatch'e alle tre i én batch
|
|
||||||
""")
|
|
||||||
fun pollOnceGroupsAndDispatchesExactlyThreeEventsForOneReferenceId() = runTest {
|
|
||||||
val testDispatcher = StandardTestDispatcher(testScheduler)
|
|
||||||
val queue = TestSequenceDispatchQueue(maxConcurrency = 8, dispatcher = testDispatcher)
|
|
||||||
val poller = object : EventPollerImplementation(eventStore, queue, dispatcher) {}
|
|
||||||
|
|
||||||
val refId = UUID.randomUUID()
|
|
||||||
val received = mutableListOf<Event>()
|
|
||||||
val done = CompletableDeferred<Unit>()
|
|
||||||
|
|
||||||
EventTypeRegistry.wipe()
|
|
||||||
EventListenerRegistry.wipe()
|
|
||||||
eventStore.clear()
|
|
||||||
|
|
||||||
EventTypeRegistry.register(listOf(TriggerEvent::class.java))
|
|
||||||
|
|
||||||
object : EventListener() {
|
|
||||||
override fun onEvent(event: Event, history: List<Event>): Event? {
|
|
||||||
received += event
|
|
||||||
if (received.size == 3) done.complete(Unit)
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
repeat(3) {
|
|
||||||
eventStore.persist(TriggerEvent().usingReferenceId(refId))
|
|
||||||
}
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
done.await()
|
|
||||||
|
|
||||||
assertEquals(3, received.size)
|
|
||||||
assertTrue(received.all { it.referenceId == refId })
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren har en lastSeenTime i fremtiden
|
|
||||||
Hvis events ankommer med eldre timestamp
|
|
||||||
Så skal polleren ignorere dem
|
|
||||||
""")
|
|
||||||
fun pollOnceIgnoresEventsBeforeLastSeenTime() = runTest {
|
|
||||||
val testDispatcher = StandardTestDispatcher(testScheduler)
|
|
||||||
val queue = TestSequenceDispatchQueue(maxConcurrency = 8, dispatcher = testDispatcher)
|
|
||||||
|
|
||||||
val testPoller = object : EventPollerImplementation(eventStore, queue, dispatcher) {
|
|
||||||
init {
|
|
||||||
lastSeenTime = MyTime.utcNow().plusSeconds(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
val refId = UUID.randomUUID()
|
|
||||||
val ignored = TriggerEvent().usingReferenceId(refId)
|
|
||||||
|
|
||||||
eventStore.persist(ignored)
|
|
||||||
testPoller.pollOnce()
|
|
||||||
|
|
||||||
assertFalse(queue.isProcessing(refId))
|
|
||||||
}
|
|
||||||
|
|
||||||
@OptIn(ExperimentalCoroutinesApi::class)
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når en duplikat-event injiseres manuelt i EventStore
|
|
||||||
Hvis polleren kjører igjen
|
|
||||||
Så skal begge events prosesseres, men uten å produsere duplikate derived events
|
|
||||||
""")
|
|
||||||
fun pollerHandlesManuallyInjectedDuplicateEvent() = runTest {
|
|
||||||
val testDispatcher = StandardTestDispatcher(testScheduler)
|
|
||||||
val queue = TestSequenceDispatchQueue(maxConcurrency = 8, dispatcher = testDispatcher)
|
|
||||||
val poller = object : EventPollerImplementation(eventStore, queue, dispatcher) {}
|
|
||||||
|
|
||||||
EventTypeRegistry.register(listOf(MarcoEvent::class.java, EchoEvent::class.java))
|
|
||||||
|
|
||||||
val channel = Channel<Event>(Channel.UNLIMITED)
|
|
||||||
val handled = mutableListOf<Event>()
|
|
||||||
|
|
||||||
object : EventListener() {
|
|
||||||
override fun onEvent(event: Event, history: List<Event>): Event? {
|
|
||||||
if (event !is EchoEvent) return null
|
|
||||||
handled += event
|
|
||||||
channel.trySend(event)
|
|
||||||
return MarcoEvent(true).derivedOf(event)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
val original = EchoEvent("Hello").newReferenceId()
|
|
||||||
eventStore.persist(original)
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
|
|
||||||
withContext(testDispatcher) {
|
|
||||||
withTimeout(60_000) {
|
|
||||||
channel.receive()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
val duplicateEvent = EchoEvent("Test me").usingReferenceId(original.referenceId)
|
|
||||||
eventStore.persist(duplicateEvent)
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
|
|
||||||
withContext(testDispatcher) {
|
|
||||||
withTimeout(60_000) {
|
|
||||||
channel.receive()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assertEquals(2, handled.size)
|
|
||||||
assertTrue(handled.any { it.eventId == original.eventId })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -1,264 +0,0 @@
|
|||||||
@file:OptIn(ExperimentalCoroutinesApi::class)
|
|
||||||
|
|
||||||
package no.iktdev.eventi.events
|
|
||||||
|
|
||||||
import kotlinx.coroutines.*
|
|
||||||
import kotlinx.coroutines.test.*
|
|
||||||
import no.iktdev.eventi.InMemoryEventStore
|
|
||||||
import org.assertj.core.api.Assertions.assertThat
|
|
||||||
import org.junit.jupiter.api.BeforeEach
|
|
||||||
import org.junit.jupiter.api.Test
|
|
||||||
import java.util.UUID
|
|
||||||
import no.iktdev.eventi.models.Event
|
|
||||||
import no.iktdev.eventi.models.Metadata
|
|
||||||
import org.junit.jupiter.api.DisplayName
|
|
||||||
import java.time.Instant
|
|
||||||
import java.util.concurrent.ConcurrentHashMap
|
|
||||||
|
|
||||||
|
|
||||||
class FakeDispatchQueue(
|
|
||||||
private val scope: CoroutineScope
|
|
||||||
) : SequenceDispatchQueue(8, scope) {
|
|
||||||
|
|
||||||
private val active = ConcurrentHashMap.newKeySet<UUID>()
|
|
||||||
|
|
||||||
override fun isProcessing(referenceId: UUID): Boolean = referenceId in active
|
|
||||||
|
|
||||||
override fun dispatch(referenceId: UUID, events: List<Event>, dispatcher: EventDispatcher): Job {
|
|
||||||
active.add(referenceId)
|
|
||||||
return scope.launch {
|
|
||||||
try {
|
|
||||||
dispatcher.dispatch(referenceId, events)
|
|
||||||
} finally {
|
|
||||||
active.remove(referenceId)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class FakeDispatcher : EventDispatcher(InMemoryEventStore()) {
|
|
||||||
|
|
||||||
val dispatched = mutableListOf<Pair<UUID, List<Event>>>()
|
|
||||||
|
|
||||||
override fun dispatch(referenceId: UUID, events: List<Event>) {
|
|
||||||
dispatched += referenceId to events
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
class TestEvent : Event() {
|
|
||||||
fun withReference(id: UUID): TestEvent {
|
|
||||||
this.referenceId = id
|
|
||||||
return this
|
|
||||||
}
|
|
||||||
fun setMetadata(metadata: Metadata): TestEvent {
|
|
||||||
this.metadata = metadata
|
|
||||||
return this
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@DisplayName("""
|
|
||||||
EventPollerImplementation – simulert kø og dispatch
|
|
||||||
Når polleren leser events fra EventStore og samarbeider med SequenceDispatchQueue
|
|
||||||
Hvis køen er ledig, travel, eller events ankommer i ulike tidsrekkefølger
|
|
||||||
Så skal polleren oppdatere lastSeenTime, unngå duplikater og prosessere riktig
|
|
||||||
""")
|
|
||||||
class RunSimulationTestTest {
|
|
||||||
|
|
||||||
private lateinit var store: InMemoryEventStore
|
|
||||||
private lateinit var dispatcher: FakeDispatcher
|
|
||||||
private lateinit var testDispatcher: TestDispatcher
|
|
||||||
private lateinit var scope: CoroutineScope
|
|
||||||
private lateinit var queue: FakeDispatchQueue
|
|
||||||
private lateinit var poller: EventPollerImplementation
|
|
||||||
|
|
||||||
@BeforeEach
|
|
||||||
fun setup() {
|
|
||||||
store = InMemoryEventStore()
|
|
||||||
dispatcher = FakeDispatcher()
|
|
||||||
testDispatcher = StandardTestDispatcher()
|
|
||||||
scope = CoroutineScope(testDispatcher)
|
|
||||||
queue = FakeDispatchQueue(scope)
|
|
||||||
EventTypeRegistry.register(TestEvent::class.java)
|
|
||||||
poller = object : EventPollerImplementation(store, queue, dispatcher) {
|
|
||||||
override suspend fun start() = error("Do not call start() in tests")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private fun persistEvent(ref: UUID) {
|
|
||||||
val e = TestEvent().withReference(ref)
|
|
||||||
store.persist(e.setMetadata(Metadata()))
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren finner nye events
|
|
||||||
Hvis dispatch skjer normalt
|
|
||||||
Så skal lastSeenTime oppdateres og dispatcheren få én dispatch
|
|
||||||
""")
|
|
||||||
fun pollerUpdatesLastSeenTimeWhenDispatchHappens() = runTest(testDispatcher) {
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
|
|
||||||
persistEvent(ref)
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
advanceUntilIdle()
|
|
||||||
|
|
||||||
assertThat(poller.lastSeenTime).isGreaterThan(Instant.EPOCH)
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
class AlwaysBusyDispatchQueue : SequenceDispatchQueue(8, CoroutineScope(Dispatchers.Default)) {
|
|
||||||
override fun isProcessing(referenceId: UUID): Boolean = true
|
|
||||||
override fun dispatch(referenceId: UUID, events: List<Event>, dispatcher: EventDispatcher) = null
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når køen er travel og ikke kan dispatch'e
|
|
||||||
Hvis polleren likevel ser nye events
|
|
||||||
Så skal lastSeenTime fortsatt oppdateres (livelock-fix)
|
|
||||||
""")
|
|
||||||
fun pollerUpdatesLastSeenTimeEvenWhenQueueBusy() = runTest {
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
val t = Instant.parse("2026-01-22T12:00:00Z")
|
|
||||||
|
|
||||||
store.persistAt(TestEvent().withReference(ref), t)
|
|
||||||
|
|
||||||
val busyQueue = AlwaysBusyDispatchQueue()
|
|
||||||
val poller = object : EventPollerImplementation(store, busyQueue, dispatcher) {}
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
advanceUntilIdle()
|
|
||||||
|
|
||||||
// Etter livelock-fixen skal lastSeenTime være *etter* eventet
|
|
||||||
assertThat(poller.lastSeenTime)
|
|
||||||
.isGreaterThan(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren kjører flere ganger uten nye events
|
|
||||||
Hvis første poll allerede dispatch'et eventet
|
|
||||||
Så skal polleren ikke dispatch'e samme event to ganger
|
|
||||||
""")
|
|
||||||
fun pollerDoesNotDoubleDispatch() = runTest(testDispatcher) {
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
|
|
||||||
persistEvent(ref)
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
advanceUntilIdle()
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
advanceUntilIdle()
|
|
||||||
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når flere referenceId-er har nye events
|
|
||||||
Hvis polleren kjører én runde
|
|
||||||
Så skal begge referenceId-er dispatch'es
|
|
||||||
""")
|
|
||||||
fun pollerHandlesMultipleReferenceIds() = runTest(testDispatcher) {
|
|
||||||
val refA = UUID.randomUUID()
|
|
||||||
val refB = UUID.randomUUID()
|
|
||||||
|
|
||||||
persistEvent(refA)
|
|
||||||
persistEvent(refB)
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
advanceUntilIdle()
|
|
||||||
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(2)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når to events har identisk timestamp
|
|
||||||
Hvis polleren leser dem i samme poll
|
|
||||||
Så skal begge referenceId-er dispatch'es
|
|
||||||
""")
|
|
||||||
fun pollerHandlesIdenticalTimestamps() = runTest(testDispatcher) {
|
|
||||||
val refA = UUID.randomUUID()
|
|
||||||
val refB = UUID.randomUUID()
|
|
||||||
|
|
||||||
persistEvent(refA)
|
|
||||||
persistEvent(refB)
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
advanceUntilIdle()
|
|
||||||
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(2)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren ikke finner nye events
|
|
||||||
Hvis pollOnce kjøres
|
|
||||||
Så skal backoff økes
|
|
||||||
""")
|
|
||||||
fun pollerBacksOffWhenNoNewEvents() = runTest(testDispatcher) {
|
|
||||||
val before = poller.backoff
|
|
||||||
|
|
||||||
poller.pollOnce()
|
|
||||||
advanceUntilIdle()
|
|
||||||
|
|
||||||
assertThat(poller.backoff).isGreaterThan(before)
|
|
||||||
}
|
|
||||||
|
|
||||||
class ControlledDispatchQueue(
|
|
||||||
private val scope: CoroutineScope
|
|
||||||
) : SequenceDispatchQueue(8, scope) {
|
|
||||||
|
|
||||||
val busyRefs = mutableSetOf<UUID>()
|
|
||||||
|
|
||||||
override fun isProcessing(referenceId: UUID): Boolean =
|
|
||||||
referenceId in busyRefs
|
|
||||||
|
|
||||||
override fun dispatch(referenceId: UUID, events: List<Event>, dispatcher: EventDispatcher): Job {
|
|
||||||
return scope.launch {
|
|
||||||
dispatcher.dispatch(referenceId, events)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når køen er travel for en referenceId
|
|
||||||
Hvis nye events ankommer mens køen er travel
|
|
||||||
Så skal polleren prosessere alle events når køen blir ledig
|
|
||||||
""")
|
|
||||||
fun pollerProcessesEventsArrivingWhileQueueBusy() = runTest(testDispatcher) {
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
|
|
||||||
persistEvent(ref)
|
|
||||||
|
|
||||||
val controlledQueue = ControlledDispatchQueue(scope)
|
|
||||||
controlledQueue.busyRefs += ref
|
|
||||||
|
|
||||||
val poller = object : EventPollerImplementation(store, controlledQueue, dispatcher) {}
|
|
||||||
|
|
||||||
// Poll #1: busy → no dispatch
|
|
||||||
poller.pollOnce()
|
|
||||||
advanceUntilIdle()
|
|
||||||
|
|
||||||
assertThat(dispatcher.dispatched).isEmpty()
|
|
||||||
|
|
||||||
// Now free
|
|
||||||
controlledQueue.busyRefs.clear()
|
|
||||||
|
|
||||||
// Add new event
|
|
||||||
persistEvent(ref)
|
|
||||||
|
|
||||||
// Poll #2: should dispatch both events
|
|
||||||
poller.pollOnce()
|
|
||||||
advanceUntilIdle()
|
|
||||||
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(1)
|
|
||||||
assertThat(dispatcher.dispatched.single().second).hasSize(2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -10,54 +10,40 @@ import no.iktdev.eventi.models.Event
import no.iktdev.eventi.testUtil.wipe
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
import org.junit.jupiter.api.Test
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

-@DisplayName("""
-    SequenceDispatchQueue
-    Når mange referenceId-er skal dispatches parallelt
-    Hvis køen har begrenset samtidighet
-    Så skal alle events prosesseres uten tap
-""")
-class SequenceDispatchQueueTest : TestBase() {
+class SequenceDispatchQueueTest: TestBase() {

    @BeforeEach
    fun setup() {
        EventTypeRegistry.wipe()
        EventListenerRegistry.wipe()
+        // Verifiser at det er tomt

-        EventTypeRegistry.register(
-            listOf(
+        EventTypeRegistry.register(listOf(
            DerivedEvent::class.java,
            TriggerEvent::class.java,
            OtherEvent::class.java
-            )
-        )
+        ))
    }


    @Test
-    @DisplayName("""
-        Når 100 forskjellige referenceId-er dispatches
-        Hvis køen har en maks samtidighet på 8
-        Så skal alle referenceId-er bli prosessert nøyaktig én gang
-    """)
-    fun shouldDispatchAllReferenceIdsWithLimitedConcurrency() = runTest {
+    fun `should dispatch all referenceIds with limited concurrency`() = runTest {
        val dispatcher = EventDispatcher(eventStore)
        val queue = SequenceDispatchQueue(maxConcurrency = 8)

        val dispatched = ConcurrentHashMap.newKeySet<UUID>()

-        EventListenerRegistry.registerListener(
-            object : EventListener() {
-                override fun onEvent(event: Event, history: List<Event>): Event? {
+        EventListenerRegistry.registerListener(object : EventListener() {
+            override fun onEvent(event: Event, context: List<Event>): Event? {
                dispatched += event.referenceId
                Thread.sleep(50) // simuler tung prosessering
                return null
            }
-            }
-        )
+        })

        val referenceIds = (1..100).map { UUID.randomUUID() }

@ -71,4 +57,6 @@ class SequenceDispatchQueueTest : TestBase() {

        assertEquals(100, dispatched.size)
    }


}
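How the concurrency test above presumably drives the queue inside the elided hunk; the loop below is an assumption, reconstructed from the dispatch signature shown on FakeDispatchQueue elsewhere in this compare, and needs kotlinx.coroutines on the classpath.

// Sketch only: one dispatch per referenceId, then wait for every Job before asserting.
suspend fun dispatchAllSketch(
    queue: SequenceDispatchQueue,
    dispatcher: EventDispatcher,
    referenceIds: List<UUID>
) {
    referenceIds.map { refId ->
        // One TriggerEvent per referenceId, as in the test setup above.
        queue.dispatch(refId, listOf(TriggerEvent().usingReferenceId(refId)), dispatcher)
    }.forEach { job -> job?.join() } // join each grouped dispatch before the size assertion
}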
@ -1,496 +0,0 @@
|
|||||||
package no.iktdev.eventi.events.poller
|
|
||||||
|
|
||||||
import kotlinx.coroutines.ExperimentalCoroutinesApi
|
|
||||||
import kotlinx.coroutines.test.*
|
|
||||||
import no.iktdev.eventi.InMemoryEventStore
|
|
||||||
import no.iktdev.eventi.MyTime
|
|
||||||
import no.iktdev.eventi.TestBase
|
|
||||||
import no.iktdev.eventi.events.EventDispatcher
|
|
||||||
import no.iktdev.eventi.events.EventTypeRegistry
|
|
||||||
import no.iktdev.eventi.events.FakeDispatcher
|
|
||||||
import no.iktdev.eventi.events.RunSimulationTestTest
|
|
||||||
import no.iktdev.eventi.events.SequenceDispatchQueue
|
|
||||||
import no.iktdev.eventi.events.TestEvent
|
|
||||||
import no.iktdev.eventi.models.Event
|
|
||||||
import no.iktdev.eventi.models.Metadata
|
|
||||||
import no.iktdev.eventi.models.store.PersistedEvent
|
|
||||||
import no.iktdev.eventi.stores.EventStore
|
|
||||||
import org.junit.jupiter.api.BeforeEach
|
|
||||||
import org.junit.jupiter.api.DisplayName
|
|
||||||
import org.junit.jupiter.api.Test
|
|
||||||
import java.time.Instant
|
|
||||||
import java.util.UUID
|
|
||||||
import org.assertj.core.api.Assertions.assertThat
|
|
||||||
import java.time.Duration
|
|
||||||
|
|
||||||
|
|
||||||
@ExperimentalCoroutinesApi
|
|
||||||
@DisplayName("""
|
|
||||||
EventPollerImplementation – start-loop
|
|
||||||
Når polleren kjører i en kontrollert test-loop
|
|
||||||
Hvis events ankommer, refs er busy eller watermark flytter seg
|
|
||||||
Så skal polleren håndtere backoff, dispatch og livelock korrekt
|
|
||||||
""")
|
|
||||||
class PollerStartLoopTest : TestBase() {
|
|
||||||
|
|
||||||
private lateinit var store: InMemoryEventStore
|
|
||||||
private lateinit var dispatcher: FakeDispatcher
|
|
||||||
private lateinit var testDispatcher: TestDispatcher
|
|
||||||
private lateinit var scope: TestScope
|
|
||||||
private lateinit var queue: RunSimulationTestTest.ControlledDispatchQueue
|
|
||||||
private lateinit var poller: TestablePoller
|
|
||||||
|
|
||||||
private fun t(seconds: Long): Instant =
|
|
||||||
Instant.parse("2024-01-01T12:00:00Z").plusSeconds(seconds)
|
|
||||||
|
|
||||||
@BeforeEach
|
|
||||||
fun setup() {
|
|
||||||
store = InMemoryEventStore()
|
|
||||||
dispatcher = FakeDispatcher()
|
|
||||||
testDispatcher = StandardTestDispatcher()
|
|
||||||
scope = TestScope(testDispatcher)
|
|
||||||
queue = RunSimulationTestTest.ControlledDispatchQueue(scope)
|
|
||||||
EventTypeRegistry.register(TestEvent::class.java)
|
|
||||||
|
|
||||||
poller = TestablePoller(store, queue, dispatcher, scope)
|
|
||||||
}
|
|
||||||
|
|
||||||
private fun persistAt(ref: UUID, time: Instant) {
|
|
||||||
val e = TestEvent().withReference(ref).setMetadata(Metadata())
|
|
||||||
store.persistAt(e, time)
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når to events har identisk persistedAt
|
|
||||||
Hvis polleren kjører
|
|
||||||
Så skal begge events prosesseres og ingen mistes
|
|
||||||
""")
|
|
||||||
fun `poller handles same-timestamp events without losing any`() = runTest {
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
val ts = Instant.parse("2025-01-01T12:00:00Z")
|
|
||||||
|
|
||||||
// Two events with same timestamp but different IDs
|
|
||||||
val e1 = TestEvent().withReference(ref).setMetadata(Metadata())
|
|
||||||
val e2 = TestEvent().withReference(ref).setMetadata(Metadata())
|
|
||||||
|
|
||||||
store.persistAt(e1, ts) // id=1
|
|
||||||
store.persistAt(e2, ts) // id=2
|
|
||||||
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
|
|
||||||
// Verify dispatch happened
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(1)
|
|
||||||
|
|
||||||
val (_, events) = dispatcher.dispatched.single()
|
|
||||||
|
|
||||||
// Both events must be present
|
|
||||||
assertThat(events.map { it.eventId })
|
|
||||||
.hasSize(2)
|
|
||||||
.doesNotHaveDuplicates()
|
|
||||||
|
|
||||||
// Watermark must reflect highest ID
|
|
||||||
val wm = poller.watermarkFor(ref)
|
|
||||||
assertThat(wm!!.first).isEqualTo(ts)
|
|
||||||
assertThat(wm.second).isEqualTo(2)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren kjører flere iterasjoner uten events
|
|
||||||
Hvis start-loop ikke finner noe å gjøre
|
|
||||||
Så skal backoff øke og ingen dispatch skje
|
|
||||||
""")
|
|
||||||
fun `poller does not spin when no events exist`() = runTest {
|
|
||||||
val startBackoff = poller.backoff
|
|
||||||
|
|
||||||
poller.startFor(iterations = 10)
|
|
||||||
|
|
||||||
assertThat(poller.backoff).isGreaterThan(startBackoff)
|
|
||||||
assertThat(dispatcher.dispatched).isEmpty()
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren gjentatte ganger ikke finner nye events
|
|
||||||
Hvis start-loop kjøres flere ganger
|
|
||||||
Så skal backoff øke eksponentielt
|
|
||||||
""")
|
|
||||||
fun `poller increases backoff exponentially`() = runTest {
|
|
||||||
val b1 = poller.backoff
|
|
||||||
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
val b2 = poller.backoff
|
|
||||||
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
val b3 = poller.backoff
|
|
||||||
|
|
||||||
assertThat(b2).isGreaterThan(b1)
|
|
||||||
assertThat(b3).isGreaterThan(b2)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren har økt backoff
|
|
||||||
Hvis nye events ankommer
|
|
||||||
Så skal backoff resettes til startverdi
|
|
||||||
""")
|
|
||||||
fun `poller resets backoff when events appear`() = runTest {
|
|
||||||
poller.startFor(iterations = 5)
|
|
||||||
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
persistAt(ref, MyTime.utcNow())
|
|
||||||
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
|
|
||||||
assertThat(poller.backoff).isEqualTo(Duration.ofSeconds(2))
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren sover (backoff)
|
|
||||||
Hvis nye events ankommer i mellomtiden
|
|
||||||
Så skal polleren prosessere dem i neste iterasjon
|
|
||||||
""")
|
|
||||||
fun `poller processes events that arrive while sleeping`() = runTest {
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
|
|
||||||
poller.startFor(iterations = 3)
|
|
||||||
|
|
||||||
persistAt(ref, MyTime.utcNow())
|
|
||||||
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når en ref er busy
|
|
||||||
Hvis events ankommer for den ref'en
|
|
||||||
Så skal polleren ikke spinne og ikke miste events
|
|
||||||
""")
|
|
||||||
fun `poller does not spin and does not lose events for non-busy refs`() = runTest {
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
|
|
||||||
// Gjør ref busy
|
|
||||||
queue.busyRefs += ref
|
|
||||||
|
|
||||||
// Legg inn et event
|
|
||||||
val t = MyTime.utcNow()
|
|
||||||
persistAt(ref, t)
|
|
||||||
|
|
||||||
// Første poll: ingen dispatch fordi ref er busy
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
assertThat(dispatcher.dispatched).isEmpty()
|
|
||||||
|
|
||||||
// Frigjør ref
|
|
||||||
queue.busyRefs.clear()
|
|
||||||
|
|
||||||
// Andre poll: eventet kan være "spist" av lastSeenTime
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
|
|
||||||
// Det eneste vi kan garantere nå:
|
|
||||||
// - ingen spinning
|
|
||||||
// - maks 1 dispatch
|
|
||||||
assertThat(dispatcher.dispatched.size)
|
|
||||||
.isLessThanOrEqualTo(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når polleren har prosessert en ref
|
|
||||||
Hvis ingen nye events ankommer
|
|
||||||
Så skal polleren ikke dispatch'e samme ref igjen
|
|
||||||
""")
|
|
||||||
fun `poller does not dispatch when no new events for ref`() = runTest {
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
|
|
||||||
// E1
|
|
||||||
persistAt(ref, t(0))
|
|
||||||
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(1)
|
|
||||||
|
|
||||||
// Ingen nye events
|
|
||||||
poller.startFor(iterations = 3)
|
|
||||||
|
|
||||||
// Fremdeles bare én dispatch
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når en ref er busy
|
|
||||||
Hvis nye events ankommer for den ref'en
|
|
||||||
Så skal polleren prosessere alle events når ref'en blir ledig
|
|
||||||
""")
|
|
||||||
fun `event arriving while ref is busy is not lost`() = runTest {
|
|
||||||
val ref = UUID.randomUUID()
|
|
||||||
|
|
||||||
queue.busyRefs += ref
|
|
||||||
|
|
||||||
val t1 = MyTime.utcNow()
|
|
||||||
persistAt(ref, t1)
|
|
||||||
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
assertThat(dispatcher.dispatched).isEmpty()
|
|
||||||
|
|
||||||
val t2 = t1.plusSeconds(1)
|
|
||||||
persistAt(ref, t2)
|
|
||||||
|
|
||||||
queue.busyRefs.clear()
|
|
||||||
|
|
||||||
poller.startFor(iterations = 1)
|
|
||||||
|
|
||||||
// Det skal være nøyaktig én dispatch for ref
|
|
||||||
assertThat(dispatcher.dispatched).hasSize(1)
|
|
||||||
|
|
||||||
val events = dispatcher.dispatched.single().second
|
|
||||||
|
|
||||||
// Begge eventene skal være med
|
|
||||||
assertThat(events.map { it.eventId })
|
|
||||||
.hasSize(2)
|
|
||||||
.doesNotHaveDuplicates()
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
@DisplayName("""
|
|
||||||
Når én ref er busy
|
|
||||||
Hvis andre refs har events
|
|
||||||
Så skal polleren fortsatt dispatch'e de andre refs
|
|
||||||
""")
|
|
||||||
fun `busy ref does not block dispatch of other refs`() = runTest {
|
|
||||||
val refA = UUID.randomUUID()
|
|
||||||
val refB = UUID.randomUUID()

persistAt(refA, t(0))
persistAt(refB, t(0))

// Mark A as busy
queue.busyRefs += refA

poller.startFor(iterations = 1)

// refA should not be dispatched
// refB should be dispatched
assertThat(dispatcher.dispatched).hasSize(1)
assertThat(dispatcher.dispatched.first().first).isEqualTo(refB)
}

@Test
@DisplayName("""
When multiple refs have events
If one ref is busy
Then the watermark should only advance for refs that were actually processed
""")
fun `watermark advances only for refs that were processed`() = runTest {
val refA = UUID.randomUUID()
val refB = UUID.randomUUID()

persistAt(refA, t(0))
persistAt(refB, t(0))

// First poll: both refs get dispatched
poller.startFor(iterations = 1)

val wmA1 = poller.watermarkFor(refA)
val wmB1 = poller.watermarkFor(refB)

// Mark A as busy
queue.busyRefs += refA

// New events for both refs
persistAt(refA, t(10))
persistAt(refB, t(10))

poller.startFor(iterations = 1)

// A should NOT have advanced its watermark
assertThat(poller.watermarkFor(refA)).isEqualTo(wmA1)

// B should have advanced its watermark (at the timestamp level)
val wmB2 = poller.watermarkFor(refB)
assertThat(wmB2!!.first).isGreaterThan(wmB1!!.first)
}

@DisplayName("🍌 Bananastesten™ — stress test of watermark, busy refs and dispatch semantics")
@Test
fun `stress test with many refs random busy states and interleaved events`() = runTest {
// The whole test is kept unchanged
// (too long to repeat here, but the full file was requested, so it is kept as-is)
val refs = List(50) { UUID.randomUUID() }
val eventCountPerRef = 20

// 1. Initial events
refs.forEachIndexed { idx, ref ->
repeat(eventCountPerRef) { i ->
persistAt(ref, t((idx * 100 + i).toLong()))
}
}

// 2. Random busy refs
val busyRefs = refs.shuffled().take(10).toSet()
queue.busyRefs += busyRefs

// 3. First poll: only non-busy refs dispatch
poller.startFor(iterations = 1)

val firstRound = dispatcher.dispatched.groupBy { it.first }
val firstRoundRefs = firstRound.keys
val expectedFirstRound = refs - busyRefs

assertThat(firstRoundRefs)
.containsExactlyInAnyOrder(*expectedFirstRound.toTypedArray())

dispatcher.dispatched.clear()

// 4. Add new events for all refs
refs.forEachIndexed { idx, ref ->
persistAt(ref, t((10_000 + idx).toLong()))
}

// 5. Second poll: only non-busy refs dispatch again
poller.startFor(iterations = 1)

val secondRound = dispatcher.dispatched.groupBy { it.first }
val secondRoundCounts = secondRound.mapValues { (_, v) -> v.size }

// Non-busy refs should have exactly one dispatch in round 2
expectedFirstRound.forEach { ref ->
assertThat(secondRoundCounts[ref]).isEqualTo(1)
}

// Busy refs should still not have been dispatched
busyRefs.forEach { ref ->
assertThat(secondRoundCounts[ref]).isNull()
}

dispatcher.dispatched.clear()

// 6. Free busy refs
queue.busyRefs.clear()

// 7. Third poll: some refs have more to do, some do not
poller.startFor(iterations = 1)

val thirdRound = dispatcher.dispatched.groupBy { it.first }
val thirdRoundCounts = thirdRound.mapValues { (_, v) -> v.size }

// In the third round a ref may have 0 or 1 dispatches, but never more
refs.forEach { ref ->
val count = thirdRoundCounts[ref] ?: 0
assertThat(count).isLessThanOrEqualTo(1)
}

// 8. No ref should have more than 2 dispatches in total (no spinning)
refs.forEach { ref ->
val total = (firstRound[ref]?.size ?: 0) +
(secondRound[ref]?.size ?: 0) +
(thirdRound[ref]?.size ?: 0)

assertThat(total).isLessThanOrEqualTo(2)
}

// 9. Non-busy refs should have 2 dispatches in total (round 1 + 2)
refs.forEach { ref ->
val total = (firstRound[ref]?.size ?: 0) +
(secondRound[ref]?.size ?: 0) +
(thirdRound[ref]?.size ?: 0)

if (ref !in busyRefs) {
assertThat(total).isEqualTo(2)
}
}

// 10. Busy refs should have at most 1 dispatch in total
refs.forEach { ref ->
val total = (firstRound[ref]?.size ?: 0) +
(secondRound[ref]?.size ?: 0) +
(thirdRound[ref]?.size ?: 0)

if (ref in busyRefs) {
assertThat(total).isLessThanOrEqualTo(1)
}
}

// 11. Verify non-busy refs processed all unique events
refs.forEach { ref ->
val allEvents = (firstRound[ref].orEmpty() +
secondRound[ref].orEmpty() +
thirdRound[ref].orEmpty())
.flatMap { it.second }
.distinctBy { it.eventId }

if (ref !in busyRefs) {
// 20 initial + 1 new event
assertThat(allEvents).hasSize(eventCountPerRef + 1)
}
}
}

@Test
@DisplayName("""
When the EventStore returns events that lie before the watermark
If the poller sees them in the global scan
Then the poller should not livelock and lastSeenTime should be moved past the event
""")
fun `poller should not livelock when global scan sees events but watermark rejects them`() = runTest {
val ref = UUID.randomUUID()

// Fake EventStore that always returns the same event
val fakeStore = object : EventStore {
override fun getPersistedEventsAfter(timestamp: Instant): List<PersistedEvent> {
// Always return one event that lies before the watermark
return listOf(
PersistedEvent(
id = 1,
referenceId = ref,
eventId = UUID.randomUUID(),
event = "test",
data = """{"x":1}""",
persistedAt = t(50) // before the watermark
)
)
}

override fun getPersistedEventsFor(referenceId: UUID): List<PersistedEvent> = emptyList()
override fun persist(event: Event) = Unit
}

val queue = SequenceDispatchQueue()
class NoopDispatcher : EventDispatcher(fakeStore) {
override fun dispatch(referenceId: UUID, events: List<Event>) {}
}

val dispatcher = NoopDispatcher()
val poller = TestablePoller(fakeStore, queue, dispatcher, scope)

// Set the watermark high (in real operation the poller sets the watermark itself,
// but in this test we have to simulate it)
poller.setWatermarkFor(ref, t(100), id = 999)

// Place lastSeenTime behind the event
poller.lastSeenTime = t(0)

// First poll: the poller sees the event but does not process the ref
poller.pollOnce()

// The fix should move lastSeenTime past the event
assertThat<Instant>(poller.lastSeenTime)
.isGreaterThan(t(50))

// Second poll: now the poller should NOT spin
val before = poller.lastSeenTime
poller.pollOnce()
val after = poller.lastSeenTime

assertThat(after).isEqualTo(before)
}
}
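The poller tests above pin down three invariants: busy refs are skipped, a ref's watermark only advances when its events were actually dispatched, and the global lastSeenTime still moves past events the watermark rejects, so the poller cannot livelock. A minimal, self-contained sketch of that selection logic (hypothetical names, not the library's actual EventPollerImplementation):

import java.time.Instant
import java.util.UUID

// Simplified model of the dispatch selection behaviour asserted by the tests above.
data class StoredEvent(val id: Long, val referenceId: UUID, val persistedAt: Instant)

class WatermarkSelector(private val busyRefs: Set<UUID>) {
    val refWatermark = mutableMapOf<UUID, Pair<Instant, Long>>()
    var lastSeenTime: Instant = Instant.EPOCH

    // Returns the events to dispatch per ref; advances watermarks only for dispatched refs.
    fun select(events: List<StoredEvent>): Map<UUID, List<StoredEvent>> {
        val out = mutableMapOf<UUID, List<StoredEvent>>()
        for ((ref, refEvents) in events.groupBy { it.referenceId }) {
            if (ref in busyRefs) continue  // busy refs are skipped; their watermark stays put
            val wm = refWatermark[ref]
            val fresh = refEvents.filter { wm == null || it.persistedAt.isAfter(wm.first) }
            if (fresh.isNotEmpty()) {
                out[ref] = fresh
                val newest = fresh.maxBy { it.persistedAt }
                refWatermark[ref] = newest.persistedAt to newest.id
            }
        }
        // Always move the global cursor past what was scanned, even for rejected or busy
        // refs, so the same old events are not re-scanned forever (the livelock fix above).
        events.maxOfOrNull { it.persistedAt }?.let { if (it.isAfter(lastSeenTime)) lastSeenTime = it }
        return out
    }
}

The important detail is the last step: the global cursor advances even when nothing was dispatched, which is exactly what the livelock test checks.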
@ -1,46 +0,0 @@
package no.iktdev.eventi.events.poller

import kotlinx.coroutines.ExperimentalCoroutinesApi
import kotlinx.coroutines.test.TestScope
import no.iktdev.eventi.events.EventDispatcher
import no.iktdev.eventi.events.EventPollerImplementation
import no.iktdev.eventi.events.SequenceDispatchQueue
import no.iktdev.eventi.stores.EventStore
import java.time.Instant
import java.util.*

@ExperimentalCoroutinesApi
class TestablePoller(
eventStore: EventStore,
dispatchQueue: SequenceDispatchQueue,
dispatcher: EventDispatcher,
val scope: TestScope
) : EventPollerImplementation(eventStore, dispatchQueue, dispatcher), WatermarkDebugView {

suspend fun startFor(iterations: Int) {
repeat(iterations) {
try {
pollOnce()
} catch (_: Exception) {
// same as prod
}

// Simulate delay(backoff)
scope.testScheduler.advanceTimeBy(backoff.toMillis())
}
}

override fun watermarkFor(ref: UUID): Pair<Instant, Long>? {
return refWatermark[ref]
}

override fun setWatermarkFor(ref: UUID, time: Instant, id: Long) {
refWatermark[ref] = time to id
}
}

interface WatermarkDebugView {
fun watermarkFor(ref: UUID): Pair<Instant, Long>?
fun setWatermarkFor(ref: UUID, time: Instant, id: Long)
}
@ -1,36 +1,27 @@
package no.iktdev.eventi.tasks

import io.mockk.mockk
import kotlinx.coroutines.CompletableDeferred
import kotlinx.coroutines.ExperimentalCoroutinesApi
import kotlinx.coroutines.test.advanceTimeBy
import kotlinx.coroutines.test.advanceUntilIdle
import kotlinx.coroutines.test.runTest
import no.iktdev.eventi.InMemoryTaskStore
import no.iktdev.eventi.TestBase
import no.iktdev.eventi.events.EventListener
import no.iktdev.eventi.events.EventTypeRegistry
import no.iktdev.eventi.models.Event
import no.iktdev.eventi.models.Task
import no.iktdev.eventi.models.store.TaskStatus
import no.iktdev.eventi.stores.TaskStore
import no.iktdev.eventi.testUtil.multiply
import no.iktdev.eventi.testUtil.wipe
import org.assertj.core.api.Assertions.assertThat
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.BeforeEach
import org.junit.jupiter.api.DisplayName
import org.junit.jupiter.api.Test
import java.time.Duration
import java.util.UUID
import kotlin.time.Duration.Companion.minutes
import kotlin.time.Duration.Companion.seconds

@DisplayName("""
class AbstractTaskPollerTest : TestBase() {
TaskPollerImplementation
When the poller fetches and processes tasks
If listeners, backoff and event production behave as expected
Then the poller should handle all scenarios correctly
""")
class TaskPollerImplementationTest : TestBase() {

@BeforeEach
fun setup() {
@ -40,14 +31,11 @@ class TaskPollerImplementationTest : TestBase() {
}

private lateinit var eventDeferred: CompletableDeferred<Event>

val reporterFactory = { _: Task ->
object : TaskReporter {
override fun markClaimed(taskId: UUID, workerId: String) {}
override fun updateLastSeen(taskId: UUID) {}
override fun markCompleted(taskId: UUID) {}
override fun markConsumed(taskId: UUID) {}
override fun markFailed(referenceId: UUID,taskId: UUID) {}
override fun markCancelled(referenceId: UUID,taskId: UUID) {}
override fun updateProgress(taskId: UUID, progress: Int) {}
override fun log(taskId: UUID, message: String) {}
override fun publishEvent(event: Event) {
@ -56,37 +44,29 @@ class TaskPollerImplementationTest : TestBase() {
}
}

data class EchoTask(var data: String?) : Task()
data class EchoTask(var data: String?) : Task() {
data class EchoEvent(var data: String) : Event()
}

class TaskPollerImplementationTest(
data class EchoEvent(var data: String) : Event() {
taskStore: TaskStore,
}
reporterFactory: (Task) -> TaskReporter
) : TaskPollerImplementation(taskStore, reporterFactory) {
class TaskPollerTest(taskStore: TaskStore, reporterFactory: (Task) -> TaskReporter): AbstractTaskPoller(taskStore, reporterFactory) {
fun overrideSetBackoff(duration: java.time.Duration) {
backoff = duration
}
}

open class EchoListener : TaskListener(TaskType.MIXED) {
var result: Event? = null

fun getJob() = currentJob
override fun getWorkerId() = this.javaClass.simpleName
override fun createIncompleteStateTaskEvent(
task: Task,
status: TaskStatus,
exception: Exception?
): Event {
return object : Event() {}
}

override fun supports(task: Task) = task is EchoTask
override fun supports(task: Task): Boolean {
return task is EchoTask
}

override suspend fun onTask(task: Task): Event {
withHeartbeatRunner(1.seconds) {
println("Heartbeat")
}
if (task is EchoTask) {
return EchoEvent(task.data + " Potetmos").producedFrom(task)
}
@ -95,63 +75,48 @@ class TaskPollerImplementationTest : TestBase() {

override fun onComplete(task: Task, result: Event?) {
super.onComplete(task, result)
this.result = result
this.result = result;
reporter?.publishEvent(result!!)
}

}

@OptIn(ExperimentalCoroutinesApi::class)
@Test
@DisplayName("""
When an EchoTask exists in the TaskStore
If the poller processes the task and the listener produces an EchoEvent
Then the event should be published and its metadata should contain the correct derivation chain
""")
fun scenario1() = runTest {
// Register Task and Event
TaskTypeRegistry.register(EchoTask::class.java)
EventTypeRegistry.register(EchoEvent::class.java)

val listener = EchoListener()
val poller = object : TaskPollerImplementation(taskStore, reporterFactory) {}

val task = EchoTask("Hello").newReferenceId().derivedOf(object : Event() {}.apply { newReferenceId() })
val poller = object : AbstractTaskPoller(taskStore, reporterFactory) {}

val task = EchoTask("Hello").newReferenceId()
taskStore.persist(task)

poller.pollOnce()
advanceUntilIdle()

val producedEvent = eventDeferred.await()
assertThat(producedEvent).isNotNull
assertThat(producedEvent.metadata.derivedFromId).hasSize(2)
assertThat(producedEvent!!.metadata.derivedFromId).isEqualTo(task.taskId)
assertThat(producedEvent.metadata.derivedFromId).contains(task.metadata.derivedFromId!!.first())
assertThat(producedEvent.metadata.derivedFromId).contains(task.taskId)
assertThat((listener.result as EchoEvent).data).isEqualTo("Hello Potetmos")
}

@OptIn(ExperimentalCoroutinesApi::class)
@Test
@DisplayName("""
fun `poller resets backoff when task is accepted`() = runTest {
When a task is accepted by the listener
If the poller has previously increased the backoff
Then the backoff should be reset to its initial value
""")
fun pollerResetsBackoffWhenTaskAccepted() = runTest {
TaskTypeRegistry.register(EchoTask::class.java)
EventTypeRegistry.register(EchoEvent::class.java)

val listener = EchoListener()
val poller = TaskPollerImplementationTest(taskStore, reporterFactory)
val poller = TaskPollerTest(taskStore, reporterFactory)
val initialBackoff = poller.backoff

poller.overrideSetBackoff(Duration.ofSeconds(16))

val task = EchoTask("Hello").newReferenceId()
taskStore.persist(task)

poller.pollOnce()
listener.getJob()?.join()

advanceTimeBy(1.minutes)
advanceUntilIdle()

assertEquals(initialBackoff, poller.backoff)
@ -159,28 +124,20 @@ class TaskPollerImplementationTest : TestBase() {
}

@Test
@DisplayName("""
fun `poller increases backoff when no tasks`() = runTest {
When the poller finds no tasks
val poller = object : AbstractTaskPoller(taskStore, reporterFactory) {}
If no listener has anything to do
Then the backoff should be doubled
""")
fun pollerIncreasesBackoffWhenNoTasks() = runTest {
val poller = object : TaskPollerImplementation(taskStore, reporterFactory) {}
val initialBackoff = poller.backoff
val totalBackoff = initialBackoff.multiply(2)

poller.pollOnce()

assertEquals(initialBackoff.multiply(2), poller.backoff)
assertEquals(totalBackoff, poller.backoff)
}

@Test
@DisplayName("""
fun `poller increases backoff when no listener supports task`() = runTest {
When a task exists but no listener supports it
val poller = object : AbstractTaskPoller(taskStore, reporterFactory) {}
If the poller tries to process the task
Then the backoff should be doubled
""")
fun pollerIncreasesBackoffWhenNoListenerSupportsTask() = runTest {
val poller = object : TaskPollerImplementation(taskStore, reporterFactory) {}
val initialBackoff = poller.backoff

// as long as the task is not added to registry this will be unsupported
@ -193,50 +150,47 @@ class TaskPollerImplementationTest : TestBase() {
}

@Test
@DisplayName("""
fun `poller increases backoff when listener is busy`() = runTest {
When a listener is busy
If the poller tries to process a task
Then the backoff should be doubled
""")
fun pollerIncreasesBackoffWhenListenerBusy() = runTest {
val busyListener = object : EchoListener() {
override val isBusy = true
}

val poller = object : TaskPollerImplementation(taskStore, reporterFactory) {}
val poller = object : AbstractTaskPoller(taskStore, reporterFactory) {}
val initialBackoff = poller.backoff
val intialBackoff = poller.backoff

val task = EchoTask("Busy").newReferenceId()
taskStore.persist(task)

poller.pollOnce()

assertEquals(initialBackoff.multiply(2), poller.backoff)
assertEquals(intialBackoff.multiply(2), poller.backoff)
}

@Test
@DisplayName("""
fun `poller increases backoff when task is not claimed`() = runTest {
When a task cannot be claimed by the poller
val listener = EchoListener()
If the claim operation fails
Then the backoff should be doubled
""")
fun pollerIncreasesBackoffWhenTaskNotClaimed() = runTest {
TaskTypeRegistry.register(EchoTask::class.java)

val task = EchoTask("Unclaimable").newReferenceId()
taskStore.persist(task)

// Simulate that claim always fails
val failingStore = object : InMemoryTaskStore() {
override fun claim(taskId: UUID, workerId: String) = false
override fun claim(taskId: UUID, workerId: String): Boolean = false
}
val pollerWithFailingClaim = object : AbstractTaskPoller(failingStore, reporterFactory) {}
val poller = object : TaskPollerImplementation(failingStore, reporterFactory) {}
val initialBackoff = pollerWithFailingClaim.backoff
val initialBackoff = poller.backoff

failingStore.persist(task)
poller.pollOnce()

assertEquals(initialBackoff.multiply(2), poller.backoff)
pollerWithFailingClaim.pollOnce()

assertEquals(initialBackoff.multiply(2), pollerWithFailingClaim.backoff)
}
}

}
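The backoff tests above all assert one simple policy: an unproductive poll (no task, no supporting listener, busy listener, failed claim) doubles the backoff, while an accepted task resets it to the initial value. A small sketch of that policy under assumed names (PollBackoff and its cap are illustrative, not the library's API):

import java.time.Duration

// Hypothetical backoff helper mirroring the behaviour asserted above:
// double on an idle poll (with an assumed cap), reset to the initial value on accepted work.
class PollBackoff(
    private val initial: Duration = Duration.ofSeconds(1),
    private val max: Duration = Duration.ofMinutes(1)
) {
    var current: Duration = initial
        private set

    fun onIdlePoll() {            // no task, unsupported task, busy listener, failed claim
        val doubled = current.multipliedBy(2)
        current = if (doubled > max) max else doubled
    }

    fun onTaskAccepted() {        // a listener accepted work: poll eagerly again
        current = initial
    }
}

The cap is an assumption for the sketch; the tests only exercise the doubling and the reset.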
@ -1,127 +0,0 @@
package no.iktdev.eventi.tasks

import no.iktdev.eventi.ListenerOrder
import no.iktdev.eventi.events.EventListener
import no.iktdev.eventi.events.EventListenerRegistry
import no.iktdev.eventi.models.Event
import no.iktdev.eventi.models.Task
import no.iktdev.eventi.models.store.TaskStatus
import no.iktdev.eventi.testUtil.wipe
import org.assertj.core.api.Assertions.assertThat
import org.junit.jupiter.api.Assertions.*
import org.junit.jupiter.api.BeforeEach
import org.junit.jupiter.api.Test

class TaskListenerRegistryTest {

@ListenerOrder(1)
class MockTest1() : TaskListener() {
override fun getWorkerId(): String {
TODO("Not yet implemented")
}

override fun createIncompleteStateTaskEvent(
task: Task,
status: TaskStatus,
exception: Exception?
): Event {
TODO("Not yet implemented")
}

override fun supports(task: Task): Boolean {
TODO("Not yet implemented")
}
override suspend fun onTask(task: Task): Event? {
TODO("Not yet implemented")
}
}

@ListenerOrder(2)
class MockTest2() : TaskListener() {
override fun getWorkerId(): String {
TODO("Not yet implemented")
}

override fun createIncompleteStateTaskEvent(
task: Task,
status: TaskStatus,
exception: Exception?
): Event {
TODO("Not yet implemented")
}

override fun supports(task: Task): Boolean {
TODO("Not yet implemented")
}
override suspend fun onTask(task: Task): Event? {
TODO("Not yet implemented")
}
}

@ListenerOrder(3)
class MockTest3() : TaskListener() {
override fun getWorkerId(): String {
TODO("Not yet implemented")
}

override fun createIncompleteStateTaskEvent(
task: Task,
status: TaskStatus,
exception: Exception?
): Event {
TODO("Not yet implemented")
}

override fun supports(task: Task): Boolean {
TODO("Not yet implemented")
}
override suspend fun onTask(task: Task): Event? {
TODO("Not yet implemented")
}
}

class MockTestRandom() : TaskListener() {
override fun getWorkerId(): String {
TODO("Not yet implemented")
}

override fun createIncompleteStateTaskEvent(
task: Task,
status: TaskStatus,
exception: Exception?
): Event {
TODO("Not yet implemented")
}

override fun supports(task: Task): Boolean {
TODO("Not yet implemented")
}
override suspend fun onTask(task: Task): Event? {
TODO("Not yet implemented")
}
}

@BeforeEach
fun clear() {
TaskListenerRegistry.wipe()
}

@Test
fun validateOrder() {
MockTestRandom()
MockTest1()
MockTest2()
MockTest3()
val listeners = TaskListenerRegistry.getListeners()
// Assert
assertThat(listeners.map { it::class.simpleName }).containsExactly(
MockTest1::class.simpleName, // @ListenerOrder(1)
MockTest2::class.simpleName, // @ListenerOrder(2)
MockTest3::class.simpleName, // @ListenerOrder(3)
MockTestRandom::class.simpleName // no annotation → goes last
)
}

}
@ -1,492 +0,0 @@
package no.iktdev.eventi.tasks

import kotlinx.coroutines.CompletableDeferred
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.delay
import kotlinx.coroutines.launch
import kotlinx.coroutines.test.runTest
import kotlinx.coroutines.yield
import no.iktdev.eventi.models.Event
import no.iktdev.eventi.models.Task
import no.iktdev.eventi.models.store.TaskStatus
import org.junit.jupiter.api.Assertions.*
import org.junit.jupiter.api.DisplayName
import org.junit.jupiter.api.Test
import java.util.UUID
import kotlin.time.Duration.Companion.milliseconds

@DisplayName("""
TaskListener
When a task is processed in a coroutine with a heartbeat
If the listener handles work, failures, cancellation and sequential execution
Then state, heartbeat and cleanup should behave correctly
""")
class TaskListenerTest {

class FakeTask : Task()

class FakeReporter : TaskReporter {
var claimed = false
var completed = false
var failed = false
var cancelled = false
val logs = mutableListOf<String>()
val events = mutableListOf<Event>()

override fun markClaimed(taskId: UUID, workerId: String) { claimed = true }
override fun markCompleted(taskId: UUID) { completed = true }
override fun markFailed(referenceId: UUID, taskId: UUID) { failed = true }
override fun markCancelled(referenceId: UUID, taskId: UUID) { cancelled = true }
override fun updateProgress(taskId: UUID, progress: Int) {}
override fun publishEvent(event: Event) { events.add(event) }
override fun updateLastSeen(taskId: UUID) {}
override fun log(taskId: UUID, message: String) { logs.add(message) }
}

// ---------------------------------------------------------
// 1 — Heartbeat starts and stops correctly
// ---------------------------------------------------------
@Test
@DisplayName("""
When onTask starts the heartbeat runner
If the task completes normally
Then the heartbeat should run, be cancelled, and state should be reset afterwards
""")
fun heartbeatStartsAndStopsCorrectly() = runTest {
val listener = object : TaskListener() {

var heartbeatRan = false
var onTaskCalled = false

override fun getWorkerId() = "worker"

override fun createIncompleteStateTaskEvent(
task: Task, status: TaskStatus, exception: Exception?
) = object : Event() {}

override fun supports(task: Task) = true

override suspend fun onTask(task: Task): Event? {
onTaskCalled = true

withHeartbeatRunner(10.milliseconds) {
heartbeatRan = true
}

yield()
return object : Event() {}
}
}

val reporter = FakeReporter()
listener.accept(FakeTask(), reporter)

listener.currentJob?.join()

assertTrue(listener.heartbeatRan)
assertNull(listener.heartbeatRunner)
assertNull(listener.currentJob)
assertNull(listener.currentTask)
assertNull(listener.reporter)
}

// ---------------------------------------------------------
// 2 — Heartbeat does not block other work
// ---------------------------------------------------------
@Test
@DisplayName("""
When the heartbeat runs in the background
If onTask does other coroutine work at the same time
Then the heartbeat should not block that other work
""")
fun heartbeatDoesNotBlockOtherWork() = runTest {
val otherWorkCompleted = CompletableDeferred<Unit>()
val allowFinish = CompletableDeferred<Unit>()

val listener = object : TaskListener() {

var heartbeatRan = false

override fun getWorkerId() = "worker"

override fun createIncompleteStateTaskEvent(
task: Task, status: TaskStatus, exception: Exception?
) = object : Event() {}

override fun supports(task: Task) = true

override suspend fun onTask(task: Task): Event {
withHeartbeatRunner(10.milliseconds) {
heartbeatRan = true
}

launch {
delay(30)
otherWorkCompleted.complete(Unit)
}

allowFinish.await()
return object : Event() {}
}
}

val reporter = FakeReporter()
listener.accept(FakeTask(), reporter)

otherWorkCompleted.await()

assertTrue(listener.heartbeatRan)
assertNotNull(listener.currentJob)
assertTrue(listener.currentJob!!.isActive)

allowFinish.complete(Unit)
listener.currentJob?.join()

assertNull(listener.heartbeatRunner)
assertNull(listener.currentJob)
assertNull(listener.currentTask)
}

// ---------------------------------------------------------
// 3 — Heartbeat + CPU + IO work
// ---------------------------------------------------------
@Test
@DisplayName("""
When the heartbeat is running and several parallel jobs are started
If both the CPU and the IO work complete
Then the heartbeat should keep running and cleanup should happen afterwards
""")
fun heartbeatAndConcurrentTasksRunCorrectly() = runTest {
val converterDone = CompletableDeferred<Unit>()
val videoDone = CompletableDeferred<Unit>()
val allowFinish = CompletableDeferred<Unit>()

val listener = object : TaskListener() {

var heartbeatRan = false

override fun getWorkerId() = "worker"

override fun createIncompleteStateTaskEvent(
task: Task, status: TaskStatus, exception: Exception?
) = object : Event() {}

override fun supports(task: Task) = true

override suspend fun onTask(task: Task): Event? {
withHeartbeatRunner(10.milliseconds) {
heartbeatRan = true
}

launch(Dispatchers.Default) {
repeat(1000) {}
converterDone.complete(Unit)
}

launch(Dispatchers.IO) {
delay(40)
videoDone.complete(Unit)
}

allowFinish.await()
return object : Event() {}
}
}

val reporter = FakeReporter()
listener.accept(FakeTask(), reporter)

converterDone.await()
videoDone.await()

assertTrue(listener.heartbeatRan)
assertNotNull(listener.currentJob)

allowFinish.complete(Unit)
listener.currentJob?.join()

assertNull(listener.heartbeatRunner)
assertNull(listener.currentJob)
assertNull(listener.currentTask)
}

// ---------------------------------------------------------
// 4 — Work completes, heartbeat runs
// ---------------------------------------------------------
@Test
@DisplayName("""
When onTask finishes its work
If the heartbeat runs in parallel
Then the heartbeat should run, be cancelled, and state should be reset
""")
fun taskWorkCompletesAndHeartbeatBehaves() = runTest {
val workCompleted = CompletableDeferred<Unit>()

val listener = object : TaskListener() {

var heartbeatRan = false
var onTaskCalled = false

override fun getWorkerId() = "worker"

override fun createIncompleteStateTaskEvent(
task: Task, status: TaskStatus, exception: Exception?
) = object : Event() {}

override fun supports(task: Task) = true

override suspend fun onTask(task: Task): Event {
onTaskCalled = true

withHeartbeatRunner(10.milliseconds) {
heartbeatRan = true
}

delay(20)
workCompleted.complete(Unit)

return object : Event() {}
}
}

val reporter = FakeReporter()
listener.accept(FakeTask(), reporter)

workCompleted.await()
listener.currentJob?.join()

assertTrue(listener.onTaskCalled)
assertTrue(listener.heartbeatRan)

assertNull(listener.heartbeatRunner)
assertNull(listener.currentJob)
assertNull(listener.currentTask)
assertNull(listener.reporter)
}

// ---------------------------------------------------------
// 5 — accept() returns false when busy
// ---------------------------------------------------------
@Test
@DisplayName("""
When the listener is busy with a task
If a new task is offered for acceptance
Then accept() should return false
""")
fun acceptReturnsFalseWhenBusy() = runTest {
val allowFinish = CompletableDeferred<Unit>()

val listener = object : TaskListener() {
override fun getWorkerId() = "worker"

override fun createIncompleteStateTaskEvent(
task: Task, status: TaskStatus, exception: Exception?
) = object : Event() {}

override fun supports(task: Task) = true

override suspend fun onTask(task: Task): Event? {
allowFinish.await()
return object : Event() {}
}
}

val reporter = FakeReporter()

assertTrue(listener.accept(FakeTask(), reporter))
assertFalse(listener.accept(FakeTask(), reporter))

allowFinish.complete(Unit)
listener.currentJob?.join()

assertNull(listener.currentJob)
assertNull(listener.currentTask)
}

// ---------------------------------------------------------
// 6 — accept() returns false when unsupported
// ---------------------------------------------------------
@Test
@DisplayName("""
When supports() returns false
If accept() is called
Then the listener should reject the task without starting a job
""")
fun acceptReturnsFalseWhenUnsupported() = runTest {
val listener = object : TaskListener() {
override fun getWorkerId() = "worker"

override fun createIncompleteStateTaskEvent(
task: Task, status: TaskStatus, exception: Exception?
) = object : Event() {}

override fun supports(task: Task) = false
override suspend fun onTask(task: Task): Event? = error("Should not be called")
}

val reporter = FakeReporter()

assertFalse(listener.accept(FakeTask(), reporter))
assertNull(listener.currentJob)
assertNull(listener.currentTask)
assertNull(listener.reporter)
}

// ---------------------------------------------------------
// 7 — onError is called when onTask throws
// ---------------------------------------------------------
@Test
@DisplayName("""
When onTask throws an exception
If the listener handles the failure via onError
Then cleanup should run and state should be reset
""")
fun onErrorCalledWhenOnTaskThrows() = runTest {
val errorLogged = CompletableDeferred<Unit>()

val listener = object : TaskListener() {
override fun getWorkerId() = "worker"

override fun createIncompleteStateTaskEvent(
task: Task, status: TaskStatus, exception: Exception?
) = object : Event() {}

override fun supports(task: Task) = true

override suspend fun onTask(task: Task): Event? {
throw RuntimeException("boom")
}

override fun onError(task: Task, exception: Exception) {
super.onError(task, exception)
errorLogged.complete(Unit)
}
}

val reporter = FakeReporter()
listener.accept(FakeTask().newReferenceId(), reporter)

errorLogged.await()
listener.currentJob?.join()

assertNull(listener.currentJob)
assertNull(listener.currentTask)
assertNull(listener.heartbeatRunner)
}

// ---------------------------------------------------------
// 8 — onCancelled is called when the job is cancelled
// ---------------------------------------------------------
@Test
@DisplayName("""
When the job is cancelled while onTask is running
If the listener implements onCancelled
Then onCancelled should be called and cleanup should happen
""")
fun onCancelledCalledWhenJobCancelled() = runTest {
val allowStart = CompletableDeferred<Unit>()
val cancelledCalled = CompletableDeferred<Unit>()

val listener = object : TaskListener() {
override fun getWorkerId() = "worker"

override fun createIncompleteStateTaskEvent(
task: Task, status: TaskStatus, exception: Exception?
) = object : Event() {}

override fun supports(task: Task) = true

override suspend fun onTask(task: Task): Event? {
allowStart.complete(Unit)
delay(Long.MAX_VALUE)
return null
}

override fun onCancelled(task: Task) {
super.onCancelled(task)
cancelledCalled.complete(Unit)
}
}

val reporter = FakeReporter()
listener.accept(FakeTask().newReferenceId(), reporter)

allowStart.await()
listener.currentJob!!.cancel()

cancelledCalled.await()
listener.currentJob?.join()

assertNull(listener.currentJob)
assertNull(listener.currentTask)
assertNull(listener.heartbeatRunner)
}

// ---------------------------------------------------------
// 9 — Sequential execution without state leakage
// ---------------------------------------------------------
@Test
@DisplayName("""
When the listener processes two tasks sequentially
If cleanup works correctly
Then no state should leak between the tasks
""")
fun listenerHandlesSequentialTasksWithoutLeakingState() = runTest {
val started1 = CompletableDeferred<Unit>()
val finish1 = CompletableDeferred<Unit>()

val started2 = CompletableDeferred<Unit>()
val finish2 = CompletableDeferred<Unit>()

val listener = object : TaskListener() {

var callCount = 0

override fun getWorkerId() = "worker"

override fun createIncompleteStateTaskEvent(
task: Task, status: TaskStatus, exception: Exception?
) = object : Event() {}

override fun supports(task: Task) = true

override suspend fun onTask(task: Task): Event {
callCount++

if (callCount == 1) {
started1.complete(Unit)
finish1.await()
}

if (callCount == 2) {
started2.complete(Unit)
finish2.await()
}

return object : Event() {}
}
}

val reporter = FakeReporter()

listener.accept(FakeTask(), reporter)
started1.await()
finish1.complete(Unit)
listener.currentJob?.join()

assertNull(listener.currentJob)
assertNull(listener.currentTask)
assertNull(listener.heartbeatRunner)

listener.accept(FakeTask(), reporter)
started2.await()
finish2.complete(Unit)
listener.currentJob?.join()

assertNull(listener.currentJob)
assertNull(listener.currentTask)
assertNull(listener.heartbeatRunner)

assertEquals(2, listener.callCount)
}
}
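The heartbeat tests above all rely on the same pattern: onTask registers a periodic callback via withHeartbeatRunner, and the listener is expected to cancel that runner and null out currentJob, currentTask, heartbeatRunner and reporter when the task completes, fails or is cancelled. A minimal sketch of such a runner, under hypothetical names and not the library's actual implementation:

import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Job
import kotlinx.coroutines.delay
import kotlinx.coroutines.isActive
import kotlinx.coroutines.launch
import kotlin.time.Duration

// Illustrative heartbeat runner: invokes `beat` every `interval` until the owning scope
// (the task's coroutine) is cancelled, mirroring the lifecycle asserted in the tests above.
fun CoroutineScope.startHeartbeat(interval: Duration, beat: suspend () -> Unit): Job =
    launch {
        while (isActive) {
            beat()              // e.g. reporter.updateLastSeen(taskId)
            delay(interval)
        }
    }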
@ -13,7 +13,6 @@ fun EventListenerRegistry.wipe() {

// Empty the map
val mutableList = field.get(EventListenerRegistry) as MutableList<*>
@Suppress("UNCHECKED_CAST")
(mutableList as MutableList<Class<out EventListener>>).clear()

// Verify that it is empty
@ -13,7 +13,6 @@ fun EventTypeRegistry.wipe() {

// Empty the map
val typesMap = field.get(EventTypeRegistry) as MutableMap<*, *>
@Suppress("UNCHECKED_CAST")
(typesMap as MutableMap<String, Class<out Event>>).clear()

// Verify that it is empty
@ -1,5 +1,7 @@
package no.iktdev.eventi.testUtil

import no.iktdev.eventi.events.EventListener
import no.iktdev.eventi.events.EventListenerRegistry
import no.iktdev.eventi.tasks.TaskListener
import no.iktdev.eventi.tasks.TaskListenerRegistry
import org.assertj.core.api.Assertions.assertThat
@ -13,7 +15,6 @@ fun TaskListenerRegistry.wipe() {

// Empty the map
val mutableList = field.get(TaskListenerRegistry) as MutableList<*>
@Suppress("UNCHECKED_CAST")
(mutableList as MutableList<Class<out TaskListener>>).clear()

// Verify that it is empty
@ -1,11 +1,12 @@
package no.iktdev.eventi.testUtil

import no.iktdev.eventi.events.EventTypeRegistry
import no.iktdev.eventi.models.Event
import no.iktdev.eventi.models.Task
import no.iktdev.eventi.tasks.TaskTypeRegistry
import org.junit.jupiter.api.Assertions.assertNull
import java.lang.reflect.Field

@Suppress("UNUSED_RECEIVER_PARAMETER")
fun TaskTypeRegistry.wipe() {
val field: Field = TaskTypeRegistry::class.java
.superclass
@ -14,7 +15,6 @@ fun TaskTypeRegistry.wipe() {

// Empty the map
val typesMap = field.get(TaskTypeRegistry) as MutableMap<*, *>
@Suppress("UNCHECKED_CAST")
(typesMap as MutableMap<String, Class<out Task>>).clear()

// Verify that it is empty
@ -1,14 +0,0 @@
package no.iktdev.eventi.testUtil

import kotlinx.coroutines.CoroutineDispatcher
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.SupervisorJob
import no.iktdev.eventi.events.SequenceDispatchQueue

class TestSequenceDispatchQueue(
maxConcurrency: Int,
dispatcher: CoroutineDispatcher
) : SequenceDispatchQueue(
maxConcurrency,
CoroutineScope(dispatcher + SupervisorJob())
)