Map performance critical actors to dedicated dispatchers

Using built-in akka configs, we are able to map Actors to dispatchers in
a fine-grained way. Now each session router, network listener, and zone
have a dedicated set of threads. Currently all WorldSessions are sharing
a single worker pool, which means that the server could still crash if
all worker threads are exhausted. To prevent conditions like these, the
amount of time spent on single actors can be tuned, based on both elapsed
time and number of messages processed. Threads exit when they are not
receiving work, to save resources.

Also all .conf files are now included in the resource packing process.
Users should create user.conf to override these .conf files to avoid
losing edits when updating the server version.
This commit is contained in:
Chord 2020-05-12 07:11:12 +02:00
parent fbca774a37
commit a80e869fb9
4 changed files with 781 additions and 15 deletions

View file

@ -64,7 +64,8 @@ lazy val psloginPackSettings = Seq(
packExtraClasspath := Map("ps-login" -> Seq("${PROG_HOME}/pscrypto-lib", packExtraClasspath := Map("ps-login" -> Seq("${PROG_HOME}/pscrypto-lib",
"${PROG_HOME}/config")), "${PROG_HOME}/config")),
packResourceDir += (baseDirectory.value / "pscrypto-lib" -> "pscrypto-lib"), packResourceDir += (baseDirectory.value / "pscrypto-lib" -> "pscrypto-lib"),
packResourceDir += (baseDirectory.value / "config" -> "config") packResourceDir += (baseDirectory.value / "config" -> "config"),
packResourceDir += (baseDirectory.value / "pslogin/src/main/resources" -> "config")
) )
lazy val root = (project in file(".")). lazy val root = (project in file(".")).

View file

@ -1,4 +1,269 @@
#####################################################
## PSForever application.conf
##
## Do not edit this directly!
## Instead override variables by creating user.conf
#####################################################
kamon {
  environment.service = "PSForever"
  apm.api-key = ""
}

akka {
  loggers = ["akka.event.slf4j.Slf4jLogger"]
  loglevel = INFO
  logging-filter = akka.event.slf4j.Slf4jLoggingFilter
}

# Map performance-critical actors to dedicated dispatchers.
# Dispatcher definitions live in psfdispatchers.conf (included below).
# HOCON path expressions ("/path".dispatcher = x) are equivalent to the
# nested-object form and keep this mapping table compact.
akka.actor.deployment {
  # Login pipeline
  "/login-udp-endpoint".dispatcher = network-listener
  "/login-udp-endpoint/login-session-router".dispatcher = login-session-router
  "/login-udp-endpoint/login-session-router/*".dispatcher = login-session

  # Extremely performance critical (dedicated thread)
  "/world-udp-endpoint".dispatcher = network-listener
  # Extremely performance critical (dedicated thread)
  "/world-udp-endpoint/world-session-router".dispatcher = world-session-router
  # Most likely to crash (isolate)
  "/world-udp-endpoint/world-session-router/*".dispatcher = world-session

  # Service dedicated pool
  "/service".dispatcher = service-dispatcher
  "/service/*".dispatcher = service-dispatcher
  # Bottleneck (dedicated thread)
  "/service/galaxy".dispatcher = galaxy-service
  # Isolate tasks
  "/service/taskResolver*".dispatcher = task-dispatcher
  # Bottleneck (dedicated thread)
  "/service/cluster".dispatcher = interstellar-cluster-service

  # Zone actors (lots of messages): each zone actor and its children get
  # that zone's dedicated dispatcher ("/x-actor" covers the actor itself,
  # "/x-actor/*" covers its children — the wildcard does not match the parent).
  "/service/cluster/c1-actor".dispatcher = c1-zone-dispatcher
  "/service/cluster/c1-actor/*".dispatcher = c1-zone-dispatcher
  "/service/cluster/c2-actor".dispatcher = c2-zone-dispatcher
  "/service/cluster/c2-actor/*".dispatcher = c2-zone-dispatcher
  "/service/cluster/c3-actor".dispatcher = c3-zone-dispatcher
  "/service/cluster/c3-actor/*".dispatcher = c3-zone-dispatcher
  "/service/cluster/c4-actor".dispatcher = c4-zone-dispatcher
  "/service/cluster/c4-actor/*".dispatcher = c4-zone-dispatcher
  "/service/cluster/c5-actor".dispatcher = c5-zone-dispatcher
  "/service/cluster/c5-actor/*".dispatcher = c5-zone-dispatcher
  "/service/cluster/c6-actor".dispatcher = c6-zone-dispatcher
  "/service/cluster/c6-actor/*".dispatcher = c6-zone-dispatcher
  "/service/cluster/i1-actor".dispatcher = i1-zone-dispatcher
  "/service/cluster/i1-actor/*".dispatcher = i1-zone-dispatcher
  "/service/cluster/i2-actor".dispatcher = i2-zone-dispatcher
  "/service/cluster/i2-actor/*".dispatcher = i2-zone-dispatcher
  "/service/cluster/i3-actor".dispatcher = i3-zone-dispatcher
  "/service/cluster/i3-actor/*".dispatcher = i3-zone-dispatcher
  "/service/cluster/i4-actor".dispatcher = i4-zone-dispatcher
  "/service/cluster/i4-actor/*".dispatcher = i4-zone-dispatcher
  "/service/cluster/z1-actor".dispatcher = z1-zone-dispatcher
  "/service/cluster/z1-actor/*".dispatcher = z1-zone-dispatcher
  "/service/cluster/z2-actor".dispatcher = z2-zone-dispatcher
  "/service/cluster/z2-actor/*".dispatcher = z2-zone-dispatcher
  "/service/cluster/z3-actor".dispatcher = z3-zone-dispatcher
  "/service/cluster/z3-actor/*".dispatcher = z3-zone-dispatcher
  "/service/cluster/z4-actor".dispatcher = z4-zone-dispatcher
  "/service/cluster/z4-actor/*".dispatcher = z4-zone-dispatcher
  "/service/cluster/z5-actor".dispatcher = z5-zone-dispatcher
  "/service/cluster/z5-actor/*".dispatcher = z5-zone-dispatcher
  "/service/cluster/z6-actor".dispatcher = z6-zone-dispatcher
  "/service/cluster/z6-actor/*".dispatcher = z6-zone-dispatcher
  "/service/cluster/z7-actor".dispatcher = z7-zone-dispatcher
  "/service/cluster/z7-actor/*".dispatcher = z7-zone-dispatcher
  "/service/cluster/z8-actor".dispatcher = z8-zone-dispatcher
  "/service/cluster/z8-actor/*".dispatcher = z8-zone-dispatcher
  "/service/cluster/z9-actor".dispatcher = z9-zone-dispatcher
  "/service/cluster/z9-actor/*".dispatcher = z9-zone-dispatcher
  "/service/cluster/z10-actor".dispatcher = z10-zone-dispatcher
  "/service/cluster/z10-actor/*".dispatcher = z10-zone-dispatcher
  "/service/cluster/home1-actor".dispatcher = home1-zone-dispatcher
  "/service/cluster/home1-actor/*".dispatcher = home1-zone-dispatcher
  "/service/cluster/home2-actor".dispatcher = home2-zone-dispatcher
  "/service/cluster/home2-actor/*".dispatcher = home2-zone-dispatcher
  "/service/cluster/home3-actor".dispatcher = home3-zone-dispatcher
  "/service/cluster/home3-actor/*".dispatcher = home3-zone-dispatcher
  "/service/cluster/tzconc-actor".dispatcher = tzconc-zone-dispatcher
  "/service/cluster/tzconc-actor/*".dispatcher = tzconc-zone-dispatcher
  "/service/cluster/tzcotr-actor".dispatcher = tzcotr-zone-dispatcher
  "/service/cluster/tzcotr-actor/*".dispatcher = tzcotr-zone-dispatcher
  "/service/cluster/tzcovs-actor".dispatcher = tzcovs-zone-dispatcher
  "/service/cluster/tzcovs-actor/*".dispatcher = tzcovs-zone-dispatcher
  "/service/cluster/tzdrnc-actor".dispatcher = tzdrnc-zone-dispatcher
  "/service/cluster/tzdrnc-actor/*".dispatcher = tzdrnc-zone-dispatcher
  "/service/cluster/tzdrvs-actor".dispatcher = tzdrvs-zone-dispatcher
  "/service/cluster/tzdrvs-actor/*".dispatcher = tzdrvs-zone-dispatcher
  "/service/cluster/tzsdrtr-actor".dispatcher = tzsdrtr-zone-dispatcher
  "/service/cluster/tzsdrtr-actor/*".dispatcher = tzsdrtr-zone-dispatcher
  "/service/cluster/tzshnc-actor".dispatcher = tzshnc-zone-dispatcher
  "/service/cluster/tzshnc-actor/*".dispatcher = tzshnc-zone-dispatcher
  "/service/cluster/tzshtr-actor".dispatcher = tzshtr-zone-dispatcher
  "/service/cluster/tzshtr-actor/*".dispatcher = tzshtr-zone-dispatcher
  "/service/cluster/tzshvs-actor".dispatcher = tzshvs-zone-dispatcher
  "/service/cluster/tzshvs-actor/*".dispatcher = tzshvs-zone-dispatcher
}

include "psfdispatchers"
# create user.conf and add your overrides
include "user"

View file

@ -0,0 +1,513 @@
#####################################################
## PSForever psfdispatchers.conf
##
## Do not edit this directly!
## Instead override variables by creating user.conf
#####################################################

# Shared settings for every per-zone dispatcher. Individual zones inherit
# these via HOCON self-referential substitution (x = ${zone-dispatcher-defaults})
# below, so a single edit here tunes all zones at once. A user.conf override of
# any one zone's dispatcher still works as before.
zone-dispatcher-defaults {
  type = Dispatcher
  executor = "fork-join-executor"
  fork-join-executor {
    # One thread is enough for an idle zone; cap at 4 under load
    parallelism-min = 1
    parallelism-factor = 2.0
    parallelism-max = 4
  }
  # Maximum messages processed per actor before the thread moves on
  throughput = 50
  throughput-deadline-time = 50ms
}

# Per-connection login session actors
login-session {
  # Dispatcher is the name of the event-based dispatcher
  type = Dispatcher
  # What kind of ExecutionService to use
  executor = "fork-join-executor"
  # Configuration for the fork join pool
  fork-join-executor {
    # Min number of threads to cap factor-based parallelism number to
    parallelism-min = 8
    # Parallelism (threads) ... ceil(available processors * factor)
    parallelism-factor = 2.0
    # Max number of threads to cap factor-based parallelism number to
    parallelism-max = 64
  }
  # Throughput defines the maximum number of messages to be
  # processed per actor before the thread jumps to the next actor.
  # Set to 1 for as fair as possible.
  throughput = 50
  throughput-deadline-time = 20ms
}

# Per-connection world (gameplay) session actors
world-session {
  type = Dispatcher
  executor = "fork-join-executor"
  fork-join-executor {
    parallelism-min = 8
    parallelism-factor = 2.0
    parallelism-max = 64
  }
  throughput = 50
  throughput-deadline-time = 50ms
}

# Routers and singleton services each get a dedicated (pinned) thread
login-session-router {
  executor = "thread-pool-executor"
  type = PinnedDispatcher
}
world-session-router {
  executor = "thread-pool-executor"
  type = PinnedDispatcher
}
network-listener {
  executor = "thread-pool-executor"
  type = PinnedDispatcher
}
interstellar-cluster-service {
  executor = "thread-pool-executor"
  type = PinnedDispatcher
}
galaxy-service {
  executor = "thread-pool-executor"
  type = PinnedDispatcher
}

# Background task resolution pool
task-dispatcher {
  type = Dispatcher
  executor = "fork-join-executor"
  fork-join-executor {
    parallelism-min = 2
    parallelism-factor = 2.0
    parallelism-max = 64
  }
  throughput = 50
  throughput-deadline-time = 50ms
}

# General service pool
service-dispatcher {
  type = Dispatcher
  executor = "fork-join-executor"
  fork-join-executor {
    parallelism-min = 2
    parallelism-factor = 2.0
    parallelism-max = 64
  }
  throughput = 50
  throughput-deadline-time = 50ms
}

# One dispatcher per zone; all inherit zone-dispatcher-defaults above.
c1-zone-dispatcher = ${zone-dispatcher-defaults}
c2-zone-dispatcher = ${zone-dispatcher-defaults}
c3-zone-dispatcher = ${zone-dispatcher-defaults}
c4-zone-dispatcher = ${zone-dispatcher-defaults}
c5-zone-dispatcher = ${zone-dispatcher-defaults}
c6-zone-dispatcher = ${zone-dispatcher-defaults}
i1-zone-dispatcher = ${zone-dispatcher-defaults}
i2-zone-dispatcher = ${zone-dispatcher-defaults}
i3-zone-dispatcher = ${zone-dispatcher-defaults}
i4-zone-dispatcher = ${zone-dispatcher-defaults}
z1-zone-dispatcher = ${zone-dispatcher-defaults}
z2-zone-dispatcher = ${zone-dispatcher-defaults}
z3-zone-dispatcher = ${zone-dispatcher-defaults}
z4-zone-dispatcher = ${zone-dispatcher-defaults}
z5-zone-dispatcher = ${zone-dispatcher-defaults}
z6-zone-dispatcher = ${zone-dispatcher-defaults}
z7-zone-dispatcher = ${zone-dispatcher-defaults}
z8-zone-dispatcher = ${zone-dispatcher-defaults}
z9-zone-dispatcher = ${zone-dispatcher-defaults}
z10-zone-dispatcher = ${zone-dispatcher-defaults}
home1-zone-dispatcher = ${zone-dispatcher-defaults}
home2-zone-dispatcher = ${zone-dispatcher-defaults}
home3-zone-dispatcher = ${zone-dispatcher-defaults}
tzconc-zone-dispatcher = ${zone-dispatcher-defaults}
tzcotr-zone-dispatcher = ${zone-dispatcher-defaults}
tzcovs-zone-dispatcher = ${zone-dispatcher-defaults}
tzdrnc-zone-dispatcher = ${zone-dispatcher-defaults}
tzdrvs-zone-dispatcher = ${zone-dispatcher-defaults}
tzsdrtr-zone-dispatcher = ${zone-dispatcher-defaults}
tzshnc-zone-dispatcher = ${zone-dispatcher-defaults}
tzshtr-zone-dispatcher = ${zone-dispatcher-defaults}
tzshvs-zone-dispatcher = ${zone-dispatcher-defaults}

View file

@ -36,7 +36,6 @@ object PsLogin {
private val logger = org.log4s.getLogger private val logger = org.log4s.getLogger
var args : Array[String] = Array() var args : Array[String] = Array()
var config : java.util.Map[String,Object] = null
implicit var system : ActorSystem = null implicit var system : ActorSystem = null
var loginRouter : Props = Props.empty var loginRouter : Props = Props.empty
var worldRouter : Props = Props.empty var worldRouter : Props = Props.empty
@ -214,17 +213,6 @@ object PsLogin {
case _ => case _ =>
} }
/** Make sure we capture Akka messages (but only INFO and above)
*
* This same config can be specified in a configuration file, but that's more work at this point.
* In the future we will have a unified configuration file specific to this server
*/
config = Map(
"akka.loggers" -> List("akka.event.slf4j.Slf4jLogger").asJava,
"akka.loglevel" -> "INFO",
"akka.logging-filter" -> "akka.event.slf4j.Slf4jLoggingFilter"
).asJava
WorldConfig.Get[Boolean]("kamon.Active") match { WorldConfig.Get[Boolean]("kamon.Active") match {
case true => case true =>
logger.info("Starting Kamon") logger.info("Starting Kamon")
@ -233,11 +221,10 @@ object PsLogin {
case _ => ; case _ => ;
} }
logger.info("Starting actor subsystems") logger.info("Starting actor subsystems")
/** Start up the main actor system. This "system" is the home for all actors running on this server */ /** Start up the main actor system. This "system" is the home for all actors running on this server */
system = ActorSystem("PsLogin", ConfigFactory.parseMap(config)) system = ActorSystem("PsLogin")
logger.info("Starting actor pipelines") logger.info("Starting actor pipelines")
/** Create pipelines for the login and world servers /** Create pipelines for the login and world servers