Enable WAL mode on Sqlite (#1871)
[Write-Ahead Logging](https://sqlite.org/wal.html) is both much more performant in general, and more suited to our particular access patterns.

In a simple throughput test, it improves performance by a factor of 5-20x depending on the sync flag.

version                       | throughput
------------------------------|------------
mode=journal sync=normal (*)  | 11 htlc/s
mode=journal sync=full        | 7 htlc/s
mode=wal sync=normal          | 248 htlc/s
mode=wal sync=full (**)       | 62 htlc/s

(*) previous setting
(**) new setting

I went with a conservative new setting of wal+full sync, which is both ~5x more performant and more secure than what we had before.

> In WAL mode when synchronous is NORMAL (1), the WAL file is synchronized before each checkpoint and the database file is synchronized after each completed checkpoint and the WAL file header is synchronized when a WAL file begins to be reused after a checkpoint, but no sync operations occur during most transactions. With synchronous=FULL in WAL mode, an additional sync operation of the WAL file happens after each transaction commit. The extra WAL sync following each transaction help ensure that transactions are durable across a power loss. Transactions are consistent with or without the extra syncs provided by synchronous=FULL. If durability is not a concern, then synchronous=NORMAL is normally all one needs in WAL mode.
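
For reference, here is a minimal sketch of how these pragmas are applied over JDBC, mirroring the `setJournalMode`/`setSynchronousFlag` helpers added in this commit (the `example.sqlite` file name is only illustrative; the real code goes through `SqliteUtils.openSqliteFile` below):

```scala
import java.sql.DriverManager

// open (or create) an on-disk sqlite database via the Xerial sqlite-jdbc driver
val connection = DriverManager.getConnection("jdbc:sqlite:example.sqlite")
val statement = connection.createStatement()
// switch to Write-Ahead Logging; the pragma returns the journal mode now in effect
val rs = statement.executeQuery("PRAGMA journal_mode=wal")
rs.next()
require(rs.getString(1) == "wal", "couldn't activate mode=wal")
// sync the WAL file after each commit so transactions are durable across power loss
statement.executeUpdate("PRAGMA synchronous=full")
statement.close()
```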

Co-authored-by: Bastien Teinturier <[email protected]>
pm47 and t-bast authored Jul 15, 2021
1 parent 733c6e7 commit ca51a2d
Showing 5 changed files with 171 additions and 29 deletions.
2 changes: 1 addition & 1 deletion eclair-core/pom.xml
@@ -224,7 +224,7 @@
<dependency>
<groupId>org.xerial</groupId>
<artifactId>sqlite-jdbc</artifactId>
<version>3.27.2.1</version>
<version>3.34.0</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
28 changes: 8 additions & 20 deletions eclair-core/src/main/scala/fr/acinq/eclair/db/Databases.scala
@@ -27,7 +27,7 @@ import grizzled.slf4j.Logging

import java.io.File
import java.nio.file._
import java.sql.{Connection, DriverManager}
import java.sql.Connection
import java.util.UUID
import scala.concurrent.duration._

@@ -183,28 +183,16 @@ object Databases extends Logging
* Given a parent folder it creates or loads all the databases from a JDBC connection
*/
def sqlite(dbdir: File): SqliteDatabases = {
dbdir.mkdir()
var sqliteEclair: Connection = null
var sqliteNetwork: Connection = null
var sqliteAudit: Connection = null
try {
sqliteEclair = DriverManager.getConnection(s"jdbc:sqlite:${new File(dbdir, "eclair.sqlite")}")
sqliteNetwork = DriverManager.getConnection(s"jdbc:sqlite:${new File(dbdir, "network.sqlite")}")
sqliteAudit = DriverManager.getConnection(s"jdbc:sqlite:${new File(dbdir, "audit.sqlite")}")
SqliteUtils.obtainExclusiveLock(sqliteEclair) // there should only be one process writing to this file
logger.info("successful lock on eclair.sqlite")
SqliteDatabases(sqliteAudit, sqliteNetwork, sqliteEclair)
} catch {
case t: Throwable =>
logger.error("could not create connection to sqlite databases: ", t)
if (sqliteEclair != null) sqliteEclair.close()
if (sqliteNetwork != null) sqliteNetwork.close()
if (sqliteAudit != null) sqliteAudit.close()
throw t
}
dbdir.mkdirs()
SqliteDatabases(
eclairJdbc = SqliteUtils.openSqliteFile(dbdir, "eclair.sqlite", exclusiveLock = true, journalMode = "wal", syncFlag = "full"), // there should only be one process writing to this file
networkJdbc = SqliteUtils.openSqliteFile(dbdir, "network.sqlite", exclusiveLock = false, journalMode = "wal", syncFlag = "normal"), // we don't need strong durability guarantees on the network db
auditJdbc = SqliteUtils.openSqliteFile(dbdir, "audit.sqlite", exclusiveLock = false, journalMode = "wal", syncFlag = "full")
)
}

def postgres(dbConfig: Config, instanceId: UUID, dbdir: File, lockExceptionHandler: LockFailureHandler = LockFailureHandler.logAndStop)(implicit system: ActorSystem): PostgresDatabases = {
dbdir.mkdirs()
val database = dbConfig.getString("postgres.database")
val host = dbConfig.getString("postgres.host")
val port = dbConfig.getInt("postgres.port")
eclair-core/src/main/scala/fr/acinq/eclair/db/sqlite/SqliteUtils.scala
@@ -17,10 +17,32 @@
package fr.acinq.eclair.db.sqlite

import fr.acinq.eclair.db.jdbc.JdbcUtils
import grizzled.slf4j.Logging

import java.sql.Connection
import java.io.File
import java.sql.{Connection, DriverManager}

object SqliteUtils extends JdbcUtils {
object SqliteUtils extends JdbcUtils with Logging {

def openSqliteFile(directory: File, filename: String, exclusiveLock: Boolean, journalMode: String, syncFlag: String): Connection = {
var sqlite: Connection = null
try {
sqlite = DriverManager.getConnection(s"jdbc:sqlite:${new File(directory, filename)}")
if (exclusiveLock) {
obtainExclusiveLock(sqlite)
}
setJournalMode(sqlite, journalMode)
setSynchronousFlag(sqlite, syncFlag)
sqlite
} catch {
case t: Throwable =>
logger.error("could not create connection to sqlite databases: ", t)
if (sqlite != null) {
sqlite.close()
}
throw t
}
}

/**
* Obtain an exclusive lock on a sqlite database. This is useful when we want to make sure that only one process
@@ -29,11 +51,33 @@ object SqliteUtils extends JdbcUtils {
* The lock will be kept until the database is closed, or if the locking mode is explicitly reset.
*/
def obtainExclusiveLock(sqlite: Connection): Unit = synchronized {
val statement = sqlite.createStatement()
statement.execute("PRAGMA locking_mode = EXCLUSIVE")
// we have to make a write to actually obtain the lock
statement.executeUpdate("CREATE TABLE IF NOT EXISTS dummy_table_for_locking (a INTEGER NOT NULL)")
statement.executeUpdate("INSERT INTO dummy_table_for_locking VALUES (42)")
using(sqlite.createStatement()) { statement =>
statement.execute("PRAGMA locking_mode = EXCLUSIVE")
// we have to make a write to actually obtain the lock
statement.executeUpdate("CREATE TABLE IF NOT EXISTS dummy_table_for_locking (a INTEGER NOT NULL)")
statement.executeUpdate("INSERT INTO dummy_table_for_locking VALUES (42)")
}
}

/**
* See https://www.sqlite.org/pragma.html#pragma_journal_mode
*/
def setJournalMode(sqlite: Connection, mode: String): Unit = {
using(sqlite.createStatement()) { statement =>
val res = statement.executeQuery(s"PRAGMA journal_mode=$mode")
res.next()
val currentMode = res.getString(1)
assert(currentMode == mode, s"couldn't activate mode=$mode")
}
}

/**
* See https://www.sqlite.org/pragma.html#pragma_synchronous
*/
def setSynchronousFlag(sqlite: Connection, flag: String): Unit = {
using(sqlite.createStatement()) { statement =>
statement.executeUpdate(s"PRAGMA synchronous=$flag")
}
}

}
eclair-core/src/test/scala/fr/acinq/eclair/db/ChannelsDbSpec.scala
@@ -105,7 +105,7 @@ class ChannelsDbSpec extends AnyFunSuite {
Future(db.updateChannelMeta(channelId, ChannelEvent.EventType.PaymentSent))
}
val res = Future.sequence(futures)
Await.result(res, 60 seconds)
Await.result(res, 5 minutes)
}
}

eclair-core/src/test/scala/fr/acinq/eclair/integration/PerformanceIntegrationSpec.scala
@@ -0,0 +1,110 @@
/*
* Copyright 2019 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package fr.acinq.eclair.integration

import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import fr.acinq.bitcoin.SatoshiLong
import fr.acinq.eclair.MilliSatoshiLong
import fr.acinq.eclair.channel._
import fr.acinq.eclair.payment._
import fr.acinq.eclair.payment.receive.MultiPartHandler.ReceivePayment
import fr.acinq.eclair.payment.send.MultiPartPaymentLifecycle.PreimageReceived
import fr.acinq.eclair.payment.send.PaymentInitiator
import fr.acinq.eclair.router.Router
import org.scalatest.Ignore

import java.util.UUID
import java.util.concurrent.Executors
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.jdk.CollectionConverters._

/**
* Created by PM on 12/07/2021.
*/

@Ignore
class PerformanceIntegrationSpec extends IntegrationSpec {

test("start eclair nodes") {
val commonPerfTestConfig = ConfigFactory.parseMap(Map(
"eclair.max-funding-satoshis" -> 100_000_000,
"eclair.max-accepted-htlcs" -> Channel.MAX_ACCEPTED_HTLCS,
"eclair.file-backup.enabled" -> false,
).asJava)

instantiateEclairNode("A", ConfigFactory.parseMap(Map("eclair.node-alias" -> "A", "eclair.server.port" -> 29730).asJava).withFallback(commonPerfTestConfig).withFallback(commonFeatures).withFallback(commonConfig)) // A's channels are private
instantiateEclairNode("B", ConfigFactory.parseMap(Map("eclair.node-alias" -> "B", "eclair.server.port" -> 29731).asJava).withFallback(commonPerfTestConfig).withFallback(commonFeatures).withFallback(commonConfig))
}

test("connect nodes") {
// A---B

val eventListener = TestProbe()
nodes.values.foreach(_.system.eventStream.subscribe(eventListener.ref, classOf[ChannelStateChanged]))

connect(nodes("A"), nodes("B"), 100_000_000 sat, 0 msat)

// confirming the funding tx
generateBlocks(6)

within(60 seconds) {
eventListener.expectMsgType[ChannelStateChanged](60 seconds).currentState == NORMAL
}
}

test("wait for channels") {
// Channels should now be available in the router
val sender = TestProbe()
awaitCond({
sender.send(nodes("A").router, Router.GetRoutingState)
val routingState = sender.expectMsgType[Router.RoutingState]
routingState.channels.nonEmpty
}, 60 seconds)
}

def sendPayment()(implicit ec: ExecutionContext): Future[PaymentSent] = Future {
val sender = TestProbe()
val amountMsat = 100_000.msat
// first we retrieve a payment hash from B
sender.send(nodes("B").paymentHandler, ReceivePayment(Some(amountMsat), "1 coffee"))
val pr = sender.expectMsgType[PaymentRequest]
// then we make the actual payment
sender.send(nodes("A").paymentInitiator, PaymentInitiator.SendPayment(amountMsat, pr, fallbackFinalExpiryDelta = finalCltvExpiryDelta, routeParams = integrationTestRouteParams, maxAttempts = 1))
val paymentId = sender.expectMsgType[UUID]
sender.expectMsgType[PreimageReceived]
val ps = sender.expectMsgType[PaymentSent]
assert(ps.id == paymentId)
ps
}

test("send a large number of htlcs A->B") {
val SENDERS_COUNT = 16
val PAYMENTS_COUNT = 3_000
val ec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(SENDERS_COUNT))
val start = System.currentTimeMillis()
val futures = (0 until PAYMENTS_COUNT).map(_ => sendPayment()(ec))
implicit val dummyEc: ExecutionContext = ExecutionContext.Implicits.global
val f = Future.sequence(futures)
Await.result(f, 1 hour)
val end = System.currentTimeMillis()
val duration = end - start
println(s"$PAYMENTS_COUNT payments in ${duration}ms ${PAYMENTS_COUNT * 1000 / duration}htlc/s (senders=$SENDERS_COUNT)")
}

}
