Mirror of https://github.com/signalapp/Signal-Android.git (synced 2026-04-21 17:29:32 +01:00).
Commit: "Improve the performance of the migration by ~4x."
This commit is contained in:
@@ -3721,6 +3721,15 @@ public class MessageTable extends DatabaseTable implements MmsSmsColumns, Recipi
|
||||
getWritableDatabase().update(TABLE_NAME, values, THREAD_ID + " = ?", SqlUtil.buildArgs(fromId));
|
||||
}
|
||||
|
||||
/**
 * Returns the next ID that would be generated if an insert was done on this table.
 * You should *not* use this for actually generating an ID to use. That will happen automatically!
 * This was added for a very narrow usecase, and you probably don't need to use it.
 *
 * @return the auto-increment ID that the next inserted row would be assigned
 */
public long getNextId() {
  // Delegates to SqlUtil to read the table's next auto-increment value.
  // NOTE(review): presumably this peeks at SQLite's sequence counter without inserting —
  // confirm in SqlUtil.getNextAutoIncrementId. Callers (e.g. a re-index pass) rely on every
  // existing row having an ID strictly below the returned value.
  return SqlUtil.getNextAutoIncrementId(getWritableDatabase(), TABLE_NAME);
}
|
||||
|
||||
void updateReactionsUnread(SQLiteDatabase db, long messageId, boolean hasReactions, boolean isRemoval) {
|
||||
try {
|
||||
boolean isOutgoing = getMessageRecord(messageId).isOutgoing();
|
||||
|
||||
@@ -6,6 +6,7 @@ import android.database.Cursor
|
||||
import android.text.TextUtils
|
||||
import org.intellij.lang.annotations.Language
|
||||
import org.signal.core.util.SqlUtil
|
||||
import org.signal.core.util.logging.Log
|
||||
|
||||
/**
|
||||
* Contains all databases necessary for full-text search (FTS).
|
||||
@@ -13,6 +14,8 @@ import org.signal.core.util.SqlUtil
|
||||
@SuppressLint("RecipientIdDatabaseReferenceUsage", "ThreadIdDatabaseReferenceUsage") // Handles updates via triggers
|
||||
class SearchTable(context: Context, databaseHelper: SignalDatabase) : DatabaseTable(context, databaseHelper) {
|
||||
companion object {
|
||||
private val TAG = Log.tag(SearchTable::class.java)
|
||||
|
||||
const val MMS_FTS_TABLE_NAME = "mms_fts"
|
||||
const val ID = "rowid"
|
||||
const val BODY = MmsSmsColumns.BODY
|
||||
@@ -111,6 +114,38 @@ class SearchTable(context: Context, databaseHelper: SignalDatabase) : DatabaseTa
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Re-adds every message to the index. It's fine to insert the same message twice; the table will naturally de-dupe.
 *
 * In order to prevent the database from locking up with super large inserts, this will perform the re-index in batches of the size you specify.
 * It is not guaranteed that every batch will be the same size, but rather that the batches will be _no larger_ than the specified size.
 *
 * Warning: This is a potentially extremely-costly operation! It can take 10+ seconds on large installs and/or slow devices.
 * Be smart about where you call this.
 *
 * @param batchSize upper bound on the number of message IDs covered by a single INSERT…SELECT
 */
fun rebuildIndex(batchSize: Long = 10_000L) {
  // nextId is the value the *next* insert would receive, so every existing row has an ID
  // strictly below maxId; iterating 1..maxId therefore covers the whole table.
  val maxId: Long = SignalDatabase.messages.nextId

  Log.i(TAG, "Re-indexing. Operating on ID's 1-$maxId in steps of $batchSize.")

  for (i in 1..maxId step batchSize) {
    Log.i(TAG, "Reindexing ID's [$i, ${i + batchSize})")
    // Bug fix: also backfill thread_id. The mms_ai trigger populates
    // mms_fts(rowid, body, thread_id); omitting thread_id here would leave re-indexed rows
    // with a NULL thread_id, so any search scoped to a thread would silently miss them.
    // NOTE(review): raw column name "thread_id" is used to match the trigger definition —
    // if the companion object declares a THREAD_ID constant, prefer it.
    writableDatabase.execSQL(
      """
      INSERT INTO $MMS_FTS_TABLE_NAME ($ID, $BODY, thread_id)
      SELECT
        ${MessageTable.ID},
        ${MessageTable.BODY},
        thread_id
      FROM
        ${MessageTable.TABLE_NAME}
      WHERE
        ${MessageTable.ID} >= $i AND
        ${MessageTable.ID} < ${i + batchSize}
      """.trimIndent()
    )
  }
}
|
||||
|
||||
private fun createFullTextSearchQuery(query: String): String {
|
||||
return query
|
||||
.split(" ")
|
||||
|
||||
@@ -3,14 +3,42 @@ package org.thoughtcrime.securesms.database.helpers.migration
|
||||
import android.app.Application
|
||||
import net.zetetic.database.sqlcipher.SQLiteDatabase
|
||||
import org.signal.core.util.SqlUtil
|
||||
import org.signal.core.util.Stopwatch
|
||||
import org.signal.core.util.logging.Log
|
||||
|
||||
object V168_SingleMessageTableMigration : SignalDatabaseMigration {
|
||||
private val TAG = Log.tag(V168_SingleMessageTableMigration::class.java)
|
||||
|
||||
override fun migrate(context: Application, db: SQLiteDatabase, oldVersion: Int, newVersion: Int) {
|
||||
val stopwatch = Stopwatch("migration")
|
||||
|
||||
val nextMmsId = SqlUtil.getNextAutoIncrementId(db, "mms")
|
||||
stopwatch.split("next-id")
|
||||
|
||||
db.execSQL("DROP TRIGGER msl_sms_delete")
|
||||
db.execSQL("DROP TRIGGER reactions_sms_delete")
|
||||
db.execSQL("DROP TRIGGER sms_ai")
|
||||
db.execSQL("DROP TRIGGER sms_au")
|
||||
db.execSQL("DROP TRIGGER sms_ad")
|
||||
db.execSQL("DROP TABLE sms_fts") // Will drop all other related fts tables
|
||||
stopwatch.split("drop-triggers")
|
||||
|
||||
// It's actually much faster to drop the indexes, copy the data, then recreate the indexes in bulk than it is to keep them and index-as-you-insert.
|
||||
// Like, at least twice as fast.
|
||||
db.execSQL("DROP INDEX mms_read_and_notified_and_thread_id_index")
|
||||
db.execSQL("DROP INDEX mms_type_index")
|
||||
db.execSQL("DROP INDEX mms_date_sent_index")
|
||||
db.execSQL("DROP INDEX mms_date_server_index")
|
||||
db.execSQL("DROP INDEX mms_thread_date_index")
|
||||
db.execSQL("DROP INDEX mms_reactions_unread_index")
|
||||
db.execSQL("DROP INDEX mms_story_type_index")
|
||||
db.execSQL("DROP INDEX mms_parent_story_id_index")
|
||||
db.execSQL("DROP INDEX mms_thread_story_parent_story_index")
|
||||
db.execSQL("DROP INDEX mms_quote_id_quote_author_index")
|
||||
db.execSQL("DROP INDEX mms_exported_index")
|
||||
db.execSQL("DROP INDEX mms_id_type_payment_transactions_index")
|
||||
db.execSQL("DROP TRIGGER mms_ai") // Note: For perf reasons, we won't actually rebuild the index here -- we'll rebuild it asynchronously in a job
|
||||
stopwatch.split("drop-mms-indexes")
|
||||
|
||||
db.execSQL(
|
||||
"""
|
||||
@@ -66,8 +94,31 @@ object V168_SingleMessageTableMigration : SignalDatabaseMigration {
|
||||
FROM sms
|
||||
"""
|
||||
)
|
||||
stopwatch.split("copy-sms")
|
||||
|
||||
db.execSQL("DROP TABLE sms")
|
||||
stopwatch.split("drop-sms")
|
||||
|
||||
db.execSQL("CREATE INDEX mms_read_and_notified_and_thread_id_index ON mms(read, notified, thread_id)")
|
||||
db.execSQL("CREATE INDEX mms_type_index ON mms (type)")
|
||||
db.execSQL("CREATE INDEX mms_date_sent_index ON mms (date_sent, recipient_id, thread_id)")
|
||||
db.execSQL("CREATE INDEX mms_date_server_index ON mms (date_server)")
|
||||
db.execSQL("CREATE INDEX mms_thread_date_index ON mms (thread_id, date_received)")
|
||||
db.execSQL("CREATE INDEX mms_reactions_unread_index ON mms (reactions_unread)")
|
||||
db.execSQL("CREATE INDEX mms_story_type_index ON mms (story_type)")
|
||||
db.execSQL("CREATE INDEX mms_parent_story_id_index ON mms (parent_story_id)")
|
||||
db.execSQL("CREATE INDEX mms_thread_story_parent_story_index ON mms (thread_id, date_received, story_type, parent_story_id)")
|
||||
db.execSQL("CREATE INDEX mms_quote_id_quote_author_index ON mms (quote_id, quote_author)")
|
||||
db.execSQL("CREATE INDEX mms_exported_index ON mms (exported)")
|
||||
db.execSQL("CREATE INDEX mms_id_type_payment_transactions_index ON mms (_id, type) WHERE type & ${0x300000000L} != 0")
|
||||
db.execSQL(
|
||||
"""
|
||||
CREATE TRIGGER mms_ai AFTER INSERT ON mms BEGIN
|
||||
INSERT INTO mms_fts (rowid, body, thread_id) VALUES (new._id, new.body, new.thread_id);
|
||||
END;
|
||||
"""
|
||||
)
|
||||
stopwatch.split("rebuild-indexes")
|
||||
|
||||
db.execSQL(
|
||||
"""
|
||||
@@ -76,6 +127,7 @@ object V168_SingleMessageTableMigration : SignalDatabaseMigration {
|
||||
WHERE is_mms = 0
|
||||
"""
|
||||
)
|
||||
stopwatch.split("update-reactions")
|
||||
|
||||
db.execSQL(
|
||||
"""
|
||||
@@ -84,8 +136,10 @@ object V168_SingleMessageTableMigration : SignalDatabaseMigration {
|
||||
WHERE is_mms = 0
|
||||
"""
|
||||
)
|
||||
stopwatch.split("update-msl")
|
||||
|
||||
stopwatch.stop(TAG)
|
||||
|
||||
// TODO search index?
|
||||
// TODO jobs?
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user