Switch the snapshot table to use incrementing versions.
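The heart of the change: instead of comparing sync timestamps, each row now carries a snapshot version, the latest snapshot is MAX(snapshot_version), and anything below that (and not pending) is stale. A rough Kotlin sketch of the predicates involved, mirroring the WHERE clauses that appear in the diff below; this block is illustrative only and not part of the commit:

  // Mirrors BackupMediaSnapshotTable.MAX_VERSION (the real constant also adds INDEXED BY backup_snapshot_version_index).
  val maxVersion = "(SELECT MAX(snapshot_version) FROM backup_media_snapshot WHERE snapshot_version != -1)"

  // Rows from older snapshots -- candidates for deletion from the archive CDN.
  val staleWhere = "snapshot_version < $maxVersion AND is_pending = 0"

  // Rows in the latest snapshot that a CDN reconciliation pass never stamped -- candidates for re-upload.
  val missingWhere = "snapshot_version = $maxVersion AND last_seen_on_remote_snapshot_version < $maxVersion"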
@@ -16,94 +16,97 @@ import org.thoughtcrime.securesms.testing.SignalActivityRule
@RunWith(AndroidJUnit4::class)
class BackupMediaSnapshotTableTest {

  companion object {
    private const val SEQUENCE_COUNT = 100
    private const val SEQUENCE_COUNT_WITH_THUMBNAILS = 200
  }

  @get:Rule
  val harness = SignalActivityRule()

  @Test
  fun givenAnEmptyTable_whenIWriteToTable_thenIExpectEmptyTable() {
    val pendingSyncTime = 1L
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(), pendingSyncTime)
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = 100))

    val count = getSyncedItemCount(pendingSyncTime)
    val count = getCountForLatestSnapshot(includeThumbnails = true)

    assertThat(count).isEqualTo(0)
  }

  @Test
  fun givenAnEmptyTable_whenIWriteToTableAndCommit_thenIExpectFilledTable() {
    val pendingSyncTime = 1L
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(), pendingSyncTime)
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = 100))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val count = getSyncedItemCount(pendingSyncTime)
    val count = getCountForLatestSnapshot(includeThumbnails = false)

    assertThat(count).isEqualTo(SEQUENCE_COUNT_WITH_THUMBNAILS)
    assertThat(count).isEqualTo(100)
  }

  @Test
  fun givenAFilledTable_whenIInsertSimilarIds_thenIExpectUncommittedOverrides() {
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(), 1L)
  fun givenAnEmptyTable_whenIWriteToTableAndCommit_thenIExpectFilledTableWithThumbnails() {
    val inputCount = 100
    val countWithThumbnails = inputCount * 2

    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = inputCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val newPendingTime = 2L
    val newObjectCount = 50
    val newObjectCountWithThumbnails = newObjectCount * 2
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(newObjectCount), newPendingTime)
    val count = getCountForLatestSnapshot(includeThumbnails = true)

    val count = SignalDatabase.backupMediaSnapshots.readableDatabase.count()
      .from(BackupMediaSnapshotTable.TABLE_NAME)
      .where("${BackupMediaSnapshotTable.LAST_SYNC_TIME} = 1 AND ${BackupMediaSnapshotTable.PENDING_SYNC_TIME} = $newPendingTime")
      .run()
      .readToSingleInt(-1)

    assertThat(count).isEqualTo(newObjectCountWithThumbnails)
    assertThat(count).isEqualTo(countWithThumbnails)
  }

  @Test
  fun givenAFilledTable_whenIInsertSimilarIdsAndCommit_thenIExpectCommittedOverrides() {
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(), 1L)
  fun givenAFilledTable_whenIReinsertObjects_thenIExpectUncommittedOverrides() {
    val initialCount = 100
    val additionalCount = 25

    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val newPendingTime = 2L
    val newObjectCount = 50
    val newObjectCountWithThumbnails = newObjectCount * 2
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(newObjectCount), newPendingTime)
    // This relies on how the sequence of mediaIds is generated in tests -- the ones we generate here will have the mediaIds as the ones we generated above
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = additionalCount))

    val pendingCount = getCountForPending(includeThumbnails = false)
    val latestVersionCount = getCountForLatestSnapshot(includeThumbnails = false)

    assertThat(pendingCount).isEqualTo(additionalCount)
    assertThat(latestVersionCount).isEqualTo(initialCount)
  }

  @Test
  fun givenAFilledTable_whenIReinsertObjectsAndCommit_thenIExpectCommittedOverrides() {
    val initialCount = 100
    val additionalCount = 25

    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val count = SignalDatabase.backupMediaSnapshots.readableDatabase.count()
      .from(BackupMediaSnapshotTable.TABLE_NAME)
      .where("${BackupMediaSnapshotTable.LAST_SYNC_TIME} = $newPendingTime AND ${BackupMediaSnapshotTable.PENDING_SYNC_TIME} = $newPendingTime")
      .run()
      .readToSingleInt(-1)
    // This relies on how the sequence of mediaIds is generated in tests -- the ones we generate here will have the mediaIds as the ones we generated above
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = additionalCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val total = getTotalItemCount()
    val pendingCount = getCountForPending(includeThumbnails = false)
    val latestVersionCount = getCountForLatestSnapshot(includeThumbnails = false)
    val totalCount = getTotalItemCount(includeThumbnails = false)

    assertThat(count).isEqualTo(newObjectCountWithThumbnails)
    assertThat(total).isEqualTo(SEQUENCE_COUNT_WITH_THUMBNAILS)
    assertThat(pendingCount).isEqualTo(0)
    assertThat(latestVersionCount).isEqualTo(additionalCount)
    assertThat(totalCount).isEqualTo(initialCount)
  }

  @Test
  fun givenAFilledTable_whenIInsertSimilarIdsAndCommitThenDelete_thenIExpectOnlyCommittedOverrides() {
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(), 1L)
    val initialCount = 100
    val additionalCount = 25

    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val newPendingTime = 2L
    val newObjectCount = 50
    val newObjectCountWithThumbnails = newObjectCount * 2
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(newObjectCount), newPendingTime)
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = additionalCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val page = SignalDatabase.backupMediaSnapshots.getPageOfOldMediaObjects(currentSyncTime = newPendingTime, pageSize = 100)
    SignalDatabase.backupMediaSnapshots.deleteMediaObjects(page)
    val page = SignalDatabase.backupMediaSnapshots.getPageOfOldMediaObjects(pageSize = 1_000)
    SignalDatabase.backupMediaSnapshots.deleteOldMediaObjects(page)

    val total = getTotalItemCount()
    val total = getTotalItemCount(includeThumbnails = false)

    assertThat(total).isEqualTo(newObjectCountWithThumbnails)
    assertThat(total).isEqualTo(additionalCount)
  }

  @Test
@@ -118,7 +121,7 @@ class BackupMediaSnapshotTableTest {
      createArchiveMediaObject(seed = 2, cdn = 2)
    )

    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence(), 1L)
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence())
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val mismatches = SignalDatabase.backupMediaSnapshots.getMediaObjectsWithNonMatchingCdn(remoteData)
@@ -137,13 +140,13 @@ class BackupMediaSnapshotTableTest {
      createArchiveMediaObject(seed = 2, cdn = 99)
    )

    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence(), 1L)
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence())
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val mismatches = SignalDatabase.backupMediaSnapshots.getMediaObjectsWithNonMatchingCdn(remoteData)
    assertThat(mismatches.size).isEqualTo(1)
    assertThat(mismatches.get(0).cdn).isEqualTo(99)
    assertThat(mismatches.get(0).digest).isEqualTo(localData.get(1).digest)
    assertThat(mismatches[0].cdn).isEqualTo(99)
    assertThat(mismatches[0].digest).isEqualTo(localData[1].digest)
  }

  @Test
@@ -158,7 +161,7 @@ class BackupMediaSnapshotTableTest {
      createArchiveMediaObject(seed = 2, cdn = 2)
    )

    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence(), 1L)
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence())
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val notFound = SignalDatabase.backupMediaSnapshots.getMediaObjectsThatCantBeFound(remoteData)
@@ -177,7 +180,7 @@ class BackupMediaSnapshotTableTest {
      createArchiveMediaObject(seed = 3, cdn = 2)
    )

    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence(), 1L)
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence())
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val notFound = SignalDatabase.backupMediaSnapshots.getMediaObjectsThatCantBeFound(remoteData)
@@ -185,20 +188,110 @@ class BackupMediaSnapshotTableTest {
    assertThat(notFound.first()).isEqualTo(remoteData[1])
  }

  private fun getTotalItemCount(): Int {
    return SignalDatabase.backupMediaSnapshots.readableDatabase.count().from(BackupMediaSnapshotTable.TABLE_NAME).run().readToSingleInt(-1)
  @Test
  fun getCurrentSnapshotVersion_emptyTable() {
    val version = SignalDatabase.backupMediaSnapshots.getCurrentSnapshotVersion()

    assertThat(version).isEqualTo(0)
  }

  private fun getSyncedItemCount(pendingTime: Long): Int {
    return SignalDatabase.backupMediaSnapshots.readableDatabase.count()
  @Test
  fun getCurrentSnapshotVersion_singleCommit() {
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = 100))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val version = SignalDatabase.backupMediaSnapshots.getCurrentSnapshotVersion()

    assertThat(version).isEqualTo(1)
  }

  @Test
  fun getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion_noneMarkedSeen() {
    val initialCount = 100

    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val notSeenCount = SignalDatabase.backupMediaSnapshots.getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion(1).count

    val expectedOldCountIncludingThumbnails = initialCount * 2

    assertThat(notSeenCount).isEqualTo(expectedOldCountIncludingThumbnails)
  }

  @Test
  fun getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion_someMarkedSeen() {
    val initialCount = 100
    val markSeenCount = 25

    val itemsToCommit = generateArchiveMediaItemSequence(count = initialCount)
    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(itemsToCommit)
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val normalIdsToMarkSeen = itemsToCommit.take(markSeenCount).map { it.mediaId }.toList()
    val thumbnailIdsToMarkSeen = itemsToCommit.take(markSeenCount).map { it.thumbnailMediaId }.toList()
    val allItemsToMarkSeen = normalIdsToMarkSeen + thumbnailIdsToMarkSeen

    SignalDatabase.backupMediaSnapshots.markSeenOnRemote(allItemsToMarkSeen, 1)

    val notSeenCount = SignalDatabase.backupMediaSnapshots.getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion(1).count

    val expectedOldCount = initialCount - markSeenCount
    val expectedOldCountIncludingThumbnails = expectedOldCount * 2

    assertThat(notSeenCount).isEqualTo(expectedOldCountIncludingThumbnails)
  }

  private fun getTotalItemCount(includeThumbnails: Boolean): Int {
    return if (includeThumbnails) {
      SignalDatabase.backupMediaSnapshots.readableDatabase
        .count()
        .from(BackupMediaSnapshotTable.TABLE_NAME)
        .where("${BackupMediaSnapshotTable.LAST_SYNC_TIME} = $pendingTime AND ${BackupMediaSnapshotTable.PENDING_SYNC_TIME} = $pendingTime")
        .run()
        .readToSingleInt(-1)
        .readToSingleInt(0)
    } else {
      SignalDatabase.backupMediaSnapshots.readableDatabase
        .count()
        .from(BackupMediaSnapshotTable.TABLE_NAME)
        .where("${BackupMediaSnapshotTable.IS_THUMBNAIL} = 0")
        .run()
        .readToSingleInt(0)
    }
  }

  private fun generateArchiveMediaItemSequence(count: Int = SEQUENCE_COUNT): Sequence<ArchiveMediaItem> {
    return generateSequence(0) { seed -> if (seed < (count - 1)) seed + 1 else null }
  private fun getCountForLatestSnapshot(includeThumbnails: Boolean): Int {
    val thumbnailFilter = if (!includeThumbnails) {
      " AND ${BackupMediaSnapshotTable.IS_THUMBNAIL} = 0"
    } else {
      ""
    }

    return SignalDatabase.backupMediaSnapshots.readableDatabase
      .count()
      .from(BackupMediaSnapshotTable.TABLE_NAME)
      .where("${BackupMediaSnapshotTable.SNAPSHOT_VERSION} = ${BackupMediaSnapshotTable.MAX_VERSION} AND ${BackupMediaSnapshotTable.SNAPSHOT_VERSION} != ${BackupMediaSnapshotTable.UNKNOWN_VERSION}" + thumbnailFilter)
      .run()
      .readToSingleInt(0)
  }

  private fun getCountForPending(includeThumbnails: Boolean): Int {
    val thumbnailFilter = if (!includeThumbnails) {
      " AND ${BackupMediaSnapshotTable.IS_THUMBNAIL} = 0"
    } else {
      ""
    }

    return SignalDatabase.backupMediaSnapshots.readableDatabase
      .count()
      .from(BackupMediaSnapshotTable.TABLE_NAME)
      .where("${BackupMediaSnapshotTable.IS_PENDING} != 0" + thumbnailFilter)
      .run()
      .readToSingleInt(0)
  }

  private fun generateArchiveMediaItemSequence(count: Int): Sequence<ArchiveMediaItem> {
    return (1..count)
      .asSequence()
      .map { createArchiveMediaItem(it) }
  }

@@ -13,6 +13,7 @@ import org.signal.core.util.SqlUtil
import org.signal.core.util.delete
import org.signal.core.util.readToList
import org.signal.core.util.readToSet
import org.signal.core.util.readToSingleLong
import org.signal.core.util.requireBoolean
import org.signal.core.util.requireInt
import org.signal.core.util.requireIntOrNull
@@ -21,7 +22,7 @@ import org.signal.core.util.requireNonNullString
import org.signal.core.util.select
import org.signal.core.util.toInt
import org.signal.core.util.update
import org.signal.core.util.updateAll
import org.signal.core.util.withinTransaction
import org.thoughtcrime.securesms.backup.v2.ArchivedMediaObject

/**
@@ -29,6 +30,25 @@ import org.thoughtcrime.securesms.backup.v2.ArchivedMediaObject
 * references that attachment -- at least until a new backup is made.
 *
 * So, this table maintains a snapshot of the media present in the last backup, so that we know what we can and can't delete from the archive CDN.
 *
 * The lifecycle is as follows:
 * - Before we make a backup, we clear any pending entries that might be left over from an aborted backup.
 * - While a backup is in progress, we write entries here for each media item, marking them with a pending flag.
 * - After a backup is fully uploaded, we commit the pending entries and update their version to MAX([SNAPSHOT_VERSION]) + 1
 *
 * The end result is that we have all the media objects referenced in backups, tagged with the most recent snapshot version they were seen at.
 *
 * This lets us know a few things:
 * 1. We know that any non-pending entries whose version < MAX([SNAPSHOT_VERSION]) must have been deleted.
 * 2. We know that any entries with MAX([SNAPSHOT_VERSION]) who aren't fully backed up yet (according to the [AttachmentTable]) need to be backed up.
 *
 * Occasionally, we'll also run a more elaborate "reconciliation" process where we fetch all of the remote CDN entries. That data, combined with this table,
 * will let us do the following:
 * 1. Any entries on the remote CDN that are not present in the table with MAX([SNAPSHOT_VERSION]) can be deleted from the remote CDN.
 * 2. Any entries present in this table with MAX([SNAPSHOT_VERSION]) that are not present on the remote CDN need to be re-uploaded. This is trickier, since the
 *    remote CDN data is too large to fit in memory. To address that, as we page through remote CDN entries, we can set the [LAST_SEEN_ON_REMOTE_SNAPSHOT_VERSION]
 *    equal to MAX([SNAPSHOT_VERSION]). After we're done, any entries whose [SNAPSHOT_VERSION] = MAX([SNAPSHOT_VERSION]), but whose
 *    [LAST_SEEN_ON_REMOTE_SNAPSHOT_VERSION] < MAX([SNAPSHOT_VERSION]) must be entries that were missing from the remote CDN.
 */
class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : DatabaseTable(context, database) {
  companion object {
@@ -52,13 +72,13 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
     * where newer backups have a greater backup id value.
     */
    @VisibleForTesting
    const val LAST_SYNC_TIME = "last_sync_time"
    const val SNAPSHOT_VERSION = "snapshot_version"

    /**
     * Pending sync time, set while a backup is in the process of being exported.
     */
    @VisibleForTesting
    const val PENDING_SYNC_TIME = "pending_sync_time"
    const val IS_PENDING = "is_pending"

    /**
     * Whether or not this entry is for a thumbnail.
@@ -68,58 +88,90 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
    /**
     * Timestamp when media was last seen on archive cdn. Can be reset to default.
     */
    const val LAST_SEEN_ON_REMOTE_TIMESTAMP = "last_seen_on_remote_timestamp"
    const val LAST_SEEN_ON_REMOTE_SNAPSHOT_VERSION = "last_seen_on_remote_snapshot_version"

    /**
     * The remote digest for the media object. This is used to find matching attachments in the attachment table when necessary.
     */
    const val REMOTE_DIGEST = "remote_digest"

    /** Constant representing a [SNAPSHOT_VERSION] version that has not yet been set. */
    const val UNKNOWN_VERSION = -1

    private const val MAX_SNAPSHOT_VERSION_INDEX = "backup_snapshot_version_index"

    /** A query that returns that max [SNAPSHOT_VERSION] presently in the table. An index exists to ensure that this is fast. */
    const val MAX_VERSION = "(SELECT MAX($SNAPSHOT_VERSION) FROM $TABLE_NAME INDEXED BY $MAX_SNAPSHOT_VERSION_INDEX WHERE $SNAPSHOT_VERSION != $UNKNOWN_VERSION)"

    val CREATE_TABLE = """
      CREATE TABLE $TABLE_NAME (
        $ID INTEGER PRIMARY KEY,
        $MEDIA_ID TEXT UNIQUE,
        $MEDIA_ID TEXT NOT NULL UNIQUE,
        $CDN INTEGER,
        $LAST_SYNC_TIME INTEGER DEFAULT 0,
        $PENDING_SYNC_TIME INTEGER,
        $IS_THUMBNAIL INTEGER DEFAULT 0,
        $SNAPSHOT_VERSION INTEGER NOT NULL DEFAULT $UNKNOWN_VERSION,
        $IS_PENDING INTEGER NOT NULL DEFAULT 0,
        $IS_THUMBNAIL INTEGER NOT NULL DEFAULT 0,
        $REMOTE_DIGEST BLOB NOT NULL,
        $LAST_SEEN_ON_REMOTE_TIMESTAMP INTEGER DEFAULT 0
        $LAST_SEEN_ON_REMOTE_SNAPSHOT_VERSION INTEGER NOT NULL DEFAULT 0
      )
    """.trimIndent()

    val CREATE_INDEXES = arrayOf(
      "CREATE INDEX IF NOT EXISTS $MAX_SNAPSHOT_VERSION_INDEX ON $TABLE_NAME ($SNAPSHOT_VERSION DESC) WHERE $SNAPSHOT_VERSION != $UNKNOWN_VERSION"
    )
  }

  /**
   * Writes the set of media items that are slated to be referenced in the next backup, updating their pending sync time.
   * Will insert multiple rows per object -- one for the main item, and one for the thumbnail.
   */
  fun writePendingMediaObjects(mediaObjects: Sequence<ArchiveMediaItem>, pendingSyncTime: Long) {
  fun writePendingMediaObjects(mediaObjects: Sequence<ArchiveMediaItem>) {
    mediaObjects
      .chunked(SqlUtil.MAX_QUERY_ARGS)
      .forEach { chunk ->
        writePendingMediaObjectsChunk(
          chunk.map { MediaEntry(it.mediaId, it.cdn, it.digest, isThumbnail = false) },
          pendingSyncTime
          chunk.map { MediaEntry(it.mediaId, it.cdn, it.digest, isThumbnail = false) }
        )

        writePendingMediaObjectsChunk(
          chunk.map { MediaEntry(it.thumbnailMediaId, it.cdn, it.digest, isThumbnail = true) },
          pendingSyncTime
          chunk.map { MediaEntry(it.thumbnailMediaId, it.cdn, it.digest, isThumbnail = true) }
        )
      }
  }

  /**
   * Commits the pending sync time to the last sync time. This is called once a backup has been successfully uploaded.
   * Commits all pending entries (written via [writePendingMediaObjects]) to have a concrete [SNAPSHOT_VERSION]. The version will be 1 higher than the previous
   * snapshot version.
   */
  fun commitPendingRows() {
    writableDatabase.execSQL("UPDATE $TABLE_NAME SET $LAST_SYNC_TIME = $PENDING_SYNC_TIME")
    writableDatabase.withinTransaction {
      val currentSnapshotVersion = getCurrentSnapshotVersion()
      val nextSnapshotVersion = currentSnapshotVersion + 1

      writableDatabase
        .update(TABLE_NAME)
        .values(
          SNAPSHOT_VERSION to nextSnapshotVersion,
          IS_PENDING to 0
        )
        .where("$IS_PENDING != 0")
        .run()
    }
  }

  fun getPageOfOldMediaObjects(currentSyncTime: Long, pageSize: Int): Set<ArchivedMediaObject> {
  fun getCurrentSnapshotVersion(): Long {
    return readableDatabase
      .select("MAX($SNAPSHOT_VERSION)")
      .from("$TABLE_NAME INDEXED BY $MAX_SNAPSHOT_VERSION_INDEX")
      .where("$SNAPSHOT_VERSION != $UNKNOWN_VERSION")
      .run()
      .readToSingleLong(0)
  }

  fun getPageOfOldMediaObjects(pageSize: Int): Set<ArchivedMediaObject> {
    return readableDatabase.select(MEDIA_ID, CDN)
      .from(TABLE_NAME)
      .where("$LAST_SYNC_TIME < ? AND $LAST_SYNC_TIME = $PENDING_SYNC_TIME", currentSyncTime)
      .where("$SNAPSHOT_VERSION < $MAX_VERSION AND $IS_PENDING = 0")
      .limit(pageSize)
      .run()
      .readToSet {
@@ -127,16 +179,21 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
      }
  }

  fun deleteMediaObjects(mediaObjects: Collection<ArchivedMediaObject>) {
  /**
   * This will remove any old snapshot entries with matching mediaId's. No pending entries or entries in the latest snapshot will be affected.
   */
  fun deleteOldMediaObjects(mediaObjects: Collection<ArchivedMediaObject>) {
    val query = SqlUtil.buildFastCollectionQuery(MEDIA_ID, mediaObjects.map { it.mediaId })

    writableDatabase.delete(TABLE_NAME)
      .where(query.where, query.whereArgs)
      .where("$SNAPSHOT_VERSION < $MAX_VERSION AND $IS_PENDING = 0 AND " + query.where, query.whereArgs)
      .run()
  }

  /**
   * Given a list of media objects, find the ones that we have no knowledge of in our local store.
   * Given a list of media objects, find the ones that are not in the most recent backup snapshot.
   *
   * We purposely allow pending items here -- so long as they were in the most recent complete snapshot, we want to keep them.
   */
  fun getMediaObjectsThatCantBeFound(objects: List<ArchivedMediaObject>): List<ArchivedMediaObject> {
    if (objects.isEmpty()) {
@@ -146,7 +203,8 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
    val queries: List<SqlUtil.Query> = SqlUtil.buildCollectionQuery(
      column = MEDIA_ID,
      values = objects.map { it.mediaId },
      collectionOperator = SqlUtil.CollectionOperator.IN
      collectionOperator = SqlUtil.CollectionOperator.IN,
      prefix = "$SNAPSHOT_VERSION = $MAX_VERSION AND "
    )

    val foundObjects: MutableSet<String> = mutableSetOf()
@@ -166,7 +224,10 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
  }

  /**
   * Given a list of media objects, find the ones that we have no knowledge of in our local store.
   * Given a list of media objects, find the ones that are present in the most recent snapshot, but have a different CDN than the one passed in.
   * This will ignore thumbnails, as the results are intended to be used to update CDNs, which we do not track for thumbnails.
   *
   * We purposely allow pending items here -- either way they're in the latest snapshot, and should have their CDN info updated.
   */
  fun getMediaObjectsWithNonMatchingCdn(objects: List<ArchivedMediaObject>): List<CdnMismatchResult> {
    if (objects.isEmpty()) {
@@ -180,7 +241,7 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
        SELECT a.$REMOTE_DIGEST, b.$CDN
        FROM $TABLE_NAME a
        JOIN input_pairs b ON a.$MEDIA_ID = b.$MEDIA_ID
        WHERE a.$CDN != b.$CDN AND a.$IS_THUMBNAIL = 0
        WHERE a.$CDN != b.$CDN AND a.$IS_THUMBNAIL = 0 AND $SNAPSHOT_VERSION = $MAX_VERSION
      """
    ).readToList { cursor ->
      CdnMismatchResult(
@@ -193,7 +254,7 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
  /**
   * Indicate the time that the set of media objects were seen on the archive CDN. Can be used to reconcile our local state with the server state.
   */
  fun markSeenOnRemote(mediaIdBatch: Collection<String>, time: Long) {
  fun markSeenOnRemote(mediaIdBatch: Collection<String>, snapshotVersion: Long) {
    if (mediaIdBatch.isEmpty()) {
      return
    }
@@ -201,7 +262,7 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
    val query = SqlUtil.buildFastCollectionQuery(MEDIA_ID, mediaIdBatch)
    writableDatabase
      .update(TABLE_NAME)
      .values(LAST_SEEN_ON_REMOTE_TIMESTAMP to time)
      .values(LAST_SEEN_ON_REMOTE_SNAPSHOT_VERSION to snapshotVersion)
      .where(query.where, query.whereArgs)
      .run()
  }
@@ -211,45 +272,39 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
   * This is used to find media objects that have not been seen on the CDN, even though they should be.
   *
   * The cursor contains rows that can be parsed into [MediaEntry] objects.
   *
   * We purposely allow pending items here -- either way they *should* be uploaded.
   */
  fun getMediaObjectsLastSeenOnCdnBeforeTime(time: Long): Cursor {
  fun getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion(snapshotVersion: Long): Cursor {
    return readableDatabase
      .select(MEDIA_ID, CDN, REMOTE_DIGEST, IS_THUMBNAIL)
      .from(TABLE_NAME)
      .where("$LAST_SEEN_ON_REMOTE_TIMESTAMP < $time")
      .where("$LAST_SEEN_ON_REMOTE_SNAPSHOT_VERSION < $snapshotVersion AND $SNAPSHOT_VERSION = $MAX_VERSION")
      .run()
  }

  /**
   * Resets the [LAST_SEEN_ON_REMOTE_TIMESTAMP] column back to zero. It's a good idea to do this after you have run a sync and used the value, as it can
   * mitigate various issues that can arise from having an incorrect local clock.
   */
  fun clearLastSeenOnRemote() {
    writableDatabase
      .updateAll(TABLE_NAME)
      .values(LAST_SEEN_ON_REMOTE_TIMESTAMP to 0)
      .run()
  }

  private fun writePendingMediaObjectsChunk(chunk: List<MediaEntry>, pendingSyncTime: Long) {
  private fun writePendingMediaObjectsChunk(chunk: List<MediaEntry>) {
    val values = chunk.map {
      contentValuesOf(
        MEDIA_ID to it.mediaId,
        CDN to it.cdn,
        REMOTE_DIGEST to it.digest,
        IS_THUMBNAIL to it.isThumbnail.toInt(),
        PENDING_SYNC_TIME to pendingSyncTime
        SNAPSHOT_VERSION to UNKNOWN_VERSION,
        IS_PENDING to 1
      )
    }

    val query = SqlUtil.buildSingleBulkInsert(TABLE_NAME, arrayOf(MEDIA_ID, CDN, REMOTE_DIGEST, IS_THUMBNAIL, PENDING_SYNC_TIME), values)
    val query = SqlUtil.buildSingleBulkInsert(TABLE_NAME, arrayOf(MEDIA_ID, CDN, REMOTE_DIGEST, IS_THUMBNAIL, SNAPSHOT_VERSION, IS_PENDING), values)

    writableDatabase.execSQL(
      query.where +
      """
        ${query.where}
        ON CONFLICT($MEDIA_ID) DO UPDATE SET
        $PENDING_SYNC_TIME = EXCLUDED.$PENDING_SYNC_TIME,
        $CDN = EXCLUDED.$CDN
        $CDN = excluded.$CDN,
        $REMOTE_DIGEST = excluded.$REMOTE_DIGEST,
        $IS_THUMBNAIL = excluded.$IS_THUMBNAIL,
        $IS_PENDING = excluded.$IS_PENDING
      """,
      query.whereArgs
    )

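Taken together, the new API implies a backup-and-prune flow roughly like the following. This is a hedged sketch that uses only the signatures visible above; itemsInThisBackup() and the CDN deletion step are hypothetical stand-ins for the real job wiring:

  fun sketchBackupAndPrune() {
    val table = SignalDatabase.backupMediaSnapshots

    // 1. While exporting a backup, stage every referenced media item (plus its thumbnail row) as pending.
    table.writePendingMediaObjects(mediaObjects = itemsInThisBackup()) // hypothetical Sequence<ArchiveMediaItem>

    // 2. Once the upload succeeds, promote the pending rows to MAX(snapshot_version) + 1.
    table.commitPendingRows()

    // 3. Everything below the new current version is no longer referenced by any backup and can be pruned.
    var stale = table.getPageOfOldMediaObjects(pageSize = 500)
    while (stale.isNotEmpty()) {
      // deleteFromArchiveCdn(stale) would run here in the real job (hypothetical helper).
      table.deleteOldMediaObjects(stale)
      stale = table.getPageOfOldMediaObjects(pageSize = 500)
    }
  }
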
@@ -171,6 +171,7 @@ open class SignalDatabase(private val context: Application, databaseSecret: Data
    executeStatements(db, KyberPreKeyTable.CREATE_INDEXES)
    executeStatements(db, ChatFolderTables.CREATE_INDEXES)
    executeStatements(db, NameCollisionTables.CREATE_INDEXES)
    executeStatements(db, BackupMediaSnapshotTable.CREATE_INDEXES)

    executeStatements(db, MessageSendLogTables.CREATE_TRIGGERS)

@@ -131,6 +131,7 @@ import org.thoughtcrime.securesms.database.helpers.migration.V273_FixUnreadOrigi
import org.thoughtcrime.securesms.database.helpers.migration.V274_BackupMediaSnapshotLastSeenOnRemote
import org.thoughtcrime.securesms.database.helpers.migration.V275_EnsureDefaultAllChatsFolder
import org.thoughtcrime.securesms.database.helpers.migration.V276_AttachmentCdnDefaultValueMigration
import org.thoughtcrime.securesms.database.helpers.migration.V278_BackupSnapshotTableVersions
import org.thoughtcrime.securesms.database.helpers.migration.V277_AddNotificationProfileStorageSync
import org.thoughtcrime.securesms.database.SQLiteDatabase as SignalSqliteDatabase

@@ -269,10 +270,11 @@ object SignalDatabaseMigrations {
    274 to V274_BackupMediaSnapshotLastSeenOnRemote,
    275 to V275_EnsureDefaultAllChatsFolder,
    276 to V276_AttachmentCdnDefaultValueMigration,
    277 to V277_AddNotificationProfileStorageSync
    277 to V277_AddNotificationProfileStorageSync,
    278 to V278_BackupSnapshotTableVersions
  )

  const val DATABASE_VERSION = 277
  const val DATABASE_VERSION = 278

  @JvmStatic
  fun migrate(context: Application, db: SignalSqliteDatabase, oldVersion: Int, newVersion: Int) {

@@ -0,0 +1,34 @@
/*
 * Copyright 2025 Signal Messenger, LLC
 * SPDX-License-Identifier: AGPL-3.0-only
 */

package org.thoughtcrime.securesms.database.helpers.migration

import android.app.Application
import org.thoughtcrime.securesms.database.BackupMediaSnapshotTable
import org.thoughtcrime.securesms.database.SQLiteDatabase

/**
 * We want to switch [BackupMediaSnapshotTable] to use versions instead of timestamps.
 */
object V278_BackupSnapshotTableVersions : SignalDatabaseMigration {
  override fun migrate(context: Application, db: SQLiteDatabase, oldVersion: Int, newVersion: Int) {
    db.execSQL("DROP TABLE backup_media_snapshot")
    db.execSQL(
      """
        CREATE TABLE backup_media_snapshot (
          _id INTEGER PRIMARY KEY,
          media_id TEXT NOT NULL UNIQUE,
          cdn INTEGER,
          snapshot_version INTEGER NOT NULL DEFAULT -1,
          is_pending INTEGER NOT NULL DEFAULT 0,
          is_thumbnail INTEGER NOT NULL DEFAULT 0,
          remote_digest BLOB NOT NULL,
          last_seen_on_remote_snapshot_version INTEGER NOT NULL DEFAULT 0
        )
      """
    )
    db.execSQL("CREATE INDEX IF NOT EXISTS backup_snapshot_version_index ON backup_media_snapshot (snapshot_version DESC) WHERE snapshot_version != -1")
  }
}

@@ -82,22 +82,20 @@ class BackupMediaSnapshotSyncJob private constructor(
    return syncDataFromCdn() ?: Result.success()
  }

  override fun onFailure() {
    SignalDatabase.backupMediaSnapshots.clearLastSeenOnRemote()
  }
  override fun onFailure() = Unit

  /**
   * Looks through our local snapshot of what attachments we put in the last backup file, and uses that to delete any old attachments from the archive CDN
   * that we no longer need.
   */
  private fun removeLocallyDeletedAttachmentsFromCdn(): Result? {
    var mediaObjects = SignalDatabase.backupMediaSnapshots.getPageOfOldMediaObjects(syncTime, REMOTE_DELETE_BATCH_SIZE)
    var mediaObjects = SignalDatabase.backupMediaSnapshots.getPageOfOldMediaObjects(REMOTE_DELETE_BATCH_SIZE)

    while (mediaObjects.isNotEmpty()) {
      deleteMediaObjectsFromCdn(mediaObjects)?.let { result -> return result }
      SignalDatabase.backupMediaSnapshots.deleteMediaObjects(mediaObjects)
      SignalDatabase.backupMediaSnapshots.deleteOldMediaObjects(mediaObjects)

      mediaObjects = SignalDatabase.backupMediaSnapshots.getPageOfOldMediaObjects(syncTime, CDN_PAGE_SIZE)
      mediaObjects = SignalDatabase.backupMediaSnapshots.getPageOfOldMediaObjects(CDN_PAGE_SIZE)
    }

    return null
@@ -140,7 +138,7 @@ class BackupMediaSnapshotSyncJob private constructor(
      deleteMediaObjectsFromCdn(attachmentsToDelete)?.let { result -> return result }
    }

    val entriesNeedingRepairCursor = SignalDatabase.backupMediaSnapshots.getMediaObjectsLastSeenOnCdnBeforeTime(syncTime)
    val entriesNeedingRepairCursor = SignalDatabase.backupMediaSnapshots.getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion(syncTime)
    val needRepairCount = entriesNeedingRepairCursor.count

    if (needRepairCount > 0) {
@@ -178,7 +176,7 @@ class BackupMediaSnapshotSyncJob private constructor(

    SignalDatabase.backupMediaSnapshots.markSeenOnRemote(
      mediaIdBatch = mediaObjects.map { it.mediaId },
      time = syncTime
      snapshotVersion = syncTime
    )

    val notFoundMediaObjects = SignalDatabase.backupMediaSnapshots.getMediaObjectsThatCantBeFound(mediaObjects)

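For the reconciliation pass in the job above, the same idea applies: stamp every row seen on the remote CDN with the current snapshot version, and anything in the latest snapshot that was never stamped must be missing remotely. A hedged sketch of that flow; fetchNextRemotePage() stands in for the real CDN paging:

  fun sketchReconcileWithCdn() {
    val table = SignalDatabase.backupMediaSnapshots
    val currentVersion = table.getCurrentSnapshotVersion()

    var remotePage = fetchNextRemotePage() // hypothetical: a List<ArchivedMediaObject> page from the archive CDN
    while (remotePage.isNotEmpty()) {
      table.markSeenOnRemote(mediaIdBatch = remotePage.map { it.mediaId }, snapshotVersion = currentVersion)
      remotePage = fetchNextRemotePage()
    }

    // Rows in the latest snapshot that were never stamped were not found on the CDN and need repair.
    table.getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion(currentVersion).use { cursor ->
      // parse the cursor rows and re-upload the corresponding attachments
    }
  }
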
@@ -272,8 +272,7 @@ class BackupMessagesJob private constructor(
    if (mediaBackupEnabled) {
      db.attachmentTable.getAttachmentsEligibleForArchiveUpload().use {
        SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(
          mediaObjects = ArchiveMediaItemIterator(it).asSequence(),
          pendingSyncTime = currentTime
          mediaObjects = ArchiveMediaItemIterator(it).asSequence()
        )
      }
    }

@@ -418,9 +418,11 @@ object SqlUtil {
      .toList()
  }

  fun buildSingleBulkInsert(tableName: String, columns: Array<String>, contentValues: List<ContentValues>): Query {
  fun buildSingleBulkInsert(tableName: String, columns: Array<String>, contentValues: List<ContentValues>, onConflict: String? = null): Query {
    val conflictString = onConflict?.let { " OR $onConflict" } ?: ""

    val builder = StringBuilder()
    builder.append("INSERT INTO ").append(tableName).append(" (")
    builder.append("INSERT$conflictString INTO ").append(tableName).append(" (")

    val columnString = columns.joinToString(separator = ", ")
    builder.append(columnString)
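The new optional onConflict parameter only prefixes the generated statement (for example, "IGNORE" yields INSERT OR IGNORE INTO ...). A small hedged usage sketch; the table, columns, and db handle are made up for illustration, and contentValuesOf comes from androidx.core.content:

  val values = listOf(
    contentValuesOf("media_id" to "abc", "cdn" to 3),
    contentValuesOf("media_id" to "def", "cdn" to 3)
  )

  val query = SqlUtil.buildSingleBulkInsert(
    tableName = "example_table",
    columns = arrayOf("media_id", "cdn"),
    contentValues = values,
    onConflict = "IGNORE"
  )

  // As in writePendingMediaObjectsChunk above, query.where holds the full statement and query.whereArgs its bind args.
  db.execSQL(query.where, query.whereArgs)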