Mirror of https://github.com/signalapp/Signal-Android.git (synced 2025-12-24 04:58:45 +00:00)
Fix some issues with BackupMediaSnapshots.
@@ -21,7 +21,7 @@ class BackupMediaSnapshotTableTest {

  @Test
  fun givenAnEmptyTable_whenIWriteToTable_thenIExpectEmptyTable() {
-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = 100))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = 100))

    val count = getCountForLatestSnapshot(includeThumbnails = true)
@@ -30,7 +30,7 @@ class BackupMediaSnapshotTableTest {

  @Test
  fun givenAnEmptyTable_whenIWriteToTableAndCommit_thenIExpectFilledTable() {
-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = 100))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = 100))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val count = getCountForLatestSnapshot(includeThumbnails = false)
@@ -43,7 +43,8 @@ class BackupMediaSnapshotTableTest {
    val inputCount = 100
    val countWithThumbnails = inputCount * 2

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = inputCount))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = inputCount))
+    SignalDatabase.backupMediaSnapshots.writeThumbnailPendingMediaObjects(generateArchiveMediaItemSequence(count = inputCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val count = getCountForLatestSnapshot(includeThumbnails = true)
@@ -55,7 +56,7 @@ class BackupMediaSnapshotTableTest {
  fun givenAnEmptyTable_whenIWriteToTableAndCommitQuotes_thenIExpectFilledTableWithNoThumbnails() {
    val inputCount = 100

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = inputCount, quote = true))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = inputCount, quote = true))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val count = getCountForLatestSnapshot(includeThumbnails = true)
@@ -67,7 +68,7 @@ class BackupMediaSnapshotTableTest {
  fun givenAnEmptyTable_whenIWriteToTableAndCommitNonMedia_thenIExpectFilledTableWithNoThumbnails() {
    val inputCount = 100

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = inputCount, contentType = "text/plain"))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = inputCount, contentType = "text/plain"))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val count = getCountForLatestSnapshot(includeThumbnails = true)
@@ -80,11 +81,11 @@ class BackupMediaSnapshotTableTest {
    val initialCount = 100
    val additionalCount = 25

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    // This relies on how the sequence of mediaIds is generated in tests -- the ones we generate here will have the mediaIds as the ones we generated above
-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = additionalCount))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = additionalCount))

    val pendingCount = getCountForPending(includeThumbnails = false)
    val latestVersionCount = getCountForLatestSnapshot(includeThumbnails = false)
@@ -98,11 +99,11 @@ class BackupMediaSnapshotTableTest {
    val initialCount = 100
    val additionalCount = 25

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    // This relies on how the sequence of mediaIds is generated in tests -- the ones we generate here will have the mediaIds as the ones we generated above
-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = additionalCount))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = additionalCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val pendingCount = getCountForPending(includeThumbnails = false)
@@ -119,10 +120,10 @@ class BackupMediaSnapshotTableTest {
    val initialCount = 100
    val additionalCount = 25

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = additionalCount))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = additionalCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val page = SignalDatabase.backupMediaSnapshots.getPageOfOldMediaObjects(pageSize = 1_000)
@@ -145,7 +146,7 @@ class BackupMediaSnapshotTableTest {
      createArchiveMediaObject(seed = 2, cdn = 2)
    )

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence())
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(localData.asSequence())
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val mismatches = SignalDatabase.backupMediaSnapshots.getMediaObjectsWithNonMatchingCdn(remoteData)
@@ -164,7 +165,7 @@ class BackupMediaSnapshotTableTest {
      createArchiveMediaObject(seed = 2, cdn = 99)
    )

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence())
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(localData.asSequence())
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val mismatches = SignalDatabase.backupMediaSnapshots.getMediaObjectsWithNonMatchingCdn(remoteData)
@@ -186,7 +187,7 @@ class BackupMediaSnapshotTableTest {
      createArchiveMediaObject(seed = 2, cdn = 2)
    )

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence())
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(localData.asSequence())
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val notFound = SignalDatabase.backupMediaSnapshots.getMediaObjectsThatCantBeFound(remoteData)
@@ -205,7 +206,7 @@ class BackupMediaSnapshotTableTest {
      createArchiveMediaObject(seed = 3, cdn = 2)
    )

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(localData.asSequence())
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(localData.asSequence())
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val notFound = SignalDatabase.backupMediaSnapshots.getMediaObjectsThatCantBeFound(remoteData)
@@ -222,7 +223,7 @@ class BackupMediaSnapshotTableTest {

  @Test
  fun getCurrentSnapshotVersion_singleCommit() {
-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = 100))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = 100))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val version = SignalDatabase.backupMediaSnapshots.getCurrentSnapshotVersion()
@@ -234,7 +235,8 @@ class BackupMediaSnapshotTableTest {
  fun getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion_noneMarkedSeen() {
    val initialCount = 100

-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
+    SignalDatabase.backupMediaSnapshots.writeThumbnailPendingMediaObjects(generateArchiveMediaItemSequence(count = initialCount))
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val notSeenCount = SignalDatabase.backupMediaSnapshots.getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion(1).count
@@ -250,7 +252,8 @@ class BackupMediaSnapshotTableTest {
    val markSeenCount = 25

    val itemsToCommit = generateArchiveMediaItemSequence(count = initialCount)
-    SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(itemsToCommit)
+    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(itemsToCommit)
+    SignalDatabase.backupMediaSnapshots.writeThumbnailPendingMediaObjects(itemsToCommit)
    SignalDatabase.backupMediaSnapshots.commitPendingRows()

    val normalIdsToMarkSeen = itemsToCommit.take(markSeenCount).map { it.mediaId }.toList()
@@ -30,6 +30,8 @@ import com.bumptech.glide.Glide
import com.fasterxml.jackson.annotation.JsonProperty
import kotlinx.parcelize.IgnoredOnParcel
import kotlinx.parcelize.Parcelize
+import okio.ByteString
+import okio.ByteString.Companion.toByteString
import org.json.JSONArray
import org.json.JSONException
import org.signal.core.util.Base64
@@ -46,6 +48,7 @@ import org.signal.core.util.groupBy
import org.signal.core.util.isNull
import org.signal.core.util.logging.Log
import org.signal.core.util.readToList
+import org.signal.core.util.readToSet
import org.signal.core.util.readToSingleInt
import org.signal.core.util.readToSingleLong
import org.signal.core.util.readToSingleObject
@@ -95,6 +98,7 @@ import org.thoughtcrime.securesms.dependencies.AppDependencies
import org.thoughtcrime.securesms.jobs.AttachmentDownloadJob
import org.thoughtcrime.securesms.jobs.AttachmentUploadJob
import org.thoughtcrime.securesms.jobs.GenerateAudioWaveFormJob
+import org.thoughtcrime.securesms.keyvalue.SignalStore
import org.thoughtcrime.securesms.mms.DecryptableUri
import org.thoughtcrime.securesms.mms.MediaStream
import org.thoughtcrime.securesms.mms.MmsException
@@ -111,6 +115,8 @@ import org.thoughtcrime.securesms.util.StorageUtil
import org.thoughtcrime.securesms.util.Util
import org.thoughtcrime.securesms.video.EncryptedMediaDataSource
import org.whispersystems.signalservice.api.attachment.AttachmentUploadResult
+import org.whispersystems.signalservice.api.backup.MediaId
+import org.whispersystems.signalservice.api.backup.MediaName
import org.whispersystems.signalservice.api.crypto.AttachmentCipherStreamUtil
import org.whispersystems.signalservice.api.util.UuidUtil
import org.whispersystems.signalservice.internal.crypto.PaddingInputStream
@@ -422,10 +428,10 @@ class AttachmentTable(
  }

  /**
-   * Returns a cursor (with just the plaintextHash+remoteKey+archive_cdn) for all attachments that are slated to be included in the current archive upload.
+   * Returns a cursor (with just the plaintextHash+remoteKey+archive_cdn) for all full-size attachments that are slated to be included in the current archive upload.
   * Used for snapshotting data in [BackupMediaSnapshotTable].
   */
-  fun getAttachmentsThatWillBeIncludedInArchive(): Cursor {
+  fun getFullSizeAttachmentsThatWillBeIncludedInArchive(): Cursor {
    return readableDatabase
      .select(DATA_HASH_END, REMOTE_KEY, ARCHIVE_CDN, QUOTE, CONTENT_TYPE)
      .from("$TABLE_NAME LEFT JOIN ${MessageTable.TABLE_NAME} ON $TABLE_NAME.$MESSAGE_ID = ${MessageTable.TABLE_NAME}.${MessageTable.ID}")
@@ -433,6 +439,25 @@ class AttachmentTable(
      .run()
  }

+  /**
+   * Returns a cursor (with just the plaintextHash+remoteKey+archive_cdn) for all thumbnail attachments that are slated to be included in the current archive upload.
+   * Used for snapshotting data in [BackupMediaSnapshotTable].
+   */
+  fun getThumbnailAttachmentsThatWillBeIncludedInArchive(): Cursor {
+    return readableDatabase
+      .select(DATA_HASH_END, REMOTE_KEY, ARCHIVE_CDN, QUOTE, CONTENT_TYPE)
+      .from("$TABLE_NAME LEFT JOIN ${MessageTable.TABLE_NAME} ON $TABLE_NAME.$MESSAGE_ID = ${MessageTable.TABLE_NAME}.${MessageTable.ID}")
+      .where(
+        """
+        ${buildAttachmentsThatNeedUploadQuery(transferStateFilter = "$ARCHIVE_THUMBNAIL_TRANSFER_STATE != ${ArchiveTransferState.PERMANENT_FAILURE.value}")} AND
+        $QUOTE = 0 AND
+        ($CONTENT_TYPE LIKE 'image/%' OR $CONTENT_TYPE LIKE 'video/%') AND
+        $CONTENT_TYPE != 'image/svg+xml'
+        """
+      )
+      .run()
+  }
+
  fun hasData(attachmentId: AttachmentId): Boolean {
    return readableDatabase
      .exists(TABLE_NAME)
@@ -1074,6 +1099,7 @@ class AttachmentTable(
   * Should be the same or subset of that returned by [getAttachmentsThatNeedArchiveUpload].
   */
  fun getPendingArchiveUploadBytes(): Long {
+    val archiveTransferStateFilter = "$ARCHIVE_TRANSFER_STATE NOT IN (${ArchiveTransferState.FINISHED.value}, ${ArchiveTransferState.PERMANENT_FAILURE.value})"
    return readableDatabase
      .rawQuery(
        """
@@ -1081,50 +1107,13 @@ class AttachmentTable(
        FROM (
          SELECT DISTINCT $DATA_HASH_END, $REMOTE_KEY, $DATA_SIZE
          FROM $TABLE_NAME LEFT JOIN ${MessageTable.TABLE_NAME} ON $TABLE_NAME.$MESSAGE_ID = ${MessageTable.TABLE_NAME}.${MessageTable.ID}
-          WHERE
-            $DATA_FILE NOT NULL AND
-            $DATA_HASH_END NOT NULL AND
-            $REMOTE_KEY NOT NULL AND
-            $ARCHIVE_TRANSFER_STATE NOT IN (${ArchiveTransferState.FINISHED.value}, ${ArchiveTransferState.PERMANENT_FAILURE.value}) AND
-            $CONTENT_TYPE != '${MediaUtil.LONG_TEXT}' AND
-            (${MessageTable.STORY_TYPE} = 0 OR ${MessageTable.STORY_TYPE} IS NULL) AND
-            (${MessageTable.EXPIRES_IN} = 0 OR ${MessageTable.EXPIRES_IN} > ${ChatItemArchiveExporter.EXPIRATION_CUTOFF.inWholeMilliseconds}) AND
-            ${MessageTable.TABLE_NAME}.${MessageTable.VIEW_ONCE} = 0
+          WHERE ${buildAttachmentsThatNeedUploadQuery(archiveTransferStateFilter)}
        )
        """.trimIndent()
      )
      .readToSingleLong()
  }

-  /**
-   * Returns sum of the file sizes of attachments that are not fully uploaded to the archive CDN.
-   */
-  fun debugGetPendingArchiveUploadAttachments(): List<DatabaseAttachment> {
-    return readableDatabase
-      .rawQuery(
-        """
-        SELECT *
-        FROM $TABLE_NAME as t
-        JOIN (
-          SELECT DISTINCT $DATA_HASH_END, $REMOTE_KEY, $DATA_SIZE
-          FROM $TABLE_NAME LEFT JOIN ${MessageTable.TABLE_NAME} ON $TABLE_NAME.$MESSAGE_ID = ${MessageTable.TABLE_NAME}.${MessageTable.ID}
-          WHERE
-            $DATA_FILE NOT NULL AND
-            $DATA_HASH_END NOT NULL AND
-            $REMOTE_KEY NOT NULL AND
-            $ARCHIVE_TRANSFER_STATE NOT IN (${ArchiveTransferState.FINISHED.value}, ${ArchiveTransferState.PERMANENT_FAILURE.value}) AND
-            $CONTENT_TYPE != '${MediaUtil.LONG_TEXT}' AND
-            (${MessageTable.STORY_TYPE} = 0 OR ${MessageTable.STORY_TYPE} IS NULL) AND
-            (${MessageTable.EXPIRES_IN} = 0 OR ${MessageTable.EXPIRES_IN} > ${ChatItemArchiveExporter.EXPIRATION_CUTOFF.inWholeMilliseconds})
-        ) as filtered
-        ON t.$DATA_HASH_END = filtered.$DATA_HASH_END
-        """.trimIndent()
-      )
-      .readToList {
-        it.readAttachment()
-      }
-  }
-
  /**
   * Clears out the incrementalMac for the specified [attachmentId], as well as any other attachments that share the same ([remoteKey], [plaintextHash]) pair (if present).
   */
@@ -3186,9 +3175,41 @@
      }
  }

+  /**
+   * Important: This is an expensive query that involves iterating over every row in the table. Only call this for debug stuff!
+   */
+  fun debugGetAttachmentsForMediaIds(mediaIds: Set<MediaId>, limit: Int): List<Pair<DatabaseAttachment, Boolean>> {
+    val byteStringMediaIds: Set<ByteString> = mediaIds.map { it.value.toByteString() }.toSet()
+    val found = mutableListOf<Pair<DatabaseAttachment, Boolean>>()
+
+    readableDatabase
+      .select(*PROJECTION)
+      .from(TABLE_NAME)
+      .where("$REMOTE_KEY NOT NULL AND $DATA_HASH_END NOT NULL")
+      .run()
+      .forEach { cursor ->
+        val remoteKey = Base64.decode(cursor.requireNonNullString(REMOTE_KEY))
+        val plaintextHash = Base64.decode(cursor.requireNonNullString(DATA_HASH_END))
+        val mediaId = MediaName.fromPlaintextHashAndRemoteKey(plaintextHash, remoteKey).toMediaId(SignalStore.backup.mediaRootBackupKey).value.toByteString()
+        val mediaIdThumbnail = MediaName.fromPlaintextHashAndRemoteKeyForThumbnail(plaintextHash, remoteKey).toMediaId(SignalStore.backup.mediaRootBackupKey).value.toByteString()
+
+        if (mediaId in byteStringMediaIds) {
+          found.add(getAttachment(cursor) to false)
+        }
+
+        if (mediaIdThumbnail in byteStringMediaIds) {
+          found.add(getAttachment(cursor) to true)
+        }
+
+        if (found.size >= limit) return@forEach
+      }
+
+    return found
+  }
+
  fun debugGetAttachmentStats(): DebugAttachmentStats {
    val totalAttachmentRows = readableDatabase.count().from(TABLE_NAME).run().readToSingleLong(0)
-    val totalEligibleForUploadRows = getAttachmentsThatWillBeIncludedInArchive().count
+    val totalEligibleForUploadRows = getFullSizeAttachmentsThatWillBeIncludedInArchive().count

    val totalUniqueDataFiles = readableDatabase.select("COUNT(DISTINCT $DATA_FILE)").from(TABLE_NAME).run().readToSingleLong(0)
    val totalUniqueMediaNames = readableDatabase.query("SELECT COUNT(*) FROM (SELECT DISTINCT $DATA_HASH_END, $REMOTE_KEY FROM $TABLE_NAME WHERE $DATA_HASH_END NOT NULL AND $REMOTE_KEY NOT NULL)").readToSingleLong(0)
@@ -3273,6 +3294,26 @@
    )
  }

+  fun getDebugMediaInfoForEntries(hashes: Collection<BackupMediaSnapshotTable.MediaEntry>): Set<DebugArchiveMediaInfo> {
+    val entriesByHash = hashes.associateBy { Base64.encodeWithPadding(it.plaintextHash) }
+
+    val query = SqlUtil.buildFastCollectionQuery(DATA_HASH_END, entriesByHash.keys)
+
+    return readableDatabase
+      .select(ID, MESSAGE_ID, CONTENT_TYPE, DATA_HASH_END)
+      .from(TABLE_NAME)
+      .where(query.where, query.whereArgs)
+      .run()
+      .readToSet { cursor ->
+        DebugArchiveMediaInfo(
+          attachmentId = AttachmentId(cursor.requireLong(ID)),
+          messageId = cursor.requireLong(MESSAGE_ID),
+          contentType = cursor.requireString(CONTENT_TYPE),
+          isThumbnail = entriesByHash[cursor.requireString(DATA_HASH_END)]!!.isThumbnail
+        )
+      }
+  }
+
  fun debugAttachmentStatsForBackupProto(): BackupDebugInfo.AttachmentDetails {
    val archiveStateCounts = ArchiveTransferState
      .entries.associateWith {
@@ -3676,4 +3717,11 @@
  data class CreateRemoteKeyResult(val totalCount: Int, val notQuoteOrSickerDupeNotFoundCount: Int, val notQuoteOrSickerDupeFoundCount: Int) {
    val unexpectedKeyCreation = notQuoteOrSickerDupeFoundCount > 0 || notQuoteOrSickerDupeNotFoundCount > 0
  }
+
+  class DebugArchiveMediaInfo(
+    val attachmentId: AttachmentId,
+    val messageId: Long,
+    val contentType: String?,
+    val isThumbnail: Boolean
+  )
}
@@ -24,7 +24,6 @@ import org.signal.core.util.toInt
import org.signal.core.util.update
import org.signal.core.util.withinTransaction
import org.thoughtcrime.securesms.backup.v2.ArchivedMediaObject
-import org.thoughtcrime.securesms.util.MediaUtil

/**
 * When we delete attachments locally, we can't immediately delete them from the archive CDN. This is because there is still a backup that exists that
@@ -129,26 +128,29 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
  }

  /**
-   * Writes the set of media items that are slated to be referenced in the next backup, updating their pending sync time.
+   * Writes the set of full-size media items that are slated to be referenced in the next backup, updating their pending sync time.
   * Will insert multiple rows per object -- one for the main item, and one for the thumbnail.
   */
-  fun writePendingMediaObjects(mediaObjects: Sequence<ArchiveMediaItem>) {
+  fun writeFullSizePendingMediaObjects(mediaObjects: Sequence<ArchiveMediaItem>) {
    mediaObjects
      .chunked(SqlUtil.MAX_QUERY_ARGS)
      .forEach { chunk ->
-        // Full attachment
        writePendingMediaObjectsChunk(
-          chunk
-            .filterNot { MediaUtil.isViewOnceType(it.contentType) || MediaUtil.isLongTextType(it.contentType) }
-            .map { MediaEntry(it.mediaId, it.cdn, it.plaintextHash, it.remoteKey, isThumbnail = false) }
+          chunk.map { MediaEntry(it.mediaId, it.cdn, it.plaintextHash, it.remoteKey, isThumbnail = false) }
        )
+      }
+  }

-        // Thumbnail
+  /**
+   * Writes the set of thumbnail media items that are slated to be referenced in the next backup, updating their pending sync time.
+   * Will insert multiple rows per object -- one for the main item, and one for the thumbnail.
+   */
+  fun writeThumbnailPendingMediaObjects(mediaObjects: Sequence<ArchiveMediaItem>) {
+    mediaObjects
+      .chunked(SqlUtil.MAX_QUERY_ARGS)
+      .forEach { chunk ->
        writePendingMediaObjectsChunk(
-          chunk
-            .filterNot { it.quote }
-            .filter { MediaUtil.isImageOrVideoType(it.contentType) }
-            .map { MediaEntry(it.thumbnailMediaId, it.cdn, it.plaintextHash, it.remoteKey, isThumbnail = true) }
+          chunk.map { MediaEntry(it.thumbnailMediaId, it.cdn, it.plaintextHash, it.remoteKey, isThumbnail = true) }
        )
      }
  }
@@ -237,6 +239,32 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
    return objects.filterNot { foundObjects.contains(it.mediaId) }.toSet()
  }

+  fun getMediaEntriesForObjects(objects: List<ArchivedMediaObject>): Set<MediaEntry> {
+    if (objects.isEmpty()) {
+      return emptySet()
+    }
+
+    val queries: List<SqlUtil.Query> = SqlUtil.buildCollectionQuery(
+      column = MEDIA_ID,
+      values = objects.map { it.mediaId },
+      collectionOperator = SqlUtil.CollectionOperator.IN,
+      prefix = "$SNAPSHOT_VERSION = $MAX_VERSION AND "
+    )
+
+    val entries: MutableSet<MediaEntry> = mutableSetOf()
+
+    for (query in queries) {
+      entries += readableDatabase
+        .select(MEDIA_ID, CDN, PLAINTEXT_HASH, REMOTE_KEY, IS_THUMBNAIL)
+        .from("$TABLE_NAME JOIN ${AttachmentTable.TABLE_NAME}")
+        .where(query.where, query.whereArgs)
+        .run()
+        .readToList { MediaEntry.fromCursor(it) }
+    }
+
+    return entries.toSet()
+  }
+
  /**
   * Given a list of media objects, find the ones that are present in the most recent snapshot, but have a different CDN than the one passed in.
   * This will ignore thumbnails, as the results are intended to be used to update CDNs, which we do not track for thumbnails.
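For orientation, here is a minimal sketch of how the renamed and split snapshot-write API fits together. It is assembled only from calls that appear elsewhere in this diff (in BackupMessagesJob and the table tests); the wrapper function and the cursor variable name are illustrative, not part of the change.

// Illustrative sketch only: every call below appears in this diff, but the wrapper function is hypothetical.
fun snapshotPendingArchiveMedia(db: SignalDatabase) {
  // Full-size attachments that are eligible for the next archive upload.
  db.attachmentTable.getFullSizeAttachmentsThatWillBeIncludedInArchive().use { cursor ->
    SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(
      mediaObjects = ArchiveMediaItemIterator(cursor).asSequence()
    )
  }

  // Thumbnails are now selected by the AttachmentTable query itself rather than filtered inside the snapshot table.
  db.attachmentTable.getThumbnailAttachmentsThatWillBeIncludedInArchive().use { cursor ->
    SignalDatabase.backupMediaSnapshots.writeThumbnailPendingMediaObjects(
      mediaObjects = ArchiveMediaItemIterator(cursor).asSequence()
    )
  }

  // Per the table tests above, committing promotes the pending rows into the current snapshot version.
  SignalDatabase.backupMediaSnapshots.commitPendingRows()
}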
@@ -5,22 +5,35 @@

package org.thoughtcrime.securesms.jobs

import android.app.Notification
import android.app.PendingIntent
import android.content.Intent
import androidx.core.app.NotificationCompat
import androidx.core.app.NotificationManagerCompat
import org.signal.core.util.PendingIntentFlags
import org.signal.core.util.forEach
import org.signal.core.util.logging.Log
import org.signal.core.util.nullIfBlank
import org.thoughtcrime.securesms.R
import org.thoughtcrime.securesms.backup.v2.ArchivedMediaObject
import org.thoughtcrime.securesms.backup.v2.BackupRepository
import org.thoughtcrime.securesms.database.BackupMediaSnapshotTable
import org.thoughtcrime.securesms.database.SignalDatabase
import org.thoughtcrime.securesms.dependencies.AppDependencies
import org.thoughtcrime.securesms.jobmanager.Job
import org.thoughtcrime.securesms.jobmanager.impl.NetworkConstraint
import org.thoughtcrime.securesms.jobs.protos.ArchiveAttachmentReconciliationJobData
import org.thoughtcrime.securesms.keyvalue.SignalStore
import org.thoughtcrime.securesms.logsubmit.SubmitDebugLogActivity
import org.thoughtcrime.securesms.notifications.NotificationChannels
import org.thoughtcrime.securesms.notifications.NotificationIds
import org.thoughtcrime.securesms.util.RemoteConfig
import org.whispersystems.signalservice.api.NetworkResult
import org.whispersystems.signalservice.api.archive.ArchiveGetMediaItemsResponse
import org.whispersystems.signalservice.api.backup.MediaId
import java.lang.RuntimeException
import kotlin.time.Duration.Companion.days
import kotlin.time.Duration.Companion.hours

/**
 * We do our best to keep our local attachments in sync with the archive CDN, but we still want to have a backstop that periodically
@@ -88,7 +101,8 @@ class ArchiveAttachmentReconciliationJob private constructor(
    }

    val timeSinceLastSync = System.currentTimeMillis() - SignalStore.backup.lastAttachmentReconciliationTime
-    if (!forced && serverCursor == null && timeSinceLastSync > 0 && timeSinceLastSync < RemoteConfig.archiveReconciliationSyncInterval.inWholeMilliseconds) {
+    val syncThreshold = if (RemoteConfig.internalUser) 12.hours.inWholeMilliseconds else RemoteConfig.archiveReconciliationSyncInterval.inWholeMilliseconds
+    if (!forced && serverCursor == null && timeSinceLastSync > 0 && timeSinceLastSync < syncThreshold) {
      Log.d(TAG, "No need to do a remote sync yet. Time since last sync: $timeSinceLastSync ms")
      return Result.success()
    }
@@ -123,6 +137,8 @@ class ArchiveAttachmentReconciliationJob private constructor(
      }
      check(archivedItemPage != null)

+      Log.d(TAG, "Fetched CDN page. Requested size: $CDN_FETCH_LIMIT, Actual size: ${archivedItemPage.storedMediaObjects.size}")
+
      syncCdnPage(archivedItemPage, snapshotVersion)?.let { return it }

      serverCursor = archivedItemPage.cursor
@@ -136,16 +152,27 @@ class ArchiveAttachmentReconciliationJob private constructor(
    val mediaObjectsThatMayNeedReUpload = SignalDatabase.backupMediaSnapshots.getMediaObjectsLastSeenOnCdnBeforeSnapshotVersion(snapshotVersion)
    val mayNeedReUploadCount = mediaObjectsThatMayNeedReUpload.count

+    val mediaIdsThatNeedUpload = mutableSetOf<MediaId>()
+    val internalUser = RemoteConfig.internalUser
+
    if (mayNeedReUploadCount > 0) {
      Log.w(TAG, "Found $mayNeedReUploadCount attachments that are present in the target snapshot, but could not be found on the CDN. This could be a bookkeeping error, or the upload may still be in progress. Checking.", true)

      var newBackupJobRequired = false
      var bookkeepingErrorCount = 0

+      var fullSizeMismatchFound = false
+      var thumbnailMismatchFound = false
+
      mediaObjectsThatMayNeedReUpload.forEach { mediaObjectCursor ->
        val entry = BackupMediaSnapshotTable.MediaEntry.fromCursor(mediaObjectCursor)

+        if (internalUser) {
+          mediaIdsThatNeedUpload += MediaId(entry.mediaId)
+        }
+
        if (entry.isThumbnail) {
+          thumbnailMismatchFound = true
          val wasReset = SignalDatabase.attachments.resetArchiveThumbnailTransferStateByPlaintextHashAndRemoteKeyIfNecessary(entry.plaintextHash, entry.remoteKey)
          if (wasReset) {
            newBackupJobRequired = true
@@ -154,6 +181,7 @@ class ArchiveAttachmentReconciliationJob private constructor(
            Log.w(TAG, "[Thumbnail] Did not need to reset the transfer state by hash/key because the thumbnail either no longer exists or the upload is already in-progress.", true)
          }
        } else {
+          fullSizeMismatchFound = true
          val wasReset = SignalDatabase.attachments.resetArchiveTransferStateByPlaintextHashAndRemoteKeyIfNecessary(entry.plaintextHash, entry.remoteKey)
          if (wasReset) {
            newBackupJobRequired = true
@@ -170,9 +198,30 @@ class ArchiveAttachmentReconciliationJob private constructor(
        Log.i(TAG, "None of the $mayNeedReUploadCount CDN mismatches were bookkeeping errors.", true)
      }

+      if (internalUser && mediaIdsThatNeedUpload.isNotEmpty()) {
+        Log.w(TAG, "Starting internal-only lookup of matching attachments. May take a while!")
+
+        val matchingAttachments = SignalDatabase.attachments.debugGetAttachmentsForMediaIds(mediaIdsThatNeedUpload, limit = 100)
+        Log.w(TAG, "Found ${matchingAttachments.size} out of the ${mediaIdsThatNeedUpload.size} attachments we looked up (capped lookups to 100).", true)
+
+        matchingAttachments.forEach { attachment ->
+          Log.w(TAG, "Needed Upload: attachmentId=${attachment.first.attachmentId}, messageId=${attachment.first.mmsId}, thumbnail=${attachment.second}, contentType=${attachment.first.contentType}, quote=${attachment.first.quote}, transferState=${attachment.first.transferState}, archiveTransferState=${attachment.first.archiveTransferState}, hasData=${attachment.first.hasData}")
+        }
+      }
+
      if (newBackupJobRequired) {
        Log.w(TAG, "Some of the errors require re-uploading a new backup job to resolve.", true)
+        maybePostReconciliationFailureNotification()
        BackupMessagesJob.enqueue()
+      } else {
+        if (fullSizeMismatchFound) {
+          Log.d(TAG, "Full size mismatch found. Enqueuing an attachment backfill job to be safe.", true)
+          AppDependencies.jobManager.add(ArchiveAttachmentBackfillJob())
+        }
+        if (thumbnailMismatchFound) {
+          Log.d(TAG, "Thumbnail mismatch found. Enqueuing a thumbnail backfill job to be safe.", true)
+          AppDependencies.jobManager.add(ArchiveThumbnailBackfillJob())
+        }
      }
    } else {
      Log.d(TAG, "No attachments need to be repaired.")
@@ -208,6 +257,10 @@ class ArchiveAttachmentReconciliationJob private constructor(
    val mediaOnRemoteButNotLocal = SignalDatabase.backupMediaSnapshots.getMediaObjectsThatCantBeFound(mediaObjects)
    val mediaObjectsOnBothRemoteAndLocal = mediaObjects - mediaOnRemoteButNotLocal

+    if (RemoteConfig.internalUser && mediaOnRemoteButNotLocal.isNotEmpty()) {
+      Log.w(TAG, "MediaIds of items on remote but not local: ${mediaOnRemoteButNotLocal.joinToString(", ") { it.mediaId }}", true)
+    }
+
    val cdnMismatches = SignalDatabase.backupMediaSnapshots.getMediaObjectsWithNonMatchingCdn(mediaObjectsOnBothRemoteAndLocal)
    if (cdnMismatches.isNotEmpty()) {
      Log.w(TAG, "Found ${cdnMismatches.size} items with CDNs that differ from what we have locally. Updating our local store.")
@@ -251,6 +304,21 @@ class ArchiveAttachmentReconciliationJob private constructor(
      }
    }

+  private fun maybePostReconciliationFailureNotification() {
+    if (!RemoteConfig.internalUser) {
+      return
+    }
+
+    val notification: Notification = NotificationCompat.Builder(context, NotificationChannels.getInstance().FAILURES)
+      .setSmallIcon(R.drawable.ic_notification)
+      .setContentTitle("[Internal-only] Archive reconciliation found an error!")
+      .setContentText("Tap to send a debug log")
+      .setContentIntent(PendingIntent.getActivity(context, 0, Intent(context, SubmitDebugLogActivity::class.java), PendingIntentFlags.mutable()))
+      .build()
+
+    NotificationManagerCompat.from(context).notify(NotificationIds.RECONCILIATION_ERROR, notification)
+  }
+
  class Factory : Job.Factory<ArchiveAttachmentReconciliationJob> {
    override fun create(parameters: Parameters, serializedData: ByteArray?): ArchiveAttachmentReconciliationJob {
      val data = ArchiveAttachmentReconciliationJobData.ADAPTER.decode(serializedData!!)
@@ -420,8 +420,14 @@ class BackupMessagesJob private constructor(

  private fun writeMediaCursorToTemporaryTable(db: SignalDatabase, mediaBackupEnabled: Boolean) {
    if (mediaBackupEnabled) {
-      db.attachmentTable.getAttachmentsThatWillBeIncludedInArchive().use {
-        SignalDatabase.backupMediaSnapshots.writePendingMediaObjects(
+      db.attachmentTable.getFullSizeAttachmentsThatWillBeIncludedInArchive().use {
+        SignalDatabase.backupMediaSnapshots.writeFullSizePendingMediaObjects(
          mediaObjects = ArchiveMediaItemIterator(it).asSequence()
        )
      }
+
+      db.attachmentTable.getThumbnailAttachmentsThatWillBeIncludedInArchive().use {
+        SignalDatabase.backupMediaSnapshots.writeThumbnailPendingMediaObjects(
+          mediaObjects = ArchiveMediaItemIterator(it).asSequence()
+        )
+      }
@@ -21,6 +21,7 @@ public final class NotificationIds {
  public static final int THREAD = 50000;
  public static final int MAX_THREAD = THREAD + 100_000;
  public static final int INTERNAL_ERROR = 258069;
+  public static final int RECONCILIATION_ERROR = 258070;
  public static final int LEGACY_SQLCIPHER_MIGRATION = 494949;
  public static final int USER_NOTIFICATION_MIGRATION = 525600;
  public static final int DEVICE_TRANSFER = 625420;