Dynamically compute MediaName and MediaId.

This commit is contained in:
Greyson Parrelli
2025-03-28 14:19:16 -04:00
parent f1985cf506
commit 17216316f6
34 changed files with 641 additions and 396 deletions

View File

@@ -64,6 +64,7 @@ import org.signal.core.util.stream.LimitedInputStream
import org.signal.core.util.stream.NullOutputStream
import org.signal.core.util.toInt
import org.signal.core.util.update
import org.signal.core.util.updateAll
import org.signal.core.util.withinTransaction
import org.thoughtcrime.securesms.attachments.ArchivedAttachment
import org.thoughtcrime.securesms.attachments.Attachment
@@ -97,7 +98,6 @@ import org.thoughtcrime.securesms.util.StorageUtil
import org.thoughtcrime.securesms.util.Util
import org.thoughtcrime.securesms.video.EncryptedMediaDataSource
import org.whispersystems.signalservice.api.attachment.AttachmentUploadResult
import org.whispersystems.signalservice.api.backup.MediaId
import org.whispersystems.signalservice.api.crypto.AttachmentCipherOutputStream
import org.whispersystems.signalservice.api.util.UuidUtil
import org.whispersystems.signalservice.internal.crypto.PaddingInputStream
@@ -164,9 +164,6 @@ class AttachmentTable(
const val DISPLAY_ORDER = "display_order"
const val UPLOAD_TIMESTAMP = "upload_timestamp"
const val ARCHIVE_CDN = "archive_cdn"
const val ARCHIVE_MEDIA_NAME = "archive_media_name"
const val ARCHIVE_MEDIA_ID = "archive_media_id"
const val ARCHIVE_THUMBNAIL_MEDIA_ID = "archive_thumbnail_media_id"
const val ARCHIVE_TRANSFER_FILE = "archive_transfer_file"
const val ARCHIVE_TRANSFER_STATE = "archive_transfer_state"
const val THUMBNAIL_RESTORE_STATE = "thumbnail_restore_state"
@@ -224,8 +221,6 @@ class AttachmentTable(
DATA_HASH_START,
DATA_HASH_END,
ARCHIVE_CDN,
ARCHIVE_MEDIA_NAME,
ARCHIVE_MEDIA_ID,
ARCHIVE_TRANSFER_FILE,
THUMBNAIL_FILE,
THUMBNAIL_RESTORE_STATE,
@@ -270,11 +265,8 @@ class AttachmentTable(
$DATA_HASH_START TEXT DEFAULT NULL,
$DATA_HASH_END TEXT DEFAULT NULL,
$ARCHIVE_CDN INTEGER DEFAULT 0,
$ARCHIVE_MEDIA_NAME TEXT DEFAULT NULL,
$ARCHIVE_MEDIA_ID TEXT DEFAULT NULL,
$ARCHIVE_TRANSFER_FILE TEXT DEFAULT NULL,
$ARCHIVE_TRANSFER_STATE INTEGER DEFAULT ${ArchiveTransferState.NONE.value},
$ARCHIVE_THUMBNAIL_MEDIA_ID TEXT DEFAULT NULL,
$THUMBNAIL_FILE TEXT DEFAULT NULL,
$THUMBNAIL_RANDOM BLOB DEFAULT NULL,
$THUMBNAIL_RESTORE_STATE INTEGER DEFAULT ${ThumbnailRestoreState.NONE.value},
@@ -294,8 +286,8 @@ class AttachmentTable(
"CREATE INDEX IF NOT EXISTS attachment_data_hash_start_index ON $TABLE_NAME ($DATA_HASH_START);",
"CREATE INDEX IF NOT EXISTS attachment_data_hash_end_index ON $TABLE_NAME ($DATA_HASH_END);",
"CREATE INDEX IF NOT EXISTS $DATA_FILE_INDEX ON $TABLE_NAME ($DATA_FILE);",
"CREATE INDEX IF NOT EXISTS attachment_archive_media_id_index ON $TABLE_NAME ($ARCHIVE_MEDIA_ID);",
"CREATE INDEX IF NOT EXISTS attachment_archive_transfer_state ON $TABLE_NAME ($ARCHIVE_TRANSFER_STATE);"
"CREATE INDEX IF NOT EXISTS attachment_archive_transfer_state ON $TABLE_NAME ($ARCHIVE_TRANSFER_STATE);",
"CREATE INDEX IF NOT EXISTS attachment_remote_digest_index ON $TABLE_NAME ($REMOTE_DIGEST);"
)
@JvmStatic
@@ -403,11 +395,15 @@ class AttachmentTable(
}
}
fun getMediaIdCursor(): Cursor {
/**
* Returns a cursor (with just the digest+archive_cdn) for all attachments that are eligible for archive upload.
* In practice, this means that the attachments have a digest and have not hit a permanent archive upload failure.
*/
fun getAttachmentsEligibleForArchiveUpload(): Cursor {
return readableDatabase
.select(ARCHIVE_MEDIA_ID, ARCHIVE_CDN)
.select(REMOTE_DIGEST, ARCHIVE_CDN)
.from(TABLE_NAME)
.where("$ARCHIVE_MEDIA_ID IS NOT NULL")
.where("$REMOTE_DIGEST IS NOT NULL AND $ARCHIVE_TRANSFER_STATE != ${ArchiveTransferState.PERMANENT_FAILURE.value}")
.run()
}
@@ -742,9 +738,12 @@ class AttachmentTable(
"""
SELECT SUM($DATA_SIZE)
FROM (
SELECT DISTINCT $ARCHIVE_MEDIA_ID, $DATA_SIZE
SELECT DISTINCT $REMOTE_DIGEST, $DATA_SIZE
FROM $TABLE_NAME
WHERE $ARCHIVE_TRANSFER_STATE NOT IN (${ArchiveTransferState.FINISHED.value}, ${ArchiveTransferState.PERMANENT_FAILURE.value})
WHERE
$DATA_FILE NOT NULL AND
$REMOTE_DIGEST NOT NULL AND
$ARCHIVE_TRANSFER_STATE NOT IN (${ArchiveTransferState.FINISHED.value}, ${ArchiveTransferState.PERMANENT_FAILURE.value})
)
""".trimIndent()
)
@@ -1158,7 +1157,7 @@ class AttachmentTable(
// We don't look at hash_start here because that could result in us matching on a file that got compressed down to something smaller, effectively lowering
// the quality of the attachment we received.
val hashMatch: DataFileInfo? = readableDatabase
.select(ID, DATA_FILE, DATA_SIZE, DATA_RANDOM, DATA_HASH_START, DATA_HASH_END, TRANSFORM_PROPERTIES, UPLOAD_TIMESTAMP, ARCHIVE_CDN, ARCHIVE_MEDIA_NAME, ARCHIVE_MEDIA_ID)
.select(ID, DATA_FILE, DATA_SIZE, DATA_RANDOM, DATA_HASH_START, DATA_HASH_END, TRANSFORM_PROPERTIES, UPLOAD_TIMESTAMP, ARCHIVE_CDN)
.from(TABLE_NAME)
.where("$DATA_HASH_END = ? AND $DATA_HASH_END NOT NULL AND $TRANSFER_STATE = $TRANSFER_PROGRESS_DONE AND $DATA_FILE NOT NULL", fileWriteResult.hash)
.run()
@@ -1175,8 +1174,6 @@ class AttachmentTable(
values.put(DATA_HASH_START, hashMatch.hashEnd)
values.put(DATA_HASH_END, hashMatch.hashEnd)
values.put(ARCHIVE_CDN, hashMatch.archiveCdn)
values.put(ARCHIVE_MEDIA_NAME, hashMatch.archiveMediaName)
values.put(ARCHIVE_MEDIA_ID, hashMatch.archiveMediaId)
} else {
values.put(DATA_FILE, fileWriteResult.file.absolutePath)
values.put(DATA_SIZE, fileWriteResult.length)
@@ -1252,7 +1249,7 @@ class AttachmentTable(
}
@Throws(IOException::class)
fun finalizeAttachmentThumbnailAfterDownload(attachmentId: AttachmentId, archiveMediaId: String, inputStream: InputStream, transferFile: File) {
fun finalizeAttachmentThumbnailAfterDownload(attachmentId: AttachmentId, digest: ByteArray, inputStream: InputStream, transferFile: File) {
Log.i(TAG, "[finalizeAttachmentThumbnailAfterDownload] Finalizing downloaded data for $attachmentId.")
val fileWriteResult: DataFileWriteResult = writeToDataFile(newDataFile(context), inputStream, TransformProperties.empty())
@@ -1265,7 +1262,7 @@ class AttachmentTable(
db.update(TABLE_NAME)
.values(values)
.where("$ARCHIVE_MEDIA_ID = ?", archiveMediaId)
.where("$REMOTE_DIGEST = ?", digest)
.run()
}
@@ -1277,10 +1274,12 @@ class AttachmentTable(
}
}
/**
* Updates the state around archive thumbnail uploads, and ensures that all attachments sharing the same digest remain in sync.
*/
fun finalizeAttachmentThumbnailAfterUpload(
attachmentId: AttachmentId,
archiveMediaId: String,
archiveThumbnailMediaId: MediaId,
attachmentDigest: ByteArray,
data: ByteArray
) {
Log.i(TAG, "[finalizeAttachmentThumbnailAfterUpload] Finalizing archive data for $attachmentId thumbnail.")
@@ -1290,13 +1289,12 @@ class AttachmentTable(
val values = contentValuesOf(
THUMBNAIL_FILE to fileWriteResult.file.absolutePath,
THUMBNAIL_RANDOM to fileWriteResult.random,
THUMBNAIL_RESTORE_STATE to ThumbnailRestoreState.FINISHED.value,
ARCHIVE_THUMBNAIL_MEDIA_ID to archiveThumbnailMediaId.encode()
THUMBNAIL_RESTORE_STATE to ThumbnailRestoreState.FINISHED.value
)
db.update(TABLE_NAME)
.values(values)
.where("$ARCHIVE_MEDIA_ID = ? OR $ID = ?", archiveMediaId, attachmentId)
.where("$ID = ? OR $REMOTE_DIGEST = ?", attachmentId, attachmentDigest)
.run()
}
}
@@ -1601,10 +1599,7 @@ class AttachmentTable(
$DATA_RANDOM,
$DATA_HASH_START,
$DATA_HASH_END,
$ARCHIVE_MEDIA_ID,
$ARCHIVE_MEDIA_NAME,
$ARCHIVE_CDN,
$ARCHIVE_THUMBNAIL_MEDIA_ID,
$THUMBNAIL_RESTORE_STATE
)
SELECT
@@ -1631,10 +1626,7 @@ class AttachmentTable(
$DATA_RANDOM,
$DATA_HASH_START,
$DATA_HASH_END,
"${attachment.archiveMediaId}",
"${attachment.archiveMediaName}",
${attachment.archiveCdn},
$ARCHIVE_THUMBNAIL_MEDIA_ID,
${if (forThumbnail) ThumbnailRestoreState.NEEDS_RESTORE.value else ThumbnailRestoreState.NONE.value}
FROM $TABLE_NAME
WHERE $ID = ${attachment.attachmentId.id}
@@ -1733,7 +1725,7 @@ class AttachmentTable(
fun getDataFileInfo(attachmentId: AttachmentId): DataFileInfo? {
return readableDatabase
.select(ID, DATA_FILE, DATA_SIZE, DATA_RANDOM, DATA_HASH_START, DATA_HASH_END, TRANSFORM_PROPERTIES, UPLOAD_TIMESTAMP, ARCHIVE_CDN, ARCHIVE_MEDIA_NAME, ARCHIVE_MEDIA_ID)
.select(ID, DATA_FILE, DATA_SIZE, DATA_RANDOM, DATA_HASH_START, DATA_HASH_END, TRANSFORM_PROPERTIES, UPLOAD_TIMESTAMP, ARCHIVE_CDN)
.from(TABLE_NAME)
.where("$ID = ?", attachmentId.id)
.run()
@@ -1934,9 +1926,6 @@ class AttachmentTable(
uploadTimestamp = jsonObject.getLong(UPLOAD_TIMESTAMP),
dataHash = jsonObject.getString(DATA_HASH_END),
archiveCdn = jsonObject.getInt(ARCHIVE_CDN),
archiveMediaName = jsonObject.getString(ARCHIVE_MEDIA_NAME),
archiveMediaId = jsonObject.getString(ARCHIVE_MEDIA_ID),
hasArchiveThumbnail = !TextUtils.isEmpty(jsonObject.getString(THUMBNAIL_FILE)),
thumbnailRestoreState = ThumbnailRestoreState.deserialize(jsonObject.getInt(THUMBNAIL_RESTORE_STATE)),
archiveTransferState = ArchiveTransferState.deserialize(jsonObject.getInt(ARCHIVE_TRANSFER_STATE)),
uuid = UuidUtil.parseOrNull(jsonObject.getString(ATTACHMENT_UUID))
@@ -1982,7 +1971,7 @@ class AttachmentTable(
/**
* Sets the archive data for the specific attachment, as well as for any attachments that use the same underlying file.
*/
fun setArchiveData(attachmentId: AttachmentId, archiveCdn: Int, archiveMediaName: String, archiveMediaId: String, archiveThumbnailMediaId: String) {
fun setArchiveCdn(attachmentId: AttachmentId, archiveCdn: Int) {
writableDatabase.withinTransaction { db ->
val dataFile = db
.select(DATA_FILE)
@@ -1999,9 +1988,6 @@ class AttachmentTable(
db.update(TABLE_NAME)
.values(
ARCHIVE_CDN to archiveCdn,
ARCHIVE_MEDIA_ID to archiveMediaId,
ARCHIVE_MEDIA_NAME to archiveMediaName,
ARCHIVE_THUMBNAIL_MEDIA_ID to archiveThumbnailMediaId,
ARCHIVE_TRANSFER_STATE to ArchiveTransferState.FINISHED.value
)
.where("$DATA_FILE = ?", dataFile)
@@ -2009,14 +1995,15 @@ class AttachmentTable(
}
}
fun updateArchiveCdnByMediaId(archiveMediaId: String, archiveCdn: Int): Int {
return writableDatabase.rawQuery(
"UPDATE $TABLE_NAME SET " +
"$ARCHIVE_CDN = CASE WHEN $ARCHIVE_MEDIA_ID = ? THEN ? ELSE $ARCHIVE_CDN END " +
"WHERE $ARCHIVE_MEDIA_ID = ? OR $ARCHIVE_THUMBNAIL_MEDIA_ID = ? " +
"RETURNING $ARCHIVE_CDN",
SqlUtil.buildArgs(archiveMediaId, archiveCdn, archiveMediaId, archiveMediaId)
).count
/**
* Updates all attachments that share the same digest with the given archive CDN.
*/
fun setArchiveCdnByDigest(digest: ByteArray, archiveCdn: Int) {
writableDatabase
.update(TABLE_NAME)
.values(ARCHIVE_CDN to archiveCdn)
.where("$REMOTE_DIGEST = ?", digest)
.run()
}
fun clearArchiveData(attachmentIds: List<AttachmentId>) {
@@ -2025,9 +2012,7 @@ class AttachmentTable(
writableDatabase
.update(TABLE_NAME)
.values(
ARCHIVE_CDN to 0,
ARCHIVE_MEDIA_ID to null,
ARCHIVE_MEDIA_NAME to null
ARCHIVE_CDN to 0
)
.where(query.where, query.whereArgs)
.run()
@@ -2036,13 +2021,11 @@ class AttachmentTable(
fun clearAllArchiveData() {
writableDatabase
.update(TABLE_NAME)
.updateAll(TABLE_NAME)
.values(
ARCHIVE_CDN to 0,
ARCHIVE_MEDIA_ID to null,
ARCHIVE_MEDIA_NAME to null
ARCHIVE_TRANSFER_STATE to ArchiveTransferState.NONE.value
)
.where("$ARCHIVE_CDN > 0 OR $ARCHIVE_MEDIA_ID IS NOT NULL OR $ARCHIVE_MEDIA_NAME IS NOT NULL")
.run()
}
@@ -2332,9 +2315,6 @@ class AttachmentTable(
put(CAPTION, attachment.caption)
put(UPLOAD_TIMESTAMP, attachment.uploadTimestamp)
put(ARCHIVE_CDN, attachment.archiveCdn)
put(ARCHIVE_MEDIA_NAME, attachment.archiveMediaName)
put(ARCHIVE_MEDIA_ID, attachment.archiveMediaId)
put(ARCHIVE_THUMBNAIL_MEDIA_ID, attachment.archiveThumbnailMediaId)
put(ARCHIVE_TRANSFER_STATE, ArchiveTransferState.FINISHED.value)
put(THUMBNAIL_RESTORE_STATE, ThumbnailRestoreState.NEEDS_RESTORE.value)
put(ATTACHMENT_UUID, attachment.uuid?.toString())
@@ -2399,7 +2379,7 @@ class AttachmentTable(
// First we'll check if our file hash matches the starting or ending hash of any other attachments and has compatible transform properties.
// We'll prefer the match with the most recent upload timestamp.
val hashMatch: DataFileInfo? = readableDatabase
.select(ID, DATA_FILE, DATA_SIZE, DATA_RANDOM, DATA_HASH_START, DATA_HASH_END, TRANSFORM_PROPERTIES, UPLOAD_TIMESTAMP, ARCHIVE_CDN, ARCHIVE_MEDIA_NAME, ARCHIVE_MEDIA_ID)
.select(ID, DATA_FILE, DATA_SIZE, DATA_RANDOM, DATA_HASH_START, DATA_HASH_END, TRANSFORM_PROPERTIES, UPLOAD_TIMESTAMP, ARCHIVE_CDN)
.from(TABLE_NAME)
.where("$DATA_FILE NOT NULL AND ($DATA_HASH_START = ? OR $DATA_HASH_END = ?)", fileWriteResult.hash, fileWriteResult.hash)
.run()
@@ -2435,8 +2415,6 @@ class AttachmentTable(
contentValues.put(DATA_HASH_START, fileWriteResult.hash)
contentValues.put(DATA_HASH_END, hashMatch.hashEnd)
contentValues.put(ARCHIVE_CDN, hashMatch.archiveCdn)
contentValues.put(ARCHIVE_MEDIA_NAME, hashMatch.archiveMediaName)
contentValues.put(ARCHIVE_MEDIA_ID, hashMatch.archiveMediaId)
if (hashMatch.transformProperties.skipTransform) {
Log.i(TAG, "[insertAttachmentWithData] The hash match has a DATA_HASH_END and skipTransform=true, so skipping transform of the new file as well. (MessageId: $messageId, ${attachment.uri})")
@@ -2597,9 +2575,6 @@ class AttachmentTable(
uploadTimestamp = cursor.requireLong(UPLOAD_TIMESTAMP),
dataHash = cursor.requireString(DATA_HASH_END),
archiveCdn = cursor.requireInt(ARCHIVE_CDN),
archiveMediaName = cursor.requireString(ARCHIVE_MEDIA_NAME),
archiveMediaId = cursor.requireString(ARCHIVE_MEDIA_ID),
hasArchiveThumbnail = !cursor.isNull(THUMBNAIL_FILE),
thumbnailRestoreState = ThumbnailRestoreState.deserialize(cursor.requireInt(THUMBNAIL_RESTORE_STATE)),
archiveTransferState = ArchiveTransferState.deserialize(cursor.requireInt(ARCHIVE_TRANSFER_STATE)),
uuid = UuidUtil.parseOrNull(cursor.requireString(ATTACHMENT_UUID))
@@ -2627,9 +2602,7 @@ class AttachmentTable(
hashEnd = this.requireString(DATA_HASH_END),
transformProperties = TransformProperties.parse(this.requireString(TRANSFORM_PROPERTIES)),
uploadTimestamp = this.requireLong(UPLOAD_TIMESTAMP),
archiveCdn = this.requireInt(ARCHIVE_CDN),
archiveMediaName = this.requireString(ARCHIVE_MEDIA_NAME),
archiveMediaId = this.requireString(ARCHIVE_MEDIA_ID)
archiveCdn = this.requireInt(ARCHIVE_CDN)
)
}
@@ -2693,9 +2666,7 @@ class AttachmentTable(
val hashEnd: String?,
val transformProperties: TransformProperties,
val uploadTimestamp: Long,
val archiveCdn: Int,
val archiveMediaName: String?,
val archiveMediaId: String?
val archiveCdn: Int
)
@VisibleForTesting

View File

@@ -10,15 +10,20 @@ import androidx.annotation.VisibleForTesting
import androidx.core.content.contentValuesOf
import org.signal.core.util.SqlUtil
import org.signal.core.util.delete
import org.signal.core.util.exists
import org.signal.core.util.readToList
import org.signal.core.util.readToSet
import org.signal.core.util.requireInt
import org.signal.core.util.requireNonNullBlob
import org.signal.core.util.requireNonNullString
import org.signal.core.util.select
import org.signal.core.util.toInt
import org.thoughtcrime.securesms.backup.v2.ArchivedMediaObject
/**
* Helper table for attachment deletion sync
* When we delete attachments locally, we can't immediately delete them from the archive CDN, because the most recent backup may still
* reference that attachment -- at least until a new backup is made.
*
* So, this table maintains a snapshot of the media present in the last backup, so that we know what we can and can't delete from the archive CDN.
*/
class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : DatabaseTable(context, database) {
companion object {
@@ -50,72 +55,160 @@ class BackupMediaSnapshotTable(context: Context, database: SignalDatabase) : Dat
@VisibleForTesting
const val PENDING_SYNC_TIME = "pending_sync_time"
/**
* Whether or not this entry is for a thumbnail.
*/
const val IS_THUMBNAIL = "is_thumbnail"
/**
* The remote digest for the media object. This is used to find matching attachments in the attachment table when necessary.
*/
const val REMOTE_DIGEST = "remote_digest"
val CREATE_TABLE = """
CREATE TABLE $TABLE_NAME (
$ID INTEGER PRIMARY KEY,
$MEDIA_ID TEXT UNIQUE,
$CDN INTEGER,
$LAST_SYNC_TIME INTEGER DEFAULT 0,
$PENDING_SYNC_TIME INTEGER
$PENDING_SYNC_TIME INTEGER,
$IS_THUMBNAIL INTEGER DEFAULT 0,
$REMOTE_DIGEST BLOB NOT NULL
)
""".trimIndent()
private const val ON_MEDIA_ID_CONFLICT = """
ON CONFLICT($MEDIA_ID) DO UPDATE SET
$PENDING_SYNC_TIME = EXCLUDED.$PENDING_SYNC_TIME,
$CDN = EXCLUDED.$CDN
"""
}
/**
* Creates the temporary table if it doesn't exist, clears it, then inserts the media objects into it.
* Writes the set of media items that are slated to be referenced in the next backup, updating their pending sync time.
* Will insert multiple rows per object -- one for the main item, and one for the thumbnail.
*/
fun writePendingMediaObjects(mediaObjects: Sequence<ArchivedMediaObject>, pendingSyncTime: Long) {
mediaObjects.chunked(999)
fun writePendingMediaObjects(mediaObjects: Sequence<ArchiveMediaItem>, pendingSyncTime: Long) {
mediaObjects
.chunked(SqlUtil.MAX_QUERY_ARGS)
.forEach { chunk ->
writePendingMediaObjectsChunk(chunk, pendingSyncTime)
}
}
writePendingMediaObjectsChunk(
chunk.map { MediaEntry(it.mediaId, it.cdn, it.digest, isThumbnail = false) },
pendingSyncTime
)
private fun writePendingMediaObjectsChunk(chunk: List<ArchivedMediaObject>, pendingSyncTime: Long) {
SqlUtil.buildBulkInsert(
TABLE_NAME,
arrayOf(MEDIA_ID, CDN, PENDING_SYNC_TIME),
chunk.map {
contentValuesOf(MEDIA_ID to it.mediaId, CDN to it.cdn, PENDING_SYNC_TIME to pendingSyncTime)
writePendingMediaObjectsChunk(
chunk.map { MediaEntry(it.thumbnailMediaId, it.cdn, it.digest, isThumbnail = true) },
pendingSyncTime
)
}
).forEach {
writableDatabase.execSQL("${it.where} $ON_MEDIA_ID_CONFLICT", it.whereArgs)
}
}
/**
* Copies all entries from the temporary table to the persistent table, then deletes the temporary table.
* Commits the pending sync time to the last sync time. This is called once a backup has been successfully uploaded.
*/
fun commitPendingRows() {
writableDatabase.execSQL("UPDATE $TABLE_NAME SET $LAST_SYNC_TIME = $PENDING_SYNC_TIME")
}
fun getPageOfOldMediaObjects(currentSyncTime: Long, pageSize: Int): List<ArchivedMediaObject> {
fun getPageOfOldMediaObjects(currentSyncTime: Long, pageSize: Int): Set<ArchivedMediaObject> {
return readableDatabase.select(MEDIA_ID, CDN)
.from(TABLE_NAME)
.where("$LAST_SYNC_TIME < ? AND $LAST_SYNC_TIME = $PENDING_SYNC_TIME", currentSyncTime)
.limit(pageSize)
.run()
.readToList {
.readToSet {
ArchivedMediaObject(mediaId = it.requireNonNullString(MEDIA_ID), cdn = it.requireInt(CDN))
}
}
fun deleteMediaObjects(mediaObjects: List<ArchivedMediaObject>) {
SqlUtil.buildCollectionQuery(MEDIA_ID, mediaObjects.map { it.mediaId }).forEach {
writableDatabase.delete(TABLE_NAME)
.where(it.where, it.whereArgs)
.run()
fun deleteMediaObjects(mediaObjects: Collection<ArchivedMediaObject>) {
val query = SqlUtil.buildFastCollectionQuery(MEDIA_ID, mediaObjects.map { it.mediaId })
writableDatabase.delete(TABLE_NAME)
.where(query.where, query.whereArgs)
.run()
}
/**
 * Given a list of media objects, find the ones that we have no knowledge of in our local store.
 *
 * NOTE(review): the query below selects *local* non-thumbnail rows whose media id is NOT in
 * [objects] -- i.e. local entries absent from the provided list, which is the inverse of what
 * the summary above describes. Also unclear how buildSingleCollectionQuery behaves for an
 * empty [objects] list (a degenerate NOT IN clause). Confirm intent against the caller.
 */
fun getMediaObjectsThatCantBeFound(objects: List<ArchivedMediaObject>): Set<ArchivedMediaObject> {
// Matches non-thumbnail rows whose media id is outside the provided set.
val query = SqlUtil.buildSingleCollectionQuery(
column = MEDIA_ID,
values = objects.map { it.mediaId },
collectionOperator = SqlUtil.CollectionOperator.NOT_IN,
prefix = "$IS_THUMBNAIL = 0 AND "
)
return readableDatabase
.select(MEDIA_ID, CDN)
.from(TABLE_NAME)
.where(query.where, query.whereArgs)
.run()
.readToSet {
ArchivedMediaObject(
mediaId = it.requireNonNullString(MEDIA_ID),
cdn = it.requireInt(CDN)
)
}
}
/**
 * Given a list of media objects, finds the ones whose CDN in our local store does not match the
 * CDN we were told about remotely, returning the locally-stored digest paired with the remote CDN.
 *
 * Only non-thumbnail entries are considered.
 */
fun getMediaObjectsWithNonMatchingCdn(objects: List<ArchivedMediaObject>): List<CdnMismatchResult> {
  // An empty VALUES clause is a SQL syntax error, so bail out early -- no pairs, no mismatches.
  if (objects.isEmpty()) {
    return emptyList()
  }

  // NOTE(review): mediaId is interpolated directly into the SQL rather than bound as an argument.
  // This is only safe if media ids can never contain quote characters (e.g. base64url) -- confirm,
  // or switch to bound parameters.
  val inputValues = objects.joinToString(separator = ", ") { "('${it.mediaId}', ${it.cdn})" }

  return readableDatabase.rawQuery(
    """
    WITH input_pairs($MEDIA_ID, $CDN) AS (VALUES $inputValues)
    SELECT a.$REMOTE_DIGEST, b.$CDN
    FROM $TABLE_NAME a
    JOIN input_pairs b ON a.$MEDIA_ID = b.$MEDIA_ID
    WHERE a.$CDN != b.$CDN AND a.$IS_THUMBNAIL = 0
    """
  ).readToList { cursor ->
    CdnMismatchResult(
      digest = cursor.requireNonNullBlob(REMOTE_DIGEST),
      cdn = cursor.requireInt(CDN)
    )
  }
}
fun hasOldMediaObjects(currentSyncTime: Long): Boolean {
return readableDatabase.exists(TABLE_NAME).where("$LAST_SYNC_TIME > ? AND $LAST_SYNC_TIME = $PENDING_SYNC_TIME", currentSyncTime).run()
/**
 * Bulk-inserts a single chunk of media entries. Rows that already exist (matched on media id)
 * have their pending sync time and CDN updated in place via upsert.
 *
 * Chunk size must respect the SQLite bound-argument limit (callers chunk by SqlUtil.MAX_QUERY_ARGS).
 */
private fun writePendingMediaObjectsChunk(chunk: List<MediaEntry>, pendingSyncTime: Long) {
val values = chunk.map {
contentValuesOf(
MEDIA_ID to it.mediaId,
CDN to it.cdn,
REMOTE_DIGEST to it.digest,
IS_THUMBNAIL to it.isThumbnail.toInt(),
PENDING_SYNC_TIME to pendingSyncTime
)
}
// SqlUtil naming quirk: query.where carries the full INSERT statement and query.whereArgs its
// bound arguments, as evidenced by the execSQL usage below.
val query = SqlUtil.buildSingleBulkInsert(TABLE_NAME, arrayOf(MEDIA_ID, CDN, REMOTE_DIGEST, IS_THUMBNAIL, PENDING_SYNC_TIME), values)
writableDatabase.execSQL(
"""
${query.where}
ON CONFLICT($MEDIA_ID) DO UPDATE SET
$PENDING_SYNC_TIME = EXCLUDED.$PENDING_SYNC_TIME,
$CDN = EXCLUDED.$CDN
""",
query.whereArgs
)
}
class ArchiveMediaItem(
val mediaId: String,
val thumbnailMediaId: String,
val cdn: Int,
val digest: ByteArray
)
class CdnMismatchResult(
val digest: ByteArray,
val cdn: Int
)
/**
 * Internal row model used when bulk-inserting pending media entries (one per attachment, one per
 * thumbnail).
 *
 * NOTE(review): [digest] is a ByteArray inside a data class, so the generated equals/hashCode
 * compare the array by reference, not by content. Harmless if instances are never compared or
 * used as map/set keys -- confirm.
 */
private data class MediaEntry(
val mediaId: String,
val cdn: Int,
val digest: ByteArray,
val isThumbnail: Boolean
)
}

View File

@@ -53,8 +53,6 @@ class MediaTable internal constructor(context: Context?, databaseHelper: SignalD
${AttachmentTable.TABLE_NAME}.${AttachmentTable.REMOTE_INCREMENTAL_DIGEST_CHUNK_SIZE},
${AttachmentTable.TABLE_NAME}.${AttachmentTable.DATA_HASH_END},
${AttachmentTable.TABLE_NAME}.${AttachmentTable.ARCHIVE_CDN},
${AttachmentTable.TABLE_NAME}.${AttachmentTable.ARCHIVE_MEDIA_NAME},
${AttachmentTable.TABLE_NAME}.${AttachmentTable.ARCHIVE_MEDIA_ID},
${AttachmentTable.TABLE_NAME}.${AttachmentTable.THUMBNAIL_RESTORE_STATE},
${AttachmentTable.TABLE_NAME}.${AttachmentTable.ARCHIVE_TRANSFER_STATE},
${AttachmentTable.TABLE_NAME}.${AttachmentTable.ATTACHMENT_UUID},

View File

@@ -389,8 +389,6 @@ open class MessageTable(context: Context?, databaseHelper: SignalDatabase) : Dat
'${AttachmentTable.UPLOAD_TIMESTAMP}', ${AttachmentTable.TABLE_NAME}.${AttachmentTable.UPLOAD_TIMESTAMP},
'${AttachmentTable.DATA_HASH_END}', ${AttachmentTable.TABLE_NAME}.${AttachmentTable.DATA_HASH_END},
'${AttachmentTable.ARCHIVE_CDN}', ${AttachmentTable.TABLE_NAME}.${AttachmentTable.ARCHIVE_CDN},
'${AttachmentTable.ARCHIVE_MEDIA_NAME}', ${AttachmentTable.TABLE_NAME}.${AttachmentTable.ARCHIVE_MEDIA_NAME},
'${AttachmentTable.ARCHIVE_MEDIA_ID}', ${AttachmentTable.TABLE_NAME}.${AttachmentTable.ARCHIVE_MEDIA_ID},
'${AttachmentTable.THUMBNAIL_RESTORE_STATE}', ${AttachmentTable.TABLE_NAME}.${AttachmentTable.THUMBNAIL_RESTORE_STATE},
'${AttachmentTable.ARCHIVE_TRANSFER_STATE}', ${AttachmentTable.TABLE_NAME}.${AttachmentTable.ARCHIVE_TRANSFER_STATE},
'${AttachmentTable.ATTACHMENT_UUID}', ${AttachmentTable.TABLE_NAME}.${AttachmentTable.ATTACHMENT_UUID}

View File

@@ -123,6 +123,7 @@ import org.thoughtcrime.securesms.database.helpers.migration.V265_FixFtsTriggers
import org.thoughtcrime.securesms.database.helpers.migration.V266_UniqueThreadPinOrder
import org.thoughtcrime.securesms.database.helpers.migration.V267_FixGroupInvitationDeclinedUpdate
import org.thoughtcrime.securesms.database.helpers.migration.V268_FixInAppPaymentsErrorStateConsistency
import org.thoughtcrime.securesms.database.helpers.migration.V269_BackupMediaSnapshotChanges
import org.thoughtcrime.securesms.database.SQLiteDatabase as SignalSqliteDatabase
/**
@@ -251,10 +252,11 @@ object SignalDatabaseMigrations {
265 to V265_FixFtsTriggers,
266 to V266_UniqueThreadPinOrder,
267 to V267_FixGroupInvitationDeclinedUpdate,
268 to V268_FixInAppPaymentsErrorStateConsistency
268 to V268_FixInAppPaymentsErrorStateConsistency,
269 to V269_BackupMediaSnapshotChanges
)
const val DATABASE_VERSION = 268
const val DATABASE_VERSION = 269
@JvmStatic
fun migrate(context: Application, db: SignalSqliteDatabase, oldVersion: Int, newVersion: Int) {

View File

@@ -0,0 +1,26 @@
/*
* Copyright 2025 Signal Messenger, LLC
* SPDX-License-Identifier: AGPL-3.0-only
*/
package org.thoughtcrime.securesms.database.helpers.migration
import android.app.Application
import org.thoughtcrime.securesms.database.SQLiteDatabase
/**
* We made a change to stop storing mediaId/names in favor of computing them on-the-fly.
* So, this change removes those columns and adds some plumbing elsewhere that we need to keep things glued together correctly.
*/
object V269_BackupMediaSnapshotChanges : SignalDatabaseMigration {
  override fun migrate(context: Application, db: SQLiteDatabase, oldVersion: Int, newVersion: Int) {
    // The index must be dropped before the column it covers can be removed. IF EXISTS guards
    // against devices where the index was never created.
    db.execSQL("DROP INDEX IF EXISTS attachment_archive_media_id_index")
    db.execSQL("ALTER TABLE attachment DROP COLUMN archive_media_id")
    db.execSQL("ALTER TABLE attachment DROP COLUMN archive_media_name")
    db.execSQL("ALTER TABLE attachment DROP COLUMN archive_thumbnail_media_id")
    db.execSQL("CREATE INDEX IF NOT EXISTS attachment_remote_digest_index ON attachment (remote_digest);")

    db.execSQL("ALTER TABLE backup_media_snapshot ADD COLUMN is_thumbnail INTEGER DEFAULT 0")
    // SQLite rejects ADD COLUMN with a NOT NULL constraint unless a non-null default is supplied
    // (even when the table is empty), so use an empty-blob sentinel. Pre-existing rows will carry
    // the sentinel digest until the snapshot is rewritten by the next backup.
    db.execSQL("ALTER TABLE backup_media_snapshot ADD COLUMN remote_digest BLOB NOT NULL DEFAULT x''")
  }
}
}