Mirror of https://github.com/signalapp/Signal-Android.git
Backfill missing attachment hashes.

Committed by: Cody Henthorne
Parent: 6df1a68213
Commit: 1d29b0166d
@@ -11,6 +11,8 @@ import org.junit.Assert.assertTrue
import org.junit.Before
import org.junit.Test
import org.junit.runner.RunWith
import org.signal.core.util.Base64
import org.signal.core.util.update
import org.thoughtcrime.securesms.attachments.AttachmentId
import org.thoughtcrime.securesms.attachments.PointerAttachment
import org.thoughtcrime.securesms.database.AttachmentTable.TransformProperties

@@ -37,6 +39,7 @@ class AttachmentTableTest_deduping {
  companion object {
    val DATA_A = byteArrayOf(1, 2, 3)
    val DATA_A_COMPRESSED = byteArrayOf(4, 5, 6)
    val DATA_A_HASH = byteArrayOf(1, 1, 1)

    val DATA_B = byteArrayOf(7, 8, 9)
  }

@@ -339,6 +342,25 @@ class AttachmentTableTest_deduping {
      assertSkipTransform(id2, false)
      assertDoesNotHaveRemoteFields(id2)
    }

    // Make sure that files marked as unhashable are all updated together
    test {
      val id1 = insertWithData(DATA_A)
      val id2 = insertWithData(DATA_A)
      upload(id1)
      upload(id2)
      clearHashes(id1)
      clearHashes(id2)

      val file = dataFile(id1)
      SignalDatabase.attachments.markDataFileAsUnhashable(file)

      assertDataFilesAreTheSame(id1, id2)
      assertDataHashEndMatches(id1, id2)

      val dataFileInfo = SignalDatabase.attachments.getDataFileInfo(id1)!!
      assertTrue(dataFileInfo.hashEnd!!.startsWith("UNHASHABLE-"))
    }
  }

  /**

@@ -412,6 +434,131 @@ class AttachmentTableTest_deduping {
    }
  }

  /**
   * Suite of tests around the migration where we hash all of the attachments and potentially dedupe them.
   */
  @Test
  fun migration() {
    // Verifying that getUnhashedDataFile only returns if there's actually a missing hash
    test {
      val id = insertWithData(DATA_A)
      upload(id)
      assertNull(SignalDatabase.attachments.getUnhashedDataFile())
    }

    // Verifying that getUnhashedDataFile finds the missing hash
    test {
      val id = insertWithData(DATA_A)
      upload(id)
      clearHashes(id)
      assertNotNull(SignalDatabase.attachments.getUnhashedDataFile())
    }

    // Verifying that getUnhashedDataFile doesn't return if the file isn't done downloading
    test {
      val id = insertWithData(DATA_A)
      upload(id)
      setTransferState(id, AttachmentTable.TRANSFER_PROGRESS_PENDING)
      clearHashes(id)
      assertNull(SignalDatabase.attachments.getUnhashedDataFile())
    }

    // If two attachments share the same file, when we backfill the hash, make sure both get their hashes set
    test {
      val id1 = insertWithData(DATA_A)
      val id2 = insertWithData(DATA_A)
      upload(id1)
      upload(id2)

      clearHashes(id1)
      clearHashes(id2)

      val file = dataFile(id1)
      SignalDatabase.attachments.setHashForDataFile(file, DATA_A_HASH)

      assertDataHashEnd(id1, DATA_A_HASH)
      assertDataHashEndMatches(id1, id2)
    }

    // Creates a situation where two different attachments have the same data but wrote to different files, and verifies the migration dedupes them
    test {
      val id1 = insertWithData(DATA_A)
      upload(id1)
      clearHashes(id1)

      val id2 = insertWithData(DATA_A)
      upload(id2)
      clearHashes(id2)

      assertDataFilesAreDifferent(id1, id2)

      val file1 = dataFile(id1)
      SignalDatabase.attachments.setHashForDataFile(file1, DATA_A_HASH)

      assertDataHashEnd(id1, DATA_A_HASH)
      assertDataFilesAreDifferent(id1, id2)

      val file2 = dataFile(id2)
      SignalDatabase.attachments.setHashForDataFile(file2, DATA_A_HASH)

      assertDataFilesAreTheSame(id1, id2)
      assertDataHashEndMatches(id1, id2)
      assertFalse(file2.exists())
    }

    // We've got three attachments with the same data now, two of them sharing a file. We want to make sure *both* entries that share the same file get deduped.
    test {
      val id1 = insertWithData(DATA_A)
      upload(id1)
      clearHashes(id1)

      val id2 = insertWithData(DATA_A)
      val id3 = insertWithData(DATA_A)
      upload(id2)
      upload(id3)
      clearHashes(id2)
      clearHashes(id3)

      assertDataFilesAreDifferent(id1, id2)
      assertDataFilesAreTheSame(id2, id3)

      val file1 = dataFile(id1)
      SignalDatabase.attachments.setHashForDataFile(file1, DATA_A_HASH)
      assertDataHashEnd(id1, DATA_A_HASH)

      val file2 = dataFile(id2)
      SignalDatabase.attachments.setHashForDataFile(file2, DATA_A_HASH)

      assertDataFilesAreTheSame(id1, id2)
      assertDataHashEndMatches(id1, id2)
      assertDataHashEndMatches(id2, id3)
      assertFalse(file2.exists())
    }

    // We don't want to mess with files that are still downloading, so this makes sure that even if data matches, we don't dedupe and don't delete the file
    test {
      val id1 = insertWithData(DATA_A)
      upload(id1)
      clearHashes(id1)

      val id2 = insertWithData(DATA_A)
      // *not* uploaded
      clearHashes(id2)

      assertDataFilesAreDifferent(id1, id2)

      val file1 = dataFile(id1)
      SignalDatabase.attachments.setHashForDataFile(file1, DATA_A_HASH)
      assertDataHashEnd(id1, DATA_A_HASH)

      val file2 = dataFile(id2)
      SignalDatabase.attachments.setHashForDataFile(file2, DATA_A_HASH)

      assertDataFilesAreDifferent(id1, id2)
      assertTrue(file2.exists())
    }
  }
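
  // Sketch (not part of this commit): the end-to-end loop these migration tests
  // model, condensed from the AttachmentHashBackfillJob added later in this diff.
  // `sha256OfDataFile` is a hypothetical stand-in for the job's DigestInputStream read.
  //
  //   while (true) {
  //     val (file, id) = SignalDatabase.attachments.getUnhashedDataFile() ?: break
  //     SignalDatabase.attachments.setHashForDataFile(file, sha256OfDataFile(id))
  //   }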

  private class TestContext {
    fun insertWithData(data: ByteArray, transformProperties: TransformProperties = TransformProperties.empty()): AttachmentId {
      val uri = BlobProvider.getInstance().forData(data).createForSingleSessionInMemory()

@@ -472,6 +619,22 @@ class AttachmentTableTest_deduping {
      return SignalDatabase.attachments.getDataFileInfo(attachmentId)!!.file
    }

    fun setTransferState(attachmentId: AttachmentId, transferState: Int) {
      // messageId doesn't actually matter -- that's for notifying listeners
      SignalDatabase.attachments.setTransferState(messageId = -1, attachmentId = attachmentId, transferState = transferState)
    }

    fun clearHashes(id: AttachmentId) {
      SignalDatabase.attachments.writableDatabase
        .update(AttachmentTable.TABLE_NAME)
        .values(
          AttachmentTable.DATA_HASH_START to null,
          AttachmentTable.DATA_HASH_END to null
        )
        .where("${AttachmentTable.ID} = ?", id)
        .run()
    }

    fun assertDeleted(attachmentId: AttachmentId) {
      assertNull("$attachmentId exists, but it shouldn't!", SignalDatabase.attachments.getAttachment(attachmentId))
    }

@@ -525,6 +688,11 @@ class AttachmentTableTest_deduping {
      assertEquals("DATA_HASH_ENDs did not match!", lhsInfo.hashEnd, rhsInfo.hashEnd)
    }

    fun assertDataHashEnd(id: AttachmentId, byteArray: ByteArray) {
      val dataFileInfo = SignalDatabase.attachments.getDataFileInfo(id)!!
      assertArrayEquals(byteArray, Base64.decode(dataFileInfo.hashEnd!!))
    }

    fun assertRemoteFieldsMatch(lhs: AttachmentId, rhs: AttachmentId) {
      val lhsAttachment = SignalDatabase.attachments.getAttachment(lhs)!!
      val rhsAttachment = SignalDatabase.attachments.getAttachment(rhs)!!

@@ -94,6 +94,7 @@ import java.security.MessageDigest
import java.security.NoSuchAlgorithmException
import java.util.LinkedList
import java.util.Optional
import java.util.UUID
import kotlin.time.Duration.Companion.days

class AttachmentTable(

@@ -255,6 +256,80 @@ class AttachmentTable(
    } ?: throw IOException("No stream for: $attachmentId")
  }

  /**
   * Returns the [File] (paired with its [AttachmentId]) of an attachment that has no [DATA_HASH_END] and is in the [TRANSFER_PROGRESS_DONE] state, if one exists.
   */
  fun getUnhashedDataFile(): Pair<File, AttachmentId>? {
    return readableDatabase
      .select(ID, DATA_FILE)
      .from(TABLE_NAME)
      .where("$DATA_FILE NOT NULL AND $DATA_HASH_END IS NULL AND $TRANSFER_STATE = $TRANSFER_PROGRESS_DONE")
      .orderBy("$ID DESC")
      .limit(1)
      .run()
      .readToSingleObject {
        File(it.requireNonNullString(DATA_FILE)) to AttachmentId(it.requireLong(ID))
      }
  }
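
  // The query builder above reads, roughly, as this SQL (a symbolic sketch, not
  // part of this commit; the `$X` placeholders are the table/column constants):
  //
  //   SELECT $ID, $DATA_FILE FROM $TABLE_NAME
  //   WHERE $DATA_FILE NOT NULL AND $DATA_HASH_END IS NULL AND $TRANSFER_STATE = $TRANSFER_PROGRESS_DONE
  //   ORDER BY $ID DESC LIMIT 1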

  /**
   * Sets the [DATA_HASH_END] for a given file. This is used to backfill the hash for attachments that were created before we started hashing them.
   * As a result, this will _not_ update the hashes on files that are not fully uploaded.
   */
  fun setHashForDataFile(file: File, hash: ByteArray) {
    writableDatabase.withinTransaction { db ->
      val hashEnd = Base64.encodeWithPadding(hash)

      val (existingFile: String?, existingSize: Long?, existingRandom: ByteArray?) = db.select(DATA_FILE, DATA_SIZE, DATA_RANDOM)
        .from(TABLE_NAME)
        .where("$DATA_HASH_END = ? AND $TRANSFER_STATE = $TRANSFER_PROGRESS_DONE AND $DATA_FILE NOT NULL AND $DATA_FILE != ?", hashEnd, file.absolutePath)
        .limit(1)
        .run()
        .readToSingleObject {
          Triple(
            it.requireString(DATA_FILE),
            it.requireLong(DATA_SIZE),
            it.requireBlob(DATA_RANDOM)
          )
        } ?: Triple(null, null, null)

      if (existingFile != null) {
        Log.i(TAG, "[setHashForDataFile] Found that a different file has the same HASH_END. Using that one instead. Pre-existing file: $existingFile", true)

        val updateCount = writableDatabase
          .update(TABLE_NAME)
          .values(
            DATA_FILE to existingFile,
            DATA_HASH_END to hashEnd,
            DATA_SIZE to existingSize,
            DATA_RANDOM to existingRandom
          )
          .where("$DATA_FILE = ? AND $DATA_HASH_END IS NULL AND $TRANSFER_STATE = $TRANSFER_PROGRESS_DONE", file.absolutePath)
          .run()

        Log.i(TAG, "[setHashForDataFile] Deduped $updateCount attachments.", true)

        val oldFileInUse = db.exists(TABLE_NAME).where("$DATA_FILE = ?", file.absolutePath).run()
        if (oldFileInUse) {
          Log.i(TAG, "[setHashForDataFile] Old file is still in use by some in-progress attachment.", true)
        } else {
          Log.i(TAG, "[setHashForDataFile] Deleting unused file: $file")
          if (!file.delete()) {
            Log.w(TAG, "Failed to delete duped file!")
          }
        }
      } else {
        val updateCount = writableDatabase
          .update(TABLE_NAME)
          .values(DATA_HASH_END to hashEnd)
          .where("$DATA_FILE = ? AND $TRANSFER_STATE = $TRANSFER_PROGRESS_DONE", file.absolutePath)
          .run()

        Log.i(TAG, "[setHashForDataFile] Updated the HASH_END for $updateCount rows using file ${file.absolutePath}")
      }
    }
  }
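
  // Sketch (not part of this commit): how the AttachmentHashBackfillJob added
  // later in this diff drives this method -- condensed from its run() method,
  // with error handling omitted.
  //
  //   val (file, attachmentId) = SignalDatabase.attachments.getUnhashedDataFile() ?: return
  //   val digest = MessageDigest.getInstance("SHA-256")
  //   DigestInputStream(SignalDatabase.attachments.getAttachmentStream(attachmentId, 0), digest).use { it.drain() }
  //   SignalDatabase.attachments.setHashForDataFile(file, digest.digest())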

  fun getAttachment(attachmentId: AttachmentId): DatabaseAttachment? {
    return readableDatabase
      .select(*PROJECTION)

@@ -482,6 +557,34 @@ class AttachmentTable(
    return onDiskButNotInDatabase.size
  }

  /**
   * Removes all references to the provided [DATA_FILE] from all attachments.
   * Only do this if the file is known to not exist or has some other critical problem!
   */
  fun clearUsagesOfDataFile(file: File) {
    val updateCount = writableDatabase
      .update(TABLE_NAME)
      .values(DATA_FILE to null)
      .where("$DATA_FILE = ?", file.absolutePath)
      .run()

    Log.i(TAG, "[clearUsagesOfDataFile] Cleared $updateCount usages of $file", true)
  }

  /**
   * Indicates that, for whatever reason, a hash could not be calculated for the file in question.
   * We put in a "bad hash" that will never match anything else so that we don't attempt to backfill it in the future.
   */
  fun markDataFileAsUnhashable(file: File) {
    val updateCount = writableDatabase
      .update(TABLE_NAME)
      .values(DATA_HASH_END to "UNHASHABLE-${UUID.randomUUID()}")
      .where("$DATA_FILE = ? AND $DATA_HASH_END IS NULL AND $TRANSFER_STATE = $TRANSFER_PROGRESS_DONE", file.absolutePath)
      .run()

    Log.i(TAG, "[markDataFileAsUnhashable] Marked $updateCount attachments as unhashable with file: ${file.absolutePath}", true)
  }
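
  // Example (not from this commit) of the sentinel this writes. The UUID suffix
  // is random per file, so it will never equal a real Base64 hash or another
  // file's sentinel:
  //
  //   DATA_HASH_END = "UNHASHABLE-1b9fa1e2-54c3-4c7e-9f1a-2d8c0a7b6e5d"
  //
  // The tests above detect it with: dataFileInfo.hashEnd!!.startsWith("UNHASHABLE-")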

  fun deleteAllAttachments() {
    Log.d(TAG, "[deleteAllAttachments]")

@@ -1610,6 +1713,11 @@ class AttachmentTable(
      return existing.copy(sentMediaQuality = sentMediaQuality.code)
    }

    @JvmStatic
    fun forSentMediaQuality(sentMediaQuality: Int): TransformProperties {
      return TransformProperties(sentMediaQuality = sentMediaQuality)
    }

    @JvmStatic
    fun parse(serialized: String?): TransformProperties {
      return if (serialized == null) {

@@ -0,0 +1,14 @@
/*
 * Copyright 2024 Signal Messenger, LLC
 * SPDX-License-Identifier: AGPL-3.0-only
 */

package org.thoughtcrime.securesms.jobmanager

import org.thoughtcrime.securesms.jobmanager.impl.BackoffUtil
import org.thoughtcrime.securesms.util.FeatureFlags

/**
 * Helper to calculate the default backoff interval for a [Job] given its run attempt count.
 */
fun Job.defaultBackoffInterval(): Long = BackoffUtil.exponentialBackoff(runAttempt + 1, FeatureFlags.getDefaultMaxBackoff())
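
// Illustrative sketch (not part of this commit, and not BackoffUtil's actual
// implementation, which this diff does not show): a typical capped exponential
// backoff looks something like this.
//
//   fun exponentialBackoff(attempt: Int, maxBackoffMs: Long): Long {
//     val baseMs = 1_000L // hypothetical 1-second base interval
//     val exponent = (attempt - 1).coerceIn(0, 30)
//     return (baseMs shl exponent).coerceAtMost(maxBackoffMs)
//   }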

@@ -0,0 +1,112 @@
/*
 * Copyright 2024 Signal Messenger, LLC
 * SPDX-License-Identifier: AGPL-3.0-only
 */

package org.thoughtcrime.securesms.jobs

import org.signal.core.util.ThreadUtil
import org.signal.core.util.drain
import org.signal.core.util.logging.Log
import org.thoughtcrime.securesms.attachments.AttachmentId
import org.thoughtcrime.securesms.database.SignalDatabase
import org.thoughtcrime.securesms.dependencies.ApplicationDependencies
import org.thoughtcrime.securesms.jobmanager.Job
import org.thoughtcrime.securesms.jobmanager.defaultBackoffInterval
import java.io.File
import java.io.FileNotFoundException
import java.io.IOException
import java.security.DigestInputStream
import java.security.MessageDigest

/**
 * This job backfills hashes for attachments that were sent before we started hashing them.
 * In order to avoid hammering the device with hash calculations and disk I/O, this job will
 * calculate the hash for a single attachment and then reschedule itself to run again if necessary.
 */
class AttachmentHashBackfillJob private constructor(parameters: Parameters) : Job(parameters) {

  companion object {
    val TAG = Log.tag(AttachmentHashBackfillJob::class.java)

    const val KEY = "AttachmentHashBackfillJob"
  }

  private var activeFile: File? = null

  constructor() : this(
    Parameters.Builder()
      .setQueue(KEY)
      .setMaxInstancesForFactory(2)
      .setLifespan(Parameters.IMMORTAL)
      .setMaxAttempts(10)
      .build()
  )

  override fun serialize() = null

  override fun getFactoryKey() = KEY

  override fun run(): Result {
    val (file: File?, attachmentId: AttachmentId?) = SignalDatabase.attachments.getUnhashedDataFile() ?: (null to null)
    if (file == null || attachmentId == null) {
      Log.i(TAG, "No more unhashed files! Task complete.")
      return Result.success()
    }

    activeFile = file

    if (!file.exists()) {
      Log.w(TAG, "File does not exist! Clearing all usages.", true)
      SignalDatabase.attachments.clearUsagesOfDataFile(file)
      ApplicationDependencies.getJobManager().add(AttachmentHashBackfillJob())
      return Result.success()
    }

    try {
      val inputStream = SignalDatabase.attachments.getAttachmentStream(attachmentId, 0)
      val messageDigest = MessageDigest.getInstance("SHA-256")

      DigestInputStream(inputStream, messageDigest).use {
        it.drain()
      }

      val hash = messageDigest.digest()

      SignalDatabase.attachments.setHashForDataFile(file, hash)
    } catch (e: FileNotFoundException) {
      Log.w(TAG, "File could not be found! Clearing all usages.", true)
      SignalDatabase.attachments.clearUsagesOfDataFile(file)
    } catch (e: IOException) {
      Log.e(TAG, "Error hashing attachment. Retrying.", e)

      if (e.cause is FileNotFoundException) {
        Log.w(TAG, "Underlying cause was a FileNotFoundException. Clearing all usages.", true)
        SignalDatabase.attachments.clearUsagesOfDataFile(file)
      } else {
        return Result.retry(defaultBackoffInterval())
      }
    }

    // Sleep just so we don't hammer the device with hash calculations and disk I/O
    ThreadUtil.sleep(1000)

    ApplicationDependencies.getJobManager().add(AttachmentHashBackfillJob())
    return Result.success()
  }

  override fun onFailure() {
    activeFile?.let { file ->
      Log.w(TAG, "Failed to calculate hash, marking as unhashable: $file", true)
      SignalDatabase.attachments.markDataFileAsUnhashable(file)
    } ?: Log.w(TAG, "Job failed, but no active file is set!")

    ApplicationDependencies.getJobManager().add(AttachmentHashBackfillJob())
  }

  class Factory : Job.Factory<AttachmentHashBackfillJob> {
    override fun create(parameters: Parameters, serializedData: ByteArray?): AttachmentHashBackfillJob {
      return AttachmentHashBackfillJob(parameters)
    }
  }
}
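
// Usage sketch (not part of this commit): a single enqueue starts or resumes the
// whole backfill, since each run re-enqueues the job until getUnhashedDataFile()
// returns null. The AttachmentHashBackfillMigrationJob below does exactly this:
//
//   ApplicationDependencies.getJobManager().add(AttachmentHashBackfillJob())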

@@ -40,6 +40,7 @@ import org.thoughtcrime.securesms.migrations.AccountConsistencyMigrationJob;
import org.thoughtcrime.securesms.migrations.AccountRecordMigrationJob;
import org.thoughtcrime.securesms.migrations.ApplyUnknownFieldsToSelfMigrationJob;
import org.thoughtcrime.securesms.migrations.AttachmentCleanupMigrationJob;
import org.thoughtcrime.securesms.migrations.AttachmentHashBackfillMigrationJob;
import org.thoughtcrime.securesms.migrations.AttributesMigrationJob;
import org.thoughtcrime.securesms.migrations.AvatarIdRemovalMigrationJob;
import org.thoughtcrime.securesms.migrations.AvatarMigrationJob;

@@ -101,6 +102,7 @@ public final class JobManagerFactories {
    put(AttachmentCompressionJob.KEY, new AttachmentCompressionJob.Factory());
    put(AttachmentCopyJob.KEY, new AttachmentCopyJob.Factory());
    put(AttachmentDownloadJob.KEY, new AttachmentDownloadJob.Factory());
    put(AttachmentHashBackfillJob.KEY, new AttachmentHashBackfillJob.Factory());
    put(AttachmentMarkUploadedJob.KEY, new AttachmentMarkUploadedJob.Factory());
    put(AttachmentUploadJob.KEY, new AttachmentUploadJob.Factory());
    put(AutomaticSessionResetJob.KEY, new AutomaticSessionResetJob.Factory());

@@ -225,6 +227,7 @@ public final class JobManagerFactories {
    put(AccountRecordMigrationJob.KEY, new AccountRecordMigrationJob.Factory());
    put(ApplyUnknownFieldsToSelfMigrationJob.KEY, new ApplyUnknownFieldsToSelfMigrationJob.Factory());
    put(AttachmentCleanupMigrationJob.KEY, new AttachmentCleanupMigrationJob.Factory());
    put(AttachmentHashBackfillMigrationJob.KEY, new AttachmentHashBackfillMigrationJob.Factory());
    put(AttributesMigrationJob.KEY, new AttributesMigrationJob.Factory());
    put(AvatarIdRemovalMigrationJob.KEY, new AvatarIdRemovalMigrationJob.Factory());
    put(AvatarMigrationJob.KEY, new AvatarMigrationJob.Factory());

@@ -145,11 +145,12 @@ public class ApplicationMigrations {
    static final int STORAGE_LOCAL_UNKNOWNS_FIX = 101;
    static final int PNP_LAUNCH = 102;
    static final int EMOJI_VERSION_10 = 103;
    static final int ATTACHMENT_HASH_BACKFILL = 104;
  }

- public static final int CURRENT_VERSION = 103;
+ public static final int CURRENT_VERSION = 104;

  /**
   * This *must* be called after the {@link JobManager} has been instantiated, but *before* the call
   * to {@link JobManager#beginJobLoop()}. Otherwise, other non-migration jobs may have started
   * executing before we add the migration jobs.

@@ -662,6 +663,10 @@ public class ApplicationMigrations {
      jobs.put(Version.EMOJI_VERSION_10, new EmojiDownloadMigrationJob());
    }

    if (lastSeenVersion < Version.ATTACHMENT_HASH_BACKFILL) {
      jobs.put(Version.ATTACHMENT_HASH_BACKFILL, new AttachmentHashBackfillMigrationJob());
    }

    return jobs;
  }

@@ -0,0 +1,39 @@
/*
 * Copyright 2024 Signal Messenger, LLC
 * SPDX-License-Identifier: AGPL-3.0-only
 */

package org.thoughtcrime.securesms.migrations

import org.signal.core.util.logging.Log
import org.thoughtcrime.securesms.dependencies.ApplicationDependencies
import org.thoughtcrime.securesms.jobmanager.Job
import org.thoughtcrime.securesms.jobs.AttachmentHashBackfillJob
import java.lang.Exception

/**
 * Kicks off the attachment hash backfill process by enqueueing an [AttachmentHashBackfillJob].
 */
internal class AttachmentHashBackfillMigrationJob(parameters: Parameters = Parameters.Builder().build()) : MigrationJob(parameters) {

  companion object {
    val TAG = Log.tag(AttachmentHashBackfillMigrationJob::class.java)
    const val KEY = "AttachmentHashBackfillMigrationJob"
  }

  override fun getFactoryKey(): String = KEY

  override fun isUiBlocking(): Boolean = false

  override fun performMigration() {
    ApplicationDependencies.getJobManager().add(AttachmentHashBackfillJob())
  }

  override fun shouldRetry(e: Exception): Boolean = false

  class Factory : Job.Factory<AttachmentHashBackfillMigrationJob> {
    override fun create(parameters: Parameters, serializedData: ByteArray?): AttachmentHashBackfillMigrationJob {
      return AttachmentHashBackfillMigrationJob(parameters)
    }
  }
}