feat: finish implementing scheduling sync and pollers, destination-based config sync job

This commit is contained in:
0x330a
2023-09-18 16:13:25 +10:00
parent 43e72550f9
commit ec02087c6b
11 changed files with 113 additions and 68 deletions

View File

@@ -959,7 +959,7 @@ open class Storage(
groupCreationTimestamp
)
val keysBatchInfo = SnodeAPI.buildAuthenticatedStoreBatchInfo(
GroupKeysConfig.namespace(),
groupKeys.namespace(),
keysSnodeMessage,
adminKey
)

View File

@@ -11,6 +11,7 @@ import network.loki.messenger.libsession_util.GroupKeysConfig
import network.loki.messenger.libsession_util.GroupMembersConfig
import network.loki.messenger.libsession_util.UserGroupsConfig
import network.loki.messenger.libsession_util.UserProfile
import org.session.libsession.messaging.messages.Destination
import org.session.libsession.snode.SnodeAPI
import org.session.libsession.utilities.ConfigFactoryProtocol
import org.session.libsession.utilities.ConfigFactoryUpdateListener
@@ -358,4 +359,9 @@ class ConfigFactory(
val timestamp = SnodeAPI.nowWithOffset
configDatabase.storeGroupConfigs(pubKey, groupKeys.dump(), groupInfo.dump(), groupMembers.dump(), timestamp)
}
// Schedules a (debounced, de-duplicated) config sync job for [destination] by
// delegating to ConfigurationMessageUtilities.
// NOTE(review): routing through a UI-layer utility from the config factory is a
// layering shortcut — "there's probably a better way to do this".
override fun scheduleUpdate(destination: Destination) {
// there's probably a better way to do this
ConfigurationMessageUtilities.forceSyncConfigurationNowIfNeeded(destination)
}
}

View File

@@ -73,7 +73,7 @@ class PushRegistry @Inject constructor(
token: String,
publicKey: String,
userEd25519Key: KeyPair,
namespaces: List<Int> = listOf(Namespace.DEFAULT)
namespaces: List<Int> = listOf(Namespace.DEFAULT())
): Promise<*, Exception> {
Log.d(TAG, "register() called")

View File

@@ -53,7 +53,7 @@ class PushRegistryV2 @Inject constructor(private val pushReceiver: PushReceiver)
val requestParameters = SubscriptionRequest(
pubkey = publicKey,
session_ed25519 = userEd25519Key.publicKey.asHexString,
namespaces = listOf(Namespace.DEFAULT),
namespaces = listOf(Namespace.DEFAULT()),
data = true, // only permit data subscription for now (?)
service = device.service,
sig_ts = timestamp,

View File

@@ -104,7 +104,8 @@ class ClearAllDataDialog : DialogFragment() {
if (!deleteNetworkMessages) {
try {
ConfigurationMessageUtilities.forceSyncConfigurationNowIfNeeded(requireContext()).get()
// TODO: maybe convert this to a blocking config job
ConfigurationMessageUtilities.forceSyncConfigurationNowIfNeeded(requireContext())
} catch (e: Exception) {
Log.e("Loki", "Failed to force sync", e)
}

View File

@@ -1,7 +1,6 @@
package org.thoughtcrime.securesms.util
import android.content.Context
import android.provider.Telephony.Sms.Conversations
import network.loki.messenger.libsession_util.ConfigBase
import network.loki.messenger.libsession_util.Contacts
import network.loki.messenger.libsession_util.ConversationVolatileConfig
@@ -12,7 +11,6 @@ import network.loki.messenger.libsession_util.util.Contact
import network.loki.messenger.libsession_util.util.ExpiryMode
import network.loki.messenger.libsession_util.util.GroupInfo
import network.loki.messenger.libsession_util.util.UserPic
import nl.komponents.kovenant.Promise
import org.session.libsession.messaging.MessagingModuleConfiguration
import org.session.libsession.messaging.jobs.ConfigurationSyncJob
import org.session.libsession.messaging.jobs.JobQueue
@@ -27,6 +25,7 @@ import org.session.libsession.utilities.WindowDebouncer
import org.session.libsignal.crypto.ecc.DjbECPublicKey
import org.session.libsignal.utilities.Hex
import org.session.libsignal.utilities.IdPrefix
import org.session.libsignal.utilities.Log
import org.session.libsignal.utilities.SessionId
import org.session.libsignal.utilities.toHexString
import org.thoughtcrime.securesms.database.GroupDatabase
@@ -38,17 +37,16 @@ object ConfigurationMessageUtilities {
private val debouncer = WindowDebouncer(3000, Timer())
private fun scheduleConfigSync(userPublicKey: String) {
private fun scheduleConfigSync(destination: Destination) {
debouncer.publish {
// don't schedule job if we already have one
val storage = MessagingModuleConfiguration.shared.storage
val ourDestination = Destination.Contact(userPublicKey)
val currentStorageJob = storage.getConfigSyncJob(ourDestination)
val currentStorageJob = storage.getConfigSyncJob(destination)
if (currentStorageJob != null) {
(currentStorageJob as ConfigurationSyncJob).shouldRunAgain.set(true)
return@publish
}
val newConfigSync = ConfigurationSyncJob(ourDestination)
val newConfigSync = ConfigurationSyncJob(destination)
JobQueue.shared.add(newConfigSync)
}
}
@@ -60,7 +58,7 @@ object ConfigurationMessageUtilities {
val forcedConfig = TextSecurePreferences.hasForcedNewConfig(context)
val currentTime = SnodeAPI.nowWithOffset
if (ConfigBase.isNewConfigEnabled(forcedConfig, currentTime)) {
scheduleConfigSync(userPublicKey)
scheduleConfigSync(Destination.Contact(userPublicKey))
return
}
val lastSyncTime = TextSecurePreferences.getLastConfigurationSyncTime(context)
@@ -84,34 +82,21 @@ object ConfigurationMessageUtilities {
TextSecurePreferences.setLastConfigurationSyncTime(context, now)
}
fun forceSyncConfigurationNowIfNeeded(context: Context): Promise<Unit, Exception> {
// Requests a config sync for [destination]. The underlying scheduler is
// debounced and re-flags an already-queued ConfigurationSyncJob for the same
// destination instead of enqueueing a duplicate.
fun forceSyncConfigurationNowIfNeeded(destination: Destination) {
scheduleConfigSync(destination)
}
fun forceSyncConfigurationNowIfNeeded(context: Context) {
// add if check here to schedule new config job process and return early
val userPublicKey = TextSecurePreferences.getLocalNumber(context) ?: return Promise.ofFail(NullPointerException("User Public Key is null"))
val userPublicKey = TextSecurePreferences.getLocalNumber(context) ?: return Log.e("Loki", NullPointerException("User Public Key is null"))
val forcedConfig = TextSecurePreferences.hasForcedNewConfig(context)
val currentTime = SnodeAPI.nowWithOffset
if (ConfigBase.isNewConfigEnabled(forcedConfig, currentTime)) {
// schedule job if none exist
// don't schedule job if we already have one
scheduleConfigSync(userPublicKey)
return Promise.ofSuccess(Unit)
scheduleConfigSync(Destination.Contact(userPublicKey))
}
val contacts = ContactUtilities.getAllContacts(context).filter { recipient ->
!recipient.isGroupRecipient && !recipient.name.isNullOrEmpty() && !recipient.isLocalNumber && recipient.address.serialize().isNotEmpty()
}.map { recipient ->
ConfigurationMessage.Contact(
publicKey = recipient.address.serialize(),
name = recipient.name!!,
profilePicture = recipient.profileAvatar,
profileKey = recipient.profileKey,
isApproved = recipient.isApproved,
isBlocked = recipient.isBlocked,
didApproveMe = recipient.hasApprovedMe()
)
}
val configurationMessage = ConfigurationMessage.getCurrent(contacts) ?: return Promise.ofSuccess(Unit)
val promise = MessageSender.send(configurationMessage, Destination.from(Address.fromSerialized(userPublicKey)), isSyncMessage = true)
TextSecurePreferences.setLastConfigurationSyncTime(context, System.currentTimeMillis())
return promise
}
private fun maybeUserSecretKey() = MessagingModuleConfiguration.shared.getUserED25519KeyPair()?.secretKey?.asBytes

View File

@@ -292,7 +292,7 @@ class GroupMembersConfig(pointer: Long): ConfigBase(pointer), Closeable {
}
}
abstract class ConfigSig(pointer: Long) : Config(pointer)
sealed class ConfigSig(pointer: Long) : Config(pointer)
class GroupKeysConfig(pointer: Long): ConfigSig(pointer) {
companion object {

View File

@@ -2,15 +2,23 @@ package org.session.libsession.messaging.jobs
import network.loki.messenger.libsession_util.Config
import network.loki.messenger.libsession_util.ConfigBase
import network.loki.messenger.libsession_util.ConfigBase.Companion.protoKindFor
import network.loki.messenger.libsession_util.GroupKeysConfig
import network.loki.messenger.libsession_util.util.ConfigPush
import nl.komponents.kovenant.functional.bind
import org.session.libsession.messaging.MessagingModuleConfiguration
import org.session.libsession.messaging.messages.Destination
import org.session.libsession.messaging.messages.control.SharedConfigurationMessage
import org.session.libsession.messaging.sending_receiving.MessageSender
import org.session.libsession.messaging.utilities.Data
import org.session.libsession.snode.RawResponse
import org.session.libsession.snode.SnodeAPI
import org.session.libsession.snode.SnodeAPI.SnodeBatchRequestInfo
import org.session.libsession.snode.SnodeMessage
import org.session.libsession.utilities.ConfigFactoryProtocol
import org.session.libsignal.utilities.Base64
import org.session.libsignal.utilities.Log
import org.session.libsignal.utilities.SessionId
import java.util.concurrent.atomic.AtomicBoolean
// Permanent-failure marker for ConfigurationSyncJob: configs may only be pushed
// to our own swarm or to a closed group we belong to.
class InvalidDestination: Exception("Trying to push configs somewhere other than our swarm or a closed group")
@@ -30,8 +38,65 @@ data class ConfigurationSyncJob(val destination: Destination): Job {
// Result of building a sync: the configs to push (with their batch request info
// and seqNo) plus the obsolete message hashes to delete from the swarm.
data class SyncInformation(val configs: List<ConfigMessageInformation>, val toDelete: List<String>)
private fun destinationConfigs(delegate: JobDelegate, configFactoryProtocol: ConfigFactoryProtocol): SyncInformation {
TODO()
// Builds the set of config store requests for this job's destination, together
// with the obsolete message hashes that should be deleted from the swarm.
// Closed-group destinations push the group keys/info/members configs (signed
// with the group's signing key); any other destination pushes the current
// user's own configs (destination validation happens in execute()).
// NOTE(review): [delegate] and [dispatcherName] are unused in this function —
// confirm whether they are reserved for future error reporting.
private fun destinationConfigs(delegate: JobDelegate,
dispatcherName: String,
configFactoryProtocol: ConfigFactoryProtocol): SyncInformation {
// hashes of config messages made obsolete by the pushes built below
val toDelete = mutableListOf<String>()
val configsRequiringPush = if (destination is Destination.ClosedGroup) {
val sentTimestamp = SnodeAPI.nowWithOffset
// destination is a closed group, get all configs requiring push here
val groupId = SessionId.from(destination.publicKey)
// signing key comes from our UserGroups entry for this closed group
val signingKey = configFactoryProtocol.userGroups!!.getClosedGroup(destination.publicKey)!!.signingKey()
val keys = configFactoryProtocol.getGroupKeysConfig(groupId)!!
val info = configFactoryProtocol.getGroupInfoConfig(groupId)!!
val members = configFactoryProtocol.getGroupMemberConfig(groupId)!!
// keys config signals pending changes via pendingConfig(); the others via needsPush()
val requiringPush = listOf(keys, info, members).filter {
when (it) {
is GroupKeysConfig -> it.pendingConfig()?.isNotEmpty() == true
is ConfigBase -> it.needsPush()
else -> false
}
}
// free the objects that were created but won't be used after this point
// in case any of the configs don't need pushing, they won't be freed later
(listOf(keys,info,members) subtract requiringPush).forEach(Config::free)
requiringPush.map { config ->
// keys config pushes its raw pendingConfig() with seqNo 0 and no obsolete hashes
val (push, seqNo, obsoleteHashes) = if (config is GroupKeysConfig) {
ConfigPush(config.pendingConfig()!!, 0, emptyList()) // should not be null from filter step previous
} else if (config is ConfigBase) {
config.push()
} else throw IllegalArgumentException("Got a non group keys or config base object for config sync")
toDelete += obsoleteHashes
val message = SnodeMessage(destination.publicKey, Base64.encodeBytes(push), SnodeMessage.CONFIG_TTL, sentTimestamp)
ConfigMessageInformation(SnodeAPI.buildAuthenticatedStoreBatchInfo(config.namespace(), message, signingKey), config, seqNo)
}
} else {
// assume our own user as check already takes place in `execute` for our own key if contact
configFactoryProtocol.getUserConfigs().filter { it.needsPush() }.map { config ->
val (bytes, seqNo, obsoleteHashes) = config.push()
toDelete += obsoleteHashes
val message = messageForConfig(config, bytes, seqNo)
?: throw NullPointerException("SnodeBatchRequest message was null, check group keys exists")
ConfigMessageInformation(message, config, seqNo)
}
}
return SyncInformation(configsRequiringPush, toDelete)
}
// Wraps a user-config push in a SharedConfigurationMessage addressed to this
// job's destination and returns the authenticated store request for the
// config's namespace (null if the wrapped snode message could not be built).
private fun messageForConfig(
config: ConfigBase,
bytes: ByteArray,
seqNo: Long
): SnodeBatchRequestInfo? {
val wrappedSnodeMessage = MessageSender.buildWrappedMessageToSnode(
destination,
SharedConfigurationMessage(config.protoKindFor(), bytes, seqNo),
true
)
return SnodeAPI.buildAuthenticatedStoreBatchInfo(config.namespace(), wrappedSnodeMessage)
}
override suspend fun execute(dispatcherName: String) {
@@ -39,26 +104,17 @@ data class ConfigurationSyncJob(val destination: Destination): Job {
val userPublicKey = storage.getUserPublicKey()
val delegate = delegate ?: return Log.e("ConfigurationSyncJob", "No Delegate")
if (destination is Destination.Contact && destination.publicKey != userPublicKey) {
return delegate.handleJobFailedPermanently(this, dispatcherName, InvalidContactDestination())
} else if (destination !is Destination.ClosedGroup) {
if (destination !is Destination.ClosedGroup && (destination !is Destination.Contact || destination.publicKey != userPublicKey)) {
return delegate.handleJobFailedPermanently(this, dispatcherName, InvalidDestination())
}
// configFactory singleton instance will come in handy for modifying hashes and fetching configs for namespace etc
val configFactory = MessagingModuleConfiguration.shared.configFactory
// **** start user ****
// get latest states, filter out configs that don't need push
val configsRequiringPush = configFactory.getUserConfigs().filter { config -> config.needsPush() }
// don't run anything if we don't need to push anything
if (configsRequiringPush.isEmpty()) return delegate.handleJobSucceeded(this, dispatcherName)
// **** end user ****
// allow null results here so the list index matches configsRequiringPush
val sentTimestamp: Long = SnodeAPI.nowWithOffset
val (batchObjects, toDeleteHashes) = destinationConfigs(delegate, configFactory)
val (batchObjects, toDeleteHashes) = destinationConfigs(delegate, dispatcherName, configFactory)
if (batchObjects.isEmpty()) return delegate.handleJobSucceeded(this, dispatcherName)
val toDeleteRequest = toDeleteHashes.let { toDeleteFromAllNamespaces ->
if (toDeleteFromAllNamespaces.isEmpty()) null
@@ -86,22 +142,6 @@ data class ConfigurationSyncJob(val destination: Destination): Job {
val rawResponses = batchResponse.get()
@Suppress("UNCHECKED_CAST")
val responseList = (rawResponses["results"] as List<RawResponse>)
// we are always adding in deletions at the end
val deletionResponse = if (toDeleteRequest != null && responseList.isNotEmpty()) responseList.last() else null
val deletedHashes = deletionResponse?.let {
@Suppress("UNCHECKED_CAST")
// get the sub-request body
(deletionResponse["body"] as? RawResponse)?.let { body ->
// get the swarm dict
body["swarm"] as? RawResponse
}?.mapValues { (_, swarmDict) ->
// get the deleted values from dict
((swarmDict as? RawResponse)?.get("deleted") as? List<String>)?.toSet() ?: emptySet()
}?.values?.reduce { acc, strings ->
// create an intersection of all deleted hashes (common between all swarm nodes)
acc intersect strings
}
} ?: emptySet()
// at this point responseList index should line up with configsRequiringPush index
batchObjects.forEachIndexed { index, (message, config, seqNo) ->
@@ -125,6 +165,10 @@ data class ConfigurationSyncJob(val destination: Destination): Job {
if (config is ConfigBase && config.needsDump()) { // usually this will be true?
configFactory.persist(config, (message.params["timestamp"] as String).toLong())
}
if (destination is Destination.ClosedGroup) {
config.free() // after they are used, free the temporary group configs
}
}
} catch (e: Exception) {
Log.e(TAG, "Error performing batch request", e)
@@ -137,10 +181,6 @@ data class ConfigurationSyncJob(val destination: Destination): Job {
}
}
private fun getUserSyncInformation(delegate: JobDelegate) {
val userEdKeyPair = MessagingModuleConfiguration.shared.getUserED25519KeyPair()
}
fun Destination.destinationPublicKey(): String = when (this) {
is Destination.Contact -> publicKey
is Destination.ClosedGroup -> publicKey

View File

@@ -14,6 +14,7 @@ import org.session.libsession.messaging.MessagingModuleConfiguration
import org.session.libsession.messaging.jobs.BatchMessageReceiveJob
import org.session.libsession.messaging.jobs.JobQueue
import org.session.libsession.messaging.jobs.MessageReceiveParameters
import org.session.libsession.messaging.messages.Destination
import org.session.libsession.snode.RawResponse
import org.session.libsession.snode.SnodeAPI
import org.session.libsession.utilities.ConfigFactoryProtocol
@@ -101,6 +102,8 @@ class ClosedGroupPoller(private val executor: CoroutineScope,
val membersIndex = 2
val messageIndex = 3
val requiresSync = info.needsPush() || members.needsPush() || keys.needsRekey() || keys.pendingConfig() != null
val messagePoll = SnodeAPI.buildAuthenticatedRetrieveBatchRequest(
snode,
closedGroupSessionId.hexString(),
@@ -154,6 +157,9 @@ class ClosedGroupPoller(private val executor: CoroutineScope,
info.free()
members.free()
if (requiresSync) {
configFactoryProtocol.scheduleUpdate(Destination.ClosedGroup(closedGroupSessionId.hexString()))
}
} catch (e: Exception) {
if (ENABLE_LOGGING) Log.e("GroupPoller", "Polling failed for group", e)
return POLL_INTERVAL

View File

@@ -30,4 +30,8 @@ data class SnodeMessage(
"timestamp" to timestamp.toString(),
)
}
companion object {
// TTL for stored config messages: 30 days, expressed in milliseconds.
const val CONFIG_TTL: Long = 30 * 24 * 60 * 60 * 1000L
}
}

View File

@@ -9,6 +9,7 @@ import network.loki.messenger.libsession_util.GroupKeysConfig
import network.loki.messenger.libsession_util.GroupMembersConfig
import network.loki.messenger.libsession_util.UserGroupsConfig
import network.loki.messenger.libsession_util.UserProfile
import org.session.libsession.messaging.messages.Destination
import org.session.libsignal.utilities.SessionId
interface ConfigFactoryProtocol {
@@ -31,6 +32,8 @@ interface ConfigFactoryProtocol {
groupInfo: GroupInfoConfig,
groupMembers: GroupMembersConfig
)
// Request that a config sync be scheduled for [destination]; implementations
// may debounce or de-duplicate pending sync jobs.
fun scheduleUpdate(destination: Destination)
}
interface ConfigFactoryUpdateListener {