Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/ProtonMail/WebClients.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarco <marco.martinoli@protonmail.com>2022-10-06 11:43:41 +0300
committerMarco <marco.martinoli@protonmail.com>2022-10-06 11:43:41 +0300
commit621facd354722165fe9a264d4a6062d0a435db46 (patch)
tree382abf6cf8f8c7c14cdb466b1605a5d878fcd1e1
parent2ead70096f7b2d94cb09b77fe1732b2a241e0821 (diff)
Revert "ES hotfix"
This reverts commit 15998fa7e152bc228a9028dc6e55487f24480e9c.
-rw-r--r--applications/mail/src/app/components/header/search/AdvancedSearchFields/EncryptedSearchField.tsx7
-rw-r--r--applications/mail/src/app/constants.ts1
-rw-r--r--applications/mail/src/app/containers/EncryptedSearchProvider.tsx34
-rw-r--r--applications/mail/src/app/helpers/encryptedSearch/encryptedSearchMailHelpers.tsx14
-rw-r--r--applications/mail/src/app/helpers/encryptedSearch/migration.ts612
-rw-r--r--applications/mail/src/app/models/encryptedSearch.ts1
-rw-r--r--packages/encrypted-search/lib/esHelpers/esBuild.ts2
-rw-r--r--packages/encrypted-search/lib/esHelpers/esSync.ts2
-rw-r--r--packages/encrypted-search/lib/models/esCallbacks.ts5
-rw-r--r--packages/encrypted-search/lib/useEncryptedSearch.tsx10
10 files changed, 248 insertions, 440 deletions
diff --git a/applications/mail/src/app/components/header/search/AdvancedSearchFields/EncryptedSearchField.tsx b/applications/mail/src/app/components/header/search/AdvancedSearchFields/EncryptedSearchField.tsx
index 99011fc5f2..afb5fa3a41 100644
--- a/applications/mail/src/app/components/header/search/AdvancedSearchFields/EncryptedSearchField.tsx
+++ b/applications/mail/src/app/components/header/search/AdvancedSearchFields/EncryptedSearchField.tsx
@@ -43,7 +43,6 @@ const EncryptedSearchField = ({ esState }: Props) => {
isPaused,
contentIndexingDone,
activatingPartialES,
- isMigrating,
} = getESDBStatus();
const { esProgress, oldestTime, totalIndexingItems, estimatedMinutes, currentProgressValue } = esState;
@@ -84,10 +83,8 @@ const EncryptedSearchField = ({ esState }: Props) => {
: c('Info')
.t`This action will download the most recent messages so they can be searched locally. Clearing your browser data will disable this option.`;
- const esActivationTooltip = isMigrating
- ? c('Info').t`Updating your local messages, message content won't be searched during this update`
- : c('Info').t`The local database is being prepared`;
- const esActivationLoading = isMigrating || isEnablingEncryptedSearch || activatingPartialES;
+ const esActivationTooltip = c('Info').t`The local database is being prepared`;
+ const esActivationLoading = isEnablingEncryptedSearch || activatingPartialES;
const esActivationButton = (
<Button onClick={() => setEnableESModalOpen(true)} loading={esActivationLoading}>
{c('Action').t`Activate`}
diff --git a/applications/mail/src/app/constants.ts b/applications/mail/src/app/constants.ts
index 83190e5c24..4bdef9b777 100644
--- a/applications/mail/src/app/constants.ts
+++ b/applications/mail/src/app/constants.ts
@@ -134,7 +134,6 @@ export const defaultESMailStatus: ESDBStatusMail = {
temporaryToggleOff: false,
activatingPartialES: false,
lastContentTime: 0,
- isMigrating: false,
};
export const defaultESContextMail: EncryptedSearchFunctionsMail = {
...defaultESContext,
diff --git a/applications/mail/src/app/containers/EncryptedSearchProvider.tsx b/applications/mail/src/app/containers/EncryptedSearchProvider.tsx
index 46abea53fe..729946413d 100644
--- a/applications/mail/src/app/containers/EncryptedSearchProvider.tsx
+++ b/applications/mail/src/app/containers/EncryptedSearchProvider.tsx
@@ -224,35 +224,15 @@ const EncryptedSearchProvider = ({ children }: Props) => {
return esLibraryFunctions.esDelete();
}
- setESMailStatus((esMailStatus) => ({
- ...esMailStatus,
- isMigrating: true,
- }));
-
// Migrate old IDBs
- const success = await migrate(user.ID, api, getUserKeys, getMessageKeys, () =>
- esHelpers.queryItemsMetadata(new AbortController().signal)
- ).catch((error) => {
- esSentryReport(`migration: ${error.message}`, error);
- return false;
- });
-
- setESMailStatus((esMailStatus) => ({
- ...esMailStatus,
- isMigrating: false,
- }));
-
+ const success = await migrate(
+ user.ID,
+ getUserKeys,
+ () => esHelpers.queryItemsMetadata(new AbortController().signal),
+ esHelpers.getTotalItems
+ );
if (!success) {
- createNotification({
- text: c('Error')
- .t`There was a problem updating your local messages, they will be downloaded again to re-enable content search`,
- type: 'error',
- });
-
- return esLibraryFunctions
- .esDelete()
- .then(() => esLibraryFunctions.enableEncryptedSearch({ isRefreshed: true }))
- .then(() => esLibraryFunctions.enableContentSearch({ isRefreshed: true }));
+ await esLibraryFunctions.esDelete();
}
// Enable encrypted search for all new users. For paid users only,
diff --git a/applications/mail/src/app/helpers/encryptedSearch/encryptedSearchMailHelpers.tsx b/applications/mail/src/app/helpers/encryptedSearch/encryptedSearchMailHelpers.tsx
index c51f69ead3..4d84ec7eef 100644
--- a/applications/mail/src/app/helpers/encryptedSearch/encryptedSearchMailHelpers.tsx
+++ b/applications/mail/src/app/helpers/encryptedSearch/encryptedSearchMailHelpers.tsx
@@ -17,7 +17,6 @@ import {
testKeywords,
} from '@proton/encrypted-search';
import { queryMessageMetadata } from '@proton/shared/lib/api/messages';
-import { MAILBOX_LABEL_IDS } from '@proton/shared/lib/constants';
import { EVENT_ERRORS } from '@proton/shared/lib/errors';
import { hasBit } from '@proton/shared/lib/helpers/bitset';
import { Api, LabelCount, Recipient, UserModel } from '@proton/shared/lib/interfaces';
@@ -72,7 +71,7 @@ export const getESHelpers = ({
// We need to keep the recovery point for metadata indexing in memory
// for cases where IDB couldn't be instantiated but we still want to
// index content
- let metadataRecoveryPoint: MetadataRecoveryPoint | undefined;
+ let metadataRecoveryPoint: MetadataRecoveryPoint = {};
const queryItemsMetadata = async (signal: AbortSignal) => {
const messagesPromises: Promise<ESBaseMessage[] | undefined>[] = [];
const Messages: ESBaseMessage[] = [];
@@ -81,7 +80,7 @@ export const getESHelpers = ({
// Note that indexing, and therefore an instance of this function,
// can exist even without an IDB, because we can index in memory only.
// Therefore, we have to check if an IDB exists before querying it
- if (!recoveryPoint && (await checkVersionedESDB(userID))) {
+ if (await checkVersionedESDB(userID)) {
recoveryPoint = await readMetadataRecoveryPoint(userID);
}
@@ -95,8 +94,7 @@ export const getESHelpers = ({
signal,
queryMessageMetadata({
PageSize: ES_MAX_PARALLEL_ITEMS,
- Limit: ES_MAX_PARALLEL_ITEMS,
- Location: MAILBOX_LABEL_IDS.ALL_MAIL,
+ Location: '5',
Sort: 'Time',
Desc: 1,
Page,
@@ -139,11 +137,9 @@ export const getESHelpers = ({
return {
resultMetadata: Messages,
- setRecoveryPoint: async (setIDB: boolean = true) => {
+ setRecoveryPoint: async () => {
metadataRecoveryPoint = newRecoveryPoint;
- if (setIDB) {
- await setMetadataRecoveryPoint(userID, newRecoveryPoint);
- }
+ return setMetadataRecoveryPoint(userID, newRecoveryPoint);
},
};
};
diff --git a/applications/mail/src/app/helpers/encryptedSearch/migration.ts b/applications/mail/src/app/helpers/encryptedSearch/migration.ts
index 1ccd36dac9..87d320373d 100644
--- a/applications/mail/src/app/helpers/encryptedSearch/migration.ts
+++ b/applications/mail/src/app/helpers/encryptedSearch/migration.ts
@@ -12,7 +12,6 @@
import { IDBPDatabase, IDBPTransaction, openDB } from 'idb';
import {
- AesGcmCiphertext,
CiphertextToStore,
ES_MAX_ITEMS_PER_BATCH,
EncryptedSearchDB,
@@ -22,7 +21,6 @@ import {
decryptFromDB,
defaultESProgress,
encryptItem,
- esSentryReport,
getIndexKey,
openESDB,
readMetadataRecoveryPoint,
@@ -31,20 +29,12 @@ import {
setMetadataActiveProgressStatus,
setMetadataRecoveryPoint,
setMigrated,
- sizeOfESItem,
} from '@proton/encrypted-search';
-import { queryMessageMetadata } from '@proton/shared/lib/api/messages';
-import { MAILBOX_LABEL_IDS } from '@proton/shared/lib/constants';
-import runInQueue from '@proton/shared/lib/helpers/runInQueue';
import { getItem } from '@proton/shared/lib/helpers/storage';
-import { Api } from '@proton/shared/lib/interfaces';
-import { Message } from '@proton/shared/lib/interfaces/mail/Message';
-import isTruthy from '@proton/utils/isTruthy';
import { MAIL_EVENTLOOP_NAME } from '../../constants';
-import { GetMessageKeys } from '../../hooks/message/useGetMessageKeys';
import { ESBaseMessage, ESMessage } from '../../models/encryptedSearch';
-import { fetchMessage, getBaseMessage } from './esBuild';
+import { getBaseMessage } from './esBuild';
/**
* Interface of the old progress blob as we used to store in local
@@ -75,247 +65,100 @@ const getESBlobs = (userID: string) => ({
isEnabled: getItem(`ES:${userID}:ESEnabled`) === 'true',
});
-const updateSize = async (sizeDelta: number, esDB: IDBPDatabase<EncryptedSearchDB>) => {
- const oldSize: number = await esDB.get('config', 'size');
- const newSize = oldSize + sizeDelta;
- return esDB.put('config', newSize, 'size');
-};
-
-const recoverIndex = async (
- indexKey: CryptoKey,
+/**
+ * Metadata indexing routine to complete a metadata indexing
+ */
+const completeMetadataIndexing = async (
esDB: IDBPDatabase<EncryptedSearchDB>,
- api: Api,
- getMessageKeys: GetMessageKeys,
+ indexKey: CryptoKey,
queryItemsMetadata: () => Promise<{
resultMetadata?: ESBaseMessage[] | undefined;
- setRecoveryPoint?: ((setIDB?: boolean) => Promise<void>) | undefined;
+ setRecoveryPoint?: (() => Promise<void>) | undefined;
}>
) => {
let { resultMetadata, setRecoveryPoint } = await queryItemsMetadata();
if (!resultMetadata) {
- throw new Error('Metadata could not be fetched');
+ return false;
}
while (resultMetadata.length) {
- // We check whether a batch of messages' metadata is already
- // present in IDB one by one. If we find a missing one, metadata
- // are stored directly, while content is fetched later
- const contentToStore: string[] = (
+ try {
+ const ciphertexts: CiphertextToStore[] = await Promise.all(
+ resultMetadata.map(async (metadata) => ({
+ itemID: metadata.ID,
+ aesGcmCiphertext: await encryptItem(metadata, indexKey),
+ }))
+ );
+
+ const tx = esDB.transaction('metadata', 'readwrite');
await Promise.all(
- resultMetadata.map(async (metadata) => {
- if ((await esDB.count('metadata', metadata.ID)) === 0) {
- await esDB.put('metadata', await encryptItem(metadata, indexKey), metadata.ID);
- await updateSize(sizeOfESItem(metadata), esDB);
- return metadata.ID;
- }
- })
- )
- ).filter(isTruthy);
-
- if (contentToStore.length) {
- for (const messageID of contentToStore) {
- const itemToStore = await fetchMessage(messageID, api, getMessageKeys);
- if (!itemToStore) {
- continue;
- }
- await esDB.put('content', await encryptItem(itemToStore, indexKey), messageID);
- await updateSize(sizeOfESItem(itemToStore), esDB);
- }
+ ciphertexts.map((ciphertext) => tx.store.put(ciphertext.aesGcmCiphertext, ciphertext.itemID))
+ );
+ await tx.done;
+ } catch (error: any) {
+ return false;
}
if (setRecoveryPoint) {
- await setRecoveryPoint(false);
+ await setRecoveryPoint();
}
({ resultMetadata, setRecoveryPoint } = await queryItemsMetadata());
if (!resultMetadata) {
- throw new Error('Metadata could not be fetched');
+ return false;
}
}
-};
-const checkIndexCorruption = async (userID: string, api: Api, esDB: IDBPDatabase<EncryptedSearchDB>) => {
- // In order to establish whether IDB is limited or not, we check
- // whether the oldest message in the mailbox is contained. This
- // is not definitive, as there is the chance that the mistakenly
- // missing message (in case of a corrupted IDB) is precisely that
- // one. This possibility seems remote enough
- const {
- Messages: [mailboxOldestMessage],
- } = await api<{ Messages: Message[] }>({
- ...queryMessageMetadata({
- PageSize: 1,
- Limit: 1,
- Location: MAILBOX_LABEL_IDS.ALL_MAIL,
- Sort: 'Time',
- Desc: 0,
- }),
- });
+ return true;
+};
- const isDBLimited = !mailboxOldestMessage || (await esDB.count('metadata', mailboxOldestMessage.ID)) === 0;
- await esDB.put('config', isDBLimited, 'limited');
-
- // In order to establish whether IDB is corrupted or not, we check
- // whether the number of messages older than most recent message is
- // the same in IDB and in the mailbox. Note that if IDB is also
- // limited, we need to set a lower bound on the messages' age
- // in order to count consistently
-
- const {
- oldestMessage,
- mostRecentMessage: { ID: EndID, Time: End },
- } = await readMigrated(userID);
-
- const { Total } = await api<{ Total: number }>({
- ...queryMessageMetadata({
- Location: MAILBOX_LABEL_IDS.ALL_MAIL,
- End,
- EndID,
- Begin: isDBLimited ? oldestMessage.Time : undefined,
- BeginID: isDBLimited ? oldestMessage.ID : undefined,
- }),
- });
+/**
+ * Move ciphertexts from the messages table to the metadata one
+ */
+const moveCiphertexts = async (tx: IDBPTransaction<unknown, string[], 'versionchange'>) => {
+ const messagesOS = tx.objectStore('messages');
+ const metadataOS = tx.objectStore('metadata');
+ const count = await messagesOS.count();
- // Note that Total excludes the EndID message, therefore
- // we add back 1
- return Total + 1 !== (await esDB.count('metadata'));
+ let recoveryPoint: string | undefined;
+ for (let batch = 0; batch < count; batch += ES_MAX_ITEMS_PER_BATCH) {
+ const storedData = await messagesOS.getAll(
+ !!recoveryPoint ? IDBKeyRange.lowerBound(recoveryPoint, true) : undefined,
+ ES_MAX_ITEMS_PER_BATCH
+ );
+ await Promise.all(storedData.map(({ aesGcmCiphertext, ID }) => metadataOS.put(aesGcmCiphertext, ID)));
+ recoveryPoint = storedData[storedData.length - 1].ID;
+ }
};
-const completeMetadataIndexing = async (
+/**
+ * Conclude a potentially pending migration by splitting the full
+ * ciphertexts that are now stored in the metadata table to the
+ * two tables, metadata and content.
+ */
+export const finalizeMigration = async (
userID: string,
- indexKey: CryptoKey,
- esDB: IDBPDatabase<EncryptedSearchDB>,
+ getUserKeys: GetUserKeys,
queryItemsMetadata: () => Promise<{
resultMetadata?: ESBaseMessage[] | undefined;
- setRecoveryPoint?: ((setIDB?: boolean) => Promise<void>) | undefined;
+ setRecoveryPoint?: (() => Promise<void>) | undefined;
}>
) => {
- let { resultMetadata, setRecoveryPoint } = await queryItemsMetadata();
- if (!resultMetadata) {
- throw new Error('Metadata could not be fetched');
- }
-
- while (resultMetadata.length) {
- const ciphertexts: CiphertextToStore[] = await Promise.all(
- resultMetadata.map(async (metadata) => ({
- itemID: metadata.ID,
- aesGcmCiphertext: await encryptItem(metadata, indexKey),
- }))
- );
-
- const tx = esDB.transaction('metadata', 'readwrite');
- await Promise.all(
- ciphertexts.map((ciphertext) => tx.store.put(ciphertext.aesGcmCiphertext, ciphertext.itemID))
- );
- await tx.done;
-
- if (setRecoveryPoint) {
- await setRecoveryPoint();
- }
-
- ({ resultMetadata, setRecoveryPoint } = await queryItemsMetadata());
- if (!resultMetadata) {
- throw new Error('Metadata could not be fetched');
- }
+ const migrated = await readMigrated(userID);
+ if (!migrated) {
+ return true;
}
- // Finally we set the metadata indexing as completed and the content
- // indexing where it left off in the old version
- await setMetadataActiveProgressStatus(userID);
-
- const { progressBlob, isPaused } = getESBlobs(userID);
-
- // The progress blob must exist since the decision to continue
- // indexing was off it
- const {
- totalItems: oldTotalItems,
- totalMessages,
- numPauses,
- isRefreshed,
- timestamps: oldTimestamps,
- originalEstimate,
- } = progressBlob!;
-
- const totalItems = totalMessages || oldTotalItems;
- const timestamps = oldTimestamps.map((oldTimestamp) => {
- const { type: oldType, time } = oldTimestamp;
- let type: TIMESTAMP_TYPE;
- switch (oldType) {
- case 'start':
- type = TIMESTAMP_TYPE.START;
- break;
- case 'step':
- type = TIMESTAMP_TYPE.STEP;
- break;
- case 'stop':
- type = TIMESTAMP_TYPE.STOP;
- break;
- }
- return { type, time };
- });
-
- // The oldest message indexed is the one to resume from for content indexing
- const {
- oldestMessage: { Time, Order },
- } = await readMigrated(userID);
- const contentRP = [Time, Order];
-
- await esDB.put(
- 'indexingProgress',
- {
- totalItems,
- numPauses,
- isRefreshed,
- timestamps,
- originalEstimate,
- recoveryPoint: contentRP,
- status: isPaused ? INDEXING_STATUS.PAUSED : INDEXING_STATUS.INDEXING,
- },
- 'content'
- );
-};
-
-const checkPreviousIndexing = async (userID: string, esDB: IDBPDatabase<EncryptedSearchDB>) => {
- const { progressBlob } = getESBlobs(userID);
-
- if (!progressBlob) {
- // ES was fully activated
- await setMetadataActiveProgressStatus(userID);
-
- // We don't use the equivalent helper as that requires cache
- // for safe storing, which is not an issue at this point
- await esDB.put(
- 'indexingProgress',
- {
- ...defaultESProgress,
- status: INDEXING_STATUS.ACTIVE,
- },
- 'content'
- );
-
+ const indexKey = await getIndexKey(getUserKeys, userID);
+ if (!indexKey) {
return false;
}
- // ES was indexing. We need to complete metadata indexing first, which is considered
- // part of the migration, then leave a recoveryPoint for content indexing to resume
- // from there. The starting point of metadata indexing should be the recovery point
- // in its progress row, if it's in the right format, or the one from the migrated
- // row of the config table
- const metadataRP = await readMetadataRecoveryPoint(userID);
- if (!metadataRP.End || !metadataRP.EndID) {
- // Read reacovery point from the config table
- const {
- oldestMessage: { ID, Time },
- } = await readMigrated(userID);
- await setMetadataRecoveryPoint(userID, { End: Time, EndID: ID });
+ const esDB = await openESDB(userID);
+ if (!esDB) {
+ return false;
}
- return true;
-};
-
-const splitTables = async (indexKey: CryptoKey, esDB: IDBPDatabase<EncryptedSearchDB>) => {
- const maxConcurrent = navigator.hardwareConcurrency || 2;
-
// In case splitting metadata and content hasn't happened yet,
// we perform it now. We check whether that's the case by
// looking at the metadata recoveryPoint. If it's undefined,
@@ -324,20 +167,7 @@ const splitTables = async (indexKey: CryptoKey, esDB: IDBPDatabase<EncryptedSear
// from which to continue. Otherwise, the recoveryPoint actually
// refers to metadata indexing itself, i.e. splitting had already
// occurred
- const progress = await esDB.get('indexingProgress', 'metadata');
- if (!progress) {
- throw new Error('Metadata progress could not be fetched');
- }
-
- // In case the status is already ACTIVE, it means that tables have
- // already been split, and the migration has been triggered again for
- // another reason, e.g. a previously failed index recovery
- if (progress.status == INDEXING_STATUS.ACTIVE) {
- return;
- }
-
- let splittingRP = progress.recoveryPoint;
-
+ let splittingRP = await readMetadataRecoveryPoint(userID);
if (typeof splittingRP === 'undefined' || typeof splittingRP === 'string') {
let storedData = await esDB.getAll(
'metadata',
@@ -346,50 +176,31 @@ const splitTables = async (indexKey: CryptoKey, esDB: IDBPDatabase<EncryptedSear
);
while (storedData.length) {
- const esIteratee = async (aesGcmCiphertext: AesGcmCiphertext) => {
- const message = await decryptFromDB<ESMessage>(aesGcmCiphertext, indexKey);
-
- const [metadata, content] = await Promise.all([
- encryptItem(getBaseMessage(message), indexKey),
- typeof message.decryptedBody === 'string' || typeof message.decryptedSubject === 'string'
- ? encryptItem(
- {
- decryptedBody: message.decryptedBody,
- decryptedSubject: message.decryptedSubject,
- },
- indexKey
- )
- : undefined,
- ]);
-
- return {
- ID: message.ID,
- metadata,
- content,
- };
- };
-
- const encrypted = await runInQueue(
- storedData.map((aesGcmCiphertext) => () => esIteratee(aesGcmCiphertext)),
- maxConcurrent
+ const messages = await Promise.all(
+ storedData.map(async (aesGcmCiphertext) => decryptFromDB<ESMessage>(aesGcmCiphertext, indexKey))
);
- const txContent = esDB.transaction('content', 'readwrite');
- const txMetadata = esDB.transaction('metadata', 'readwrite');
-
- encrypted.forEach(({ ID, metadata, content }) => {
- if (!!content) {
- void txContent.store.put(content, ID);
- }
- void txMetadata.store.put(metadata, ID);
- });
-
- await txContent.done;
- await txMetadata.done;
-
- splittingRP = encrypted[encrypted.length - 1].ID;
- esDB.put('indexingProgress', { ...progress, recoveryPoint: splittingRP }, 'metadata');
+ await Promise.all(
+ messages.map((message) =>
+ Promise.all([
+ encryptItem(getBaseMessage(message), indexKey).then((metadata) =>
+ esDB.put('metadata', metadata, message.ID)
+ ),
+ typeof message.decryptedBody === 'string' || typeof message.decryptedSubject === 'string'
+ ? encryptItem(
+ {
+ decryptedBody: message.decryptedBody,
+ decryptedSubject: message.decryptedSubject,
+ },
+ indexKey
+ ).then((content) => esDB.put('content', content, message.ID))
+ : undefined,
+ ])
+ )
+ );
+ splittingRP = messages[messages.length - 1].ID;
+ await setMetadataRecoveryPoint(userID, splittingRP);
storedData = await esDB.getAll(
'metadata',
IDBKeyRange.lowerBound(splittingRP, true),
@@ -397,36 +208,152 @@ const splitTables = async (indexKey: CryptoKey, esDB: IDBPDatabase<EncryptedSear
);
}
}
-};
-const moveCiphertexts = async (tx: IDBPTransaction<unknown, string[], 'versionchange'>) => {
- const messagesOS = tx.objectStore('messages');
- const metadataOS = tx.objectStore('metadata');
+ // Conclude metadata indexing if needed, then set content
+ // indexing progress as appropriate
+ const { progressBlob, isPaused } = getESBlobs(userID);
+
+ if (!progressBlob) {
+ // Case 2. ES was fully activated
+ await setMetadataActiveProgressStatus(userID);
+ await esDB.put(
+ 'indexingProgress',
+ {
+ ...defaultESProgress,
+ status: INDEXING_STATUS.ACTIVE,
+ },
+ 'content'
+ );
+ } else {
+ // Case 3. ES was indexing
+
+ // We need to complete metadata indexing first, which is considered part of the
+ // migration, then leave a recoveryPoint for content indexing to resume from there.
+ // The starting point of metadata indexing should be the recovery point in its
+ // progress blob, if it's in the right format, or the one from the migrated row
+ // of the config table
+ let metadataRP = await readMetadataRecoveryPoint(userID);
+ if (!metadataRP.End || !metadataRP.EndID) {
+ // Read recovery point from the config table
+ const { ID, Time } = migrated;
+ metadataRP = { End: Time, EndID: ID };
+ await setMetadataRecoveryPoint(userID, metadataRP);
+ }
- let storedData = await messagesOS.getAll(undefined, ES_MAX_ITEMS_PER_BATCH);
+ const success = await completeMetadataIndexing(esDB, indexKey, queryItemsMetadata);
- while (storedData.length) {
- await Promise.all(storedData.map(({ aesGcmCiphertext, ID }) => metadataOS.put(aesGcmCiphertext, ID)));
+ // Fetching metadata retries in case of a network error,
+ // which means that if the above didn't succeed it must
+ // be for a permanent error
+ if (!success) {
+ return false;
+ }
- storedData = await messagesOS.getAll(
- IDBKeyRange.lowerBound(storedData[storedData.length - 1].ID, true),
- ES_MAX_ITEMS_PER_BATCH
+ // Finally we set the metadata indexing as completed and the content
+ // indexing where it left off in the old version
+ await setMetadataActiveProgressStatus(userID);
+
+ const {
+ totalItems: oldTotalItems,
+ totalMessages,
+ numPauses,
+ isRefreshed,
+ timestamps: oldTimestamps,
+ originalEstimate,
+ } = progressBlob;
+
+ const totalItems = totalMessages || oldTotalItems;
+ const timestamps = oldTimestamps.map((oldTimestamp) => {
+ const { type: oldType, time } = oldTimestamp;
+ let type: TIMESTAMP_TYPE;
+ switch (oldType) {
+ case 'start':
+ type = TIMESTAMP_TYPE.START;
+ break;
+ case 'step':
+ type = TIMESTAMP_TYPE.STEP;
+ break;
+ case 'stop':
+ type = TIMESTAMP_TYPE.STOP;
+ break;
+ }
+ return { type, time };
+ });
+
+ const { Time, Order } = migrated;
+ const contentRP = [Time, Order];
+
+ await esDB.put(
+ 'indexingProgress',
+ {
+ totalItems,
+ numPauses,
+ isRefreshed,
+ timestamps,
+ originalEstimate,
+ recoveryPoint: contentRP,
+ status: isPaused ? INDEXING_STATUS.PAUSED : INDEXING_STATUS.INDEXING,
+ },
+ 'content'
);
}
+
+ await setMigrated(userID);
+ removeESFlags(userID);
+
+ esDB.close();
+
+ return true;
};
-const structuralMigration = async (userID: string) => {
- const { armoredIndexKey, lastEventID, size, isEnabled } = getESBlobs(userID);
+/**
+ * There are three possible states of the old version of ES:
+ * 1. ES was never activated. A reliable way for checking
+ * this is if the index key in local storage doesn't exist.
+ * In this case nothing should be done;
+ * 2. ES was fully activated. A reliable way for checking
+ * this is if the index key exists in local storage and the
+ * progress blob doesn't, meaning that indexing completed.
+ * In this case we migrate old IDB and local storage to new
+ * IDB and then proceed with normal operations (i.e. catching
+ * up from last events);
+ * 3. ES was indexing. This is the trickiest case because the old
+ * version of ES indexed metadata and content together, while
+ * now we rely on metadata already being there when indexing
+ * content. In this case we want to complete the indexing of
+ * metadata first but in such a way that previously indexed
+ * content stays there (therefore we cannot use the
+ * buildMetadataDB helper as that would overwrite existing items)
+ * and then set the status of content indexing where it was left
+ * off.
+ * Note that both points 2. and 3. require to go through the existing
+ * data in IDB (be it full or partial), decrypt it and split it in two
+ * tables. This means that such an operation should be done irrespectively
+ */
+export const migrate = async (
+ userID: string,
+ getUserKeys: GetUserKeys,
+ queryItemsMetadata: () => Promise<{
+ resultMetadata?: ESBaseMessage[] | undefined;
+ setRecoveryPoint?: (() => Promise<void>) | undefined;
+ }>,
+ getTotalItems: () => Promise<number>
+) => {
+ const { armoredIndexKey, lastEventID, size, isEnabled, progressBlob } = getESBlobs(userID);
+
+ // Case 1. ES was never activated
+ if (!armoredIndexKey) {
+ return true;
+ }
// We need the last event ID to be stored in the events
// table to then sync IDB from that point
if (!lastEventID) {
- throw new Error('Last event ID is not defined');
+ return false;
}
- // Note that if a structural migration had already been performed
- // the version change callback is not invoked therefore its code
- // is not executed
+ const totalItems = await getTotalItems();
+
await openDB(`ES:${userID}:DB`, 2, {
upgrade: async (...args) => {
const [newESDB, , , tx] = args;
@@ -436,19 +363,18 @@ const structuralMigration = async (userID: string) => {
await configOS.put(armoredIndexKey, 'indexKey');
await configOS.put(size, 'size');
await configOS.put(isEnabled, 'enabled');
- await configOS.put(false, 'limited');
-
- // We store both the most recent and the oldest message since
- // both are needed at various steps of the migration
- const old = (await tx.objectStore('messages').index('byTime').openCursor(null, 'next'))?.value;
- const recent = (await tx.objectStore('messages').index('byTime').openCursor(null, 'prev'))?.value;
- await configOS.put(
- {
- oldestMessage: { ID: old.ID, Time: old.Time, Order: old.Order },
- mostRecentMessage: { ID: recent.ID, Time: recent.Time, Order: recent.Order },
- },
- 'migrated'
- );
+ // We need to store the oldest message indexed in case
+ // indexing was in progress, in order to complete it
+ const { ID, Time, Order } = (await tx.objectStore('messages').index('byTime').getAll(undefined, 1))[0];
+ await configOS.put({ ID, Time, Order }, 'migrated');
+
+ // We store whether the DB was limited
+ let limited = false;
+ if (!!progressBlob) {
+ const count = await tx.objectStore('messages').count();
+ limited = count < totalItems;
+ }
+ await configOS.put(limited, 'limited');
// Create the new events object store and fill it accordingly
const eventsOS = newESDB.createObjectStore('events');
@@ -462,94 +388,14 @@ const structuralMigration = async (userID: string) => {
const indexingProgressOS = newESDB.createObjectStore('indexingProgress');
await indexingProgressOS.put(defaultESProgress, 'metadata');
- // Create the metadata and content object stores
- newESDB.createObjectStore('content');
+ // Create the metadata and content object stores and move all ciphertexts
+ // from "messages" into the former, such that "messages" can be removed
newESDB.createObjectStore('metadata');
+ newESDB.createObjectStore('content');
await moveCiphertexts(tx);
newESDB.deleteObjectStore('messages');
},
});
-};
-
-/**
- * MIGRATION STEPS:
- * 1. Check whether ES had ever been initialised.
- * 2. Perform the "structural" migration, i.e. inside the version change transaction.
- * 2a. Open IDB with the new version to trigger the version change.
- * 2b. Create and populate the "config", "events" and "indexingProgress" tables.
- * 2c. Extract oldest and newest messages from the "messages" table.
- * 2d. Create the "content" and "metadata" tables, move the content of the "message"
- * table to "metadata" and remove it.
- * 3. Split old content into "metadata" and "content" tables.
- * 4. Check whether indexing was in progress prior to the migration and set the
- * metadata recovery point accordingly.
- * 5. If it was, conclude metadata indexing, set flags for content indexing and exit.
- * 6. If it wasn't, check whether IDB was incomplete or corrupt.
- * 7. If IDB was corrupt, recover it.
- */
-export const migrate = async (
- userID: string,
- api: Api,
- getUserKeys: GetUserKeys,
- getMessageKeys: GetMessageKeys,
- queryItemsMetadata: () => Promise<{
- resultMetadata?: ESBaseMessage[] | undefined;
- setRecoveryPoint?: ((setIDB?: boolean) => Promise<void>) | undefined;
- }>
-) => {
- const { armoredIndexKey } = getESBlobs(userID);
-
- // STEP 1
- if (!armoredIndexKey) {
- // ES was never activated or it's already been migrated, since
- // in that case there would be no more blobs in LS
- return true;
- }
-
- // STEP 2
- await structuralMigration(userID);
-
- const indexKey = await getIndexKey(getUserKeys, userID);
- if (!indexKey) {
- return false;
- }
-
- const esDB = await openESDB(userID);
- if (!esDB) {
- throw new Error('ES IDB could not be opened');
- }
- // STEP 3
- await splitTables(indexKey, esDB);
-
- // STEP 4
- const shouldCompleteIndexing = await checkPreviousIndexing(userID, esDB);
- if (shouldCompleteIndexing) {
- // STEP 5
- await completeMetadataIndexing(userID, indexKey, esDB, queryItemsMetadata);
- } else {
- // STEP 6
- const shouldRecoverIndex = await checkIndexCorruption(userID, api, esDB);
- if (shouldRecoverIndex) {
- // STEP 7
- try {
- await recoverIndex(indexKey, esDB, api, getMessageKeys, queryItemsMetadata);
- } catch (error: any) {
- // Recovery is the only step which is allowed to fail without compromising
- // the migration. Since we don't remove flags in LS nor the "migrated" row
- // from the "config" table, migration will be retried but all previous
- // steps will account to nothing, therefore recovery will be retried.
- esSentryReport(`migration>recoverIndex: ${error.message}`, error);
- esDB.close();
- return true;
- }
- }
- }
-
- esDB.close();
-
- await setMigrated(userID);
- removeESFlags(userID);
-
- return true;
+ return finalizeMigration(userID, getUserKeys, queryItemsMetadata);
};
diff --git a/applications/mail/src/app/models/encryptedSearch.ts b/applications/mail/src/app/models/encryptedSearch.ts
index 513d724ca7..7c86e5f733 100644
--- a/applications/mail/src/app/models/encryptedSearch.ts
+++ b/applications/mail/src/app/models/encryptedSearch.ts
@@ -37,7 +37,6 @@ export interface ESDBStatusMail {
temporaryToggleOff: boolean;
activatingPartialES: boolean;
lastContentTime: number;
- isMigrating: boolean;
}
export interface EncryptedSearchFunctionsMail
diff --git a/packages/encrypted-search/lib/esHelpers/esBuild.ts b/packages/encrypted-search/lib/esHelpers/esBuild.ts
index 2eec628e3a..d311a22e16 100644
--- a/packages/encrypted-search/lib/esHelpers/esBuild.ts
+++ b/packages/encrypted-search/lib/esHelpers/esBuild.ts
@@ -201,7 +201,7 @@ export const buildMetadataDB = async <ESItemMetadata>(
esCacheRef: React.MutableRefObject<ESCache<ESItemMetadata, unknown>>,
queryItemsMetadata: (signal: AbortSignal) => Promise<{
resultMetadata?: ESItemMetadata[];
- setRecoveryPoint?: (setIDB?: boolean) => Promise<void>;
+ setRecoveryPoint?: () => Promise<void>;
}>,
getItemInfo: GetItemInfo<ESItemMetadata>,
abortIndexingRef: React.MutableRefObject<AbortController>,
diff --git a/packages/encrypted-search/lib/esHelpers/esSync.ts b/packages/encrypted-search/lib/esHelpers/esSync.ts
index e0b45a76c1..1ecc3ab6fd 100644
--- a/packages/encrypted-search/lib/esHelpers/esSync.ts
+++ b/packages/encrypted-search/lib/esHelpers/esSync.ts
@@ -189,7 +189,7 @@ export const syncItemEvents = async <ESItemContent, ESItemMetadata, ESSearchPara
const oldItem = esCacheRef.current.esCache.get(ID);
// Note that the item must exist, at the very least its metadata, in cache
if (!oldItem) {
- continue;
+ throw new Error("Trying to update an item that doesn't exist");
}
const itemToCache: CachedItem<ESItemMetadata, ESItemContent> = {
diff --git a/packages/encrypted-search/lib/models/esCallbacks.ts b/packages/encrypted-search/lib/models/esCallbacks.ts
index b9fd51401e..0db6d68db1 100644
--- a/packages/encrypted-search/lib/models/esCallbacks.ts
+++ b/packages/encrypted-search/lib/models/esCallbacks.ts
@@ -9,8 +9,7 @@ import { ESCache, ESEvent, EventsObject } from './interfaces';
interface RequiredESHelpers<ESItemMetadata, ESSearchParameters> {
/**
* Retrieve a batch of items' metadata, the mechanism to keep track of where the fetching
- * has arrived is supposed to be built-in (but can optionally take a boolean indicating whether
- * to store progress in IDB or only in memory. It defaults to true, i.e. store in IDB too.)
+ * has arrived is supposed to be built-in
* @param signal an abort signal to abort potential API calls in case of sudden aborts
* @returns An array of metadata items, i.e. the next batch of items that need to be indexed,
* as well as a callback to set the recovery point in IndexedDB for the next call to queryItemsMetadata
@@ -19,7 +18,7 @@ interface RequiredESHelpers<ESItemMetadata, ESSearchParameters> {
*/
queryItemsMetadata: (signal: AbortSignal) => Promise<{
resultMetadata?: ESItemMetadata[];
- setRecoveryPoint?: (setIDB?: boolean) => Promise<void>;
+ setRecoveryPoint?: () => Promise<void>;
}>;
/**
diff --git a/packages/encrypted-search/lib/useEncryptedSearch.tsx b/packages/encrypted-search/lib/useEncryptedSearch.tsx
index f6a49d36d3..4e532adbfe 100644
--- a/packages/encrypted-search/lib/useEncryptedSearch.tsx
+++ b/packages/encrypted-search/lib/useEncryptedSearch.tsx
@@ -592,14 +592,6 @@ const useEncryptedSearch = <ESItemMetadata, ESSearchParameters, ESItemContent =
dbExists: true,
}));
- // Caching metadata because by the end of metadata indexing we also expect cache
- // to hold metadata. In case metadata indexing was interrupted, we would only add
- // metadata for the fetched items and not for those that were indexed before the
- // interruption
- if (indexKey) {
- await cacheMetadata<ESItemMetadata>(userID, indexKey, esHelpers.getItemInfo, esCacheRef);
- }
-
// Even though this procedure cannot be paused, this is still useful
// in case of clearing data and logout
abortIndexingRef.current = new AbortController();
@@ -801,7 +793,7 @@ const useEncryptedSearch = <ESItemMetadata, ESSearchParameters, ESItemContent =
// Caching content because by the end of content indexing we also expect cache
// to hold content. In case content indexing was interrupted, we would only add
// content for the fetched items and not for those that were indexed before the
- // interruption
+ // interruption, therefore we will do that in here
await cacheContent<ESItemMetadata, ESItemContent>(indexKey, userID, esCacheRef, esHelpers.getItemInfo);
// We default to having the limited flag to true and