mirror of https://github.com/musix-org/musix-oss synced 2025-06-17 13:56:01 +00:00
Commit 30022c7634 (parent edfcc6f474) by MatteZ02 on 2020-03-03 22:30:50 +02:00
11800 changed files with 1984416 additions and 1 deletion

@@ -0,0 +1,24 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import './src/platform_browser/browser_init';
export { Firestore, FirestoreDatabase, PublicCollectionReference as CollectionReference, PublicDocumentReference as DocumentReference, PublicDocumentSnapshot as DocumentSnapshot, PublicQuerySnapshot as QuerySnapshot } from './src/api/database';
export { GeoPoint } from './src/api/geo_point';
export { PublicBlob as Blob } from './src/api/blob';
export { FirstPartyCredentialsSettings } from './src/api/credentials';
export { PublicFieldValue as FieldValue } from './src/api/field_value';
export { FieldPath } from './src/api/field_path';
export { Timestamp } from './src/api/timestamp';

node_modules/@firebase/firestore/dist/lib/index.d.ts
@@ -0,0 +1,45 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import './src/platform_browser/browser_init';
import * as types from '@firebase/firestore-types';
import { FirebaseNamespace } from '@firebase/app-types';
export declare function registerFirestore(instance: FirebaseNamespace): void;
declare module '@firebase/app-types' {
interface FirebaseNamespace {
firestore?: {
(app?: FirebaseApp): types.FirebaseFirestore;
Blob: typeof types.Blob;
CollectionReference: typeof types.CollectionReference;
DocumentReference: typeof types.DocumentReference;
DocumentSnapshot: typeof types.DocumentSnapshot;
FieldPath: typeof types.FieldPath;
FieldValue: typeof types.FieldValue;
Firestore: typeof types.FirebaseFirestore;
GeoPoint: typeof types.GeoPoint;
Query: typeof types.Query;
QueryDocumentSnapshot: typeof types.QueryDocumentSnapshot;
QuerySnapshot: typeof types.QuerySnapshot;
Timestamp: typeof types.Timestamp;
Transaction: typeof types.Transaction;
WriteBatch: typeof types.WriteBatch;
setLogLevel: typeof types.setLogLevel;
};
}
interface FirebaseApp {
firestore?(): types.FirebaseFirestore;
}
}
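
The module augmentation above is what gives the umbrella firebase namespace its typed firestore member. A minimal consumption sketch, assuming the standard v7-era entry points 'firebase/app' and 'firebase/firestore' and a placeholder project config:

import * as firebase from 'firebase/app';
import 'firebase/firestore';

// Placeholder config; a real project supplies its own keys.
firebase.initializeApp({ projectId: 'demo-project' });

// Both call forms are typed through the FirebaseNamespace/FirebaseApp
// augmentation declared above and return types.FirebaseFirestore.
const db = firebase.firestore();
const sameDb = firebase.app().firestore();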

@@ -0,0 +1,29 @@
import * as types from '@firebase/firestore-types';
import './src/platform_node/node_init';
import { FirebaseNamespace } from '@firebase/app-types';
export declare function registerFirestore(instance: FirebaseNamespace): void;
declare module '@firebase/app-types' {
interface FirebaseNamespace {
firestore?: {
(app?: FirebaseApp): types.FirebaseFirestore;
Blob: typeof types.Blob;
CollectionReference: typeof types.CollectionReference;
DocumentReference: typeof types.DocumentReference;
DocumentSnapshot: typeof types.DocumentSnapshot;
FieldPath: typeof types.FieldPath;
FieldValue: typeof types.FieldValue;
Firestore: typeof types.FirebaseFirestore;
GeoPoint: typeof types.GeoPoint;
Query: typeof types.Query;
QueryDocumentSnapshot: typeof types.QueryDocumentSnapshot;
QuerySnapshot: typeof types.QuerySnapshot;
Timestamp: typeof types.Timestamp;
Transaction: typeof types.Transaction;
WriteBatch: typeof types.WriteBatch;
setLogLevel: typeof types.setLogLevel;
};
}
interface FirebaseApp {
firestore?(): types.FirebaseFirestore;
}
}

node_modules/@firebase/firestore/dist/lib/package.d.ts
@@ -0,0 +1,74 @@
declare const _exports: {
"name": string;
"version": string;
"description": string;
"author": string;
"scripts": {
"prebuild": string;
"build": string;
"build:console": string;
"dev": string;
"lint": string;
"lint:fix": string;
"prettier": string;
"test": string;
"test:all": string;
"test:browser": string;
"test:browser:debug": string;
"test:node": string;
"test:node:prod": string;
"test:node:persistence": string;
"test:node:persistence:prod": string;
"test:travis": string;
"test:minified": string;
"prepare": string;
};
"main": string;
"browser": string;
"browserMinified": string;
"module": string;
"moduleMinified": string;
"esm2017": string;
"esm2017Minified": string;
"license": string;
"files": string[];
"dependencies": {
"@firebase/firestore-types": string;
"@firebase/logger": string;
"@firebase/webchannel-wrapper": string;
"@grpc/proto-loader": string;
"@firebase/util": string;
"@firebase/component": string;
"grpc": string;
"tslib": string;
};
"peerDependencies": {
"@firebase/app": string;
"@firebase/app-types": string;
};
"devDependencies": {
"protobufjs": string;
"rollup": string;
"rollup-plugin-copy-assets": string;
"rollup-plugin-json": string;
"rollup-plugin-node-resolve": string;
"rollup-plugin-replace": string;
"rollup-plugin-terser": string;
"rollup-plugin-typescript2": string;
"typescript": string;
};
"repository": {
"directory": string;
"type": string;
"url": string;
};
"bugs": {
"url": string;
};
"typings": string;
"nyc": {
"extension": string[];
"reportDir": string;
};
};
export = _exports;

@@ -0,0 +1,21 @@
/**
* @license
* Copyright 2020 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Traverses TypeScript type definition files and returns the list of referenced
* identifiers.
*/
export declare function extractPublicIdentifiers(filePaths: string[]): Set<string>;

@@ -0,0 +1,31 @@
/**
* @license
* Copyright 2020 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as ts from 'typescript';
export interface SDKMinifierOptions {
/** List of identifiers that are not to be minified. */
publicIdentifiers: Set<string>;
/**
 * A prefix to prepend to all identifiers that are not referencing the Public
 * API. Defaults to '_'.
*/
prefix?: string;
}
/**
* A TypeScript transformer that minifies existing source files. All identifiers
* are minified unless listed in `config.publicIdentifiers`.
*/
export declare function renameInternals(program: ts.Program, config: SDKMinifierOptions): ts.TransformerFactory<ts.SourceFile>;
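
The two declarations above (extractPublicIdentifiers from the previous file and renameInternals here) describe a build-time minifier for internal identifiers. A hedged sketch of wiring such a transformer into a TypeScript emit; the relative import paths are hypothetical, since this diff does not show where the implementation files live:

import * as ts from 'typescript';
import { extractPublicIdentifiers } from './extract_api'; // hypothetical path
import { renameInternals } from './rename_internals'; // hypothetical path

// Collect every identifier referenced by the public .d.ts surface...
const publicIdentifiers = extractPublicIdentifiers(['dist/index.d.ts']);

// ...then emit the sources with all other identifiers prefixed ('_' by default).
const program = ts.createProgram(['src/index.ts'], { outDir: 'dist/minified' });
program.emit(undefined, undefined, undefined, undefined, {
  before: [renameInternals(program, { publicIdentifiers })],
});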

@@ -0,0 +1,40 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Immutable class holding a blob (binary data).
* This class is directly exposed in the public API.
*
* Note that while you can't hide the constructor in JavaScript code, we are
* using the hack above to make sure no-one outside this module can call it.
*/
export declare class Blob {
private _binaryString;
private constructor();
static fromBase64String(base64: string): Blob;
static fromUint8Array(array: Uint8Array): Blob;
toBase64(): string;
toUint8Array(): Uint8Array;
toString(): string;
isEqual(other: Blob): boolean;
_approximateByteSize(): number;
/**
* Actually private to JS consumers of our API, so this function is prefixed
* with an underscore.
*/
_compareTo(other: Blob): number;
}
export declare const PublicBlob: typeof Blob;
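
Because the constructor is private, Blob values are only created through the static factories. A minimal usage sketch via the public firebase.firestore namespace (v7-era imports assumed):

import * as firebase from 'firebase/app';
import 'firebase/firestore';

// 'aGVsbG8=' is base64 for "hello".
const blob = firebase.firestore.Blob.fromBase64String('aGVsbG8=');
const bytes: Uint8Array = blob.toUint8Array();

// Round-tripping through fromUint8Array yields an equal Blob.
console.log(blob.isEqual(firebase.firestore.Blob.fromUint8Array(bytes))); // true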

@@ -0,0 +1,149 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { User } from '../auth/user';
import { FirebaseAuthInternalName } from '@firebase/auth-interop-types';
import { Provider } from '@firebase/component';
export interface FirstPartyCredentialsSettings {
type: 'gapi';
client: unknown;
sessionIndex: string;
}
export interface ProviderCredentialsSettings {
type: 'provider';
client: CredentialsProvider;
}
/** Settings for private credentials */
export declare type CredentialsSettings = FirstPartyCredentialsSettings | ProviderCredentialsSettings;
export declare type TokenType = 'OAuth' | 'FirstParty';
export interface Token {
/** Type of token. */
type: TokenType;
/**
* The user with which the token is associated (used for persisting user
* state on disk, etc.).
*/
user: User;
/** Extra header values to be passed along with a request */
authHeaders: {
[header: string]: string;
};
}
export declare class OAuthToken implements Token {
user: User;
type: TokenType;
authHeaders: {
[header: string]: string;
};
constructor(value: string, user: User);
}
/**
* A Listener for credential change events. The listener should fetch a new
* token and may need to invalidate other state if the current user has also
* changed.
*/
export declare type CredentialChangeListener = (user: User) => void;
/**
* Provides methods for getting the uid and token for the current user and
* listening for changes.
*/
export interface CredentialsProvider {
/** Requests a token for the current user. */
getToken(): Promise<Token | null>;
/**
* Marks the last retrieved token as invalid, making the next GetToken request
* force-refresh the token.
*/
invalidateToken(): void;
/**
* Specifies a listener to be notified of credential changes
* (sign-in / sign-out, token changes). It is immediately called once with the
* initial user.
*/
setChangeListener(changeListener: CredentialChangeListener): void;
/** Removes the previously-set change listener. */
removeChangeListener(): void;
}
/** A CredentialsProvider that always yields an empty token. */
export declare class EmptyCredentialsProvider implements CredentialsProvider {
/**
* Stores the listener registered with setChangeListener()
* This isn't actually necessary since the UID never changes, but we use this
* to verify the listen contract is adhered to in tests.
*/
private changeListener;
getToken(): Promise<Token | null>;
invalidateToken(): void;
setChangeListener(changeListener: CredentialChangeListener): void;
removeChangeListener(): void;
}
export declare class FirebaseCredentialsProvider implements CredentialsProvider {
/**
* The auth token listener registered with FirebaseApp, retained here so we
* can unregister it.
*/
private tokenListener;
/** Tracks the current User. */
private currentUser;
private receivedInitialUser;
/**
* Counter used to detect if the token changed while a getToken request was
* outstanding.
*/
private tokenCounter;
/** The listener registered with setChangeListener(). */
private changeListener;
private forceRefresh;
private auth;
constructor(authProvider: Provider<FirebaseAuthInternalName>);
getToken(): Promise<Token | null>;
invalidateToken(): void;
setChangeListener(changeListener: CredentialChangeListener): void;
removeChangeListener(): void;
private getUser;
}
interface Gapi {
auth: {
getAuthHeaderValueForFirstParty: (userIdentifiers: Array<{
[key: string]: string;
}>) => string | null;
};
}
export declare class FirstPartyToken implements Token {
private gapi;
private sessionIndex;
type: TokenType;
user: User;
constructor(gapi: Gapi, sessionIndex: string);
get authHeaders(): {
[header: string]: string;
};
}
export declare class FirstPartyCredentialsProvider implements CredentialsProvider {
private gapi;
private sessionIndex;
constructor(gapi: Gapi, sessionIndex: string);
getToken(): Promise<Token | null>;
setChangeListener(changeListener: CredentialChangeListener): void;
removeChangeListener(): void;
invalidateToken(): void;
}
/**
* Builds a CredentialsProvider depending on the type of
* the credentials passed in.
*/
export declare function makeCredentialsProvider(credentials?: CredentialsSettings): CredentialsProvider;
export {};
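
The CredentialsProvider interface above is the contract the SDK uses internally to obtain and refresh auth tokens. A self-contained sketch of a provider that always yields no token (mirroring EmptyCredentialsProvider), written against simplified local stand-ins for the internal Token and User types so it compiles on its own:

// Simplified stand-ins for the internal types referenced by the interface.
interface SimpleUser { uid: string | null; }
interface SimpleToken {
  type: 'OAuth' | 'FirstParty';
  user: SimpleUser;
  authHeaders: { [header: string]: string };
}
type ChangeListener = (user: SimpleUser) => void;

class NoAuthCredentials {
  private changeListener: ChangeListener | null = null;

  // Always resolves with no token, i.e. requests go out unauthenticated.
  getToken(): Promise<SimpleToken | null> {
    return Promise.resolve(null);
  }

  // Nothing is cached, so there is nothing to invalidate.
  invalidateToken(): void {}

  // Per the contract, the listener is invoked immediately with the initial user.
  setChangeListener(listener: ChangeListener): void {
    this.changeListener = listener;
    listener({ uid: null });
  }

  removeChangeListener(): void {
    this.changeListener = null;
  }
}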

@@ -0,0 +1,270 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as firestore from '@firebase/firestore-types';
import { FirebaseApp } from '@firebase/app-types';
import { FirebaseService } from '@firebase/app-types/private';
import { DatabaseId } from '../core/database_info';
import { FirestoreClient } from '../core/firestore_client';
import { Query as InternalQuery } from '../core/query';
import { Transaction as InternalTransaction } from '../core/transaction';
import { ViewSnapshot } from '../core/view_snapshot';
import { Document } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { ResourcePath } from '../model/path';
import { AsyncQueue } from '../util/async_queue';
import { FieldPath as ExternalFieldPath } from './field_path';
import { CompleteFn, ErrorFn, NextFn, PartialObserver, Unsubscribe } from './observer';
import { UserDataConverter } from './user_data_converter';
import { FirebaseAuthInternalName } from '@firebase/auth-interop-types';
import { Provider } from '@firebase/component';
/**
* Constant used to indicate the LRU garbage collection should be disabled.
* Set this value as the `cacheSizeBytes` on the settings passed to the
* `Firestore` instance.
*/
export declare const CACHE_SIZE_UNLIMITED = -1;
/**
* Options that can be provided in the Firestore constructor when not using
* Firebase (aka standalone mode).
*/
export interface FirestoreDatabase {
projectId: string;
database?: string;
}
/**
* The root reference to the database.
*/
export declare class Firestore implements firestore.FirebaseFirestore, FirebaseService {
readonly _databaseId: DatabaseId;
private readonly _persistenceKey;
private _credentials;
private readonly _firebaseApp;
private _settings;
private _firestoreClient;
readonly _queue: AsyncQueue;
readonly _dataConverter: UserDataConverter;
constructor(databaseIdOrApp: FirestoreDatabase | FirebaseApp, authProvider: Provider<FirebaseAuthInternalName>);
settings(settingsLiteral: firestore.Settings): void;
enableNetwork(): Promise<void>;
disableNetwork(): Promise<void>;
enablePersistence(settings?: firestore.PersistenceSettings): Promise<void>;
clearPersistence(): Promise<void>;
terminate(): Promise<void>;
get _isTerminated(): boolean;
waitForPendingWrites(): Promise<void>;
onSnapshotsInSync(observer: PartialObserver<void>): Unsubscribe;
onSnapshotsInSync(onSync: () => void): Unsubscribe;
private onSnapshotsInSyncInternal;
ensureClientConfigured(): FirestoreClient;
private makeDatabaseInfo;
private configureClient;
private createDataConverter;
private static databaseIdFromApp;
get app(): FirebaseApp;
INTERNAL: {
delete: () => Promise<void>;
};
collection(pathString: string): firestore.CollectionReference;
doc(pathString: string): firestore.DocumentReference;
collectionGroup(collectionId: string): firestore.Query;
runTransaction<T>(updateFunction: (transaction: firestore.Transaction) => Promise<T>): Promise<T>;
batch(): firestore.WriteBatch;
static get logLevel(): firestore.LogLevel;
static setLogLevel(level: firestore.LogLevel): void;
_areTimestampsInSnapshotsEnabled(): boolean;
}
/**
* A reference to a transaction.
*/
export declare class Transaction implements firestore.Transaction {
private _firestore;
private _transaction;
constructor(_firestore: Firestore, _transaction: InternalTransaction);
get<T>(documentRef: firestore.DocumentReference<T>): Promise<firestore.DocumentSnapshot<T>>;
set<T>(documentRef: firestore.DocumentReference<T>, value: T, options?: firestore.SetOptions): Transaction;
update(documentRef: firestore.DocumentReference<unknown>, value: firestore.UpdateData): Transaction;
update(documentRef: firestore.DocumentReference<unknown>, field: string | ExternalFieldPath, value: unknown, ...moreFieldsAndValues: unknown[]): Transaction;
delete(documentRef: firestore.DocumentReference<unknown>): Transaction;
}
export declare class WriteBatch implements firestore.WriteBatch {
private _firestore;
private _mutations;
private _committed;
constructor(_firestore: Firestore);
set<T>(documentRef: firestore.DocumentReference<T>, value: T, options?: firestore.SetOptions): WriteBatch;
update(documentRef: firestore.DocumentReference<unknown>, value: firestore.UpdateData): WriteBatch;
update(documentRef: firestore.DocumentReference<unknown>, field: string | ExternalFieldPath, value: unknown, ...moreFieldsAndValues: unknown[]): WriteBatch;
delete(documentRef: firestore.DocumentReference<unknown>): WriteBatch;
commit(): Promise<void>;
private verifyNotCommitted;
}
/**
* A reference to a particular document in a collection in the database.
*/
export declare class DocumentReference<T = firestore.DocumentData> implements firestore.DocumentReference<T> {
_key: DocumentKey;
readonly firestore: Firestore;
readonly _converter?: firestore.FirestoreDataConverter<T> | undefined;
private _firestoreClient;
constructor(_key: DocumentKey, firestore: Firestore, _converter?: firestore.FirestoreDataConverter<T> | undefined);
static forPath<U>(path: ResourcePath, firestore: Firestore, converter?: firestore.FirestoreDataConverter<U>): DocumentReference<U>;
get id(): string;
get parent(): firestore.CollectionReference<T>;
get path(): string;
collection(pathString: string): firestore.CollectionReference<firestore.DocumentData>;
isEqual(other: firestore.DocumentReference<T>): boolean;
set(value: firestore.DocumentData, options?: firestore.SetOptions): Promise<void>;
update(value: firestore.UpdateData): Promise<void>;
update(field: string | ExternalFieldPath, value: unknown, ...moreFieldsAndValues: unknown[]): Promise<void>;
delete(): Promise<void>;
onSnapshot(observer: PartialObserver<firestore.DocumentSnapshot<T>>): Unsubscribe;
onSnapshot(options: firestore.SnapshotListenOptions, observer: PartialObserver<firestore.DocumentSnapshot<T>>): Unsubscribe;
onSnapshot(onNext: NextFn<firestore.DocumentSnapshot<T>>, onError?: ErrorFn, onCompletion?: CompleteFn): Unsubscribe;
onSnapshot(options: firestore.SnapshotListenOptions, onNext: NextFn<firestore.DocumentSnapshot<T>>, onError?: ErrorFn, onCompletion?: CompleteFn): Unsubscribe;
private onSnapshotInternal;
get(options?: firestore.GetOptions): Promise<firestore.DocumentSnapshot<T>>;
private getViaSnapshotListener;
withConverter<U>(converter: firestore.FirestoreDataConverter<U>): firestore.DocumentReference<U>;
}
/**
* Options interface that can be provided to configure the deserialization of
* DocumentSnapshots.
*/
export interface SnapshotOptions extends firestore.SnapshotOptions {
}
export declare class DocumentSnapshot<T = firestore.DocumentData> implements firestore.DocumentSnapshot<T> {
private _firestore;
private _key;
_document: Document | null;
private _fromCache;
private _hasPendingWrites;
private readonly _converter?;
constructor(_firestore: Firestore, _key: DocumentKey, _document: Document | null, _fromCache: boolean, _hasPendingWrites: boolean, _converter?: firestore.FirestoreDataConverter<T> | undefined);
data(options?: firestore.SnapshotOptions): T | undefined;
get(fieldPath: string | ExternalFieldPath, options?: firestore.SnapshotOptions): unknown;
get id(): string;
get ref(): firestore.DocumentReference<T>;
get exists(): boolean;
get metadata(): firestore.SnapshotMetadata;
isEqual(other: firestore.DocumentSnapshot<T>): boolean;
private toJSObject;
private toJSValue;
private toJSArray;
}
export declare class QueryDocumentSnapshot<T = firestore.DocumentData> extends DocumentSnapshot<T> implements firestore.QueryDocumentSnapshot<T> {
data(options?: SnapshotOptions): T;
}
export declare class Query<T = firestore.DocumentData> implements firestore.Query<T> {
_query: InternalQuery;
readonly firestore: Firestore;
protected readonly _converter?: firestore.FirestoreDataConverter<T> | undefined;
constructor(_query: InternalQuery, firestore: Firestore, _converter?: firestore.FirestoreDataConverter<T> | undefined);
where(field: string | ExternalFieldPath, opStr: firestore.WhereFilterOp, value: unknown): firestore.Query<T>;
orderBy(field: string | ExternalFieldPath, directionStr?: firestore.OrderByDirection): firestore.Query<T>;
limit(n: number): firestore.Query<T>;
limitToLast(n: number): firestore.Query<T>;
startAt(docOrField: unknown | firestore.DocumentSnapshot<unknown>, ...fields: unknown[]): firestore.Query<T>;
startAfter(docOrField: unknown | firestore.DocumentSnapshot<unknown>, ...fields: unknown[]): firestore.Query<T>;
endBefore(docOrField: unknown | firestore.DocumentSnapshot<unknown>, ...fields: unknown[]): firestore.Query<T>;
endAt(docOrField: unknown | firestore.DocumentSnapshot<unknown>, ...fields: unknown[]): firestore.Query<T>;
isEqual(other: firestore.Query<T>): boolean;
withConverter<U>(converter: firestore.FirestoreDataConverter<U>): firestore.Query<U>;
/** Helper function to create a bound from a document or fields */
private boundFromDocOrFields;
/**
* Create a Bound from a query and a document.
*
* Note that the Bound will always include the key of the document
* and so only the provided document will compare equal to the returned
* position.
*
* Will throw if the document does not contain all fields of the order by
* of the query or if any of the fields in the order by are an uncommitted
* server timestamp.
*/
private boundFromDocument;
/**
* Converts a list of field values to a Bound for the given query.
*/
private boundFromFields;
onSnapshot(observer: PartialObserver<firestore.QuerySnapshot<T>>): Unsubscribe;
onSnapshot(options: firestore.SnapshotListenOptions, observer: PartialObserver<firestore.QuerySnapshot<T>>): Unsubscribe;
onSnapshot(onNext: NextFn<firestore.QuerySnapshot<T>>, onError?: ErrorFn, onCompletion?: CompleteFn): Unsubscribe;
onSnapshot(options: firestore.SnapshotListenOptions, onNext: NextFn<firestore.QuerySnapshot<T>>, onError?: ErrorFn, onCompletion?: CompleteFn): Unsubscribe;
private onSnapshotInternal;
private validateHasExplicitOrderByForLimitToLast;
get(options?: firestore.GetOptions): Promise<firestore.QuerySnapshot<T>>;
private getViaSnapshotListener;
/**
* Parses the given documentIdValue into a ReferenceValue, throwing
* appropriate errors if the value is anything other than a DocumentReference
* or String, or if the string is malformed.
*/
private parseDocumentIdValue;
/**
 * Validates that the value passed into a disjunctive filter satisfies all
* array requirements.
*/
private validateDisjunctiveFilterElements;
private validateNewFilter;
private validateNewOrderBy;
private validateOrderByAndInequalityMatch;
}
export declare class QuerySnapshot<T = firestore.DocumentData> implements firestore.QuerySnapshot<T> {
private readonly _firestore;
private readonly _originalQuery;
private readonly _snapshot;
private readonly _converter?;
private _cachedChanges;
private _cachedChangesIncludeMetadataChanges;
readonly metadata: firestore.SnapshotMetadata;
constructor(_firestore: Firestore, _originalQuery: InternalQuery, _snapshot: ViewSnapshot, _converter?: firestore.FirestoreDataConverter<T> | undefined);
get docs(): Array<firestore.QueryDocumentSnapshot<T>>;
get empty(): boolean;
get size(): number;
forEach(callback: (result: firestore.QueryDocumentSnapshot<T>) => void, thisArg?: unknown): void;
get query(): firestore.Query<T>;
docChanges(options?: firestore.SnapshotListenOptions): Array<firestore.DocumentChange<T>>;
/** Check the equality. The call can be very expensive. */
isEqual(other: firestore.QuerySnapshot<T>): boolean;
private convertToDocumentImpl;
}
export declare class CollectionReference<T = firestore.DocumentData> extends Query<T> implements firestore.CollectionReference<T> {
readonly _path: ResourcePath;
constructor(_path: ResourcePath, firestore: Firestore, _converter?: firestore.FirestoreDataConverter<T>);
get id(): string;
get parent(): firestore.DocumentReference<firestore.DocumentData> | null;
get path(): string;
doc(pathString?: string): firestore.DocumentReference<T>;
add(value: T): Promise<firestore.DocumentReference<T>>;
withConverter<U>(converter: firestore.FirestoreDataConverter<U>): firestore.CollectionReference<U>;
}
/**
* Calculates the array of firestore.DocumentChange's for a given ViewSnapshot.
*
* Exported for testing.
*/
export declare function changesFromSnapshot<T>(firestore: Firestore, includeMetadataChanges: boolean, snapshot: ViewSnapshot, converter?: firestore.FirestoreDataConverter<T>): Array<firestore.DocumentChange<T>>;
export declare const PublicFirestore: typeof Firestore;
export declare const PublicTransaction: typeof Transaction;
export declare const PublicWriteBatch: typeof WriteBatch;
export declare const PublicDocumentReference: typeof DocumentReference;
export declare const PublicDocumentSnapshot: typeof DocumentSnapshot;
export declare const PublicQueryDocumentSnapshot: typeof QueryDocumentSnapshot;
export declare const PublicQuery: typeof Query;
export declare const PublicQuerySnapshot: typeof QuerySnapshot;
export declare const PublicCollectionReference: typeof CollectionReference;
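
The classes above form the public surface most applications use. A short end-to-end sketch against the umbrella firebase package; the v7-era imports, project config, collection name 'tracks', and field names are placeholders:

import * as firebase from 'firebase/app';
import 'firebase/firestore';

firebase.initializeApp({ projectId: 'demo-project' }); // placeholder config

async function demo(): Promise<void> {
  const db = firebase.firestore();

  // CollectionReference -> DocumentReference -> DocumentSnapshot.
  const ref = db.collection('tracks').doc('track-1');
  await ref.set({ title: 'Intro', plays: 0 });
  const snap = await ref.get();
  console.log(snap.exists, snap.data());

  // Query -> QuerySnapshot, using the fluent builders declared above.
  const popular = await db
    .collection('tracks')
    .where('plays', '>', 10)
    .orderBy('plays', 'desc')
    .limit(5)
    .get();
  popular.forEach(doc => console.log(doc.id, doc.data()));

  // WriteBatch groups several mutations into a single commit.
  const batch = db.batch();
  batch.update(ref, { plays: firebase.firestore.FieldValue.increment(1) });
  await batch.commit();
}

demo().catch(console.error);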

@@ -0,0 +1,47 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as firestore from '@firebase/firestore-types';
import { FieldPath as InternalFieldPath } from '../model/path';
/**
* A FieldPath refers to a field in a document. The path may consist of a single
* field name (referring to a top-level field in the document), or a list of
* field names (referring to a nested field in the document).
*/
export declare class FieldPath implements firestore.FieldPath {
/** Internal representation of a Firestore field path. */
_internalPath: InternalFieldPath;
/**
* Creates a FieldPath from the provided field names. If more than one field
* name is provided, the path will point to a nested field in a document.
*
* @param fieldNames A list of field names.
*/
constructor(...fieldNames: string[]);
/**
* Internal Note: The backend doesn't technically support querying by
* document ID. Instead it queries by the entire document name (full path
* included), but in the cases we currently support documentId(), the net
* effect is the same.
*/
private static readonly _DOCUMENT_ID;
static documentId(): FieldPath;
isEqual(other: firestore.FieldPath): boolean;
}
/**
* Parses a field path string into a FieldPath, treating dots as separators.
*/
export declare function fromDotSeparatedString(path: string): FieldPath;
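
A small sketch of the two common ways a FieldPath is built, using the public namespace from the umbrella package (v7-era imports assumed; field names are placeholders):

import * as firebase from 'firebase/app';
import 'firebase/firestore';

// Refers to the nested field address.city without relying on dot syntax.
const cityPath = new firebase.firestore.FieldPath('address', 'city');

// Sentinel path for the document ID, usable in where()/orderBy().
const idPath = firebase.firestore.FieldPath.documentId();

console.log(cityPath.isEqual(idPath)); // false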

@@ -0,0 +1,54 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as firestore from '@firebase/firestore-types';
/**
* An opaque base class for FieldValue sentinel objects in our public API,
* with public static methods for creating said sentinel objects.
*/
export declare abstract class FieldValueImpl implements firestore.FieldValue {
readonly _methodName: string;
protected constructor(_methodName: string);
static delete(): FieldValueImpl;
static serverTimestamp(): FieldValueImpl;
static arrayUnion(...elements: unknown[]): FieldValueImpl;
static arrayRemove(...elements: unknown[]): FieldValueImpl;
static increment(n: number): FieldValueImpl;
isEqual(other: FieldValueImpl): boolean;
}
export declare class DeleteFieldValueImpl extends FieldValueImpl {
private constructor();
/** Singleton instance. */
static instance: DeleteFieldValueImpl;
}
export declare class ServerTimestampFieldValueImpl extends FieldValueImpl {
private constructor();
/** Singleton instance. */
static instance: ServerTimestampFieldValueImpl;
}
export declare class ArrayUnionFieldValueImpl extends FieldValueImpl {
readonly _elements: unknown[];
constructor(_elements: unknown[]);
}
export declare class ArrayRemoveFieldValueImpl extends FieldValueImpl {
readonly _elements: unknown[];
constructor(_elements: unknown[]);
}
export declare class NumericIncrementFieldValueImpl extends FieldValueImpl {
readonly _operand: number;
constructor(_operand: number);
}
export declare const PublicFieldValue: typeof FieldValueImpl;
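
FieldValueImpl backs the sentinels exported publicly as FieldValue. A small sketch of assembling an update payload with them; the sentinels only take effect when the object is later passed to set()/update() (v7-era imports assumed, field names are placeholders):

import * as firebase from 'firebase/app';
import 'firebase/firestore';

const FieldValue = firebase.firestore.FieldValue;

const update = {
  lastPlayedAt: FieldValue.serverTimestamp(), // resolved by the backend on write
  tags: FieldValue.arrayUnion('lofi'),
  retiredTags: FieldValue.arrayRemove('demo'),
  playCount: FieldValue.increment(1),
  obsoleteField: FieldValue.delete(), // removes the field on update()
};

// `update` would then be passed to DocumentReference.update(update).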

@@ -0,0 +1,39 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Immutable class representing a geo point as latitude-longitude pair.
* This class is directly exposed in the public API, including its constructor.
*/
export declare class GeoPoint {
private _lat;
private _long;
constructor(latitude: number, longitude: number);
/**
* Returns the latitude of this geo point, a number between -90 and 90.
*/
get latitude(): number;
/**
* Returns the longitude of this geo point, a number between -180 and 180.
*/
get longitude(): number;
isEqual(other: GeoPoint): boolean;
/**
* Actually private to JS consumers of our API, so this function is prefixed
* with an underscore.
*/
_compareTo(other: GeoPoint): number;
}
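
GeoPoint's constructor is public, so instances can be built directly; a minimal sketch (v7-era imports assumed):

import * as firebase from 'firebase/app';
import 'firebase/firestore';

// Latitude must be within [-90, 90] and longitude within [-180, 180].
const helsinki = new firebase.firestore.GeoPoint(60.1699, 24.9384);
console.log(helsinki.latitude, helsinki.longitude);

const sameSpot = new firebase.firestore.GeoPoint(60.1699, 24.9384);
console.log(helsinki.isEqual(sameSpot)); // true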

@@ -0,0 +1,31 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Observer/Subscribe interfaces.
*/
export declare type NextFn<T> = (value: T) => void;
export declare type ErrorFn = (error: Error) => void;
export declare type CompleteFn = () => void;
export interface PartialObserver<T> {
next?: NextFn<T>;
error?: ErrorFn;
complete?: CompleteFn;
}
export interface Unsubscribe {
(): void;
}
export declare function isPartialObserver(obj: unknown): boolean;
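
These are the callback shapes accepted by the onSnapshot overloads elsewhere in this package. A self-contained sketch using local copies of the declared types, so it compiles without the internal import path:

// Local copies of the shapes declared above.
type NextFn<T> = (value: T) => void;
type ErrorFn = (error: Error) => void;
type CompleteFn = () => void;

interface PartialObserver<T> {
  next?: NextFn<T>;
  error?: ErrorFn;
  complete?: CompleteFn;
}

// An observer may implement any subset of the three callbacks.
const progressObserver: PartialObserver<number> = {
  next: value => console.log('progress:', value),
  error: err => console.error('failed:', err.message),
};

progressObserver.next?.(42);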

@@ -0,0 +1,29 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export declare class Timestamp {
readonly seconds: number;
readonly nanoseconds: number;
static now(): Timestamp;
static fromDate(date: Date): Timestamp;
static fromMillis(milliseconds: number): Timestamp;
constructor(seconds: number, nanoseconds: number);
toDate(): Date;
toMillis(): number;
_compareTo(other: Timestamp): number;
isEqual(other: Timestamp): boolean;
toString(): string;
}
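
Timestamp is a public value type; a minimal sketch of creating and converting one (v7-era imports assumed):

import * as firebase from 'firebase/app';
import 'firebase/firestore';

const Timestamp = firebase.firestore.Timestamp;

const now = Timestamp.now();
const sameInstant = Timestamp.fromMillis(now.toMillis());

console.log(now.seconds, now.nanoseconds);
console.log(now.isEqual(sameInstant)); // true: now() has millisecond precision
console.log(Timestamp.fromDate(new Date(0)).toDate().toISOString()); // 1970-01-01T00:00:00.000Z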

@@ -0,0 +1,117 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as firestore from '@firebase/firestore-types';
import { DatabaseId } from '../core/database_info';
import { DocumentKey } from '../model/document_key';
import { FieldValue, ObjectValue } from '../model/field_value';
import { FieldMask, FieldTransform, Mutation, Precondition } from '../model/mutation';
import { FieldPath } from '../model/path';
import { FieldPath as ExternalFieldPath } from './field_path';
/** The result of parsing document data (e.g. for a setData call). */
export declare class ParsedSetData {
readonly data: ObjectValue;
readonly fieldMask: FieldMask | null;
readonly fieldTransforms: FieldTransform[];
constructor(data: ObjectValue, fieldMask: FieldMask | null, fieldTransforms: FieldTransform[]);
toMutations(key: DocumentKey, precondition: Precondition): Mutation[];
}
/** The result of parsing "update" data (i.e. for an updateData call). */
export declare class ParsedUpdateData {
readonly data: ObjectValue;
readonly fieldMask: FieldMask;
readonly fieldTransforms: FieldTransform[];
constructor(data: ObjectValue, fieldMask: FieldMask, fieldTransforms: FieldTransform[]);
toMutations(key: DocumentKey, precondition: Precondition): Mutation[];
}
/**
* An interface that allows arbitrary pre-converting of user data. This
* abstraction allows for, e.g.:
* * The public API to convert DocumentReference objects to DocRef objects,
* avoiding a circular dependency between user_data_converter.ts and
* database.ts
* * Tests to convert test-only sentinels (e.g. '<DELETE>') into types
* compatible with UserDataConverter.
*
* Returns the converted value (can return back the input to act as a no-op).
*
* It can also throw an Error which will be wrapped into a friendly message.
*/
export declare type DataPreConverter = (input: unknown) => unknown;
/**
* A placeholder object for DocumentReferences in this file, in order to
* avoid a circular dependency. See the comments for `DataPreConverter` for
* the full context.
*/
export declare class DocumentKeyReference {
databaseId: DatabaseId;
key: DocumentKey;
constructor(databaseId: DatabaseId, key: DocumentKey);
}
/**
* Helper for parsing raw user input (provided via the API) into internal model
* classes.
*/
export declare class UserDataConverter {
private preConverter;
constructor(preConverter: DataPreConverter);
/** Parse document data from a non-merge set() call. */
parseSetData(methodName: string, input: unknown): ParsedSetData;
/** Parse document data from a set() call with '{merge:true}'. */
parseMergeData(methodName: string, input: unknown, fieldPaths?: Array<string | firestore.FieldPath>): ParsedSetData;
/** Parse update data from an update() call. */
parseUpdateData(methodName: string, input: unknown): ParsedUpdateData;
/** Parse update data from a list of field/value arguments. */
parseUpdateVarargs(methodName: string, field: string | ExternalFieldPath, value: unknown, moreFieldsAndValues: unknown[]): ParsedUpdateData;
/**
* Parse a "query value" (e.g. value in a where filter or a value in a cursor
* bound).
*
* @param allowArrays Whether the query value is an array that may directly
* contain additional arrays (e.g. the operand of an `in` query).
*/
parseQueryValue(methodName: string, input: unknown, allowArrays?: boolean): FieldValue;
/** Sends data through this.preConverter, handling any thrown errors. */
private runPreConverter;
/**
* Internal helper for parsing user data.
*
* @param input Data to be parsed.
* @param context A context object representing the current path being parsed,
* the source of the data being parsed, etc.
* @return The parsed value, or null if the value was a FieldValue sentinel
* that should not be included in the resulting parsed data.
*/
private parseData;
private parseObject;
private parseArray;
/**
* "Parses" the provided FieldValueImpl, adding any necessary transforms to
* context.fieldTransforms.
*/
private parseSentinelFieldValue;
/**
* Helper to parse a scalar value (i.e. not an Object, Array, or FieldValue)
*
* @return The parsed value
*/
private parseScalarValue;
private parseArrayTransformElements;
}
/**
* Helper that calls fromDotSeparatedString() but wraps any error thrown.
*/
export declare function fieldPathFromArgument(methodName: string, path: string | ExternalFieldPath): FieldPath;

@@ -0,0 +1,35 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Simple wrapper around a nullable UID. Mostly exists to make code more
* readable.
*/
export declare class User {
readonly uid: string | null;
/** A user with a null UID. */
static readonly UNAUTHENTICATED: User;
static readonly GOOGLE_CREDENTIALS: User;
static readonly FIRST_PARTY: User;
constructor(uid: string | null);
isAuthenticated(): boolean;
/**
* Returns a key representing this user, suitable for inclusion in a
* dictionary.
*/
toKey(): string;
isEqual(otherUser: User): boolean;
}

@@ -0,0 +1,45 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export declare class DatabaseInfo {
readonly databaseId: DatabaseId;
readonly persistenceKey: string;
readonly host: string;
readonly ssl: boolean;
readonly forceLongPolling: boolean;
/**
* Constructs a DatabaseInfo using the provided host, databaseId and
* persistenceKey.
*
* @param databaseId The database to use.
* @param persistenceKey A unique identifier for this Firestore's local
* storage (used in conjunction with the databaseId).
* @param host The Firestore backend host to connect to.
* @param ssl Whether to use SSL when connecting.
* @param forceLongPolling Whether to use the forceLongPolling option
* when using WebChannel as the network transport.
*/
constructor(databaseId: DatabaseId, persistenceKey: string, host: string, ssl: boolean, forceLongPolling: boolean);
}
/** Represents the database ID a Firestore client is associated with. */
export declare class DatabaseId {
readonly projectId: string;
readonly database: string;
constructor(projectId: string, database?: string);
get isDefaultDatabase(): boolean;
isEqual(other: {}): boolean;
compareTo(other: DatabaseId): number;
}

@@ -0,0 +1,89 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { EventHandler } from '../util/misc';
import { Query } from './query';
import { SyncEngine, SyncEngineListener } from './sync_engine';
import { OnlineState, TargetId } from './types';
import { ViewSnapshot } from './view_snapshot';
/**
* Interface for handling events from the EventManager.
*/
export interface Observer<T> {
next: EventHandler<T>;
error: EventHandler<Error>;
}
/**
* EventManager is responsible for mapping queries to query event emitters.
* It handles "fan-out". -- Identical queries will re-use the same watch on the
* backend.
*/
export declare class EventManager implements SyncEngineListener {
private syncEngine;
private queries;
private onlineState;
private snapshotsInSyncListeners;
constructor(syncEngine: SyncEngine);
listen(listener: QueryListener): Promise<TargetId>;
unlisten(listener: QueryListener): Promise<void>;
onWatchChange(viewSnaps: ViewSnapshot[]): void;
onWatchError(query: Query, error: Error): void;
onOnlineStateChange(onlineState: OnlineState): void;
addSnapshotsInSyncListener(observer: Observer<void>): void;
removeSnapshotsInSyncListener(observer: Observer<void>): void;
private raiseSnapshotsInSyncEvent;
}
export interface ListenOptions {
/** Raise events even when only the metadata changes */
readonly includeMetadataChanges?: boolean;
/**
* Wait for a sync with the server when online, but still raise events while
* offline.
*/
readonly waitForSyncWhenOnline?: boolean;
}
/**
* QueryListener takes a series of internal view snapshots and determines
* when to raise the event.
*
* It uses an Observer to dispatch events.
*/
export declare class QueryListener {
readonly query: Query;
private queryObserver;
/**
* Initial snapshots (e.g. from cache) may not be propagated to the wrapped
* observer. This flag is set to true once we've actually raised an event.
*/
private raisedInitialEvent;
private options;
private snap;
private onlineState;
constructor(query: Query, queryObserver: Observer<ViewSnapshot>, options?: ListenOptions);
/**
* Applies the new ViewSnapshot to this listener, raising a user-facing event
* if applicable (depending on what changed, whether the user has opted into
* metadata-only changes, etc.). Returns true if a user-facing event was
* indeed raised.
*/
onViewSnapshot(snap: ViewSnapshot): boolean;
onError(error: Error): void;
/** Returns whether a snapshot was raised. */
applyOnlineStateChange(onlineState: OnlineState): boolean;
private shouldRaiseInitialEvent;
private shouldRaiseEvent;
private raiseInitialEvent;
}
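
ListenOptions above is the internal counterpart of the public SnapshotListenOptions: includeMetadataChanges is what a caller sets when subscribing. A small sketch of how that flag reaches these listeners through the public onSnapshot overloads (v7-era imports, placeholder collection name, and an app assumed to be initialized elsewhere):

import * as firebase from 'firebase/app';
import 'firebase/firestore';

const db = firebase.firestore(); // assumes firebase.initializeApp(...) ran earlier

// With includeMetadataChanges: true the QueryListener also raises events when
// only metadata (fromCache / hasPendingWrites) changes.
const unsubscribe = db.collection('tracks').onSnapshot(
  { includeMetadataChanges: true },
  snapshot => console.log('fromCache:', snapshot.metadata.fromCache),
  err => console.error(err)
);

// Later: stop listening.
unsubscribe();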

@@ -0,0 +1,178 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { CredentialsProvider } from '../api/credentials';
import { Document } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { Mutation } from '../model/mutation';
import { Platform } from '../platform/platform';
import { AsyncQueue } from '../util/async_queue';
import { ListenOptions, Observer, QueryListener } from './event_manager';
import { LruParams } from '../local/lru_garbage_collector';
import { DatabaseId, DatabaseInfo } from './database_info';
import { Query } from './query';
import { Transaction } from './transaction';
import { ViewSnapshot } from './view_snapshot';
export declare class IndexedDbPersistenceSettings {
readonly cacheSizeBytes: number;
readonly synchronizeTabs: boolean;
constructor(cacheSizeBytes: number, synchronizeTabs: boolean);
lruParams(): LruParams;
}
export declare class MemoryPersistenceSettings {
}
export declare type InternalPersistenceSettings = IndexedDbPersistenceSettings | MemoryPersistenceSettings;
/**
* FirestoreClient is a top-level class that constructs and owns all of the
* pieces of the client SDK architecture. It is responsible for creating the
* async queue that is shared by all of the other components in the system.
*/
export declare class FirestoreClient {
private platform;
private databaseInfo;
private credentials;
/**
* Asynchronous queue responsible for all of our internal processing. When
* we get incoming work from the user (via public API) or the network
* (incoming GRPC messages), we should always schedule onto this queue.
* This ensures all of our work is properly serialized (e.g. we don't
* start processing a new operation while the previous one is waiting for
* an async I/O to complete).
*/
private asyncQueue;
private eventMgr;
private persistence;
private localStore;
private remoteStore;
private syncEngine;
private sharedClientState;
private lruScheduler?;
private readonly clientId;
constructor(platform: Platform, databaseInfo: DatabaseInfo, credentials: CredentialsProvider,
/**
* Asynchronous queue responsible for all of our internal processing. When
* we get incoming work from the user (via public API) or the network
* (incoming GRPC messages), we should always schedule onto this queue.
* This ensures all of our work is properly serialized (e.g. we don't
* start processing a new operation while the previous one is waiting for
* an async I/O to complete).
*/
asyncQueue: AsyncQueue);
/**
* Starts up the FirestoreClient, returning only whether or not enabling
* persistence succeeded.
*
* The intent here is to "do the right thing" as far as users are concerned.
* Namely, in cases where offline persistence is requested and possible,
* enable it, but otherwise fall back to persistence disabled. For the most
* part we expect this to succeed one way or the other so we don't expect our
* users to actually wait on the firestore.enablePersistence Promise since
* they generally won't care.
*
* Of course some users actually do care about whether or not persistence
* was successfully enabled, so the Promise returned from this method
* indicates this outcome.
*
* This presents a problem though: even before enablePersistence resolves or
* rejects, users may have made calls to e.g. firestore.collection() which
* means that the FirestoreClient in there will be available and will be
* enqueuing actions on the async queue.
*
* Meanwhile any failure of an operation on the async queue causes it to
* panic and reject any further work, on the premise that unhandled errors
* are fatal.
*
* Consequently the fallback is handled internally here in start, and if the
* fallback succeeds we signal success to the async queue even though the
* start() itself signals failure.
*
* @param persistenceSettings Settings object to configure offline
* persistence.
* @returns A deferred result indicating the user-visible result of enabling
* offline persistence. This method will reject this if IndexedDB fails to
* start for any reason. If usePersistence is false this is
* unconditionally resolved.
*/
start(persistenceSettings: InternalPersistenceSettings): Promise<void>;
/** Enables the network connection and requeues all pending operations. */
enableNetwork(): Promise<void>;
/**
* Initializes persistent storage, attempting to use IndexedDB if
* usePersistence is true or memory-only if false.
*
* If IndexedDB fails because it's already open in another tab or because the
* platform can't possibly support our implementation then this method rejects
* the persistenceResult and falls back on memory-only persistence.
*
* @param persistenceSettings Settings object to configure offline persistence
* @param persistenceResult A deferred result indicating the user-visible
* result of enabling offline persistence. This method will reject this if
* IndexedDB fails to start for any reason. If usePersistence is false
* this is unconditionally resolved.
* @returns a Promise indicating whether or not initialization should
* continue, i.e. that one of the persistence implementations actually
* succeeded.
*/
private initializePersistence;
/**
* Decides whether the provided error allows us to gracefully disable
* persistence (as opposed to crashing the client).
*/
private canFallback;
/**
* Checks that the client has not been terminated. Ensures that other methods on
* this class cannot be called after the client is terminated.
*/
private verifyNotTerminated;
/**
* Starts IndexedDB-based persistence.
*
* @returns A promise indicating success or failure.
*/
private startIndexedDbPersistence;
/**
* Starts Memory-backed persistence. In practice this cannot fail.
*
* @returns A promise that will successfully resolve.
*/
private startMemoryPersistence;
/**
* Initializes the rest of the FirestoreClient, assuming the initial user
* has been obtained from the credential provider and some persistence
* implementation is available in this.persistence.
*/
private initializeRest;
private handleCredentialChange;
/** Disables the network connection. Pending operations will not complete. */
disableNetwork(): Promise<void>;
terminate(): Promise<void>;
/**
* Returns a Promise that resolves when all writes that were pending at the time this
* method was called received server acknowledgement. An acknowledgement can be either acceptance
* or rejection.
*/
waitForPendingWrites(): Promise<void>;
listen(query: Query, observer: Observer<ViewSnapshot>, options: ListenOptions): QueryListener;
unlisten(listener: QueryListener): void;
getDocumentFromLocalCache(docKey: DocumentKey): Promise<Document | null>;
getDocumentsFromLocalCache(query: Query): Promise<ViewSnapshot>;
write(mutations: Mutation[]): Promise<void>;
databaseId(): DatabaseId;
addSnapshotsInSyncListener(observer: Observer<void>): void;
removeSnapshotsInSyncListener(observer: Observer<void>): void;
get clientTerminated(): boolean;
transaction<T>(updateFunction: (transaction: Transaction) => Promise<T>): Promise<T>;
}
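
The start()/initializePersistence contract documented above is what backs the public enablePersistence() call: the returned promise reports whether IndexedDB persistence could be enabled, while the client keeps working memory-only if it could not. A small sketch from the application side (v7-era imports and a placeholder config):

import * as firebase from 'firebase/app';
import 'firebase/firestore';

firebase.initializeApp({ projectId: 'demo-project' }); // placeholder config
const db = firebase.firestore();

// Resolves once IndexedDB persistence has started; rejects (e.g. with
// 'failed-precondition' when another tab holds the lock) while the SDK
// continues without offline persistence.
db.enablePersistence({ synchronizeTabs: true }).catch(err => {
  console.warn('Persistence not enabled:', err.code);
});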

@@ -0,0 +1,39 @@
/**
* @license
* Copyright 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ListenSequenceNumber } from './types';
/**
* `SequenceNumberSyncer` defines the methods required to keep multiple instances of a
* `ListenSequence` in sync.
*/
export interface SequenceNumberSyncer {
writeSequenceNumber(sequenceNumber: ListenSequenceNumber): void;
sequenceNumberHandler: ((sequenceNumber: ListenSequenceNumber) => void) | null;
}
/**
* `ListenSequence` is a monotonic sequence. It is initialized with a minimum value to
* exceed. All subsequent calls to next will return increasing values. If provided with a
* `SequenceNumberSyncer`, it will additionally bump its next value when told of a new value, as
* well as write out sequence numbers that it produces via `next()`.
*/
export declare class ListenSequence {
private previousValue;
static readonly INVALID: ListenSequenceNumber;
private writeNewSequenceNumber?;
constructor(previousValue: ListenSequenceNumber, sequenceNumberSyncer?: SequenceNumberSyncer);
private setPreviousValue;
next(): ListenSequenceNumber;
}
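
A self-contained sketch of the SequenceNumberSyncer contract: an in-memory syncer that records sequence numbers as the sequence produces them and can feed externally observed values back in. ListenSequenceNumber is modeled as a plain number here so the snippet stands alone:

type ListenSequenceNumber = number;

interface SequenceNumberSyncer {
  writeSequenceNumber(sequenceNumber: ListenSequenceNumber): void;
  sequenceNumberHandler: ((sequenceNumber: ListenSequenceNumber) => void) | null;
}

// Records the last value written out and lets another "tab" push values in.
class InMemorySyncer implements SequenceNumberSyncer {
  lastWritten: ListenSequenceNumber = 0;
  sequenceNumberHandler: ((sequenceNumber: ListenSequenceNumber) => void) | null = null;

  writeSequenceNumber(sequenceNumber: ListenSequenceNumber): void {
    this.lastWritten = sequenceNumber;
  }

  // Simulates another client announcing a sequence number it observed.
  observeExternal(sequenceNumber: ListenSequenceNumber): void {
    this.sequenceNumberHandler?.(sequenceNumber);
  }
}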

@@ -0,0 +1,207 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Document } from '../model/document';
import { ArrayValue, FieldValue } from '../model/field_value';
import { FieldPath, ResourcePath } from '../model/path';
import { Target } from './target';
export declare enum LimitType {
First = "F",
Last = "L"
}
/**
* Query encapsulates all the query attributes we support in the SDK. It can
* be run against the LocalStore, as well as be converted to a `Target` to
* query the RemoteStore results.
*/
export declare class Query {
readonly path: ResourcePath;
readonly collectionGroup: string | null;
readonly explicitOrderBy: OrderBy[];
readonly filters: Filter[];
readonly limit: number | null;
readonly limitType: LimitType;
readonly startAt: Bound | null;
readonly endAt: Bound | null;
static atPath(path: ResourcePath): Query;
private memoizedOrderBy;
private memoizedTarget;
/**
* Initializes a Query with a path and optional additional query constraints.
* Path must currently be empty if this is a collection group query.
*/
constructor(path: ResourcePath, collectionGroup?: string | null, explicitOrderBy?: OrderBy[], filters?: Filter[], limit?: number | null, limitType?: LimitType, startAt?: Bound | null, endAt?: Bound | null);
get orderBy(): OrderBy[];
addFilter(filter: Filter): Query;
addOrderBy(orderBy: OrderBy): Query;
withLimitToFirst(limit: number | null): Query;
withLimitToLast(limit: number | null): Query;
withStartAt(bound: Bound): Query;
withEndAt(bound: Bound): Query;
/**
* Helper to convert a collection group query into a collection query at a
* specific path. This is used when executing collection group queries, since
* we have to split the query into a set of collection queries at multiple
* paths.
*/
asCollectionQueryAtPath(path: ResourcePath): Query;
/**
* Returns true if this query does not specify any query constraints that
* could remove results.
*/
matchesAllDocuments(): boolean;
canonicalId(): string;
toString(): string;
isEqual(other: Query): boolean;
docComparator(d1: Document, d2: Document): number;
matches(doc: Document): boolean;
hasLimitToFirst(): boolean;
hasLimitToLast(): boolean;
getFirstOrderByField(): FieldPath | null;
getInequalityFilterField(): FieldPath | null;
findFilterOperator(operators: Operator[]): Operator | null;
isDocumentQuery(): boolean;
isCollectionGroupQuery(): boolean;
/**
     * Converts this `Query` instance to its corresponding `Target`
* representation.
*/
toTarget(): Target;
private matchesPathAndCollectionGroup;
/**
* A document must have a value for every ordering clause in order to show up
* in the results.
*/
private matchesOrderBy;
private matchesFilters;
/**
* Makes sure a document is within the bounds, if provided.
*/
private matchesBounds;
private assertValidBound;
}
export declare abstract class Filter {
abstract matches(doc: Document): boolean;
abstract canonicalId(): string;
abstract isEqual(filter: Filter): boolean;
}
export declare class Operator {
name: string;
static LESS_THAN: Operator;
static LESS_THAN_OR_EQUAL: Operator;
static EQUAL: Operator;
static GREATER_THAN: Operator;
static GREATER_THAN_OR_EQUAL: Operator;
static ARRAY_CONTAINS: Operator;
static IN: Operator;
static ARRAY_CONTAINS_ANY: Operator;
static fromString(op: string): Operator;
constructor(name: string);
toString(): string;
isEqual(other: Operator): boolean;
}
export declare class FieldFilter extends Filter {
field: FieldPath;
op: Operator;
value: FieldValue;
protected constructor(field: FieldPath, op: Operator, value: FieldValue);
/**
* Creates a filter based on the provided arguments.
*/
static create(field: FieldPath, op: Operator, value: FieldValue): FieldFilter;
matches(doc: Document): boolean;
protected matchesComparison(comparison: number): boolean;
isInequality(): boolean;
canonicalId(): string;
isEqual(other: Filter): boolean;
toString(): string;
}
/** Filter that matches on key fields (i.e. '__name__'). */
export declare class KeyFieldFilter extends FieldFilter {
matches(doc: Document): boolean;
}
/** Filter that matches on key fields within an array. */
export declare class KeyFieldInFilter extends FieldFilter {
value: ArrayValue;
constructor(field: FieldPath, value: ArrayValue);
matches(doc: Document): boolean;
}
/** A Filter that implements the array-contains operator. */
export declare class ArrayContainsFilter extends FieldFilter {
constructor(field: FieldPath, value: FieldValue);
matches(doc: Document): boolean;
}
/** A Filter that implements the IN operator. */
export declare class InFilter extends FieldFilter {
value: ArrayValue;
constructor(field: FieldPath, value: ArrayValue);
matches(doc: Document): boolean;
}
/** A Filter that implements the array-contains-any operator. */
export declare class ArrayContainsAnyFilter extends FieldFilter {
value: ArrayValue;
constructor(field: FieldPath, value: ArrayValue);
matches(doc: Document): boolean;
}
/**
* The direction of sorting in an order by.
*/
export declare class Direction {
name: string;
static ASCENDING: Direction;
static DESCENDING: Direction;
private constructor();
toString(): string;
}
/**
* Represents a bound of a query.
*
* The bound is specified with the given components representing a position and
* whether it's just before or just after the position (relative to whatever the
* query order is).
*
* The position represents a logical index position for a query. It's a prefix
* of values for the (potentially implicit) order by clauses of a query.
*
* Bound provides a function to determine whether a document comes before or
* after a bound. This is influenced by whether the position is just before or
* just after the provided values.
*/
export declare class Bound {
readonly position: FieldValue[];
readonly before: boolean;
constructor(position: FieldValue[], before: boolean);
canonicalId(): string;
/**
* Returns true if a document sorts before a bound using the provided sort
* order.
*/
sortsBeforeDocument(orderBy: OrderBy[], doc: Document): boolean;
isEqual(other: Bound | null): boolean;
}
/**
* An ordering on a field, in some Direction. Direction defaults to ASCENDING.
*/
export declare class OrderBy {
readonly field: FieldPath;
readonly dir: Direction;
private readonly isKeyOrderBy;
constructor(field: FieldPath, dir?: Direction);
compare(d1: Document, d2: Document): number;
canonicalId(): string;
toString(): string;
isEqual(other: OrderBy): boolean;
}
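// --- Illustrative sketch (not part of the shipped declarations) ---
// One plausible reading of `matchesAllDocuments()` declared on `Query` above,
// written against a structural subset of the class so it stays self-contained.
// The SDK's exact checks may differ (an explicit orderBy on the key field, for
// instance, may still be allowed); treat this as an approximation.
interface QueryConstraintsSketch {
    filters: unknown[];
    explicitOrderBy: unknown[];
    limit: number | null;
    startAt: unknown | null;
    endAt: unknown | null;
}
function matchesAllDocumentsSketch(q: QueryConstraintsSketch): boolean {
    // A query keeps every document only when nothing can remove results:
    // no filters, no limit and no start/end bounds.
    return (
        q.filters.length === 0 &&
        q.limit === null &&
        q.startAt === null &&
        q.endAt === null &&
        q.explicitOrderBy.length === 0
    );
}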

View File

@ -0,0 +1,35 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Timestamp } from '../api/timestamp';
/**
* A version of a document in Firestore. This corresponds to the version
* timestamp, such as update_time or read_time.
*/
export declare class SnapshotVersion {
private timestamp;
static readonly MIN: SnapshotVersion;
static fromMicroseconds(value: number): SnapshotVersion;
static fromTimestamp(value: Timestamp): SnapshotVersion;
static forDeletedDoc(): SnapshotVersion;
private constructor();
compareTo(other: SnapshotVersion): number;
isEqual(other: SnapshotVersion): boolean;
/** Returns a number representation of the version for use in spec tests. */
toMicroseconds(): number;
toString(): string;
toTimestamp(): Timestamp;
}
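// --- Illustrative usage sketch (not part of the shipped declarations) ---
// Ordering two versions built from `Timestamp` values via the declarations
// above; `isNewerVersionSketch` is a hypothetical helper name.
function isNewerVersionSketch(update: Timestamp, lastSeen: Timestamp): boolean {
    // compareTo() follows the usual negative/zero/positive convention.
    return (
        SnapshotVersion.fromTimestamp(update).compareTo(
            SnapshotVersion.fromTimestamp(lastSeen)
        ) > 0
    );
}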

View File

@ -0,0 +1,192 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { User } from '../auth/user';
import { LocalStore } from '../local/local_store';
import { DocumentKeySet } from '../model/collections';
import { DocumentKey } from '../model/document_key';
import { Mutation } from '../model/mutation';
import { MutationBatchResult } from '../model/mutation_batch';
import { RemoteEvent } from '../remote/remote_event';
import { RemoteStore } from '../remote/remote_store';
import { RemoteSyncer } from '../remote/remote_syncer';
import { FirestoreError } from '../util/error';
import { Deferred } from '../util/promise';
import { SortedMap } from '../util/sorted_map';
import { ClientId, SharedClientState } from '../local/shared_client_state';
import { QueryTargetState, SharedClientStateSyncer } from '../local/shared_client_state_syncer';
import { Query } from './query';
import { Transaction } from './transaction';
import { BatchId, MutationBatchState, OnlineState, OnlineStateSource, TargetId } from './types';
import { ViewSnapshot } from './view_snapshot';
import { AsyncQueue } from '../util/async_queue';
/**
* Interface implemented by EventManager to handle notifications from
* SyncEngine.
*/
export interface SyncEngineListener {
/** Handles new view snapshots. */
onWatchChange(snapshots: ViewSnapshot[]): void;
/** Handles the failure of a query. */
onWatchError(query: Query, error: Error): void;
/** Handles a change in online state. */
onOnlineStateChange(onlineState: OnlineState): void;
}
/**
* SyncEngine is the central controller in the client SDK architecture. It is
* the glue code between the EventManager, LocalStore, and RemoteStore. Some of
* SyncEngine's responsibilities include:
* 1. Coordinating client requests and remote events between the EventManager
* and the local and remote data stores.
* 2. Managing a View object for each query, providing the unified view between
* the local and remote data stores.
* 3. Notifying the RemoteStore when the LocalStore has new mutations in its
* queue that need sending to the backend.
*
 * The SyncEngine's methods should only ever be called by methods running in the
* global async queue.
*/
export declare class SyncEngine implements RemoteSyncer, SharedClientStateSyncer {
private localStore;
private remoteStore;
private sharedClientState;
private currentUser;
private syncEngineListener;
private queryViewsByQuery;
private queriesByTarget;
private limboTargetsByKey;
private limboResolutionsByTarget;
private limboDocumentRefs;
/** Stores user completion handlers, indexed by User and BatchId. */
private mutationUserCallbacks;
/** Stores user callbacks waiting for all pending writes to be acknowledged. */
private pendingWritesCallbacks;
private limboTargetIdGenerator;
private isPrimary;
private onlineState;
constructor(localStore: LocalStore, remoteStore: RemoteStore, sharedClientState: SharedClientState, currentUser: User);
get isPrimaryClient(): boolean;
/** Subscribes to SyncEngine notifications. Has to be called exactly once. */
subscribe(syncEngineListener: SyncEngineListener): void;
/**
     * Initiates the new listen and resolves the promise once the listen has
     * been enqueued to the server. All subsequent view snapshots or errors are
     * sent to the subscribed handlers. Returns the targetId of the query.
*/
listen(query: Query): Promise<TargetId>;
/**
* Registers a view for a previously unknown query and computes its initial
* snapshot.
*/
private initializeViewAndComputeSnapshot;
/**
* Reconcile the list of synced documents in an existing view with those
* from persistence.
*/
private synchronizeViewAndComputeSnapshot;
/** Stops listening to the query. */
unlisten(query: Query): Promise<void>;
/**
* Initiates the write of local mutation batch which involves adding the
* writes to the mutation queue, notifying the remote store about new
* mutations and raising events for any changes this write caused.
*
* The promise returned by this call is resolved when the above steps
* have completed, *not* when the write was acked by the backend. The
* userCallback is resolved once the write was acked/rejected by the
* backend (or failed locally for any other reason).
*/
write(batch: Mutation[], userCallback: Deferred<void>): Promise<void>;
/**
* Takes an updateFunction in which a set of reads and writes can be performed
* atomically. In the updateFunction, the client can read and write values
* using the supplied transaction object. After the updateFunction, all
* changes will be committed. If a retryable error occurs (ex: some other
* client has changed any of the data referenced), then the updateFunction
* will be called again after a backoff. If the updateFunction still fails
* after all retries, then the transaction will be rejected.
*
* The transaction object passed to the updateFunction contains methods for
* accessing documents and collections. Unlike other datastore access, data
* accessed with the transaction will not reflect local changes that have not
* been committed. For this reason, it is required that all reads are
* performed before any writes. Transactions must be performed while online.
*
* The Deferred input is resolved when the transaction is fully committed.
*/
runTransaction<T>(asyncQueue: AsyncQueue, updateFunction: (transaction: Transaction) => Promise<T>, deferred: Deferred<T>): void;
applyRemoteEvent(remoteEvent: RemoteEvent): Promise<void>;
/**
* Applies an OnlineState change to the sync engine and notifies any views of
* the change.
*/
applyOnlineStateChange(onlineState: OnlineState, source: OnlineStateSource): void;
rejectListen(targetId: TargetId, err: FirestoreError): Promise<void>;
applyBatchState(batchId: BatchId, batchState: MutationBatchState, error?: FirestoreError): Promise<void>;
applySuccessfulWrite(mutationBatchResult: MutationBatchResult): Promise<void>;
rejectFailedWrite(batchId: BatchId, error: FirestoreError): Promise<void>;
/**
* Registers a user callback that resolves when all pending mutations at the moment of calling
     * are acknowledged.
*/
registerPendingWritesCallback(callback: Deferred<void>): Promise<void>;
/**
* Triggers the callbacks that are waiting for this batch id to get acknowledged by server,
* if there are any.
*/
private triggerPendingWritesCallbacks;
/** Reject all outstanding callbacks waiting for pending writes to complete. */
private rejectOutstandingPendingWritesCallbacks;
private addMutationCallback;
/**
* Resolves or rejects the user callback for the given batch and then discards
* it.
*/
private processUserCallback;
private removeAndCleanupTarget;
private removeLimboTarget;
private updateTrackedLimbos;
private trackLimboChange;
currentLimboDocs(): SortedMap<DocumentKey, TargetId>;
private emitNewSnapsAndNotifyLocalStore;
private assertSubscribed;
handleCredentialChange(user: User): Promise<void>;
applyPrimaryState(isPrimary: boolean): Promise<void>;
private resetLimboDocuments;
/**
* Reconcile the query views of the provided query targets with the state from
* persistence. Raises snapshots for any changes that affect the local
     * client and returns the updated state of all targets' query data.
*/
private synchronizeQueryViewsAndRaiseSnapshots;
/**
* Creates a `Query` object from the specified `Target`. There is no way to
* obtain the original `Query`, so we synthesize a `Query` from the `Target`
* object.
*
* The synthesized result might be different from the original `Query`, but
* since the synthesized `Query` should return the same results as the
* original one (only the presentation of results might differ), the potential
* difference will not cause issues.
*/
private synthesizeTargetToQuery;
getActiveClients(): Promise<ClientId[]>;
applyTargetState(targetId: TargetId, state: QueryTargetState, error?: FirestoreError): Promise<void>;
applyActiveTargetsChange(added: TargetId[], removed: TargetId[]): Promise<void>;
enableNetwork(): Promise<void>;
disableNetwork(): Promise<void>;
getRemoteKeysForTarget(targetId: TargetId): DocumentKeySet;
}

View File

@ -0,0 +1,48 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ResourcePath } from '../model/path';
import { Bound, Filter, OrderBy } from './query';
/**
* A Target represents the WatchTarget representation of a Query, which is used
* by the LocalStore and the RemoteStore to keep track of and to execute
 * backend queries. While a Query can represent multiple Targets, each Target
* maps to a single WatchTarget in RemoteStore and a single TargetData entry
* in persistence.
*/
export declare class Target {
readonly path: ResourcePath;
readonly collectionGroup: string | null;
readonly orderBy: OrderBy[];
readonly filters: Filter[];
readonly limit: number | null;
readonly startAt: Bound | null;
readonly endAt: Bound | null;
private memoizedCanonicalId;
/**
* Initializes a Target with a path and optional additional query constraints.
* Path must currently be empty if this is a collection group query.
*
* NOTE: you should always construct `Target` from `Query.toTarget` instead of
* using this constructor, because `Query` provides an implicit `orderBy`
* property.
*/
constructor(path: ResourcePath, collectionGroup?: string | null, orderBy?: OrderBy[], filters?: Filter[], limit?: number | null, startAt?: Bound | null, endAt?: Bound | null);
canonicalId(): string;
toString(): string;
isEqual(other: Target): boolean;
isDocumentQuery(): boolean;
}

View File

@ -0,0 +1,49 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { TargetId } from './types';
/**
* Generates monotonically increasing target IDs for sending targets to the
* watch stream.
*
 * The client constructs two generators, one for the target cache (via
 * forTargetCache()), and one for limbo documents (via forSyncEngine()). These
 * two generators produce non-overlapping IDs (by using even and odd IDs
 * respectively).
 *
 * By separating the target ID space, the target cache can generate target IDs
 * that persist across client restarts, while the sync engine can independently
 * generate in-memory target IDs that are transient and can be reused after a
 * restart.
*/
export declare class TargetIdGenerator {
private generatorId;
private nextId;
/**
* Instantiates a new TargetIdGenerator. If a seed is provided, the generator
* will use the seed value as the next target ID.
*/
constructor(generatorId: number, seed?: number);
next(): TargetId;
/**
* Returns the ID that follows the given ID. Subsequent calls to `next()`
* use the newly returned target ID as their base.
*/
after(targetId: TargetId): TargetId;
private seek;
static forTargetCache(): TargetIdGenerator;
static forSyncEngine(): TargetIdGenerator;
}
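// --- Illustrative sketch (not part of the shipped declarations) ---
// A minimal model of the even/odd ID spaces described above. The starting
// values and step used here (2 for the target cache, 1 for the sync engine,
// increment of 2) are illustrative assumptions, not the SDK's actual tuning.
class SketchTargetIdGenerator {
    private constructor(private nextId: TargetId) {}
    next(): TargetId {
        const id = this.nextId;
        this.nextId += 2; // stay within the same parity class
        return id;
    }
    static forTargetCacheSketch(): SketchTargetIdGenerator {
        return new SketchTargetIdGenerator(2); // even IDs, persisted across restarts
    }
    static forSyncEngineSketch(): SketchTargetIdGenerator {
        return new SketchTargetIdGenerator(1); // odd IDs, in-memory only
    }
}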

View File

@ -0,0 +1,60 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ParsedSetData, ParsedUpdateData } from '../api/user_data_converter';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { Datastore } from '../remote/datastore';
/**
* Internal transaction object responsible for accumulating the mutations to
* perform and the base versions for any documents read.
*/
export declare class Transaction {
private datastore;
private readVersions;
private mutations;
private committed;
/**
* A deferred usage error that occurred previously in this transaction that
* will cause the transaction to fail once it actually commits.
*/
private lastWriteError;
/**
* Set of documents that have been written in the transaction.
*
* When there's more than one write to the same key in a transaction, any
* writes after the first are handled differently.
*/
private writtenDocs;
constructor(datastore: Datastore);
lookup(keys: DocumentKey[]): Promise<MaybeDocument[]>;
set(key: DocumentKey, data: ParsedSetData): void;
update(key: DocumentKey, data: ParsedUpdateData): void;
delete(key: DocumentKey): void;
commit(): Promise<void>;
private recordVersion;
/**
* Returns the version of this document when it was read in this transaction,
* as a precondition, or no precondition if it was not read.
*/
private precondition;
/**
* Returns the precondition for a document if the operation is an update.
*/
private preconditionForUpdate;
private write;
private ensureCommitNotCalled;
}

View File

@ -0,0 +1,39 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Deferred } from '../util/promise';
import { AsyncQueue } from '../util/async_queue';
import { Transaction } from './transaction';
import { RemoteStore } from '../remote/remote_store';
/**
* TransactionRunner encapsulates the logic needed to run and retry transactions
* with backoff.
*/
export declare class TransactionRunner<T> {
private readonly asyncQueue;
private readonly remoteStore;
private readonly updateFunction;
private readonly deferred;
private retries;
private backoff;
constructor(asyncQueue: AsyncQueue, remoteStore: RemoteStore, updateFunction: (transaction: Transaction) => Promise<T>, deferred: Deferred<T>);
/** Runs the transaction and sets the result on deferred. */
run(): void;
private runWithBackOff;
private tryRunUpdateFunction;
private handleTransactionError;
private isRetryableTransactionError;
}
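// --- Illustrative sketch (not part of the shipped declarations) ---
// A generic model of the retry loop described above, independent of the
// internal AsyncQueue/backoff types. `maxAttempts`, the delay constants and
// `isRetryableSketch` are illustrative assumptions, not the SDK's tuning.
async function runWithBackoffSketch<T>(
    updateFunction: () => Promise<T>,
    maxAttempts = 5,
    initialDelayMs = 1000
): Promise<T> {
    let delayMs = initialDelayMs;
    for (let attempt = 1; ; attempt++) {
        try {
            return await updateFunction();
        } catch (error) {
            if (attempt >= maxAttempts || !isRetryableSketch(error)) {
                throw error;
            }
            // Back off before retrying so contended transactions do not thrash.
            await new Promise(resolve => setTimeout(resolve, delayMs));
            delayMs *= 2;
        }
    }
}
function isRetryableSketch(_error: unknown): boolean {
    // The real runner inspects FirestoreError codes (e.g. 'aborted'); this
    // stand-in treats every failure as retryable.
    return true;
}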

View File

@ -0,0 +1,63 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* BatchID is a locally assigned ID for a batch of mutations that have been
* applied.
*/
export declare type BatchId = number;
/**
* A locally-assigned ID used to refer to a target being watched via the
* Watch service.
*/
export declare type TargetId = number;
export declare type ListenSequenceNumber = number;
export declare type ProtoByteString = Uint8Array | string;
/** The different states of a mutation batch. */
export declare type MutationBatchState = 'pending' | 'acknowledged' | 'rejected';
/**
 * Describes the online state of the Firestore client. Note that this does not
 * indicate whether the remote store is trying to connect. This is
* primarily used by the View / EventManager code to change their behavior while
* offline (e.g. get() calls shouldn't wait for data from the server and
* snapshot events should set metadata.isFromCache=true).
*/
export declare enum OnlineState {
/**
* The Firestore client is in an unknown online state. This means the client
* is either not actively trying to establish a connection or it is currently
* trying to establish a connection, but it has not succeeded or failed yet.
* Higher-level components should not operate in offline mode.
*/
Unknown = 0,
/**
* The client is connected and the connections are healthy. This state is
* reached after a successful connection and there has been at least one
* successful message received from the backends.
*/
Online = 1,
/**
* The client is either trying to establish a connection but failing, or it
* has been explicitly marked offline via a call to disableNetwork().
* Higher-level components should operate in offline mode.
*/
Offline = 2
}
/** The source of an online state event. */
export declare enum OnlineStateSource {
RemoteStore = 0,
SharedClientState = 1
}

View File

@ -0,0 +1,18 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** The semver (www.semver.org) version of the SDK. */
export declare const SDK_VERSION: string;

View File

@ -0,0 +1,146 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { QueryResult } from '../local/local_store';
import { DocumentKeySet, MaybeDocumentMap } from '../model/collections';
import { DocumentKey } from '../model/document_key';
import { DocumentSet } from '../model/document_set';
import { TargetChange } from '../remote/remote_event';
import { Query } from './query';
import { OnlineState } from './types';
import { DocumentChangeSet, ViewSnapshot } from './view_snapshot';
export declare type LimboDocumentChange = AddedLimboDocument | RemovedLimboDocument;
export declare class AddedLimboDocument {
key: DocumentKey;
constructor(key: DocumentKey);
}
export declare class RemovedLimboDocument {
key: DocumentKey;
constructor(key: DocumentKey);
}
/** The result of applying a set of doc changes to a view. */
export interface ViewDocumentChanges {
/** The new set of docs that should be in the view. */
documentSet: DocumentSet;
/** The diff of these docs with the previous set of docs. */
changeSet: DocumentChangeSet;
/**
* Whether the set of documents passed in was not sufficient to calculate the
* new state of the view and there needs to be another pass based on the
* local cache.
*/
needsRefill: boolean;
mutatedKeys: DocumentKeySet;
}
export interface ViewChange {
snapshot?: ViewSnapshot;
limboChanges: LimboDocumentChange[];
}
/**
* View is responsible for computing the final merged truth of what docs are in
* a query. It gets notified of local and remote changes to docs, and applies
* the query filters and limits to determine the most correct possible results.
*/
export declare class View {
private query;
/** Documents included in the remote target */
private _syncedDocuments;
private syncState;
/**
* A flag whether the view is current with the backend. A view is considered
* current after it has seen the current flag from the backend and did not
* lose consistency within the watch stream (e.g. because of an existence
* filter mismatch).
*/
private current;
private documentSet;
/** Documents in the view but not in the remote target */
private limboDocuments;
/** Document Keys that have local changes */
private mutatedKeys;
constructor(query: Query,
/** Documents included in the remote target */
_syncedDocuments: DocumentKeySet);
/**
* The set of remote documents that the server has told us belongs to the target associated with
* this view.
*/
get syncedDocuments(): DocumentKeySet;
/**
* Iterates over a set of doc changes, applies the query limit, and computes
* what the new results should be, what the changes were, and whether we may
* need to go back to the local cache for more results. Does not make any
* changes to the view.
* @param docChanges The doc changes to apply to this view.
* @param previousChanges If this is being called with a refill, then start
* with this set of docs and changes instead of the current view.
* @return a new set of docs, changes, and refill flag.
*/
computeDocChanges(docChanges: MaybeDocumentMap, previousChanges?: ViewDocumentChanges): ViewDocumentChanges;
private shouldWaitForSyncedDocument;
/**
* Updates the view with the given ViewDocumentChanges and optionally updates
* limbo docs and sync state from the provided target change.
* @param docChanges The set of changes to make to the view's docs.
* @param updateLimboDocuments Whether to update limbo documents based on this
* change.
* @param targetChange A target change to apply for computing limbo docs and
* sync state.
* @return A new ViewChange with the given docs, changes, and sync state.
*/
applyChanges(docChanges: ViewDocumentChanges, updateLimboDocuments: boolean, targetChange?: TargetChange): ViewChange;
/**
* Applies an OnlineState change to the view, potentially generating a
* ViewChange if the view's syncState changes as a result.
*/
applyOnlineStateChange(onlineState: OnlineState): ViewChange;
/**
* Returns whether the doc for the given key should be in limbo.
*/
private shouldBeInLimbo;
/**
* Updates syncedDocuments, current, and limbo docs based on the given change.
* Returns the list of changes to which docs are in limbo.
*/
private applyTargetChange;
private updateLimboDocuments;
/**
* Update the in-memory state of the current view with the state read from
* persistence.
*
* We update the query view whenever a client's primary status changes:
* - When a client transitions from primary to secondary, it can miss
* LocalStorage updates and its query views may temporarily not be
* synchronized with the state on disk.
* - For secondary to primary transitions, the client needs to update the list
* of `syncedDocuments` since secondary clients update their query views
* based purely on synthesized RemoteEvents.
*
* @param queryResult.documents - The documents that match the query according
* to the LocalStore.
* @param queryResult.remoteKeys - The keys of the documents that match the
* query according to the backend.
*
* @return The ViewChange that resulted from this synchronization.
*/
synchronizeWithPersistedState(queryResult: QueryResult): ViewChange;
/**
* Returns a view snapshot as if this query was just listened to. Contains
* a document add for every existing document and the `fromCache` and
* `hasPendingWrites` status of the already established view.
*/
computeInitialSnapshot(): ViewSnapshot;
}
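// --- Illustrative usage sketch (not part of the shipped declarations) ---
// The two-phase flow described above: compute the changes, refill from the
// local cache when the delta alone was insufficient, then apply.
// `readFullQueryResultsSketch` is a hypothetical stand-in for a LocalStore
// query; it is not part of the SDK.
declare function readFullQueryResultsSketch(query: Query): MaybeDocumentMap;
function applyWithOptionalRefillSketch(view: View, query: Query, docChanges: MaybeDocumentMap): ViewChange {
    let changes = view.computeDocChanges(docChanges);
    if (changes.needsRefill) {
        // The delta was not enough (e.g. a limit query lost a document), so
        // recompute against the full local result set, carrying the changes
        // computed so far as `previousChanges`.
        changes = view.computeDocChanges(readFullQueryResultsSketch(query), changes);
    }
    return view.applyChanges(changes, /* updateLimboDocuments= */ true);
}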

View File

@ -0,0 +1,58 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Document } from '../model/document';
import { DocumentSet } from '../model/document_set';
import { DocumentKeySet } from '../model/collections';
import { Query } from './query';
export declare enum ChangeType {
Added = 0,
Removed = 1,
Modified = 2,
Metadata = 3
}
export interface DocumentViewChange {
type: ChangeType;
doc: Document;
}
export declare enum SyncState {
Local = 0,
Synced = 1
}
/**
* DocumentChangeSet keeps track of a set of changes to docs in a query, merging
* duplicate events for the same doc.
*/
export declare class DocumentChangeSet {
private changeMap;
track(change: DocumentViewChange): void;
getChanges(): DocumentViewChange[];
}
export declare class ViewSnapshot {
readonly query: Query;
readonly docs: DocumentSet;
readonly oldDocs: DocumentSet;
readonly docChanges: DocumentViewChange[];
readonly mutatedKeys: DocumentKeySet;
readonly fromCache: boolean;
readonly syncStateChanged: boolean;
readonly excludesMetadataChanges: boolean;
constructor(query: Query, docs: DocumentSet, oldDocs: DocumentSet, docChanges: DocumentViewChange[], mutatedKeys: DocumentKeySet, fromCache: boolean, syncStateChanged: boolean, excludesMetadataChanges: boolean);
/** Returns a view snapshot as if all documents in the snapshot were added. */
static fromInitialDocuments(query: Query, documents: DocumentSet, mutatedKeys: DocumentKeySet, fromCache: boolean): ViewSnapshot;
get hasPendingWrites(): boolean;
isEqual(other: ViewSnapshot): boolean;
}

View File

@ -0,0 +1,86 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ResourcePath } from '../model/path';
/**
* Helpers for dealing with resource paths stored in IndexedDB.
*
* Resource paths in their canonical string form do not sort as the server
* sorts them. Specifically the server splits paths into segments first and then
* sorts, putting end-of-segment before any character. In a UTF-8 string
* encoding the slash ('/') that denotes the end-of-segment naturally comes
* after other characters so the intent here is to encode the path delimiters in
* such a way that the resulting strings sort naturally.
*
* Resource paths are also used for prefix scans so it's important to
* distinguish whole segments from any longer segments of which they might be a
* prefix. For example, it's important to make it possible to scan documents in
* a collection "foo" without encountering documents in a collection "foobar".
*
* Separate from the concerns about resource path ordering and separation,
 * on Android, SQLite imposes additional restrictions since it does not handle
* keys with embedded NUL bytes particularly well. Rather than change the
* implementation we keep the encoding identical to keep the ports similar.
*
* Taken together this means resource paths when encoded for storage in
* IndexedDB have the following characteristics:
*
* * Segment separators ("/") sort before everything else.
* * All paths have a trailing separator.
* * NUL bytes do not exist in the output, since IndexedDB doesn't treat them
* well.
*
* Therefore resource paths are encoded into string form using the following
* rules:
*
* * '\x01' is used as an escape character.
* * Path separators are encoded as "\x01\x01"
* * NUL bytes are encoded as "\x01\x10"
* * '\x01' is encoded as "\x01\x11"
*
* This encoding leaves some room between path separators and the NUL byte
* just in case we decide to support integer document ids after all.
*
* Note that characters treated specially by the backend ('.', '/', and '~')
* are not treated specially here. This class assumes that any unescaping of
* resource path strings into actual ResourcePath objects will handle these
* characters there.
*/
export declare type EncodedResourcePath = string;
/**
 * Encodes a resource path into an IndexedDb-compatible string form.
*/
export declare function encode(path: ResourcePath): EncodedResourcePath;
/**
* Decodes the given IndexedDb-compatible string form of a resource path into
* a ResourcePath instance. Note that this method is not suitable for use with
* decoding resource names from the server; those are One Platform format
* strings.
*/
export declare function decode(path: EncodedResourcePath): ResourcePath;
/**
* Computes the prefix successor of the given path, computed by encode above.
* A prefix successor is the first key that cannot be prefixed by the given
* path. It's useful for defining the end of a prefix scan such that all keys
* in the scan have the same prefix.
*
* Note that this is not a general prefix successor implementation, which is
* tricky to get right with Strings, given that they encode down to UTF-8.
* Instead this relies on the fact that all paths encoded by this class are
* always terminated with a separator, and so a successor can always be
* cheaply computed by incrementing the last character of the path.
*/
export declare function prefixSuccessor(path: EncodedResourcePath): EncodedResourcePath;
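// --- Illustrative sketch (not part of the shipped declarations) ---
// The escaping rules above applied to a plain array of path segments, so the
// example stays independent of ResourcePath. `encodeSegmentsSketch` and
// `prefixSuccessorSketch` are hypothetical helper names.
function encodeSegmentsSketch(segments: string[]): string {
    let result = '';
    for (const segment of segments) {
        result +=
            segment
                .replace(/\x01/g, '\x01\x11') // escape the escape character itself
                .replace(/\x00/g, '\x01\x10') + // escape NUL bytes
            '\x01\x01'; // every segment (including the last) ends with a separator
    }
    return result;
}
function prefixSuccessorSketch(encodedPath: string): string {
    // Encoded paths always end with the separator "\x01\x01", so the first key
    // that can no longer share the prefix is obtained by bumping the final
    // character ('\x01' -> '\x02').
    const last = encodedPath.charCodeAt(encodedPath.length - 1);
    return encodedPath.substring(0, encodedPath.length - 1) + String.fromCharCode(last + 1);
}
// For example, ['foo'] encodes to 'foo\x01\x01' and its prefix successor is
// 'foo\x01\x02', so the range [encode, successor) covers every document under
// 'foo' without reaching a sibling collection such as 'foobar'.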

View File

@ -0,0 +1,61 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { QueryEngine } from './query_engine';
import { LocalDocumentsView } from './local_documents_view';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { Query } from '../core/query';
import { SnapshotVersion } from '../core/snapshot_version';
import { DocumentKeySet, DocumentMap } from '../model/collections';
/**
* A query engine that takes advantage of the target document mapping in the
* QueryCache. The IndexFreeQueryEngine optimizes query execution by only
* reading the documents that previously matched a query plus any documents that were
* edited after the query was last listened to.
*
* There are some cases where Index-Free queries are not guaranteed to produce
* the same results as full collection scans. In these cases, the
* IndexFreeQueryEngine falls back to full query processing. These cases are:
*
* - Limit queries where a document that matched the query previously no longer
* matches the query.
*
* - Limit queries where a document edit may cause the document to sort below
* another document that is in the local cache.
*
* - Queries that have never been CURRENT or free of Limbo documents.
*/
export declare class IndexFreeQueryEngine implements QueryEngine {
private localDocumentsView;
setLocalDocumentsView(localDocuments: LocalDocumentsView): void;
getDocumentsMatchingQuery(transaction: PersistenceTransaction, query: Query, lastLimboFreeSnapshotVersion: SnapshotVersion, remoteKeys: DocumentKeySet): PersistencePromise<DocumentMap>;
/** Applies the query filter and sorting to the provided documents. */
private applyQuery;
/**
* Determines if a limit query needs to be refilled from cache, making it
* ineligible for index-free execution.
*
* @param sortedPreviousResults The documents that matched the query when it
* was last synchronized, sorted by the query's comparator.
* @param remoteKeys The document keys that matched the query at the last
* snapshot.
* @param limboFreeSnapshotVersion The version of the snapshot when the query
* was last synchronized.
*/
private needsRefill;
private executeFullCollectionScan;
}
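// --- Illustrative sketch (not part of the shipped declarations) ---
// The fallback decision described above, reduced to plain data so it stays
// self-contained. The fields of `IndexFreeEligibilitySketch` are hypothetical
// summaries of state the real engine derives from the query, the remote keys
// and the local cache.
interface IndexFreeEligibilitySketch {
    hasLimit: boolean;
    /** Number of documents that matched at the last snapshot. */
    previousResultCount: number;
    /** How many of those documents still match and did not change order. */
    stillMatchingCount: number;
    /** Whether the query has ever been CURRENT and free of limbo documents. */
    hasLimboFreeSnapshot: boolean;
}
function canUseIndexFreeSketch(state: IndexFreeEligibilitySketch): boolean {
    if (!state.hasLimboFreeSnapshot) {
        // Never fully synchronized: only a full collection scan is safe.
        return false;
    }
    if (state.hasLimit && state.stillMatchingCount < state.previousResultCount) {
        // A limit query lost a member (or a member may now sort below a cached
        // document), so the previous result set can no longer be trusted.
        return false;
    }
    return true;
}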

View File

@ -0,0 +1,43 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ResourcePath } from '../model/path';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
/**
* Represents a set of indexes that are used to execute queries efficiently.
*
* Currently the only index is a [collection id] => [parent path] index, used
* to execute Collection Group queries.
*/
export interface IndexManager {
/**
* Creates an index entry mapping the collectionId (last segment of the path)
* to the parent path (either the containing document location or the empty
* path for root-level collections). Index entries can be retrieved via
* getCollectionParents().
*
* NOTE: Currently we don't remove index entries. If this ends up being an
* issue we can devise some sort of GC strategy.
*/
addToCollectionParentIndex(transaction: PersistenceTransaction, collectionPath: ResourcePath): PersistencePromise<void>;
/**
* Retrieves all parent locations containing the given collectionId, as a
* list of paths (each path being either a document location or the empty
* path for a root-level collection).
*/
getCollectionParents(transaction: PersistenceTransaction, collectionId: string): PersistencePromise<ResourcePath[]>;
}
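// --- Illustrative sketch (not part of the shipped declarations) ---
// A purely in-memory model of the [collection id] => [parent path] index
// described above, ignoring the PersistenceTransaction/PersistencePromise
// plumbing so it stays self-contained. `MemoryCollectionParentIndexSketch`
// is a hypothetical name.
class MemoryCollectionParentIndexSketch {
    // Keyed by collection id; values are canonical parent path strings.
    private index = new Map<string, Set<string>>();
    add(collectionId: string, parentPath: string): void {
        let parents = this.index.get(collectionId);
        if (!parents) {
            parents = new Set<string>();
            this.index.set(collectionId, parents);
        }
        parents.add(parentPath);
    }
    getParents(collectionId: string): string[] {
        // Root-level collections are represented by the empty parent path.
        return Array.from(this.index.get(collectionId) ?? []);
    }
}
// e.g. after add('messages', 'rooms/r1') and add('messages', ''),
// getParents('messages') returns ['rooms/r1', ''].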

View File

@ -0,0 +1,42 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ResourcePath } from '../model/path';
import { IndexManager } from './index_manager';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
/**
* A persisted implementation of IndexManager.
*/
export declare class IndexedDbIndexManager implements IndexManager {
/**
* An in-memory copy of the index entries we've already written since the SDK
* launched. Used to avoid re-writing the same entry repeatedly.
*
* This is *NOT* a complete cache of what's in persistence and so can never be used to
* satisfy reads.
*/
private collectionParentsCache;
/**
* Adds a new entry to the collection parent index.
*
* Repeated calls for the same collectionPath should be avoided within a
* transaction as IndexedDbIndexManager only caches writes once a transaction
* has been committed.
*/
addToCollectionParentIndex(transaction: PersistenceTransaction, collectionPath: ResourcePath): PersistencePromise<void>;
getCollectionParents(transaction: PersistenceTransaction, collectionId: string): PersistencePromise<ResourcePath[]>;
}

View File

@ -0,0 +1,93 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Timestamp } from '../api/timestamp';
import { User } from '../auth/user';
import { Query } from '../core/query';
import { BatchId, ProtoByteString } from '../core/types';
import { DocumentKeySet } from '../model/collections';
import { DocumentKey } from '../model/document_key';
import { Mutation } from '../model/mutation';
import { MutationBatch } from '../model/mutation_batch';
import { SortedMap } from '../util/sorted_map';
import { IndexManager } from './index_manager';
import { LocalSerializer } from './local_serializer';
import { MutationQueue } from './mutation_queue';
import { PersistenceTransaction, ReferenceDelegate } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { SimpleDbTransaction } from './simple_db';
/** A mutation queue for a specific user, backed by IndexedDB. */
export declare class IndexedDbMutationQueue implements MutationQueue {
/**
* The normalized userId (e.g. null UID => "" userId) used to store /
* retrieve mutations.
*/
private userId;
private readonly serializer;
private readonly indexManager;
private readonly referenceDelegate;
/**
* Caches the document keys for pending mutation batches. If the mutation
* has been removed from IndexedDb, the cached value may continue to
* be used to retrieve the batch's document keys. To remove a cached value
* locally, `removeCachedMutationKeys()` should be invoked either directly
     * or through `removeMutationBatch()`.
*
* With multi-tab, when the primary client acknowledges or rejects a mutation,
* this cache is used by secondary clients to invalidate the local
* view of the documents that were previously affected by the mutation.
*/
private documentKeysByBatchId;
constructor(
/**
* The normalized userId (e.g. null UID => "" userId) used to store /
* retrieve mutations.
*/
userId: string, serializer: LocalSerializer, indexManager: IndexManager, referenceDelegate: ReferenceDelegate);
/**
* Creates a new mutation queue for the given user.
* @param user The user for which to create a mutation queue.
* @param serializer The serializer to use when persisting to IndexedDb.
*/
static forUser(user: User, serializer: LocalSerializer, indexManager: IndexManager, referenceDelegate: ReferenceDelegate): IndexedDbMutationQueue;
checkEmpty(transaction: PersistenceTransaction): PersistencePromise<boolean>;
acknowledgeBatch(transaction: PersistenceTransaction, batch: MutationBatch, streamToken: ProtoByteString): PersistencePromise<void>;
getLastStreamToken(transaction: PersistenceTransaction): PersistencePromise<ProtoByteString>;
setLastStreamToken(transaction: PersistenceTransaction, streamToken: ProtoByteString): PersistencePromise<void>;
addMutationBatch(transaction: PersistenceTransaction, localWriteTime: Timestamp, baseMutations: Mutation[], mutations: Mutation[]): PersistencePromise<MutationBatch>;
lookupMutationBatch(transaction: PersistenceTransaction, batchId: BatchId): PersistencePromise<MutationBatch | null>;
lookupMutationKeys(transaction: PersistenceTransaction, batchId: BatchId): PersistencePromise<DocumentKeySet | null>;
getNextMutationBatchAfterBatchId(transaction: PersistenceTransaction, batchId: BatchId): PersistencePromise<MutationBatch | null>;
getHighestUnacknowledgedBatchId(transaction: PersistenceTransaction): PersistencePromise<BatchId>;
getAllMutationBatches(transaction: PersistenceTransaction): PersistencePromise<MutationBatch[]>;
getAllMutationBatchesAffectingDocumentKey(transaction: PersistenceTransaction, documentKey: DocumentKey): PersistencePromise<MutationBatch[]>;
getAllMutationBatchesAffectingDocumentKeys(transaction: PersistenceTransaction, documentKeys: SortedMap<DocumentKey, unknown>): PersistencePromise<MutationBatch[]>;
getAllMutationBatchesAffectingQuery(transaction: PersistenceTransaction, query: Query): PersistencePromise<MutationBatch[]>;
private lookupMutationBatches;
removeMutationBatch(transaction: PersistenceTransaction, batch: MutationBatch): PersistencePromise<void>;
removeCachedMutationKeys(batchId: BatchId): void;
performConsistencyCheck(txn: PersistenceTransaction): PersistencePromise<void>;
containsKey(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<boolean>;
/** Returns the mutation queue's metadata from IndexedDb. */
private getMutationQueueMetadata;
}
/** Returns true if any mutation queue contains the given document. */
export declare function mutationQueuesContainKey(txn: PersistenceTransaction, docKey: DocumentKey): PersistencePromise<boolean>;
/**
* Delete a mutation batch and the associated document mutations.
* @return A PersistencePromise of the document mutations that were removed.
*/
export declare function removeMutationBatch(txn: SimpleDbTransaction, userId: string, batch: MutationBatch): PersistencePromise<DocumentKey[]>;

View File

@ -0,0 +1,270 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { User } from '../auth/user';
import { DatabaseInfo } from '../core/database_info';
import { SequenceNumberSyncer } from '../core/listen_sequence';
import { ListenSequenceNumber } from '../core/types';
import { DocumentKey } from '../model/document_key';
import { Platform } from '../platform/platform';
import { JsonProtoSerializer } from '../remote/serializer';
import { AsyncQueue } from '../util/async_queue';
import { IndexedDbIndexManager } from './indexeddb_index_manager';
import { IndexedDbRemoteDocumentCache } from './indexeddb_remote_document_cache';
import { IndexedDbTargetCache } from './indexeddb_target_cache';
import { ActiveTargets, LruDelegate, LruGarbageCollector, LruParams } from './lru_garbage_collector';
import { MutationQueue } from './mutation_queue';
import { Persistence, PersistenceTransaction, PersistenceTransactionMode, PrimaryStateListener, ReferenceDelegate } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { ReferenceSet } from './reference_set';
import { ClientId } from './shared_client_state';
import { TargetData } from './target_data';
import { SimpleDbStore, SimpleDbTransaction } from './simple_db';
export declare class IndexedDbTransaction extends PersistenceTransaction {
readonly simpleDbTransaction: SimpleDbTransaction;
readonly currentSequenceNumber: ListenSequenceNumber;
constructor(simpleDbTransaction: SimpleDbTransaction, currentSequenceNumber: ListenSequenceNumber);
}
/**
* An IndexedDB-backed instance of Persistence. Data is stored persistently
* across sessions.
*
* On Web only, the Firestore SDKs support shared access to its persistence
* layer. This allows multiple browser tabs to read and write to IndexedDb and
* to synchronize state even without network connectivity. Shared access is
* currently optional and not enabled unless all clients invoke
* `enablePersistence()` with `{synchronizeTabs:true}`.
*
* In multi-tab mode, if multiple clients are active at the same time, the SDK
* will designate one client as the “primary client”. An effort is made to pick
* a visible, network-connected and active client, and this client is
* responsible for letting other clients know about its presence. The primary
* client writes a unique client-generated identifier (the client ID) to
 * IndexedDb's “owner” store every 4 seconds. If the primary client fails to
* update this entry, another client can acquire the lease and take over as
* primary.
*
* Some persistence operations in the SDK are designated as primary-client only
* operations. This includes the acknowledgment of mutations and all updates of
* remote documents. The effects of these operations are written to persistence
* and then broadcast to other tabs via LocalStorage (see
* `WebStorageSharedClientState`), which then refresh their state from
* persistence.
*
* Similarly, the primary client listens to notifications sent by secondary
* clients to discover persistence changes written by secondary clients, such as
* the addition of new mutations and query targets.
*
* If multi-tab is not enabled and another tab already obtained the primary
* lease, IndexedDbPersistence enters a failed state and all subsequent
* operations will automatically fail.
*
* Additionally, there is an optimization so that when a tab is closed, the
* primary lease is released immediately (this is especially important to make
* sure that a refreshed tab is able to immediately re-acquire the primary
* lease). Unfortunately, IndexedDB cannot be reliably used in window.unload
* since it is an asynchronous API. So in addition to attempting to give up the
* lease, the leaseholder writes its client ID to a "zombiedClient" entry in
* LocalStorage which acts as an indicator that another tab should go ahead and
* take the primary lease immediately regardless of the current lease timestamp.
*
* TODO(b/114226234): Remove `synchronizeTabs` section when multi-tab is no
* longer optional.
*/
export declare class IndexedDbPersistence implements Persistence {
private readonly allowTabSynchronization;
private readonly persistenceKey;
private readonly clientId;
private readonly queue;
private readonly sequenceNumberSyncer;
static getStore<Key extends IDBValidKey, Value>(txn: PersistenceTransaction, store: string): SimpleDbStore<Key, Value>;
/**
     * The name of the main (and currently only) IndexedDB database. This name is
* appended to the prefix provided to the IndexedDbPersistence constructor.
*/
static MAIN_DATABASE: string;
static createIndexedDbPersistence(options: {
allowTabSynchronization: boolean;
persistenceKey: string;
clientId: ClientId;
platform: Platform;
lruParams: LruParams;
queue: AsyncQueue;
serializer: JsonProtoSerializer;
sequenceNumberSyncer: SequenceNumberSyncer;
}): Promise<IndexedDbPersistence>;
private readonly document;
private readonly window;
private simpleDb;
private listenSequence;
private _started;
private isPrimary;
private networkEnabled;
private dbName;
/** Our window.unload handler, if registered. */
private windowUnloadHandler;
private inForeground;
private serializer;
/** Our 'visibilitychange' listener if registered. */
private documentVisibilityHandler;
/** The client metadata refresh task. */
private clientMetadataRefresher;
/** The last time we garbage collected the client metadata object store. */
private lastGarbageCollectionTime;
/** A listener to notify on primary state changes. */
private primaryStateListener;
private readonly targetCache;
private readonly indexManager;
private readonly remoteDocumentCache;
private readonly webStorage;
readonly referenceDelegate: IndexedDbLruDelegate;
private constructor();
/**
* Attempt to start IndexedDb persistence.
*
* @return {Promise<void>} Whether persistence was enabled.
*/
private start;
setPrimaryStateListener(primaryStateListener: PrimaryStateListener): Promise<void>;
setDatabaseDeletedListener(databaseDeletedListener: () => Promise<void>): void;
setNetworkEnabled(networkEnabled: boolean): void;
/**
* Updates the client metadata in IndexedDb and attempts to either obtain or
* extend the primary lease for the local client. Asynchronously notifies the
* primary state listener if the client either newly obtained or released its
* primary lease.
*/
private updateClientMetadataAndTryBecomePrimary;
private verifyPrimaryLease;
private removeClientMetadata;
/**
* If the garbage collection threshold has passed, prunes the
* RemoteDocumentChanges and the ClientMetadata store based on the last update
* time of all clients.
*/
private maybeGarbageCollectMultiClientState;
/**
* Schedules a recurring timer to update the client metadata and to either
* extend or acquire the primary lease if the client is eligible.
*/
private scheduleClientMetadataAndPrimaryLeaseRefreshes;
/** Checks whether `client` is the local client. */
private isLocalClient;
/**
* Evaluate the state of all active clients and determine whether the local
* client is or can act as the holder of the primary lease. Returns whether
* the client is eligible for the lease, but does not actually acquire it.
* May return 'false' even if there is no active leaseholder and another
* (foreground) client should become leaseholder instead.
*/
private canActAsPrimary;
shutdown(): Promise<void>;
/**
* Returns clients that are not zombied and have an updateTime within the
* provided threshold.
*/
private filterActiveClients;
getActiveClients(): Promise<ClientId[]>;
static clearPersistence(persistenceKey: string): Promise<void>;
get started(): boolean;
getMutationQueue(user: User): MutationQueue;
getTargetCache(): IndexedDbTargetCache;
getRemoteDocumentCache(): IndexedDbRemoteDocumentCache;
getIndexManager(): IndexedDbIndexManager;
runTransaction<T>(action: string, mode: PersistenceTransactionMode, transactionOperation: (transaction: PersistenceTransaction) => PersistencePromise<T>): Promise<T>;
/**
* Verifies that the current tab is the primary leaseholder or alternatively
* that the leaseholder has opted into multi-tab synchronization.
*/
private verifyAllowTabSynchronization;
/**
* Obtains or extends the new primary lease for the local client. This
* method does not verify that the client is eligible for this lease.
*/
private acquireOrExtendPrimaryLease;
static isAvailable(): boolean;
/**
* Generates a string used as a prefix when storing data in IndexedDB and
* LocalStorage.
*/
static buildStoragePrefix(databaseInfo: DatabaseInfo): string;
/** Checks the primary lease and removes it if we are the current primary. */
private releasePrimaryLeaseIfHeld;
/** Verifies that `updateTimeMs` is within `maxAgeMs`. */
private isWithinAge;
private attachVisibilityHandler;
private detachVisibilityHandler;
/**
* Attaches a window.unload handler that will synchronously write our
* clientId to a "zombie client id" location in LocalStorage. This can be used
* by tabs trying to acquire the primary lease to determine that the lease
* is no longer valid even if the timestamp is recent. This is particularly
* important for the refresh case (so the tab correctly re-acquires the
* primary lease). LocalStorage is used for this rather than IndexedDb because
* it is a synchronous API and so can be used reliably from an unload
* handler.
*/
private attachWindowUnloadHook;
private detachWindowUnloadHook;
/**
* Returns whether a client is "zombied" based on its LocalStorage entry.
* Clients become zombied when their tab closes without running all of the
* cleanup logic in `shutdown()`.
*/
private isClientZombied;
/**
* Record client as zombied (a client that had its tab closed). Zombied
* clients are ignored during primary tab selection.
*/
private markClientZombied;
/** Removes the zombied client entry if it exists. */
private removeClientZombiedEntry;
private zombiedClientLocalStorageKey;
}
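// Illustrative sketch only (not part of the shipped declarations): how a tab could
// mark itself as "zombied" from an unload handler, as described for
// attachWindowUnloadHook/isClientZombied above. The key format below is an
// assumption for illustration; the SDK derives its own key from the storage prefix
// and client id.
function attachZombieMarkerSketch(persistenceKey: string, clientId: string): void {
    const zombieKey = `firestore_zombie_${persistenceKey}_${clientId}`;
    window.addEventListener('unload', () => {
        // LocalStorage is synchronous, so the write completes before the tab is gone.
        window.localStorage.setItem(zombieKey, String(Date.now()));
    });
}
// Another tab can then treat a lease held by a zombied client as invalid, even if
// the lease timestamp is recent:
function isZombiedSketch(persistenceKey: string, clientId: string): boolean {
    return window.localStorage.getItem(`firestore_zombie_${persistenceKey}_${clientId}`) !== null;
}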
/** Provides LRU functionality for IndexedDB persistence. */
export declare class IndexedDbLruDelegate implements ReferenceDelegate, LruDelegate {
private readonly db;
private inMemoryPins;
readonly garbageCollector: LruGarbageCollector;
constructor(db: IndexedDbPersistence, params: LruParams);
getSequenceNumberCount(txn: PersistenceTransaction): PersistencePromise<number>;
private orphanedDocmentCount;
forEachTarget(txn: PersistenceTransaction, f: (q: TargetData) => void): PersistencePromise<void>;
forEachOrphanedDocumentSequenceNumber(txn: PersistenceTransaction, f: (sequenceNumber: ListenSequenceNumber) => void): PersistencePromise<void>;
setInMemoryPins(inMemoryPins: ReferenceSet): void;
addReference(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
removeReference(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
removeTargets(txn: PersistenceTransaction, upperBound: ListenSequenceNumber, activeTargetIds: ActiveTargets): PersistencePromise<number>;
removeMutationReference(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
/**
* Returns true if anything would prevent this document from being garbage
* collected, given that the document in question is not present in any
* targets and has a sequence number less than or equal to the upper bound for
* the collection run.
*/
private isPinned;
removeOrphanedDocuments(txn: PersistenceTransaction, upperBound: ListenSequenceNumber): PersistencePromise<number>;
removeTarget(txn: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
updateLimboDocument(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
/**
     * Call the provided function for each document in the cache that is 'orphaned'. Orphaned
* means not a part of any target, so the only entry in the target-document index for
* that document will be the sentinel row (targetId 0), which will also have the sequence
* number for the last time the document was accessed.
*/
private forEachOrphanedDocument;
getCacheSize(txn: PersistenceTransaction): PersistencePromise<number>;
}

View File

@ -0,0 +1,106 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Query } from '../core/query';
import { DocumentKeySet, DocumentMap, DocumentSizeEntries, DocumentSizeEntry, MaybeDocumentMap, NullableMaybeDocumentMap } from '../model/collections';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { SnapshotVersion } from '../core/snapshot_version';
import { IndexManager } from './index_manager';
import { DbRemoteDocument } from './indexeddb_schema';
import { LocalSerializer } from './local_serializer';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { RemoteDocumentCache } from './remote_document_cache';
import { RemoteDocumentChangeBuffer } from './remote_document_change_buffer';
export declare class IndexedDbRemoteDocumentCache implements RemoteDocumentCache {
readonly serializer: LocalSerializer;
private readonly indexManager;
/**
* @param {LocalSerializer} serializer The document serializer.
* @param {IndexManager} indexManager The query indexes that need to be maintained.
*/
constructor(serializer: LocalSerializer, indexManager: IndexManager);
/**
* Adds the supplied entries to the cache.
*
* All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()` to ensure proper accounting of metadata.
*/
private addEntry;
/**
* Removes a document from the cache.
*
* All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()` to ensure proper accounting of metadata.
*/
private removeEntry;
/**
* Updates the current cache size.
*
* Callers to `addEntry()` and `removeEntry()` *must* call this afterwards to update the
* cache's metadata.
*/
private updateMetadata;
getEntry(transaction: PersistenceTransaction, documentKey: DocumentKey): PersistencePromise<MaybeDocument | null>;
/**
* Looks up an entry in the cache.
*
* @param documentKey The key of the entry to look up.
* @return The cached MaybeDocument entry and its size, or null if we have nothing cached.
*/
getSizedEntry(transaction: PersistenceTransaction, documentKey: DocumentKey): PersistencePromise<DocumentSizeEntry | null>;
getEntries(transaction: PersistenceTransaction, documentKeys: DocumentKeySet): PersistencePromise<NullableMaybeDocumentMap>;
/**
* Looks up several entries in the cache.
*
     * @param documentKeys The keys of the entries to look up.
* @return A map of MaybeDocuments indexed by key (if a document cannot be
* found, the key will be mapped to null) and a map of sizes indexed by
* key (zero if the key cannot be found).
*/
getSizedEntries(transaction: PersistenceTransaction, documentKeys: DocumentKeySet): PersistencePromise<DocumentSizeEntries>;
private forEachDbEntry;
getDocumentsMatchingQuery(transaction: PersistenceTransaction, query: Query, sinceReadTime: SnapshotVersion): PersistencePromise<DocumentMap>;
getNewDocumentChanges(transaction: PersistenceTransaction, sinceReadTime: SnapshotVersion): PersistencePromise<{
changedDocs: MaybeDocumentMap;
readTime: SnapshotVersion;
}>;
getLastReadTime(transaction: PersistenceTransaction): PersistencePromise<SnapshotVersion>;
newChangeBuffer(options?: {
trackRemovals: boolean;
}): RemoteDocumentChangeBuffer;
getSize(txn: PersistenceTransaction): PersistencePromise<number>;
private getMetadata;
private setMetadata;
/**
* Decodes `remoteDoc` and returns the document (or null, if the document
* corresponds to the format used for sentinel deletes).
*/
private maybeDecodeDocument;
/**
* Handles the details of adding and updating documents in the IndexedDbRemoteDocumentCache.
*
* Unlike the MemoryRemoteDocumentChangeBuffer, the IndexedDb implementation computes the size
* delta for all submitted changes. This avoids having to re-read all documents from IndexedDb
* when we apply the changes.
*/
private static RemoteDocumentChangeBuffer;
}
/**
 * Returns an approximate size for the given document.
*/
export declare function dbDocumentSize(doc: DbRemoteDocument): number;
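// Illustrative sketch of one way to approximate a stored document's size by
// measuring its JSON encoding; this is an assumption for illustration, not the
// SDK's own accounting.
function approximateDocumentSizeSketch(doc: object): number {
    // JSON.stringify gives a cheap, deterministic estimate of the persisted payload.
    return JSON.stringify(doc).length;
}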

View File

@ -0,0 +1,792 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { BatchId, TargetId } from '../core/types';
import { ResourcePath } from '../model/path';
import * as api from '../protos/firestore_proto_api';
import { EncodedResourcePath } from './encoded_resource_path';
import { LocalSerializer } from './local_serializer';
import { PersistencePromise } from './persistence_promise';
import { SimpleDbSchemaConverter } from './simple_db';
/**
* Schema Version for the Web client:
* 1. Initial version including Mutation Queue, Query Cache, and Remote Document
* Cache
* 2. Used to ensure a targetGlobal object exists and add targetCount to it. No
* longer required because migration 3 unconditionally clears it.
* 3. Dropped and re-created Query Cache to deal with cache corruption related
* to limbo resolution. Addresses
* https://github.com/firebase/firebase-ios-sdk/issues/1548
* 4. Multi-Tab Support.
* 5. Removal of held write acks.
* 6. Create document global for tracking document cache size.
* 7. Ensure every cached document has a sentinel row with a sequence number.
* 8. Add collection-parent index for Collection Group queries.
* 9. Change RemoteDocumentChanges store to be keyed by readTime rather than
* an auto-incrementing ID. This is required for Index-Free queries.
*/
export declare const SCHEMA_VERSION = 9;
/** Performs database creation and schema upgrades. */
export declare class SchemaConverter implements SimpleDbSchemaConverter {
private readonly serializer;
constructor(serializer: LocalSerializer);
/**
* Performs database creation and schema upgrades.
*
* Note that in production, this method is only ever used to upgrade the schema
* to SCHEMA_VERSION. Different values of toVersion are only used for testing
* and local feature development.
*/
createOrUpgrade(db: IDBDatabase, txn: IDBTransaction, fromVersion: number, toVersion: number): PersistencePromise<void>;
private addDocumentGlobal;
private removeAcknowledgedMutations;
/**
* Ensures that every document in the remote document cache has a corresponding sentinel row
* with a sequence number. Missing rows are given the most recently used sequence number.
*/
private ensureSequenceNumbers;
private createCollectionParentIndex;
}
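// Illustrative sketch of the version-gated upgrade pattern that createOrUpgrade
// implements, using the raw IndexedDB API. The store names and migrations shown
// here are hypothetical examples, not the SDK's actual schema.
function openWithUpgradesSketch(dbName: string, targetVersion: number): Promise<IDBDatabase> {
    return new Promise((resolve, reject) => {
        const request = indexedDB.open(dbName, targetVersion);
        request.onupgradeneeded = event => {
            const db = request.result;
            // Each migration runs only when upgrading across its version boundary,
            // so an older database replays exactly the steps it is missing.
            if (event.oldVersion < 1) {
                db.createObjectStore('exampleMutations', { keyPath: 'batchId' });
            }
            if (event.oldVersion < 8) {
                db.createObjectStore('exampleCollectionParents', {
                    keyPath: ['collectionId', 'parent']
                });
            }
        };
        request.onsuccess = () => resolve(request.result);
        request.onerror = () => reject(request.error);
    });
}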
/**
* Wrapper class to store timestamps (seconds and nanos) in IndexedDb objects.
*/
export declare class DbTimestamp {
seconds: number;
nanoseconds: number;
constructor(seconds: number, nanoseconds: number);
}
/** A timestamp type that can be used in IndexedDb keys. */
export declare type DbTimestampKey = [/* seconds */ number, /* nanos */ number];
export declare type DbPrimaryClientKey = typeof DbPrimaryClient.key;
/**
* A singleton object to be stored in the 'owner' store in IndexedDb.
*
* A given database can have a single primary tab assigned at a given time. That
* tab must validate that it is still holding the primary lease before every
* operation that requires locked access. The primary tab should regularly
* write an updated timestamp to this lease to prevent other tabs from
* "stealing" the primary lease
*/
export declare class DbPrimaryClient {
ownerId: string;
/** Whether to allow shared access from multiple tabs. */
allowTabSynchronization: boolean;
leaseTimestampMs: number;
/**
* Name of the IndexedDb object store.
*
* Note that the name 'owner' is chosen to ensure backwards compatibility with
* older clients that only supported single locked access to the persistence
* layer.
*/
static store: string;
/**
* The key string used for the single object that exists in the
* DbPrimaryClient store.
*/
static key: string;
constructor(ownerId: string,
/** Whether to allow shared access from multiple tabs. */
allowTabSynchronization: boolean, leaseTimestampMs: number);
}
/** Object keys in the 'mutationQueues' store are userId strings. */
export declare type DbMutationQueueKey = string;
/**
* An object to be stored in the 'mutationQueues' store in IndexedDb.
*
* Each user gets a single queue of MutationBatches to apply to the server.
* DbMutationQueue tracks the metadata about the queue.
*/
export declare class DbMutationQueue {
/**
* The normalized user ID to which this queue belongs.
*/
userId: string;
/**
* An identifier for the highest numbered batch that has been acknowledged
* by the server. All MutationBatches in this queue with batchIds less
* than or equal to this value are considered to have been acknowledged by
* the server.
*
* NOTE: this is deprecated and no longer used by the code.
*/
lastAcknowledgedBatchId: number;
/**
* A stream token that was previously sent by the server.
*
* See StreamingWriteRequest in datastore.proto for more details about
* usage.
*
* After sending this token, earlier tokens may not be used anymore so
* only a single stream token is retained.
*/
lastStreamToken: string;
/** Name of the IndexedDb object store. */
static store: string;
/** Keys are automatically assigned via the userId property. */
static keyPath: string;
constructor(
/**
* The normalized user ID to which this queue belongs.
*/
userId: string,
/**
* An identifier for the highest numbered batch that has been acknowledged
* by the server. All MutationBatches in this queue with batchIds less
* than or equal to this value are considered to have been acknowledged by
* the server.
*
* NOTE: this is deprecated and no longer used by the code.
*/
lastAcknowledgedBatchId: number,
/**
* A stream token that was previously sent by the server.
*
* See StreamingWriteRequest in datastore.proto for more details about
* usage.
*
* After sending this token, earlier tokens may not be used anymore so
* only a single stream token is retained.
*/
lastStreamToken: string);
}
/** The 'mutations' store is keyed by batch ID. */
export declare type DbMutationBatchKey = BatchId;
/**
* An object to be stored in the 'mutations' store in IndexedDb.
*
* Represents a batch of user-level mutations intended to be sent to the server
* in a single write. Each user-level batch gets a separate DbMutationBatch
* with a new batchId.
*/
export declare class DbMutationBatch {
/**
* The normalized user ID to which this batch belongs.
*/
userId: string;
/**
* An identifier for this batch, allocated using an auto-generated key.
*/
batchId: BatchId;
/**
* The local write time of the batch, stored as milliseconds since the
* epoch.
*/
localWriteTimeMs: number;
/**
* A list of "mutations" that represent a partial base state from when this
* write batch was initially created. During local application of the write
* batch, these baseMutations are applied prior to the real writes in order
* to override certain document fields from the remote document cache. This
* is necessary in the case of non-idempotent writes (e.g. `increment()`
* transforms) to make sure that the local view of the modified documents
* doesn't flicker if the remote document cache receives the result of the
* non-idempotent write before the write is removed from the queue.
*
* These mutations are never sent to the backend.
*/
baseMutations: api.Write[] | undefined;
/**
* A list of mutations to apply. All mutations will be applied atomically.
*
* Mutations are serialized via JsonProtoSerializer.toMutation().
*/
mutations: api.Write[];
/** Name of the IndexedDb object store. */
static store: string;
/** Keys are automatically assigned via the userId, batchId properties. */
static keyPath: string;
/** The index name for lookup of mutations by user. */
static userMutationsIndex: string;
/** The user mutations index is keyed by [userId, batchId] pairs. */
static userMutationsKeyPath: string[];
constructor(
/**
* The normalized user ID to which this batch belongs.
*/
userId: string,
/**
* An identifier for this batch, allocated using an auto-generated key.
*/
batchId: BatchId,
/**
* The local write time of the batch, stored as milliseconds since the
* epoch.
*/
localWriteTimeMs: number,
/**
* A list of "mutations" that represent a partial base state from when this
* write batch was initially created. During local application of the write
* batch, these baseMutations are applied prior to the real writes in order
* to override certain document fields from the remote document cache. This
* is necessary in the case of non-idempotent writes (e.g. `increment()`
* transforms) to make sure that the local view of the modified documents
* doesn't flicker if the remote document cache receives the result of the
* non-idempotent write before the write is removed from the queue.
*
* These mutations are never sent to the backend.
*/
baseMutations: api.Write[] | undefined,
/**
* A list of mutations to apply. All mutations will be applied atomically.
*
* Mutations are serialized via JsonProtoSerializer.toMutation().
*/
mutations: api.Write[]);
}
/**
* The key for a db document mutation, which is made up of a userID, path, and
* batchId. Note that the path must be serialized into a form that indexedDB can
* sort.
*/
export declare type DbDocumentMutationKey = [string, EncodedResourcePath, BatchId];
/**
* An object to be stored in the 'documentMutations' store in IndexedDb.
*
* A manually maintained index of all the mutation batches that affect a given
* document key. The rows in this table are references based on the contents of
* DbMutationBatch.mutations.
*/
export declare class DbDocumentMutation {
static store: string;
/**
* Creates a [userId] key for use in the DbDocumentMutations index to iterate
* over all of a user's document mutations.
*/
static prefixForUser(userId: string): [string];
/**
* Creates a [userId, encodedPath] key for use in the DbDocumentMutations
     * index to iterate over all document mutations for a given path or lower.
*/
static prefixForPath(userId: string, path: ResourcePath): [string, EncodedResourcePath];
/**
* Creates a full index key of [userId, encodedPath, batchId] for inserting
* and deleting into the DbDocumentMutations index.
*/
static key(userId: string, path: ResourcePath, batchId: BatchId): DbDocumentMutationKey;
/**
* Because we store all the useful information for this store in the key,
* there is no useful information to store as the value. The raw (unencoded)
* path cannot be stored because IndexedDb doesn't store prototype
* information.
*/
static PLACEHOLDER: DbDocumentMutation;
private constructor();
}
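// Illustrative sketch of what the prefix keys above enable: iterating every
// [userId, encodedPath, batchId] entry for one user and path by bounding an
// array-keyed cursor. The store name and key encoding here are assumptions for
// illustration only.
function forEachBatchForPathSketch(
    db: IDBDatabase,
    userId: string,
    encodedPath: string,
    callback: (batchId: number) => void
): void {
    const store = db.transaction('documentMutations', 'readonly').objectStore('documentMutations');
    // Array keys compare component-wise, so this range covers every batchId
    // recorded under the [userId, encodedPath] prefix.
    const range = IDBKeyRange.bound([userId, encodedPath], [userId, encodedPath, Infinity]);
    const request = store.openCursor(range);
    request.onsuccess = () => {
        const cursor = request.result;
        if (cursor) {
            const [, , batchId] = cursor.key as [string, string, number];
            callback(batchId);
            cursor.continue();
        }
    };
}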
/**
* A key in the 'remoteDocuments' object store is a string array containing the
* segments that make up the path.
*/
export declare type DbRemoteDocumentKey = string[];
/**
* Represents the known absence of a document at a particular version.
* Stored in IndexedDb as part of a DbRemoteDocument object.
*/
export declare class DbNoDocument {
path: string[];
readTime: DbTimestamp;
constructor(path: string[], readTime: DbTimestamp);
}
/**
* Represents a document that is known to exist but whose data is unknown.
* Stored in IndexedDb as part of a DbRemoteDocument object.
*/
export declare class DbUnknownDocument {
path: string[];
version: DbTimestamp;
constructor(path: string[], version: DbTimestamp);
}
/**
* An object to be stored in the 'remoteDocuments' store in IndexedDb.
* It represents either:
*
* - A complete document.
* - A "no document" representing a document that is known not to exist (at
* some version).
* - An "unknown document" representing a document that is known to exist (at
* some version) but whose contents are unknown.
*
* Note: This is the persisted equivalent of a MaybeDocument and could perhaps
* be made more general if necessary.
*/
export declare class DbRemoteDocument {
/**
* Set to an instance of DbUnknownDocument if the data for a document is
* not known, but it is known that a document exists at the specified
* version (e.g. it had a successful update applied to it)
*/
unknownDocument: DbUnknownDocument | null | undefined;
/**
* Set to an instance of a DbNoDocument if it is known that no document
* exists.
*/
noDocument: DbNoDocument | null;
/**
* Set to an instance of a Document if there's a cached version of the
* document.
*/
document: api.Document | null;
/**
* Documents that were written to the remote document store based on
* a write acknowledgment are marked with `hasCommittedMutations`. These
* documents are potentially inconsistent with the backend's copy and use
* the write's commit version as their document version.
*/
hasCommittedMutations: boolean | undefined;
/**
* When the document was read from the backend. Undefined for data written
* prior to schema version 9.
*/
readTime: DbTimestampKey | undefined;
/**
* The path of the collection this document is part of. Undefined for data
* written prior to schema version 9.
*/
parentPath: string[] | undefined;
static store: string;
/**
* An index that provides access to all entries sorted by read time (which
* corresponds to the last modification time of each row).
*
* This index is used to provide a changelog for Multi-Tab.
*/
static readTimeIndex: string;
static readTimeIndexPath: string;
/**
* An index that provides access to documents in a collection sorted by read
* time.
*
* This index is used to allow the RemoteDocumentCache to fetch newly changed
* documents in a collection.
*/
static collectionReadTimeIndex: string;
static collectionReadTimeIndexPath: string[];
constructor(
/**
* Set to an instance of DbUnknownDocument if the data for a document is
* not known, but it is known that a document exists at the specified
* version (e.g. it had a successful update applied to it)
*/
unknownDocument: DbUnknownDocument | null | undefined,
/**
* Set to an instance of a DbNoDocument if it is known that no document
* exists.
*/
noDocument: DbNoDocument | null,
/**
* Set to an instance of a Document if there's a cached version of the
* document.
*/
document: api.Document | null,
/**
* Documents that were written to the remote document store based on
* a write acknowledgment are marked with `hasCommittedMutations`. These
* documents are potentially inconsistent with the backend's copy and use
* the write's commit version as their document version.
*/
hasCommittedMutations: boolean | undefined,
/**
* When the document was read from the backend. Undefined for data written
* prior to schema version 9.
*/
readTime: DbTimestampKey | undefined,
/**
* The path of the collection this document is part of. Undefined for data
* written prior to schema version 9.
*/
parentPath: string[] | undefined);
}
/**
* Contains a single entry that has metadata about the remote document cache.
*/
export declare class DbRemoteDocumentGlobal {
byteSize: number;
static store: string;
static key: string;
/**
* @param byteSize Approximately the total size in bytes of all the documents in the document
* cache.
*/
constructor(byteSize: number);
}
export declare type DbRemoteDocumentGlobalKey = typeof DbRemoteDocumentGlobal.key;
/**
* A key in the 'targets' object store is a targetId of the query.
*/
export declare type DbTargetKey = TargetId;
/**
 * The persisted type for a query nested within the 'targets' store in
* IndexedDb. We use the proto definitions for these two kinds of queries in
* order to avoid writing extra serialization logic.
*/
export declare type DbQuery = api.QueryTarget | api.DocumentsTarget;
/**
* An object to be stored in the 'targets' store in IndexedDb.
*
* This is based on and should be kept in sync with the proto used in the iOS
* client.
*
* Each query the client listens to against the server is tracked on disk so
* that the query can be efficiently resumed on restart.
*/
export declare class DbTarget {
/**
* An auto-generated sequential numeric identifier for the query.
*
* Queries are stored using their canonicalId as the key, but these
* canonicalIds can be quite long so we additionally assign a unique
* queryId which can be used by referenced data structures (e.g.
* indexes) to minimize the on-disk cost.
*/
targetId: TargetId;
/**
* The canonical string representing this query. This is not unique.
*/
canonicalId: string;
/**
* The last readTime received from the Watch Service for this query.
*
* This is the same value as TargetChange.read_time in the protos.
*/
readTime: DbTimestamp;
/**
* An opaque, server-assigned token that allows watching a query to be
* resumed after disconnecting without retransmitting all the data
* that matches the query. The resume token essentially identifies a
* point in time from which the server should resume sending results.
*
* This is related to the snapshotVersion in that the resumeToken
* effectively also encodes that value, but the resumeToken is opaque
* and sometimes encodes additional information.
*
* A consequence of this is that the resumeToken should be used when
* asking the server to reason about where this client is in the watch
* stream, but the client should use the snapshotVersion for its own
* purposes.
*
* This is the same value as TargetChange.resume_token in the protos.
*/
resumeToken: string;
/**
* A sequence number representing the last time this query was
* listened to, used for garbage collection purposes.
*
* Conventionally this would be a timestamp value, but device-local
* clocks are unreliable and they must be able to create new listens
* even while disconnected. Instead this should be a monotonically
* increasing number that's incremented on each listen call.
*
* This is different from the queryId since the queryId is an
* immutable identifier assigned to the Query on first use while
* lastListenSequenceNumber is updated every time the query is
* listened to.
*/
lastListenSequenceNumber: number;
/**
* Denotes the maximum snapshot version at which the associated query view
* contained no limbo documents. Undefined for data written prior to
* schema version 9.
*/
lastLimboFreeSnapshotVersion: DbTimestamp | undefined;
/**
* The query for this target.
*
* Because canonical ids are not unique we must store the actual query. We
* use the proto to have an object we can persist without having to
* duplicate translation logic to and from a `Query` object.
*/
query: DbQuery;
static store: string;
/** Keys are automatically assigned via the targetId property. */
static keyPath: string;
/** The name of the queryTargets index. */
static queryTargetsIndexName: string;
/**
* The index of all canonicalIds to the targets that they match. This is not
* a unique mapping because canonicalId does not promise a unique name for all
* possible queries, so we append the targetId to make the mapping unique.
*/
static queryTargetsKeyPath: string[];
constructor(
/**
* An auto-generated sequential numeric identifier for the query.
*
* Queries are stored using their canonicalId as the key, but these
* canonicalIds can be quite long so we additionally assign a unique
* queryId which can be used by referenced data structures (e.g.
* indexes) to minimize the on-disk cost.
*/
targetId: TargetId,
/**
* The canonical string representing this query. This is not unique.
*/
canonicalId: string,
/**
* The last readTime received from the Watch Service for this query.
*
* This is the same value as TargetChange.read_time in the protos.
*/
readTime: DbTimestamp,
/**
* An opaque, server-assigned token that allows watching a query to be
* resumed after disconnecting without retransmitting all the data
* that matches the query. The resume token essentially identifies a
* point in time from which the server should resume sending results.
*
* This is related to the snapshotVersion in that the resumeToken
* effectively also encodes that value, but the resumeToken is opaque
* and sometimes encodes additional information.
*
* A consequence of this is that the resumeToken should be used when
* asking the server to reason about where this client is in the watch
* stream, but the client should use the snapshotVersion for its own
* purposes.
*
* This is the same value as TargetChange.resume_token in the protos.
*/
resumeToken: string,
/**
* A sequence number representing the last time this query was
* listened to, used for garbage collection purposes.
*
* Conventionally this would be a timestamp value, but device-local
* clocks are unreliable and they must be able to create new listens
* even while disconnected. Instead this should be a monotonically
* increasing number that's incremented on each listen call.
*
* This is different from the queryId since the queryId is an
* immutable identifier assigned to the Query on first use while
* lastListenSequenceNumber is updated every time the query is
* listened to.
*/
lastListenSequenceNumber: number,
/**
* Denotes the maximum snapshot version at which the associated query view
* contained no limbo documents. Undefined for data written prior to
* schema version 9.
*/
lastLimboFreeSnapshotVersion: DbTimestamp | undefined,
/**
* The query for this target.
*
* Because canonical ids are not unique we must store the actual query. We
* use the proto to have an object we can persist without having to
* duplicate translation logic to and from a `Query` object.
*/
query: DbQuery);
}
/**
* The key for a DbTargetDocument, containing a targetId and an encoded resource
* path.
*/
export declare type DbTargetDocumentKey = [TargetId, EncodedResourcePath];
/**
* An object representing an association between a target and a document, or a
* sentinel row marking the last sequence number at which a document was used.
 * Each cached document must have a corresponding sentinel row before LRU
 * garbage collection is enabled.
*
* The target associations and sentinel rows are co-located so that orphaned
* documents and their sequence numbers can be identified efficiently via a scan
* of this store.
*/
export declare class DbTargetDocument {
/**
* The targetId identifying a target or 0 for a sentinel row.
*/
targetId: TargetId;
/**
* The path to the document, as encoded in the key.
*/
path: EncodedResourcePath;
/**
* If this is a sentinel row, this should be the sequence number of the last
* time the document specified by `path` was used. Otherwise, it should be
* `undefined`.
*/
sequenceNumber?: number | undefined;
/** Name of the IndexedDb object store. */
static store: string;
/** Keys are automatically assigned via the targetId, path properties. */
static keyPath: string[];
/** The index name for the reverse index. */
static documentTargetsIndex: string;
/** We also need to create the reverse index for these properties. */
static documentTargetsKeyPath: string[];
constructor(
/**
* The targetId identifying a target or 0 for a sentinel row.
*/
targetId: TargetId,
/**
* The path to the document, as encoded in the key.
*/
path: EncodedResourcePath,
/**
* If this is a sentinel row, this should be the sequence number of the last
* time the document specified by `path` was used. Otherwise, it should be
* `undefined`.
*/
sequenceNumber?: number | undefined);
}
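// Illustrative sketch of the sentinel-row convention described above: target id 0
// records when a document was last used, while real associations carry the actual
// target id. The row shape mirrors DbTargetDocument, but these helpers are
// hypothetical.
interface TargetDocumentRowSketch {
    targetId: number;
    path: string;
    sequenceNumber?: number;
}
function sentinelRowSketch(path: string, sequenceNumber: number): TargetDocumentRowSketch {
    // targetId 0 means "no target": the row only tracks the last-used sequence number.
    return { targetId: 0, path, sequenceNumber };
}
function isOrphanedSketch(rowsForPath: TargetDocumentRowSketch[]): boolean {
    // A document is orphaned when its only remaining row is the sentinel, which is
    // exactly what the LRU garbage collector scans for.
    return rowsForPath.length > 0 && rowsForPath.every(row => row.targetId === 0);
}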
/**
* The type to represent the single allowed key for the DbTargetGlobal store.
*/
export declare type DbTargetGlobalKey = typeof DbTargetGlobal.key;
/**
* A record of global state tracked across all Targets, tracked separately
* to avoid the need for extra indexes.
*
* This should be kept in-sync with the proto used in the iOS client.
*/
export declare class DbTargetGlobal {
/**
* The highest numbered target id across all targets.
*
* See DbTarget.targetId.
*/
highestTargetId: TargetId;
/**
* The highest numbered lastListenSequenceNumber across all targets.
*
* See DbTarget.lastListenSequenceNumber.
*/
highestListenSequenceNumber: number;
/**
* A global snapshot version representing the last consistent snapshot we
* received from the backend. This is monotonically increasing and any
* snapshots received from the backend prior to this version (e.g. for
* targets resumed with a resumeToken) should be suppressed (buffered)
* until the backend has caught up to this snapshot version again. This
* prevents our cache from ever going backwards in time.
*/
lastRemoteSnapshotVersion: DbTimestamp;
/**
* The number of targets persisted.
*/
targetCount: number;
/**
* The key string used for the single object that exists in the
* DbTargetGlobal store.
*/
static key: string;
static store: string;
constructor(
/**
* The highest numbered target id across all targets.
*
* See DbTarget.targetId.
*/
highestTargetId: TargetId,
/**
* The highest numbered lastListenSequenceNumber across all targets.
*
* See DbTarget.lastListenSequenceNumber.
*/
highestListenSequenceNumber: number,
/**
* A global snapshot version representing the last consistent snapshot we
* received from the backend. This is monotonically increasing and any
* snapshots received from the backend prior to this version (e.g. for
* targets resumed with a resumeToken) should be suppressed (buffered)
* until the backend has caught up to this snapshot version again. This
* prevents our cache from ever going backwards in time.
*/
lastRemoteSnapshotVersion: DbTimestamp,
/**
* The number of targets persisted.
*/
targetCount: number);
}
/**
* The key for a DbCollectionParent entry, containing the collection ID
* and the parent path that contains it. Note that the parent path will be an
* empty path in the case of root-level collections.
*/
export declare type DbCollectionParentKey = [string, EncodedResourcePath];
/**
* An object representing an association between a Collection id (e.g. 'messages')
* to a parent path (e.g. '/chats/123') that contains it as a (sub)collection.
* This is used to efficiently find all collections to query when performing
* a Collection Group query.
*/
export declare class DbCollectionParent {
/**
* The collectionId (e.g. 'messages')
*/
collectionId: string;
/**
* The path to the parent (either a document location or an empty path for
* a root-level collection).
*/
parent: EncodedResourcePath;
/** Name of the IndexedDb object store. */
static store: string;
/** Keys are automatically assigned via the collectionId, parent properties. */
static keyPath: string[];
constructor(
/**
* The collectionId (e.g. 'messages')
*/
collectionId: string,
/**
* The path to the parent (either a document location or an empty path for
* a root-level collection).
*/
parent: EncodedResourcePath);
}
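// Illustrative sketch of how the collectionId -> parent association above serves a
// Collection Group query: collect every parent recorded for the id and derive the
// concrete collection paths to scan. The data below is made up.
const collectionParentsSketch: Array<{ collectionId: string; parent: string }> = [
    { collectionId: 'messages', parent: 'chats/123' },
    { collectionId: 'messages', parent: 'chats/456' },
    { collectionId: 'messages', parent: '' } // a root-level 'messages' collection
];
function collectionsForGroupSketch(collectionId: string): string[] {
    return collectionParentsSketch
        .filter(entry => entry.collectionId === collectionId)
        .map(entry => (entry.parent ? `${entry.parent}/${collectionId}` : collectionId));
}
// collectionsForGroupSketch('messages')
//   -> ['chats/123/messages', 'chats/456/messages', 'messages']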
/**
* A record of the metadata state of each client.
*
* PORTING NOTE: This is used to synchronize multi-tab state and does not need
* to be ported to iOS or Android.
*/
export declare class DbClientMetadata {
/** The auto-generated client id assigned at client startup. */
clientId: string;
/** The last time this state was updated. */
updateTimeMs: number;
/** Whether the client's network connection is enabled. */
networkEnabled: boolean;
/** Whether this client is running in a foreground tab. */
inForeground: boolean;
/** Name of the IndexedDb object store. */
static store: string;
    /** Keys are automatically assigned via the clientId property. */
static keyPath: string;
constructor(
/** The auto-generated client id assigned at client startup. */
clientId: string,
/** The last time this state was updated. */
updateTimeMs: number,
/** Whether the client's network connection is enabled. */
networkEnabled: boolean,
/** Whether this client is running in a foreground tab. */
inForeground: boolean);
}
/** Object keys in the 'clientMetadata' store are clientId strings. */
export declare type DbClientMetadataKey = string;
export declare const V1_STORES: string[];
export declare const V3_STORES: string[];
export declare const V4_STORES: string[];
export declare const V6_STORES: string[];
export declare const V8_STORES: string[];
/**
* The list of all default IndexedDB stores used throughout the SDK. This is
* used when creating transactions so that access across all stores is done
* atomically.
*/
export declare const ALL_STORES: string[];
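// Illustrative sketch of why a single store list matters: opening one IndexedDB
// transaction across every store lets a multi-store operation commit or roll back
// atomically. `runAtomicallySketch` is hypothetical; only the idea mirrors ALL_STORES.
function runAtomicallySketch(
    db: IDBDatabase,
    storeNames: string[],
    work: (txn: IDBTransaction) => void
): Promise<void> {
    return new Promise((resolve, reject) => {
        const txn = db.transaction(storeNames, 'readwrite');
        txn.oncomplete = () => resolve();
        txn.onerror = () => reject(txn.error);
        txn.onabort = () => reject(txn.error);
        // All reads and writes issued by `work` share the same transaction.
        work(txn);
    });
}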

View File

@ -0,0 +1,75 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { ListenSequenceNumber, TargetId } from '../core/types';
import { DocumentKeySet } from '../model/collections';
import { DocumentKey } from '../model/document_key';
import { IndexedDbLruDelegate } from './indexeddb_persistence';
import { DbTargetDocument, DbTargetDocumentKey } from './indexeddb_schema';
import { LocalSerializer } from './local_serializer';
import { ActiveTargets } from './lru_garbage_collector';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { TargetCache } from './target_cache';
import { TargetData } from './target_data';
import { SimpleDbStore, SimpleDbTransaction } from './simple_db';
import { Target } from '../core/target';
export declare class IndexedDbTargetCache implements TargetCache {
private readonly referenceDelegate;
private serializer;
constructor(referenceDelegate: IndexedDbLruDelegate, serializer: LocalSerializer);
private targetIdGenerator;
allocateTargetId(transaction: PersistenceTransaction): PersistencePromise<TargetId>;
getLastRemoteSnapshotVersion(transaction: PersistenceTransaction): PersistencePromise<SnapshotVersion>;
getHighestSequenceNumber(transaction: PersistenceTransaction): PersistencePromise<ListenSequenceNumber>;
setTargetsMetadata(transaction: PersistenceTransaction, highestListenSequenceNumber: number, lastRemoteSnapshotVersion?: SnapshotVersion): PersistencePromise<void>;
addTargetData(transaction: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
updateTargetData(transaction: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
removeTargetData(transaction: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
/**
* Drops any targets with sequence number less than or equal to the upper bound, excepting those
* present in `activeTargetIds`. Document associations for the removed targets are also removed.
* Returns the number of targets removed.
*/
removeTargets(txn: PersistenceTransaction, upperBound: ListenSequenceNumber, activeTargetIds: ActiveTargets): PersistencePromise<number>;
/**
     * Call the provided function with each `TargetData` that we have cached.
*/
forEachTarget(txn: PersistenceTransaction, f: (q: TargetData) => void): PersistencePromise<void>;
private retrieveMetadata;
private saveMetadata;
private saveTargetData;
/**
* In-place updates the provided metadata to account for values in the given
* TargetData. Saving is done separately. Returns true if there were any
* changes to the metadata.
*/
private updateMetadataFromTargetData;
getTargetCount(transaction: PersistenceTransaction): PersistencePromise<number>;
getTargetData(transaction: PersistenceTransaction, target: Target): PersistencePromise<TargetData | null>;
addMatchingKeys(txn: PersistenceTransaction, keys: DocumentKeySet, targetId: TargetId): PersistencePromise<void>;
removeMatchingKeys(txn: PersistenceTransaction, keys: DocumentKeySet, targetId: TargetId): PersistencePromise<void>;
removeMatchingKeysForTargetId(txn: PersistenceTransaction, targetId: TargetId): PersistencePromise<void>;
getMatchingKeysForTargetId(txn: PersistenceTransaction, targetId: TargetId): PersistencePromise<DocumentKeySet>;
containsKey(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<boolean>;
getTargetDataForTarget(transaction: PersistenceTransaction, targetId: TargetId): PersistencePromise<TargetData | null>;
}
export declare function getHighestListenSequenceNumber(txn: SimpleDbTransaction): PersistencePromise<ListenSequenceNumber>;
/**
* Helper to get a typed SimpleDbStore for the document target object store.
*/
export declare function documentTargetStore(txn: PersistenceTransaction): SimpleDbStore<DbTargetDocumentKey, DbTargetDocument>;

View File

@ -0,0 +1,73 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Query } from '../core/query';
import { SnapshotVersion } from '../core/snapshot_version';
import { DocumentKeySet, DocumentMap, MaybeDocumentMap, NullableMaybeDocumentMap } from '../model/collections';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { IndexManager } from './index_manager';
import { MutationQueue } from './mutation_queue';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { RemoteDocumentCache } from './remote_document_cache';
/**
* A readonly view of the local state of all documents we're tracking (i.e. we
* have a cached version in remoteDocumentCache or local mutations for the
* document). The view is computed by applying the mutations in the
* MutationQueue to the RemoteDocumentCache.
*/
export declare class LocalDocumentsView {
readonly remoteDocumentCache: RemoteDocumentCache;
readonly mutationQueue: MutationQueue;
readonly indexManager: IndexManager;
constructor(remoteDocumentCache: RemoteDocumentCache, mutationQueue: MutationQueue, indexManager: IndexManager);
/**
* Get the local view of the document identified by `key`.
*
* @return Local view of the document or null if we don't have any cached
* state for it.
*/
getDocument(transaction: PersistenceTransaction, key: DocumentKey): PersistencePromise<MaybeDocument | null>;
/** Internal version of `getDocument` that allows reusing batches. */
private getDocumentInternal;
private applyLocalMutationsToDocuments;
/**
* Gets the local view of the documents identified by `keys`.
*
* If we don't have cached state for a document in `keys`, a NoDocument will
* be stored for that key in the resulting set.
*/
getDocuments(transaction: PersistenceTransaction, keys: DocumentKeySet): PersistencePromise<MaybeDocumentMap>;
/**
* Similar to `getDocuments`, but creates the local view from the given
* `baseDocs` without retrieving documents from the local store.
*/
getLocalViewOfDocuments(transaction: PersistenceTransaction, baseDocs: NullableMaybeDocumentMap): PersistencePromise<MaybeDocumentMap>;
/**
* Performs a query against the local view of all documents.
*
* @param transaction The persistence transaction.
* @param query The query to match documents against.
* @param sinceReadTime If not set to SnapshotVersion.MIN, return only
* documents that have been read since this snapshot version (exclusive).
*/
getDocumentsMatchingQuery(transaction: PersistenceTransaction, query: Query, sinceReadTime: SnapshotVersion): PersistencePromise<DocumentMap>;
private getDocumentsMatchingDocumentQuery;
private getDocumentsMatchingCollectionGroupQuery;
private getDocumentsMatchingCollectionQuery;
private addMissingBaseDocuments;
}
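// Illustrative sketch of the layering LocalDocumentsView performs: the local view
// of a document is the cached remote version with any pending mutations applied on
// top. The mutation representation below is a deliberately simplified assumption.
type SketchDoc = Record<string, unknown> | null;
type SketchMutation = (doc: SketchDoc) => SketchDoc;
function localViewSketch(remoteDoc: SketchDoc, pendingMutations: SketchMutation[]): SketchDoc {
    // Mutations apply in order; each may create, modify, or delete the document.
    return pendingMutations.reduce((doc, mutation) => mutation(doc), remoteDoc);
}
// e.g. localViewSketch(null, [() => ({ title: 'created locally' })])
//   -> { title: 'created locally' }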

View File

@ -0,0 +1,48 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { MaybeDocument } from '../model/document';
import { MutationBatch } from '../model/mutation_batch';
import { JsonProtoSerializer } from '../remote/serializer';
import { DocumentKeySet } from '../model/collections';
import { EncodedResourcePath } from './encoded_resource_path';
import { DbMutationBatch, DbRemoteDocument, DbTarget, DbTimestampKey } from './indexeddb_schema';
import { TargetData } from './target_data';
/** Serializer for values stored in the LocalStore. */
export declare class LocalSerializer {
private remoteSerializer;
constructor(remoteSerializer: JsonProtoSerializer);
/** Decodes a remote document from storage locally to a Document. */
fromDbRemoteDocument(remoteDoc: DbRemoteDocument): MaybeDocument;
/** Encodes a document for storage locally. */
toDbRemoteDocument(maybeDoc: MaybeDocument, readTime: SnapshotVersion): DbRemoteDocument;
toDbTimestampKey(snapshotVersion: SnapshotVersion): DbTimestampKey;
fromDbTimestampKey(dbTimestampKey: DbTimestampKey): SnapshotVersion;
private toDbTimestamp;
private fromDbTimestamp;
/** Encodes a batch of mutations into a DbMutationBatch for local storage. */
toDbMutationBatch(userId: string, batch: MutationBatch): DbMutationBatch;
/** Decodes a DbMutationBatch into a MutationBatch */
fromDbMutationBatch(dbBatch: DbMutationBatch): MutationBatch;
toDbResourcePaths(keys: DocumentKeySet): EncodedResourcePath[];
/** Decodes an array of EncodedResourcePaths into a set of document keys. */
fromDbResourcePaths(encodedPaths: EncodedResourcePath[]): DocumentKeySet;
/** Decodes a DbTarget into TargetData */
fromDbTarget(dbTarget: DbTarget): TargetData;
/** Encodes TargetData into a DbTarget for storage locally. */
toDbTarget(targetData: TargetData): DbTarget;
}
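// Illustrative sketch of the [seconds, nanos] shape behind DbTimestampKey: a tuple
// that sorts chronologically as an IndexedDB array key. The conversion below is an
// assumption for illustration, not the serializer's actual implementation.
function toDbTimestampKeySketch(millis: number): [number, number] {
    const seconds = Math.floor(millis / 1000);
    const nanos = Math.round((millis - seconds * 1000) * 1e6);
    return [seconds, nanos];
}
function fromDbTimestampKeySketch([seconds, nanos]: [number, number]): number {
    return seconds * 1000 + nanos / 1e6;
}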

View File

@ -0,0 +1,306 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { User } from '../auth/user';
import { Query } from '../core/query';
import { SnapshotVersion } from '../core/snapshot_version';
import { Target } from '../core/target';
import { BatchId, ProtoByteString, TargetId } from '../core/types';
import { DocumentKeySet, DocumentMap, MaybeDocumentMap } from '../model/collections';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { Mutation } from '../model/mutation';
import { MutationBatch, MutationBatchResult } from '../model/mutation_batch';
import { RemoteEvent } from '../remote/remote_event';
import { FirestoreError } from '../util/error';
import { LocalViewChanges } from './local_view_changes';
import { LruGarbageCollector, LruResults } from './lru_garbage_collector';
import { Persistence, PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { QueryEngine } from './query_engine';
import { ClientId } from './shared_client_state';
import { TargetData } from './target_data';
/** The result of a write to the local store. */
export interface LocalWriteResult {
batchId: BatchId;
changes: MaybeDocumentMap;
}
/** The result of a user-change operation in the local store. */
export interface UserChangeResult {
readonly affectedDocuments: MaybeDocumentMap;
readonly removedBatchIds: BatchId[];
readonly addedBatchIds: BatchId[];
}
/** The result of executing a query against the local store. */
export interface QueryResult {
readonly documents: DocumentMap;
readonly remoteKeys: DocumentKeySet;
}
/**
* Local storage in the Firestore client. Coordinates persistence components
* like the mutation queue and remote document cache to present a
* latency-compensated view of stored data.
*
* The LocalStore is responsible for accepting mutations from the Sync Engine.
* Writes from the client are put into a queue as provisional Mutations until
* they are processed by the RemoteStore and confirmed as having been written
* to the server.
*
* The local store provides the local version of documents that have been
* modified locally. It maintains the constraint:
*
* LocalDocument = RemoteDocument + Active(LocalMutations)
*
* (Active mutations are those that are enqueued and have not been previously
* acknowledged or rejected).
*
* The RemoteDocument ("ground truth") state is provided via the
* applyChangeBatch method. It will be some version of a server-provided
* document OR will be a server-provided document PLUS acknowledged mutations:
*
* RemoteDocument' = RemoteDocument + Acknowledged(LocalMutations)
*
* Note that this "dirty" version of a RemoteDocument will not be identical to a
* server base version, since it has LocalMutations added to it pending getting
* an authoritative copy from the server.
*
* Since LocalMutations can be rejected by the server, we have to be able to
* revert a LocalMutation that has already been applied to the LocalDocument
* (typically done by replaying all remaining LocalMutations to the
* RemoteDocument to re-apply).
*
* The LocalStore is responsible for the garbage collection of the documents it
 * contains. For now, it retains every doc referenced by a view, the mutation
 * queue, or the RemoteStore.
*
* It also maintains the persistence of mapping queries to resume tokens and
* target ids. It needs to know this data about queries to properly know what
* docs it would be allowed to garbage collect.
*
* The LocalStore must be able to efficiently execute queries against its local
* cache of the documents, to provide the initial set of results before any
* remote changes have been received.
*
* Note: In TypeScript, most methods return Promises since the implementation
* may rely on fetching data from IndexedDB which is async.
* These Promises will only be rejected on an I/O error or other internal
* (unexpected) failure (e.g. failed assert) and always represent an
* unrecoverable error (should be caught / reported by the async_queue).
*/
export declare class LocalStore {
/** Manages our in-memory or durable persistence. */
private persistence;
private queryEngine;
/**
* The maximum time to leave a resume token buffered without writing it out.
* This value is arbitrary: it's long enough to avoid several writes
* (possibly indefinitely if updates come more frequently than this) but
* short enough that restarting after crashing will still have a pretty
* recent resume token.
*/
private static readonly RESUME_TOKEN_MAX_AGE_MICROS;
/**
* The set of all mutations that have been sent but not yet been applied to
* the backend.
*/
private mutationQueue;
/** The set of all cached remote documents. */
private remoteDocuments;
/**
* The "local" view of all documents (layering mutationQueue on top of
* remoteDocumentCache).
*/
private localDocuments;
/**
* The set of document references maintained by any local views.
*/
private localViewReferences;
/** Maps a target to its `TargetData`. */
private targetCache;
/**
* Maps a targetID to data about its target.
*
* PORTING NOTE: We are using an immutable data structure on Web to make re-runs
* of `applyRemoteEvent()` idempotent.
*/
private targetDataByTarget;
/** Maps a target to its targetID. */
private targetIdByTarget;
/**
* The read time of the last entry processed by `getNewDocumentChanges()`.
*
* PORTING NOTE: This is only used for multi-tab synchronization.
*/
private lastDocumentChangeReadTime;
constructor(
/** Manages our in-memory or durable persistence. */
persistence: Persistence, queryEngine: QueryEngine, initialUser: User);
/** Starts the LocalStore. */
start(): Promise<void>;
/**
* Tells the LocalStore that the currently authenticated user has changed.
*
* In response the local store switches the mutation queue to the new user and
* returns any resulting document changes.
*/
handleUserChange(user: User): Promise<UserChangeResult>;
localWrite(mutations: Mutation[]): Promise<LocalWriteResult>;
/** Returns the local view of the documents affected by a mutation batch. */
lookupMutationDocuments(batchId: BatchId): Promise<MaybeDocumentMap | null>;
/**
* Acknowledge the given batch.
*
* On the happy path when a batch is acknowledged, the local store will
*
* + remove the batch from the mutation queue;
* + apply the changes to the remote document cache;
* + recalculate the latency compensated view implied by those changes (there
* may be mutations in the queue that affect the documents but haven't been
* acknowledged yet); and
     * + give the changed documents back to the sync engine
*
* @returns The resulting (modified) documents.
*/
acknowledgeBatch(batchResult: MutationBatchResult): Promise<MaybeDocumentMap>;
/**
* Remove mutations from the MutationQueue for the specified batch;
* LocalDocuments will be recalculated.
*
* @returns The resulting modified documents.
*/
rejectBatch(batchId: BatchId): Promise<MaybeDocumentMap>;
/**
     * Returns the largest (latest) batch id in the mutation queue that is pending server response.
* Returns `BATCHID_UNKNOWN` if the queue is empty.
*/
getHighestUnacknowledgedBatchId(): Promise<BatchId>;
/** Returns the last recorded stream token for the current user. */
getLastStreamToken(): Promise<ProtoByteString>;
/**
* Sets the stream token for the current user without acknowledging any
* mutation batch. This is usually only useful after a stream handshake or in
* response to an error that requires clearing the stream token.
*/
setLastStreamToken(streamToken: ProtoByteString): Promise<void>;
/**
* Returns the last consistent snapshot processed (used by the RemoteStore to
* determine whether to buffer incoming snapshots from the backend).
*/
getLastRemoteSnapshotVersion(): Promise<SnapshotVersion>;
/**
* Update the "ground-state" (remote) documents. We assume that the remote
* event reflects any write batches that have been acknowledged or rejected
* (i.e. we do not re-apply local mutations to updates from this event).
*
* LocalDocuments are re-calculated if there are remaining mutations in the
* queue.
*/
applyRemoteEvent(remoteEvent: RemoteEvent): Promise<MaybeDocumentMap>;
/**
* Returns true if the newTargetData should be persisted during an update of
     * an active target. TargetData should always be persisted when a target is
     * being released, so this function should not be called in that case.
*
* While the target is active, TargetData updates can be omitted when nothing
* about the target has changed except metadata like the resume token or
* snapshot version. Occasionally it's worth the extra write to prevent these
* values from getting too stale after a crash, but this doesn't have to be
* too frequent.
*/
private static shouldPersistTargetData;
/**
* Notify local store of the changed views to locally pin documents.
*/
notifyLocalViewChanges(viewChanges: LocalViewChanges[]): Promise<void>;
/**
* Gets the mutation batch after the passed in batchId in the mutation queue
* or null if empty.
* @param afterBatchId If provided, the batch to search after.
* @returns The next mutation or null if there wasn't one.
*/
nextMutationBatch(afterBatchId?: BatchId): Promise<MutationBatch | null>;
/**
* Read the current value of a Document with a given key or null if not
* found - used for testing.
*/
readDocument(key: DocumentKey): Promise<MaybeDocument | null>;
/**
* Assigns the given target an internal ID so that its results can be pinned and
* don't get GC'd. A target must be allocated in the local store before
* the store can be used to manage its view.
*
* Allocating an already allocated `Target` will return the existing `TargetData`
* for that `Target`.
*/
allocateTarget(target: Target): Promise<TargetData>;
/**
* Returns the TargetData as seen by the LocalStore, including updates that may
* have not yet been persisted to the TargetCache.
*/
getTargetData(transaction: PersistenceTransaction, target: Target): PersistencePromise<TargetData | null>;
/**
* Unpin all the documents associated with the given target. If
* `keepPersistedTargetData` is set to false and eager GC is enabled, the method
* directly removes the associated target data from the target cache.
*
* Releasing a non-existing `Target` is a no-op.
*/
releaseTarget(targetId: number, keepPersistedTargetData: boolean): Promise<void>;
/**
* Runs the specified query against the local store and returns the results,
* potentially taking advantage of query data from previous executions (such
* as the set of remote keys).
*
* @param usePreviousResults Whether results from previous executions can
* be used to optimize this query execution.
*/
executeQuery(query: Query, usePreviousResults: boolean): Promise<QueryResult>;
/**
* Returns the keys of the documents that are associated with the given
* target id in the remote table.
*/
remoteDocumentKeys(targetId: TargetId): Promise<DocumentKeySet>;
getActiveClients(): Promise<ClientId[]>;
removeCachedMutationBatchMetadata(batchId: BatchId): void;
setNetworkEnabled(networkEnabled: boolean): void;
private applyWriteToRemoteDocuments;
collectGarbage(garbageCollector: LruGarbageCollector): Promise<LruResults>;
getTarget(targetId: TargetId): Promise<Target | null>;
/**
* Returns the set of documents that have been updated since the last call.
* If this is the first call, returns the set of changes since client
* initialization. Further invocations will return document changes since
* the previous call.
*/
getNewDocumentChanges(): Promise<MaybeDocumentMap>;
/**
* Reads the newest document change from persistence and forwards the internal
* synchronization marker so that calls to `getNewDocumentChanges()`
* only return changes that happened after client initialization.
*/
synchronizeLastDocumentChangeReadTime(): Promise<void>;
}
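// Editorial sketch (not part of the original declaration file): the happy-path
// acknowledgement flow described on `acknowledgeBatch` above. `localStore` and
// `batchResult` are illustrative placeholders supplied by the caller.
async function onWriteAcknowledged(
  localStore: LocalStore,
  batchResult: MutationBatchResult
): Promise<MaybeDocumentMap> {
  // Removes the batch from the mutation queue, applies its changes to the
  // remote document cache and recomputes the latency-compensated view.
  const changedDocs = await localStore.acknowledgeBatch(batchResult);
  // The changed documents are then handed back to the SyncEngine.
  return changedDocs;
}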
/**
* Verifies the error thrown by a LocalStore operation. If a LocalStore
* operation fails because the primary lease has been taken by another client,
* we ignore the error (the persistence layer will immediately call
* `applyPrimaryLease` to propagate the primary state change). All other errors
* are re-thrown.
*
* @param err An error returned by a LocalStore operation.
* @return A Promise that resolves after we have recovered, or rejects with the original error.
*/
export declare function ignoreIfPrimaryLeaseLoss(err: FirestoreError): Promise<void>;
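// Editorial sketch (not part of the original file): applying
// `ignoreIfPrimaryLeaseLoss` so that losing the primary lease to another tab
// does not surface as an error. `localStore` and `viewChanges` are placeholders.
function notifyViewChangesIgnoringLeaseLoss(
  localStore: LocalStore,
  viewChanges: LocalViewChanges[]
): Promise<void> {
  return localStore
    .notifyLocalViewChanges(viewChanges)
    .catch(err => ignoreIfPrimaryLeaseLoss(err));
}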

View File

@ -0,0 +1,32 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { TargetId } from '../core/types';
import { ViewSnapshot } from '../core/view_snapshot';
import { DocumentKeySet } from '../model/collections';
/**
* A set of changes to what documents are currently in view and out of view for
* a given query. These changes are sent to the LocalStore by the View (via
* the SyncEngine) and are used to pin / unpin documents as appropriate.
*/
export declare class LocalViewChanges {
readonly targetId: TargetId;
readonly fromCache: boolean;
readonly addedKeys: DocumentKeySet;
readonly removedKeys: DocumentKeySet;
constructor(targetId: TargetId, fromCache: boolean, addedKeys: DocumentKeySet, removedKeys: DocumentKeySet);
static fromSnapshot(targetId: TargetId, viewSnapshot: ViewSnapshot): LocalViewChanges;
}

View File

@ -0,0 +1,124 @@
/**
* @license
* Copyright 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ListenSequenceNumber, TargetId } from '../core/types';
import { AsyncQueue } from '../util/async_queue';
import { SortedMap } from '../util/sorted_map';
import { LocalStore } from './local_store';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { TargetData } from './target_data';
/**
* Persistence layers intending to use LRU Garbage collection should have reference delegates that
* implement this interface. This interface defines the operations that the LRU garbage collector
* needs from the persistence layer.
*/
export interface LruDelegate {
readonly garbageCollector: LruGarbageCollector;
/** Enumerates all the targets in the TargetCache. */
forEachTarget(txn: PersistenceTransaction, f: (target: TargetData) => void): PersistencePromise<void>;
getSequenceNumberCount(txn: PersistenceTransaction): PersistencePromise<number>;
/**
* Enumerates sequence numbers for documents not associated with a target.
* Note that this may include duplicate sequence numbers.
*/
forEachOrphanedDocumentSequenceNumber(txn: PersistenceTransaction, f: (sequenceNumber: ListenSequenceNumber) => void): PersistencePromise<void>;
/**
* Removes all targets that have a sequence number less than or equal to `upperBound`, and are not
* present in the `activeTargetIds` set.
*
* @return the number of targets removed.
*/
removeTargets(txn: PersistenceTransaction, upperBound: ListenSequenceNumber, activeTargetIds: ActiveTargets): PersistencePromise<number>;
/**
* Removes all unreferenced documents from the cache that have a sequence number less than or
* equal to the given `upperBound`.
*
* @return the number of documents removed.
*/
removeOrphanedDocuments(txn: PersistenceTransaction, upperBound: ListenSequenceNumber): PersistencePromise<number>;
getCacheSize(txn: PersistenceTransaction): PersistencePromise<number>;
}
/**
* Describes a map whose keys are active target ids. We do not care about the type of the
* values.
*/
export declare type ActiveTargets = SortedMap<TargetId, unknown>;
/**
* Describes the results of a garbage collection run. `didRun` will be set to
* `false` if collection was skipped (either it is disabled or the cache size
* has not hit the threshold). If collection ran, the other fields will be
* filled in with the details of the results.
*/
export interface LruResults {
readonly didRun: boolean;
readonly sequenceNumbersCollected: number;
readonly targetsRemoved: number;
readonly documentsRemoved: number;
}
export declare class LruParams {
readonly cacheSizeCollectionThreshold: number;
readonly percentileToCollect: number;
readonly maximumSequenceNumbersToCollect: number;
static readonly COLLECTION_DISABLED = -1;
static readonly MINIMUM_CACHE_SIZE_BYTES: number;
static readonly DEFAULT_CACHE_SIZE_BYTES: number;
private static readonly DEFAULT_COLLECTION_PERCENTILE;
private static readonly DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT;
static withCacheSize(cacheSize: number): LruParams;
static readonly DEFAULT: LruParams;
static readonly DISABLED: LruParams;
constructor(cacheSizeCollectionThreshold: number, percentileToCollect: number, maximumSequenceNumbersToCollect: number);
}
/**
* This class is responsible for the scheduling of LRU garbage collection. It handles checking
* whether or not GC is enabled, as well as which delay to use before the next run.
*/
export declare class LruScheduler {
private readonly garbageCollector;
private readonly asyncQueue;
private readonly localStore;
private hasRun;
private gcTask;
constructor(garbageCollector: LruGarbageCollector, asyncQueue: AsyncQueue, localStore: LocalStore);
start(): void;
stop(): void;
get started(): boolean;
private scheduleGC;
}
/** Implements the steps for LRU garbage collection. */
export declare class LruGarbageCollector {
private readonly delegate;
readonly params: LruParams;
constructor(delegate: LruDelegate, params: LruParams);
/** Given a percentile of targets to collect, returns the number of targets to collect. */
calculateTargetCount(txn: PersistenceTransaction, percentile: number): PersistencePromise<number>;
/** Returns the nth sequence number, counting in order from the smallest. */
nthSequenceNumber(txn: PersistenceTransaction, n: number): PersistencePromise<ListenSequenceNumber>;
/**
* Removes targets with a sequence number equal to or less than the given upper bound, and removes
* document associations with those targets.
*/
removeTargets(txn: PersistenceTransaction, upperBound: ListenSequenceNumber, activeTargetIds: ActiveTargets): PersistencePromise<number>;
/**
* Removes documents that have a sequence number equal to or less than the upper bound and are not
* otherwise pinned.
*/
removeOrphanedDocuments(txn: PersistenceTransaction, upperBound: ListenSequenceNumber): PersistencePromise<number>;
collect(txn: PersistenceTransaction, activeTargetIds: ActiveTargets): PersistencePromise<LruResults>;
getCacheSize(txn: PersistenceTransaction): PersistencePromise<number>;
private runGarbageCollection;
}
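// Editorial sketch (not part of the original file): wiring up the collector
// with a 50 MB threshold via `LruParams.withCacheSize`. `delegate` stands in
// for a persistence layer's reference delegate that implements `LruDelegate`.
declare const delegate: LruDelegate;
const collector = new LruGarbageCollector(
  delegate,
  LruParams.withCacheSize(50 * 1024 * 1024)
);
// An LruScheduler would then periodically run `collector.collect()` inside a
// persistence transaction while this client holds the primary lease.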

View File

@ -0,0 +1,39 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ResourcePath } from '../model/path';
import { IndexManager } from './index_manager';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
/**
* An in-memory implementation of IndexManager.
*/
export declare class MemoryIndexManager implements IndexManager {
private collectionParentIndex;
addToCollectionParentIndex(transaction: PersistenceTransaction, collectionPath: ResourcePath): PersistencePromise<void>;
getCollectionParents(transaction: PersistenceTransaction, collectionId: string): PersistencePromise<ResourcePath[]>;
}
/**
* Internal implementation of the collection-parent index exposed by MemoryIndexManager.
* Also used for in-memory caching by IndexedDbIndexManager and initial index population
* in indexeddb_schema.ts
*/
export declare class MemoryCollectionParentIndex {
private index;
add(collectionPath: ResourcePath): boolean;
has(collectionPath: ResourcePath): boolean;
getEntries(collectionId: string): ResourcePath[];
}
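// Editorial sketch (not part of the original file): how the collection-parent
// index is meant to be used. `ResourcePath.fromString` is assumed to exist in
// '../model/path'; the paths are illustrative.
const parentIndex = new MemoryCollectionParentIndex();
parentIndex.add(ResourcePath.fromString('rooms/roomA/messages'));
parentIndex.add(ResourcePath.fromString('rooms/roomB/messages'));
// Looking up the collection ID 'messages' yields its distinct parent paths
// (here rooms/roomA and rooms/roomB).
const parents: ResourcePath[] = parentIndex.getEntries('messages');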

View File

@ -0,0 +1,89 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Timestamp } from '../api/timestamp';
import { Query } from '../core/query';
import { BatchId, ProtoByteString } from '../core/types';
import { DocumentKeySet } from '../model/collections';
import { DocumentKey } from '../model/document_key';
import { Mutation } from '../model/mutation';
import { MutationBatch } from '../model/mutation_batch';
import { SortedMap } from '../util/sorted_map';
import { IndexManager } from './index_manager';
import { MutationQueue } from './mutation_queue';
import { PersistenceTransaction, ReferenceDelegate } from './persistence';
import { PersistencePromise } from './persistence_promise';
export declare class MemoryMutationQueue implements MutationQueue {
private readonly indexManager;
private readonly referenceDelegate;
/**
* The set of all mutations that have been sent but not yet been applied to
* the backend.
*/
private mutationQueue;
/** Next value to use when assigning sequential IDs to each mutation batch. */
private nextBatchId;
/** The last received stream token from the server, used to acknowledge which
* responses the client has processed. Stream tokens are opaque checkpoint
* markers whose only real value is their inclusion in the next request.
*/
private lastStreamToken;
/** An ordered mapping between documents and the mutation batch IDs. */
private batchesByDocumentKey;
constructor(indexManager: IndexManager, referenceDelegate: ReferenceDelegate);
checkEmpty(transaction: PersistenceTransaction): PersistencePromise<boolean>;
acknowledgeBatch(transaction: PersistenceTransaction, batch: MutationBatch, streamToken: ProtoByteString): PersistencePromise<void>;
getLastStreamToken(transaction: PersistenceTransaction): PersistencePromise<ProtoByteString>;
setLastStreamToken(transaction: PersistenceTransaction, streamToken: ProtoByteString): PersistencePromise<void>;
addMutationBatch(transaction: PersistenceTransaction, localWriteTime: Timestamp, baseMutations: Mutation[], mutations: Mutation[]): PersistencePromise<MutationBatch>;
lookupMutationBatch(transaction: PersistenceTransaction, batchId: BatchId): PersistencePromise<MutationBatch | null>;
lookupMutationKeys(transaction: PersistenceTransaction, batchId: BatchId): PersistencePromise<DocumentKeySet | null>;
getNextMutationBatchAfterBatchId(transaction: PersistenceTransaction, batchId: BatchId): PersistencePromise<MutationBatch | null>;
getHighestUnacknowledgedBatchId(): PersistencePromise<BatchId>;
getAllMutationBatches(transaction: PersistenceTransaction): PersistencePromise<MutationBatch[]>;
getAllMutationBatchesAffectingDocumentKey(transaction: PersistenceTransaction, documentKey: DocumentKey): PersistencePromise<MutationBatch[]>;
getAllMutationBatchesAffectingDocumentKeys(transaction: PersistenceTransaction, documentKeys: SortedMap<DocumentKey, unknown>): PersistencePromise<MutationBatch[]>;
getAllMutationBatchesAffectingQuery(transaction: PersistenceTransaction, query: Query): PersistencePromise<MutationBatch[]>;
private findMutationBatches;
removeMutationBatch(transaction: PersistenceTransaction, batch: MutationBatch): PersistencePromise<void>;
removeCachedMutationKeys(batchId: BatchId): void;
containsKey(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<boolean>;
performConsistencyCheck(txn: PersistenceTransaction): PersistencePromise<void>;
/**
* Finds the index of the given batchId in the mutation queue and asserts that
* the resulting index is within the bounds of the queue.
*
* @param batchId The batchId to search for
* @param action A description of what the caller is doing, phrased in passive
* form (e.g. "acknowledged" in a routine that acknowledges batches).
*/
private indexOfExistingBatchId;
/**
* Finds the index of the given batchId in the mutation queue. This operation
* is O(1).
*
* @return The computed index of the batch with the given batchId, based on
* the state of the queue. Note that this index can be negative if the requested
* batchId has already been removed from the queue, or past the end of the
* queue if the batchId is larger than that of the last added batch.
*/
private indexOfBatchId;
/**
* A version of lookupMutationBatch that doesn't return a promise; this makes
* other functions that use this code easier to read and more efficient.
*/
private findMutationBatch;
}
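// Editorial sketch (not part of the original file): why `indexOfBatchId` can be
// O(1). Batch IDs are assigned sequentially, so a batch's position in the queue
// is simply its offset from the first queued batch's ID. `queue` is a
// placeholder for the internal mutation queue array.
function indexOfBatchIdSketch(queue: MutationBatch[], batchId: BatchId): number {
  if (queue.length === 0) {
    return 0;
  }
  // e.g. if the queue holds batch IDs [42, 43, 44], looking up 44 yields index 2.
  return batchId - queue[0].batchId;
}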

View File

@ -0,0 +1,121 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { User } from '../auth/user';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { ActiveTargets, LruDelegate, LruGarbageCollector, LruParams } from './lru_garbage_collector';
import { ListenSequenceNumber } from '../core/types';
import { MemoryIndexManager } from './memory_index_manager';
import { MemoryRemoteDocumentCache } from './memory_remote_document_cache';
import { MemoryTargetCache } from './memory_target_cache';
import { MutationQueue } from './mutation_queue';
import { Persistence, PersistenceTransaction, PersistenceTransactionMode, PrimaryStateListener, ReferenceDelegate } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { ReferenceSet } from './reference_set';
import { ClientId } from './shared_client_state';
import { TargetData } from './target_data';
/**
* A memory-backed instance of Persistence. Data is stored only in RAM and
* not persisted across sessions.
*/
export declare class MemoryPersistence implements Persistence {
private readonly clientId;
/**
* Note that these are retained here to make it easier to write tests
* affecting both the in-memory and IndexedDB-backed persistence layers. Tests
* can create a new LocalStore wrapping this Persistence instance and this
* will make the in-memory persistence layer behave as if it were actually
* persisting values.
*/
private readonly indexManager;
private mutationQueues;
private readonly remoteDocumentCache;
private readonly targetCache;
private readonly listenSequence;
private _started;
readonly referenceDelegate: MemoryLruDelegate | MemoryEagerDelegate;
static createLruPersistence(clientId: ClientId, params: LruParams): MemoryPersistence;
static createEagerPersistence(clientId: ClientId): MemoryPersistence;
/**
* The constructor accepts a factory for creating a reference delegate. This
* allows both the delegate and this instance to have strong references to
* each other without having nullable fields that would then need to be
* checked or asserted on every access.
*/
private constructor();
shutdown(): Promise<void>;
get started(): boolean;
getActiveClients(): Promise<ClientId[]>;
setPrimaryStateListener(primaryStateListener: PrimaryStateListener): Promise<void>;
setDatabaseDeletedListener(): void;
setNetworkEnabled(networkEnabled: boolean): void;
getIndexManager(): MemoryIndexManager;
getMutationQueue(user: User): MutationQueue;
getTargetCache(): MemoryTargetCache;
getRemoteDocumentCache(): MemoryRemoteDocumentCache;
runTransaction<T>(action: string, mode: PersistenceTransactionMode, transactionOperation: (transaction: PersistenceTransaction) => PersistencePromise<T>): Promise<T>;
mutationQueuesContainKey(transaction: PersistenceTransaction, key: DocumentKey): PersistencePromise<boolean>;
}
/**
* Memory persistence is not actually transactional, but future implementations
* may have transaction-scoped state.
*/
export declare class MemoryTransaction extends PersistenceTransaction {
readonly currentSequenceNumber: ListenSequenceNumber;
constructor(currentSequenceNumber: ListenSequenceNumber);
}
export declare class MemoryEagerDelegate implements ReferenceDelegate {
private readonly persistence;
private inMemoryPins;
private _orphanedDocuments;
constructor(persistence: MemoryPersistence);
private get orphanedDocuments();
setInMemoryPins(inMemoryPins: ReferenceSet): void;
addReference(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
removeReference(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
removeMutationReference(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
removeTarget(txn: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
onTransactionStarted(): void;
onTransactionCommitted(txn: PersistenceTransaction): PersistencePromise<void>;
updateLimboDocument(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
documentSize(doc: MaybeDocument): number;
private isReferenced;
}
export declare class MemoryLruDelegate implements ReferenceDelegate, LruDelegate {
private readonly persistence;
private inMemoryPins;
private orphanedSequenceNumbers;
readonly garbageCollector: LruGarbageCollector;
constructor(persistence: MemoryPersistence, lruParams: LruParams);
onTransactionStarted(): void;
onTransactionCommitted(txn: PersistenceTransaction): PersistencePromise<void>;
forEachTarget(txn: PersistenceTransaction, f: (q: TargetData) => void): PersistencePromise<void>;
getSequenceNumberCount(txn: PersistenceTransaction): PersistencePromise<number>;
private orphanedDocumentCount;
forEachOrphanedDocumentSequenceNumber(txn: PersistenceTransaction, f: (sequenceNumber: ListenSequenceNumber) => void): PersistencePromise<void>;
setInMemoryPins(inMemoryPins: ReferenceSet): void;
removeTargets(txn: PersistenceTransaction, upperBound: ListenSequenceNumber, activeTargetIds: ActiveTargets): PersistencePromise<number>;
removeOrphanedDocuments(txn: PersistenceTransaction, upperBound: ListenSequenceNumber): PersistencePromise<number>;
removeMutationReference(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
removeTarget(txn: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
addReference(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
removeReference(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
updateLimboDocument(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<void>;
documentSize(maybeDoc: MaybeDocument): number;
private isPinned;
getCacheSize(txn: PersistenceTransaction): PersistencePromise<number>;
}
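// Editorial sketch (not part of the original file): the two factory methods
// declared above, assuming `ClientId` is a string alias as in this package.
// The client IDs are illustrative placeholders.
const eagerPersistence = MemoryPersistence.createEagerPersistence('client-a');
const lruPersistence = MemoryPersistence.createLruPersistence(
  'client-b',
  LruParams.DEFAULT
);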

View File

@ -0,0 +1,71 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Query } from '../core/query';
import { DocumentKeySet, DocumentMap, MaybeDocumentMap, NullableMaybeDocumentMap } from '../model/collections';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { SnapshotVersion } from '../core/snapshot_version';
import { IndexManager } from './index_manager';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { RemoteDocumentCache } from './remote_document_cache';
import { RemoteDocumentChangeBuffer } from './remote_document_change_buffer';
export declare type DocumentSizer = (doc: MaybeDocument) => number;
export declare class MemoryRemoteDocumentCache implements RemoteDocumentCache {
private readonly indexManager;
private readonly sizer;
/** Underlying cache of documents and their read times. */
private docs;
/** Size of all cached documents. */
private size;
/**
* @param sizer Used to assess the size of a document. For eager GC, this is expected to just
* return 0 to avoid unnecessarily doing the work of calculating the size.
*/
constructor(indexManager: IndexManager, sizer: DocumentSizer);
/**
* Adds the supplied entry to the cache and updates the cache size as appropriate.
*
* All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()`.
*/
private addEntry;
/**
* Removes the specified entry from the cache and updates the cache size as appropriate.
*
* All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()`.
*/
private removeEntry;
getEntry(transaction: PersistenceTransaction, documentKey: DocumentKey): PersistencePromise<MaybeDocument | null>;
getEntries(transaction: PersistenceTransaction, documentKeys: DocumentKeySet): PersistencePromise<NullableMaybeDocumentMap>;
getDocumentsMatchingQuery(transaction: PersistenceTransaction, query: Query, sinceReadTime: SnapshotVersion): PersistencePromise<DocumentMap>;
forEachDocumentKey(transaction: PersistenceTransaction, f: (key: DocumentKey) => PersistencePromise<void>): PersistencePromise<void>;
getNewDocumentChanges(transaction: PersistenceTransaction, sinceReadTime: SnapshotVersion): PersistencePromise<{
changedDocs: MaybeDocumentMap;
readTime: SnapshotVersion;
}>;
getLastReadTime(transaction: PersistenceTransaction): PersistencePromise<SnapshotVersion>;
newChangeBuffer(options?: {
trackRemovals: boolean;
}): RemoteDocumentChangeBuffer;
getSize(txn: PersistenceTransaction): PersistencePromise<number>;
/**
* Handles the details of adding and updating documents in the MemoryRemoteDocumentCache.
*/
private static RemoteDocumentChangeBuffer;
}

View File

@ -0,0 +1,66 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { ListenSequenceNumber, TargetId } from '../core/types';
import { DocumentKeySet } from '../model/collections';
import { DocumentKey } from '../model/document_key';
import { ActiveTargets } from './lru_garbage_collector';
import { MemoryPersistence } from './memory_persistence';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { TargetCache } from './target_cache';
import { TargetData } from './target_data';
import { Target } from '../core/target';
export declare class MemoryTargetCache implements TargetCache {
private readonly persistence;
/**
* Maps a target to the data about that target
*/
private targets;
/** The last received snapshot version. */
private lastRemoteSnapshotVersion;
/** The highest numbered target ID encountered. */
private highestTargetId;
/** The highest sequence number encountered. */
private highestSequenceNumber;
/**
* An ordered bidirectional mapping between documents and the remote target
* IDs.
*/
private references;
private targetCount;
private targetIdGenerator;
constructor(persistence: MemoryPersistence);
forEachTarget(txn: PersistenceTransaction, f: (q: TargetData) => void): PersistencePromise<void>;
getLastRemoteSnapshotVersion(transaction: PersistenceTransaction): PersistencePromise<SnapshotVersion>;
getHighestSequenceNumber(transaction: PersistenceTransaction): PersistencePromise<ListenSequenceNumber>;
allocateTargetId(transaction: PersistenceTransaction): PersistencePromise<TargetId>;
setTargetsMetadata(transaction: PersistenceTransaction, highestListenSequenceNumber: number, lastRemoteSnapshotVersion?: SnapshotVersion): PersistencePromise<void>;
private saveTargetData;
addTargetData(transaction: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
updateTargetData(transaction: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
removeTargetData(transaction: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
removeTargets(transaction: PersistenceTransaction, upperBound: ListenSequenceNumber, activeTargetIds: ActiveTargets): PersistencePromise<number>;
getTargetCount(transaction: PersistenceTransaction): PersistencePromise<number>;
getTargetData(transaction: PersistenceTransaction, target: Target): PersistencePromise<TargetData | null>;
getTargetDataForTarget(transaction: PersistenceTransaction, targetId: TargetId): never;
addMatchingKeys(txn: PersistenceTransaction, keys: DocumentKeySet, targetId: TargetId): PersistencePromise<void>;
removeMatchingKeys(txn: PersistenceTransaction, keys: DocumentKeySet, targetId: TargetId): PersistencePromise<void>;
removeMatchingKeysForTargetId(txn: PersistenceTransaction, targetId: TargetId): PersistencePromise<void>;
getMatchingKeysForTargetId(txn: PersistenceTransaction, targetId: TargetId): PersistencePromise<DocumentKeySet>;
containsKey(txn: PersistenceTransaction, key: DocumentKey): PersistencePromise<boolean>;
}

View File

@ -0,0 +1,146 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Timestamp } from '../api/timestamp';
import { Query } from '../core/query';
import { BatchId, ProtoByteString } from '../core/types';
import { DocumentKeySet } from '../model/collections';
import { DocumentKey } from '../model/document_key';
import { Mutation } from '../model/mutation';
import { MutationBatch } from '../model/mutation_batch';
import { SortedMap } from '../util/sorted_map';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
/** A queue of mutations to apply to the remote store. */
export interface MutationQueue {
/** Returns true if this queue contains no mutation batches. */
checkEmpty(transaction: PersistenceTransaction): PersistencePromise<boolean>;
/**
* Acknowledges the given batch.
*/
acknowledgeBatch(transaction: PersistenceTransaction, batch: MutationBatch, streamToken: ProtoByteString): PersistencePromise<void>;
/** Returns the current stream token for this mutation queue. */
getLastStreamToken(transaction: PersistenceTransaction): PersistencePromise<ProtoByteString>;
/** Sets the stream token for this mutation queue. */
setLastStreamToken(transaction: PersistenceTransaction, streamToken: ProtoByteString): PersistencePromise<void>;
/**
* Creates a new mutation batch and adds it to this mutation queue.
*
* @param transaction The transaction this operation is scoped to.
* @param localWriteTime The original write time of this mutation.
* @param baseMutations Mutations that are used to populate the base values
* when this mutation is applied locally. These mutations are used to locally
* overwrite values that are persisted in the remote document cache.
* @param mutations The user-provided mutations in this mutation batch.
*/
addMutationBatch(transaction: PersistenceTransaction, localWriteTime: Timestamp, baseMutations: Mutation[], mutations: Mutation[]): PersistencePromise<MutationBatch>;
/**
* Loads the mutation batch with the given batchId.
*/
lookupMutationBatch(transaction: PersistenceTransaction, batchId: BatchId): PersistencePromise<MutationBatch | null>;
/**
* Returns the document keys for the mutation batch with the given batchId.
* For primary clients, this method returns `null` after
* `removeMutationBatches()` has been called. Secondary clients return a
* cached result until `removeCachedMutationKeys()` is invoked.
*/
lookupMutationKeys(transaction: PersistenceTransaction, batchId: BatchId): PersistencePromise<DocumentKeySet | null>;
/**
* Gets the first unacknowledged mutation batch after the passed in batchId
* in the mutation queue or null if empty.
*
* @param batchId The batch to search after, or BATCHID_UNKNOWN for the first
* mutation in the queue.
*
* @return the next mutation or null if there wasn't one.
*/
getNextMutationBatchAfterBatchId(transaction: PersistenceTransaction, batchId: BatchId): PersistencePromise<MutationBatch | null>;
/**
* Gets the largest (latest) batch ID in the mutation queue for the current user that is
* pending a server response; returns `BATCHID_UNKNOWN` if the queue is empty.
*
* @return the largest batch id in the mutation queue that is not acknowledged.
*/
getHighestUnacknowledgedBatchId(transaction: PersistenceTransaction): PersistencePromise<BatchId>;
/** Gets all mutation batches in the mutation queue. */
getAllMutationBatches(transaction: PersistenceTransaction): PersistencePromise<MutationBatch[]>;
/**
* Finds all mutation batches that could possibly affect the given
* document key. Not all mutations in a batch will necessarily affect the
* document key, so when looping through the batch you'll need to check that
* the mutation itself matches the key.
*
* Batches are guaranteed to be in sorted order.
*
* Note that because of this requirement implementations are free to return
* mutation batches that don't contain the document key at all if it's
* convenient.
*/
getAllMutationBatchesAffectingDocumentKey(transaction: PersistenceTransaction, documentKey: DocumentKey): PersistencePromise<MutationBatch[]>;
/**
* Finds all mutation batches that could possibly affect the given set of
* document keys. Not all mutations in a batch will necessarily affect each
* key, so when looping through the batch you'll need to check that the
* mutation itself matches the key.
*
* Batches are guaranteed to be in sorted order.
*
* Note that because of this requirement implementations are free to return
* mutation batches that don't contain any of the document keys at all if it's
* convenient.
*/
getAllMutationBatchesAffectingDocumentKeys(transaction: PersistenceTransaction, documentKeys: SortedMap<DocumentKey, unknown>): PersistencePromise<MutationBatch[]>;
/**
* Finds all mutation batches that could affect the results for the given
* query. Not all mutations in a batch will necessarily affect the query, so
* when looping through the batch you'll need to check that the mutation
* itself matches the query.
*
* Batches are guaranteed to be in sorted order.
*
* Note that because of this requirement implementations are free to return
* mutation batches that don't match the query at all if it's convenient.
*
* NOTE: A PatchMutation does not need to include all fields in the query
* filter criteria in order to be a match (but any fields it does contain do
* need to match).
*/
getAllMutationBatchesAffectingQuery(transaction: PersistenceTransaction, query: Query): PersistencePromise<MutationBatch[]>;
/**
* Removes the given mutation batch from the queue. This is useful in two
* circumstances:
*
* + Removing an applied mutation from the head of the queue
* + Removing a rejected mutation from anywhere in the queue
*
* Multi-Tab Note: This operation should only be called by the primary client.
*/
removeMutationBatch(transaction: PersistenceTransaction, batch: MutationBatch): PersistencePromise<void>;
/**
* Clears the cached keys for a mutation batch. This method should be
* called by secondary clients after they process mutation updates.
*
* Note that this method does not have to be called from primary clients as
* the corresponding cache entries are cleared when an acknowledged or
* rejected batch is removed from the mutation queue.
*/
removeCachedMutationKeys(batchId: BatchId): void;
/**
* Performs a consistency check, examining the mutation queue for any
* leaks, if possible.
*/
performConsistencyCheck(transaction: PersistenceTransaction): PersistencePromise<void>;
}
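// Editorial sketch (not part of the original file): the filtering contract
// described above. Batches returned for a document key may also contain
// mutations for other keys, so the caller filters each batch itself. Assumes
// `MutationBatch` exposes a `mutations` array and each `Mutation` a `key`.
function mutationsAffectingKey(batches: MutationBatch[], key: DocumentKey): Mutation[] {
  const result: Mutation[] = [];
  for (const batch of batches) {
    for (const mutation of batch.mutations) {
      if (mutation.key.isEqual(key)) {
        result.push(mutation);
      }
    }
  }
  return result;
}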

View File

@ -0,0 +1,223 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { User } from '../auth/user';
import { ListenSequenceNumber } from '../core/types';
import { DocumentKey } from '../model/document_key';
import { IndexManager } from './index_manager';
import { MutationQueue } from './mutation_queue';
import { PersistencePromise } from './persistence_promise';
import { TargetCache } from './target_cache';
import { ReferenceSet } from './reference_set';
import { RemoteDocumentCache } from './remote_document_cache';
import { ClientId } from './shared_client_state';
import { TargetData } from './target_data';
export declare const PRIMARY_LEASE_LOST_ERROR_MSG: string;
/**
* A base class representing a persistence transaction, encapsulating both the
* transaction's sequence numbers and a list of onCommitted listeners.
*
* When you call Persistence.runTransaction(), it will create a transaction and
* pass it to your callback. You then pass it to any method that operates
* on persistence.
*/
export declare abstract class PersistenceTransaction {
private readonly onCommittedListeners;
abstract readonly currentSequenceNumber: ListenSequenceNumber;
addOnCommittedListener(listener: () => void): void;
raiseOnCommittedEvent(): void;
}
/** The different modes supported by `IndexedDbPersistence.runTransaction()`. */
export declare type PersistenceTransactionMode = 'readonly' | 'readwrite' | 'readwrite-primary' | 'readonly-idempotent' | 'readwrite-idempotent' | 'readwrite-primary-idempotent';
/**
* Callback type for primary state notifications. This callback can be
* registered with the persistence layer to get notified when we transition from
* primary to secondary state and vice versa.
*
* Note: Instances can only toggle between Primary and Secondary state if
* IndexedDB persistence is enabled and multiple clients are active. If this
* listener is registered with MemoryPersistence, the callback will be called
* exactly once marking the current instance as Primary.
*/
export declare type PrimaryStateListener = (isPrimary: boolean) => Promise<void>;
/**
* A ReferenceDelegate instance handles all of the hooks into the document-reference lifecycle. This
* includes being added to a target, being removed from a target, being subject to mutation, and
* being mutated by the user.
*
* Different implementations may do different things with each of these events. Not every
* implementation needs to do something with every lifecycle hook.
*
* PORTING NOTE: since sequence numbers are attached to transactions in this
* client, the ReferenceDelegate does not need to deal in transactional
* semantics (onTransactionStarted/Committed()), nor does it need to track and
* generate sequence numbers (getCurrentSequenceNumber()).
*/
export interface ReferenceDelegate {
/**
* Registers a ReferenceSet of documents that should be considered 'referenced' and not eligible
* for removal during garbage collection.
*/
setInMemoryPins(pins: ReferenceSet): void;
/** Notify the delegate that the given document was added to a target. */
addReference(txn: PersistenceTransaction, doc: DocumentKey): PersistencePromise<void>;
/** Notify the delegate that the given document was removed from a target. */
removeReference(txn: PersistenceTransaction, doc: DocumentKey): PersistencePromise<void>;
/**
* Notify the delegate that a target was removed. The delegate may, but is not obligated to,
* actually delete the target and associated data.
*/
removeTarget(txn: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
/** Notify the delegate that a document is no longer being mutated by the user. */
removeMutationReference(txn: PersistenceTransaction, doc: DocumentKey): PersistencePromise<void>;
/** Notify the delegate that a limbo document was updated. */
updateLimboDocument(txn: PersistenceTransaction, doc: DocumentKey): PersistencePromise<void>;
}
/**
* Persistence is the lowest-level shared interface to persistent storage in
* Firestore.
*
* Persistence is used to create MutationQueue and RemoteDocumentCache
* instances backed by persistence (which might be in-memory or LevelDB).
*
* Persistence also exposes an API to create and run PersistenceTransactions
* against persistence. All read / write operations must be wrapped in a
* transaction. Implementations of PersistenceTransaction / Persistence only
* need to guarantee that writes made against the transaction are not made to
* durable storage until the transaction resolves its PersistencePromise.
* Since memory-only storage components do not alter durable storage, they are
* free to ignore the transaction.
*
* This contract is enough to allow the LocalStore to be written
* independently of whether or not the stored state actually is durably
* persisted. If persistent storage is enabled, writes are grouped together to
* avoid inconsistent state that could cause crashes.
*
* Concretely, when persistent storage is enabled, the persistent versions of
* MutationQueue, RemoteDocumentCache, and others (the mutators) will
* defer their writes into a transaction. Once the local store has completed
* one logical operation, it commits the transaction.
*
* When persistent storage is disabled, the non-persistent versions of the
* mutators ignore the transaction. This short-cut is allowed because
* memory-only storage leaves no state so it cannot be inconsistent.
*
* This simplifies the implementations of the mutators and allows memory-only
* implementations to supplement the persistent ones without requiring any
* special dual-store implementation of Persistence. The cost is that the
* LocalStore needs to be slightly careful about the order of its reads and
* writes in order to avoid relying on being able to read back uncommitted
* writes.
*/
export interface Persistence {
/**
* Whether or not this persistence instance has been started.
*/
readonly started: boolean;
readonly referenceDelegate: ReferenceDelegate;
/**
* Releases any resources held during eager shutdown.
*/
shutdown(): Promise<void>;
/**
* Registers a listener that gets called when the primary state of the
* instance changes. Upon registering, this listener is invoked immediately
* with the current primary state.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
setPrimaryStateListener(primaryStateListener: PrimaryStateListener): Promise<void>;
/**
* Registers a listener that gets called when the database receives a
* version change event indicating that it has been deleted.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
setDatabaseDeletedListener(databaseDeletedListener: () => Promise<void>): void;
/**
* Adjusts the current network state in the client's metadata, potentially
* affecting the primary lease.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
setNetworkEnabled(networkEnabled: boolean): void;
/**
* Returns the IDs of the clients that are currently active. If multi-tab
* is not supported, returns an array that only contains the local client's
* ID.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
getActiveClients(): Promise<ClientId[]>;
/**
* Returns a MutationQueue representing the persisted mutations for the
* given user.
*
* Note: The implementation is free to return the same instance every time
* this is called for a given user. In particular, the memory-backed
* implementation does this to emulate the persisted implementation to the
* extent possible (e.g. in the case of uid switching from
* sally=>jack=>sally, sally's mutation queue will be preserved).
*/
getMutationQueue(user: User): MutationQueue;
/**
* Returns a TargetCache representing the persisted cache of targets.
*
* Note: The implementation is free to return the same instance every time
* this is called. In particular, the memory-backed implementation does this
* to emulate the persisted implementation to the extent possible.
*/
getTargetCache(): TargetCache;
/**
* Returns a RemoteDocumentCache representing the persisted cache of remote
* documents.
*
* Note: The implementation is free to return the same instance every time
* this is called. In particular, the memory-backed implementation does this
* to emulate the persisted implementation to the extent possible.
*/
getRemoteDocumentCache(): RemoteDocumentCache;
/**
* Returns an IndexManager instance that manages our persisted query indexes.
*
* Note: The implementation is free to return the same instance every time
* this is called. In particular, the memory-backed implementation does this
* to emulate the persisted implementation to the extent possible.
*/
getIndexManager(): IndexManager;
/**
* Performs an operation inside a persistence transaction. Any reads or writes
* against persistence must be performed within a transaction. Writes will be
* committed atomically once the transaction completes.
*
* Persistence operations are asynchronous and therefore the provided
* transactionOperation must return a PersistencePromise. When it is resolved,
* the transaction will be committed and the Promise returned by this method
* will resolve.
*
* @param action A description of the action performed by this transaction,
* used for logging.
* @param mode The underlying mode of the IndexedDb transaction. Can be
* 'readonly', 'readwrite' or 'readwrite-primary'. Transactions marked
* 'readwrite-primary' can only be executed by the primary client. In this
* mode, the transactionOperation will not be run if the primary lease cannot
* be acquired and the returned promise will be rejected with a
* FAILED_PRECONDITION error.
* @param transactionOperation The operation to run inside a transaction.
* @return A promise that is resolved once the transaction completes.
*/
runTransaction<T>(action: string, mode: PersistenceTransactionMode, transactionOperation: (transaction: PersistenceTransaction) => PersistencePromise<T>): Promise<T>;
}
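// Editorial sketch (not part of the original file): running a read-only
// operation through `runTransaction`, assuming the TargetCache interface
// exposes `getTargetCount` as its in-memory implementation above does.
function readTargetCount(persistence: Persistence): Promise<number> {
  return persistence.runTransaction('Read target count', 'readonly', txn =>
    persistence.getTargetCache().getTargetCount(txn)
  );
}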

View File

@ -0,0 +1,72 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export declare type FulfilledHandler<T, R> = ((result: T) => R | PersistencePromise<R>) | null;
export declare type RejectedHandler<R> = ((reason: Error) => R | PersistencePromise<R>) | null;
export declare type Resolver<T> = (value?: T) => void;
export declare type Rejector = (error: Error) => void;
/**
* PersistencePromise<> is essentially a re-implementation of Promise<> except
* that it has a .next() method instead of .then(), and .next() and .catch() callbacks
* are executed synchronously when a PersistencePromise resolves rather than
* asynchronously (Promise<> implementations use setImmediate() or similar).
*
* This is necessary to interoperate with IndexedDB which will automatically
* commit transactions if control is returned to the event loop without
* synchronously initiating another operation on the transaction.
*
* NOTE: .then() and .catch() only allow a single consumer, unlike normal
* Promises.
*/
export declare class PersistencePromise<T> {
private nextCallback;
private catchCallback;
private result;
private error;
private isDone;
private callbackAttached;
constructor(callback: (resolve: Resolver<T>, reject: Rejector) => void);
catch<R>(fn: (error: Error) => R | PersistencePromise<R>): PersistencePromise<R>;
next<R>(nextFn?: FulfilledHandler<T, R>, catchFn?: RejectedHandler<R>): PersistencePromise<R>;
toPromise(): Promise<T>;
private wrapUserFunction;
private wrapSuccess;
private wrapFailure;
static resolve(): PersistencePromise<void>;
static resolve<R>(result: R): PersistencePromise<R>;
static reject<R>(error: Error): PersistencePromise<R>;
static waitFor(all: {
forEach: (cb: (el: PersistencePromise<any>) => void) => void;
}): PersistencePromise<void>;
/**
* Given an array of predicate functions that asynchronously evaluate to a
* boolean, implements a short-circuiting `or` between the results. Predicates
* will be evaluated until one of them returns `true`, at which point evaluation stops. The final
* result will be whether any of them returned `true`.
*/
static or(predicates: Array<() => PersistencePromise<boolean>>): PersistencePromise<boolean>;
/**
* Given an iterable, call the given function on each element in the
* collection and wait for all of the resulting concurrent PersistencePromises
* to resolve.
*/
static forEach<R, S>(collection: {
forEach: (cb: (r: R, s: S) => void) => void;
}, f: ((r: R, s: S) => PersistencePromise<void>) | ((r: R) => PersistencePromise<void>)): PersistencePromise<void>;
static forEach<R>(collection: {
forEach: (cb: (r: R) => void) => void;
}, f: (r: R) => PersistencePromise<void>): PersistencePromise<void>;
}
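// Editorial sketch (not part of the original file): chaining with .next()
// instead of .then(). The callbacks run synchronously when the value resolves,
// which is what keeps an IndexedDB transaction alive between operations.
const total: PersistencePromise<number> = PersistencePromise.resolve(2)
  .next(n => n + 3)
  .next(n => PersistencePromise.resolve(n * 10));
// Bridge into a regular Promise only at the asynchronous boundary.
total.toPromise().then(value => console.log(value)); // logs 50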

View File

@ -0,0 +1,32 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { Query } from '../core/query';
import { DocumentKeySet, DocumentMap } from '../model/collections';
import { LocalDocumentsView } from './local_documents_view';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
/**
* Represents a query engine capable of performing queries over the local
* document cache. You must call `setLocalDocumentsView()` before using it.
*/
export interface QueryEngine {
/** Sets the document view to query against. */
setLocalDocumentsView(localDocuments: LocalDocumentsView): void;
/** Returns all local documents matching the specified query. */
getDocumentsMatchingQuery(transaction: PersistenceTransaction, query: Query, lastLimboFreeSnapshotVersion: SnapshotVersion, remoteKeys: DocumentKeySet): PersistencePromise<DocumentMap>;
}

View File

@ -0,0 +1,68 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { BatchId, TargetId } from '../core/types';
import { DocumentKeySet } from '../model/collections';
import { DocumentKey } from '../model/document_key';
/**
* A collection of references to a document from some kind of numbered entity
* (either a target ID or batch ID). As references are added to or removed from
* the set corresponding events are emitted to a registered garbage collector.
*
* Each reference is represented by a DocumentReference object. Each of them
* contains enough information to uniquely identify the reference. They are all
* stored primarily in a set sorted by key. A document is considered garbage if
* there are no references to it in that set (this can be efficiently checked thanks to
* sorting by key).
*
* ReferenceSet also keeps a secondary set that contains references sorted by
* IDs. This one is used to efficiently implement removal of all references by
* some target ID.
*/
export declare class ReferenceSet {
private refsByKey;
private refsByTarget;
/** Returns true if the reference set contains no references. */
isEmpty(): boolean;
/** Adds a reference to the given document key for the given ID. */
addReference(key: DocumentKey, id: TargetId | BatchId): void;
/** Add references to the given document keys for the given ID. */
addReferences(keys: DocumentKeySet, id: TargetId | BatchId): void;
/**
* Removes a reference to the given document key for the given
* ID.
*/
removeReference(key: DocumentKey, id: TargetId | BatchId): void;
removeReferences(keys: DocumentKeySet, id: TargetId | BatchId): void;
/**
* Clears all references with a given ID. Calls removeRef() for each key
* removed.
*/
removeReferencesForId(id: TargetId | BatchId): DocumentKey[];
removeAllReferences(): void;
private removeRef;
referencesForId(id: TargetId | BatchId): DocumentKeySet;
containsKey(key: DocumentKey): boolean;
}
export declare class DocReference {
key: DocumentKey;
targetOrBatchId: TargetId | BatchId;
constructor(key: DocumentKey, targetOrBatchId: TargetId | BatchId);
/** Compare by key then by ID */
static compareByKey(left: DocReference, right: DocReference): number;
/** Compare by ID then by key */
static compareByTargetId(left: DocReference, right: DocReference): number;
}
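// Editorial sketch (not part of the original file): pinning a document for a
// target and then releasing that target's references. The document key and
// target ID are illustrative placeholders.
declare const roomKey: DocumentKey;
const refs = new ReferenceSet();
refs.addReference(roomKey, /* targetId= */ 2);
refs.containsKey(roomKey); // true while target 2 holds a reference
// Releasing target 2 returns the keys that were removed from the set.
const released: DocumentKey[] = refs.removeReferencesForId(2);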

View File

@ -0,0 +1,95 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Query } from '../core/query';
import { DocumentKeySet, DocumentMap, MaybeDocumentMap, NullableMaybeDocumentMap } from '../model/collections';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { RemoteDocumentChangeBuffer } from './remote_document_change_buffer';
import { SnapshotVersion } from '../core/snapshot_version';
/**
* Represents cached documents received from the remote backend.
*
* The cache is keyed by DocumentKey and entries in the cache are MaybeDocument
* instances, meaning we can cache both Document instances (an actual document
* with data) as well as NoDocument instances (indicating that the document is
* known to not exist).
*/
export interface RemoteDocumentCache {
/**
* Looks up an entry in the cache.
*
* @param documentKey The key of the entry to look up.
* @return The cached Document or NoDocument entry, or null if we have nothing
* cached.
*/
getEntry(transaction: PersistenceTransaction, documentKey: DocumentKey): PersistencePromise<MaybeDocument | null>;
/**
* Looks up a set of entries in the cache.
*
* @param documentKeys The keys of the entries to look up.
* @return The cached Document or NoDocument entries indexed by key. If an entry is not cached,
* the corresponding key will be mapped to a null value.
*/
getEntries(transaction: PersistenceTransaction, documentKeys: DocumentKeySet): PersistencePromise<NullableMaybeDocumentMap>;
/**
* Executes a query against the cached Document entries.
*
* Implementations may return extra documents if convenient. The results
* should be re-filtered by the consumer before presenting them to the user.
*
* Cached NoDocument entries have no bearing on query results.
*
* @param query The query to match documents against.
* @param sinceReadTime If not set to SnapshotVersion.MIN, return only
* documents that have been read since this snapshot version (exclusive).
* @return The set of matching documents.
*/
getDocumentsMatchingQuery(transaction: PersistenceTransaction, query: Query, sinceReadTime: SnapshotVersion): PersistencePromise<DocumentMap>;
/**
* Returns the set of documents that have changed since the specified read
* time.
*/
getNewDocumentChanges(transaction: PersistenceTransaction, sinceReadTime: SnapshotVersion): PersistencePromise<{
changedDocs: MaybeDocumentMap;
readTime: SnapshotVersion;
}>;
/**
* Returns the read time of the most recently read document in the cache, or
* SnapshotVersion.MIN if not available.
*/
getLastReadTime(transaction: PersistenceTransaction): PersistencePromise<SnapshotVersion>;
/**
* Provides access to add or update the contents of the cache. The buffer
* handles proper size accounting for the change.
*
* Multi-Tab Note: This should only be called by the primary client.
*
* @param options.trackRemovals Whether to create sentinel entries for
* removed documents, which allows removals to be tracked by
* `getNewDocumentChanges()`.
*/
newChangeBuffer(options?: {
trackRemovals: boolean;
}): RemoteDocumentChangeBuffer;
/**
* Get an estimate of the size of the document cache. Note that for eager
* garbage collection, we don't track sizes so this will return 0.
*/
getSize(transaction: PersistenceTransaction): PersistencePromise<number>;
}
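
// Illustrative consumer sketch (not part of this file): reading a document
// through a RemoteDocumentCache inside a persistence transaction. The import
// paths are assumptions, and chaining via `PersistencePromise.next()` is
// assumed to behave like `Promise.then()`.
import { RemoteDocumentCache } from './remote_document_cache';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { DocumentKey } from '../model/document_key';
import { Document } from '../model/document';

function isCachedAsExisting(
  cache: RemoteDocumentCache,
  txn: PersistenceTransaction,
  key: DocumentKey
): PersistencePromise<boolean> {
  // `getEntry()` resolves to a Document, a NoDocument, or null when the cache
  // has no entry at all for the key.
  return cache.getEntry(txn, key).next(entry => entry instanceof Document);
}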

View File

@ -0,0 +1,92 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { DocumentKeySet, NullableMaybeDocumentMap } from '../model/collections';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { ObjectMap } from '../util/obj_map';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { SnapshotVersion } from '../core/snapshot_version';
/**
* An in-memory buffer of entries to be written to a RemoteDocumentCache.
* It can be used to batch up a set of changes to be written to the cache, but
* additionally supports reading entries back with the `getEntry()` method,
* falling back to the underlying RemoteDocumentCache if no entry is
* buffered.
*
* Entries added to the cache *must* be read first. This is to facilitate
* calculating the size delta of the pending changes.
*
* PORTING NOTE: This class was implemented then removed from other platforms.
* If byte-counting ends up being needed on the other platforms, consider
* porting this class as part of that implementation work.
*/
export declare abstract class RemoteDocumentChangeBuffer {
protected changes: ObjectMap<DocumentKey, MaybeDocument | null>;
private _readTime;
private changesApplied;
protected abstract getFromCache(transaction: PersistenceTransaction, documentKey: DocumentKey): PersistencePromise<MaybeDocument | null>;
protected abstract getAllFromCache(transaction: PersistenceTransaction, documentKeys: DocumentKeySet): PersistencePromise<NullableMaybeDocumentMap>;
protected abstract applyChanges(transaction: PersistenceTransaction): PersistencePromise<void>;
protected set readTime(value: SnapshotVersion);
protected get readTime(): SnapshotVersion;
/**
* Buffers a `RemoteDocumentCache.addEntry()` call.
*
* You can only modify documents that have already been retrieved via
     * `getEntry()/getEntries()` (enforced via IndexedDb's `apply()`).
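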
*/
addEntry(maybeDocument: MaybeDocument, readTime: SnapshotVersion): void;
/**
* Buffers a `RemoteDocumentCache.removeEntry()` call.
*
* You can only remove documents that have already been retrieved via
     * `getEntry()/getEntries()` (enforced via IndexedDb's `apply()`).
*/
removeEntry(key: DocumentKey, readTime?: SnapshotVersion): void;
/**
* Looks up an entry in the cache. The buffered changes will first be checked,
* and if no buffered change applies, this will forward to
* `RemoteDocumentCache.getEntry()`.
*
* @param transaction The transaction in which to perform any persistence
* operations.
* @param documentKey The key of the entry to look up.
* @return The cached Document or NoDocument entry, or null if we have nothing
* cached.
*/
getEntry(transaction: PersistenceTransaction, documentKey: DocumentKey): PersistencePromise<MaybeDocument | null>;
/**
* Looks up several entries in the cache, forwarding to
* `RemoteDocumentCache.getEntry()`.
*
* @param transaction The transaction in which to perform any persistence
* operations.
* @param documentKeys The keys of the entries to look up.
* @return A map of cached `Document`s or `NoDocument`s, indexed by key. If an
* entry cannot be found, the corresponding key will be mapped to a null
* value.
*/
getEntries(transaction: PersistenceTransaction, documentKeys: DocumentKeySet): PersistencePromise<NullableMaybeDocumentMap>;
/**
* Applies buffered changes to the underlying RemoteDocumentCache, using
* the provided transaction.
*/
apply(transaction: PersistenceTransaction): PersistencePromise<void>;
/** Helper to assert this.changes is not null */
protected assertNotApplied(): void;
}
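
// Illustrative write path (a sketch, not part of this file): the buffer
// requires entries to be read before they are modified so it can compute the
// size delta. Import paths and `PersistencePromise.next()` chaining are
// assumptions.
import { RemoteDocumentCache } from './remote_document_cache';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { MaybeDocument } from '../model/document';
import { SnapshotVersion } from '../core/snapshot_version';

function upsert(
  cache: RemoteDocumentCache,
  txn: PersistenceTransaction,
  doc: MaybeDocument,
  readTime: SnapshotVersion
): PersistencePromise<void> {
  const buffer = cache.newChangeBuffer();
  return buffer
    .getEntry(txn, doc.key)          // read first (enforced by the buffer)
    .next(() => buffer.addEntry(doc, readTime))
    .next(() => buffer.apply(txn));  // flush the buffered change to the cache
}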

View File

@ -0,0 +1,301 @@
/**
* @license
* Copyright 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { User } from '../auth/user';
import { BatchId, ListenSequenceNumber, MutationBatchState, OnlineState, TargetId } from '../core/types';
import { TargetIdSet } from '../model/collections';
import { Platform } from '../platform/platform';
import { AsyncQueue } from '../util/async_queue';
import { FirestoreError } from '../util/error';
import { SortedSet } from '../util/sorted_set';
import { QueryTargetState, SharedClientStateSyncer } from './shared_client_state_syncer';
/**
* A randomly-generated key assigned to each Firestore instance at startup.
*/
export declare type ClientId = string;
/**
* A `SharedClientState` keeps track of the global state of the mutations
* and query targets for all active clients with the same persistence key (i.e.
* project ID and FirebaseApp name). It relays local changes to other clients
* and updates its local state as new state is observed.
*
* `SharedClientState` is primarily used for synchronization in Multi-Tab
* environments. Each tab is responsible for registering its active query
* targets and mutations. `SharedClientState` will then notify the listener
* assigned to `.syncEngine` for updates to mutations and queries that
* originated in other clients.
*
 * To receive notifications, `.syncEngine` and `.onlineStateHandler` have to be
* assigned before calling `start()`.
*/
export interface SharedClientState {
syncEngine: SharedClientStateSyncer | null;
onlineStateHandler: ((onlineState: OnlineState) => void) | null;
sequenceNumberHandler: ((sequenceNumber: ListenSequenceNumber) => void) | null;
/** Registers the Mutation Batch ID of a newly pending mutation. */
addPendingMutation(batchId: BatchId): void;
/**
* Records that a pending mutation has been acknowledged or rejected.
* Called by the primary client to notify secondary clients of mutation
* results as they come back from the backend.
*/
updateMutationState(batchId: BatchId, state: 'acknowledged' | 'rejected', error?: FirestoreError): void;
/**
* Associates a new Query Target ID with the local Firestore client. Returns
* the new query state for the query (which can be 'current' if the query is
* already associated with another tab).
*
     * If the target ID is already associated with the local client, the method simply
* returns its `QueryTargetState`.
*/
addLocalQueryTarget(targetId: TargetId): QueryTargetState;
/** Removes the Query Target ID association from the local client. */
removeLocalQueryTarget(targetId: TargetId): void;
/** Checks whether the target is associated with the local client. */
isLocalQueryTarget(targetId: TargetId): boolean;
/**
* Processes an update to a query target.
*
* Called by the primary client to notify secondary clients of document
* changes or state transitions that affect the provided query target.
*/
updateQueryState(targetId: TargetId, state: QueryTargetState, error?: FirestoreError): void;
/**
* Removes the target's metadata entry.
*
     * Called by the primary client when all clients have stopped listening to a query
* target.
*/
clearQueryState(targetId: TargetId): void;
/**
     * Gets the active Query Target IDs for all active clients.
*
* The implementation for this may require O(n) runtime, where 'n' is the size
* of the result set.
*/
getAllActiveQueryTargets(): SortedSet<TargetId>;
/**
* Checks whether the provided target ID is currently being listened to by
* any of the active clients.
*
* The implementation may require O(n*log m) runtime, where 'n' is the number
* of clients and 'm' the number of targets.
*/
isActiveQueryTarget(targetId: TargetId): boolean;
/**
* Starts the SharedClientState, reads existing client data and registers
* listeners for updates to new and existing clients.
*/
start(): Promise<void>;
/** Shuts down the `SharedClientState` and its listeners. */
shutdown(): void;
/**
* Changes the active user and removes all existing user-specific data. The
* user change does not call back into SyncEngine (for example, no mutations
* will be marked as removed).
*/
handleUserChange(user: User, removedBatchIds: BatchId[], addedBatchIds: BatchId[]): void;
/** Changes the shared online state of all clients. */
setOnlineState(onlineState: OnlineState): void;
writeSequenceNumber(sequenceNumber: ListenSequenceNumber): void;
}
/**
* Holds the state of a mutation batch, including its user ID, batch ID and
* whether the batch is 'pending', 'acknowledged' or 'rejected'.
*/
export declare class MutationMetadata {
readonly user: User;
readonly batchId: BatchId;
readonly state: MutationBatchState;
readonly error?: FirestoreError | undefined;
constructor(user: User, batchId: BatchId, state: MutationBatchState, error?: FirestoreError | undefined);
/**
* Parses a MutationMetadata from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
static fromWebStorageEntry(user: User, batchId: BatchId, value: string): MutationMetadata | null;
toWebStorageJSON(): string;
}
/**
* Holds the state of a query target, including its target ID and whether the
* target is 'not-current', 'current' or 'rejected'.
*/
export declare class QueryTargetMetadata {
readonly targetId: TargetId;
readonly state: QueryTargetState;
readonly error?: FirestoreError | undefined;
constructor(targetId: TargetId, state: QueryTargetState, error?: FirestoreError | undefined);
/**
* Parses a QueryTargetMetadata from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
static fromWebStorageEntry(targetId: TargetId, value: string): QueryTargetMetadata | null;
toWebStorageJSON(): string;
}
/**
* Metadata state of a single client denoting the query targets it is actively
* listening to.
*/
export interface ClientState {
readonly activeTargetIds: TargetIdSet;
}
/**
* This class represents the online state for all clients participating in
* multi-tab. The online state is only written to by the primary client, and
* used in secondary clients to update their query views.
*/
export declare class SharedOnlineState {
readonly clientId: string;
readonly onlineState: OnlineState;
constructor(clientId: string, onlineState: OnlineState);
/**
* Parses a SharedOnlineState from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
static fromWebStorageEntry(value: string): SharedOnlineState | null;
}
/**
* Metadata state of the local client. Unlike `RemoteClientState`, this class is
* mutable and keeps track of all pending mutations, which allows us to
* update the range of pending mutation batch IDs as new mutations are added or
* removed.
*
* The data in `LocalClientState` is not read from WebStorage and instead
* updated via its instance methods. The updated state can be serialized via
* `toWebStorageJSON()`.
*/
export declare class LocalClientState implements ClientState {
activeTargetIds: SortedSet<number>;
addQueryTarget(targetId: TargetId): void;
removeQueryTarget(targetId: TargetId): void;
/**
* Converts this entry into a JSON-encoded format we can use for WebStorage.
* Does not encode `clientId` as it is part of the key in WebStorage.
*/
toWebStorageJSON(): string;
}
/**
* `WebStorageSharedClientState` uses WebStorage (window.localStorage) as the
* backing store for the SharedClientState. It keeps track of all active
* clients and supports modifications of the local client's data.
*/
export declare class WebStorageSharedClientState implements SharedClientState {
private readonly queue;
private readonly platform;
private readonly persistenceKey;
private readonly localClientId;
syncEngine: SharedClientStateSyncer | null;
onlineStateHandler: ((onlineState: OnlineState) => void) | null;
sequenceNumberHandler: ((sequenceNumber: ListenSequenceNumber) => void) | null;
private readonly storage;
private readonly localClientStorageKey;
private readonly sequenceNumberKey;
private readonly activeClients;
private readonly storageListener;
private readonly onlineStateKey;
private readonly clientStateKeyRe;
private readonly mutationBatchKeyRe;
private readonly queryTargetKeyRe;
private started;
private currentUser;
/**
* Captures WebStorage events that occur before `start()` is called. These
* events are replayed once `WebStorageSharedClientState` is started.
*/
private earlyEvents;
constructor(queue: AsyncQueue, platform: Platform, persistenceKey: string, localClientId: ClientId, initialUser: User);
/** Returns 'true' if WebStorage is available in the current environment. */
static isAvailable(platform: Platform): boolean;
start(): Promise<void>;
writeSequenceNumber(sequenceNumber: ListenSequenceNumber): void;
getAllActiveQueryTargets(): TargetIdSet;
isActiveQueryTarget(targetId: TargetId): boolean;
addPendingMutation(batchId: BatchId): void;
updateMutationState(batchId: BatchId, state: 'acknowledged' | 'rejected', error?: FirestoreError): void;
addLocalQueryTarget(targetId: TargetId): QueryTargetState;
removeLocalQueryTarget(targetId: TargetId): void;
isLocalQueryTarget(targetId: TargetId): boolean;
clearQueryState(targetId: TargetId): void;
updateQueryState(targetId: TargetId, state: QueryTargetState, error?: FirestoreError): void;
handleUserChange(user: User, removedBatchIds: BatchId[], addedBatchIds: BatchId[]): void;
setOnlineState(onlineState: OnlineState): void;
shutdown(): void;
private getItem;
private setItem;
private removeItem;
private handleWebStorageEvent;
private get localClientState();
private persistClientState;
private persistMutationState;
private removeMutationState;
private persistOnlineState;
private persistQueryTargetState;
/**
* Parses a client state key in WebStorage. Returns null if the key does not
* match the expected key format.
*/
private fromWebStorageClientStateKey;
/**
* Parses a client state in WebStorage. Returns 'null' if the value could not
* be parsed.
*/
private fromWebStorageClientState;
/**
* Parses a mutation batch state in WebStorage. Returns 'null' if the value
* could not be parsed.
*/
private fromWebStorageMutationMetadata;
/**
* Parses a query target state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/
private fromWebStorageQueryTargetMetadata;
/**
* Parses an online state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/
private fromWebStorageOnlineState;
private handleMutationBatchEvent;
private handleQueryTargetEvent;
private handleClientStateEvent;
private handleOnlineStateEvent;
}
/**
* `MemorySharedClientState` is a simple implementation of SharedClientState for
* clients using memory persistence. The state in this class remains fully
* isolated and no synchronization is performed.
*/
export declare class MemorySharedClientState implements SharedClientState {
private localState;
private queryState;
syncEngine: SharedClientStateSyncer | null;
onlineStateHandler: ((onlineState: OnlineState) => void) | null;
sequenceNumberHandler: ((sequenceNumber: ListenSequenceNumber) => void) | null;
addPendingMutation(batchId: BatchId): void;
updateMutationState(batchId: BatchId, state: 'acknowledged' | 'rejected', error?: FirestoreError): void;
addLocalQueryTarget(targetId: TargetId): QueryTargetState;
updateQueryState(targetId: TargetId, state: QueryTargetState, error?: FirestoreError): void;
removeLocalQueryTarget(targetId: TargetId): void;
isLocalQueryTarget(targetId: TargetId): boolean;
clearQueryState(targetId: TargetId): void;
getAllActiveQueryTargets(): TargetIdSet;
isActiveQueryTarget(targetId: TargetId): boolean;
start(): Promise<void>;
handleUserChange(user: User, removedBatchIds: BatchId[], addedBatchIds: BatchId[]): void;
setOnlineState(onlineState: OnlineState): void;
shutdown(): void;
writeSequenceNumber(sequenceNumber: ListenSequenceNumber): void;
}
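
// Illustrative wiring sketch: a single-tab client backed by
// MemorySharedClientState. The handler body and import path are assumptions;
// with memory persistence the state below never leaves this client.
import { MemorySharedClientState } from './shared_client_state';
import { OnlineState, TargetId } from '../core/types';

const clientState = new MemorySharedClientState();
clientState.onlineStateHandler = (onlineState: OnlineState) => {
  console.log('online state changed to', onlineState);
};

function listenTo(targetId: TargetId): void {
  // For memory persistence the returned state reflects only this client
  // (typically 'not-current' for a brand-new target).
  const state = clientState.addLocalQueryTarget(targetId);
  console.log(`target ${targetId} registered with state '${state}'`);
  console.log(clientState.isActiveQueryTarget(targetId)); // true
}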

View File

@ -0,0 +1,82 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { BatchId, MutationBatchState, TargetId } from '../core/types';
import { QueryTargetState } from './shared_client_state_syncer';
import { ClientId } from './shared_client_state';
import { User } from '../auth/user';
export declare const CLIENT_STATE_KEY_PREFIX = "firestore_clients";
/** Assembles the key for a client state in WebStorage */
export declare function createWebStorageClientStateKey(persistenceKey: string, clientId: ClientId): string;
/**
 * The JSON representation of a client's metadata as used during WebStorage
* serialization. The ClientId is omitted here as it is encoded as part of the
* key.
*/
export interface ClientStateSchema {
activeTargetIds: number[];
updateTimeMs: number;
}
export declare const MUTATION_BATCH_KEY_PREFIX = "firestore_mutations";
/** Assembles the key for a mutation batch in WebStorage */
export declare function createWebStorageMutationBatchKey(persistenceKey: string, user: User, batchId: BatchId): string;
/**
* The JSON representation of a mutation batch's metadata as used during
 * WebStorage serialization. The UserId and BatchId are omitted as they are
 * encoded as part of the key.
*/
export interface MutationMetadataSchema {
state: MutationBatchState;
error?: {
code: string;
message: string;
};
updateTimeMs: number;
}
export declare const QUERY_TARGET_KEY_PREFIX = "firestore_targets";
/** Assembles the key for a query state in WebStorage */
export declare function createWebStorageQueryTargetMetadataKey(persistenceKey: string, targetId: TargetId): string;
/**
* The JSON representation of a query target's state as used during WebStorage
* serialization. The TargetId is omitted as it is encoded as part of the key.
*/
export interface QueryTargetStateSchema {
state: QueryTargetState;
error?: {
code: string;
message: string;
};
updateTimeMs: number;
}
export declare const ONLINE_STATE_KEY_PREFIX = "firestore_online_state";
/** Assembles the key for the online state of the primary tab. */
export declare function createWebStorageOnlineStateKey(persistenceKey: string): string;
/**
* The JSON representation of the system's online state, as written by the
* primary client.
*/
export interface SharedOnlineStateSchema {
/**
* The clientId of the client that wrote this onlineState value. Tracked so
* that on startup, clients can check if this client is still active when
* determining whether to apply this value or not.
*/
readonly clientId: string;
readonly onlineState: string;
}
export declare const SEQUENCE_NUMBER_KEY_PREFIX = "firestore_sequence_number";
/** Assembles the key for the current sequence number. */
export declare function createWebStorageSequenceNumberKey(persistenceKey: string): string;
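
// Illustrative sketch: composing WebStorage keys for the local client. The
// module name in the import and the sample IDs are guesses; the declared
// prefixes suggest that each key starts with the corresponding 'firestore_*'
// prefix, so the checks below are expected (not guaranteed) to hold.
import {
  CLIENT_STATE_KEY_PREFIX,
  ONLINE_STATE_KEY_PREFIX,
  createWebStorageClientStateKey,
  createWebStorageOnlineStateKey
} from './shared_client_state_schema';

const persistenceKey = 'example-persistence-key'; // hypothetical
const clientKey = createWebStorageClientStateKey(persistenceKey, 'client-1234');
const onlineKey = createWebStorageOnlineStateKey(persistenceKey);

// A storage listener can cheaply filter events by prefix before parsing them.
console.log(clientKey.startsWith(CLIENT_STATE_KEY_PREFIX)); // expected: true
console.log(onlineKey.startsWith(ONLINE_STATE_KEY_PREFIX)); // expected: true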

View File

@ -0,0 +1,35 @@
/**
* @license
* Copyright 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { BatchId, MutationBatchState, TargetId } from '../core/types';
import { FirestoreError } from '../util/error';
import { ClientId } from './shared_client_state';
/** The different states of a watch target. */
export declare type QueryTargetState = 'not-current' | 'current' | 'rejected';
/**
* An interface that describes the actions the SharedClientState class needs to
* perform on a cooperating synchronization engine.
*/
export interface SharedClientStateSyncer {
/** Applies a mutation state to an existing batch. */
applyBatchState(batchId: BatchId, state: MutationBatchState, error?: FirestoreError): Promise<void>;
/** Applies a query target change from a different tab. */
applyTargetState(targetId: TargetId, state: QueryTargetState, error?: FirestoreError): Promise<void>;
/** Adds or removes Watch targets for queries from different tabs. */
applyActiveTargetsChange(added: TargetId[], removed: TargetId[]): Promise<void>;
/** Returns the IDs of the clients that are currently active. */
getActiveClients(): Promise<ClientId[]>;
}
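
// A minimal no-op implementation sketch of SharedClientStateSyncer, e.g. for
// tests or for a client that ignores cross-tab events. The class name and
// import paths are assumptions.
import { SharedClientStateSyncer, QueryTargetState } from './shared_client_state_syncer';
import { ClientId } from './shared_client_state';
import { BatchId, MutationBatchState, TargetId } from '../core/types';

class NoOpSharedClientStateSyncer implements SharedClientStateSyncer {
  constructor(private readonly clientId: ClientId) {}

  async applyBatchState(batchId: BatchId, state: MutationBatchState): Promise<void> {
    // A real sync engine would update the corresponding mutation batch here.
  }
  async applyTargetState(targetId: TargetId, state: QueryTargetState): Promise<void> {
    // A real sync engine would update the query view for `targetId` here.
  }
  async applyActiveTargetsChange(added: TargetId[], removed: TargetId[]): Promise<void> {
    // A real (primary) sync engine would start/stop Watch targets here.
  }
  async getActiveClients(): Promise<ClientId[]> {
    return [this.clientId];
  }
}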

View File

@ -0,0 +1,209 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { PersistencePromise } from './persistence_promise';
declare type SimpleDbTransactionMode = 'readonly' | 'readwrite' | 'readonly-idempotent' | 'readwrite-idempotent';
export interface SimpleDbSchemaConverter {
createOrUpgrade(db: IDBDatabase, txn: IDBTransaction, fromVersion: number, toVersion: number): PersistencePromise<void>;
}
/**
* Provides a wrapper around IndexedDb with a simplified interface that uses
* Promise-like return values to chain operations. Real promises cannot be used
* since .then() continuations are executed asynchronously (e.g. via
* .setImmediate), which would cause IndexedDB to end the transaction.
* See PersistencePromise for more details.
*/
export declare class SimpleDb {
private db;
/**
* Opens the specified database, creating or upgrading it if necessary.
*
* Note that `version` must not be a downgrade. IndexedDB does not support downgrading the schema
* version. We currently do not support any way to do versioning outside of IndexedDB's versioning
* mechanism, as only version-upgrade transactions are allowed to do things like create
 * object stores.
*/
static openOrCreate(name: string, version: number, schemaConverter: SimpleDbSchemaConverter): Promise<SimpleDb>;
/** Deletes the specified database. */
static delete(name: string): Promise<void>;
/** Returns true if IndexedDB is available in the current environment. */
static isAvailable(): boolean;
/**
* Returns true if the backing IndexedDB store is the Node IndexedDBShim
* (see https://github.com/axemclion/IndexedDBShim).
*/
static isMockPersistence(): boolean;
/** Helper to get a typed SimpleDbStore from a transaction. */
static getStore<KeyType extends IDBValidKey, ValueType extends unknown>(txn: SimpleDbTransaction, store: string): SimpleDbStore<KeyType, ValueType>;
/** Parse User Agent to determine iOS version. Returns -1 if not found. */
static getIOSVersion(ua: string): number;
/** Parse User Agent to determine Android version. Returns -1 if not found. */
static getAndroidVersion(ua: string): number;
constructor(db: IDBDatabase);
setVersionChangeListener(versionChangeListener: (event: IDBVersionChangeEvent) => void): void;
runTransaction<T>(mode: SimpleDbTransactionMode, objectStores: string[], transactionFn: (transaction: SimpleDbTransaction) => PersistencePromise<T>): Promise<T>;
close(): void;
}
/**
* A controller for iterating over a key range or index. It allows an iterate
* callback to delete the currently-referenced object, or jump to a new key
* within the key range or index.
*/
export declare class IterationController {
private dbCursor;
private shouldStop;
private nextKey;
constructor(dbCursor: IDBCursorWithValue);
get isDone(): boolean;
get skipToKey(): IDBValidKey | null;
set cursor(value: IDBCursorWithValue);
/**
* This function can be called to stop iteration at any point.
*/
done(): void;
/**
     * This function can be called to skip to the next key, which could be
* an index or a primary key.
*/
skip(key: IDBValidKey): void;
/**
* Delete the current cursor value from the object store.
*
* NOTE: You CANNOT do this with a keysOnly query.
*/
delete(): PersistencePromise<void>;
}
/**
* Callback used with iterate() method.
*/
export declare type IterateCallback<KeyType, ValueType> = (key: KeyType, value: ValueType, control: IterationController) => void | PersistencePromise<void>;
/** Options available to the iterate() method. */
export interface IterateOptions {
/** Index to iterate over (else primary keys will be iterated) */
index?: string;
    /** IndexedDB range to iterate over (else the entire store will be iterated) */
range?: IDBKeyRange;
/** If true, values aren't read while iterating. */
keysOnly?: boolean;
/** If true, iterate over the store in reverse. */
reverse?: boolean;
}
/**
* Wraps an IDBTransaction and exposes a store() method to get a handle to a
* specific object store.
*/
export declare class SimpleDbTransaction {
private readonly transaction;
private aborted;
/**
* A promise that resolves with the result of the IndexedDb transaction.
*/
private readonly completionDeferred;
static open(db: IDBDatabase, mode: IDBTransactionMode, objectStoreNames: string[]): SimpleDbTransaction;
constructor(transaction: IDBTransaction);
get completionPromise(): Promise<void>;
abort(error?: Error): void;
/**
* Returns a SimpleDbStore<KeyType, ValueType> for the specified store. All
* operations performed on the SimpleDbStore happen within the context of this
* transaction and it cannot be used anymore once the transaction is
* completed.
*
* Note that we can't actually enforce that the KeyType and ValueType are
* correct, but they allow type safety through the rest of the consuming code.
*/
store<KeyType extends IDBValidKey, ValueType extends unknown>(storeName: string): SimpleDbStore<KeyType, ValueType>;
}
/**
* A wrapper around an IDBObjectStore providing an API that:
*
* 1) Has generic KeyType / ValueType parameters to provide strongly-typed
* methods for acting against the object store.
* 2) Deals with IndexedDB's onsuccess / onerror event callbacks, making every
* method return a PersistencePromise instead.
* 3) Provides a higher-level API to avoid needing to do excessive wrapping of
* intermediate IndexedDB types (IDBCursorWithValue, etc.)
*/
export declare class SimpleDbStore<KeyType extends IDBValidKey, ValueType extends unknown> {
private store;
constructor(store: IDBObjectStore);
/**
* Writes a value into the Object Store.
*
* @param key Optional explicit key to use when writing the object, else the
* key will be auto-assigned (e.g. via the defined keyPath for the store).
* @param value The object to write.
*/
put(value: ValueType): PersistencePromise<void>;
put(key: KeyType, value: ValueType): PersistencePromise<void>;
/**
* Adds a new value into an Object Store and returns the new key. Similar to
* IndexedDb's `add()`, this method will fail on primary key collisions.
*
* @param value The object to write.
* @return The key of the value to add.
*/
add(value: ValueType): PersistencePromise<KeyType>;
/**
* Gets the object with the specified key from the specified store, or null
* if no object exists with the specified key.
*
     * @param key The key of the object to get.
* @return The object with the specified key or null if no object exists.
*/
get(key: KeyType): PersistencePromise<ValueType | null>;
delete(key: KeyType | IDBKeyRange): PersistencePromise<void>;
/**
* If we ever need more of the count variants, we can add overloads. For now,
* all we need is to count everything in a store.
*
* Returns the number of rows in the store.
*/
count(): PersistencePromise<number>;
loadAll(): PersistencePromise<ValueType[]>;
loadAll(range: IDBKeyRange): PersistencePromise<ValueType[]>;
loadAll(index: string, range: IDBKeyRange): PersistencePromise<ValueType[]>;
deleteAll(): PersistencePromise<void>;
deleteAll(range: IDBKeyRange): PersistencePromise<void>;
deleteAll(index: string, range: IDBKeyRange): PersistencePromise<void>;
/**
* Iterates over keys and values in an object store.
*
* @param options Options specifying how to iterate the objects in the store.
     * @param callback will be called for each iterated object. Iteration can be
     * canceled at any point by calling `done()` on the IterationController that
     * is passed to the callback.
* The callback can return a PersistencePromise if it performs async
* operations but note that iteration will continue without waiting for them
* to complete.
* @returns A PersistencePromise that resolves once all PersistencePromises
* returned by callbacks resolve.
*/
iterate(callback: IterateCallback<KeyType, ValueType>): PersistencePromise<void>;
iterate(options: IterateOptions, callback: IterateCallback<KeyType, ValueType>): PersistencePromise<void>;
/**
* Iterates over a store, but waits for the given callback to complete for
* each entry before iterating the next entry. This allows the callback to do
* asynchronous work to determine if this iteration should continue.
*
* The provided callback should return `true` to continue iteration, and
* `false` otherwise.
*/
iterateSerial(callback: (k: KeyType, v: ValueType) => PersistencePromise<boolean>): PersistencePromise<void>;
private iterateCursor;
private options;
private cursor;
}
export {};
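
// Illustrative sketch: opening a SimpleDb and reading/writing a single store.
// The database/store names are made up, and `PersistencePromise.resolve()` /
// `.next()` are assumed to behave like their Promise counterparts.
import { SimpleDb, SimpleDbSchemaConverter } from './simple_db';
import { PersistencePromise } from './persistence_promise';

const schemaConverter: SimpleDbSchemaConverter = {
  createOrUpgrade(db: IDBDatabase): PersistencePromise<void> {
    // Object stores may only be created inside a version-upgrade transaction.
    db.createObjectStore('docs');
    return PersistencePromise.resolve();
  }
};

async function writeAndRead(): Promise<string | null> {
  const db = await SimpleDb.openOrCreate('example-db', 1, schemaConverter);
  return db.runTransaction('readwrite', ['docs'], txn => {
    const store = txn.store<string, string>('docs');
    return store.put('greeting', 'hello').next(() => store.get('greeting'));
  });
}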

View File

@ -0,0 +1,33 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { Query } from '../core/query';
import { DocumentKeySet, DocumentMap } from '../model/collections';
import { LocalDocumentsView } from './local_documents_view';
import { QueryEngine } from './query_engine';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
/**
* A naive implementation of QueryEngine that just loads all the documents in
* the queried collection and then filters them in memory.
*/
export declare class SimpleQueryEngine implements QueryEngine {
private localDocumentsView;
setLocalDocumentsView(localDocuments: LocalDocumentsView): void;
/** Returns all local documents matching the specified query. */
getDocumentsMatchingQuery(transaction: PersistenceTransaction, query: Query, lastLimboFreeSnapshotVersion: SnapshotVersion, remoteKeys: DocumentKeySet): PersistencePromise<DocumentMap>;
}

View File

@ -0,0 +1,138 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { ListenSequenceNumber, TargetId } from '../core/types';
import { DocumentKeySet } from '../model/collections';
import { DocumentKey } from '../model/document_key';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { TargetData } from './target_data';
import { Target } from '../core/target';
/**
* Represents cached targets received from the remote backend.
*
* The cache is keyed by `Target` and entries in the cache are `TargetData`
* instances.
*/
export interface TargetCache {
/**
* A global snapshot version representing the last consistent snapshot we
* received from the backend. This is monotonically increasing and any
* snapshots received from the backend prior to this version (e.g. for targets
* resumed with a resume_token) should be suppressed (buffered) until the
* backend has caught up to this snapshot version again. This prevents our
* cache from ever going backwards in time.
*
     * This is updated whenever we get a TargetChange with a read_time and
* empty target_ids.
*/
getLastRemoteSnapshotVersion(transaction: PersistenceTransaction): PersistencePromise<SnapshotVersion>;
/**
* @return The highest sequence number observed, including any that might be
* persisted on-disk.
*/
getHighestSequenceNumber(transaction: PersistenceTransaction): PersistencePromise<ListenSequenceNumber>;
/**
* Call provided function with each `TargetData` that we have cached.
*/
forEachTarget(txn: PersistenceTransaction, f: (q: TargetData) => void): PersistencePromise<void>;
/**
     * Sets the highest listen sequence number and optionally updates the
* snapshot version of the last consistent snapshot received from the backend
* (see getLastRemoteSnapshotVersion() for more details).
*
* @param highestListenSequenceNumber The new maximum listen sequence number.
* @param lastRemoteSnapshotVersion The new snapshot version. Optional.
*/
setTargetsMetadata(transaction: PersistenceTransaction, highestListenSequenceNumber: number, lastRemoteSnapshotVersion?: SnapshotVersion): PersistencePromise<void>;
/**
* Adds an entry in the cache.
*
* The cache key is extracted from `targetData.target`. The key must not already
* exist in the cache.
*
* @param targetData A TargetData instance to put in the cache.
*/
addTargetData(transaction: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
/**
* Updates an entry in the cache.
*
* The cache key is extracted from `targetData.target`. The entry must already
* exist in the cache, and it will be replaced.
     * @param targetData The TargetData to replace the existing entry with.
*/
updateTargetData(transaction: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
/**
* Removes the cached entry for the given target data. It is an error to remove
* a target data that does not exist.
*
* Multi-Tab Note: This operation should only be called by the primary client.
*/
removeTargetData(transaction: PersistenceTransaction, targetData: TargetData): PersistencePromise<void>;
/**
* The number of targets currently in the cache.
*/
getTargetCount(transaction: PersistenceTransaction): PersistencePromise<number>;
/**
* Looks up a TargetData entry by target.
*
* @param target The query target corresponding to the entry to look up.
* @return The cached TargetData entry, or null if the cache has no entry for
* the target.
*/
getTargetData(transaction: PersistenceTransaction, target: Target): PersistencePromise<TargetData | null>;
/**
* Looks up a TargetData entry by target ID.
*
* @param targetId The target ID of the TargetData entry to look up.
* @return The cached TargetData entry, or null if the cache has no entry for
* the target.
*/
getTargetDataForTarget(txn: PersistenceTransaction, targetId: TargetId): PersistencePromise<TargetData | null>;
/**
* Adds the given document keys to cached query results of the given target
* ID.
*
* Multi-Tab Note: This operation should only be called by the primary client.
*/
addMatchingKeys(transaction: PersistenceTransaction, keys: DocumentKeySet, targetId: TargetId): PersistencePromise<void>;
/**
* Removes the given document keys from the cached query results of the
* given target ID.
*
* Multi-Tab Note: This operation should only be called by the primary client.
*/
removeMatchingKeys(transaction: PersistenceTransaction, keys: DocumentKeySet, targetId: TargetId): PersistencePromise<void>;
/**
* Removes all the keys in the query results of the given target ID.
*
* Multi-Tab Note: This operation should only be called by the primary client.
*/
removeMatchingKeysForTargetId(transaction: PersistenceTransaction, targetId: TargetId): PersistencePromise<void>;
/**
* Returns the document keys that match the provided target ID.
*/
getMatchingKeysForTargetId(transaction: PersistenceTransaction, targetId: TargetId): PersistencePromise<DocumentKeySet>;
/**
     * Returns a new target ID that is higher than any target ID in the cache.
     * If there are no targets in the cache, returns the first valid target ID.
* Allocated target IDs are persisted and `allocateTargetId()` will never
* return the same ID twice.
*/
allocateTargetId(transaction: PersistenceTransaction): PersistencePromise<TargetId>;
containsKey(transaction: PersistenceTransaction, key: DocumentKey): PersistencePromise<boolean>;
}
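
// Illustrative sketch: allocating a fresh target ID and inspecting the cache
// size in one persistence transaction. Import paths and `.next()` chaining on
// PersistencePromise are assumptions.
import { TargetCache } from './target_cache';
import { PersistenceTransaction } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { TargetId } from '../core/types';

function allocateAndCount(
  targetCache: TargetCache,
  txn: PersistenceTransaction
): PersistencePromise<{ targetId: TargetId; targetCount: number }> {
  return targetCache.allocateTargetId(txn).next(targetId =>
    targetCache
      .getTargetCount(txn)
      .next(targetCount => ({ targetId, targetCount }))
  );
}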

View File

@ -0,0 +1,105 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { Target } from '../core/target';
import { ListenSequenceNumber, ProtoByteString, TargetId } from '../core/types';
/** An enumeration of the different purposes we have for targets. */
export declare enum TargetPurpose {
/** A regular, normal query target. */
Listen = 0,
/**
* The query target was used to refill a query after an existence filter mismatch.
*/
ExistenceFilterMismatch = 1,
/** The query target was used to resolve a limbo document. */
LimboResolution = 2
}
/**
* An immutable set of metadata that the local store tracks for each target.
*/
export declare class TargetData {
/** The target being listened to. */
readonly target: Target;
/**
* The target ID to which the target corresponds; Assigned by the
* LocalStore for user listens and by the SyncEngine for limbo watches.
*/
readonly targetId: TargetId;
/** The purpose of the target. */
readonly purpose: TargetPurpose;
/**
* The sequence number of the last transaction during which this target data
* was modified.
*/
readonly sequenceNumber: ListenSequenceNumber;
/** The latest snapshot version seen for this target. */
readonly snapshotVersion: SnapshotVersion;
/**
* The maximum snapshot version at which the associated view
* contained no limbo documents.
*/
readonly lastLimboFreeSnapshotVersion: SnapshotVersion;
/**
* An opaque, server-assigned token that allows watching a target to be
* resumed after disconnecting without retransmitting all the data that
* matches the target. The resume token essentially identifies a point in
* time from which the server should resume sending results.
*/
readonly resumeToken: ProtoByteString;
constructor(
/** The target being listened to. */
target: Target,
/**
* The target ID to which the target corresponds; Assigned by the
* LocalStore for user listens and by the SyncEngine for limbo watches.
*/
targetId: TargetId,
/** The purpose of the target. */
purpose: TargetPurpose,
/**
* The sequence number of the last transaction during which this target data
* was modified.
*/
sequenceNumber: ListenSequenceNumber,
/** The latest snapshot version seen for this target. */
snapshotVersion?: SnapshotVersion,
/**
* The maximum snapshot version at which the associated view
* contained no limbo documents.
*/
lastLimboFreeSnapshotVersion?: SnapshotVersion,
/**
* An opaque, server-assigned token that allows watching a target to be
* resumed after disconnecting without retransmitting all the data that
* matches the target. The resume token essentially identifies a point in
* time from which the server should resume sending results.
*/
resumeToken?: ProtoByteString);
/** Creates a new target data instance with an updated sequence number. */
withSequenceNumber(sequenceNumber: number): TargetData;
/**
* Creates a new target data instance with an updated resume token and
* snapshot version.
*/
withResumeToken(resumeToken: ProtoByteString, snapshotVersion: SnapshotVersion): TargetData;
/**
* Creates a new target data instance with an updated last limbo free
* snapshot version number.
*/
withLastLimboFreeSnapshotVersion(lastLimboFreeSnapshotVersion: SnapshotVersion): TargetData;
isEqual(other: TargetData): boolean;
}
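
// Illustrative sketch: TargetData is immutable, so updates produce new
// instances via the with*() helpers. The helper below and its import paths are
// hypothetical.
import { TargetData } from './target_data';
import { SnapshotVersion } from '../core/snapshot_version';
import { ProtoByteString } from '../core/types';

function afterSnapshot(
  targetData: TargetData,
  resumeToken: ProtoByteString,
  snapshotVersion: SnapshotVersion,
  sequenceNumber: number
): TargetData {
  // Record the new resume point and the sequence number of this transaction;
  // the original `targetData` is left untouched.
  return targetData
    .withResumeToken(resumeToken, snapshotVersion)
    .withSequenceNumber(sequenceNumber);
}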

View File

@ -0,0 +1,43 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { SortedMap } from '../util/sorted_map';
import { SortedSet } from '../util/sorted_set';
import { TargetId } from '../core/types';
import { Document, MaybeDocument } from './document';
import { DocumentKey } from './document_key';
/** Miscellaneous collection types / constants. */
export interface DocumentSizeEntry {
maybeDocument: MaybeDocument;
size: number;
}
export declare type MaybeDocumentMap = SortedMap<DocumentKey, MaybeDocument>;
export declare function maybeDocumentMap(): MaybeDocumentMap;
export declare type NullableMaybeDocumentMap = SortedMap<DocumentKey, MaybeDocument | null>;
export declare function nullableMaybeDocumentMap(): NullableMaybeDocumentMap;
export interface DocumentSizeEntries {
maybeDocuments: NullableMaybeDocumentMap;
sizeMap: SortedMap<DocumentKey, number>;
}
export declare type DocumentMap = SortedMap<DocumentKey, Document>;
export declare function documentMap(): DocumentMap;
export declare type DocumentVersionMap = SortedMap<DocumentKey, SnapshotVersion>;
export declare function documentVersionMap(): DocumentVersionMap;
export declare type DocumentKeySet = SortedSet<DocumentKey>;
export declare function documentKeySet(...keys: DocumentKey[]): DocumentKeySet;
export declare type TargetIdSet = SortedSet<TargetId>;
export declare function targetIdSet(): SortedSet<TargetId>;
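
// Illustrative sketch: the collection helpers return immutable sorted
// containers, so `add()` returns a new set rather than mutating in place
// (assuming the SortedSet API exposes `add`/`has` as in the util module).
import { documentKeySet } from './collections';
import { DocumentKey } from './document_key';

const base = documentKeySet(DocumentKey.fromPathString('rooms/eros'));
const extended = base.add(DocumentKey.fromPathString('rooms/other'));

console.log(base.has(DocumentKey.fromPathString('rooms/other')));      // false
console.log(extended.has(DocumentKey.fromPathString('rooms/other')));  // true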

View File

@ -0,0 +1,92 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { DocumentKey } from './document_key';
import { FieldValue, JsonObject, ObjectValue } from './field_value';
import { FieldPath } from './path';
import * as api from '../protos/firestore_proto_api';
export interface DocumentOptions {
hasLocalMutations?: boolean;
hasCommittedMutations?: boolean;
}
/**
* The result of a lookup for a given path may be an existing document or a
* marker that this document does not exist at a given version.
*/
export declare abstract class MaybeDocument {
readonly key: DocumentKey;
readonly version: SnapshotVersion;
constructor(key: DocumentKey, version: SnapshotVersion);
static compareByKey(d1: MaybeDocument, d2: MaybeDocument): number;
/**
* Whether this document had a local mutation applied that has not yet been
* acknowledged by Watch.
*/
abstract get hasPendingWrites(): boolean;
abstract isEqual(other: MaybeDocument | null | undefined): boolean;
abstract toString(): string;
}
/**
* Represents a document in Firestore with a key, version, data and whether the
* data has local mutations applied to it.
*/
export declare class Document extends MaybeDocument {
private objectValue?;
readonly proto?: api.firestoreV1ApiClientInterfaces.Document | undefined;
private readonly converter?;
readonly hasLocalMutations: boolean;
readonly hasCommittedMutations: boolean;
/**
* A cache of canonicalized FieldPaths to FieldValues that have already been
* deserialized in `getField()`.
*/
private fieldValueCache?;
constructor(key: DocumentKey, version: SnapshotVersion, options: DocumentOptions, objectValue?: ObjectValue | undefined, proto?: api.firestoreV1ApiClientInterfaces.Document | undefined, converter?: ((value: api.firestoreV1ApiClientInterfaces.Value) => FieldValue) | undefined);
field(path: FieldPath): FieldValue | null;
data(): ObjectValue;
value(): JsonObject<unknown>;
isEqual(other: MaybeDocument | null | undefined): boolean;
toString(): string;
get hasPendingWrites(): boolean;
/**
     * Returns the nested Protobuf value for `path`. Can only be called if
* `proto` was provided at construction time.
*/
private getProtoField;
static compareByField(field: FieldPath, d1: Document, d2: Document): number;
}
/**
* A class representing a deleted document.
 * Version is set to 0 if we don't point to any specific time; otherwise it
 * denotes the time at which we know the document did not exist.
*/
export declare class NoDocument extends MaybeDocument {
readonly hasCommittedMutations: boolean;
constructor(key: DocumentKey, version: SnapshotVersion, options?: DocumentOptions);
toString(): string;
get hasPendingWrites(): boolean;
isEqual(other: MaybeDocument | null | undefined): boolean;
}
/**
* A class representing an existing document whose data is unknown (e.g. a
* document that was updated without a known base document).
*/
export declare class UnknownDocument extends MaybeDocument {
toString(): string;
get hasPendingWrites(): boolean;
isEqual(other: MaybeDocument | null | undefined): boolean;
}
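
// Illustrative sketch: consumers distinguish the three MaybeDocument variants
// with instanceof checks. The import path is an assumption.
import { Document, MaybeDocument, NoDocument, UnknownDocument } from './document';

function describe(doc: MaybeDocument): string {
  if (doc instanceof Document) {
    return `exists at ${doc.version.toString()}` +
      (doc.hasLocalMutations ? ' (with pending local mutations)' : '');
  } else if (doc instanceof NoDocument) {
    return 'known to be missing';
  } else if (doc instanceof UnknownDocument) {
    return 'exists, but its contents are unknown';
  }
  return 'unrecognized MaybeDocument variant';
}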

View File

@ -0,0 +1,19 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Document } from './document';
export declare type DocumentComparator = (doc1: Document, doc2: Document) => number;
export declare function compareByKey(doc1: Document, doc2: Document): number;
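
// Illustrative sketch: DocumentComparator is a plain comparison function, so
// it can be handed straight to Array.prototype.sort(). Import paths are
// assumptions.
import { Document } from './document';
import { compareByKey } from './document_comparator';

function sortByKey(docs: Document[]): Document[] {
  return docs.slice().sort(compareByKey);
}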

View File

@ -0,0 +1,43 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ResourcePath } from './path';
export declare class DocumentKey {
readonly path: ResourcePath;
constructor(path: ResourcePath);
/** Returns true if the document is in the specified collectionId. */
hasCollectionId(collectionId: string): boolean;
isEqual(other: DocumentKey | null): boolean;
toString(): string;
static EMPTY: DocumentKey;
static comparator(k1: DocumentKey, k2: DocumentKey): number;
static isDocumentKey(path: ResourcePath): boolean;
/**
* Creates and returns a new document key with the given segments.
*
* @param path The segments of the path to the document
* @return A new instance of DocumentKey
*/
static fromSegments(segments: string[]): DocumentKey;
/**
* Creates and returns a new document key using '/' to split the string into
* segments.
*
* @param path The slash-separated path string to the document
* @return A new instance of DocumentKey
*/
static fromPathString(path: string): DocumentKey;
}
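
// Illustrative sketch: the two factory methods produce equivalent keys, and
// the static comparator orders keys by path. The import path is an assumption.
import { DocumentKey } from './document_key';

const fromString = DocumentKey.fromPathString('users/alice');
const fromSegments = DocumentKey.fromSegments(['users', 'alice']);

console.log(fromString.isEqual(fromSegments));                  // true
console.log(fromString.hasCollectionId('users'));               // true
console.log(DocumentKey.comparator(fromString, fromSegments));  // 0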

View File

@ -0,0 +1,57 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Document } from './document';
import { DocumentComparator } from './document_comparator';
import { DocumentKey } from './document_key';
/**
* DocumentSet is an immutable (copy-on-write) collection that holds documents
* in order specified by the provided comparator. We always add a document key
* comparator on top of what is provided to guarantee document equality based on
* the key.
*/
export declare class DocumentSet {
/**
* Returns an empty copy of the existing DocumentSet, using the same
* comparator.
*/
static emptySet(oldSet: DocumentSet): DocumentSet;
private comparator;
private keyedMap;
private sortedSet;
/** The default ordering is by key if the comparator is omitted */
constructor(comp?: DocumentComparator);
has(key: DocumentKey): boolean;
get(key: DocumentKey): Document | null;
first(): Document | null;
last(): Document | null;
isEmpty(): boolean;
/**
* Returns the index of the provided key in the document set, or -1 if the
     * document key is not present in the set.
*/
indexOf(key: DocumentKey): number;
get size(): number;
/** Iterates documents in order defined by "comparator" */
forEach(cb: (doc: Document) => void): void;
/** Inserts or updates a document with the same key */
add(doc: Document): DocumentSet;
/** Deletes a document with a given key */
delete(key: DocumentKey): DocumentSet;
isEqual(other: DocumentSet | null | undefined): boolean;
toString(): string;
private copy;
}
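
// Illustrative sketch: DocumentSet is copy-on-write, so `add()` and `delete()`
// return new sets. The helper function below and its import paths are
// hypothetical.
import { Document } from './document';
import { DocumentSet } from './document_set';

function withDocuments(base: DocumentSet, docs: Document[]): DocumentSet {
  // Each add() returns a fresh DocumentSet ordered by the set's comparator
  // (by key when the set was built without an explicit comparator).
  return docs.reduce((set, doc) => set.add(doc), base);
}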

View File

@ -0,0 +1,248 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Blob } from '../api/blob';
import { SnapshotOptions } from '../api/database';
import { GeoPoint } from '../api/geo_point';
import { Timestamp } from '../api/timestamp';
import { DatabaseId } from '../core/database_info';
import { DocumentKey } from './document_key';
import { FieldMask } from './mutation';
import { FieldPath } from './path';
import { SortedMap } from '../util/sorted_map';
/**
* Supported data value types:
* - Null
* - Boolean
* - Long
* - Double
* - String
* - Object
* - Array
* - Binary
* - Timestamp
* - ServerTimestamp (a sentinel used in uncommitted writes)
* - GeoPoint
* - (Document) References
*/
export interface JsonObject<T> {
[name: string]: T;
}
export declare enum TypeOrder {
NullValue = 0,
BooleanValue = 1,
NumberValue = 2,
TimestampValue = 3,
StringValue = 4,
BlobValue = 5,
RefValue = 6,
GeoPointValue = 7,
ArrayValue = 8,
ObjectValue = 9
}
/** Defines the return value for pending server timestamps. */
export declare enum ServerTimestampBehavior {
Default = 0,
Estimate = 1,
Previous = 2
}
/** Holds properties that define field value deserialization options. */
export declare class FieldValueOptions {
readonly serverTimestampBehavior: ServerTimestampBehavior;
readonly timestampsInSnapshots: boolean;
constructor(serverTimestampBehavior: ServerTimestampBehavior, timestampsInSnapshots: boolean);
static fromSnapshotOptions(options: SnapshotOptions, timestampsInSnapshots: boolean): FieldValueOptions;
}
/**
* Potential types returned by FieldValue.value(). This could be stricter
* (instead of using {}), but there's little benefit.
*
* Note that currently we use AnyJs (which is identical except includes
* undefined) for incoming user data as a convenience to the calling code (but
* we'll throw if the data contains undefined). This should probably be changed
* to use FieldType, but all consuming code will have to be updated to
* explicitly handle undefined and then cast to FieldType or similar. Perhaps
* we should tackle this when adding robust argument validation to the API.
*/
export declare type FieldType = null | boolean | number | string | {};
/**
* A field value represents a datatype as stored by Firestore.
*/
export declare abstract class FieldValue {
abstract readonly typeOrder: TypeOrder;
abstract value(options?: FieldValueOptions): FieldType;
abstract isEqual(other: FieldValue): boolean;
abstract compareTo(other: FieldValue): number;
/**
* Returns an approximate (and wildly inaccurate) in-memory size for the field
* value.
*
* The memory size takes into account only the actual user data as it resides
* in memory and ignores object overhead.
*/
abstract approximateByteSize(): number;
toString(): string;
defaultCompareTo(other: FieldValue): number;
}
export declare class NullValue extends FieldValue {
typeOrder: TypeOrder;
readonly internalValue: null;
private constructor();
value(options?: FieldValueOptions): null;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
approximateByteSize(): number;
static INSTANCE: NullValue;
}
export declare class BooleanValue extends FieldValue {
readonly internalValue: boolean;
typeOrder: TypeOrder;
private constructor();
value(options?: FieldValueOptions): boolean;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
approximateByteSize(): number;
static of(value: boolean): BooleanValue;
static TRUE: BooleanValue;
static FALSE: BooleanValue;
}
/** Base class for IntegerValue and DoubleValue. */
export declare abstract class NumberValue extends FieldValue {
readonly internalValue: number;
typeOrder: TypeOrder;
constructor(internalValue: number);
value(options?: FieldValueOptions): number;
compareTo(other: FieldValue): number;
approximateByteSize(): number;
}
export declare class IntegerValue extends NumberValue {
isEqual(other: FieldValue): boolean;
}
export declare class DoubleValue extends NumberValue {
static NAN: DoubleValue;
static POSITIVE_INFINITY: DoubleValue;
static NEGATIVE_INFINITY: DoubleValue;
isEqual(other: FieldValue): boolean;
}
export declare class StringValue extends FieldValue {
readonly internalValue: string;
typeOrder: TypeOrder;
constructor(internalValue: string);
value(options?: FieldValueOptions): string;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
approximateByteSize(): number;
}
export declare class TimestampValue extends FieldValue {
readonly internalValue: Timestamp;
typeOrder: TypeOrder;
constructor(internalValue: Timestamp);
value(options?: FieldValueOptions): Date | Timestamp;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
approximateByteSize(): number;
}
/**
* Represents a locally-applied ServerTimestamp.
*
* Notes:
* - ServerTimestampValue instances are created as the result of applying a
* TransformMutation (see TransformMutation.applyTo()). They can only exist in
* the local view of a document. Therefore they do not need to be parsed or
* serialized.
* - When evaluated locally (e.g. for snapshot.data()), they by default
* evaluate to `null`. This behavior can be configured by passing custom
* FieldValueOptions to value().
* - With respect to other ServerTimestampValues, they sort by their
* localWriteTime.
*/
export declare class ServerTimestampValue extends FieldValue {
readonly localWriteTime: Timestamp;
readonly previousValue: FieldValue | null;
typeOrder: TypeOrder;
constructor(localWriteTime: Timestamp, previousValue: FieldValue | null);
value(options?: FieldValueOptions): FieldType;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
toString(): string;
approximateByteSize(): number;
}
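/**
 * A minimal standalone sketch (illustrative, not the SDK implementation) of
 * how a pending server timestamp resolves under the three
 * ServerTimestampBehavior settings described above. `PendingServerTimestamp`
 * and `resolvePending` are hypothetical names introduced only for this sketch.
 */
interface PendingServerTimestamp {
  localWriteTime: Date; // when the write was issued locally
  previousValue: unknown | null; // the field's value before the pending write, if known
}
function resolvePending(
  pending: PendingServerTimestamp,
  behavior: 'default' | 'estimate' | 'previous'
): unknown {
  switch (behavior) {
    case 'estimate':
      // Use the local write time as a best guess for the eventual server time.
      return pending.localWriteTime;
    case 'previous':
      // Fall back to the value the field had before the pending write.
      return pending.previousValue;
    default:
      // Default behavior: the final timestamp is unknown, so report null.
      return null;
  }
}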
export declare class BlobValue extends FieldValue {
readonly internalValue: Blob;
typeOrder: TypeOrder;
constructor(internalValue: Blob);
value(options?: FieldValueOptions): Blob;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
approximateByteSize(): number;
}
export declare class RefValue extends FieldValue {
readonly databaseId: DatabaseId;
readonly key: DocumentKey;
typeOrder: TypeOrder;
constructor(databaseId: DatabaseId, key: DocumentKey);
value(options?: FieldValueOptions): DocumentKey;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
approximateByteSize(): number;
}
export declare class GeoPointValue extends FieldValue {
readonly internalValue: GeoPoint;
typeOrder: TypeOrder;
constructor(internalValue: GeoPoint);
value(options?: FieldValueOptions): GeoPoint;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
approximateByteSize(): number;
}
export declare class ObjectValue extends FieldValue {
readonly internalValue: SortedMap<string, FieldValue>;
typeOrder: TypeOrder;
constructor(internalValue: SortedMap<string, FieldValue>);
value(options?: FieldValueOptions): JsonObject<FieldType>;
forEach(action: (key: string, value: FieldValue) => void): void;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
set(path: FieldPath, to: FieldValue): ObjectValue;
delete(path: FieldPath): ObjectValue;
contains(path: FieldPath): boolean;
field(path: FieldPath): FieldValue | null;
/**
* Returns a FieldMask built from all FieldPaths starting from this ObjectValue,
* including paths from nested objects.
*/
fieldMask(): FieldMask;
approximateByteSize(): number;
toString(): string;
private child;
private setChild;
static EMPTY: ObjectValue;
}
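/**
 * A simplified sketch of the immutable nested update that ObjectValue.set()
 * performs, using plain objects and an array of path segments instead of
 * SortedMap and FieldPath. Illustrative only; `setNestedField` is not part of
 * this package.
 */
function setNestedField(
  obj: Record<string, unknown>,
  path: string[],
  value: unknown
): Record<string, unknown> {
  const [head, ...rest] = path;
  const existingChild = obj[head];
  const newChild =
    rest.length === 0
      ? value
      : setNestedField(
          typeof existingChild === 'object' && existingChild !== null
            ? (existingChild as Record<string, unknown>)
            : {},
          rest,
          value
        );
  // Copy on write: parents along the path are re-created, siblings are shared.
  return { ...obj, [head]: newChild };
}
// setNestedField({ a: { b: 1 } }, ['a', 'c'], 2) => { a: { b: 1, c: 2 } }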
export declare class ArrayValue extends FieldValue {
readonly internalValue: FieldValue[];
typeOrder: TypeOrder;
constructor(internalValue: FieldValue[]);
value(options?: FieldValueOptions): FieldType[];
/**
* Returns true if the given value is contained in this array.
*/
contains(value: FieldValue): boolean;
forEach(action: (value: FieldValue) => void): void;
isEqual(other: FieldValue): boolean;
compareTo(other: FieldValue): number;
approximateByteSize(): number;
toString(): string;
}

View File

@ -0,0 +1,363 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Timestamp } from '../api/timestamp';
import { SnapshotVersion } from '../core/snapshot_version';
import { SortedSet } from '../util/sorted_set';
import { MaybeDocument } from './document';
import { DocumentKey } from './document_key';
import { FieldValue, ObjectValue } from './field_value';
import { FieldPath } from './path';
import { TransformOperation } from './transform_operation';
/**
* Provides a set of fields that can be used to partially patch a document.
* FieldMask is used in conjunction with ObjectValue.
* Examples:
* foo - Overwrites foo entirely with the provided value. If foo is not
* present in the companion ObjectValue, the field is deleted.
* foo.bar - Overwrites only the field bar of the object foo.
* If foo is not an object, foo is replaced with an object
 * containing bar
*/
export declare class FieldMask {
readonly fields: SortedSet<FieldPath>;
constructor(fields: SortedSet<FieldPath>);
static fromSet(fields: SortedSet<FieldPath>): FieldMask;
static fromArray(fields: FieldPath[]): FieldMask;
/**
* Verifies that `fieldPath` is included by at least one field in this field
* mask.
*
* This is an O(n) operation, where `n` is the size of the field mask.
*/
covers(fieldPath: FieldPath): boolean;
isEqual(other: FieldMask): boolean;
}
/** A field path and the TransformOperation to perform upon it. */
export declare class FieldTransform {
readonly field: FieldPath;
readonly transform: TransformOperation;
constructor(field: FieldPath, transform: TransformOperation);
isEqual(other: FieldTransform): boolean;
}
/** The result of successfully applying a mutation to the backend. */
export declare class MutationResult {
/**
* The version at which the mutation was committed:
*
* - For most operations, this is the updateTime in the WriteResult.
* - For deletes, the commitTime of the WriteResponse (because deletes are
* not stored and have no updateTime).
*
* Note that these versions can be different: No-op writes will not change
* the updateTime even though the commitTime advances.
*/
readonly version: SnapshotVersion;
/**
* The resulting fields returned from the backend after a
* TransformMutation has been committed. Contains one FieldValue for each
* FieldTransform that was in the mutation.
*
* Will be null if the mutation was not a TransformMutation.
*/
readonly transformResults: Array<FieldValue | null> | null;
constructor(
/**
* The version at which the mutation was committed:
*
* - For most operations, this is the updateTime in the WriteResult.
* - For deletes, the commitTime of the WriteResponse (because deletes are
* not stored and have no updateTime).
*
* Note that these versions can be different: No-op writes will not change
* the updateTime even though the commitTime advances.
*/
version: SnapshotVersion,
/**
* The resulting fields returned from the backend after a
* TransformMutation has been committed. Contains one FieldValue for each
* FieldTransform that was in the mutation.
*
* Will be null if the mutation was not a TransformMutation.
*/
transformResults: Array<FieldValue | null> | null);
}
export declare enum MutationType {
Set = 0,
Patch = 1,
Transform = 2,
Delete = 3,
Verify = 4
}
/**
* Encodes a precondition for a mutation. This follows the model that the
* backend accepts with the special case of an explicit "empty" precondition
* (meaning no precondition).
*/
export declare class Precondition {
readonly updateTime?: SnapshotVersion | undefined;
readonly exists?: boolean | undefined;
static readonly NONE: Precondition;
private constructor();
/** Creates a new Precondition with an exists flag. */
static exists(exists: boolean): Precondition;
/** Creates a new Precondition based on a version a document exists at. */
static updateTime(version: SnapshotVersion): Precondition;
/** Returns whether this Precondition is empty. */
get isNone(): boolean;
/**
 * Returns true if the precondition is valid for the given document
* (or null if no document is available).
*/
isValidFor(maybeDoc: MaybeDocument | null): boolean;
isEqual(other: Precondition): boolean;
}
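/**
 * A standalone sketch (illustrative only) of the validity check described
 * above, applied to a simplified document shape instead of MaybeDocument.
 * `SketchDoc` and `preconditionHolds` are hypothetical names for this sketch.
 */
interface SketchDoc {
  exists: boolean;
  version: number; // document version as a plain number for the sketch
}
function preconditionHolds(
  pre: { exists?: boolean; updateTime?: number },
  doc: SketchDoc | null
): boolean {
  if (pre.updateTime !== undefined) {
    // Requires an existing document at exactly this version.
    return doc !== null && doc.exists && doc.version === pre.updateTime;
  }
  if (pre.exists !== undefined) {
    // Requires the document to exist (or not exist) as specified.
    return pre.exists === (doc !== null && doc.exists);
  }
  // Empty precondition (Precondition.NONE): always valid.
  return true;
}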
/**
* A mutation describes a self-contained change to a document. Mutations can
* create, replace, delete, and update subsets of documents.
*
* Mutations not only act on the value of the document but also its version.
*
* For local mutations (mutations that haven't been committed yet), we preserve
* the existing version for Set, Patch, and Transform mutations. For Delete
* mutations, we reset the version to 0.
*
* Here's the expected transition table.
*
* MUTATION APPLIED TO RESULTS IN
*
* SetMutation Document(v3) Document(v3)
* SetMutation NoDocument(v3) Document(v0)
* SetMutation null Document(v0)
* PatchMutation Document(v3) Document(v3)
* PatchMutation NoDocument(v3) NoDocument(v3)
* PatchMutation null null
* TransformMutation Document(v3) Document(v3)
* TransformMutation NoDocument(v3) NoDocument(v3)
* TransformMutation null null
* DeleteMutation Document(v3) NoDocument(v0)
* DeleteMutation NoDocument(v3) NoDocument(v0)
* DeleteMutation null NoDocument(v0)
*
* For acknowledged mutations, we use the updateTime of the WriteResponse as
* the resulting version for Set, Patch, and Transform mutations. As deletes
* have no explicit update time, we use the commitTime of the WriteResponse for
* Delete mutations.
*
* If a mutation is acknowledged by the backend but fails the precondition check
* locally, we return an `UnknownDocument` and rely on Watch to send us the
* updated version.
*
* Note that TransformMutations don't create Documents (in the case of being
* applied to a NoDocument), even though they would on the backend. This is
* because the client always combines the TransformMutation with a SetMutation
* or PatchMutation and we only want to apply the transform if the prior
* mutation resulted in a Document (always true for a SetMutation, but not
* necessarily for a PatchMutation).
*
* ## Subclassing Notes
*
* Subclasses of Mutation need to implement applyToRemoteDocument() and
* applyToLocalView() to implement the actual behavior of applying the mutation
* to some source document.
*/
export declare abstract class Mutation {
abstract readonly type: MutationType;
abstract readonly key: DocumentKey;
abstract readonly precondition: Precondition;
/**
* Applies this mutation to the given MaybeDocument or null for the purposes
* of computing a new remote document. If the input document doesn't match the
* expected state (e.g. it is null or outdated), an `UnknownDocument` can be
* returned.
*
* @param maybeDoc The document to mutate. The input document can be null if
* the client has no knowledge of the pre-mutation state of the document.
* @param mutationResult The result of applying the mutation from the backend.
* @return The mutated document. The returned document may be an
* UnknownDocument if the mutation could not be applied to the locally
* cached base document.
*/
abstract applyToRemoteDocument(maybeDoc: MaybeDocument | null, mutationResult: MutationResult): MaybeDocument;
/**
* Applies this mutation to the given MaybeDocument or null for the purposes
* of computing the new local view of a document. Both the input and returned
* documents can be null.
*
* @param maybeDoc The document to mutate. The input document can be null if
* the client has no knowledge of the pre-mutation state of the document.
* @param baseDoc The state of the document prior to this mutation batch. The
* input document can be null if the client has no knowledge of the
* pre-mutation state of the document.
* @param localWriteTime A timestamp indicating the local write time of the
* batch this mutation is a part of.
* @return The mutated document. The returned document may be null, but only
* if maybeDoc was null and the mutation would not create a new document.
*/
abstract applyToLocalView(maybeDoc: MaybeDocument | null, baseDoc: MaybeDocument | null, localWriteTime: Timestamp): MaybeDocument | null;
/**
* If this mutation is not idempotent, returns the base value to persist with
* this mutation. If a base value is returned, the mutation is always applied
 * to this base value, even if the document has already been updated.
*
* The base value is a sparse object that consists of only the document
* fields for which this mutation contains a non-idempotent transformation
* (e.g. a numeric increment). The provided value guarantees consistent
 * behavior for non-idempotent transforms and allows us to return the same
* latency-compensated value even if the backend has already applied the
* mutation. The base value is null for idempotent mutations, as they can be
* re-played even if the backend has already applied them.
*
* @return a base value to store along with the mutation, or null for
* idempotent mutations.
*/
abstract extractBaseValue(maybeDoc: MaybeDocument | null): ObjectValue | null;
abstract isEqual(other: Mutation): boolean;
protected verifyKeyMatches(maybeDoc: MaybeDocument | null): void;
/**
* Returns the version from the given document for use as the result of a
* mutation. Mutations are defined to return the version of the base document
* only if it is an existing document. Deleted and unknown documents have a
* post-mutation version of SnapshotVersion.MIN.
*/
protected static getPostMutationVersion(maybeDoc: MaybeDocument | null): SnapshotVersion;
}
/**
* A mutation that creates or replaces the document at the given key with the
* object value contents.
*/
export declare class SetMutation extends Mutation {
readonly key: DocumentKey;
readonly value: ObjectValue;
readonly precondition: Precondition;
constructor(key: DocumentKey, value: ObjectValue, precondition: Precondition);
readonly type: MutationType;
applyToRemoteDocument(maybeDoc: MaybeDocument | null, mutationResult: MutationResult): MaybeDocument;
applyToLocalView(maybeDoc: MaybeDocument | null, baseDoc: MaybeDocument | null, localWriteTime: Timestamp): MaybeDocument | null;
extractBaseValue(maybeDoc: MaybeDocument | null): null;
isEqual(other: Mutation): boolean;
}
/**
* A mutation that modifies fields of the document at the given key with the
* given values. The values are applied through a field mask:
*
* * When a field is in both the mask and the values, the corresponding field
* is updated.
* * When a field is in neither the mask nor the values, the corresponding
* field is unmodified.
* * When a field is in the mask but not in the values, the corresponding field
* is deleted.
* * When a field is not in the mask but is in the values, the values map is
* ignored.
*/
export declare class PatchMutation extends Mutation {
readonly key: DocumentKey;
readonly data: ObjectValue;
readonly fieldMask: FieldMask;
readonly precondition: Precondition;
constructor(key: DocumentKey, data: ObjectValue, fieldMask: FieldMask, precondition: Precondition);
readonly type: MutationType;
applyToRemoteDocument(maybeDoc: MaybeDocument | null, mutationResult: MutationResult): MaybeDocument;
applyToLocalView(maybeDoc: MaybeDocument | null, baseDoc: MaybeDocument | null, localWriteTime: Timestamp): MaybeDocument | null;
extractBaseValue(maybeDoc: MaybeDocument | null): null;
isEqual(other: Mutation): boolean;
/**
 * Patches the data of the document if available or creates a new document. Note
* that this does not check whether or not the precondition of this patch
* holds.
*/
private patchDocument;
private patchObject;
}
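/**
 * A minimal sketch of the mask/value semantics listed above, applied to plain
 * objects with top-level field names only (the real code operates on
 * ObjectValue and full FieldPaths). `applyPatchSketch` is illustrative, not
 * the SDK implementation.
 */
function applyPatchSketch(
  doc: Record<string, unknown>,
  mask: Set<string>,
  values: Record<string, unknown>
): Record<string, unknown> {
  const result: Record<string, unknown> = { ...doc };
  for (const field of mask) {
    if (field in values) {
      result[field] = values[field]; // in both mask and values: update
    } else {
      delete result[field]; // in the mask only: delete
    }
  }
  // Fields present in `values` but not in the mask are ignored; fields in
  // neither are left unmodified.
  return result;
}
// applyPatchSketch({ a: 1, b: 2 }, new Set(['a', 'c']), { a: 10, d: 4 })
//   => { a: 10, b: 2 }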
/**
* A mutation that modifies specific fields of the document with transform
* operations. Currently the only supported transform is a server timestamp, but
* IP Address, increment(n), etc. could be supported in the future.
*
* It is somewhat similar to a PatchMutation in that it patches specific fields
* and has no effect when applied to a null or NoDocument (see comment on
* Mutation for rationale).
*/
export declare class TransformMutation extends Mutation {
readonly key: DocumentKey;
readonly fieldTransforms: FieldTransform[];
readonly type: MutationType;
readonly precondition: Precondition;
constructor(key: DocumentKey, fieldTransforms: FieldTransform[]);
applyToRemoteDocument(maybeDoc: MaybeDocument | null, mutationResult: MutationResult): MaybeDocument;
applyToLocalView(maybeDoc: MaybeDocument | null, baseDoc: MaybeDocument | null, localWriteTime: Timestamp): MaybeDocument | null;
extractBaseValue(maybeDoc: MaybeDocument | null): ObjectValue | null;
isEqual(other: Mutation): boolean;
/**
* Asserts that the given MaybeDocument is actually a Document and verifies
* that it matches the key for this mutation. Since we only support
 * transformations with an exists precondition, this method is guaranteed to be
* safe.
*/
private requireDocument;
/**
* Creates a list of "transform results" (a transform result is a field value
* representing the result of applying a transform) for use after a
* TransformMutation has been acknowledged by the server.
*
* @param baseDoc The document prior to applying this mutation batch.
* @param serverTransformResults The transform results received by the server.
* @return The transform results list.
*/
private serverTransformResults;
/**
* Creates a list of "transform results" (a transform result is a field value
* representing the result of applying a transform) for use when applying a
* TransformMutation locally.
*
* @param localWriteTime The local time of the transform mutation (used to
* generate ServerTimestampValues).
* @param maybeDoc The current state of the document after applying all
* previous mutations.
* @param baseDoc The document prior to applying this mutation batch.
* @return The transform results list.
*/
private localTransformResults;
private transformObject;
}
/** A mutation that deletes the document at the given key. */
export declare class DeleteMutation extends Mutation {
readonly key: DocumentKey;
readonly precondition: Precondition;
constructor(key: DocumentKey, precondition: Precondition);
readonly type: MutationType;
applyToRemoteDocument(maybeDoc: MaybeDocument | null, mutationResult: MutationResult): MaybeDocument;
applyToLocalView(maybeDoc: MaybeDocument | null, baseDoc: MaybeDocument | null, localWriteTime: Timestamp): MaybeDocument | null;
extractBaseValue(maybeDoc: MaybeDocument | null): null;
isEqual(other: Mutation): boolean;
}
/**
* A mutation that verifies the existence of the document at the given key with
* the provided precondition.
*
* The `verify` operation is only used in Transactions, and this class serves
* primarily to facilitate serialization into protos.
*/
export declare class VerifyMutation extends Mutation {
readonly key: DocumentKey;
readonly precondition: Precondition;
constructor(key: DocumentKey, precondition: Precondition);
readonly type: MutationType;
applyToRemoteDocument(maybeDoc: MaybeDocument | null, mutationResult: MutationResult): MaybeDocument;
applyToLocalView(maybeDoc: MaybeDocument | null, baseDoc: MaybeDocument | null, localWriteTime: Timestamp): MaybeDocument | null;
extractBaseValue(maybeDoc: MaybeDocument | null): null;
isEqual(other: Mutation): boolean;
}

View File

@ -0,0 +1,89 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Timestamp } from '../api/timestamp';
import { SnapshotVersion } from '../core/snapshot_version';
import { BatchId, ProtoByteString } from '../core/types';
import { DocumentKeySet, DocumentVersionMap, MaybeDocumentMap } from './collections';
import { MaybeDocument } from './document';
import { DocumentKey } from './document_key';
import { Mutation, MutationResult } from './mutation';
export declare const BATCHID_UNKNOWN = -1;
/**
* A batch of mutations that will be sent as one unit to the backend.
*/
export declare class MutationBatch {
batchId: BatchId;
localWriteTime: Timestamp;
baseMutations: Mutation[];
mutations: Mutation[];
/**
* @param batchId The unique ID of this mutation batch.
* @param localWriteTime The original write time of this mutation.
* @param baseMutations Mutations that are used to populate the base
* values when this mutation is applied locally. This can be used to locally
* overwrite values that are persisted in the remote document cache. Base
* mutations are never sent to the backend.
* @param mutations The user-provided mutations in this mutation batch.
* User-provided mutations are applied both locally and remotely on the
* backend.
*/
constructor(batchId: BatchId, localWriteTime: Timestamp, baseMutations: Mutation[], mutations: Mutation[]);
/**
* Applies all the mutations in this MutationBatch to the specified document
 * to create a new remote document.
*
* @param docKey The key of the document to apply mutations to.
* @param maybeDoc The document to apply mutations to.
* @param batchResult The result of applying the MutationBatch to the
* backend.
*/
applyToRemoteDocument(docKey: DocumentKey, maybeDoc: MaybeDocument | null, batchResult: MutationBatchResult): MaybeDocument | null;
/**
* Computes the local view of a document given all the mutations in this
* batch.
*
* @param docKey The key of the document to apply mutations to.
* @param maybeDoc The document to apply mutations to.
*/
applyToLocalView(docKey: DocumentKey, maybeDoc: MaybeDocument | null): MaybeDocument | null;
/**
* Computes the local view for all provided documents given the mutations in
* this batch.
*/
applyToLocalDocumentSet(maybeDocs: MaybeDocumentMap): MaybeDocumentMap;
keys(): DocumentKeySet;
isEqual(other: MutationBatch): boolean;
}
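/**
 * A simplified sketch of how this batch computes a local view: base mutations
 * are applied first to restore locally persisted base values, then each
 * user-provided mutation is applied in order. Plain functions stand in for
 * Mutation objects; illustrative only.
 */
type SketchMutationFn = (
  doc: Record<string, unknown> | null
) => Record<string, unknown> | null;
function applyBatchLocallySketch(
  doc: Record<string, unknown> | null,
  baseMutations: SketchMutationFn[],
  mutations: SketchMutationFn[]
): Record<string, unknown> | null {
  let current = doc;
  for (const apply of baseMutations) {
    current = apply(current);
  }
  for (const apply of mutations) {
    current = apply(current);
  }
  return current;
}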
/** The result of applying a mutation batch to the backend. */
export declare class MutationBatchResult {
readonly batch: MutationBatch;
readonly commitVersion: SnapshotVersion;
readonly mutationResults: MutationResult[];
readonly streamToken: ProtoByteString;
/**
* A pre-computed mapping from each mutated document to the resulting
* version.
*/
readonly docVersions: DocumentVersionMap;
private constructor();
/**
* Creates a new MutationBatchResult for the given batch and results. There
* must be one result for each mutation in the batch. This static factory
* caches a document=>version mapping (docVersions).
*/
static from(batch: MutationBatch, commitVersion: SnapshotVersion, results: MutationResult[], streamToken: ProtoByteString): MutationBatchResult;
}

View File

@ -0,0 +1,100 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export declare const DOCUMENT_KEY_NAME = "__name__";
/**
* Path represents an ordered sequence of string segments.
*/
declare abstract class BasePath<B extends BasePath<B>> {
private segments;
private offset;
private len;
constructor(segments: string[], offset?: number, length?: number);
/**
* Abstract constructor method to construct an instance of B with the given
* parameters.
*/
protected abstract construct(segments: string[], offset?: number, length?: number): B;
/**
* Returns a String representation.
*
* Implementing classes are required to provide deterministic implementations as
* the String representation is used to obtain canonical Query IDs.
*/
abstract toString(): string;
get length(): number;
isEqual(other: B): boolean;
child(nameOrPath: string | B): B;
/** The index of one past the last segment of the path. */
private limit;
popFirst(size?: number): B;
popLast(): B;
firstSegment(): string;
lastSegment(): string;
get(index: number): string;
isEmpty(): boolean;
isPrefixOf(other: this): boolean;
isImmediateParentOf(potentialChild: this): boolean;
forEach(fn: (segment: string) => void): void;
toArray(): string[];
static comparator<T extends BasePath<T>>(p1: BasePath<T>, p2: BasePath<T>): number;
}
/**
* A slash-separated path for navigating resources (documents and collections)
* within Firestore.
*/
export declare class ResourcePath extends BasePath<ResourcePath> {
protected construct(segments: string[], offset?: number, length?: number): ResourcePath;
canonicalString(): string;
toString(): string;
/**
* Creates a resource path from the given slash-delimited string.
*/
static fromString(path: string): ResourcePath;
static EMPTY_PATH: ResourcePath;
}
/** A dot-separated path for navigating sub-objects within a document. */
export declare class FieldPath extends BasePath<FieldPath> {
protected construct(segments: string[], offset?: number, length?: number): FieldPath;
/**
* Returns true if the string could be used as a segment in a field path
* without escaping.
*/
private static isValidIdentifier;
canonicalString(): string;
toString(): string;
/**
* Returns true if this field references the key of a document.
*/
isKeyField(): boolean;
/**
* The field designating the key of a document.
*/
static keyField(): FieldPath;
/**
* Parses a field string from the given server-formatted string.
*
* - Splitting the empty string is not allowed (for now at least).
* - Empty segments within the string (e.g. if there are two consecutive
* separators) are not allowed.
*
* TODO(b/37244157): we should make this more strict. Right now, it allows
* non-identifier path components, even if they aren't escaped.
*/
static fromServerFormat(path: string): FieldPath;
static EMPTY_PATH: FieldPath;
}
export {};
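/**
 * A standalone sketch (illustrative only) of the segment-wise prefix test that
 * paths such as ResourcePath and FieldPath rely on for isPrefixOf().
 */
function isSegmentPrefix(prefix: string[], path: string[]): boolean {
  if (prefix.length > path.length) {
    return false;
  }
  for (let i = 0; i < prefix.length; i++) {
    if (prefix[i] !== path[i]) {
      return false;
    }
  }
  return true;
}
// isSegmentPrefix(['rooms', 'eros'], ['rooms', 'eros', 'messages', 'm1']) === true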

View File

@ -0,0 +1,95 @@
/**
* @license
* Copyright 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Timestamp } from '../api/timestamp';
import { FieldValue, NumberValue } from './field_value';
/** Represents a transform within a TransformMutation. */
export interface TransformOperation {
/**
* Computes the local transform result against the provided `previousValue`,
* optionally using the provided localWriteTime.
*/
applyToLocalView(previousValue: FieldValue | null, localWriteTime: Timestamp): FieldValue;
/**
* Computes a final transform result after the transform has been acknowledged
* by the server, potentially using the server-provided transformResult.
*/
applyToRemoteDocument(previousValue: FieldValue | null, transformResult: FieldValue | null): FieldValue;
/**
* If this transform operation is not idempotent, returns the base value to
* persist for this transform. If a base value is returned, the transform
 * operation is always applied to this base value, even if the document has
* already been updated.
*
* Base values provide consistent behavior for non-idempotent transforms and
* allow us to return the same latency-compensated value even if the backend
* has already applied the transform operation. The base value is null for
* idempotent transforms, as they can be re-played even if the backend has
* already applied them.
*
* @return a base value to store along with the mutation, or null for
* idempotent transforms.
*/
computeBaseValue(previousValue: FieldValue | null): FieldValue | null;
isEqual(other: TransformOperation): boolean;
}
/** Transforms a value into a server-generated timestamp. */
export declare class ServerTimestampTransform implements TransformOperation {
private constructor();
static instance: ServerTimestampTransform;
applyToLocalView(previousValue: FieldValue | null, localWriteTime: Timestamp): FieldValue;
applyToRemoteDocument(previousValue: FieldValue | null, transformResult: FieldValue | null): FieldValue;
computeBaseValue(previousValue: FieldValue | null): FieldValue | null;
isEqual(other: TransformOperation): boolean;
}
/** Transforms an array value via a union operation. */
export declare class ArrayUnionTransformOperation implements TransformOperation {
readonly elements: FieldValue[];
constructor(elements: FieldValue[]);
applyToLocalView(previousValue: FieldValue | null, localWriteTime: Timestamp): FieldValue;
applyToRemoteDocument(previousValue: FieldValue | null, transformResult: FieldValue | null): FieldValue;
private apply;
computeBaseValue(previousValue: FieldValue | null): FieldValue | null;
isEqual(other: TransformOperation): boolean;
}
/** Transforms an array value via a remove operation. */
export declare class ArrayRemoveTransformOperation implements TransformOperation {
readonly elements: FieldValue[];
constructor(elements: FieldValue[]);
applyToLocalView(previousValue: FieldValue | null, localWriteTime: Timestamp): FieldValue;
applyToRemoteDocument(previousValue: FieldValue | null, transformResult: FieldValue | null): FieldValue;
private apply;
computeBaseValue(previousValue: FieldValue | null): FieldValue | null;
isEqual(other: TransformOperation): boolean;
}
/**
* Implements the backend semantics for locally computed NUMERIC_ADD (increment)
 * transforms. Converts all field values to integers or doubles but, unlike the
 * backend, does not cap integer values at 2^63. Instead, JavaScript number
* arithmetic is used and precision loss can occur for values greater than 2^53.
*/
export declare class NumericIncrementTransformOperation implements TransformOperation {
readonly operand: NumberValue;
constructor(operand: NumberValue);
applyToLocalView(previousValue: FieldValue | null, localWriteTime: Timestamp): FieldValue;
applyToRemoteDocument(previousValue: FieldValue | null, transformResult: FieldValue | null): FieldValue;
/**
* Inspects the provided value, returning the provided value if it is already
* a NumberValue, otherwise returning a coerced IntegerValue of 0.
*/
computeBaseValue(previousValue: FieldValue | null): NumberValue;
isEqual(other: TransformOperation): boolean;
}
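/**
 * A small standalone demonstration of the precision note above: plain
 * JavaScript number arithmetic loses integer precision past 2^53, so locally
 * computed increments can drift from the backend for very large values.
 */
const MAX_SAFE = 2 ** 53 - 1; // Number.MAX_SAFE_INTEGER
console.log(MAX_SAFE + 1 === 2 ** 53); // true: still exact at the boundary
console.log(2 ** 53 + 1 === 2 ** 53); // true: the +1 is lost above 2^53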

View File

@ -0,0 +1,29 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FirebaseNamespace } from '@firebase/app-types';
/**
* Configures Firestore as part of the Firebase SDK by calling registerService.
*/
export declare function configureForFirebase(firebase: FirebaseNamespace): void;
/**
* Exports the Firestore namespace into the provided `exportObject` object under
 * the key 'firestore'. This is used for the wrapped binary that exposes Firestore
* as a goog module.
*/
export declare function configureForStandalone(exportObject: {
[key: string]: {};
}): void;

View File

@ -0,0 +1,17 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export {};

View File

@ -0,0 +1,60 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { DatabaseId, DatabaseInfo } from '../core/database_info';
import { ProtoByteString } from '../core/types';
import { Connection } from '../remote/connection';
import { JsonProtoSerializer } from '../remote/serializer';
import { ConnectivityMonitor } from './../remote/connectivity_monitor';
/**
* Provides a common interface to load anything platform dependent, e.g.
* the connection implementation.
*
* An implementation of this must be provided at compile time for the platform.
*/
export interface Platform {
loadConnection(databaseInfo: DatabaseInfo): Promise<Connection>;
newConnectivityMonitor(): ConnectivityMonitor;
newSerializer(databaseId: DatabaseId): JsonProtoSerializer;
/** Formats an object as a JSON string, suitable for logging. */
formatJSON(value: unknown): string;
/** Converts a Base64 encoded string to a binary string. */
atob(encoded: string): string;
/** Converts a binary string to a Base64 encoded string. */
btoa(raw: string): string;
/** The Platform's 'window' implementation or null if not available. */
readonly window: Window | null;
/** The Platform's 'document' implementation or null if not available. */
readonly document: Document | null;
/** True if and only if the Base64 conversion functions are available. */
readonly base64Available: boolean;
readonly emptyByteString: ProtoByteString;
}
/**
* Provides singleton helpers where setup code can inject a platform at runtime.
 * setPlatform needs to be called before Firestore is used and must be called exactly
* once.
*/
export declare class PlatformSupport {
private static platform;
static setPlatform(platform: Platform): void;
static getPlatform(): Platform;
}
/**
* Returns the representation of an empty "proto" byte string for the
* platform.
*/
export declare function emptyByteString(): ProtoByteString;
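/**
 * A hedged sketch of how a platform could provide the Base64 helpers declared
 * above: browsers expose global atob/btoa, while Node can use Buffer. This is
 * illustrative, not this package's actual platform code.
 */
function atobSketch(encoded: string): string {
  const g = globalThis as { atob?: (data: string) => string };
  return g.atob
    ? g.atob(encoded)
    : Buffer.from(encoded, 'base64').toString('binary');
}
function btoaSketch(raw: string): string {
  const g = globalThis as { btoa?: (data: string) => string };
  return g.btoa
    ? g.btoa(raw)
    : Buffer.from(raw, 'binary').toString('base64');
}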

View File

@ -0,0 +1,33 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ConnectivityMonitor, NetworkStatus } from './../remote/connectivity_monitor';
/**
* Browser implementation of ConnectivityMonitor.
*/
export declare class BrowserConnectivityMonitor implements ConnectivityMonitor {
private readonly networkAvailableListener;
private readonly networkUnavailableListener;
private callbacks;
constructor();
addCallback(callback: (status: NetworkStatus) => void): void;
shutdown(): void;
private configureNetworkMonitoring;
private onNetworkAvailable;
private onNetworkUnavailable;
/** Checks that all used attributes of window are available. */
static isAvailable(): boolean;
}

View File

@ -0,0 +1,17 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export {};

View File

@ -0,0 +1,34 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { DatabaseId, DatabaseInfo } from '../core/database_info';
import { Platform } from '../platform/platform';
import { Connection } from '../remote/connection';
import { JsonProtoSerializer } from '../remote/serializer';
import { ConnectivityMonitor } from './../remote/connectivity_monitor';
export declare class BrowserPlatform implements Platform {
readonly base64Available: boolean;
readonly emptyByteString = "";
constructor();
get document(): Document | null;
get window(): Window | null;
loadConnection(databaseInfo: DatabaseInfo): Promise<Connection>;
newConnectivityMonitor(): ConnectivityMonitor;
newSerializer(databaseId: DatabaseId): JsonProtoSerializer;
formatJSON(value: unknown): string;
atob(encoded: string): string;
btoa(raw: string): string;
}

View File

@ -0,0 +1,34 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Token } from '../api/credentials';
import { DatabaseInfo } from '../core/database_info';
import { Connection, Stream } from '../remote/connection';
export declare class WebChannelConnection implements Connection {
private readonly databaseId;
private readonly baseUrl;
private readonly forceLongPolling;
constructor(info: DatabaseInfo);
/**
* Modifies the headers for a request, adding any authorization token if
* present and any additional headers for the request.
*/
private modifyHeadersForRequest;
invokeRPC<Req, Resp>(rpcName: string, request: Req, token: Token | null): Promise<Resp>;
invokeStreamingRPC<Req, Resp>(rpcName: string, request: Req, token: Token | null): Promise<Resp[]>;
openStream<Req, Resp>(rpcName: string, token: Token | null): Stream<Req, Resp>;
makeUrl(rpcName: string): string;
}

View File

@ -0,0 +1,33 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as grpc from 'grpc';
import { Token } from '../api/credentials';
import { DatabaseInfo } from '../core/database_info';
import { Connection, Stream } from '../remote/connection';
/**
* A Connection implemented by GRPC-Node.
*/
export declare class GrpcConnection implements Connection {
private databaseInfo;
private firestore;
private cachedStub;
constructor(protos: grpc.GrpcObject, databaseInfo: DatabaseInfo);
private ensureActiveStub;
invokeRPC<Req, Resp>(rpcName: string, request: Req, token: Token | null): Promise<Resp>;
invokeStreamingRPC<Req, Resp>(rpcName: string, request: Req, token: Token | null): Promise<Resp[]>;
openStream<Req, Resp>(rpcName: string, token: Token | null): Stream<Req, Resp>;
}

View File

@ -0,0 +1,28 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as grpc from 'grpc';
import * as ProtobufJS from 'protobufjs';
/** Used by tests so we can match @grpc/proto-loader behavior. */
export declare const protoLoaderOptions: ProtobufJS.IConversionOptions;
/**
* Loads the protocol buffer definitions for Firestore.
*
* @returns The GrpcObject representing our protos.
*/
export declare function loadProtos(): grpc.GrpcObject;
/** Used by tests so we can directly create ProtobufJS proto message objects from JSON protos. */
export declare function loadRawProtos(): ProtobufJS.Root;

View File

@ -0,0 +1,17 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export {};

View File

@ -0,0 +1,33 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { DatabaseId, DatabaseInfo } from '../core/database_info';
import { Platform } from '../platform/platform';
import { Connection } from '../remote/connection';
import { JsonProtoSerializer } from '../remote/serializer';
import { ConnectivityMonitor } from './../remote/connectivity_monitor';
export declare class NodePlatform implements Platform {
readonly base64Available = true;
readonly emptyByteString: Uint8Array;
readonly document: null;
get window(): Window | null;
loadConnection(databaseInfo: DatabaseInfo): Promise<Connection>;
newConnectivityMonitor(): ConnectivityMonitor;
newSerializer(partitionId: DatabaseId): JsonProtoSerializer;
formatJSON(value: unknown): string;
atob(encoded: string): string;
btoa(raw: string): string;
}

View File

@ -0,0 +1,105 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { AsyncQueue, TimerId } from '../util/async_queue';
/**
* A helper for running delayed tasks following an exponential backoff curve
* between attempts.
*
* Each delay is made up of a "base" delay which follows the exponential
* backoff curve, and a +/- 50% "jitter" that is calculated and added to the
* base delay. This prevents clients from accidentally synchronizing their
* delays causing spikes of load to the backend.
*/
export declare class ExponentialBackoff {
/**
* The AsyncQueue to run backoff operations on.
*/
private readonly queue;
/**
* The ID to use when scheduling backoff operations on the AsyncQueue.
*/
private readonly timerId;
/**
* The initial delay (used as the base delay on the first retry attempt).
* Note that jitter will still be applied, so the actual delay could be as
* little as 0.5*initialDelayMs.
*/
private readonly initialDelayMs;
/**
* The multiplier to use to determine the extended base delay after each
* attempt.
*/
private readonly backoffFactor;
/**
* The maximum base delay after which no further backoff is performed.
* Note that jitter will still be applied, so the actual delay could be as
* much as 1.5*maxDelayMs.
*/
private readonly maxDelayMs;
private currentBaseMs;
private timerPromise;
/** The last backoff attempt, as epoch milliseconds. */
private lastAttemptTime;
constructor(
/**
* The AsyncQueue to run backoff operations on.
*/
queue: AsyncQueue,
/**
* The ID to use when scheduling backoff operations on the AsyncQueue.
*/
timerId: TimerId,
/**
* The initial delay (used as the base delay on the first retry attempt).
* Note that jitter will still be applied, so the actual delay could be as
* little as 0.5*initialDelayMs.
*/
initialDelayMs?: number,
/**
* The multiplier to use to determine the extended base delay after each
* attempt.
*/
backoffFactor?: number,
/**
* The maximum base delay after which no further backoff is performed.
* Note that jitter will still be applied, so the actual delay could be as
* much as 1.5*maxDelayMs.
*/
maxDelayMs?: number);
/**
* Resets the backoff delay.
*
* The very next backoffAndWait() will have no delay. If it is called again
* (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
* subsequent ones will increase according to the backoffFactor.
*/
reset(): void;
/**
* Resets the backoff delay to the maximum delay (e.g. for use after a
* RESOURCE_EXHAUSTED error).
*/
resetToMax(): void;
/**
* Returns a promise that resolves after currentDelayMs, and increases the
* delay for any subsequent attempts. If there was a pending backoff operation
* already, it will be canceled.
*/
backoffAndRun(op: () => Promise<void>): void;
cancel(): void;
/** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */
private jitterDelayMs;
}
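/**
 * A minimal standalone sketch of the delay curve described above: each attempt
 * waits for the current base delay plus a random +/- 50% jitter, then the base
 * grows by the backoff factor up to the configured maximum. Names are
 * illustrative, not the SDK's.
 */
interface BackoffSketchState {
  currentBaseMs: number;
  backoffFactor: number;
  maxDelayMs: number;
}
function nextBackoffDelayMs(state: BackoffSketchState): number {
  // Jitter is a random value in [-currentBaseMs/2, currentBaseMs/2].
  const jitterMs = (Math.random() - 0.5) * state.currentBaseMs;
  const delayMs = Math.max(0, state.currentBaseMs + jitterMs);
  // Grow the base for the following attempt and clamp it to the maximum.
  state.currentBaseMs = Math.min(
    state.currentBaseMs * state.backoffFactor,
    state.maxDelayMs
  );
  return delayMs;
}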

View File

@ -0,0 +1,77 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Token } from '../api/credentials';
import { FirestoreError } from '../util/error';
/**
* A connected RPC interface to a remote Datastore.
*
* Responsible for maintaining a connection to the backend (and informing when
* that connection state changes via onConnectionStateChange) and sending RPCs
* when possible.
*
* The Connection is not responsible for queueing RPCs to the backend when
* the connection is down.
*
* RPC messages are expected to be JavaScript objects representing the JSON that
 * would be sent over the REST/JSON API to Datastore or used as input for
* creating the equivalent protocol buffers for GRPC.
*/
export interface Connection {
/**
* Invokes an RPC by name, given a request message as a JavaScript object
* representing the JSON to send.
*
* @param rpcName the name of the RPC to invoke
* @param request the Raw JSON object encoding of the request message
* @param token the Token to use for the RPC.
* @return a Promise containing the JSON object encoding of the response
*/
invokeRPC<Req, Resp>(rpcName: string, request: Req, token: Token | null): Promise<Resp>;
/**
* Invokes a streaming RPC by name, given a request message as a JavaScript
* object representing the JSON to send. The responses will be consumed to
* completion and then returned as an array.
*
* @param rpcName the name of the RPC to invoke
* @param request the Raw JSON object encoding of the request message
* @param token the Token to use for the RPC.
* @return a Promise containing an array with the JSON object encodings of the
* responses
*/
invokeStreamingRPC<Req, Resp>(rpcName: string, request: Req, token: Token | null): Promise<Resp[]>;
/**
* Opens a stream to the given stream RPC endpoint. Returns a stream which
* will try to open itself.
* @param rpcName the name of the RPC to open the stream on
* @param token the Token to use for the RPC.
*/
openStream<Req, Resp>(rpcName: string, token: Token | null): Stream<Req, Resp>;
}
/**
 * A bidirectional stream that can be used to send and receive messages.
*
* A stream can be closed locally with close() or can be closed remotely or
* through network errors. onClose is guaranteed to be called. onOpen will only
* be called if the stream successfully established a connection.
*/
export interface Stream<I, O> {
onOpen(callback: () => void): void;
onClose(callback: (err?: FirestoreError) => void): void;
onMessage(callback: (msg: O) => void): void;
send(msg: I): void;
close(): void;
}
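/**
 * A tiny in-memory sketch of the Stream contract above, useful for seeing the
 * callback flow (open, message, close). It simply echoes every sent message
 * back; it is illustrative and not one of the SDK's real stream
 * implementations.
 */
function echoStreamSketch<T>(): Stream<T, T> {
  let openCallback: () => void = () => {};
  let closeCallback: (err?: FirestoreError) => void = () => {};
  let messageCallback: (msg: T) => void = () => {};
  // "Open" asynchronously so the caller can register callbacks first.
  setTimeout(() => openCallback(), 0);
  return {
    onOpen: cb => {
      openCallback = cb;
    },
    onClose: cb => {
      closeCallback = cb;
    },
    onMessage: cb => {
      messageCallback = cb;
    },
    send: msg => {
      // Echo every message back on the next tick.
      setTimeout(() => messageCallback(msg), 0);
    },
    close: () => closeCallback()
  };
}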

View File

@ -0,0 +1,47 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The set of network states is deliberately simplified -- we only care about
 * states such that a transition between them should break currently
* established connections.
*/
export declare const enum NetworkStatus {
AVAILABLE = 0,
UNAVAILABLE = 1
}
export declare type ConnectivityMonitorCallback = (status: NetworkStatus) => void;
/**
 * An interface for monitoring changes in network connectivity; it is expected
* that each platform will have its own system-dependent implementation.
*/
export interface ConnectivityMonitor {
/**
* Adds a callback to be called when connectivity changes.
*
* Callbacks are not made on the initial state of connectivity, since this
* monitor is primarily used for resetting backoff in the remote store when
* connectivity changes. As such, the initial connectivity state is
* irrelevant here.
*/
addCallback(callback: ConnectivityMonitorCallback): void;
/**
* Stops monitoring connectivity. After this call completes, no further
* callbacks will be triggered. After shutdown() is called, no further calls
* are allowed on this instance.
*/
shutdown(): void;
}
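/**
 * A hedged sketch of one possible ConnectivityMonitor built on the browser's
 * 'online'/'offline' events, mirroring the interface above. Illustrative only;
 * it is not this package's browser implementation.
 */
class WindowEventConnectivityMonitor implements ConnectivityMonitor {
  private callbacks: ConnectivityMonitorCallback[] = [];
  private readonly onOnline = (): void => this.notify(NetworkStatus.AVAILABLE);
  private readonly onOffline = (): void =>
    this.notify(NetworkStatus.UNAVAILABLE);
  constructor() {
    window.addEventListener('online', this.onOnline);
    window.addEventListener('offline', this.onOffline);
  }
  addCallback(callback: ConnectivityMonitorCallback): void {
    this.callbacks.push(callback);
  }
  shutdown(): void {
    window.removeEventListener('online', this.onOnline);
    window.removeEventListener('offline', this.onOffline);
    this.callbacks = [];
  }
  private notify(status: NetworkStatus): void {
    this.callbacks.forEach(callback => callback(status));
  }
}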

View File

@ -0,0 +1,21 @@
/**
* @license
* Copyright 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ConnectivityMonitor, NetworkStatus } from './connectivity_monitor';
export declare class NoopConnectivityMonitor implements ConnectivityMonitor {
addCallback(callback: (status: NetworkStatus) => void): void;
shutdown(): void;
}

View File

@ -0,0 +1,44 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { CredentialsProvider } from '../api/credentials';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { Mutation, MutationResult } from '../model/mutation';
import { AsyncQueue } from '../util/async_queue';
import { Connection } from './connection';
import { WatchStreamListener, WriteStreamListener, PersistentListenStream, PersistentWriteStream } from './persistent_stream';
import { JsonProtoSerializer } from './serializer';
/**
* Datastore is a wrapper around the external Google Cloud Datastore grpc API,
* which provides an interface that is more convenient for the rest of the
* client SDK architecture to consume.
*/
export declare class Datastore {
private queue;
private connection;
private credentials;
private serializer;
constructor(queue: AsyncQueue, connection: Connection, credentials: CredentialsProvider, serializer: JsonProtoSerializer);
newPersistentWriteStream(listener: WriteStreamListener): PersistentWriteStream;
newPersistentWatchStream(listener: WatchStreamListener): PersistentListenStream;
commit(mutations: Mutation[]): Promise<MutationResult[]>;
lookup(keys: DocumentKey[]): Promise<MaybeDocument[]>;
/** Gets an auth token and invokes the provided RPC. */
private invokeRPC;
/** Gets an auth token and invokes the provided RPC with streamed results. */
private invokeStreamingRPC;
}

View File

@ -0,0 +1,21 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export declare class ExistenceFilter {
count: number;
constructor(count: number);
isEqual(other: ExistenceFilter): boolean;
}

View File

@ -0,0 +1,81 @@
/**
* @license
* Copyright 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { OnlineState } from '../core/types';
import { AsyncQueue } from '../util/async_queue';
import { FirestoreError } from '../util/error';
/**
* A component used by the RemoteStore to track the OnlineState (that is,
* whether or not the client as a whole should be considered to be online or
* offline), implementing the appropriate heuristics.
*
* In particular, when the client is trying to connect to the backend, we
* allow up to MAX_WATCH_STREAM_FAILURES within ONLINE_STATE_TIMEOUT_MS for
* a connection to succeed. If we have too many failures or the timeout elapses,
* then we set the OnlineState to Offline, and the client will behave as if
* it is offline (get()s will return cached data, etc.).
*/
export declare class OnlineStateTracker {
private asyncQueue;
private onlineStateHandler;
/** The current OnlineState. */
private state;
/**
* A count of consecutive failures to open the stream. If it reaches the
* maximum defined by MAX_WATCH_STREAM_FAILURES, we'll set the OnlineState to
* Offline.
*/
private watchStreamFailures;
/**
* A timer that elapses after ONLINE_STATE_TIMEOUT_MS, at which point we
* transition from OnlineState.Unknown to OnlineState.Offline without waiting
* for the stream to actually fail (MAX_WATCH_STREAM_FAILURES times).
*/
private onlineStateTimer;
/**
* Whether the client should log a warning message if it fails to connect to
* the backend (initially true, cleared after a successful stream, or if we've
* logged the message already).
*/
private shouldWarnClientIsOffline;
constructor(asyncQueue: AsyncQueue, onlineStateHandler: (onlineState: OnlineState) => void);
/**
* Called by RemoteStore when a watch stream is started (including on each
* backoff attempt).
*
* If this is the first attempt, it sets the OnlineState to Unknown and starts
* the onlineStateTimer.
*/
handleWatchStreamStart(): void;
/**
* Updates our OnlineState as appropriate after the watch stream reports a
* failure. The first failure moves us to the 'Unknown' state. We then may
* allow multiple failures (based on MAX_WATCH_STREAM_FAILURES) before we
* actually transition to the 'Offline' state.
*/
handleWatchStreamFailure(error: FirestoreError): void;
/**
* Explicitly sets the OnlineState to the specified state.
*
* Note that this resets our timers / failure counters, etc. used by our
* Offline heuristics, so must not be used in place of
* handleWatchStreamStart() and handleWatchStreamFailure().
*/
set(newState: OnlineState): void;
private setAndBroadcast;
private logClientOfflineWarningIfNecessary;
private clearOnlineStateTimer;
}
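/*
 * Illustrative usage sketch, not part of this declaration file: how a caller
 * in the spirit of RemoteStore might drive the tracker around a watch-stream
 * attempt. The surrounding control flow is an assumption.
 */
function onWatchStreamAttempt(tracker: OnlineStateTracker): void {
  // The first attempt sets OnlineState.Unknown and arms the online-state timer.
  tracker.handleWatchStreamStart();
}
function onWatchStreamError(tracker: OnlineStateTracker, error: FirestoreError): void {
  // After MAX_WATCH_STREAM_FAILURES consecutive failures (or the timer firing),
  // the tracker broadcasts OnlineState.Offline to its onlineStateHandler.
  tracker.handleWatchStreamFailure(error);
}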

View File

@ -0,0 +1,286 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { CredentialsProvider, Token } from '../api/credentials';
import { SnapshotVersion } from '../core/snapshot_version';
import { ProtoByteString, TargetId } from '../core/types';
import { TargetData } from '../local/target_data';
import { Mutation, MutationResult } from '../model/mutation';
import * as api from '../protos/firestore_proto_api';
import { AsyncQueue, TimerId } from '../util/async_queue';
import { FirestoreError } from '../util/error';
import { ExponentialBackoff } from './backoff';
import { Connection, Stream } from './connection';
import { JsonProtoSerializer } from './serializer';
import { WatchChange } from './watch_change';
export interface WriteRequest extends api.WriteRequest {
database?: string;
}
/**
 * Provides a common interface that is shared by the stream event listeners
 * used by the concrete implementation classes.
*/
export interface PersistentStreamListener {
/**
     * Called after the stream has been established and can accept outgoing
     * messages.
*/
onOpen: () => Promise<void>;
/**
* Called after the stream has closed. If there was an error, the
* FirestoreError will be set.
*/
onClose: (err?: FirestoreError) => Promise<void>;
}
/**
* A PersistentStream is an abstract base class that represents a streaming RPC
 * to the Firestore backend. It's built on top of the connection's own support
* for streaming RPCs, and adds several critical features for our clients:
*
* - Exponential backoff on failure
* - Authentication via CredentialsProvider
* - Dispatching all callbacks into the shared worker queue
* - Closing idle streams after 60 seconds of inactivity
*
* Subclasses of PersistentStream implement serialization of models to and
* from the JSON representation of the protocol buffers for a specific
* streaming RPC.
*
* ## Starting and Stopping
*
* Streaming RPCs are stateful and need to be start()ed before messages can
* be sent and received. The PersistentStream will call the onOpen() function
* of the listener once the stream is ready to accept requests.
*
* Should a start() fail, PersistentStream will call the registered onClose()
* listener with a FirestoreError indicating what went wrong.
*
* A PersistentStream can be started and stopped repeatedly.
*
* Generic types:
* SendType: The type of the outgoing message of the underlying
* connection stream
* ReceiveType: The type of the incoming message of the underlying
* connection stream
* ListenerType: The type of the listener that will be used for callbacks
*/
export declare abstract class PersistentStream<SendType, ReceiveType, ListenerType extends PersistentStreamListener> {
private queue;
private idleTimerId;
protected connection: Connection;
private credentialsProvider;
protected listener: ListenerType;
private state;
/**
* A close count that's incremented every time the stream is closed; used by
* getCloseGuardedDispatcher() to invalidate callbacks that happen after
* close.
*/
private closeCount;
private idleTimer;
private stream;
protected backoff: ExponentialBackoff;
constructor(queue: AsyncQueue, connectionTimerId: TimerId, idleTimerId: TimerId, connection: Connection, credentialsProvider: CredentialsProvider, listener: ListenerType);
/**
* Returns true if start() has been called and no error has occurred. True
* indicates the stream is open or in the process of opening (which
* encompasses respecting backoff, getting auth tokens, and starting the
* actual RPC). Use isOpen() to determine if the stream is open and ready for
* outbound requests.
*/
isStarted(): boolean;
/**
* Returns true if the underlying RPC is open (the onOpen() listener has been
* called) and the stream is ready for outbound requests.
*/
isOpen(): boolean;
/**
* Starts the RPC. Only allowed if isStarted() returns false. The stream is
* not immediately ready for use: onOpen() will be invoked when the RPC is
* ready for outbound requests, at which point isOpen() will return true.
*
* When start returns, isStarted() will return true.
*/
start(): void;
/**
* Stops the RPC. This call is idempotent and allowed regardless of the
* current isStarted() state.
*
* When stop returns, isStarted() and isOpen() will both return false.
*/
stop(): Promise<void>;
/**
* After an error the stream will usually back off on the next attempt to
* start it. If the error warrants an immediate restart of the stream, the
* sender can use this to indicate that the receiver should not back off.
*
* Each error will call the onClose() listener. That function can decide to
* inhibit backoff if required.
*/
inhibitBackoff(): void;
/**
* Marks this stream as idle. If no further actions are performed on the
* stream for one minute, the stream will automatically close itself and
* notify the stream's onClose() handler with Status.OK. The stream will then
* be in a !isStarted() state, requiring the caller to start the stream again
* before further use.
*
* Only streams that are in state 'Open' can be marked idle, as all other
* states imply pending network operations.
*/
markIdle(): void;
/** Sends a message to the underlying stream. */
protected sendRequest(msg: SendType): void;
/** Called by the idle timer when the stream should close due to inactivity. */
private handleIdleCloseTimer;
/** Marks the stream as active again. */
private cancelIdleCheck;
/**
* Closes the stream and cleans up as necessary:
*
* * closes the underlying GRPC stream;
* * calls the onClose handler with the given 'error';
* * sets internal stream state to 'finalState';
* * adjusts the backoff timer based on the error
*
* A new stream can be opened by calling start().
*
* @param finalState the intended state of the stream after closing.
* @param error the error the connection was closed with.
*/
private close;
/**
* Can be overridden to perform additional cleanup before the stream is closed.
* Calling super.tearDown() is not required.
*/
protected tearDown(): void;
/**
* Used by subclasses to start the concrete RPC and return the underlying
* connection stream.
*/
protected abstract startRpc(token: Token | null): Stream<SendType, ReceiveType>;
/**
* Called after the stream has received a message. The function will be
* called on the right queue and must return a Promise.
* @param message The message received from the stream.
*/
protected abstract onMessage(message: ReceiveType): Promise<void>;
private auth;
private startStream;
private performBackoff;
handleStreamClose(error?: FirestoreError): Promise<void>;
/**
* Returns a "dispatcher" function that dispatches operations onto the
* AsyncQueue but only runs them if closeCount remains unchanged. This allows
* us to turn auth / stream callbacks into no-ops if the stream is closed /
* re-opened, etc.
*/
private getCloseGuardedDispatcher;
}
/** Listener for the PersistentWatchStream */
export interface WatchStreamListener extends PersistentStreamListener {
/**
* Called on a watchChange. The snapshot parameter will be MIN if the watch
* change did not have a snapshot associated with it.
*/
onWatchChange: (watchChange: WatchChange, snapshot: SnapshotVersion) => Promise<void>;
}
/**
* A PersistentStream that implements the Listen RPC.
*
* Once the Listen stream has called the onOpen() listener, any number of
* listen() and unlisten() calls can be made to control what changes will be
* sent from the server for ListenResponses.
*/
export declare class PersistentListenStream extends PersistentStream<api.ListenRequest, api.ListenResponse, WatchStreamListener> {
private serializer;
constructor(queue: AsyncQueue, connection: Connection, credentials: CredentialsProvider, serializer: JsonProtoSerializer, listener: WatchStreamListener);
protected startRpc(token: Token | null): Stream<api.ListenRequest, api.ListenResponse>;
protected onMessage(watchChangeProto: api.ListenResponse): Promise<void>;
/**
* Registers interest in the results of the given target. If the target
* includes a resumeToken it will be included in the request. Results that
* affect the target will be streamed back as WatchChange messages that
* reference the targetId.
*/
watch(targetData: TargetData): void;
/**
* Unregisters interest in the results of the target associated with the
* given targetId.
*/
unwatch(targetId: TargetId): void;
}
/** Listener for the PersistentWriteStream */
export interface WriteStreamListener extends PersistentStreamListener {
/**
* Called by the PersistentWriteStream upon a successful handshake response
* from the server, which is the receiver's cue to send any pending writes.
*/
onHandshakeComplete: () => Promise<void>;
/**
* Called by the PersistentWriteStream upon receiving a StreamingWriteResponse
* from the server that contains a mutation result.
*/
onMutationResult: (commitVersion: SnapshotVersion, results: MutationResult[]) => Promise<void>;
}
/**
* A Stream that implements the Write RPC.
*
* The Write RPC requires the caller to maintain special streamToken
* state in between calls, to help the server understand which responses the
* client has processed by the time the next request is made. Every response
* will contain a streamToken; this value must be passed to the next
* request.
*
* After calling start() on this stream, the next request must be a handshake,
* containing whatever streamToken is on hand. Once a response to this
* request is received, all pending mutations may be submitted. When
* submitting multiple batches of mutations at the same time, it's
* okay to use the same streamToken for the calls to writeMutations.
*
* TODO(b/33271235): Use proto types
*/
export declare class PersistentWriteStream extends PersistentStream<api.WriteRequest, api.WriteResponse, WriteStreamListener> {
private serializer;
private handshakeComplete_;
constructor(queue: AsyncQueue, connection: Connection, credentials: CredentialsProvider, serializer: JsonProtoSerializer, listener: WriteStreamListener);
/**
* The last received stream token from the server, used to acknowledge which
* responses the client has processed. Stream tokens are opaque checkpoint
* markers whose only real value is their inclusion in the next request.
*
* PersistentWriteStream manages propagating this value from responses to the
* next request.
*/
lastStreamToken: ProtoByteString;
/**
* Tracks whether or not a handshake has been successfully exchanged and
* the stream is ready to accept mutations.
*/
get handshakeComplete(): boolean;
start(): void;
protected tearDown(): void;
protected startRpc(token: Token | null): Stream<api.WriteRequest, api.WriteResponse>;
protected onMessage(responseProto: api.WriteResponse): Promise<void>;
/**
* Sends an initial streamToken to the server, performing the handshake
* required to make the StreamingWrite RPC work. Subsequent
     * calls should wait until onHandshakeComplete has been called.
*/
writeHandshake(): void;
/** Sends a group of mutations to the Firestore backend to apply. */
writeMutations(mutations: Mutation[]): void;
}
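/*
 * Illustrative usage sketch, not part of this declaration file: the handshake
 * protocol described above, expressed as a WriteStreamListener. Where the
 * pending mutations come from, and deferring access to the stream through a
 * getter function, are assumptions for illustration.
 */
function createWriteListener(
  stream: () => PersistentWriteStream,
  pendingMutations: () => Mutation[]
): WriteStreamListener {
  return {
    onOpen: async () => {
      // The first request after start() must be the handshake.
      stream().writeHandshake();
    },
    onHandshakeComplete: async () => {
      // Only now is the stream ready to accept mutations.
      const batch = pendingMutations();
      if (batch.length > 0) {
        stream().writeMutations(batch);
      }
    },
    onMutationResult: async (commitVersion: SnapshotVersion, results: MutationResult[]) => {
      // Acknowledge the results in order; omitted in this sketch.
    },
    onClose: async (err?: FirestoreError) => {
      // Back off and restart via start() when appropriate; omitted in this sketch.
    }
  };
}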

View File

@ -0,0 +1,156 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { ProtoByteString, TargetId } from '../core/types';
import { DocumentKeySet, MaybeDocumentMap } from '../model/collections';
import { SortedSet } from '../util/sorted_set';
/**
* An event from the RemoteStore. It is split into targetChanges (changes to the
* state or the set of documents in our watched targets) and documentUpdates
* (changes to the actual documents).
*/
export declare class RemoteEvent {
/**
* The snapshot version this event brings us up to, or MIN if not set.
*/
readonly snapshotVersion: SnapshotVersion;
/**
* A map from target to changes to the target. See TargetChange.
*/
readonly targetChanges: {
[targetId: number]: TargetChange;
};
/**
* A set of targets that is known to be inconsistent. Listens for these
* targets should be re-established without resume tokens.
*/
readonly targetMismatches: SortedSet<TargetId>;
/**
* A set of which documents have changed or been deleted, along with the
* doc's new values (if not deleted).
*/
readonly documentUpdates: MaybeDocumentMap;
/**
* A set of which document updates are due only to limbo resolution targets.
*/
readonly resolvedLimboDocuments: DocumentKeySet;
constructor(
/**
* The snapshot version this event brings us up to, or MIN if not set.
*/
snapshotVersion: SnapshotVersion,
/**
* A map from target to changes to the target. See TargetChange.
*/
targetChanges: {
[targetId: number]: TargetChange;
},
/**
* A set of targets that is known to be inconsistent. Listens for these
* targets should be re-established without resume tokens.
*/
targetMismatches: SortedSet<TargetId>,
/**
* A set of which documents have changed or been deleted, along with the
* doc's new values (if not deleted).
*/
documentUpdates: MaybeDocumentMap,
/**
* A set of which document updates are due only to limbo resolution targets.
*/
resolvedLimboDocuments: DocumentKeySet);
/**
* HACK: Views require RemoteEvents in order to determine whether the view is
* CURRENT, but secondary tabs don't receive remote events. So this method is
* used to create a synthesized RemoteEvent that can be used to apply a
* CURRENT status change to a View, for queries executed in a different tab.
*/
static createSynthesizedRemoteEventForCurrentChange(targetId: TargetId, current: boolean): RemoteEvent;
}
/**
* A TargetChange specifies the set of changes for a specific target as part of
* a RemoteEvent. These changes track which documents are added, modified or
* removed, as well as the target's resume token and whether the target is
* marked CURRENT.
* The actual changes *to* documents are not part of the TargetChange since
* documents may be part of multiple targets.
*/
export declare class TargetChange {
/**
* An opaque, server-assigned token that allows watching a query to be resumed
* after disconnecting without retransmitting all the data that matches the
* query. The resume token essentially identifies a point in time from which
* the server should resume sending results.
*/
readonly resumeToken: ProtoByteString;
/**
* The "current" (synced) status of this target. Note that "current"
* has special meaning in the RPC protocol that implies that a target is
* both up-to-date and consistent with the rest of the watch stream.
*/
readonly current: boolean;
/**
* The set of documents that were newly assigned to this target as part of
* this remote event.
*/
readonly addedDocuments: DocumentKeySet;
/**
* The set of documents that were already assigned to this target but received
* an update during this remote event.
*/
readonly modifiedDocuments: DocumentKeySet;
/**
* The set of documents that were removed from this target as part of this
* remote event.
*/
readonly removedDocuments: DocumentKeySet;
constructor(
/**
* An opaque, server-assigned token that allows watching a query to be resumed
* after disconnecting without retransmitting all the data that matches the
* query. The resume token essentially identifies a point in time from which
* the server should resume sending results.
*/
resumeToken: ProtoByteString,
/**
* The "current" (synced) status of this target. Note that "current"
* has special meaning in the RPC protocol that implies that a target is
* both up-to-date and consistent with the rest of the watch stream.
*/
current: boolean,
/**
* The set of documents that were newly assigned to this target as part of
* this remote event.
*/
addedDocuments: DocumentKeySet,
/**
* The set of documents that were already assigned to this target but received
* an update during this remote event.
*/
modifiedDocuments: DocumentKeySet,
/**
* The set of documents that were removed from this target as part of this
* remote event.
*/
removedDocuments: DocumentKeySet);
/**
     * This method is used to create a synthesized TargetChange that can be used to
* apply a CURRENT status change to a View (for queries executed in a different
* tab) or for new queries (to raise snapshots with correct CURRENT status).
*/
static createSynthesizedTargetChangeForCurrentChange(targetId: TargetId, current: boolean): TargetChange;
}
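/*
 * Illustrative usage sketch, not part of this declaration file: synthesize a
 * CURRENT-only event for a secondary tab, per the HACK note above. The helper
 * name is an assumption.
 */
function markTargetCurrent(targetId: TargetId): RemoteEvent {
  return RemoteEvent.createSynthesizedRemoteEventForCurrentChange(targetId, /* current= */ true);
}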

View File

@ -0,0 +1,197 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Transaction } from '../core/transaction';
import { OnlineState, TargetId } from '../core/types';
import { LocalStore } from '../local/local_store';
import { TargetData } from '../local/target_data';
import { DocumentKeySet } from '../model/collections';
import { AsyncQueue } from '../util/async_queue';
import { ConnectivityMonitor } from './connectivity_monitor';
import { Datastore } from './datastore';
import { RemoteSyncer } from './remote_syncer';
import { TargetMetadataProvider } from './watch_change';
/**
* RemoteStore - An interface to remotely stored data, basically providing a
* wrapper around the Datastore that is more reliable for the rest of the
* system.
*
* RemoteStore is responsible for maintaining the connection to the server.
* - maintaining a list of active listens.
* - reconnecting when the connection is dropped.
* - resuming all the active listens on reconnect.
*
* RemoteStore handles all incoming events from the Datastore.
* - listening to the watch stream and repackaging the events as RemoteEvents
* - notifying SyncEngine of any changes to the active listens.
*
* RemoteStore takes writes from other components and handles them reliably.
* - pulling pending mutations from LocalStore and sending them to Datastore.
* - retrying mutations that failed because of network problems.
* - acking mutations to the SyncEngine once they are accepted or rejected.
*/
export declare class RemoteStore implements TargetMetadataProvider {
/**
* The local store, used to fill the write pipeline with outbound mutations.
*/
private localStore;
/** The client-side proxy for interacting with the backend. */
private datastore;
/**
* A list of up to MAX_PENDING_WRITES writes that we have fetched from the
     * LocalStore via fillWritePipeline() and have sent or will send to the write
* stream.
*
* Whenever writePipeline.length > 0 the RemoteStore will attempt to start or
* restart the write stream. When the stream is established the writes in the
* pipeline will be sent in order.
*
* Writes remain in writePipeline until they are acknowledged by the backend
* and thus will automatically be re-sent if the stream is interrupted /
* restarted before they're acknowledged.
*
* Write responses from the backend are linked to their originating request
* purely based on order, and so we can just shift() writes from the front of
* the writePipeline as we receive responses.
*/
private writePipeline;
/**
     * A mapping of watched targets that the client cares about tracking and for
     * which the user has explicitly called 'listen'.
*
* These targets may or may not have been sent to or acknowledged by the
* server. On re-establishing the listen stream, these targets should be sent
* to the server. The targets removed with unlistens are removed eagerly
* without waiting for confirmation from the listen stream.
*/
private listenTargets;
private connectivityMonitor;
private watchStream;
private writeStream;
private watchChangeAggregator;
/**
* Set to true by enableNetwork() and false by disableNetwork() and indicates
* the user-preferred network state.
*/
private networkEnabled;
private isPrimary;
private onlineStateTracker;
constructor(
/**
* The local store, used to fill the write pipeline with outbound mutations.
*/
localStore: LocalStore,
/** The client-side proxy for interacting with the backend. */
datastore: Datastore, asyncQueue: AsyncQueue, onlineStateHandler: (onlineState: OnlineState) => void, connectivityMonitor: ConnectivityMonitor);
/**
* SyncEngine to notify of watch and write events. This must be set
* immediately after construction.
*/
syncEngine: RemoteSyncer;
/**
* Starts up the remote store, creating streams, restoring state from
* LocalStore, etc.
*/
start(): Promise<void>;
/** Re-enables the network. Idempotent. */
enableNetwork(): Promise<void>;
/**
* Temporarily disables the network. The network can be re-enabled using
* enableNetwork().
*/
disableNetwork(): Promise<void>;
private disableNetworkInternal;
shutdown(): Promise<void>;
/**
     * Starts a new listen for the given target. Uses a resume token if provided. It
     * is a no-op if the target of the given `TargetData` is already being listened to.
*/
listen(targetData: TargetData): void;
/**
* Removes the listen from server. It is a no-op if the given target id is
* not being listened to.
*/
unlisten(targetId: TargetId): void;
/** {@link TargetMetadataProvider.getTargetDataForTarget} */
getTargetDataForTarget(targetId: TargetId): TargetData | null;
/** {@link TargetMetadataProvider.getRemoteKeysForTarget} */
getRemoteKeysForTarget(targetId: TargetId): DocumentKeySet;
/**
     * We need to increment the expected number of pending responses we're due
     * from watch so we wait for the ack before we process any messages from
     * this target.
*/
private sendWatchRequest;
/**
* We need to increment the expected number of pending responses we're due
* from watch so we wait for the removal on the server before we process any
* messages from this target.
*/
private sendUnwatchRequest;
private startWatchStream;
/**
* Returns whether the watch stream should be started because it's necessary
* and has not yet been started.
*/
private shouldStartWatchStream;
canUseNetwork(): boolean;
private cleanUpWatchStreamState;
private onWatchStreamOpen;
private onWatchStreamClose;
private onWatchStreamChange;
/**
* Takes a batch of changes from the Datastore, repackages them as a
* RemoteEvent, and passes that on to the listener, which is typically the
* SyncEngine.
*/
private raiseWatchSnapshot;
/** Handles an error on a target */
private handleTargetError;
/**
* Attempts to fill our write pipeline with writes from the LocalStore.
*
* Called internally to bootstrap or refill the write pipeline and by
* SyncEngine whenever there are new mutations to process.
*
* Starts the write stream if necessary.
*/
fillWritePipeline(): Promise<void>;
/**
* Returns true if we can add to the write pipeline (i.e. the network is
* enabled and the write pipeline is not full).
*/
private canAddToWritePipeline;
outstandingWrites(): number;
/**
* Queues additional writes to be sent to the write stream, sending them
* immediately if the write stream is established.
*/
private addToWritePipeline;
private shouldStartWriteStream;
private startWriteStream;
private onWriteStreamOpen;
private onWriteHandshakeComplete;
private onMutationResult;
private onWriteStreamClose;
private handleHandshakeError;
private handleWriteError;
createTransaction(): Transaction;
private restartNetwork;
handleCredentialChange(): Promise<void>;
/**
* Toggles the network state when the client gains or loses its primary lease.
*/
applyPrimaryState(isPrimary: boolean): Promise<void>;
}
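/*
 * Illustrative usage sketch, not part of this declaration file: the start /
 * listen lifecycle described above. Where the TargetData comes from (normally
 * the LocalStore / SyncEngine) is an assumption.
 */
async function startListening(remoteStore: RemoteStore, targetData: TargetData): Promise<void> {
  // start() restores state from the LocalStore and enables the network.
  await remoteStore.start();
  // listen() is a no-op if this target is already being listened to.
  remoteStore.listen(targetData);
}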

View File

@ -0,0 +1,62 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { BatchId, TargetId } from '../core/types';
import { DocumentKeySet } from '../model/collections';
import { MutationBatchResult } from '../model/mutation_batch';
import { FirestoreError } from '../util/error';
import { RemoteEvent } from './remote_event';
/**
* An interface that describes the actions the RemoteStore needs to perform on
* a cooperating synchronization engine.
*/
export interface RemoteSyncer {
/**
* Applies one remote event to the sync engine, notifying any views of the
* changes, and releasing any pending mutation batches that would become
* visible because of the snapshot version the remote event contains.
*/
applyRemoteEvent(remoteEvent: RemoteEvent): Promise<void>;
/**
* Rejects the listen for the given targetID. This can be triggered by the
* backend for any active target.
*
* @param targetId The targetID corresponds to one previously initiated by the
* user as part of TargetData passed to listen() on RemoteStore.
* @param error A description of the condition that has forced the rejection.
* Nearly always this will be an indication that the user is no longer
* authorized to see the data matching the target.
*/
rejectListen(targetId: TargetId, error: FirestoreError): Promise<void>;
/**
* Applies the result of a successful write of a mutation batch to the sync
* engine, emitting snapshots in any views that the mutation applies to, and
* removing the batch from the mutation queue.
*/
applySuccessfulWrite(result: MutationBatchResult): Promise<void>;
/**
* Rejects the batch, removing the batch from the mutation queue, recomputing
     * the local view of any documents affected by the batch, and then emitting
* snapshots with the reverted value.
*/
rejectFailedWrite(batchId: BatchId, error: FirestoreError): Promise<void>;
/**
* Returns the set of remote document keys for the given target ID. This list
* includes the documents that were assigned to the target when we received
* the last snapshot.
*/
getRemoteKeysForTarget(targetId: TargetId): DocumentKeySet;
}

View File

@ -0,0 +1,74 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Code } from '../util/error';
/**
* Determines whether an error code represents a permanent error when received
* in response to a non-write operation.
*
* See isPermanentWriteError for classifying write errors.
*/
export declare function isPermanentError(code: Code): boolean;
/**
* Determines whether an error code represents a permanent error when received
* in response to a write operation.
*
* Write operations must be handled specially because as of b/119437764, ABORTED
* errors on the write stream should be retried too (even though ABORTED errors
* are not generally retryable).
*
* Note that during the initial handshake on the write stream an ABORTED error
* signals that we should discard our stream token (i.e. it is permanent). This
* means a handshake error should be classified with isPermanentError, above.
*/
export declare function isPermanentWriteError(code: Code): boolean;
/**
* Maps an error Code from a GRPC status identifier like 'NOT_FOUND'.
*
* @returns The Code equivalent to the given status string or undefined if
* there is no match.
*/
export declare function mapCodeFromRpcStatus(status: string): Code | undefined;
/**
* Maps an error Code from GRPC status code number, like 0, 1, or 14. These
* are not the same as HTTP status codes.
*
* @returns The Code equivalent to the given GRPC status code. Fails if there
* is no match.
*/
export declare function mapCodeFromRpcCode(code: number | undefined): Code;
/**
* Maps an RPC code from a Code. This is the reverse operation from
* mapCodeFromRpcCode and should really only be used in tests.
*/
export declare function mapRpcCodeFromCode(code: Code | undefined): number;
/**
* Converts an HTTP Status Code to the equivalent error code.
*
* @param status An HTTP Status Code, like 200, 404, 503, etc.
* @returns The equivalent Code. Unknown status codes are mapped to
* Code.UNKNOWN.
*/
export declare function mapCodeFromHttpStatus(status: number): Code;
/**
* Converts an HTTP response's error status to the equivalent error code.
*
* @param status An HTTP error response status ("FAILED_PRECONDITION",
* "UNKNOWN", etc.)
* @returns The equivalent Code. Non-matching responses are mapped to
* Code.UNKNOWN.
*/
export declare function mapCodeFromHttpResponseErrorStatus(status: string): Code;
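/*
 * Illustrative usage sketch, not part of this declaration file: classify a
 * write-stream failure with the helpers above. The isHandshake flag stands in
 * for the caller's own state and is an assumption.
 */
function shouldRetryWrite(rpcCode: number | undefined, isHandshake: boolean): boolean {
  const code = mapCodeFromRpcCode(rpcCode);
  // Handshake errors use the general classification; for actual writes ABORTED
  // is retryable, which is what isPermanentWriteError encodes.
  const permanent = isHandshake ? isPermanentError(code) : isPermanentWriteError(code);
  return !permanent;
}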

View File

@ -0,0 +1,160 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { DatabaseId } from '../core/database_info';
import { Direction, FieldFilter, Filter, Operator, OrderBy } from '../core/query';
import { SnapshotVersion } from '../core/snapshot_version';
import { Target } from '../core/target';
import { TargetData } from '../local/target_data';
import { Document, MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import * as fieldValue from '../model/field_value';
import { FieldMask, Mutation, MutationResult } from '../model/mutation';
import { FieldPath, ResourcePath } from '../model/path';
import * as api from '../protos/firestore_proto_api';
import { FirestoreError } from '../util/error';
import { WatchChange, WatchTargetChangeState } from './watch_change';
export interface SerializerOptions {
/**
* The serializer supports both Protobuf.js and Proto3 JSON formats. By
* setting this flag to true, the serializer will use the Proto3 JSON format.
*
* For a description of the Proto3 JSON format check
* https://developers.google.com/protocol-buffers/docs/proto3#json
*/
useProto3Json: boolean;
}
/**
* Generates JsonObject values for the Datastore API suitable for sending to
* either GRPC stub methods or via the JSON/HTTP REST API.
* TODO(klimt): We can remove the databaseId argument if we keep the full
* resource name in documents.
*/
export declare class JsonProtoSerializer {
private databaseId;
private options;
constructor(databaseId: DatabaseId, options: SerializerOptions);
private emptyByteString;
private unsafeCastProtoByteString;
fromRpcStatus(status: api.Status): FirestoreError;
/**
* Returns a value for a number (or null) that's appropriate to put into
* a google.protobuf.Int32Value proto.
* DO NOT USE THIS FOR ANYTHING ELSE.
* This method cheats. It's typed as returning "number" because that's what
* our generated proto interfaces say Int32Value must be. But GRPC actually
* expects a { value: <number> } struct.
*/
private toInt32Value;
/**
* Returns a number (or null) from a google.protobuf.Int32Value proto.
* DO NOT USE THIS FOR ANYTHING ELSE.
* This method cheats. It's typed as accepting "number" because that's what
* our generated proto interfaces say Int32Value must be, but it actually
* accepts { value: number } to match our serialization in toInt32Value().
*/
private fromInt32Value;
/**
* Returns a value for a Date that's appropriate to put into a proto.
* DO NOT USE THIS FOR ANYTHING ELSE.
* This method cheats. It's typed as returning "string" because that's what
* our generated proto interfaces say dates must be. But it's easier and safer
* to actually return a Timestamp proto.
*/
private toTimestamp;
private fromTimestamp;
private fromIso8601String;
/**
* Returns a value for bytes that's appropriate to put in a proto.
* DO NOT USE THIS FOR ANYTHING ELSE.
* This method cheats. It's typed as returning "string" because that's what
* our generated proto interfaces say bytes must be. But it should return
     * a Uint8Array in Node.
*/
private toBytes;
/**
* Parse the blob from the protos into the internal Blob class. Note that the
* typings assume all blobs are strings, but they are actually Uint8Arrays
* on Node.
*/
private fromBlob;
toVersion(version: SnapshotVersion): string;
fromVersion(version: string): SnapshotVersion;
toResourceName(databaseId: DatabaseId, path: ResourcePath): string;
fromResourceName(name: string): ResourcePath;
toName(key: DocumentKey): string;
fromName(name: string): DocumentKey;
toQueryPath(path: ResourcePath): string;
fromQueryPath(name: string): ResourcePath;
get encodedDatabaseId(): string;
private fullyQualifiedPrefixPath;
private extractLocalPathFromResourceName;
private isValidResourceName;
toValue(val: fieldValue.FieldValue): api.Value;
fromValue(obj: api.Value): fieldValue.FieldValue;
/** Creates an api.Document from key and fields (but no create/update time) */
toMutationDocument(key: DocumentKey, fields: fieldValue.ObjectValue): api.Document;
toDocument(document: Document): api.Document;
fromDocument(document: api.Document, hasCommittedMutations?: boolean): Document;
toFields(fields: fieldValue.ObjectValue): {
[key: string]: api.Value;
};
fromFields(object: {}): fieldValue.ObjectValue;
toMapValue(map: fieldValue.ObjectValue): api.MapValue;
toArrayValue(array: fieldValue.ArrayValue): api.ArrayValue;
private fromFound;
private fromMissing;
fromMaybeDocument(result: api.BatchGetDocumentsResponse): MaybeDocument;
private toWatchTargetChangeState;
toTestWatchChange(watchChange: WatchChange): api.ListenResponse;
fromWatchChange(change: api.ListenResponse): WatchChange;
fromWatchTargetChangeState(state: api.TargetChangeTargetChangeType): WatchTargetChangeState;
versionFromListenResponse(change: api.ListenResponse): SnapshotVersion;
toMutation(mutation: Mutation): api.Write;
fromMutation(proto: api.Write): Mutation;
private toPrecondition;
private fromPrecondition;
private fromWriteResult;
fromWriteResults(protos: api.WriteResult[] | undefined, commitTime?: string): MutationResult[];
private toFieldTransform;
private fromFieldTransform;
toDocumentsTarget(target: Target): api.DocumentsTarget;
fromDocumentsTarget(documentsTarget: api.DocumentsTarget): Target;
toQueryTarget(target: Target): api.QueryTarget;
fromQueryTarget(target: api.QueryTarget): Target;
toListenRequestLabels(targetData: TargetData): api.ApiClientObjectMap<string> | null;
private toLabel;
toTarget(targetData: TargetData): api.Target;
private toFilter;
private fromFilter;
private toOrder;
private fromOrder;
private toCursor;
private fromCursor;
toDirection(dir: Direction): api.OrderDirection;
fromDirection(dir: api.OrderDirection | undefined): Direction | undefined;
toOperatorName(op: Operator): api.FieldFilterOp;
fromOperatorName(op: api.FieldFilterOp): Operator;
toFieldPathReference(path: FieldPath): api.FieldReference;
fromFieldPathReference(fieldReference: api.FieldReference): FieldPath;
toPropertyOrder(orderBy: OrderBy): api.Order;
fromPropertyOrder(orderBy: api.Order): OrderBy;
fromFieldFilter(filter: api.Filter): Filter;
toUnaryOrFieldFilter(filter: FieldFilter): api.Filter;
fromUnaryFilter(filter: api.Filter): Filter;
toDocumentMask(fieldMask: FieldMask): api.DocumentMask;
fromDocumentMask(proto: api.DocumentMask): FieldMask;
}
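/*
 * Illustrative usage sketch, not part of this declaration file: round-trip a
 * mutation through the wire format, as the write stream and tests do. The
 * helper name is an assumption.
 */
function roundTripMutation(serializer: JsonProtoSerializer, mutation: Mutation): Mutation {
  const proto: api.Write = serializer.toMutation(mutation);
  return serializer.fromMutation(proto);
}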

View File

@ -0,0 +1,42 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FirestoreError } from '../util/error';
import { Stream } from './connection';
/**
* Provides a simple helper class that implements the Stream interface to
* bridge to other implementations that are streams but do not implement the
* interface. The stream callbacks are invoked with the callOn... methods.
*/
export declare class StreamBridge<I, O> implements Stream<I, O> {
private wrappedOnOpen;
private wrappedOnClose;
private wrappedOnMessage;
private sendFn;
private closeFn;
constructor(args: {
sendFn: (msg: I) => void;
closeFn: () => void;
});
onOpen(callback: () => void): void;
onClose(callback: (err?: FirestoreError) => void): void;
onMessage(callback: (msg: O) => void): void;
close(): void;
send(msg: I): void;
callOnOpen(): void;
callOnClose(err?: FirestoreError): void;
callOnMessage(msg: O): void;
}
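/*
 * Illustrative usage sketch, not part of this declaration file: adapt a
 * WebSocket-like transport to the Stream interface. The `socket` shape and the
 * JSON framing are assumptions.
 */
function bridgeSocket<I, O>(socket: {
  send(data: string): void;
  close(): void;
  onmessage: ((event: { data: string }) => void) | null;
}): StreamBridge<I, O> {
  const bridge = new StreamBridge<I, O>({
    sendFn: (msg: I) => socket.send(JSON.stringify(msg)),
    closeFn: () => socket.close()
  });
  // Forward incoming frames to whichever callback was registered via onMessage().
  socket.onmessage = event => bridge.callOnMessage(JSON.parse(event.data) as O);
  return bridge;
}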

View File

@ -0,0 +1,208 @@
/**
* @license
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SnapshotVersion } from '../core/snapshot_version';
import { ProtoByteString, TargetId } from '../core/types';
import { TargetData } from '../local/target_data';
import { DocumentKeySet } from '../model/collections';
import { MaybeDocument } from '../model/document';
import { DocumentKey } from '../model/document_key';
import { FirestoreError } from '../util/error';
import { ExistenceFilter } from './existence_filter';
import { RemoteEvent } from './remote_event';
/**
* Internal representation of the watcher API protocol buffers.
*/
export declare type WatchChange = DocumentWatchChange | WatchTargetChange | ExistenceFilterChange;
/**
* Represents a changed document and a list of target ids to which this change
* applies.
*
* If document has been deleted NoDocument will be provided.
*/
export declare class DocumentWatchChange {
/** The new document applies to all of these targets. */
updatedTargetIds: TargetId[];
/** The new document is removed from all of these targets. */
removedTargetIds: TargetId[];
/** The key of the document for this change. */
key: DocumentKey;
/**
* The new document or NoDocument if it was deleted. Is null if the
* document went out of view without the server sending a new document.
*/
newDoc: MaybeDocument | null;
constructor(
/** The new document applies to all of these targets. */
updatedTargetIds: TargetId[],
/** The new document is removed from all of these targets. */
removedTargetIds: TargetId[],
/** The key of the document for this change. */
key: DocumentKey,
/**
* The new document or NoDocument if it was deleted. Is null if the
* document went out of view without the server sending a new document.
*/
newDoc: MaybeDocument | null);
}
export declare class ExistenceFilterChange {
targetId: TargetId;
existenceFilter: ExistenceFilter;
constructor(targetId: TargetId, existenceFilter: ExistenceFilter);
}
export declare enum WatchTargetChangeState {
NoChange = 0,
Added = 1,
Removed = 2,
Current = 3,
Reset = 4
}
export declare class WatchTargetChange {
/** What kind of change occurred to the watch target. */
state: WatchTargetChangeState;
/** The target IDs that were added/removed/set. */
targetIds: TargetId[];
/**
* An opaque, server-assigned token that allows watching a target to be
* resumed after disconnecting without retransmitting all the data that
* matches the target. The resume token essentially identifies a point in
* time from which the server should resume sending results.
*/
resumeToken: ProtoByteString;
/** An RPC error indicating why the watch failed. */
cause: FirestoreError | null;
constructor(
/** What kind of change occurred to the watch target. */
state: WatchTargetChangeState,
/** The target IDs that were added/removed/set. */
targetIds: TargetId[],
/**
* An opaque, server-assigned token that allows watching a target to be
* resumed after disconnecting without retransmitting all the data that
* matches the target. The resume token essentially identifies a point in
* time from which the server should resume sending results.
*/
resumeToken?: ProtoByteString,
/** An RPC error indicating why the watch failed. */
cause?: FirestoreError | null);
}
/**
* Interface implemented by RemoteStore to expose target metadata to the
* WatchChangeAggregator.
*/
export interface TargetMetadataProvider {
/**
* Returns the set of remote document keys for the given target ID as of the
* last raised snapshot.
*/
getRemoteKeysForTarget(targetId: TargetId): DocumentKeySet;
/**
* Returns the TargetData for an active target ID or 'null' if this target
* has become inactive
*/
getTargetDataForTarget(targetId: TargetId): TargetData | null;
}
/**
* A helper class to accumulate watch changes into a RemoteEvent.
*/
export declare class WatchChangeAggregator {
private metadataProvider;
constructor(metadataProvider: TargetMetadataProvider);
/** The internal state of all tracked targets. */
private targetStates;
/** Keeps track of the documents to update since the last raised snapshot. */
private pendingDocumentUpdates;
/** A mapping of document keys to their set of target IDs. */
private pendingDocumentTargetMapping;
/**
* A list of targets with existence filter mismatches. These targets are
     * known to be inconsistent and their listens need to be re-established by
* RemoteStore.
*/
private pendingTargetResets;
/**
* Processes and adds the DocumentWatchChange to the current set of changes.
*/
handleDocumentChange(docChange: DocumentWatchChange): void;
/** Processes and adds the WatchTargetChange to the current set of changes. */
handleTargetChange(targetChange: WatchTargetChange): void;
/**
* Iterates over all targetIds that the watch change applies to: either the
* targetIds explicitly listed in the change or the targetIds of all currently
* active targets.
*/
forEachTarget(targetChange: WatchTargetChange, fn: (targetId: TargetId) => void): void;
/**
* Handles existence filters and synthesizes deletes for filter mismatches.
* Targets that are invalidated by filter mismatches are added to
* `pendingTargetResets`.
*/
handleExistenceFilter(watchChange: ExistenceFilterChange): void;
/**
* Converts the currently accumulated state into a remote event at the
* provided snapshot version. Resets the accumulated changes before returning.
*/
createRemoteEvent(snapshotVersion: SnapshotVersion): RemoteEvent;
/**
* Adds the provided document to the internal list of document updates and
* its document key to the given target's mapping.
*/
addDocumentToTarget(targetId: TargetId, document: MaybeDocument): void;
/**
* Removes the provided document from the target mapping. If the
* document no longer matches the target, but the document's state is still
* known (e.g. we know that the document was deleted or we received the change
* that caused the filter mismatch), the new document can be provided
* to update the remote document cache.
*/
removeDocumentFromTarget(targetId: TargetId, key: DocumentKey, updatedDocument: MaybeDocument | null): void;
removeTarget(targetId: TargetId): void;
/**
* Returns the current count of documents in the target. This includes both
* the number of documents that the LocalStore considers to be part of the
* target as well as any accumulated changes.
*/
private getCurrentDocumentCountForTarget;
/**
* Increment the number of acks needed from watch before we can consider the
* server to be 'in-sync' with the client's active targets.
*/
recordPendingTargetRequest(targetId: TargetId): void;
private ensureTargetState;
private ensureDocumentTargetMapping;
/**
* Verifies that the user is still interested in this target (by calling
* `getTargetDataForTarget()`) and that we are not waiting for pending ADDs
* from watch.
*/
protected isActiveTarget(targetId: TargetId): boolean;
/**
* Returns the TargetData for an active target (i.e. a target that the user
* is still interested in that has no outstanding target change requests).
*/
protected targetDataForActiveTarget(targetId: TargetId): TargetData | null;
/**
* Resets the state of a Watch target to its initial state (e.g. sets
* 'current' to false, clears the resume token and removes its target mapping
* from all documents).
*/
private resetTarget;
/**
* Returns whether the LocalStore considers the document to be part of the
* specified target.
*/
private targetContainsDocument;
}
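/*
 * Illustrative usage sketch, not part of this declaration file: dispatch
 * decoded watch changes into the aggregator and turn the accumulated state
 * into a RemoteEvent, roughly what RemoteStore's raiseWatchSnapshot does.
 * The helper names are assumptions.
 */
function applyWatchChange(aggregator: WatchChangeAggregator, change: WatchChange): void {
  if (change instanceof DocumentWatchChange) {
    aggregator.handleDocumentChange(change);
  } else if (change instanceof WatchTargetChange) {
    aggregator.handleTargetChange(change);
  } else {
    aggregator.handleExistenceFilter(change);
  }
}
function snapshotAt(aggregator: WatchChangeAggregator, version: SnapshotVersion): RemoteEvent {
  // createRemoteEvent() resets the accumulated changes before returning.
  return aggregator.createRemoteEvent(version);
}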

Some files were not shown because too many files have changed in this diff