Commit 821afc5e authored by Xavier Guimard

New upstream version 3.1.13+~3.1.11

parent 6df78e12
......@@ -2,6 +2,42 @@
All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
<a name="3.1.13"></a>
## [3.1.13](https://github.com/mongodb/node-mongodb-native/compare/v3.1.12...v3.1.13) (2019-01-23)
### Bug Fixes
* restore ability to webpack by removing `makeLazyLoader` ([050267d](https://github.com/mongodb/node-mongodb-native/commit/050267d))
* **bulk:** honor ignoreUndefined in initializeUnorderedBulkOp ([e806be4](https://github.com/mongodb/node-mongodb-native/commit/e806be4))
* **changeStream:** properly handle changeStream event mid-close ([#1902](https://github.com/mongodb/node-mongodb-native/issues/1902)) ([5ad9fa9](https://github.com/mongodb/node-mongodb-native/commit/5ad9fa9))
* **db_ops:** ensure we async resolve errors in createCollection ([210c71d](https://github.com/mongodb/node-mongodb-native/commit/210c71d))
<a name="3.1.12"></a>
## [3.1.12](https://github.com/mongodb/node-mongodb-native/compare/v3.1.11...v3.1.12) (2019-01-16)
### Features
* **core:** update to mongodb-core v3.1.11 ([9bef6e7](https://github.com/mongodb/node-mongodb-native/commit/9bef6e7))
<a name="3.1.11"></a>
## [3.1.11](https://github.com/mongodb/node-mongodb-native/compare/v3.1.10...v3.1.11) (2019-01-15)
### Bug Fixes
* **bulk:** fix error propagation in empty bulk.execute ([a3adb3f](https://github.com/mongodb/node-mongodb-native/commit/a3adb3f))
* **bulk:** make sure that any error in bulk write is propagated ([bedc2d2](https://github.com/mongodb/node-mongodb-native/commit/bedc2d2))
* **bulk:** properly calculate batch size for bulk writes ([aafe71b](https://github.com/mongodb/node-mongodb-native/commit/aafe71b))
* **operations:** do not call require in a hot path ([ff82ff4](https://github.com/mongodb/node-mongodb-native/commit/ff82ff4))
<a name="3.1.10"></a>
## [3.1.10](https://github.com/mongodb/node-mongodb-native/compare/v3.1.9...v3.1.10) (2018-11-16)
......
......@@ -702,6 +702,13 @@ class BulkOperationBase {
const maxWriteBatchSize =
isMaster && isMaster.maxWriteBatchSize ? isMaster.maxWriteBatchSize : 1000;
// Calculates the largest possible size of an Array key, represented as a BSON string
// element. This calculation sums:
// 1 byte for the BSON type
// # of bytes = length of (string representation of (maxWriteBatchSize - 1))
// + 1 byte for the null terminator
const maxKeySize = (maxWriteBatchSize - 1).toString(10).length + 2;
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, collection.s.db);
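Each document in a bulk write command is serialized as an element of a BSON array, keyed by its decimal index, so the bound above is the worst-case per-element key overhead. A standalone sketch of the arithmetic (not driver code) for two common maxWriteBatchSize values:

```js
// Standalone sketch of the maxKeySize bound described in the comment above.
function maxKeySizeFor(maxWriteBatchSize) {
  // Largest possible index is maxWriteBatchSize - 1; its decimal string
  // length plus 1 byte for the BSON element type and 1 byte for the
  // trailing null terminator gives the worst-case key overhead.
  return (maxWriteBatchSize - 1).toString(10).length + 2;
}

console.log(maxKeySizeFor(1000));   // "999"   -> 3 + 2 = 5 bytes
console.log(maxKeySizeFor(100000)); // "99999" -> 5 + 2 = 7 bytes
```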
......@@ -745,6 +752,7 @@ class BulkOperationBase {
// Max batch size options
maxBatchSizeBytes: maxBatchSizeBytes,
maxWriteBatchSize: maxWriteBatchSize,
maxKeySize,
// Namespace
namespace: namespace,
// BSON
......
......@@ -12,6 +12,7 @@ const executeOperation = utils.executeOperation;
const MongoWriteConcernError = require('mongodb-core').MongoWriteConcernError;
const handleMongoWriteConcernError = require('./common').handleMongoWriteConcernError;
const bson = common.bson;
const isPromiseLike = require('../utils').isPromiseLike;
/**
* Add to internal list of Operations
......@@ -35,10 +36,12 @@ function addToOperationsList(bulkOperation, docType, document) {
if (bulkOperation.s.currentBatch == null)
bulkOperation.s.currentBatch = new Batch(docType, bulkOperation.s.currentIndex);
const maxKeySize = bulkOperation.s.maxKeySize;
// Check if we need to create a new batch
if (
bulkOperation.s.currentBatchSize + 1 >= bulkOperation.s.maxWriteBatchSize ||
bulkOperation.s.currentBatchSizeBytes + bulkOperation.s.currentBatchSizeBytes >=
bulkOperation.s.currentBatchSizeBytes + maxKeySize + bsonSize >=
bulkOperation.s.maxBatchSizeBytes ||
bulkOperation.s.currentBatch.batchType !== docType
) {
......@@ -51,10 +54,6 @@ function addToOperationsList(bulkOperation, docType, document) {
// Reset the current size trackers
bulkOperation.s.currentBatchSize = 0;
bulkOperation.s.currentBatchSizeBytes = 0;
} else {
// Update current batch size
bulkOperation.s.currentBatchSize = bulkOperation.s.currentBatchSize + 1;
bulkOperation.s.currentBatchSizeBytes = bulkOperation.s.currentBatchSizeBytes + bsonSize;
}
if (docType === common.INSERT) {
......@@ -67,13 +66,14 @@ function addToOperationsList(bulkOperation, docType, document) {
// We have an array of documents
if (Array.isArray(document)) {
throw toError('operation passed in cannot be an Array');
} else {
bulkOperation.s.currentBatch.originalIndexes.push(bulkOperation.s.currentIndex);
bulkOperation.s.currentBatch.operations.push(document);
bulkOperation.s.currentBatchSizeBytes = bulkOperation.s.currentBatchSizeBytes + bsonSize;
bulkOperation.s.currentIndex = bulkOperation.s.currentIndex + 1;
}
bulkOperation.s.currentBatch.originalIndexes.push(bulkOperation.s.currentIndex);
bulkOperation.s.currentBatch.operations.push(document);
bulkOperation.s.currentBatchSize += 1;
bulkOperation.s.currentBatchSizeBytes += maxKeySize + bsonSize;
bulkOperation.s.currentIndex += 1;
// Return bulkOperation
return bulkOperation;
}
......@@ -115,6 +115,10 @@ class OrderedBulkOperation extends BulkOperationBase {
*/
execute(_writeConcern, options, callback) {
const ret = this.bulkExecute(_writeConcern, options, callback);
if (isPromiseLike(ret)) {
return ret;
}
options = ret.options;
callback = ret.callback;
......
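The early return above matters because bulkExecute can hand back a promise instead of an options/callback pair, for example a rejection for an empty bulk when no callback was supplied (compare the 3.1.11 "error propagation in empty bulk.execute" entry above). A minimal sketch of the shape test, assuming the helper in lib/utils.js simply checks for a callable then:

```js
// Hypothetical sketch of isPromiseLike; the real helper lives in lib/utils.js
// and may differ in detail.
function isPromiseLike(maybePromise) {
  return maybePromise && typeof maybePromise.then === 'function';
}

// Anything promise-shaped is returned to the caller untouched instead of
// being unpacked into { options, callback }.
const ret = Promise.reject(new Error('nothing to execute')); // placeholder error
if (isPromiseLike(ret)) {
  ret.catch(err => console.error(err.message));
}
```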
......@@ -12,6 +12,7 @@ const executeOperation = utils.executeOperation;
const MongoWriteConcernError = require('mongodb-core').MongoWriteConcernError;
const handleMongoWriteConcernError = require('./common').handleMongoWriteConcernError;
const bson = common.bson;
const isPromiseLike = require('../utils').isPromiseLike;
/**
* Add to internal list of Operations
......@@ -40,6 +41,8 @@ function addToOperationsList(bulkOperation, docType, document) {
bulkOperation.s.currentBatch = bulkOperation.s.currentRemoveBatch;
}
const maxKeySize = bulkOperation.s.maxKeySize;
// Create a new batch object if we don't have a current one
if (bulkOperation.s.currentBatch == null)
bulkOperation.s.currentBatch = new Batch(docType, bulkOperation.s.currentIndex);
......@@ -47,7 +50,8 @@ function addToOperationsList(bulkOperation, docType, document) {
// Check if we need to create a new batch
if (
bulkOperation.s.currentBatch.size + 1 >= bulkOperation.s.maxWriteBatchSize ||
bulkOperation.s.currentBatch.sizeBytes + bsonSize >= bulkOperation.s.maxBatchSizeBytes ||
bulkOperation.s.currentBatch.sizeBytes + maxKeySize + bsonSize >=
bulkOperation.s.maxBatchSizeBytes ||
bulkOperation.s.currentBatch.batchType !== docType
) {
// Save the batch to the execution stack
......@@ -60,12 +64,12 @@ function addToOperationsList(bulkOperation, docType, document) {
// We have an array of documents
if (Array.isArray(document)) {
throw toError('operation passed in cannot be an Array');
} else {
bulkOperation.s.currentBatch.operations.push(document);
bulkOperation.s.currentBatch.originalIndexes.push(bulkOperation.s.currentIndex);
bulkOperation.s.currentIndex = bulkOperation.s.currentIndex + 1;
}
bulkOperation.s.currentBatch.operations.push(document);
bulkOperation.s.currentBatch.originalIndexes.push(bulkOperation.s.currentIndex);
bulkOperation.s.currentIndex = bulkOperation.s.currentIndex + 1;
// Save back the current Batch to the right type
if (docType === common.INSERT) {
bulkOperation.s.currentInsertBatch = bulkOperation.s.currentBatch;
......@@ -80,8 +84,8 @@ function addToOperationsList(bulkOperation, docType, document) {
}
// Update current batch size
bulkOperation.s.currentBatch.size = bulkOperation.s.currentBatch.size + 1;
bulkOperation.s.currentBatch.sizeBytes = bulkOperation.s.currentBatch.sizeBytes + bsonSize;
bulkOperation.s.currentBatch.size += 1;
bulkOperation.s.currentBatch.sizeBytes += maxKeySize + bsonSize;
// Return bulkOperation
return bulkOperation;
......@@ -123,6 +127,10 @@ class UnorderedBulkOperation extends BulkOperationBase {
*/
execute(_writeConcern, options, callback) {
const ret = this.bulkExecute(_writeConcern, options, callback);
if (isPromiseLike(ret)) {
return ret;
}
options = ret.options;
callback = ret.callback;
......@@ -169,9 +177,18 @@ function executeBatch(bulkOperation, batch, options, callback) {
*/
function executeBatches(bulkOperation, options, callback) {
let numberOfCommandsToExecute = bulkOperation.s.batches.length;
let hasErrored = false;
// Execute over all the batches
for (let i = 0; i < bulkOperation.s.batches.length; i++) {
executeBatch(bulkOperation, bulkOperation.s.batches[i], options, function(err) {
if (hasErrored) {
return;
}
if (err) {
hasErrored = true;
return handleCallback(callback, err);
}
// Count down the number of commands left to execute
numberOfCommandsToExecute = numberOfCommandsToExecute - 1;
......
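The hasErrored flag above is what guarantees the shared user callback fires at most once when several batches run in parallel and one of them fails. The same guard in isolation, with executeBatch stubbed out so the sketch runs standalone:

```js
// Minimal sketch of the "first error wins" guard used in executeBatches;
// executeBatch is stubbed so the example runs on its own.
function executeAll(batches, executeBatch, callback) {
  let remaining = batches.length;
  let hasErrored = false;

  batches.forEach(batch => {
    executeBatch(batch, err => {
      if (hasErrored) return;          // a previous batch already failed
      if (err) {
        hasErrored = true;             // report only the first error
        return callback(err);
      }
      if (--remaining === 0) callback(null, 'all batches done');
    });
  });
}

// Example: the second batch fails, so the callback fires exactly once with that error.
executeAll(
  ['b1', 'b2', 'b3'],
  (batch, cb) => setImmediate(() => cb(batch === 'b2' ? new Error('write failed') : null)),
  (err, res) => console.log(err ? `error: ${err.message}` : res)
);
```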
......@@ -368,6 +368,20 @@ function processNewChange(args) {
const change = args.change;
const callback = args.callback;
const eventEmitter = args.eventEmitter || false;
// If the changeStream is closed, then it should not process a change.
if (changeStream.isClosed()) {
// We do not error in the eventEmitter case.
if (eventEmitter) {
return;
}
const error = new MongoError('ChangeStream is closed');
return typeof callback === 'function'
? callback(error, null)
: changeStream.promiseLibrary.reject(error);
}
const topology = changeStream.topology;
const options = changeStream.cursor.options;
......
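The guard above distinguishes the two ways changes are consumed: in emitter mode a late change is silently dropped, while a pending callback or promise is settled with a "ChangeStream is closed" MongoError. A hedged usage sketch (collection is assumed to come from a connected MongoClient; the exact timing of the rejection depends on when a change arrives relative to close()):

```js
// Hypothetical usage sketch; `collection` is assumed to come from a connected
// MongoClient. Change streams require a replica set.
const changeStream = collection.watch();

changeStream
  .next()                                    // promise mode: no callback given
  .then(change => console.log('change:', change))
  .catch(err => console.log(err.message));   // e.g. 'ChangeStream is closed'

// Closing while next() is still pending exercises the new guard.
changeStream.close();
```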
......@@ -804,7 +804,7 @@ Collection.prototype.updateMany = function(filter, update, options, callback) {
* Updates documents.
* @method
* @param {object} selector The selector for the update operation.
* @param {object} document The update document.
* @param {object} update The update operations to be applied to the documents
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
......@@ -820,7 +820,7 @@ Collection.prototype.updateMany = function(filter, update, options, callback) {
* @return {Promise} returns Promise if no callback passed
* @deprecated use updateOne, updateMany or bulkWrite
*/
Collection.prototype.update = deprecate(function(selector, document, options, callback) {
Collection.prototype.update = deprecate(function(selector, update, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
......@@ -833,7 +833,7 @@ Collection.prototype.update = deprecate(function(selector, document, options, ca
return executeOperation(this.s.topology, updateDocuments, [
this,
selector,
document,
update,
options,
callback
]);
......@@ -1692,6 +1692,7 @@ Collection.prototype.findAndRemove = deprecate(function(query, sort, options, ca
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
* @param {object} [options.collation] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {string} [options.comment] Add a comment to an aggregation command
* @param {string|object} [options.hint] Add an index selection hint to an aggregation command
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~aggregationCallback} callback The command result callback
* @return {(null|AggregationCursor)}
......@@ -2047,11 +2048,17 @@ Collection.prototype.mapReduce = function(map, reduce, options, callback) {
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
* @param {ClientSession} [options.session] optional session to use for this operation
* @return {UnorderedBulkOperation}
*/
Collection.prototype.initializeUnorderedBulkOp = function(options) {
options = options || {};
// Give the function's options precedence over the collection-level options.
if (options.ignoreUndefined == null) {
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
options.promiseLibrary = this.s.promiseLibrary;
return unordered(this.s.topology, this, options);
};
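With this default in place, a bulk operation inherits ignoreUndefined from the collection unless the call site overrides it, so undefined fields are dropped during BSON serialization instead of being written as null. A hedged usage sketch (db is assumed to be a connected Db instance; ignoreUndefined can also be set at the client level). The same default is applied to initializeOrderedBulkOp in the next hunk.

```js
// Hypothetical usage sketch; `db` is assumed to be a connected Db instance.
const coll = db.collection('people', { ignoreUndefined: true });

// Inherits ignoreUndefined: true from the collection, so `nickname` is
// omitted from the stored document rather than saved as null.
const bulk = coll.initializeUnorderedBulkOp();
bulk.insert({ name: 'Ada', nickname: undefined });
bulk.execute().then(result => console.log(result.nInserted)); // 1

// An explicit option at the call site still wins over the collection default.
const strictBulk = coll.initializeUnorderedBulkOp({ ignoreUndefined: false });
```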
......@@ -2065,11 +2072,16 @@ Collection.prototype.initializeUnorderedBulkOp = function(options) {
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
* @param {OrderedBulkOperation} callback The command result callback
* @return {null}
*/
Collection.prototype.initializeOrderedBulkOp = function(options) {
options = options || {};
// Give the function's options precedence over the collection-level options.
if (options.ignoreUndefined == null) {
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
options.promiseLibrary = this.s.promiseLibrary;
return ordered(this.s.topology, this, options);
};
......
......@@ -22,6 +22,21 @@ const MongoError = require('mongodb-core').MongoError;
const ReadPreference = require('mongodb-core').ReadPreference;
const toError = require('../utils').toError;
let collection;
function loadCollection() {
if (!collection) {
collection = require('../collection');
}
return collection;
}
let db;
function loadDb() {
if (!db) {
db = require('../db');
}
return db;
}
/**
* Group function helper
* @ignore
......@@ -987,7 +1002,7 @@ function mapReduce(coll, map, reduce, options, callback) {
if (result.result != null && typeof result.result === 'object') {
const doc = result.result;
// Return a collection from another db
const Db = require('../db');
let Db = loadDb();
collection = new Db(doc.db, coll.s.db.s.topology, coll.s.db.s.options).collection(
doc.collection
);
......@@ -1204,7 +1219,7 @@ function removeDocuments(coll, selector, options, callback) {
* @param {Collection~collectionResultCallback} [callback] The results callback
*/
function rename(coll, newName, options, callback) {
const Collection = require('../collection');
let Collection = loadCollection();
// Check the collection name
checkCollectionName(newName);
// Build the command
......
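The loadCollection/loadDb helpers introduced at the top of this file defer and cache the circular require of ../collection and ../db: the module is resolved once on first use rather than on every call in a hot path, and the plain require('literal') call keeps bundlers such as webpack happy (compare the "removing makeLazyLoader" and "do not call require in a hot path" changelog entries above). The same pattern in isolation, using a built-in module as a stand-in:

```js
// Deferred, cached require; 'crypto' stands in for the circular ../collection
// dependency from the real code.
let cryptoModule;
function loadCrypto() {
  if (!cryptoModule) {
    cryptoModule = require('crypto'); // resolved once, on first use
  }
  return cryptoModule;
}

function hotPath(value) {
  // Later calls reuse the cached module instead of re-entering require().
  return loadCrypto().createHash('sha1').update(value).digest('hex');
}

console.log(hotPath('a'), hotPath('b'));
```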
......@@ -6,6 +6,14 @@ const handleCallback = require('../utils').handleCallback;
const MongoError = require('mongodb-core').MongoError;
const push = Array.prototype.push;
let cursor;
function loadCursor() {
if (!cursor) {
cursor = require('../cursor');
}
return cursor;
}
/**
* Get the count of documents for this cursor.
*
......@@ -74,7 +82,7 @@ function count(cursor, applySkipLimit, opts, callback) {
* @param {Cursor~resultCallback} callback The result callback.
*/
function each(cursor, callback) {
const Cursor = require('../cursor');
let Cursor = loadCursor();
if (!callback) throw MongoError.create({ message: 'callback is mandatory', driver: true });
if (cursor.isNotified()) return;
......@@ -114,7 +122,7 @@ function each(cursor, callback) {
* @param {Cursor~resultCallback} [callback] The result callback.
*/
function hasNext(cursor, callback) {
const Cursor = require('../cursor');
let Cursor = loadCursor();
if (cursor.s.currentDoc) {
return callback(null, true);
......@@ -165,7 +173,7 @@ function next(cursor, callback) {
// Get the next available document from the cursor, returns null if no more documents are available.
function nextObject(cursor, callback) {
const Cursor = require('../cursor');
let Cursor = loadCursor();
if (cursor.s.state === Cursor.CLOSED || (cursor.isDead && cursor.isDead()))
return handleCallback(
......@@ -196,7 +204,7 @@ function nextObject(cursor, callback) {
* @param {Cursor~toArrayResultCallback} [callback] The result callback.
*/
function toArray(cursor, callback) {
const Cursor = require('../cursor');
let Cursor = loadCursor();
const items = [];
......
......@@ -17,6 +17,21 @@ const findOne = require('./collection_ops').findOne;
const remove = require('./collection_ops').remove;
const updateOne = require('./collection_ops').updateOne;
let collection;
function loadCollection() {
if (!collection) {
collection = require('../collection');
}
return collection;
}
let db;
function loadDb() {
if (!db) {
db = require('../db');
}
return db;
}
const debugFields = [
'authSource',
'w',
......@@ -64,7 +79,7 @@ const illegalCommandFields = [
* @param {Db~resultCallback} [callback] The command result callback
*/
function addUser(db, username, password, options, callback) {
const Db = require('../db');
let Db = loadDb();
// Did the user destroy the topology
if (db.serverConfig && db.serverConfig.isDestroyed())
......@@ -132,7 +147,7 @@ function addUser(db, username, password, options, callback) {
* @param {Db~collectionsResultCallback} [callback] The results callback
*/
function collections(db, options, callback) {
const Collection = require('../collection');
let Collection = loadCollection();
options = Object.assign({}, options, { nameOnly: true });
// Let's get the collection names
......@@ -172,7 +187,7 @@ function collections(db, options, callback) {
* @param {Db~collectionResultCallback} [callback] The results callback
*/
function createCollection(db, name, options, callback) {
const Collection = require('../collection');
let Collection = loadCollection();
// Get the write concern options
const finalOptions = applyWriteConcern(Object.assign({}, options), { db }, options);
......@@ -233,11 +248,16 @@ function createCollection(db, name, options, callback) {
// Execute command
executeCommand(db, cmd, finalOptions, err => {
if (err) return handleCallback(callback, err);
handleCallback(
callback,
null,
new Collection(db, db.s.topology, db.s.databaseName, name, db.s.pkFactory, options)
);
try {
return handleCallback(
callback,
null,
new Collection(db, db.s.topology, db.s.databaseName, name, db.s.pkFactory, options)
);
} catch (err) {
return handleCallback(callback, err);
}
});
});
}
......@@ -632,7 +652,7 @@ function profilingLevel(db, options, callback) {
* @param {Db~resultCallback} [callback] The command result callback
*/
function removeUser(db, username, options, callback) {
const Db = require('../db');
let Db = loadDb();
// Attempt to execute command
executeAuthRemoveUserCommand(db, username, options, (err, result) => {
......
......@@ -11,6 +11,14 @@ const ReplSet = require('../topologies/replset');
const Server = require('../topologies/server');
const ServerSessionPool = require('mongodb-core').Sessions.ServerSessionPool;
let client;
function loadClient() {
if (!client) {
client = require('../mongo_client');
}
return client;
}
const monitoringEvents = [
'timeout',
'close',
......@@ -127,7 +135,7 @@ function clearAllEvents(topology) {
// Collect all events in order from SDAM
function collectEvents(mongoClient, topology) {
const MongoClient = require('../mongo_client');
let MongoClient = loadClient();
const collectedEvents = [];
if (mongoClient instanceof MongoClient) {
......
......@@ -604,7 +604,10 @@ function decorateWithCollation(command, target, options) {
* @param {object} command the command on which to apply the read concern
* @param {Collection} coll the parent collection of the operation calling this method
*/
function decorateWithReadConcern(command, coll) {
function decorateWithReadConcern(command, coll, options) {
if (options && options.session && options.session.inTransaction()) {
return;
}
let readConcern = Object.assign({}, command.readConcern || {});
if (coll.s.readConcern) {
Object.assign(readConcern, coll.s.readConcern);
......
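The early return above keeps a collection-level readConcern off commands that run inside a transaction, where the server only accepts a readConcern on the command that starts the transaction. A hedged usage sketch (client is assumed to be a MongoClient connected to a replica set running MongoDB 4.0+):

```js
// Hypothetical usage sketch; `client` is assumed to be a connected MongoClient.
const session = client.startSession();
const coll = client.db('test').collection('items');

// The transaction's own readConcern is sent with the first command only.
session.startTransaction({ readConcern: { level: 'snapshot' } });

coll
  .find({}, { session }) // no per-command readConcern is attached here
  .toArray()
  .then(() => session.commitTransaction())
  .then(() => session.endSession());
```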
......@@ -2,6 +2,27 @@
All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
<a name="3.1.11"></a>
## [3.1.11](https://github.com/mongodb-js/mongodb-core/compare/v3.1.10...v3.1.11) (2019-01-16)
### Bug Fixes
* **wire-protocol:** don't allow override of `slaveOk` ([8fcef69](https://github.com/mongodb-js/mongodb-core/commit/8fcef69))
<a name="3.1.10"></a>
## [3.1.10](https://github.com/mongodb-js/mongodb-core/compare/v3.1.9...v3.1.10) (2019-01-15)
### Bug Fixes
* **mongos-replset:** pass connect options to child server instances ([7ffb4bb](https://github.com/mongodb-js/mongodb-core/commit/7ffb4bb))
* **prettier:** fix prettier file paths for Windows ([00c631e](https://github.com/mongodb-js/mongodb-core/commit/00c631e))
<a name="3.1.9"></a>
## [3.1.9](https://github.com/mongodb-js/mongodb-core/compare/v3.1.8...v3.1.9) (2018-11-16)
......
......@@ -90,8 +90,7 @@ const extractCommand = command => {
}
Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => {
if (typeof command.options[key] !== 'undefined')
result[LEGACY_FIND_OPTIONS_MAP[key]] = command.options[key];
if (typeof command[key] !== 'undefined') result[LEGACY_FIND_OPTIONS_MAP[key]] = command[key];
});
OP_QUERY_KEYS.forEach(key => {
......
......@@ -22,7 +22,7 @@ var OPTS_EXHAUST = 64;
var OPTS_PARTIAL = 128;
// Response flags
var CURSOR_NOT_FOUND = 0;
var CURSOR_NOT_FOUND = 1;
var QUERY_FAILURE = 2;
var SHARD_CONFIG_STALE = 4;
var AWAIT_CAPABLE = 8;
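These constants are bit positions in the responseFlags field of an OP_REPLY message, which is why cursorNotFound must be 1 (bit 0) rather than 0: a mask of zero can never be detected with a bitwise test. For illustration:

```js
// Response flags form a bitmask, so every constant must be a distinct power of two.
const CURSOR_NOT_FOUND = 1; // bit 0
const QUERY_FAILURE = 2;    // bit 1
const SHARD_CONFIG_STALE = 4;
const AWAIT_CAPABLE = 8;

const responseFlags = CURSOR_NOT_FOUND | AWAIT_CAPABLE;

// With the old value of 0, this check could never succeed.
console.log((responseFlags & CURSOR_NOT_FOUND) !== 0); // true
console.log((responseFlags & QUERY_FAILURE) !== 0);    // false
```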
......@@ -46,9 +46,6 @@ var Query = function(bson, ns, query, options) {
this.ns = ns;
this.query = query;
// Ensure empty options
this.options = options || {};
// Additional options
this.numberToSkip = options.numberToSkip || 0;
this.numberToReturn = options.numberToReturn || 0;
......
This diff is collapsed.
......@@ -156,10 +156,8 @@ class Server extends EventEmitter {
return;
}
// Are we executing against a specific topology
const topology = options.topology || {};
// Create the query object
const query = this.s.wireProtocolHandler.command(this.s.bson, ns, cmd, {}, topology, options);
const query = this.s.wireProtocolHandler.command(this, ns, cmd, {}, options);
// Set slave OK of the query
query.slaveOk = options.readPreference ? options.readPreference.slaveOk() : false;
......
......@@ -286,7 +286,7 @@ Mongos.prototype.connect = function(options) {
// Create server instances
var servers = this.s.seedlist.map(function(x) {
const server = new Server(
Object.assign({}, self.s.options, x, {
Object.assign({}, self.s.options, x, options, {
authProviders: self.authProviders,
reconnect: false,
monitoring: false,
......
......@@ -960,12 +960,14 @@ ReplSet.prototype.connect = function(options) {
var self = this;
// Add any connect level options to the internal state
this.s.connectOptions = options || {};
// Set connecting state
stateTransition(this, CONNECTING);
// Create server instances
var servers = this.s.seedlist.map(function(x) {
return new Server(
Object.assign({}, self.s.options, x, {
Object.assign({}, self.s.options, x, options, {
authProviders: self.authProviders,
reconnect: false,
monitoring: false,
......
......@@ -755,33 +755,7 @@ Server.prototype.command = function(ns, cmd, options, callback) {
return callback(new MongoError(`server ${this.name} does not support collation`));
}
// Are we executing against a specific topology
var topology = options.topology || {};
// Create the query object
var query = self.wireProtocolHandler.command(self.s.bson, ns, cmd, {}, topology, options);
if (query instanceof MongoError) {
return callback(query, null);
}
// Set slave OK of the query
query.slaveOk = options.readPreference ? options.readPreference.slaveOk() : false;
// Write options
var writeOptions = {
raw: typeof options.raw === 'boolean' ? options.raw : false,
promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true,
promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true,
promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false,
command: true,
monitoring: typeof options.monitoring === 'boolean' ? options.monitoring : false,
fullResult: typeof options.fullResult === 'boolean' ? options.fullResult : false,
requestId: query.requestId,
socketTimeout: typeof options.socketTimeout === 'number' ? options.socketTimeout : null,
session: options.session || null
};
// Write the operation to the pool
self.s.pool.write(query, writeOptions, callback);
self.wireProtocolHandler.command(self, ns, cmd, options, callback);
};
/**
......@@ -812,7 +786,7 @@ Server.prototype.insert = function(ns, ops, options, callback) {
ops = Array.isArray(ops) ? ops : [ops];
// Execute write
return self.wireProtocolHandler.insert(self.s.pool, ns, self.s.bson, ops, options, callback);
return self.wireProtocolHandler.insert(self, ns, ops, options, callback);
};
/**
......@@ -847,7 +821,7 @@ Server.prototype.update = function(ns, ops, options, callback) {
// Setup the docs as an array
ops = Array.isArray(ops) ? ops : [ops];
// Execute write
return self.wireProtocolHandler.update(self.s.pool, ns, self.s.bson, ops, options, callback);
return self.wireProtocolHandler.update(self, ns, ops, options, callback);
};
/**
......@@ -882,7 +856,7 @@ Server.prototype.remove = function(ns, ops, options, callback) {
// Setup the docs as an array
ops = Array.isArray(ops) ? ops : [ops];
// Execute write
return self.wireProtocolHandler.remove(self.s.pool, ns, self.s.bson, ops, options, callback);
return self.wireProtocolHandler.remove(self, ns, ops, options, callback);
};
/**
......
......@@ -47,9 +47,55 @@ var parseHeader = function(message) {
};
};
function applyCommonQueryOptions(queryOptions, options) {
Object.assign(queryOptions, {
raw: typeof options.raw === 'boolean' ? options.raw : false,
promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true,
promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true,
promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false,
monitoring: typeof options.monitoring === 'boolean' ? options.monitoring : false,
fullResult: typeof options.fullResult === 'boolean' ? options.fullResult : false
});
if (typeof options.socketTimeout === 'number') {
queryOptions.socketTimeout = options.socketTimeout;
}
if (options.session) {
queryOptions.session = options.session;
}
if (typeof options.documentsReturnedIn === 'string') {
queryOptions.documentsReturnedIn = options.documentsReturnedIn;
}
return queryOptions;
}
function isMongos(server) {
if (server.type === 'mongos') return true;
if (server.parent && server.parent.type === 'mongos') return true;
// NOTE: handle unified topology
return false;
}
function databaseNamespace(ns) {
return ns.split('.')[0];
}
function collectionNamespace(ns) {
return ns
.split('.')
.slice(1)
.join('.');
}
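The two namespace helpers split a fully qualified namespace on its first dot; collection names can themselves contain dots, which is why the collection part is re-joined. Usage of the helpers defined above:

```js
// Usage of databaseNamespace / collectionNamespace as defined above.
databaseNamespace('test.system.profile');   // -> 'test'
collectionNamespace('test.system.profile'); // -> 'system.profile'
databaseNamespace('app.users');             // -> 'app'
collectionNamespace('app.users');           // -> 'users'
```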