From c4a7121bd9f6f7246d9f54b78bdd2eb4d18b4c76 Mon Sep 17 00:00:00 2001
From: Dom
Date: Wed, 11 Oct 2017 14:01:23 +0100
Subject: [PATCH] Merge Development (#48)

* add DropAllIndexes() method (#25)

Create a new method to drop all the indexes of a collection in a single call.

* readme: credit @feliixx for #25 (#26)

* send metadata during handshake (#28)

Fixes [#484](https://github.com/go-mgo/mgo/issues/484)

Annotate connections with metadata provided by the connecting client.

Information sent:

    {
      "application": { // optional
        "name": "myAppName"
      },
      "driver": {
        "name": "mgo",
        "version": "v2"
      },
      "os": {
        "type": runtime.GOOS,
        "architecture": runtime.GOARCH
      }
    }

To set "application.name", add the `appname` parameter to the options of the connection string URI, for example:

"mongodb://localhost:27017?appname=myAppName"

* Update README to add appName (#32)

* docs: elaborate on what appName does

* readme: add appName to changes

* add method CreateView() (#33)

Fixes #30. Thanks to @feliixx for the time and effort.

* readme: credit @feliixx in the README (#36)

* Don't panic on indexed int64 fields (#23)

* Stop all db instances after tests (#462)

Even when all tests pass, the builds for MongoDB releases earlier than 2.6 still fail; running a clean-up fixes the issue.

* fix the int64 type failing when getting indexes and trying to type them

* requested changes relating to the case statement and panic

* Update README.md to credit @mapete94.

* tests: ensure indexed int64 fields do not cause a panic in Indexes()

See:
* https://github.com/globalsign/mgo/pull/23
* https://github.com/go-mgo/mgo/issues/475
* https://github.com/go-mgo/mgo/pull/476

* Add collation option to collection.Create() (#37)

- Allow specifying the default collation for the collection when creating it.
- Add some documentation to the query.Collation() method.

Fixes #29

* Test against MongoDB 3.4.x (#35)

* test against MongoDB 3.4.x

* tests: use listIndexes to assert index state for 3.4+

* make tests pass against v3.4.x

- skip `TestViewWithCollation` because of SERVER-31049, cf: https://jira.mongodb.org/browse/SERVER-31049
- add a versionAtLeast() method in the init.js script to better detect the server version

Fixes #31

* Introduce constants for BSON element types (#41)

* bson.Unmarshal returns time in UTC (#42)

* readme: add missing features / credit

* Adds missing collation feature description (by @feliixx).
* Adds missing 3.4 tests description (by @feliixx).
* Adds BSON constants description (by @bozaro).
* Adds UTC time.Time unmarshalling (by @gazoon).

* fix golint, go vet and gofmt warnings (#44)

Fixes #43

* readme: credit @feliixx (#46)

* Fix GetBSON() method usage (#40)

* Fix GetBSON() method usage

Original issue
---
You can't use a type with a custom GetBSON() method both as a struct field value and as a struct field pointer. For example, you can't create a custom GetBSON() for the Bar type:

```
type Foo struct {
  a Bar
  b *Bar
}
```

A value implementation (`func (t Bar) GetBSON()`) would crash when encoding the `Foo.b = nil` value. A pointer implementation (`func (t *Bar) GetBSON()`) would not be called when encoding the `Foo.a` value.

After this change
---
With a value implementation `func (t Bar) GetBSON()`, the method is not called when encoding the `Foo.b = nil` value; in that case the `nil` value is serialized as a BSON null. With a pointer implementation `func (t *Bar) GetBSON()`, the method is called even when encoding the `Foo.a` value.
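As an illustration, here is a minimal sketch (not part of the patch) of the behaviour after this change, using the hypothetical `Foo`/`Bar` types from the example above with exported fields so they are marshalled:

```go
package main

import (
	"fmt"

	"github.com/globalsign/mgo/bson"
)

type Bar string

// Value-receiver implementation: after this change it is called when
// encoding Foo.A, and a nil Foo.B no longer crashes the encoder — the
// nil pointer is serialized as a BSON null instead.
func (b Bar) GetBSON() (interface{}, error) {
	return "bar-" + string(b), nil
}

type Foo struct {
	A Bar
	B *Bar
}

func main() {
	data, err := bson.Marshal(Foo{A: "x"}) // Foo.B is nil: no panic
	if err != nil {
		panic(err)
	}
	var out bson.M
	if err := bson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[a:bar-x b:<nil>]
}
```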
* Minor refactoring * readme: credit @bozaro (#47) --- .travis.yml | 3 + README.md | 9 +- auth.go | 2 +- auth_test.go | 10 +- bson/bson.go | 65 ++++-- bson/bson_test.go | 249 ++++++++++++-------- bson/decimal.go | 2 + bson/decimal_test.go | 238 +++++++++---------- bson/decode.go | 30 +-- bson/encode.go | 66 +++++- bson/json_test.go | 2 +- bulk_test.go | 6 +- cluster.go | 3 +- cluster_test.go | 17 +- dbtest/dbserver.go | 2 +- dbtest/dbserver_test.go | 2 + gridfs.go | 51 ++-- internal/json/decode.go | 2 +- internal/json/decode_test.go | 2 +- internal/json/encode.go | 6 +- internal/json/stream_test.go | 2 +- internal/sasl/sasl.go | 6 +- internal/scram/scram.go | 6 +- log.go | 12 +- server.go | 3 +- session.go | 441 +++++++++++++++++++++-------------- session_test.go | 98 ++++---- socket.go | 24 +- stats.go | 10 + txn/debug.go | 10 +- txn/flusher.go | 143 ++++++------ txn/txn.go | 35 +-- txn/txn_test.go | 23 +- 33 files changed, 951 insertions(+), 629 deletions(-) diff --git a/.travis.yml b/.travis.yml index 430844718..8d4428d1f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,8 +33,11 @@ install: - go get gopkg.in/check.v1 - go get gopkg.in/yaml.v2 - go get gopkg.in/tomb.v2 + - go get github.com/golang/lint/golint before_script: + - golint ./... | grep -v 'ID' | cat + - go vet github.com/globalsign/mgo/bson github.com/globalsign/mgo/txn github.com/globalsign/mgo - export NOIPV6=1 - make startdb diff --git a/README.md b/README.md index 349aaee43..36688b3d0 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ Further PR's (with tests) are welcome, but please maintain backwards compatibili * Hides SASL warnings ([details](https://github.com/globalsign/mgo/pull/7)) * Support for partial indexes ([detials](https://github.com/domodwyer/mgo/commit/5efe8eccb028238d93c222828cae4806aeae9f51)) * Fixes timezone handling ([details](https://github.com/go-mgo/mgo/pull/464)) -* Integration tests run against newest MongoDB 3.2 releases ([details](https://github.com/globalsign/mgo/pull/4), [more](https://github.com/globalsign/mgo/pull/24)) +* Integration tests run against MongoDB 3.2 & 3.4 releases ([details](https://github.com/globalsign/mgo/pull/4), [more](https://github.com/globalsign/mgo/pull/24), [more](https://github.com/globalsign/mgo/pull/35)) * Improved multi-document transaction performance ([details](https://github.com/globalsign/mgo/pull/10), [more](https://github.com/globalsign/mgo/pull/11), [more](https://github.com/globalsign/mgo/pull/16)) * Fixes cursor timeouts ([details](https://jira.mongodb.org/browse/SERVER-24899)) * Support index hints and timeouts for count queries ([details](https://github.com/globalsign/mgo/pull/17)) @@ -25,10 +25,16 @@ Further PR's (with tests) are welcome, but please maintain backwards compatibili * Supports dropping all indexes on a collection ([details](https://github.com/globalsign/mgo/pull/25)) * Annotates log entries/profiler output with optional appName on 3.4+ ([details](https://github.com/globalsign/mgo/pull/28)) * Support for read-only [views](https://docs.mongodb.com/manual/core/views/) in 3.4+ ([details](https://github.com/globalsign/mgo/pull/33)) +* Support for [collations](https://docs.mongodb.com/manual/reference/collation/) in 3.4+ ([details](https://github.com/globalsign/mgo/pull/37)) +* Provide BSON constants for convenience/sanity ([details](https://github.com/globalsign/mgo/pull/41)) +* Consistently unmarshal time.Time values as UTC ([details](https://github.com/globalsign/mgo/pull/42)) +* Enforces best practise coding guidelines 
([details](https://github.com/globalsign/mgo/pull/44)) +* GetBSON correctly handles structs with both fields and pointers ([details](https://github.com/globalsign/mgo/pull/40)) --- ### Thanks to +* @bozaro * @BenLubar * @carter2000 * @cezarsa @@ -37,6 +43,7 @@ Further PR's (with tests) are welcome, but please maintain backwards compatibili * @feliixx * @fmpwizard * @jameinel +* @gazoon * @mapete94 * @Reenjii * @smoya diff --git a/auth.go b/auth.go index 388e62105..75d2ebc36 100644 --- a/auth.go +++ b/auth.go @@ -61,7 +61,7 @@ type getNonceCmd struct { type getNonceResult struct { Nonce string - Err string "$err" + Err string `bson:"$err"` Code int } diff --git a/auth_test.go b/auth_test.go index 9f04b2246..ed1af5abf 100644 --- a/auth_test.go +++ b/auth_test.go @@ -580,7 +580,7 @@ func (s *S) TestAuthLoginCachingWithNewSession(c *C) { } func (s *S) TestAuthLoginCachingAcrossPool(c *C) { - // Logins are cached even when the conenction goes back + // Logins are cached even when the connection goes back // into the pool. session, err := mgo.Dial("localhost:40002") @@ -934,7 +934,7 @@ func (s *S) TestAuthX509Cred(c *C) { x509Subject := "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" externalDB := session.DB("$external") - var x509User mgo.User = mgo.User{ + var x509User = mgo.User{ Username: x509Subject, OtherDBRoles: map[string][]mgo.Role{"admin": {mgo.RoleRoot}}, } @@ -1080,11 +1080,11 @@ func (kerberosSuite *KerberosSuite) TestAuthKerberosURL(c *C) { c.Skip("no -kerberos") } c.Logf("Connecting to %s...", kerberosHost) - connectUri := url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI" + connectURI := url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI" if runtime.GOOS == "windows" { - connectUri = url.QueryEscape(kerberosUser) + ":" + url.QueryEscape(getWindowsKerberosPassword()) + "@" + kerberosHost + "?authMechanism=GSSAPI" + connectURI = url.QueryEscape(kerberosUser) + ":" + url.QueryEscape(getWindowsKerberosPassword()) + "@" + kerberosHost + "?authMechanism=GSSAPI" } - session, err := mgo.Dial(connectUri) + session, err := mgo.Dial(connectURI) c.Assert(err, IsNil) defer session.Close() n, err := session.DB("kerberos").C("test").Find(M{}).Count() diff --git a/bson/bson.go b/bson/bson.go index ca1420825..d960f7a37 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -56,7 +56,40 @@ import ( // -------------------------------------------------------------------------- // The public API. -// A value implementing the bson.Getter interface will have its GetBSON +// Element types constants from BSON specification. 
+const ( + ElementFloat64 byte = 0x01 + ElementString byte = 0x02 + ElementDocument byte = 0x03 + ElementArray byte = 0x04 + ElementBinary byte = 0x05 + Element06 byte = 0x06 + ElementObjectId byte = 0x07 + ElementBool byte = 0x08 + ElementDatetime byte = 0x09 + ElementNil byte = 0x0A + ElementRegEx byte = 0x0B + ElementDBPointer byte = 0x0C + ElementJavaScriptWithoutScope byte = 0x0D + ElementSymbol byte = 0x0E + ElementJavaScriptWithScope byte = 0x0F + ElementInt32 byte = 0x10 + ElementTimestamp byte = 0x11 + ElementInt64 byte = 0x12 + ElementDecimal128 byte = 0x13 + ElementMinKey byte = 0xFF + ElementMaxKey byte = 0x7F + + BinaryGeneric byte = 0x00 + BinaryFunction byte = 0x01 + BinaryBinaryOld byte = 0x02 + BinaryUUIDOld byte = 0x03 + BinaryUUID byte = 0x04 + BinaryMD5 byte = 0x05 + BinaryUserDefined byte = 0x80 +) + +// Getter interface: a value implementing the bson.Getter interface will have its GetBSON // method called when the given value has to be marshalled, and the result // of this method will be marshaled in place of the actual object. // @@ -66,12 +99,12 @@ type Getter interface { GetBSON() (interface{}, error) } -// A value implementing the bson.Setter interface will receive the BSON +// Setter interface: a value implementing the bson.Setter interface will receive the BSON // value via the SetBSON method during unmarshaling, and the object // itself will not be changed as usual. // // If setting the value works, the method should return nil or alternatively -// bson.SetZero to set the respective field to its zero value (nil for +// bson.ErrSetZero to set the respective field to its zero value (nil for // pointer types). If SetBSON returns a value of type bson.TypeError, the // BSON value will be omitted from a map or slice being decoded and the // unmarshalling will continue. If it returns any other non-nil error, the @@ -97,10 +130,10 @@ type Setter interface { SetBSON(raw Raw) error } -// SetZero may be returned from a SetBSON method to have the value set to +// ErrSetZero may be returned from a SetBSON method to have the value set to // its respective zero value. When used in pointer values, this will set the // field to nil rather than to the pre-allocated value. -var SetZero = errors.New("set to zero") +var ErrSetZero = errors.New("set to zero") // M is a convenient alias for a map[string]interface{} map, useful for // dealing with BSON in a native way. For instance: @@ -156,7 +189,7 @@ type Raw struct { // documents in general. type RawD []RawDocElem -// See the RawD type. +// RawDocElem elements of RawD type. type RawDocElem struct { Name string Value Raw @@ -166,7 +199,7 @@ type RawDocElem struct { // long. MongoDB objects by default have such a property set in their "_id" // property. // -// http://www.mongodb.org/display/DOCS/Object+IDs +// http://www.mongodb.org/display/DOCS/Object+Ids type ObjectId string // ObjectIdHex returns an ObjectId from the provided hex representation. @@ -192,7 +225,7 @@ func IsObjectIdHex(s string) bool { // objectIdCounter is atomically incremented when generating a new ObjectId // using NewObjectId() function. It's used as a counter part of an id. -var objectIdCounter uint32 = readRandomUint32() +var objectIdCounter = readRandomUint32() // readRandomUint32 returns a random objectIdCounter. 
func readRandomUint32() uint32 { @@ -300,12 +333,12 @@ func (id *ObjectId) UnmarshalJSON(data []byte) error { return nil } if len(data) != 26 || data[0] != '"' || data[25] != '"' { - return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data))) + return fmt.Errorf("invalid ObjectId in JSON: %s", string(data)) } var buf [12]byte _, err := hex.Decode(buf[:], data[1:25]) if err != nil { - return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err)) + return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err) } *id = ObjectId(string(buf[:])) return nil @@ -571,12 +604,12 @@ func Unmarshal(in []byte, out interface{}) (err error) { d := newDecoder(in) d.readDocTo(v) if d.i < len(d.in) { - return errors.New("Document is corrupted") + return errors.New("document is corrupted") } case reflect.Struct: - return errors.New("Unmarshal can't deal with struct values. Use a pointer.") + return errors.New("unmarshal can't deal with struct values. Use a pointer") default: - return errors.New("Unmarshal needs a map or a pointer to a struct.") + return errors.New("unmarshal needs a map or a pointer to a struct") } return nil } @@ -600,13 +633,15 @@ func (raw Raw) Unmarshal(out interface{}) (err error) { return &TypeError{v.Type(), raw.Kind} } case reflect.Struct: - return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.") + return errors.New("raw Unmarshal can't deal with struct values. Use a pointer") default: - return errors.New("Raw Unmarshal needs a map or a valid pointer.") + return errors.New("raw Unmarshal needs a map or a valid pointer") } return nil } +// TypeError store details for type error occuring +// during unmarshaling type TypeError struct { Type reflect.Type Kind byte diff --git a/bson/bson_test.go b/bson/bson_test.go index 35bcc52f0..695f9029d 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -36,6 +36,7 @@ import ( "reflect" "testing" "time" + "strings" "github.com/globalsign/mgo/bson" . "gopkg.in/check.v1" @@ -134,29 +135,29 @@ var allItems = []testItemType{ "\x04_\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, {bson.M{"_": []byte("yo")}, "\x05_\x00\x02\x00\x00\x00\x00yo"}, - {bson.M{"_": bson.Binary{0x80, []byte("udef")}}, + {bson.M{"_": bson.Binary{Kind: 0x80, Data: []byte("udef")}}, "\x05_\x00\x04\x00\x00\x00\x80udef"}, {bson.M{"_": bson.Undefined}, // Obsolete, but still seen in the wild. "\x06_\x00"}, {bson.M{"_": bson.ObjectId("0123456789ab")}, "\x07_\x000123456789ab"}, - {bson.M{"_": bson.DBPointer{"testnamespace", bson.ObjectId("0123456789ab")}}, + {bson.M{"_": bson.DBPointer{Namespace: "testnamespace", Id: bson.ObjectId("0123456789ab")}}, "\x0C_\x00\x0e\x00\x00\x00testnamespace\x000123456789ab"}, {bson.M{"_": false}, "\x08_\x00\x00"}, {bson.M{"_": true}, "\x08_\x00\x01"}, - {bson.M{"_": time.Unix(0, 258e6)}, // Note the NS <=> MS conversion. + {bson.M{"_": time.Unix(0, 258e6).UTC()}, // Note the NS <=> MS conversion. 
"\x09_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, {bson.M{"_": nil}, "\x0A_\x00"}, - {bson.M{"_": bson.RegEx{"ab", "cd"}}, + {bson.M{"_": bson.RegEx{Pattern: "ab", Options: "cd"}}, "\x0B_\x00ab\x00cd\x00"}, - {bson.M{"_": bson.JavaScript{"code", nil}}, + {bson.M{"_": bson.JavaScript{Code: "code", Scope: nil}}, "\x0D_\x00\x05\x00\x00\x00code\x00"}, {bson.M{"_": bson.Symbol("sym")}, "\x0E_\x00\x04\x00\x00\x00sym\x00"}, - {bson.M{"_": bson.JavaScript{"code", bson.M{"": nil}}}, + {bson.M{"_": bson.JavaScript{Code: "code", Scope: bson.M{"": nil}}}, "\x0F_\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + "\x07\x00\x00\x00\x0A\x00\x00"}, {bson.M{"_": 258}, @@ -200,7 +201,7 @@ func (s *S) TestUnmarshalRawAllItems(c *C) { continue } pv := reflect.New(reflect.ValueOf(value).Type()) - raw := bson.Raw{item.data[0], []byte(item.data[3:])} + raw := bson.Raw{Kind: item.data[0], Data: []byte(item.data[3:])} c.Logf("Unmarshal raw: %#v, %#v", raw, pv.Interface()) err := raw.Unmarshal(pv.Interface()) c.Assert(err, IsNil) @@ -209,7 +210,7 @@ func (s *S) TestUnmarshalRawAllItems(c *C) { } func (s *S) TestUnmarshalRawIncompatible(c *C) { - raw := bson.Raw{0x08, []byte{0x01}} // true + raw := bson.Raw{Kind: 0x08, Data: []byte{0x01}} // true err := raw.Unmarshal(&struct{}{}) c.Assert(err, ErrorMatches, "BSON kind 0x08 isn't compatible with type struct \\{\\}") } @@ -258,15 +259,15 @@ func (s *S) TestMarshalBuffer(c *C) { var oneWayMarshalItems = []testItemType{ // These are being passed as pointers, and will unmarshal as values. - {bson.M{"": &bson.Binary{0x02, []byte("old")}}, + {bson.M{"": &bson.Binary{Kind: 0x02, Data: []byte("old")}}, "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, - {bson.M{"": &bson.Binary{0x80, []byte("udef")}}, + {bson.M{"": &bson.Binary{Kind: 0x80, Data: []byte("udef")}}, "\x05\x00\x04\x00\x00\x00\x80udef"}, - {bson.M{"": &bson.RegEx{"ab", "cd"}}, + {bson.M{"": &bson.RegEx{Pattern: "ab", Options: "cd"}}, "\x0B\x00ab\x00cd\x00"}, - {bson.M{"": &bson.JavaScript{"code", nil}}, + {bson.M{"": &bson.JavaScript{Code: "code", Scope: nil}}, "\x0D\x00\x05\x00\x00\x00code\x00"}, - {bson.M{"": &bson.JavaScript{"code", bson.M{"": nil}}}, + {bson.M{"": &bson.JavaScript{Code: "code", Scope: bson.M{"": nil}}}, "\x0F\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + "\x07\x00\x00\x00\x0A\x00\x00"}, @@ -283,9 +284,9 @@ var oneWayMarshalItems = []testItemType{ "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, // Will unmarshal as a []byte. - {bson.M{"": bson.Binary{0x00, []byte("yo")}}, + {bson.M{"": bson.Binary{Kind: 0x00, Data: []byte("yo")}}, "\x05\x00\x02\x00\x00\x00\x00yo"}, - {bson.M{"": bson.Binary{0x02, []byte("old")}}, + {bson.M{"": bson.Binary{Kind: 0x02, Data: []byte("old")}}, "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, // No way to preserve the type information here. We might encode as a zero @@ -338,7 +339,7 @@ type specSample1 struct { } type specSample2 struct { - BSON []interface{} "BSON" + BSON []interface{} `bson:"BSON"` } var structSampleItems = []testItemType{ @@ -381,8 +382,54 @@ func (s *S) Test64bitInt(c *C) { // -------------------------------------------------------------------------- // Generic two-way struct marshaling tests. 
+type prefixPtr string +type prefixVal string + +func (t *prefixPtr) GetBSON() (interface{}, error) { + if t == nil { + return nil, nil + } + return "foo-" + string(*t), nil +} + +func (t *prefixPtr) SetBSON(raw bson.Raw) error { + var s string + if raw.Kind == 0x0A { + return bson.ErrSetZero + } + if err := raw.Unmarshal(&s); err != nil { + return err + } + if !strings.HasPrefix(s, "foo-") { + return errors.New("Prefix not found: " + s) + } + *t = prefixPtr(s[4:]) + return nil +} + +func (t prefixVal) GetBSON() (interface{}, error) { + return "foo-" + string(t), nil +} + +func (t *prefixVal) SetBSON(raw bson.Raw) error { + var s string + if raw.Kind == 0x0A { + return bson.ErrSetZero + } + if err := raw.Unmarshal(&s); err != nil { + return err + } + if !strings.HasPrefix(s, "foo-") { + return errors.New("Prefix not found: " + s) + } + *t = prefixVal(s[4:]) + return nil +} + var bytevar = byte(8) var byteptr = &bytevar +var prefixptr = prefixPtr("bar") +var prefixval = prefixVal("bar") var structItems = []testItemType{ {&struct{ Ptr *byte }{nil}, @@ -396,7 +443,7 @@ var structItems = []testItemType{ {&struct{ Byte byte }{0}, "\x10byte\x00\x00\x00\x00\x00"}, {&struct { - V byte "Tag" + V byte `bson:"Tag"` }{8}, "\x10Tag\x00\x08\x00\x00\x00"}, {&struct { @@ -411,14 +458,32 @@ var structItems = []testItemType{ {&struct{ A, C, B, D, F, E *byte }{}, "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x0Ae\x00"}, - {&struct{ V bson.Raw }{bson.Raw{0x03, []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}}, + {&struct{ V bson.Raw }{bson.Raw{Kind: 0x03, Data: []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}}, "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"}, - {&struct{ V bson.Raw }{bson.Raw{0x10, []byte("\x00\x00\x00\x00")}}, + {&struct{ V bson.Raw }{bson.Raw{Kind: 0x10, Data: []byte("\x00\x00\x00\x00")}}, "\x10v\x00" + "\x00\x00\x00\x00"}, // Byte arrays. {&struct{ V [2]byte }{[2]byte{'y', 'o'}}, "\x05v\x00\x02\x00\x00\x00\x00yo"}, + + {&struct{ V prefixPtr }{prefixPtr("buzz")}, + "\x02v\x00\x09\x00\x00\x00foo-buzz\x00"}, + + {&struct{ V *prefixPtr }{&prefixptr}, + "\x02v\x00\x08\x00\x00\x00foo-bar\x00"}, + + {&struct{ V *prefixPtr }{nil}, + "\x0Av\x00"}, + + {&struct{ V prefixVal }{prefixVal("buzz")}, + "\x02v\x00\x09\x00\x00\x00foo-buzz\x00"}, + + {&struct{ V *prefixVal }{&prefixval}, + "\x02v\x00\x08\x00\x00\x00foo-bar\x00"}, + + {&struct{ V *prefixVal }{nil}, + "\x0Av\x00"}, } func (s *S) TestMarshalStructItems(c *C) { @@ -438,7 +503,7 @@ func (s *S) TestUnmarshalStructItems(c *C) { func (s *S) TestUnmarshalRawStructItems(c *C) { for i, item := range structItems { - raw := bson.Raw{0x03, []byte(wrapInDoc(item.data))} + raw := bson.Raw{Kind: 0x03, Data: []byte(wrapInDoc(item.data))} zero := makeZeroDoc(item.obj) err := raw.Unmarshal(zero) c.Assert(err, IsNil) @@ -449,7 +514,7 @@ func (s *S) TestUnmarshalRawStructItems(c *C) { func (s *S) TestUnmarshalRawNil(c *C) { // Regression test: shouldn't try to nil out the pointer itself, // as it's not settable. - raw := bson.Raw{0x0A, []byte{}} + raw := bson.Raw{Kind: 0x0A, Data: []byte{}} err := raw.Unmarshal(&struct{}{}) c.Assert(err, IsNil) } @@ -469,25 +534,25 @@ type ignoreField struct { var marshalItems = []testItemType{ // Ordered document dump. Will unmarshal as a dictionary by default. 
- {bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, + {bson.D{{Name: "a", Value: nil}, {Name: "c", Value: nil}, {Name: "b", Value: nil}, {Name: "d", Value: nil}, {Name: "f", Value: nil}, {Name: "e", Value: true}}, "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, - {MyD{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}}, + {MyD{{Name: "a", Value: nil}, {Name: "c", Value: nil}, {Name: "b", Value: nil}, {Name: "d", Value: nil}, {Name: "f", Value: nil}, {Name: "e", Value: true}}, "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"}, - {&dOnIface{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, + {&dOnIface{bson.D{{Name: "a", Value: nil}, {Name: "c", Value: nil}, {Name: "b", Value: nil}, {Name: "d", Value: true}}}, "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, - {bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, + {bson.RawD{{Name: "a", Value: bson.Raw{Kind: 0x0A, Data: nil}}, {Name: "c", Value: bson.Raw{Kind: 0x0A, Data: nil}}, {Name: "b", Value: bson.Raw{Kind: 0x08, Data: []byte{0x01}}}}, "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, - {MyRawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}, + {MyRawD{{Name: "a", Value: bson.Raw{Kind: 0x0A, Data: nil}}, {Name: "c", Value: bson.Raw{Kind: 0x0A, Data: nil}}, {Name: "b", Value: bson.Raw{Kind: 0x08, Data: []byte{0x01}}}}, "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"}, - {&dOnIface{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, + {&dOnIface{bson.RawD{{Name: "a", Value: bson.Raw{Kind: 0x0A, Data: nil}}, {Name: "c", Value: bson.Raw{Kind: 0x0A, Data: nil}}, {Name: "b", Value: bson.Raw{Kind: 0x08, Data: []byte{0x01}}}}}, "\x03d\x00" + wrapInDoc("\x0Aa\x00"+"\x0Ac\x00"+"\x08b\x00\x01")}, {&ignoreField{"before", "ignore", "after"}, "\x02before\x00\a\x00\x00\x00before\x00\x02after\x00\x06\x00\x00\x00after\x00"}, // Marshalling a Raw document does nothing. - {bson.Raw{0x03, []byte(wrapInDoc("anything"))}, + {bson.Raw{Kind: 0x03, Data: []byte(wrapInDoc("anything"))}, "anything"}, {bson.Raw{Data: []byte(wrapInDoc("anything"))}, "anything"}, @@ -536,15 +601,15 @@ var unmarshalItems = []testItemType{ "\x02str\x00\x02\x00\x00\x00s\x00"}, // Ordered document. - {&struct{ bson.D }{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}}, + {&struct{ bson.D }{bson.D{{Name: "a", Value: nil}, {Name: "c", Value: nil}, {Name: "b", Value: nil}, {Name: "d", Value: true}}}, "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")}, // Raw document. - {&bson.Raw{0x03, []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))}, + {&bson.Raw{Kind: 0x03, Data: []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))}, "\x10byte\x00\x08\x00\x00\x00"}, // RawD document. - {&struct{ bson.RawD }{bson.RawD{{"a", bson.Raw{0x0A, []byte{}}}, {"c", bson.Raw{0x0A, []byte{}}}, {"b", bson.Raw{0x08, []byte{0x01}}}}}, + {&struct{ bson.RawD }{bson.RawD{{Name: "a", Value: bson.Raw{Kind: 0x0A, Data: []byte{}}}, {Name: "c", Value: bson.Raw{Kind: 0x0A, Data: []byte{}}}, {Name: "b", Value: bson.Raw{Kind: 0x08, Data: []byte{0x01}}}}}, "\x03rawd\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x08b\x00\x01")}, // Decode old binary. @@ -580,7 +645,7 @@ func (s *S) TestUnmarshalNilInStruct(c *C) { type structWithDupKeys struct { Name byte - Other byte "name" // Tag should precede. + Other byte `bson:"name"` // Tag should precede. 
} var marshalErrorItems = []testItemType{ @@ -594,11 +659,11 @@ var marshalErrorItems = []testItemType{ "Can't marshal complex128 in a BSON document"}, {&structWithDupKeys{}, "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - {bson.Raw{0xA, []byte{}}, + {bson.Raw{Kind: 0xA, Data: []byte{}}, "Attempted to marshal Raw kind 10 as a document"}, - {bson.Raw{0x3, []byte{}}, + {bson.Raw{Kind: 0x3, Data: []byte{}}, "Attempted to marshal empty Raw document"}, - {bson.M{"w": bson.Raw{0x3, []byte{}}}, + {bson.M{"w": bson.Raw{Kind: 0x3, Data: []byte{}}}, "Attempted to marshal empty Raw document"}, {&inlineCantPtr{&struct{ A, B int }{1, 2}}, "Option ,inline needs a struct value or map field"}, @@ -646,11 +711,11 @@ var unmarshalErrorItems = []unmarshalErrorType{ {struct{ Name bool }{}, "\x10name\x00\x08\x00\x00\x00", - "Unmarshal can't deal with struct values. Use a pointer."}, + "unmarshal can't deal with struct values. Use a pointer"}, {123, "\x10name\x00\x08\x00\x00\x00", - "Unmarshal needs a map or a pointer to a struct."}, + "unmarshal needs a map or a pointer to a struct"}, {nil, "\x08\x62\x00\x02", @@ -683,20 +748,20 @@ type unmarshalRawErrorType struct { var unmarshalRawErrorItems = []unmarshalRawErrorType{ // Tag name conflicts with existing parameter. {&structWithDupKeys{}, - bson.Raw{0x03, []byte("\x10byte\x00\x08\x00\x00\x00")}, + bson.Raw{Kind: 0x03, Data: []byte("\x10byte\x00\x08\x00\x00\x00")}, "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, {&struct{}{}, - bson.Raw{0xEE, []byte{}}, + bson.Raw{Kind: 0xEE, Data: []byte{}}, "Unknown element kind \\(0xEE\\)"}, {struct{ Name bool }{}, - bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, - "Raw Unmarshal can't deal with struct values. Use a pointer."}, + bson.Raw{Kind: 0x10, Data: []byte("\x08\x00\x00\x00")}, + "raw Unmarshal can't deal with struct values. 
Use a pointer"}, {123, - bson.Raw{0x10, []byte("\x08\x00\x00\x00")}, - "Raw Unmarshal needs a map or a valid pointer."}, + bson.Raw{Kind: 0x10, Data: []byte("\x08\x00\x00\x00")}, + "raw Unmarshal needs a map or a valid pointer"}, } func (s *S) TestUnmarshalRawErrorItems(c *C) { @@ -768,11 +833,11 @@ func (o *setterType) SetBSON(raw bson.Raw) error { } type ptrSetterDoc struct { - Field *setterType "_" + Field *setterType `bson:"_"` } type valSetterDoc struct { - Field setterType "_" + Field setterType `bson:"_"` } func (s *S) TestUnmarshalAllItemsWithPtrSetter(c *C) { @@ -856,12 +921,12 @@ func (s *S) TestUnmarshalSetterErrors(c *C) { } func (s *S) TestDMap(c *C) { - d := bson.D{{"a", 1}, {"b", 2}} + d := bson.D{{Name: "a", Value: 1}, {Name: "b", Value: 2}} c.Assert(d.Map(), DeepEquals, bson.M{"a": 1, "b": 2}) } -func (s *S) TestUnmarshalSetterSetZero(c *C) { - setterResult["foo"] = bson.SetZero +func (s *S) TestUnmarshalSetterErrSetZero(c *C) { + setterResult["foo"] = bson.ErrSetZero defer delete(setterResult, "field") data, err := bson.Marshal(bson.M{"field": "foo"}) @@ -892,7 +957,7 @@ func (t *typeWithGetter) GetBSON() (interface{}, error) { } type docWithGetterField struct { - Field *typeWithGetter "_" + Field *typeWithGetter `bson:"_"` } func (s *S) TestMarshalAllItemsWithGetter(c *C) { @@ -938,7 +1003,7 @@ func (t intGetter) GetBSON() (interface{}, error) { } type typeWithIntGetter struct { - V intGetter ",minsize" + V intGetter `bson:",minsize"` } func (s *S) TestMarshalShortWithGetter(c *C) { @@ -970,96 +1035,96 @@ type crossTypeItem struct { } type condStr struct { - V string ",omitempty" + V string `bson:",omitempty"` } type condStrNS struct { V string `a:"A" bson:",omitempty" b:"B"` } type condBool struct { - V bool ",omitempty" + V bool `bson:",omitempty"` } type condInt struct { - V int ",omitempty" + V int `bson:",omitempty"` } type condUInt struct { - V uint ",omitempty" + V uint `bson:",omitempty"` } type condFloat struct { - V float64 ",omitempty" + V float64 `bson:",omitempty"` } type condIface struct { - V interface{} ",omitempty" + V interface{} `bson:",omitempty"` } type condPtr struct { - V *bool ",omitempty" + V *bool `bson:",omitempty"` } type condSlice struct { - V []string ",omitempty" + V []string `bson:",omitempty"` } type condMap struct { - V map[string]int ",omitempty" + V map[string]int `bson:",omitempty"` } type namedCondStr struct { - V string "myv,omitempty" + V string `bson:"myv,omitempty"` } type condTime struct { - V time.Time ",omitempty" + V time.Time `bson:",omitempty"` } type condStruct struct { - V struct{ A []int } ",omitempty" + V struct{ A []int } `bson:",omitempty"` } type condRaw struct { - V bson.Raw ",omitempty" + V bson.Raw `bson:",omitempty"` } type shortInt struct { - V int64 ",minsize" + V int64 `bson:",minsize"` } type shortUint struct { - V uint64 ",minsize" + V uint64 `bson:",minsize"` } type shortIface struct { - V interface{} ",minsize" + V interface{} `bson:",minsize"` } type shortPtr struct { - V *int64 ",minsize" + V *int64 `bson:",minsize"` } type shortNonEmptyInt struct { - V int64 ",minsize,omitempty" + V int64 `bson:",minsize,omitempty"` } type inlineInt struct { - V struct{ A, B int } ",inline" + V struct{ A, B int } `bson:",inline"` } type inlineCantPtr struct { - V *struct{ A, B int } ",inline" + V *struct{ A, B int } `bson:",inline"` } type inlineDupName struct { A int - V struct{ A, B int } ",inline" + V struct{ A, B int } `bson:",inline"` } type inlineMap struct { A int - M map[string]interface{} ",inline" + M 
map[string]interface{} `bson:",inline"` } type inlineMapInt struct { A int - M map[string]int ",inline" + M map[string]int `bson:",inline"` } type inlineMapMyM struct { A int - M MyM ",inline" + M MyM `bson:",inline"` } type inlineDupMap struct { - M1 map[string]interface{} ",inline" - M2 map[string]interface{} ",inline" + M1 map[string]interface{} `bson:",inline"` + M2 map[string]interface{} `bson:",inline"` } type inlineBadKeyMap struct { - M map[int]int ",inline" + M map[int]int `bson:",inline"` } type inlineUnexported struct { - M map[string]interface{} ",inline" - unexported ",inline" + M map[string]interface{} `bson:",inline"` + unexported `bson:",inline"` } type unexported struct { A int @@ -1077,7 +1142,7 @@ func (s getterSetterD) GetBSON() (interface{}, error) { func (s *getterSetterD) SetBSON(raw bson.Raw) error { var doc bson.D err := raw.Unmarshal(&doc) - doc = append(doc, bson.DocElem{"suffix", true}) + doc = append(doc, bson.DocElem{Name: "suffix", Value: true}) *s = getterSetterD(doc) return err } @@ -1085,7 +1150,7 @@ func (s *getterSetterD) SetBSON(raw bson.Raw) error { type getterSetterInt int func (i getterSetterInt) GetBSON() (interface{}, error) { - return bson.D{{"a", int(i)}}, nil + return bson.D{{Name: "a", Value: int(i)}}, nil } func (i *getterSetterInt) SetBSON(raw bson.Raw) error { @@ -1265,7 +1330,7 @@ var twoWayCrossItems = []crossTypeItem{ {&condPtr{&falsevar}, map[string]bool{"v": false}}, {&condPtr{}, map[string]string{}}, - {&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}}, + {&condTime{time.Unix(123456789, 123e6).UTC()}, map[string]time.Time{"v": time.Unix(123456789, 123e6).UTC()}}, {&condTime{}, map[string]string{}}, {&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, @@ -1320,17 +1385,17 @@ var twoWayCrossItems = []crossTypeItem{ {&struct{ V time.Time }{}, map[string]interface{}{"v": time.Time{}}}, // zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds - {&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()}, - map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}}, + {&struct{ V time.Time }{time.Unix(-62135596799, 1e6).UTC()}, + map[string]interface{}{"v": time.Unix(-62135596799, 1e6).UTC()}}, // bson.D <=> []DocElem - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}}, - {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}}, - {&struct{ V MyD }{MyD{{"a", 1}}}, &bson.D{{"v", bson.D{{"a", 1}}}}}, + {&bson.D{{Name: "a", Value: bson.D{{Name: "b", Value: 1}, {Name: "c", Value: 2}}}}, &bson.D{{Name: "a", Value: bson.D{{Name: "b", Value: 1}, {Name: "c", Value: 2}}}}}, + {&bson.D{{Name: "a", Value: bson.D{{Name: "b", Value: 1}, {Name: "c", Value: 2}}}}, &MyD{{Name: "a", Value: MyD{{Name: "b", Value: 1}, {Name: "c", Value: 2}}}}}, + {&struct{ V MyD }{MyD{{Name: "a", Value: 1}}}, &bson.D{{Name: "v", Value: bson.D{{Name: "a", Value: 1}}}}}, // bson.RawD <=> []RawDocElem - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, - {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, + {&bson.RawD{{Name: "a", Value: bson.Raw{Kind: 0x08, Data: []byte{0x01}}}}, &bson.RawD{{Name: "a", Value: bson.Raw{Kind: 0x08, Data: []byte{0x01}}}}}, + {&bson.RawD{{Name: "a", Value: bson.Raw{Kind: 0x08, Data: []byte{0x01}}}}, &MyRawD{{Name: "a", Value: bson.Raw{Kind: 0x08, Data: []byte{0x01}}}}}, // bson.M <=> map 
{bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}}, @@ -1345,8 +1410,8 @@ var twoWayCrossItems = []crossTypeItem{ {&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}}, // bson.D <=> non-struct getter/setter - {&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}}, - {&bson.D{{"a", 42}}, &gsintvar}, + {&bson.D{{Name: "a", Value: 1}}, &getterSetterD{{Name: "a", Value: 1}, {Name: "suffix", Value: true}}}, + {&bson.D{{Name: "a", Value: 42}}, &gsintvar}, // Interface slice setter. {&struct{ V ifaceSlice }{ifaceSlice{nil, nil, nil}}, bson.M{"v": []interface{}{3}}}, @@ -1368,7 +1433,7 @@ var oneWayCrossItems = []crossTypeItem{ // Ensure omitempty on struct with private fields works properly. {&struct { - V struct{ v time.Time } ",omitempty" + V struct{ v time.Time } `bson:",omitempty"` }{}, map[string]interface{}{}}, // Attempt to marshal slice into RawD (issue #120). diff --git a/bson/decimal.go b/bson/decimal.go index 3d2f70020..672ba1825 100644 --- a/bson/decimal.go +++ b/bson/decimal.go @@ -144,6 +144,8 @@ func dErr(s string) (Decimal128, error) { return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s) } +// ParseDecimal128 parse a string and return the corresponding value as +// a decimal128 func ParseDecimal128(s string) (Decimal128, error) { orig := s if s == "" { diff --git a/bson/decimal_test.go b/bson/decimal_test.go index 2cc510cca..142adc4a4 100644 --- a/bson/decimal_test.go +++ b/bson/decimal_test.go @@ -27,162 +27,162 @@ package bson_test import ( - "encoding/hex" - "encoding/json" - "fmt" - "regexp" - "strings" + "encoding/hex" + "encoding/json" + "fmt" + "regexp" + "strings" - "github.com/globalsign/mgo/bson" + "github.com/globalsign/mgo/bson" - . "gopkg.in/check.v1" + . "gopkg.in/check.v1" ) // -------------------------------------------------------------------------- // Decimal tests type decimalTests struct { - Valid []struct { - Description string `json:"description"` - BSON string `json:"bson"` - CanonicalBSON string `json:"canonical_bson"` - ExtJSON string `json:"extjson"` - CanonicalExtJSON string `json:"canonical_extjson"` - Lossy bool `json:"lossy"` - } `json:"valid"` + Valid []struct { + Description string `json:"description"` + BSON string `json:"bson"` + CanonicalBSON string `json:"canonical_bson"` + ExtJSON string `json:"extjson"` + CanonicalExtJSON string `json:"canonical_extjson"` + Lossy bool `json:"lossy"` + } `json:"valid"` - ParseErrors []struct { - Description string `json:"description"` - String string `json:"string"` - } `json:"parseErrors"` + ParseErrors []struct { + Description string `json:"description"` + String string `json:"string"` + } `json:"parseErrors"` } func extJSONRepr(s string) string { - var value struct { - D struct { - Repr string `json:"$numberDecimal"` - } `json:"d"` - } - err := json.Unmarshal([]byte(s), &value) - if err != nil { - panic(err) - } - return value.D.Repr + var value struct { + D struct { + Repr string `json:"$numberDecimal"` + } `json:"d"` + } + err := json.Unmarshal([]byte(s), &value) + if err != nil { + panic(err) + } + return value.D.Repr } func (s *S) TestDecimalTests(c *C) { - // These also conform to the spec and are used by Go elsewhere. - // (e.g. math/big won't parse "Infinity"). - goStr := func(s string) string { - switch s { - case "Infinity": - return "Inf" - case "-Infinity": - return "-Inf" - } - return s - } + // These also conform to the spec and are used by Go elsewhere. + // (e.g. math/big won't parse "Infinity"). 
+ goStr := func(s string) string { + switch s { + case "Infinity": + return "Inf" + case "-Infinity": + return "-Inf" + } + return s + } - for _, testEntry := range decimalTestsJSON { - testFile := testEntry.file + for _, testEntry := range decimalTestsJSON { + testFile := testEntry.file - var tests decimalTests - err := json.Unmarshal([]byte(testEntry.json), &tests) - c.Assert(err, IsNil) + var tests decimalTests + err := json.Unmarshal([]byte(testEntry.json), &tests) + c.Assert(err, IsNil) - for _, test := range tests.Valid { - c.Logf("Running %s test: %s", testFile, test.Description) + for _, test := range tests.Valid { + c.Logf("Running %s test: %s", testFile, test.Description) - test.BSON = strings.ToLower(test.BSON) + test.BSON = strings.ToLower(test.BSON) - // Unmarshal value from BSON data. - bsonData, err := hex.DecodeString(test.BSON) - var bsonValue struct{ D interface{} } - err = bson.Unmarshal(bsonData, &bsonValue) - c.Assert(err, IsNil) - dec128, ok := bsonValue.D.(bson.Decimal128) - c.Assert(ok, Equals, true) + // Unmarshal value from BSON data. + bsonData, err := hex.DecodeString(test.BSON) + var bsonValue struct{ D interface{} } + err = bson.Unmarshal(bsonData, &bsonValue) + c.Assert(err, IsNil) + dec128, ok := bsonValue.D.(bson.Decimal128) + c.Assert(ok, Equals, true) - // Extract ExtJSON representations (canonical and not). - extjRepr := extJSONRepr(test.ExtJSON) - cextjRepr := extjRepr - if test.CanonicalExtJSON != "" { - cextjRepr = extJSONRepr(test.CanonicalExtJSON) - } + // Extract ExtJSON representations (canonical and not). + extjRepr := extJSONRepr(test.ExtJSON) + cextjRepr := extjRepr + if test.CanonicalExtJSON != "" { + cextjRepr = extJSONRepr(test.CanonicalExtJSON) + } - wantRepr := goStr(cextjRepr) + wantRepr := goStr(cextjRepr) - // Generate canonical representation. - c.Assert(dec128.String(), Equals, wantRepr) + // Generate canonical representation. + c.Assert(dec128.String(), Equals, wantRepr) - // Parse original canonical representation. - parsed, err := bson.ParseDecimal128(cextjRepr) - c.Assert(err, IsNil) - c.Assert(parsed.String(), Equals, wantRepr) + // Parse original canonical representation. + parsed, err := bson.ParseDecimal128(cextjRepr) + c.Assert(err, IsNil) + c.Assert(parsed.String(), Equals, wantRepr) - // Parse non-canonical representation. - parsed, err = bson.ParseDecimal128(extjRepr) - c.Assert(err, IsNil) - c.Assert(parsed.String(), Equals, wantRepr) + // Parse non-canonical representation. + parsed, err = bson.ParseDecimal128(extjRepr) + c.Assert(err, IsNil) + c.Assert(parsed.String(), Equals, wantRepr) - // Parse Go canonical representation (Inf vs. Infinity). - parsed, err = bson.ParseDecimal128(wantRepr) - c.Assert(err, IsNil) - c.Assert(parsed.String(), Equals, wantRepr) + // Parse Go canonical representation (Inf vs. Infinity). + parsed, err = bson.ParseDecimal128(wantRepr) + c.Assert(err, IsNil) + c.Assert(parsed.String(), Equals, wantRepr) - // Marshal original value back into BSON data. - data, err := bson.Marshal(bsonValue) - c.Assert(err, IsNil) - c.Assert(hex.EncodeToString(data), Equals, test.BSON) + // Marshal original value back into BSON data. + data, err := bson.Marshal(bsonValue) + c.Assert(err, IsNil) + c.Assert(hex.EncodeToString(data), Equals, test.BSON) - if test.Lossy { - continue - } + if test.Lossy { + continue + } - // Marshal the parsed canonical representation. 
- var parsedValue struct{ D interface{} } - parsedValue.D = parsed - data, err = bson.Marshal(parsedValue) - c.Assert(err, IsNil) - c.Assert(hex.EncodeToString(data), Equals, test.BSON) - } + // Marshal the parsed canonical representation. + var parsedValue struct{ D interface{} } + parsedValue.D = parsed + data, err = bson.Marshal(parsedValue) + c.Assert(err, IsNil) + c.Assert(hex.EncodeToString(data), Equals, test.BSON) + } - for _, test := range tests.ParseErrors { - c.Logf("Running %s parse error test: %s (string %q)", testFile, test.Description, test.String) + for _, test := range tests.ParseErrors { + c.Logf("Running %s parse error test: %s (string %q)", testFile, test.Description, test.String) - _, err := bson.ParseDecimal128(test.String) - quoted := regexp.QuoteMeta(fmt.Sprintf("%q", test.String)) - c.Assert(err, ErrorMatches, `cannot parse `+quoted+` as a decimal128`) - } - } + _, err := bson.ParseDecimal128(test.String) + quoted := regexp.QuoteMeta(fmt.Sprintf("%q", test.String)) + c.Assert(err, ErrorMatches, `cannot parse `+quoted+` as a decimal128`) + } + } } const decBenchNum = "9.999999999999999999999999999999999E+6144" func (s *S) BenchmarkDecimal128String(c *C) { - d, err := bson.ParseDecimal128(decBenchNum) - c.Assert(err, IsNil) - c.Assert(d.String(), Equals, decBenchNum) + d, err := bson.ParseDecimal128(decBenchNum) + c.Assert(err, IsNil) + c.Assert(d.String(), Equals, decBenchNum) - c.ResetTimer() - for i := 0; i < c.N; i++ { - d.String() - } + c.ResetTimer() + for i := 0; i < c.N; i++ { + _ = d.String() + } } func (s *S) BenchmarkDecimal128Parse(c *C) { - var err error - c.ResetTimer() - for i := 0; i < c.N; i++ { - _, err = bson.ParseDecimal128(decBenchNum) - } - if err != nil { - panic(err) - } + var err error + c.ResetTimer() + for i := 0; i < c.N; i++ { + _, err = bson.ParseDecimal128(decBenchNum) + } + if err != nil { + panic(err) + } } var decimalTestsJSON = []struct{ file, json string }{ - {"decimal128-1.json", ` + {"decimal128-1.json", ` { "description": "Decimal128", "bson_type": "0x13", @@ -502,7 +502,7 @@ var decimalTestsJSON = []struct{ file, json string }{ } `}, - {"decimal128-2.json", ` + {"decimal128-2.json", ` { "description": "Decimal128", "bson_type": "0x13", @@ -1297,7 +1297,7 @@ var decimalTestsJSON = []struct{ file, json string }{ } `}, - {"decimal128-3.json", ` + {"decimal128-3.json", ` { "description": "Decimal128", "bson_type": "0x13", @@ -3071,7 +3071,7 @@ var decimalTestsJSON = []struct{ file, json string }{ } `}, - {"decimal128-4.json", ` + {"decimal128-4.json", ` { "description": "Decimal128", "bson_type": "0x13", @@ -3239,7 +3239,7 @@ var decimalTestsJSON = []struct{ file, json string }{ } `}, - {"decimal128-5.json", ` + {"decimal128-5.json", ` { "description": "Decimal128", "bson_type": "0x13", @@ -3643,7 +3643,7 @@ var decimalTestsJSON = []struct{ file, json string }{ } `}, - {"decimal128-6.json", ` + {"decimal128-6.json", ` { "description": "Decimal128", "bson_type": "0x13", @@ -3777,7 +3777,7 @@ var decimalTestsJSON = []struct{ file, json string }{ } `}, - {"decimal128-7.json", ` + {"decimal128-7.json", ` { "description": "Decimal128", "bson_type": "0x13", diff --git a/bson/decode.go b/bson/decode.go index 244eb3af1..3e257f846 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -87,18 +87,20 @@ func setterStyle(outt reflect.Type) int { setterMutex.RLock() style := setterStyles[outt] setterMutex.RUnlock() - if style == setterUnknown { - setterMutex.Lock() - defer setterMutex.Unlock() - if outt.Implements(setterIface) { - 
setterStyles[outt] = setterType - } else if reflect.PtrTo(outt).Implements(setterIface) { - setterStyles[outt] = setterAddr - } else { - setterStyles[outt] = setterNone - } - style = setterStyles[outt] + if style != setterUnknown { + return style + } + + setterMutex.Lock() + defer setterMutex.Unlock() + if outt.Implements(setterIface) { + style = setterType + } else if reflect.PtrTo(outt).Implements(setterIface) { + style = setterAddr + } else { + style = setterNone } + setterStyles[outt] = style return style } @@ -457,7 +459,7 @@ func (d *decoder) dropElem(kind byte) { } d.i += l case 0x06: // undefined - case 0x07: // objectID + case 0x07: // objectId d.i += 12 case 0x08: k := d.readByte() @@ -575,7 +577,7 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { if i == -62135596800000 { in = time.Time{} // In UTC for convenience. } else { - in = time.Unix(i/1e3, i%1e3*1e6) + in = time.Unix(i/1e3, i%1e3*1e6).UTC() } case 0x0A: // Nil in = nil @@ -629,7 +631,7 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { if setter := getSetter(outt, out); setter != nil { err := setter.SetBSON(Raw{kind, d.in[start:d.i]}) - if err == SetZero { + if err == ErrSetZero { out.Set(reflect.Zero(outt)) return true } diff --git a/bson/encode.go b/bson/encode.go index 2ce66339f..61f388fa1 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -35,6 +35,7 @@ import ( "reflect" "sort" "strconv" + "sync" "time" ) @@ -60,13 +61,28 @@ var ( const itoaCacheSize = 32 +const ( + getterUnknown = iota + getterNone + getterTypeVal + getterTypePtr + getterAddr +) + var itoaCache []string +var getterStyles map[reflect.Type]int +var getterIface reflect.Type +var getterMutex sync.RWMutex + func init() { itoaCache = make([]string, itoaCacheSize) for i := 0; i != itoaCacheSize; i++ { itoaCache[i] = strconv.Itoa(i) } + var iface Getter + getterIface = reflect.TypeOf(&iface).Elem() + getterStyles = make(map[reflect.Type]int) } func itoa(i int) string { @@ -76,6 +92,52 @@ func itoa(i int) string { return strconv.Itoa(i) } +func getterStyle(outt reflect.Type) int { + getterMutex.RLock() + style := getterStyles[outt] + getterMutex.RUnlock() + if style != getterUnknown { + return style + } + + getterMutex.Lock() + defer getterMutex.Unlock() + if outt.Implements(getterIface) { + vt := outt + for vt.Kind() == reflect.Ptr { + vt = vt.Elem() + } + if vt.Implements(getterIface) { + style = getterTypeVal + } else { + style = getterTypePtr + } + } else if reflect.PtrTo(outt).Implements(getterIface) { + style = getterAddr + } else { + style = getterNone + } + getterStyles[outt] = style + return style +} + +func getGetter(outt reflect.Type, out reflect.Value) Getter { + style := getterStyle(outt) + if style == getterNone { + return nil + } + if style == getterAddr { + if !out.CanAddr() { + return nil + } + return out.Addr().Interface().(Getter) + } + if style == getterTypeVal && out.Kind() == reflect.Ptr && out.IsNil() { + return nil + } + return out.Interface().(Getter) +} + // -------------------------------------------------------------------------- // Marshaling of the document value itself. 
@@ -253,7 +315,7 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { return } - if getter, ok := v.Interface().(Getter); ok { + if getter := getGetter(v.Type(), v); getter != nil { getv, err := getter.GetBSON() if err != nil { panic(err) @@ -331,7 +393,7 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { // Stored as int64 e.addElemName(0x12, name) - e.addInt64(int64(v.Int()/1e6)) + e.addInt64(int64(v.Int() / 1e6)) default: i := v.Int() if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 { diff --git a/bson/json_test.go b/bson/json_test.go index 880fb87c2..fb8a4ac5b 100644 --- a/bson/json_test.go +++ b/bson/json_test.go @@ -65,7 +65,7 @@ var jsonTests = []jsonTest{ // $regex { - a: bson.RegEx{"pattern", "options"}, + a: bson.RegEx{Pattern: "pattern", Options: "options"}, b: `{"$regex":"pattern","$options":"options"}`, }, diff --git a/bulk_test.go b/bulk_test.go index b1a5fbb3c..fa91dc44c 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -64,7 +64,7 @@ func (s *S) TestBulkInsertError(c *C) { c.Assert(mgo.IsDup(err), Equals, true) type doc struct { - N int `_id` + N int `bson:"_id"` } var res []doc err = coll.Find(nil).Sort("_id").All(&res) @@ -85,7 +85,7 @@ func (s *S) TestBulkInsertErrorUnordered(c *C) { c.Assert(err, ErrorMatches, ".*duplicate key.*") type doc struct { - N int `_id` + N int `bson:"_id"` } var res []doc err = coll.Find(nil).Sort("_id").All(&res) @@ -110,7 +110,7 @@ func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) { const total = 4096 type doc struct { - Id int `_id` + Id int `bson:"_id"` } docs := make([]interface{}, total) for i := 0; i < total; i++ { diff --git a/cluster.go b/cluster.go index 81e4f7ff5..7fc639c24 100644 --- a/cluster.go +++ b/cluster.go @@ -157,7 +157,7 @@ func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResul if cluster.appName != "" { metaInfo["application"] = bson.M{"name": cluster.appName} } - err := session.Run(bson.D{{"isMaster", 1}, {"client", metaInfo}}, result) + err := session.Run(bson.D{{Name: "isMaster", Value: 1}, {Name: "client", Value: metaInfo}}, result) session.Close() return err } @@ -667,7 +667,6 @@ func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout } return s, nil } - panic("unreached") } func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) { diff --git a/cluster_test.go b/cluster_test.go index 1436cc317..539422be7 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -500,7 +500,7 @@ func (s *S) TestModePrimaryHiccup(c *C) { sessions[i].Close() } - // Kill the master, but bring it back immediatelly. + // Kill the master, but bring it back immediately. 
host := result.Host s.Stop(host) s.StartAll() @@ -1526,7 +1526,7 @@ func (s *S) TestRemovalOfClusterMember(c *C) { "40023": `{_id: 3, host: "127.0.0.1:40023", priority: 0, tags: {rs2: "c"}}`, } master.Refresh() - master.Run(bson.D{{"$eval", `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil) + master.Run(bson.D{{Name: "$eval", Value: `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil) master.Close() slave.Close() @@ -1541,7 +1541,7 @@ func (s *S) TestRemovalOfClusterMember(c *C) { c.Logf("========== Removing slave: %s ==========", slaveAddr) - master.Run(bson.D{{"$eval", `rs.remove("` + slaveAddr + `")`}}, nil) + master.Run(bson.D{{Name: "$eval", Value: `rs.remove("` + slaveAddr + `")`}}, nil) master.Refresh() @@ -1563,7 +1563,7 @@ func (s *S) TestRemovalOfClusterMember(c *C) { } live := master.LiveServers() if len(live) != 2 { - c.Errorf("Removed server still considered live: %#s", live) + c.Errorf("Removed server still considered live: %v", live) } c.Log("========== Test succeeded. ==========") @@ -1812,6 +1812,7 @@ func (s *S) TestPrimaryShutdownOnAuthShard(c *C) { c.Assert(err, IsNil) count, err := coll.Count() + c.Assert(err, IsNil) c.Assert(count > 1, Equals, true) } @@ -1977,13 +1978,13 @@ func (s *S) TestSelectServers(c *C) { var result struct{ Host string } session.Refresh() - session.SelectServers(bson.D{{"rs1", "b"}}) + session.SelectServers(bson.D{{Name: "rs1", Value: "b"}}) err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, "40012") session.Refresh() - session.SelectServers(bson.D{{"rs1", "c"}}) + session.SelectServers(bson.D{{Name: "rs1", Value: "c"}}) err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, "40013") @@ -2035,7 +2036,7 @@ func (s *S) TestSelectServersWithMongos(c *C) { mongos.SetMode(mgo.Monotonic, true) mongos.Refresh() - mongos.SelectServers(bson.D{{"rs2", slave1}}) + mongos.SelectServers(bson.D{{Name: "rs2", Value: slave1}}) coll := mongos.DB("mydb").C("mycoll") result := &struct{}{} for i := 0; i != 5; i++ { @@ -2044,7 +2045,7 @@ func (s *S) TestSelectServersWithMongos(c *C) { } mongos.Refresh() - mongos.SelectServers(bson.D{{"rs2", slave2}}) + mongos.SelectServers(bson.D{{Name: "rs2", Value: slave2}}) coll = mongos.DB("mydb").C("mycoll") for i := 0; i != 7; i++ { err := coll.Find(nil).One(result) diff --git a/dbtest/dbserver.go b/dbtest/dbserver.go index 4fe530c90..b74280801 100644 --- a/dbtest/dbserver.go +++ b/dbtest/dbserver.go @@ -142,7 +142,7 @@ func (dbs *DBServer) Session() *mgo.Session { // checkSessions ensures all mgo sessions opened were properly closed. // For slightly faster tests, it may be disabled setting the -// environmnet variable CHECK_SESSIONS to 0. +// environment variable CHECK_SESSIONS to 0. func (dbs *DBServer) checkSessions() { if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil { return diff --git a/dbtest/dbserver_test.go b/dbtest/dbserver_test.go index f0576c25c..b3cc45a8a 100644 --- a/dbtest/dbserver_test.go +++ b/dbtest/dbserver_test.go @@ -78,6 +78,8 @@ func (s *S) TestStop(c *C) { // Server should not be running anymore. 
session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond) + c.Assert(err, IsNil) + if session != nil { session.Close() c.Fatalf("Stop did not stop the server") diff --git a/gridfs.go b/gridfs.go index 71ca609fa..0954b166b 100644 --- a/gridfs.go +++ b/gridfs.go @@ -39,6 +39,26 @@ import ( "github.com/globalsign/mgo/bson" ) +// GridFS stores files in two collections: +// +// - chunks stores the binary chunks. For details, see the chunks Collection. +// - files stores the file’s metadata. For details, see the files Collection. +// +// GridFS places the collections in a common bucket by prefixing each with the bucket name. +// By default, GridFS uses two collections with a bucket named fs: +// +// - fs.files +// - fs.chunks +// +// You can choose a different bucket name, as well as create multiple buckets in a single database. +// The full collection name, which includes the bucket name, is subject to the namespace length limit. +// +// Relevant documentation: +// +// https://docs.mongodb.com/manual/core/gridfs/ +// https://docs.mongodb.com/manual/core/gridfs/#gridfs-chunks-collection +// https://docs.mongodb.com/manual/core/gridfs/#gridfs-files-collection +// type GridFS struct { Files *Collection Chunks *Collection @@ -52,6 +72,7 @@ const ( gfsWriting gfsFileMode = 2 ) +// GridFile document in files collection type GridFile struct { m sync.Mutex c sync.Cond @@ -73,19 +94,19 @@ type GridFile struct { } type gfsFile struct { - Id interface{} "_id" - ChunkSize int "chunkSize" - UploadDate time.Time "uploadDate" - Length int64 ",minsize" + Id interface{} `bson:"_id"` + ChunkSize int `bson:"chunkSize"` + UploadDate time.Time `bson:"uploadDate"` + Length int64 `bson:",minsize"` MD5 string - Filename string ",omitempty" - ContentType string "contentType,omitempty" - Metadata *bson.Raw ",omitempty" + Filename string `bson:",omitempty"` + ContentType string `bson:"contentType,omitempty"` + Metadata *bson.Raw `bson:",omitempty"` } type gfsChunk struct { - Id interface{} "_id" - FilesId interface{} "files_id" + Id interface{} `bson:"_id"` + FilesId interface{} `bson:"files_id"` N int Data []byte } @@ -319,12 +340,12 @@ func (gfs *GridFS) RemoveId(id interface{}) error { if err != nil { return err } - _, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}}) + _, err = gfs.Chunks.RemoveAll(bson.D{{Name: "files_id", Value: id}}) return err } type gfsDocId struct { - Id interface{} "_id" + Id interface{} `bson:"_id"` } // Remove deletes all files with the provided name from the GridFS. @@ -411,7 +432,7 @@ func (file *GridFile) ContentType() string { return file.doc.ContentType } -// ContentType changes the optional file content type. An empty string may be +// SetContentType changes the optional file content type. An empty string may be // used to unset it. 
 //
 // It is a runtime error to call this function when the file is not open
@@ -530,7 +551,7 @@ func (file *GridFile) completeWrite() {
 		file.err = file.gfs.Files.Insert(file.doc)
 	}
 	if file.err != nil {
-		file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
+		file.gfs.Chunks.RemoveAll(bson.D{{Name: "files_id", Value: file.doc.Id}})
 	}
 	if file.err == nil {
 		index := Index{
@@ -734,7 +755,7 @@ func (file *GridFile) getChunk() (data []byte, err error) {
 	} else {
 		debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
 		var doc gfsChunk
-		err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
+		err = file.gfs.Chunks.Find(bson.D{{Name: "files_id", Value: file.doc.Id}, {Name: "n", Value: file.chunk}}).One(&doc)
 		data = doc.Data
 	}
 	file.chunk++
@@ -750,7 +771,7 @@ func (file *GridFile) getChunk() (data []byte, err error) {
 			defer session.Close()
 			chunks = chunks.With(session)
 			var doc gfsChunk
-			cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
+			cache.err = chunks.Find(bson.D{{Name: "files_id", Value: id}, {Name: "n", Value: n}}).One(&doc)
 			cache.data = doc.Data
 			cache.wait.Unlock()
 		}(file.doc.Id, file.chunk)
diff --git a/internal/json/decode.go b/internal/json/decode.go
index 2171d91a7..d5ca1f9a8 100644
--- a/internal/json/decode.go
+++ b/internal/json/decode.go
@@ -899,7 +899,7 @@ func (d *decodeState) name(v reflect.Value) {
 	}
 
 	// Check for unmarshaler on func field itself.
-	u, ut, pv = d.indirect(v, false)
+	u, _, _ = d.indirect(v, false)
 	if u != nil {
 		d.off = nameStart
 		err := u.UnmarshalJSON(d.next())
diff --git a/internal/json/decode_test.go b/internal/json/decode_test.go
index 30e46ca44..e921d48bd 100644
--- a/internal/json/decode_test.go
+++ b/internal/json/decode_test.go
@@ -109,7 +109,7 @@ var (
 	umstructXY = ustructText{unmarshalerText{"x", "y"}}
 
 	ummapType = map[unmarshalerText]bool{}
-	ummapXY   = map[unmarshalerText]bool{unmarshalerText{"x", "y"}: true}
+	ummapXY   = map[unmarshalerText]bool{{"x", "y"}: true}
 )
 
 // Test data structures for anonymous fields.
diff --git a/internal/json/encode.go b/internal/json/encode.go
index 67a0f0062..e4b8f8648 100644
--- a/internal/json/encode.go
+++ b/internal/json/encode.go
@@ -209,6 +209,8 @@ func (e *UnsupportedTypeError) Error() string {
 	return "json: unsupported type: " + e.Type.String()
 }
 
+// An UnsupportedValueError is returned by Marshal when attempting
+// to encode an unsupported value.
 type UnsupportedValueError struct {
 	Value reflect.Value
 	Str   string
@@ -218,7 +220,7 @@ func (e *UnsupportedValueError) Error() string {
 	return "json: unsupported value: " + e.Str
 }
 
-// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// InvalidUTF8Error is the error that, before Go 1.2, was returned by Marshal when
 // attempting to encode a string value with invalid UTF-8 sequences.
 // As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
 // replacing invalid bytes with the Unicode replacement rune U+FFFD.
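The GridFS doc comment added above describes the fs.files/fs.chunks bucket layout; a short usage sketch may help. It assumes an established session (e.g. from mgo.Dial); the database and file names are illustrative:

```go
gfs := session.DB("mydb").GridFS("fs") // backed by fs.files and fs.chunks

f, err := gfs.Create("hello.txt")
if err != nil {
	panic(err)
}
if _, err = f.Write([]byte("hello, gridfs")); err != nil {
	panic(err)
}
if err = f.Close(); err != nil { // flushes pending chunks and the files document
	panic(err)
}
```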
@@ -232,6 +234,8 @@ func (e *InvalidUTF8Error) Error() string {
 	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
 }
 
+// A MarshalerError is returned by Marshal when attempting
+// to marshal an invalid JSON value.
 type MarshalerError struct {
 	Type reflect.Type
 	Err  error
diff --git a/internal/json/stream_test.go b/internal/json/stream_test.go
index 4ebeaba96..6c40f0ab2 100644
--- a/internal/json/stream_test.go
+++ b/internal/json/stream_test.go
@@ -282,7 +282,7 @@ type decodeThis struct {
 	v interface{}
 }
 
-var tokenStreamCases []tokenStreamCase = []tokenStreamCase{
+var tokenStreamCases = []tokenStreamCase{
 	// streaming token cases
 	{json: `10`, expTokens: []interface{}{float64(10)}},
 	{json: ` [10] `, expTokens: []interface{}{
diff --git a/internal/sasl/sasl.go b/internal/sasl/sasl.go
index 870a0add4..25a537426 100644
--- a/internal/sasl/sasl.go
+++ b/internal/sasl/sasl.go
@@ -26,7 +26,8 @@ import (
 	"unsafe"
 )
 
-type saslStepper interface {
+// Stepper is the interface implemented by a SASL session
+type Stepper interface {
 	Step(serverData []byte) (clientData []byte, done bool, err error)
 	Close()
 }
@@ -50,7 +51,8 @@ func initSASL() {
 	}
 }
 
-func New(username, password, mechanism, service, host string) (saslStepper, error) {
+// New creates a new saslSession
+func New(username, password, mechanism, service, host string) (Stepper, error) {
 	initOnce.Do(initSASL)
 	if initError != nil {
 		return nil, initError
diff --git a/internal/scram/scram.go b/internal/scram/scram.go
index 80cda9135..d3ddd02fd 100644
--- a/internal/scram/scram.go
+++ b/internal/scram/scram.go
@@ -24,7 +24,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Pacakage scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
+// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
 //
 // http://tools.ietf.org/html/rfc5802
 //
@@ -96,7 +96,7 @@ func (c *Client) Out() []byte {
 	return c.out.Bytes()
 }
 
-// Err returns the error that ocurred, or nil if there were no errors.
+// Err returns the error that occurred, or nil if there were no errors.
 func (c *Client) Err() error {
 	return c.err
 }
@@ -133,7 +133,7 @@ func (c *Client) Step(in []byte) bool {
 func (c *Client) step1(in []byte) error {
 	if len(c.clientNonce) == 0 {
 		const nonceLen = 6
-		buf := make([]byte, nonceLen + b64.EncodedLen(nonceLen))
+		buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
 		if _, err := rand.Read(buf[:nonceLen]); err != nil {
 			return fmt.Errorf("cannot read random SCRAM-SHA-1 nonce from operating system: %v", err)
 		}
diff --git a/log.go b/log.go
index 53eb4237b..d83779498 100644
--- a/log.go
+++ b/log.go
@@ -34,15 +34,15 @@ import (
 // ---------------------------------------------------------------------------
 // Logging integration.
 
-// Avoid importing the log type information unnecessarily. There's a small cost
+// logLogger avoids importing the log type information unnecessarily. There's a small cost
 // associated with using an interface rather than the type. Depending on how
 // often the logger is plugged in, it would be worth using the type instead.
-type log_Logger interface {
+type logLogger interface {
 	Output(calldepth int, s string) error
 }
 
 var (
-	globalLogger log_Logger
+	globalLogger logLogger
 	globalDebug  bool
 	globalMutex  sync.Mutex
 )
@@ -53,8 +53,8 @@ var (
 // the application starts. Having raceDetector as a constant, the compiler
 // should elide the locks altogether in actual use.
 
-// Specify the *log.Logger object where log messages should be sent to.
-func SetLogger(logger log_Logger) {
+// SetLogger specifies the *log.Logger object where log messages should be sent.
+func SetLogger(logger logLogger) {
 	if raceDetector {
 		globalMutex.Lock()
 		defer globalMutex.Unlock()
@@ -62,7 +62,7 @@ func SetLogger(logger log_Logger) {
 	globalLogger = logger
 }
 
-// Enable the delivery of debug messages to the logger. Only meaningful
+// SetDebug enables the delivery of debug messages to the logger. Only meaningful
 // if a logger is also set.
 func SetDebug(debug bool) {
 	if raceDetector {
diff --git a/server.go b/server.go
index 7b31e243f..7ad955255 100644
--- a/server.go
+++ b/server.go
@@ -143,7 +143,6 @@ func (server *mongoServer) AcquireSocket(poolLimit int, timeout time.Duration) (
 		}
 		return
 	}
-	panic("unreachable")
 }
 
 // Connect establishes a new connection to the server. This should
@@ -306,7 +305,7 @@ func (server *mongoServer) pinger(loop bool) {
 	}
 	op := queryOp{
 		collection: "admin.$cmd",
-		query:      bson.D{{"ping", 1}},
+		query:      bson.D{{Name: "ping", Value: 1}},
 		flags:      flagSlaveOk,
 		limit:      -1,
 	}
diff --git a/session.go b/session.go
index 2b383ad40..074f48688 100644
--- a/session.go
+++ b/session.go
@@ -44,23 +44,32 @@ import (
 	"github.com/globalsign/mgo/bson"
 )
 
+// Mode is a read preference mode. See Eventual, Monotonic and Strong for details
+//
+// Relevant documentation on read preference modes:
+//
+//     http://docs.mongodb.org/manual/reference/read-preference/
+//
 type Mode int
 
 const (
-	// Relevant documentation on read preference modes:
-	//
-	//     http://docs.mongodb.org/manual/reference/read-preference/
-	//
-	Primary            Mode = 2 // Default mode. All operations read from the current replica set primary.
-	PrimaryPreferred   Mode = 3 // Read from the primary if available. Read from the secondary otherwise.
-	Secondary          Mode = 4 // Read from one of the nearest secondary members of the replica set.
-	SecondaryPreferred Mode = 5 // Read from one of the nearest secondaries if available. Read from primary otherwise.
-	Nearest            Mode = 6 // Read from one of the nearest members, irrespective of it being primary or secondary.
-
-	// Read preference modes are specific to mgo:
-	Eventual  Mode = 0 // Same as Nearest, but may change servers between reads.
-	Monotonic Mode = 1 // Same as SecondaryPreferred before first write. Same as Primary after first write.
-	Strong    Mode = 2 // Same as Primary.
+	// Primary mode is the default mode. All operations read from the current replica set primary.
+	Primary Mode = 2
+	// PrimaryPreferred mode: read from the primary if available. Read from the secondary otherwise.
+	PrimaryPreferred Mode = 3
+	// Secondary mode: read from one of the nearest secondary members of the replica set.
+	Secondary Mode = 4
+	// SecondaryPreferred mode: read from one of the nearest secondaries if available. Read from primary otherwise.
+	SecondaryPreferred Mode = 5
+	// Nearest mode: read from one of the nearest members, irrespective of it being primary or secondary.
+	Nearest Mode = 6
+
+	// Eventual mode is specific to mgo, and is the same as Nearest, but may change servers between reads.
+	Eventual Mode = 0
+	// Monotonic mode is specific to mgo, and is the same as SecondaryPreferred before first write. Same as Primary after first write.
+	Monotonic Mode = 1
+	// Strong mode is specific to mgo, and is the same as Primary.
+	Strong Mode = 2
 )
 
 // mgo.v3: Drop Strong mode, suffix all modes with "Mode".
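The reworked comments above document each read preference mode; picking one at runtime is a single call. A sketch, assuming an established session:

```go
// Monotonic reads from a secondary until the first write, then sticks to
// the primary; the second argument also refreshes the session's sockets.
session.SetMode(mgo.Monotonic, true)

// Strong (the default) routes every operation to the primary.
session.SetMode(mgo.Strong, true)
```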
@@ -84,7 +93,7 @@ type Session struct {
 	creds            []Credential
 	dialCred         *Credential
 	safeOp           *queryOp
-	cluster_         *mongoCluster
+	mgoCluster       *mongoCluster
 	slaveSocket      *mongoSocket
 	masterSocket     *mongoSocket
 	m                sync.RWMutex
@@ -93,17 +102,30 @@ type Session struct {
 	slaveOk          bool
 }
 
+// Database holds collections of documents
+//
+// Relevant documentation:
+//
+//     https://docs.mongodb.com/manual/core/databases-and-collections/#databases
+//
 type Database struct {
 	Session *Session
 	Name    string
 }
 
+// Collection stores documents
+//
+// Relevant documentation:
+//
+//     https://docs.mongodb.com/manual/core/databases-and-collections/#collections
+//
 type Collection struct {
 	Database *Database
 	Name     string // "collection"
 	FullName string // "db.collection"
 }
 
+// Query holds information about a query.
 type Query struct {
 	m       sync.Mutex
 	session *Session
@@ -117,13 +139,19 @@ type query struct {
 }
 
 type getLastError struct {
-	CmdName  int         "getLastError,omitempty"
-	W        interface{} "w,omitempty"
-	WTimeout int         "wtimeout,omitempty"
-	FSync    bool        "fsync,omitempty"
-	J        bool        "j,omitempty"
+	CmdName  int         `bson:"getLastError,omitempty"`
+	W        interface{} `bson:"w,omitempty"`
+	WTimeout int         `bson:"wtimeout,omitempty"`
+	FSync    bool        `bson:"fsync,omitempty"`
+	J        bool        `bson:"j,omitempty"`
 }
 
+// Iter stores information about a Cursor
+//
+// Relevant documentation:
+//
+//     https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/
+//
 type Iter struct {
 	m              sync.Mutex
 	gotReply       sync.Cond
@@ -142,8 +170,11 @@ type Iter struct {
 }
 
 var (
+	// ErrNotFound is the error returned when a document could not be found
 	ErrNotFound = errors.New("not found")
-	ErrCursor   = errors.New("invalid cursor")
+	// ErrCursor is the error returned when trying to retrieve documents from
+	// an invalid cursor
+	ErrCursor = errors.New("invalid cursor")
 )
 
 const (
@@ -599,7 +630,7 @@ func extractURL(s string) (*urlInfo, error) {
 func newSession(consistency Mode, cluster *mongoCluster, timeout time.Duration) (session *Session) {
 	cluster.Acquire()
 	session = &Session{
-		cluster_:    cluster,
+		mgoCluster:  cluster,
 		syncTimeout: timeout,
 		sockTimeout: timeout,
 		poolLimit:   4096,
@@ -627,9 +658,24 @@ func copySession(session *Session, keepCreds bool) (s *Session) {
 	} else if session.dialCred != nil {
 		creds = []Credential{*session.dialCred}
 	}
-	scopy := *session
-	scopy.m = sync.RWMutex{}
-	scopy.creds = creds
+	scopy := Session{
+		defaultdb:        session.defaultdb,
+		sourcedb:         session.sourcedb,
+		syncTimeout:      session.syncTimeout,
+		sockTimeout:      session.sockTimeout,
+		poolLimit:        session.poolLimit,
+		consistency:      session.consistency,
+		creds:            creds,
+		dialCred:         session.dialCred,
+		safeOp:           session.safeOp,
+		mgoCluster:       session.mgoCluster,
+		slaveSocket:      session.slaveSocket,
+		masterSocket:     session.masterSocket,
+		m:                sync.RWMutex{},
+		queryConfig:      session.queryConfig,
+		bypassValidation: session.bypassValidation,
+		slaveOk:          session.slaveOk,
+	}
 	s = &scopy
 	debugf("New session %p on cluster %p (copy from %p)", s, cluster, session)
 	return s
@@ -683,9 +729,9 @@ func (db *Database) C(name string) *Collection {
 //     https://docs.mongodb.com/manual/reference/method/db.createView/
 //
 func (db *Database) CreateView(view string, source string, pipeline interface{}, collation *Collation) error {
-	command := bson.D{{"create", view}, {"viewOn", source}, {"pipeline", pipeline}}
+	command := bson.D{{Name: "create", Value: view}, {Name: "viewOn", Value: source}, {Name: "pipeline", Value: pipeline}}
 	if collation != nil {
-		command = append(command, bson.DocElem{"collation", collation})
+		command = append(command, bson.DocElem{Name: "collation", Value: collation})
 	}
 	return db.Run(command, nil)
 }
@@ -910,23 +956,51 @@ type User struct {
 	UserSource string `bson:"userSource,omitempty"`
 }
 
+// Role is an available role for users
+//
+// Relevant documentation:
+//
+//     http://docs.mongodb.org/manual/reference/user-privileges/
+//
 type Role string
 
 const (
-	// Relevant documentation:
-	//
-	//     http://docs.mongodb.org/manual/reference/user-privileges/
-	//
-	RoleRoot         Role = "root"
-	RoleRead         Role = "read"
-	RoleReadAny      Role = "readAnyDatabase"
-	RoleReadWrite    Role = "readWrite"
+	// RoleRoot provides access to the operations and all the resources
+	// of the readWriteAnyDatabase, dbAdminAnyDatabase, userAdminAnyDatabase,
+	// clusterAdmin, restore, and backup roles combined.
+	RoleRoot Role = "root"
+	// RoleRead provides the ability to read data on all non-system collections
+	// and on the following system collections: system.indexes, system.js, and
+	// system.namespaces collections on a specific database.
+	RoleRead Role = "read"
+	// RoleReadAny provides the same read-only permissions as read, except it
+	// applies to all but the local and config databases in the cluster.
+	// The role also provides the listDatabases action on the cluster as a whole.
+	RoleReadAny Role = "readAnyDatabase"
+	// RoleReadWrite provides all the privileges of the read role plus the ability to modify data on
+	// all non-system collections and the system.js collection on a specific database.
+	RoleReadWrite Role = "readWrite"
+	// RoleReadWriteAny provides the same read and write permissions as readWrite, except it
+	// applies to all but the local and config databases in the cluster. The role also provides
+	// the listDatabases action on the cluster as a whole.
 	RoleReadWriteAny Role = "readWriteAnyDatabase"
-	RoleDBAdmin      Role = "dbAdmin"
-	RoleDBAdminAny   Role = "dbAdminAnyDatabase"
-	RoleUserAdmin    Role = "userAdmin"
+	// RoleDBAdmin provides all the privileges of the dbAdmin role on a specific database
+	RoleDBAdmin Role = "dbAdmin"
+	// RoleDBAdminAny provides all the privileges of the dbAdmin role on all databases
+	RoleDBAdminAny Role = "dbAdminAnyDatabase"
+	// RoleUserAdmin provides the ability to create and modify roles and users on the
+	// current database. This role also indirectly provides superuser access to either
+	// the database or, if scoped to the admin database, the cluster. The userAdmin role
+	// allows users to grant any user any privilege, including themselves.
+	RoleUserAdmin Role = "userAdmin"
+	// RoleUserAdminAny provides the same access to user administration operations as userAdmin,
+	// except it applies to all but the local and config databases in the cluster.
 	RoleUserAdminAny Role = "userAdminAnyDatabase"
+	// RoleClusterAdmin provides the greatest cluster-management access. This role combines
+	// the privileges granted by the clusterManager, clusterMonitor, and hostManager roles.
+	// Additionally, the role provides the dropDatabase action.
RoleClusterAdmin Role = "clusterAdmin" + // TODO some roles are missing: dbOwner/clusterManager/clusterMonitor/hostManager/backup/restore ) // UpsertUser updates the authentication credentials and the roles for @@ -972,32 +1046,32 @@ func (db *Database) UpsertUser(user *User) error { if user.Password != "" { psum := md5.New() psum.Write([]byte(user.Username + ":mongo:" + user.Password)) - set = append(set, bson.DocElem{"pwd", hex.EncodeToString(psum.Sum(nil))}) - unset = append(unset, bson.DocElem{"userSource", 1}) + set = append(set, bson.DocElem{Name: "pwd", Value: hex.EncodeToString(psum.Sum(nil))}) + unset = append(unset, bson.DocElem{Name: "userSource", Value: 1}) } else if user.PasswordHash != "" { - set = append(set, bson.DocElem{"pwd", user.PasswordHash}) - unset = append(unset, bson.DocElem{"userSource", 1}) + set = append(set, bson.DocElem{Name: "pwd", Value: user.PasswordHash}) + unset = append(unset, bson.DocElem{Name: "userSource", Value: 1}) } if user.UserSource != "" { - set = append(set, bson.DocElem{"userSource", user.UserSource}) - unset = append(unset, bson.DocElem{"pwd", 1}) + set = append(set, bson.DocElem{Name: "userSource", Value: user.UserSource}) + unset = append(unset, bson.DocElem{Name: "pwd", Value: 1}) } if user.Roles != nil || user.OtherDBRoles != nil { - set = append(set, bson.DocElem{"roles", user.Roles}) + set = append(set, bson.DocElem{Name: "roles", Value: user.Roles}) if len(user.OtherDBRoles) > 0 { - set = append(set, bson.DocElem{"otherDBRoles", user.OtherDBRoles}) + set = append(set, bson.DocElem{Name: "otherDBRoles", Value: user.OtherDBRoles}) } else { - unset = append(unset, bson.DocElem{"otherDBRoles", 1}) + unset = append(unset, bson.DocElem{Name: "otherDBRoles", Value: 1}) } } users := db.C("system.users") - err = users.Update(bson.D{{"user", user.Username}}, bson.D{{"$unset", unset}, {"$set", set}}) + err = users.Update(bson.D{{Name: "user", Value: user.Username}}, bson.D{{Name: "$unset", Value: unset}, {Name: "$set", Value: set}}) if err == ErrNotFound { - set = append(set, bson.DocElem{"user", user.Username}) + set = append(set, bson.DocElem{Name: "user", Value: user.Username}) if user.Roles == nil && user.OtherDBRoles == nil { // Roles must be sent, as it's the way MongoDB distinguishes // old-style documents from new-style documents in pre-2.6. 
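The role constants documented above are consumed through the User type. A sketch of creating a read/write user, assuming an established session; the database, user name and password are illustrative:

```go
err := session.DB("mydb").UpsertUser(&mgo.User{
	Username: "app",
	Password: "secret",
	Roles:    []mgo.Role{mgo.RoleReadWrite}, // read/write on mydb only
})
if err != nil {
	panic(err)
}
```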
-			set = append(set, bson.DocElem{"roles", user.Roles})
+			set = append(set, bson.DocElem{Name: "roles", Value: user.Roles})
 		}
 		err = users.Insert(set)
 	}
@@ -1021,9 +1095,9 @@ func isAuthError(err error) bool {
 
 func (db *Database) runUserCmd(cmdName string, user *User) error {
 	cmd := make(bson.D, 0, 16)
-	cmd = append(cmd, bson.DocElem{cmdName, user.Username})
+	cmd = append(cmd, bson.DocElem{Name: cmdName, Value: user.Username})
 	if user.Password != "" {
-		cmd = append(cmd, bson.DocElem{"pwd", user.Password})
+		cmd = append(cmd, bson.DocElem{Name: "pwd", Value: user.Password})
 	}
 	var roles []interface{}
 	for _, role := range user.Roles {
@@ -1031,11 +1105,11 @@ func (db *Database) runUserCmd(cmdName string, user *User) error {
 	}
 	for db, dbroles := range user.OtherDBRoles {
 		for _, role := range dbroles {
-			roles = append(roles, bson.D{{"role", role}, {"db", db}})
+			roles = append(roles, bson.D{{Name: "role", Value: role}, {Name: "db", Value: db}})
 		}
 	}
 	if roles != nil || user.Roles != nil || cmdName == "createUser" {
-		cmd = append(cmd, bson.DocElem{"roles", roles})
+		cmd = append(cmd, bson.DocElem{Name: "roles", Value: roles})
 	}
 	err := db.Run(cmd, nil)
 	if !isNoCmd(err) && user.UserSource != "" && (user.UserSource != "$external" || db.Name != "$external") {
@@ -1084,7 +1158,7 @@ func (db *Database) AddUser(username, password string, readOnly bool) error {
 
 // RemoveUser removes the authentication credentials of user from the database.
 func (db *Database) RemoveUser(user string) error {
-	err := db.Run(bson.D{{"dropUser", user}}, nil)
+	err := db.Run(bson.D{{Name: "dropUser", Value: user}}, nil)
 	if isNoCmd(err) {
 		users := db.C("system.users")
 		return users.Remove(bson.M{"user": user})
@@ -1098,23 +1172,28 @@ func (db *Database) RemoveUser(user string) error {
 type indexSpec struct {
 	Name, NS                string
 	Key                     bson.D
-	Unique                  bool    ",omitempty"
-	DropDups                bool    "dropDups,omitempty"
-	Background              bool    ",omitempty"
-	Sparse                  bool    ",omitempty"
-	Bits                    int     ",omitempty"
-	Min, Max                float64 ",omitempty"
-	BucketSize              float64 "bucketSize,omitempty"
-	ExpireAfter             int     "expireAfterSeconds,omitempty"
-	Weights                 bson.D  ",omitempty"
-	DefaultLanguage         string  "default_language,omitempty"
-	LanguageOverride        string  "language_override,omitempty"
-	TextIndexVersion        int     "textIndexVersion,omitempty"
-	PartialFilterExpression bson.M  "partialFilterExpression,omitempty"
-
-	Collation *Collation "collation,omitempty"
-}
-
+	Unique                  bool    `bson:",omitempty"`
+	DropDups                bool    `bson:"dropDups,omitempty"`
+	Background              bool    `bson:",omitempty"`
+	Sparse                  bool    `bson:",omitempty"`
+	Bits                    int     `bson:",omitempty"`
+	Min, Max                float64 `bson:",omitempty"`
+	BucketSize              float64 `bson:"bucketSize,omitempty"`
+	ExpireAfter             int     `bson:"expireAfterSeconds,omitempty"`
+	Weights                 bson.D  `bson:",omitempty"`
+	DefaultLanguage         string  `bson:"default_language,omitempty"`
+	LanguageOverride        string  `bson:"language_override,omitempty"`
+	TextIndexVersion        int     `bson:"textIndexVersion,omitempty"`
+	PartialFilterExpression bson.M  `bson:"partialFilterExpression,omitempty"`
+
+	Collation *Collation `bson:"collation,omitempty"`
+}
+
+// Index represents a special data structure that stores a small portion of the collection’s
+// data set in an easy-to-traverse form. The index stores the value of a specific
+// field or set of fields, ordered by the value of the field. The ordering of the
+// index entries supports efficient equality matches and range-based query operations.
+// In addition, MongoDB can return sorted results by using the ordering in the index.
type Index struct { Key []string // Index key fields; prefix name with dash (-) for descending order Unique bool // Prevent two documents from having the same index key @@ -1157,6 +1236,8 @@ type Index struct { Collation *Collation } +// Collation allows users to specify language-specific rules for string comparison, +// such as rules for lettercase and accent marks. type Collation struct { // Locale defines the collation locale. @@ -1271,12 +1352,12 @@ func parseIndexKey(key []string) (*indexKeyInfo, error) { } if kind == "text" { if !isText { - keyInfo.key = append(keyInfo.key, bson.DocElem{"_fts", "text"}, bson.DocElem{"_ftsx", 1}) + keyInfo.key = append(keyInfo.key, bson.DocElem{Name: "_fts", Value: "text"}, bson.DocElem{Name: "_ftsx", Value: 1}) isText = true } - keyInfo.weights = append(keyInfo.weights, bson.DocElem{field, 1}) + keyInfo.weights = append(keyInfo.weights, bson.DocElem{Name: field, Value: 1}) } else { - keyInfo.key = append(keyInfo.key, bson.DocElem{field, order}) + keyInfo.key = append(keyInfo.key, bson.DocElem{Name: field, Value: order}) } } if keyInfo.name == "" { @@ -1436,7 +1517,7 @@ NextField: db := c.Database.With(cloned) // Try with a command first. - err = db.Run(bson.D{{"createIndexes", c.Name}, {"indexes", []indexSpec{spec}}}, nil) + err = db.Run(bson.D{{Name: "createIndexes", Value: c.Name}, {Name: "indexes", Value: []indexSpec{spec}}}, nil) if isNoCmd(err) { // Command not yet supported. Insert into the indexes collection instead. err = db.C("system.indexes").Insert(&spec) @@ -1475,7 +1556,7 @@ func (c *Collection) DropIndex(key ...string) error { ErrMsg string Ok bool }{} - err = db.Run(bson.D{{"dropIndexes", c.Name}, {"index", keyInfo.name}}, &result) + err = db.Run(bson.D{{Name: "dropIndexes", Value: c.Name}, {Name: "index", Value: keyInfo.name}}, &result) if err != nil { return err } @@ -1527,7 +1608,7 @@ func (c *Collection) DropIndexName(name string) error { ErrMsg string Ok bool }{} - err = c.Database.Run(bson.D{{"dropIndexes", c.Name}, {"index", name}}, &result) + err = c.Database.Run(bson.D{{Name: "dropIndexes", Value: c.Name}, {Name: "index", Value: name}}, &result) if err != nil { return err } @@ -1550,7 +1631,7 @@ func (c *Collection) DropAllIndexes() error { ErrMsg string Ok bool }{} - err := db.Run(bson.D{{"dropIndexes", c.Name}, {"index", "*"}}, &result) + err := db.Run(bson.D{{Name: "dropIndexes", Value: c.Name}, {Name: "index", Value: "*"}}, &result) if err != nil { return err } @@ -1563,8 +1644,8 @@ func (c *Collection) DropAllIndexes() error { // nonEventual returns a clone of session and ensures it is not Eventual. // This guarantees that the server that is used for queries may be reused // afterwards when a cursor is received. 
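The index machinery above is driven through EnsureIndex and the Drop* helpers. A sketch, assuming an established session; the collection and key names are illustrative:

```go
c := session.DB("mydb").C("users")

// Build the index in the background and reject duplicate emails.
err := c.EnsureIndex(mgo.Index{
	Key:        []string{"email"},
	Unique:     true,
	Background: true,
})
if err != nil {
	panic(err)
}

// DropAllIndexes issues dropIndexes with index: "*", as shown above.
if err = c.DropAllIndexes(); err != nil {
	panic(err)
}
```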
-func (session *Session) nonEventual() *Session { - cloned := session.Clone() +func (s *Session) nonEventual() *Session { + cloned := s.Clone() if cloned.consistency == Eventual { cloned.SetMode(Monotonic, false) } @@ -1586,7 +1667,7 @@ func (c *Collection) Indexes() (indexes []Index, err error) { Cursor cursorData } var iter *Iter - err = c.Database.With(cloned).Run(bson.D{{"listIndexes", c.Name}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) + err = c.Database.With(cloned).Run(bson.D{{Name: "listIndexes", Value: c.Name}, {Name: "cursor", Value: bson.D{{Name: "batchSize", Value: batchSize}}}}, &result) if err == nil { firstBatch := result.Indexes if firstBatch == nil { @@ -1691,7 +1772,7 @@ func simpleIndexKey(realKey bson.D) (key []string) { return } -// ResetIndexCache() clears the cache of previously ensured indexes. +// ResetIndexCache clears the cache of previously ensured indexes. // Following requests to EnsureIndex will contact the server. func (s *Session) ResetIndexCache() { s.cluster().ResetIndexCache() @@ -1744,20 +1825,20 @@ func (s *Session) Clone() *Session { // after it has been closed. func (s *Session) Close() { s.m.Lock() - if s.cluster_ != nil { + if s.mgoCluster != nil { debugf("Closing session %p", s) s.unsetSocket() - s.cluster_.Release() - s.cluster_ = nil + s.mgoCluster.Release() + s.mgoCluster = nil } s.m.Unlock() } func (s *Session) cluster() *mongoCluster { - if s.cluster_ == nil { + if s.mgoCluster == nil { panic("Session already closed") } - return s.cluster_ + return s.mgoCluster } // Refresh puts back any reserved sockets in use and restarts the consistency @@ -1942,7 +2023,7 @@ func (s *Session) SetPrefetch(p float64) { s.m.Unlock() } -// See SetSafe for details on the Safe type. +// Safe session safety mode. See SetSafe for details on the Safe type. type Safe struct { W int // Min # of servers to ack before success WMode string // Write mode for MongoDB 2.0+ (e.g. "majority") @@ -2191,7 +2272,7 @@ func (s *Session) Ping() error { // is established with. If async is true, the call returns immediately, // otherwise it returns after the flush has been made. func (s *Session) Fsync(async bool) error { - return s.Run(bson.D{{"fsync", 1}, {"async", async}}, nil) + return s.Run(bson.D{{Name: "fsync", Value: 1}, {Name: "async", Value: async}}, nil) } // FsyncLock locks all writes in the specific server the session is @@ -2220,12 +2301,12 @@ func (s *Session) Fsync(async bool) error { // http://www.mongodb.org/display/DOCS/Backups // func (s *Session) FsyncLock() error { - return s.Run(bson.D{{"fsync", 1}, {"lock", true}}, nil) + return s.Run(bson.D{{Name: "fsync", Value: 1}, {Name: "lock", Value: true}}, nil) } // FsyncUnlock releases the server for writes. See FsyncLock for details. func (s *Session) FsyncUnlock() error { - err := s.Run(bson.D{{"fsyncUnlock", 1}}, nil) + err := s.Run(bson.D{{Name: "fsyncUnlock", Value: 1}}, nil) if isNoCmd(err) { err = s.DB("admin").C("$cmd.sys.unlock").Find(nil).One(nil) // WTF? } @@ -2266,7 +2347,7 @@ func (c *Collection) Find(query interface{}) *Query { type repairCmd struct { RepairCursor string `bson:"repairCursor"` - Cursor *repairCmdCursor ",omitempty" + Cursor *repairCmdCursor `bson:",omitempty"` } type repairCmdCursor struct { @@ -2307,9 +2388,11 @@ func (c *Collection) Repair() *Iter { // // See the Find method for more details. 
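FsyncLock and FsyncUnlock above are meant to bracket a filesystem-level backup. A sketch, assuming an established session against the server being backed up:

```go
if err := session.FsyncLock(); err != nil {
	panic(err)
}
// ... snapshot or copy the data files while writes are blocked ...
if err := session.FsyncUnlock(); err != nil {
	panic(err)
}
```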
 func (c *Collection) FindId(id interface{}) *Query {
-	return c.Find(bson.D{{"_id", id}})
+	return c.Find(bson.D{{Name: "_id", Value: id}})
 }
 
+// Pipe is used to run aggregation queries against a
+// collection.
 type Pipe struct {
 	session    *Session
 	collection *Collection
@@ -2321,9 +2404,9 @@ type Pipe struct {
 type pipeCmd struct {
 	Aggregate string
 	Pipeline  interface{}
-	Cursor    *pipeCmdCursor ",omitempty"
-	Explain   bool           ",omitempty"
-	AllowDisk bool           "allowDiskUse,omitempty"
+	Cursor    *pipeCmdCursor `bson:",omitempty"`
+	Explain   bool           `bson:",omitempty"`
+	AllowDisk bool           `bson:"allowDiskUse,omitempty"`
 }
 
 type pipeCmdCursor struct {
@@ -2552,8 +2635,13 @@ func (p *Pipe) Batch(n int) *Pipe {
 	return p
 }
 
+// LastError holds the error status of the preceding write operation on the current connection.
+//
+// Relevant documentation:
+//
+//     https://docs.mongodb.com/manual/reference/command/getLastError/
+//
 // mgo.v3: Use a single user-visible error type.
-
 type LastError struct {
 	Err             string
 	Code, N, Waited int
@@ -2571,13 +2659,14 @@ func (err *LastError) Error() string {
 }
 
 type queryError struct {
-	Err           string "$err"
+	Err           string `bson:"$err"`
 	ErrMsg        string
 	Assertion     string
 	Code          int
-	AssertionCode int "assertionCode"
+	AssertionCode int `bson:"assertionCode"`
 }
 
+// QueryError is returned when a query fails.
 type QueryError struct {
 	Code      int
 	Message   string
@@ -2652,7 +2741,7 @@ func (c *Collection) Update(selector interface{}, update interface{}) error {
 //
 // See the Update method for more details.
 func (c *Collection) UpdateId(id interface{}, update interface{}) error {
-	return c.Update(bson.D{{"_id", id}}, update)
+	return c.Update(bson.D{{Name: "_id", Value: id}}, update)
 }
 
 // ChangeInfo holds details about the outcome of an update operation.
@@ -2747,7 +2836,7 @@ func (c *Collection) Upsert(selector interface{}, update interface{}) (info *Cha
 //
 // See the Upsert method for more details.
 func (c *Collection) UpsertId(id interface{}, update interface{}) (info *ChangeInfo, err error) {
-	return c.Upsert(bson.D{{"_id", id}}, update)
+	return c.Upsert(bson.D{{Name: "_id", Value: id}}, update)
 }
 
 // Remove finds a single document matching the provided selector document
@@ -2777,7 +2866,7 @@ func (c *Collection) Remove(selector interface{}) error {
 //
 // See the Remove method for more details.
 func (c *Collection) RemoveId(id interface{}) error {
-	return c.Remove(bson.D{{"_id", id}})
+	return c.Remove(bson.D{{Name: "_id", Value: id}})
 }
 
 // RemoveAll finds all documents matching the provided selector document
@@ -2802,12 +2891,12 @@ func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err erro
 
 // DropDatabase removes the entire database including all of its collections.
 func (db *Database) DropDatabase() error {
-	return db.Run(bson.D{{"dropDatabase", 1}}, nil)
+	return db.Run(bson.D{{Name: "dropDatabase", Value: 1}}, nil)
}
 
 // DropCollection removes the entire collection including all of its documents.
 func (c *Collection) DropCollection() error {
-	return c.Database.Run(bson.D{{"drop", c.Name}}, nil)
+	return c.Database.Run(bson.D{{Name: "drop", Value: c.Name}}, nil)
 }
 
 // The CollectionInfo type holds metadata about a collection.
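The Pipe type introduced above drives the aggregation framework. A sketch, assuming an established session and the bson package; the collection and field names are illustrative:

```go
pipeline := []bson.M{
	{"$match": bson.M{"n": bson.M{"$gte": 42}}},
	{"$group": bson.M{"_id": "$k", "total": bson.M{"$sum": "$n"}}},
}
var results []bson.M
err := session.DB("mydb").C("mycoll").Pipe(pipeline).All(&results)
if err != nil {
	panic(err)
}
```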
@@ -2875,37 +2964,37 @@ type CollectionInfo struct { // func (c *Collection) Create(info *CollectionInfo) error { cmd := make(bson.D, 0, 4) - cmd = append(cmd, bson.DocElem{"create", c.Name}) + cmd = append(cmd, bson.DocElem{Name: "create", Value: c.Name}) if info.Capped { if info.MaxBytes < 1 { return fmt.Errorf("Collection.Create: with Capped, MaxBytes must also be set") } - cmd = append(cmd, bson.DocElem{"capped", true}) - cmd = append(cmd, bson.DocElem{"size", info.MaxBytes}) + cmd = append(cmd, bson.DocElem{Name: "capped", Value: true}) + cmd = append(cmd, bson.DocElem{Name: "size", Value: info.MaxBytes}) if info.MaxDocs > 0 { - cmd = append(cmd, bson.DocElem{"max", info.MaxDocs}) + cmd = append(cmd, bson.DocElem{Name: "max", Value: info.MaxDocs}) } } if info.DisableIdIndex { - cmd = append(cmd, bson.DocElem{"autoIndexId", false}) + cmd = append(cmd, bson.DocElem{Name: "autoIndexId", Value: false}) } if info.ForceIdIndex { - cmd = append(cmd, bson.DocElem{"autoIndexId", true}) + cmd = append(cmd, bson.DocElem{Name: "autoIndexId", Value: true}) } if info.Validator != nil { - cmd = append(cmd, bson.DocElem{"validator", info.Validator}) + cmd = append(cmd, bson.DocElem{Name: "validator", Value: info.Validator}) } if info.ValidationLevel != "" { - cmd = append(cmd, bson.DocElem{"validationLevel", info.ValidationLevel}) + cmd = append(cmd, bson.DocElem{Name: "validationLevel", Value: info.ValidationLevel}) } if info.ValidationAction != "" { - cmd = append(cmd, bson.DocElem{"validationAction", info.ValidationAction}) + cmd = append(cmd, bson.DocElem{Name: "validationAction", Value: info.ValidationAction}) } if info.StorageEngine != nil { - cmd = append(cmd, bson.DocElem{"storageEngine", info.StorageEngine}) + cmd = append(cmd, bson.DocElem{Name: "storageEngine", Value: info.StorageEngine}) } if info.Collation != nil { - cmd = append(cmd, bson.DocElem{"collation", info.Collation}) + cmd = append(cmd, bson.DocElem{Name: "collation", Value: info.Collation}) } return c.Database.Run(cmd, nil) @@ -2914,7 +3003,7 @@ func (c *Collection) Create(info *CollectionInfo) error { // Batch sets the batch size used when fetching documents from the database. // It's possible to change this setting on a per-session basis as well, using // the Batch method of Session. - +// // The default batch size is defined by the database itself. As of this // writing, MongoDB will use an initial size of min(100 docs, 4MB) on the // first batch, and 4MB on remaining ones. 
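Create above maps CollectionInfo fields onto the create command. A sketch of a capped collection, assuming an established session; the name and sizes are illustrative:

```go
err := session.DB("mydb").C("eventlog").Create(&mgo.CollectionInfo{
	Capped:   true,
	MaxBytes: 1 << 20, // required whenever Capped is set
	MaxDocs:  1000,    // optional cap on the document count
})
if err != nil {
	panic(err)
}
```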
@@ -3036,9 +3125,9 @@ func (q *Query) Sort(fields ...string) *Query { panic("Sort: empty field name") } if kind == "textScore" { - order = append(order, bson.DocElem{field, bson.M{"$meta": kind}}) + order = append(order, bson.DocElem{Name: field, Value: bson.M{"$meta": kind}}) } else { - order = append(order, bson.DocElem{field, n}) + order = append(order, bson.DocElem{Name: field, Value: n}) } } q.op.options.OrderBy = order @@ -3406,15 +3495,15 @@ func prepareFindOp(socket *mongoSocket, op *queryOp, limit int32) bool { op.hasOptions = false if explain { - op.query = bson.D{{"explain", op.query}} + op.query = bson.D{{Name: "explain", Value: op.query}} return false } return true } type cursorData struct { - FirstBatch []bson.Raw "firstBatch" - NextBatch []bson.Raw "nextBatch" + FirstBatch []bson.Raw `bson:"firstBatch"` + NextBatch []bson.Raw `bson:"nextBatch"` NS string Id int64 } @@ -3477,7 +3566,7 @@ type getMoreCmd struct { func (db *Database) run(socket *mongoSocket, cmd, result interface{}) (err error) { // Database.Run: if name, ok := cmd.(string); ok { - cmd = bson.D{{name, 1}} + cmd = bson.D{{Name: name, Value: 1}} } // Collection.Find: @@ -3566,7 +3655,7 @@ func (db *Database) FindRef(ref *DBRef) *Query { // func (s *Session) FindRef(ref *DBRef) *Query { if ref.Database == "" { - panic(errors.New(fmt.Sprintf("Can't resolve database for %#v", ref))) + panic(fmt.Errorf("Can't resolve database for %#v", ref)) } c := s.DB(ref.Database).C(ref.Collection) return c.FindId(ref.Id) @@ -3587,7 +3676,7 @@ func (db *Database) CollectionNames() (names []string, err error) { Collections []bson.Raw Cursor cursorData } - err = db.With(cloned).Run(bson.D{{"listCollections", 1}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) + err = db.With(cloned).Run(bson.D{{Name: "listCollections", Value: 1}, {Name: "cursor", Value: bson.D{{Name: "batchSize", Value: batchSize}}}}, &result) if err == nil { firstBatch := result.Collections if firstBatch == nil { @@ -4052,13 +4141,13 @@ func (q *Query) All(result interface{}) error { return q.Iter().All(result) } -// The For method is obsolete and will be removed in a future release. +// For method is obsolete and will be removed in a future release. // See Iter as an elegant replacement. func (q *Query) For(result interface{}, f func() error) error { return q.Iter().For(result, f) } -// The For method is obsolete and will be removed in a future release. +// For method is obsolete and will be removed in a future release. // See Iter as an elegant replacement. func (iter *Iter) For(result interface{}, f func() error) (err error) { valid := false @@ -4177,8 +4266,8 @@ func (iter *Iter) getMoreCmd() *queryOp { type countCmd struct { Count string Query interface{} - Limit int32 ",omitempty" - Skip int32 ",omitempty" + Limit int32 `bson:",omitempty"` + Skip int32 `bson:",omitempty"` Hint bson.D `bson:"hint,omitempty"` MaxTimeMS int `bson:"maxTimeMS,omitempty"` } @@ -4217,9 +4306,9 @@ func (c *Collection) Count() (n int, err error) { } type distinctCmd struct { - Collection string "distinct" + Collection string `bson:"distinct"` Key string - Query interface{} ",omitempty" + Query interface{} `bson:",omitempty"` } // Distinct unmarshals into result the list of distinct values for the given key. 
@@ -4256,28 +4345,34 @@ func (q *Query) Distinct(key string, result interface{}) error {
 }
 
 type mapReduceCmd struct {
-	Collection string "mapreduce"
-	Map        string ",omitempty"
-	Reduce     string ",omitempty"
-	Finalize   string ",omitempty"
+	Collection string `bson:"mapreduce"`
+	Map        string `bson:",omitempty"`
+	Reduce     string `bson:",omitempty"`
+	Finalize   string `bson:",omitempty"`
 	Out        interface{}
-	Query      interface{} ",omitempty"
-	Sort       interface{} ",omitempty"
-	Scope      interface{} ",omitempty"
-	Limit      int32       ",omitempty"
-	Verbose    bool        ",omitempty"
+	Query      interface{} `bson:",omitempty"`
+	Sort       interface{} `bson:",omitempty"`
+	Scope      interface{} `bson:",omitempty"`
+	Limit      int32       `bson:",omitempty"`
+	Verbose    bool        `bson:",omitempty"`
 }
 
 type mapReduceResult struct {
 	Results    bson.Raw
 	Result     bson.Raw
-	TimeMillis int64 "timeMillis"
+	TimeMillis int64 `bson:"timeMillis"`
 	Counts     struct{ Input, Emit, Output int }
 	Ok         bool
 	Err        string
 	Timing     *MapReduceTime
 }
 
+// MapReduce is used to perform map/reduce operations
+//
+// Relevant documentation:
+//
+//     https://docs.mongodb.com/manual/core/map-reduce/
+//
 type MapReduce struct {
 	Map      string // Map Javascript function code (required)
 	Reduce   string // Reduce Javascript function code (required)
@@ -4287,6 +4382,7 @@ type MapReduce struct {
 	Verbose  bool
 }
 
+// MapReduceInfo stores information on a MapReduce operation
 type MapReduceInfo struct {
 	InputCount  int            // Number of documents mapped
 	EmitCount   int            // Number of times reduce called emit
@@ -4297,10 +4393,11 @@ type MapReduceInfo struct {
 	VerboseTime *MapReduceTime // Only defined if Verbose was true
 }
 
+// MapReduceTime stores the execution time of a MapReduce operation
 type MapReduceTime struct {
 	Total    int64 // Total time, in nanoseconds
-	Map      int64 "mapTime"  // Time within map function, in nanoseconds
-	EmitLoop int64 "emitLoop" // Time within the emit/map loop, in nanoseconds
+	Map      int64 `bson:"mapTime"`  // Time within map function, in nanoseconds
+	EmitLoop int64 `bson:"emitLoop"` // Time within the emit/map loop, in nanoseconds
 }
 
 // MapReduce executes a map/reduce job for documents covered by the query.
@@ -4391,7 +4488,7 @@ func (q *Query) MapReduce(job *MapReduce, result interface{}) (info *MapReduceIn
 	}
 
 	if cmd.Out == nil {
-		cmd.Out = bson.D{{"inline", 1}}
+		cmd.Out = bson.D{{Name: "inline", Value: 1}}
 	}
 
 	var doc mapReduceResult
@@ -4476,14 +4573,14 @@ type Change struct {
 }
 
 type findModifyCmd struct {
-	Collection                  string      "findAndModify"
-	Query, Update, Sort, Fields interface{} ",omitempty"
-	Upsert, Remove, New         bool        ",omitempty"
+	Collection                  string      `bson:"findAndModify"`
+	Query, Update, Sort, Fields interface{} `bson:",omitempty"`
+	Upsert, Remove, New         bool        `bson:",omitempty"`
 }
 
 type valueResult struct {
 	Value     bson.Raw
-	LastError LastError "lastErrorObject"
+	LastError LastError `bson:"lastErrorObject"`
 }
 
 // Apply runs the findAndModify MongoDB command, which allows updating, upserting
@@ -4615,7 +4712,7 @@ func (bi *BuildInfo) VersionAtLeast(version ...int) bool {
 
 // BuildInfo retrieves the version and other details about the
 // running MongoDB server.
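The MapReduce type above mirrors the mapreduce command, in the same shape the tests below use. A sketch that counts occurrences of field n, assuming an established session; the collection name is illustrative:

```go
job := &mgo.MapReduce{
	Map:    "function() { emit(this.n, 1) }",
	Reduce: "function(key, values) { return Array.sum(values) }",
}
var result []struct {
	Id    int `bson:"_id"`
	Value int
}
// With no Out target, results are returned inline into result.
_, err := session.DB("mydb").C("mycoll").Find(nil).MapReduce(job, &result)
if err != nil {
	panic(err)
}
```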
func (s *Session) BuildInfo() (info BuildInfo, err error) { - err = s.Run(bson.D{{"buildInfo", "1"}}, &info) + err = s.Run(bson.D{{Name: "buildInfo", Value: "1"}}, &info) if len(info.VersionArray) == 0 { for _, a := range strings.Split(info.Version, ".") { i, err := strconv.Atoi(a) @@ -4809,7 +4906,7 @@ type writeCmdResult struct { NModified int `bson:"nModified"` Upserted []struct { Index int - Id interface{} `_id` + Id interface{} `bson:"_id"` } ConcernError writeConcernError `bson:"writeConcernError"` Errors []writeCmdError `bson:"writeErrors"` @@ -5022,7 +5119,7 @@ func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op inter func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered, bypassValidation bool) (lerr *LastError, err error) { var writeConcern interface{} if safeOp == nil { - writeConcern = bson.D{{"w", 0}} + writeConcern = bson.D{{Name: "w", Value: 0}} } else { writeConcern = safeOp.query.(*getLastError) } @@ -5032,46 +5129,46 @@ func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op int case *insertOp: // http://docs.mongodb.org/manual/reference/command/insert cmd = bson.D{ - {"insert", c.Name}, - {"documents", op.documents}, - {"writeConcern", writeConcern}, - {"ordered", op.flags&1 == 0}, + {Name: "insert", Value: c.Name}, + {Name: "documents", Value: op.documents}, + {Name: "writeConcern", Value: writeConcern}, + {Name: "ordered", Value: op.flags&1 == 0}, } case *updateOp: // http://docs.mongodb.org/manual/reference/command/update cmd = bson.D{ - {"update", c.Name}, - {"updates", []interface{}{op}}, - {"writeConcern", writeConcern}, - {"ordered", ordered}, + {Name: "update", Value: c.Name}, + {Name: "updates", Value: []interface{}{op}}, + {Name: "writeConcern", Value: writeConcern}, + {Name: "ordered", Value: ordered}, } case bulkUpdateOp: // http://docs.mongodb.org/manual/reference/command/update cmd = bson.D{ - {"update", c.Name}, - {"updates", op}, - {"writeConcern", writeConcern}, - {"ordered", ordered}, + {Name: "update", Value: c.Name}, + {Name: "updates", Value: op}, + {Name: "writeConcern", Value: writeConcern}, + {Name: "ordered", Value: ordered}, } case *deleteOp: // http://docs.mongodb.org/manual/reference/command/delete cmd = bson.D{ - {"delete", c.Name}, - {"deletes", []interface{}{op}}, - {"writeConcern", writeConcern}, - {"ordered", ordered}, + {Name: "delete", Value: c.Name}, + {Name: "deletes", Value: []interface{}{op}}, + {Name: "writeConcern", Value: writeConcern}, + {Name: "ordered", Value: ordered}, } case bulkDeleteOp: // http://docs.mongodb.org/manual/reference/command/delete cmd = bson.D{ - {"delete", c.Name}, - {"deletes", op}, - {"writeConcern", writeConcern}, - {"ordered", ordered}, + {Name: "delete", Value: c.Name}, + {Name: "deletes", Value: op}, + {Name: "writeConcern", Value: writeConcern}, + {Name: "ordered", Value: ordered}, } } if bypassValidation { - cmd = append(cmd, bson.DocElem{"bypassDocumentValidation", true}) + cmd = append(cmd, bson.DocElem{Name: "bypassDocumentValidation", Value: true}) } var result writeCmdResult diff --git a/session_test.go b/session_test.go index a4cc04f01..227052719 100644 --- a/session_test.go +++ b/session_test.go @@ -173,10 +173,10 @@ func (s *S) TestURLReadPreferenceTags(c *C) { } tests := []test{ - {"localhost:40001?readPreference=secondary&readPreferenceTags=dc:ny,rack:1", []bson.D{{{"dc", "ny"}, {"rack", "1"}}}}, - {"localhost:40001?readPreference=secondary&readPreferenceTags= dc : ny , rack : 1 ", []bson.D{{{"dc", "ny"}, 
{"rack", "1"}}}}, - {"localhost:40001?readPreference=secondary&readPreferenceTags=dc:ny", []bson.D{{{"dc", "ny"}}}}, - {"localhost:40001?readPreference=secondary&readPreferenceTags=rack:1&readPreferenceTags=dc:ny", []bson.D{{{"rack", "1"}}, {{"dc", "ny"}}}}, + {"localhost:40001?readPreference=secondary&readPreferenceTags=dc:ny,rack:1", []bson.D{{{Name: "dc", Value: "ny"}, {Name: "rack", Value: "1"}}}}, + {"localhost:40001?readPreference=secondary&readPreferenceTags= dc : ny , rack : 1 ", []bson.D{{{Name: "dc", Value: "ny"}, {Name: "rack", Value: "1"}}}}, + {"localhost:40001?readPreference=secondary&readPreferenceTags=dc:ny", []bson.D{{{Name: "dc", Value: "ny"}}}}, + {"localhost:40001?readPreference=secondary&readPreferenceTags=rack:1&readPreferenceTags=dc:ny", []bson.D{{{Name: "rack", Value: "1"}}, {{Name: "dc", Value: "ny"}}}}, } for _, test := range tests { @@ -211,7 +211,7 @@ func (s *S) TestURLWithAppName(c *C) { db := session.DB("mydb") - err = db.Run(bson.D{{"profile", 2}}, nil) + err = db.Run(bson.D{{Name: "profile", Value: 2}}, nil) c.Assert(err, IsNil) coll := db.C("mycoll") @@ -229,8 +229,8 @@ func (s *S) TestURLWithAppName(c *C) { err = db.C("system.profile").Find(nil).Sort("-ts").One(&profileResult) c.Assert(err, IsNil) c.Assert(appName, Equals, profileResult.AppName) - // reset profiling to 0 as it add unecessary overhead to all other test - err = db.Run(bson.D{{"profile", 0}}, nil) + // reset profiling to 0 as it add unnecessary overhead to all other test + err = db.Run(bson.D{{Name: "profile", Value: 0}}, nil) c.Assert(err, IsNil) } @@ -453,7 +453,7 @@ func (s *S) TestInlineMap(c *C) { var v, result1 struct { A int - M map[string]int ",inline" + M map[string]int `bson:",inline"` } v.A = 1 @@ -574,11 +574,11 @@ func (s *S) TestUpsert(c *C) { ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { - err := coll.Insert(bson.D{{"k", n}, {"n", n}}) + err := coll.Insert(bson.D{{Name: "k", Value: n}, {Name: "n", Value: n}}) c.Assert(err, IsNil) } - info, err := coll.Upsert(M{"k": 42}, bson.D{{"k", 42}, {"n", 24}}) + info, err := coll.Upsert(M{"k": 42}, bson.D{{Name: "k", Value: 42}, {Name: "n", Value: 24}}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 1) c.Assert(info.Matched, Equals, 1) @@ -590,7 +590,7 @@ func (s *S) TestUpsert(c *C) { c.Assert(result["n"], Equals, 24) // Match but do not change. - info, err = coll.Upsert(M{"k": 42}, bson.D{{"k", 42}, {"n", 24}}) + info, err = coll.Upsert(M{"k": 42}, bson.D{{Name: "k", Value: 42}, {Name: "n", Value: 24}}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 1) // On 2.6+ this feels like a server mistake. 
c.Assert(info.Matched, Equals, 1) @@ -963,6 +963,7 @@ func (s *S) TestCreateCollectionForceIndex(c *C) { c.Assert(err, IsNil) indexes, err := coll.Indexes() + c.Assert(err, IsNil) c.Assert(indexes, HasLen, 1) } @@ -1008,7 +1009,7 @@ func (s *S) TestCreateCollectionValidator(c *C) { err = coll.Create(info) err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) - err = db.Run(bson.D{{"collMod", "mycoll"}, {"validator", M{"b": M{"$exists": true}}}}, nil) + err = db.Run(bson.D{{Name: "collMod", Value: "mycoll"}, {Name: "validator", Value: M{"b": M{"$exists": true}}}}, nil) c.Assert(err, IsNil) err = coll.Insert(M{"a": 2}) c.Assert(err, ErrorMatches, "Document failed validation") @@ -1910,7 +1911,7 @@ func (s *S) TestResumeIter(c *C) { c.Assert(iter.Err(), IsNil) c.Assert(got.N, Equals, 0) - // Test state returns the cursor ID, and firstBatch + // Test state returns the cursor Id, and firstBatch id, batch := iter.State() c.Assert(id, Not(Equals), 0) c.Assert(len(batch), Equals, 1) @@ -1933,7 +1934,7 @@ func (s *S) TestResumeIter(c *C) { // Done returns true c.Assert(newIter.Done(), Equals, true) - // Ensure state reports no data, no cursor ID + // Ensure state reports no data, no cursor Id id, batch = newIter.State() c.Assert(id, Equals, int64(0)) c.Assert(len(batch), Equals, 0) @@ -1950,7 +1951,7 @@ func (s *S) TestFindIterCursorTimeout(c *C) { defer session.Close() type Doc struct { - Id int "_id" + Id int `bson:"_id"` } coll := session.DB("test").C("test") @@ -1994,7 +1995,7 @@ func (s *S) TestFindIterCursorNoTimeout(c *C) { session.SetCursorTimeout(0) type Doc struct { - Id int "_id" + Id int `bson:"_id"` } coll := session.DB("test").C("test") @@ -2049,7 +2050,7 @@ func (s *S) TestTooManyItemsLimitBug(c *C) { for i := 0; i < 5; i++ { words = append(words, words...) } - doc := bson.D{{"words", words}} + doc := bson.D{{Name: "words", Value: words}} inserts := 10000 limit := 5000 iters := 0 @@ -2085,7 +2086,7 @@ func (s *S) TestBatchSizeZeroGetMore(c *C) { for i := 0; i < 5; i++ { words = append(words, words...) 
} - doc := bson.D{{"words", words}} + doc := bson.D{{Name: "words", Value: words}} inserts := 10000 iters := 0 for i := 0; i < inserts; i++ { @@ -2316,7 +2317,7 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) { cresult := struct{ ErrMsg string }{} db := session.DB("mydb") - err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) + err = db.Run(bson.D{{Name: "create", Value: "mycoll"}, {Name: "capped", Value: true}, {Name: "size", Value: 1024}}, &cresult) c.Assert(err, IsNil) c.Assert(cresult.ErrMsg, Equals, "") coll := db.C("mycoll") @@ -2410,7 +2411,7 @@ func (s *S) TestFindTailTimeoutNoSleep(c *C) { cresult := struct{ ErrMsg string }{} db := session.DB("mydb") - err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) + err = db.Run(bson.D{{Name: "create", Value: "mycoll"}, {Name: "capped", Value: true}, {Name: "size", Value: 1024}}, &cresult) c.Assert(err, IsNil) c.Assert(cresult.ErrMsg, Equals, "") coll := db.C("mycoll") @@ -2495,7 +2496,7 @@ func (s *S) TestFindTailNoTimeout(c *C) { cresult := struct{ ErrMsg string }{} db := session.DB("mydb") - err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) + err = db.Run(bson.D{{Name: "create", Value: "mycoll"}, {Name: "capped", Value: true}, {Name: "size", Value: 1024}}, &cresult) c.Assert(err, IsNil) c.Assert(cresult.ErrMsg, Equals, "") coll := db.C("mycoll") @@ -2872,7 +2873,7 @@ func (s *S) TestFindIterSnapshot(c *C) { seen := map[int]bool{} result := struct { - Id int "_id" + Id int `bson:"_id"` }{} for iter.Next(&result) { if len(seen) == 2 { @@ -3014,7 +3015,7 @@ func (s *S) TestPrefetching(c *C) { mgo.SetDebug(false) docs := make([]interface{}, total) for i := 0; i != total; i++ { - docs[i] = bson.D{{"n", i}} + docs[i] = bson.D{{Name: "n", Value: i}} } err = coll.Insert(docs...) 
c.Assert(err, IsNil) @@ -3959,13 +3960,13 @@ func (s *S) TestEnsureIndexEvalGetIndexes(c *C) { coll := session.DB("mydb").C("mycoll") - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({b: -1})"}}, nil) + err = session.Run(bson.D{{Name: "eval", Value: "db.getSiblingDB('mydb').mycoll.ensureIndex({b: -1})"}}, nil) c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({a: 1})"}}, nil) + err = session.Run(bson.D{{Name: "eval", Value: "db.getSiblingDB('mydb').mycoll.ensureIndex({a: 1})"}}, nil) c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({c: -1, e: 1})"}}, nil) + err = session.Run(bson.D{{Name: "eval", Value: "db.getSiblingDB('mydb').mycoll.ensureIndex({c: -1, e: 1})"}}, nil) c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({d: '2d'})"}}, nil) + err = session.Run(bson.D{{Name: "eval", Value: "db.getSiblingDB('mydb').mycoll.ensureIndex({d: '2d'})"}}, nil) c.Assert(err, IsNil) indexes, err := coll.Indexes() @@ -4050,7 +4051,7 @@ func (s *S) TestDistinct(c *C) { var result []int err = coll.Find(M{"n": M{"$gt": 2}}).Sort("n").Distinct("n", &result) - + c.Assert(err, IsNil) sort.IntSlice(result).Sort() c.Assert(result, DeepEquals, []int{3, 4, 6}) } @@ -4071,7 +4072,7 @@ func (s *S) TestMapReduce(c *C) { Reduce: "function(key, values) { return Array.sum(values); }", } var result []struct { - Id int "_id" + Id int `bson:"_id"` Value int } @@ -4107,7 +4108,7 @@ func (s *S) TestMapReduceFinalize(c *C) { Finalize: "function(key, count) { return {count: count} }", } var result []struct { - Id int "_id" + Id int `bson:"_id"` Value struct{ Count int } } _, err = coll.Find(nil).MapReduce(job, &result) @@ -4148,7 +4149,7 @@ func (s *S) TestMapReduceToCollection(c *C) { expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} var item *struct { - Id int "_id" + Id int `bson:"_id"` Value int } mr := session.DB("mydb").C("mr") @@ -4175,7 +4176,7 @@ func (s *S) TestMapReduceToOtherDb(c *C) { job := &mgo.MapReduce{ Map: "function() { emit(this.n, 1); }", Reduce: "function(key, values) { return Array.sum(values); }", - Out: bson.D{{"replace", "mr"}, {"db", "otherdb"}}, + Out: bson.D{{Name: "replace", Value: "mr"}, {Name: "db", Value: "otherdb"}}, } info, err := coll.Find(nil).MapReduce(job, nil) @@ -4188,7 +4189,7 @@ func (s *S) TestMapReduceToOtherDb(c *C) { expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} var item *struct { - Id int "_id" + Id int `bson:"_id"` Value int } mr := session.DB("otherdb").C("mr") @@ -4241,6 +4242,7 @@ func (s *S) TestMapReduceScope(c *C) { var result []bson.M _, err = coll.Find(nil).MapReduce(job, &result) + c.Assert(err, IsNil) c.Assert(len(result), Equals, 1) c.Assert(result[0]["value"], Equals, 42.0) } @@ -4404,7 +4406,12 @@ func (s *S) TestRepairCursor(c *C) { coll := session.DB("mydb").C("mycoll3") err = coll.DropCollection() - + if s.versionAtLeast(3, 0) && !s.versionAtLeast(3, 2) { + c.Assert(err.(*mgo.QueryError).Code, Equals, 0) + } else { + c.Assert(err.(*mgo.QueryError).Code, Equals, 26) + c.Assert(err.(*mgo.QueryError).Message, Equals, "ns not found") + } ns := []int{0, 10, 20, 30, 40, 50} for _, n := range ns { coll.Insert(M{"n": n}) @@ -4624,7 +4631,7 @@ func (s *S) TestFindIterDoneWithBatches(c *C) { result := struct{ N int }{} for i := 2; i < 7; i++ { // first check will be with pending local record; - // second will be with open cursor ID but no local + // second will be with open 
cursor Id but no local // records c.Assert(iter.Done(), Equals, false) ok := iter.Next(&result) @@ -4694,6 +4701,7 @@ func (s *S) TestSetCursorTimeout(c *C) { coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 42}) + c.Assert(err, IsNil) // This is just a smoke test. Won't wait 10 minutes for an actual timeout. @@ -4712,9 +4720,10 @@ func (s *S) TestNewIterNoServer(c *C) { defer session.Close() data, err := bson.Marshal(bson.M{"a": 1}) + c.Assert(err, IsNil) coll := session.DB("mydb").C("mycoll") - iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, nil) + iter := coll.NewIter(nil, []bson.Raw{{Kind: 3, Data: data}}, 42, nil) var result struct{ A int } ok := iter.Next(&result) @@ -4733,9 +4742,10 @@ func (s *S) TestNewIterNoServerPresetErr(c *C) { defer session.Close() data, err := bson.Marshal(bson.M{"a": 1}) + c.Assert(err, IsNil) coll := session.DB("mydb").C("mycoll") - iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, fmt.Errorf("my error")) + iter := coll.NewIter(nil, []bson.Raw{{Kind: 3, Data: data}}, 42, fmt.Errorf("my error")) var result struct{ A int } ok := iter.Next(&result) @@ -4761,8 +4771,8 @@ func (s *S) TestBypassValidation(c *C) { c.Assert(err, IsNil) err = coll.Database.Run(bson.D{ - {"collMod", "mycoll"}, - {"validator", M{"s": M{"$type": "string"}}}, + {Name: "collMod", Value: "mycoll"}, + {Name: "validator", Value: M{"s": M{"$type": "string"}}}, }, nil) c.Assert(err, IsNil) @@ -4870,9 +4880,9 @@ func (s *S) BenchmarkFindIterRaw(c *C) { coll := session.DB("mydb").C("mycoll") doc := bson.D{ - {"f2", "a short string"}, - {"f3", bson.D{{"1", "one"}, {"2", 2.0}}}, - {"f4", []string{"a", "b", "c", "d", "e", "f", "g"}}, + {Name: "f2", Value: "a short string"}, + {Name: "f3", Value: bson.D{{Name: "1", Value: "one"}, {Name: "2", Value: 2.0}}}, + {Name: "f4", Value: []string{"a", "b", "c", "d", "e", "f", "g"}}, } for i := 0; i < c.N+1; i++ { @@ -4903,7 +4913,7 @@ func BenchmarkInsertSingle(b *testing.B) { defer session.Close() doc := bson.D{ - {"A", strings.Repeat("*", 256)}, + {Name: "A", Value: strings.Repeat("*", 256)}, } coll := session.DB("mydb").C("benchmarkcoll") b.ResetTimer() @@ -4925,7 +4935,7 @@ func BenchmarkInsertMultiple(b *testing.B) { docs := make([]interface{}, 100) for i := range docs { docs[i] = bson.D{ - {"A", strings.Repeat("*", 256)}, + {Name: "A", Value: strings.Repeat("*", 256)}, } } coll := session.DB("mydb").C("benchmarkcoll") diff --git a/socket.go b/socket.go index c31c8312b..72fab9cf7 100644 --- a/socket.go +++ b/socket.go @@ -83,16 +83,16 @@ type queryOp struct { } type queryWrapper struct { - Query interface{} "$query" - OrderBy interface{} "$orderby,omitempty" - Hint interface{} "$hint,omitempty" - Explain bool "$explain,omitempty" - Snapshot bool "$snapshot,omitempty" - ReadPreference bson.D "$readPreference,omitempty" - MaxScan int "$maxScan,omitempty" - MaxTimeMS int "$maxTimeMS,omitempty" - Comment string "$comment,omitempty" - Collation *Collation "$collation,omitempty" + Query interface{} `bson:"$query"` + OrderBy interface{} `bson:"$orderby,omitempty"` + Hint interface{} `bson:"$hint,omitempty"` + Explain bool `bson:"$explain,omitempty"` + Snapshot bool `bson:"$snapshot,omitempty"` + ReadPreference bson.D `bson:"$readPreference,omitempty"` + MaxScan int `bson:"$maxScan,omitempty"` + MaxTimeMS int `bson:"$maxTimeMS,omitempty"` + Comment string `bson:"$comment,omitempty"` + Collation *Collation `bson:"$collation,omitempty"` } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { @@ -116,9 +116,9 @@ func (op 
*queryOp) finalQuery(socket *mongoSocket) interface{} { } op.hasOptions = true op.options.ReadPreference = make(bson.D, 0, 2) - op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"mode", modeName}) + op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{Name: "mode", Value: modeName}) if len(op.serverTags) > 0 { - op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"tags", op.serverTags}) + op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{Name: "tags", Value: op.serverTags}) } } if op.hasOptions { diff --git a/stats.go b/stats.go index 59723e60c..dcbd01045 100644 --- a/stats.go +++ b/stats.go @@ -33,6 +33,7 @@ import ( var stats *Stats var statsMutex sync.Mutex +// SetStats enables database state monitoring func SetStats(enabled bool) { statsMutex.Lock() if enabled { @@ -45,6 +46,7 @@ func SetStats(enabled bool) { statsMutex.Unlock() } +// GetStats returns the current database state func GetStats() (snapshot Stats) { statsMutex.Lock() snapshot = *stats @@ -52,6 +54,7 @@ func GetStats() (snapshot Stats) { return } +// ResetStats resets the Stats counters func ResetStats() { statsMutex.Lock() debug("Resetting stats") @@ -66,6 +69,13 @@ func ResetStats() { return } +// Stats holds info on the database state +// +// Relevant documentation: +// +// https://docs.mongodb.com/manual/reference/command/serverStatus/ +// +// TODO: outdated fields? type Stats struct { Clusters int MasterConns int diff --git a/txn/debug.go b/txn/debug.go index e23d277af..73ae8db3b 100644 --- a/txn/debug.go +++ b/txn/debug.go @@ -11,15 +11,15 @@ import ( var ( debugEnabled bool - logger log_Logger + logger logLogger ) -type log_Logger interface { +type logLogger interface { Output(calldepth int, s string) error } -// Specify the *log.Logger where logged messages should be sent to. -func SetLogger(l log_Logger) { +// SetLogger specifies the *log.Logger where logged messages should be sent. +func SetLogger(l logLogger) { logger = l } @@ -28,6 +28,8 @@ func SetDebug(debug bool) { debugEnabled = debug } +// ErrChaos is the error returned when an operation fails due to +// the failure injection mechanism.
var ErrChaos = fmt.Errorf("interrupted by chaos") var debugId uint32 diff --git a/txn/flusher.go b/txn/flusher.go index 43e01f4a9..3d1882d7f 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -35,7 +35,7 @@ type tokenAndId struct { bid bson.ObjectId } -func (ti tokenAndId) id() bson.ObjectId { +func (ti tokenAndId) Id() bson.ObjectId { return ti.bid } @@ -83,7 +83,7 @@ func (f *flusher) run() (err error) { NextPair: for i := 0; i < len(dqueue); i++ { pred := dqueue[i] - predid := pred.id() + predid := pred.Id() predt := seen[predid] if predt == nil || predt.Nonce != pred.nonce() { continue @@ -95,7 +95,7 @@ func (f *flusher) run() (err error) { for j := i + 1; j < len(dqueue); j++ { succ := dqueue[j] - succid := succ.id() + succid := succ.Id() succt := seen[succid] if succt == nil || succt.Nonce != succ.nonce() { continue @@ -142,14 +142,14 @@ func (f *flusher) run() (err error) { if len(scc) == 1 { pull[scc[0]] = seen[scc[0]] } - for _, id := range scc { - if err := f.advance(seen[id], pull, true); err != nil { + for _, Id := range scc { + if err := f.advance(seen[Id], pull, true); err != nil { return err } } if len(scc) > 1 { - for _, id := range scc { - pull[id] = seen[id] + for _, Id := range scc { + pull[Id] = seen[Id] } } } @@ -169,15 +169,13 @@ func (f *flusher) recurse(t *transaction, seen map[bson.ObjectId]*transaction, p remaining := make([]bson.ObjectId, 0, len(f.queue[dkey])) toPreload := make(map[bson.ObjectId]struct{}, len(f.queue[dkey])) for _, dtt := range f.queue[dkey] { - id := dtt.id() - if _, scheduled := toPreload[id]; seen[id] != nil || scheduled || preloaded[id] != nil { + Id := dtt.Id() + if _, scheduled := toPreload[Id]; seen[Id] != nil || scheduled || preloaded[Id] != nil { continue } - toPreload[id] = struct{}{} - remaining = append(remaining, id) + toPreload[Id] = struct{}{} + remaining = append(remaining, Id) } - // done with this map - toPreload = nil for len(remaining) > 0 { batch := remaining if len(batch) > preloadBatchSize { @@ -188,13 +186,13 @@ func (f *flusher) recurse(t *transaction, seen map[bson.ObjectId]*transaction, p if err != nil { return err } - for _, id := range batch { - if seen[id] != nil { + for _, Id := range batch { + if seen[Id] != nil { continue } - qt, ok := preloaded[id] + qt, ok := preloaded[Id] if !ok { - qt, err = f.load(id) + qt, err = f.load(Id) if err != nil { return err } @@ -239,7 +237,6 @@ func (f *flusher) advance(t *transaction, pull map[bson.ObjectId]*transaction, f panic(fmt.Errorf("transaction in unknown state: %q", t.State)) } } - panic("unreachable") } type stash string @@ -264,11 +261,15 @@ const ( stashInserting stashState = "inserting" ) -var txnFields = bson.D{{"txn-queue", 1}, {"txn-revno", 1}, {"txn-remove", 1}, {"txn-insert", 1}} +var txnFields = bson.D{ + {Name: "txn-queue", Value: 1}, + {Name: "txn-revno", Value: 1}, + {Name: "txn-remove", Value: 1}, + {Name: "txn-insert", Value: 1}} var errPreReqs = fmt.Errorf("transaction has pre-requisites and force is false") -// prepare injects t's id onto txn-queue for all affected documents +// prepare injects t's Id onto txn-queue for all affected documents // and collects the current txn-queue and txn-revno values during // the process. 
If the prepared txn-queue indicates that there are // pre-requisite transactions to be applied and the force parameter @@ -290,7 +291,7 @@ func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error NextDoc: for _, dkey := range dkeys { change := mgo.Change{ - Update: bson.D{{"$addToSet", bson.D{{"txn-queue", tt}}}}, + Update: bson.D{{Name: "$addToSet", Value: bson.D{{Name: "txn-queue", Value: tt}}}}, ReturnNew: true, } c := f.tc.Database.C(dkey.C) @@ -303,7 +304,7 @@ NextDoc: if f.opts.MaxTxnQueueLength > 0 && len(info.Queue) > f.opts.MaxTxnQueueLength { // abort with TXN Queue too long, but remove the entry we just added innerErr := c.UpdateId(dkey.Id, - bson.D{{"$pullAll", bson.D{{"txn-queue", []token{tt}}}}}) + bson.D{{Name: "$pullAll", Value: bson.D{{Name: "txn-queue", Value: []token{tt}}}}}) if innerErr != nil { f.debugf("error while backing out of queue-too-long: %v", innerErr) } @@ -387,8 +388,8 @@ NextDoc: // Save the prepared nonce onto t. nonce := tt.nonce() - qdoc := bson.D{{"_id", t.Id}, {"s", tpreparing}} - udoc := bson.D{{"$set", bson.D{{"s", tprepared}, {"n", nonce}}}} + qdoc := bson.D{{Name: "_id", Value: t.Id}, {Name: "s", Value: tpreparing}} + udoc := bson.D{{Name: "$set", Value: bson.D{{Name: "s", Value: tprepared}, {Name: "n", Value: nonce}}}} chaos("set-prepared") err = f.tc.Update(qdoc, udoc) if err == nil { @@ -426,12 +427,12 @@ NextDoc: } func (f *flusher) unstashToken(tt token, dkey docKey) error { - qdoc := bson.D{{"_id", dkey}, {"txn-queue", tt}} - udoc := bson.D{{"$pull", bson.D{{"txn-queue", tt}}}} + qdoc := bson.D{{Name: "_id", Value: dkey}, {Name: "txn-queue", Value: tt}} + udoc := bson.D{{Name: "$pull", Value: bson.D{{Name: "txn-queue", Value: tt}}}} chaos("") if err := f.sc.Update(qdoc, udoc); err == nil { chaos("") - err = f.sc.Remove(bson.D{{"_id", dkey}, {"txn-queue", bson.D{}}}) + err = f.sc.Remove(bson.D{{Name: "_id", Value: dkey}, {Name: "txn-queue", Value: bson.D{}}}) } else if err != mgo.ErrNotFound { return err } @@ -511,15 +512,15 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) revno[dkey] = info.Revno found := false - for _, id := range info.Queue { - if id == tt { + for _, Id := range info.Queue { + if Id == tt { found = true break } } f.queue[dkey] = tokensWithIds(info.Queue) if !found { - // Rescanned transaction id was not in the queue. This could mean one + // Rescanned transaction Id was not in the queue. This could mean one // of three things: // 1) The transaction was applied and popped by someone else. This is // the common case. 
@@ -587,7 +588,7 @@ NextDoc: for _, dtt := range f.queue[dkey] { if dtt.tt == tt { continue NextDoc - } else if dtt.id() != ttId { + } else if dtt.Id() != ttId { prereqs = true } } @@ -599,7 +600,7 @@ NextDoc: func (f *flusher) reload(t *transaction) error { var newt transaction query := f.tc.FindId(t.Id) - query.Select(bson.D{{"s", 1}, {"n", 1}, {"r", 1}}) + query.Select(bson.D{{Name: "s", Value: 1}, {Name: "n", Value: 1}, {Name: "r", Value: 1}}) if err := query.One(&newt); err != nil { return fmt.Errorf("failed to reload transaction: %v", err) } @@ -610,8 +611,8 @@ func (f *flusher) reload(t *transaction) error { return nil } -func (f *flusher) loadAndApply(id bson.ObjectId) error { - t, err := f.load(id) +func (f *flusher) loadAndApply(Id bson.ObjectId) error { + t, err := f.load(Id) if err != nil { return err } @@ -643,28 +644,28 @@ func (f *flusher) assert(t *transaction, revnos []int64, pull map[bson.ObjectId] continue } if op.Insert != nil { - return fmt.Errorf("Insert can only Assert txn.DocMissing", op.Assert) + return fmt.Errorf("Insert can only Assert txn.DocMissing, was %v", op.Assert) } // if revnos[i] < 0 { abort }? - qdoc = append(qdoc[:0], bson.DocElem{"_id", op.Id}) + qdoc = append(qdoc[:0], bson.DocElem{Name: "_id", Value: op.Id}) if op.Assert != DocMissing { var revnoq interface{} if n := revno[dkey]; n == 0 { - revnoq = bson.D{{"$exists", false}} + revnoq = bson.D{{Name: "$exists", Value: false}} } else { revnoq = n } // XXX Add tt to the query here, once we're sure it's all working. // Not having it increases the chances of breaking on bad logic. - qdoc = append(qdoc, bson.DocElem{"txn-revno", revnoq}) + qdoc = append(qdoc, bson.DocElem{Name: "txn-revno", Value: revnoq}) if op.Assert != DocExists { - qdoc = append(qdoc, bson.DocElem{"$or", []interface{}{op.Assert}}) + qdoc = append(qdoc, bson.DocElem{Name: "$or", Value: []interface{}{op.Assert}}) } } c := f.tc.Database.C(op.C) - if err := c.Find(qdoc).Select(bson.D{{"_id", 1}}).One(nil); err == mgo.ErrNotFound { + if err := c.Find(qdoc).Select(bson.D{{Name: "_id", Value: 1}}).One(nil); err == mgo.ErrNotFound { // Assertion failed or someone else started applying. 
return f.abortOrReload(t, revnos, pull) } else if err != nil { @@ -678,8 +679,8 @@ func (f *flusher) assert(t *transaction, revnos []int64, pull map[bson.ObjectId] func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) (err error) { f.debugf("Aborting or reloading %s (was %q)", t, t.State) if t.State == tprepared { - qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}} - udoc := bson.D{{"$set", bson.D{{"s", taborting}}}} + qdoc := bson.D{{Name: "_id", Value: t.Id}, {Name: "s", Value: tprepared}} + udoc := bson.D{{Name: "$set", Value: bson.D{{Name: "s", Value: taborting}}}} chaos("set-aborting") if err = f.tc.Update(qdoc, udoc); err == nil { t.State = taborting @@ -711,7 +712,7 @@ func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.Ob if len(pullAll) == 0 { continue } - udoc := bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}} + udoc := bson.D{{Name: "$pullAll", Value: bson.D{{Name: "txn-queue", Value: pullAll}}}} chaos("") if revnos[i] < 0 { err = f.sc.UpdateId(dkey, udoc) @@ -724,7 +725,7 @@ func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.Ob } } } - udoc := bson.D{{"$set", bson.D{{"s", taborted}}}} + udoc := bson.D{{Name: "$set", Value: bson.D{{Name: "s", Value: taborted}}}} chaos("set-aborted") if err := f.tc.UpdateId(t.Id, udoc); err != nil && err != mgo.ErrNotFound { return err @@ -746,8 +747,8 @@ func (f *flusher) checkpoint(t *transaction, revnos []int64) error { } // Save in t the txn-revno values the transaction must run on. - qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}} - udoc := bson.D{{"$set", bson.D{{"s", tapplying}, {"r", revnos}}}} + qdoc := bson.D{{Name: "_id", Value: t.Id}, {Name: "s", Value: tprepared}} + udoc := bson.D{{Name: "$set", Value: bson.D{{Name: "s", Value: tapplying}, {Name: "r", Value: revnos}}}} chaos("set-applying") err := f.tc.Update(qdoc, udoc) if err == nil { @@ -771,7 +772,7 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err } logRevnos := append([]int64(nil), t.Revnos...) - logDoc := bson.D{{"_id", t.Id}} + logDoc := bson.D{{Name: "_id", Value: t.Id}} tt := tokenFor(t) for i := range t.Ops { @@ -788,18 +789,18 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err c := f.tc.Database.C(op.C) - qdoc := bson.D{{"_id", dkey.Id}, {"txn-revno", revno}, {"txn-queue", tt}} + qdoc := bson.D{{Name: "_id", Value: dkey.Id}, {Name: "txn-revno", Value: revno}, {Name: "txn-queue", Value: tt}} if op.Insert != nil { qdoc[0].Value = dkey if revno == -1 { - qdoc[1].Value = bson.D{{"$exists", false}} + qdoc[1].Value = bson.D{{Name: "$exists", Value: false}} } } else if revno == 0 { // There's no document with revno 0. The only way to see it is // when an existent document participates in a transaction the // first time. Txn-inserted documents get revno -1 while in the // stash for the first time, and -revno+1 == 2 when they go live.
- qdoc[1].Value = bson.D{{"$exists", false}} + qdoc[1].Value = bson.D{{Name: "$exists", Value: false}} } pullAll := tokensToPull(dqueue, pull, tt) @@ -818,10 +819,10 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err if d, err = objToDoc(op.Update); err != nil { return err } - if d, err = addToDoc(d, "$pullAll", bson.D{{"txn-queue", pullAll}}); err != nil { + if d, err = addToDoc(d, "$pullAll", bson.D{{Name: "txn-queue", Value: pullAll}}); err != nil { return err } - if d, err = addToDoc(d, "$set", bson.D{{"txn-revno", newRevno}}); err != nil { + if d, err = addToDoc(d, "$set", bson.D{{Name: "txn-revno", Value: newRevno}}); err != nil { return err } chaos("") @@ -836,7 +837,7 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err nonce := newNonce() stash := txnInfo{} change := mgo.Change{ - Update: bson.D{{"$push", bson.D{{"n", nonce}}}}, + Update: bson.D{{Name: "$push", Value: bson.D{{Name: "n", Value: nonce}}}}, Upsert: true, ReturnNew: true, } @@ -844,7 +845,7 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err return err } change = mgo.Change{ - Update: bson.D{{"$set", bson.D{{"txn-remove", t.Id}}}}, + Update: bson.D{{Name: "$set", Value: bson.D{{Name: "txn-remove", Value: t.Id}}}}, ReturnNew: true, } var info txnInfo @@ -858,14 +859,14 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err var set, unset bson.D if revno == 0 { // Missing revno in stash means -1. - set = bson.D{{"txn-queue", info.Queue}} - unset = bson.D{{"n", 1}, {"txn-revno", 1}} + set = bson.D{{Name: "txn-queue", Value: info.Queue}} + unset = bson.D{{Name: "n", Value: 1}, {Name: "txn-revno", Value: 1}} } else { - set = bson.D{{"txn-queue", info.Queue}, {"txn-revno", newRevno}} - unset = bson.D{{"n", 1}} + set = bson.D{{Name: "txn-queue", Value: info.Queue}, {Name: "txn-revno", Value: newRevno}} + unset = bson.D{{Name: "n", Value: 1}} } - qdoc := bson.D{{"_id", dkey}, {"n", nonce}} - udoc := bson.D{{"$set", set}, {"$unset", unset}} + qdoc := bson.D{{Name: "_id", Value: dkey}, {Name: "n", Value: nonce}} + udoc := bson.D{{Name: "$set", Value: set}, {Name: "$unset", Value: unset}} if err = f.sc.Update(qdoc, udoc); err == nil { updated = true } else if err != mgo.ErrNotFound { @@ -890,14 +891,14 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err return err } change := mgo.Change{ - Update: bson.D{{"$set", bson.D{{"txn-insert", t.Id}}}}, + Update: bson.D{{Name: "$set", Value: bson.D{{Name: "txn-insert", Value: t.Id}}}}, ReturnNew: true, } chaos("") var info txnInfo if _, err = f.sc.Find(qdoc).Apply(change, &info); err == nil { f.debugf("Stash for document %v has revno %d and queue: %v", dkey, info.Revno, info.Queue) - d = setInDoc(d, bson.D{{"_id", op.Id}, {"txn-revno", newRevno}, {"txn-queue", info.Queue}}) + d = setInDoc(d, bson.D{{Name: "_id", Value: op.Id}, {Name: "txn-revno", Value: newRevno}, {Name: "txn-queue", Value: info.Queue}}) // Unlikely yet unfortunate race in here if this gets seriously // delayed. 
If someone inserts+removes meanwhile, this will // reinsert, and there's no way to avoid that while keeping the @@ -947,7 +948,7 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err } } if dr == nil { - logDoc = append(logDoc, bson.DocElem{op.C, bson.D{{"d", []interface{}{}}, {"r", []int64{}}}}) + logDoc = append(logDoc, bson.DocElem{Name: op.C, Value: bson.D{{Name: "d", Value: []interface{}{}}, {Name: "r", Value: []int64{}}}}) dr = logDoc[len(logDoc)-1].Value.(bson.D) } dr[0].Value = append(dr[0].Value.([]interface{}), op.Id) @@ -971,7 +972,9 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err // it has been applied and mark it at such. f.debugf("Marking %s as applied", t) chaos("set-applied") - f.tc.Update(bson.D{{"_id", t.Id}, {"s", tapplying}}, bson.D{{"$set", bson.D{{"s", tapplied}}}}) + f.tc.Update( + bson.D{{Name: "_id", Value: t.Id}, {Name: "s", Value: tapplying}}, + bson.D{{Name: "$set", Value: bson.D{{Name: "s", Value: tapplied}}}}) return nil } @@ -982,7 +985,7 @@ func tokensToPull(dqueue []tokenAndId, pull map[bson.ObjectId]*transaction, dont if dtt.tt == dontPull { continue } - if _, ok := pull[dtt.id()]; ok { + if _, ok := pull[dtt.Id()]; ok { // It was handled before and this is a leftover invalid // nonce in the queue. Cherry-pick it out. result = append(result, dtt.tt) @@ -1009,14 +1012,14 @@ func addToDoc(doc bson.D, key string, add bson.D) (bson.D, error) { if elem.Name != key { continue } - if old, ok := elem.Value.(bson.D); ok { - elem.Value = append(old, add...) - return doc, nil - } else { + old, ok := elem.Value.(bson.D) + if !ok { return nil, fmt.Errorf("invalid %q value in change document: %#v", key, elem.Value) } + elem.Value = append(old, add...) + return doc, nil } - return append(doc, bson.DocElem{key, add}), nil + return append(doc, bson.DocElem{Name: key, Value: add}), nil } func setInDoc(doc bson.D, set bson.D) bson.D { diff --git a/txn/txn.go b/txn/txn.go index 8b44c8339..6e5c89f34 100644 --- a/txn/txn.go +++ b/txn/txn.go @@ -1,4 +1,4 @@ -// The txn package implements support for multi-document transactions. +// Package txn implements support for multi-document transactions. // // For details check the following blog post: // @@ -113,7 +113,7 @@ NextOp: } // tokenFor returns a unique transaction token that -// is composed by t's id and a nonce. If t already has +// is composed of t's Id and a nonce. If t already has // a nonce assigned to it, it will be used, otherwise // a new nonce will be generated. func tokenFor(t *transaction) token { @@ -207,10 +207,13 @@ func (op *Op) name() string { } const ( - // DocExists and DocMissing may be used on an operation's + // DocExists may be used on an operation's // Assert value to assert that the document with the given - // Id exists or does not exist, respectively. - DocExists = "d+" + // ID exists. + DocExists = "d+" + // DocMissing may be used on an operation's + // Assert value to assert that the document with the given + // ID does not exist. DocMissing = "d-" ) @@ -268,13 +271,15 @@ func DefaultRunnerOptions() RunnerOptions { } } +// ErrAborted is the error returned when one or more operations +// can't be applied. var ErrAborted = fmt.Errorf("transaction aborted") // Run creates a new transaction with ops and runs it immediately.
-// The id parameter specifies the transaction id, and may be written +// The id parameter specifies the transaction Id, and may be written // down ahead of time to later verify the success of the change and // resume it, when the procedure is interrupted for any reason. If -// empty, a random id will be generated. +// empty, a random Id will be generated. // The info parameter, if not nil, is included under the "i" // field of the transaction document. // @@ -291,7 +296,7 @@ var ErrAborted = fmt.Errorf("transaction aborted") // reason, it may be resumed explicitly or by attempting to apply // another transaction on any of the documents targeted by ops, as // long as the interruption was made after the transaction document -// itself was inserted. Run Resume with the obtained transaction id +// itself was inserted. Run Resume with the obtained transaction Id // to confirm whether the transaction was applied or not. // // Any number of transactions may be run concurrently, with one @@ -349,7 +354,7 @@ func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) { // from individual transactions are ignored. func (r *Runner) ResumeAll() (err error) { debugf("Resuming all unfinished transactions") - iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter() + iter := r.tc.Find(bson.D{{Name: "s", Value: bson.D{{Name: "$in", Value: []state{tpreparing, tprepared, tapplying}}}}}).Iter() var t transaction for iter.Next(&t) { if t.State == tapplied || t.State == taborted { @@ -366,7 +371,7 @@ func (r *Runner) ResumeAll() (err error) { return nil } -// Resume resumes the transaction with id. It returns mgo.ErrNotFound +// Resume resumes the transaction with Id. It returns mgo.ErrNotFound // if the transaction is not found. Otherwise, it has the same semantics // of the Run method after the transaction is inserted. 
func (r *Runner) Resume(id bson.ObjectId) (err error) { @@ -417,8 +422,8 @@ func (r *Runner) PurgeMissing(collections ...string) error { type S []interface{} type TDoc struct { - Id interface{} "_id" - TxnQueue []string "txn-queue" + Id interface{} `bson:"_id"` + TxnQueue []string `bson:"txn-queue"` } found := make(map[bson.ObjectId]bool) @@ -451,8 +456,8 @@ func (r *Runner) PurgeMissing(collections ...string) error { } type StashTDoc struct { - Id docKey "_id" - TxnQueue []string "txn-queue" + Id docKey `bson:"_id"` + TxnQueue []string `bson:"txn-queue"` } iter := r.sc.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter() @@ -511,7 +516,6 @@ func (r *Runner) loadMulti(ids []bson.ObjectId, preloaded map[bson.ObjectId]*tra return nil } - type typeNature int const ( @@ -644,7 +648,6 @@ func structcmp(a, b interface{}) int { return 1 } } - panic("unreachable") } func isExported(name string) bool { diff --git a/txn/txn_test.go b/txn/txn_test.go index 8b85986b5..fdc00eb3d 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -121,7 +121,7 @@ func (s *S) TestInsert(c *C) { c.Assert(account.Balance, Equals, 200) } -func (s *S) TestInsertStructID(c *C) { +func (s *S) TestInsertStructId(c *C) { type id struct { FirstName string LastName string @@ -374,8 +374,8 @@ func (s *S) TestAssertNestedOr(c *C) { ops := []txn.Op{{ C: "accounts", Id: 0, - Assert: bson.D{{"$or", []bson.D{{{"balance", 100}}, {{"balance", 300}}}}}, - Update: bson.D{{"$inc", bson.D{{"balance", 100}}}}, + Assert: bson.D{{Name: "$or", Value: []bson.D{{{Name: "balance", Value: 100}}, {{Name: "balance", Value: 300}}}}}, + Update: bson.D{{Name: "$inc", Value: bson.D{{Name: "balance", Value: 100}}}}, }} err = s.runner.Run(ops, "", nil) @@ -390,7 +390,7 @@ func (s *S) TestAssertNestedOr(c *C) { func (s *S) TestVerifyFieldOrdering(c *C) { // Used to have a map in certain operations, which means // the ordering of fields would be messed up. - fields := bson.D{{"a", 1}, {"b", 2}, {"c", 3}} + fields := bson.D{{Name: "a", Value: 1}, {Name: "b", Value: 2}, {Name: "c", Value: 3}} ops := []txn.Op{{ C: "accounts", Id: 0, @@ -441,8 +441,8 @@ func (s *S) TestChangeLog(c *C) { type IdList []interface{} type Log struct { - Docs IdList "d" - Revnos []int64 "r" + Docs IdList `bson:"d"` + Revnos []int64 `bson:"r"` } var m map[string]*Log err = chglog.FindId(id).One(&m) @@ -567,7 +567,7 @@ func (s *S) TestPurgeMissing(c *C) { err = s.accounts.FindId(want.Id).One(&got) if want.Balance == -1 { if err != mgo.ErrNotFound { - c.Errorf("Account %d should not exist, find got err=%#v", err) + c.Errorf("Account %d should not exist, find got err=%#v", want.Id, err) } } else if err != nil { c.Errorf("Account %d should have balance of %d, but wasn't found", want.Id, want.Balance) @@ -776,7 +776,7 @@ func (s *S) TestPurgeMissingPipelineSizeLimit(c *C) { // processing the txn-queue fields of stash documents so insert // the large txn-queue there too to ensure that no longer happens.
err = s.sc.Insert( - bson.D{{"c", "accounts"}, {"id", 0}}, + bson.D{{Name: "c", Value: "accounts"}, {Name: "id", Value: 0}}, bson.M{"txn-queue": fakeTxnQueue}, ) c.Assert(err, IsNil) @@ -789,7 +789,6 @@ func (s *S) TestPurgeMissingPipelineSizeLimit(c *C) { var flaky = flag.Bool("flaky", false, "Include flaky tests") var txnQueueLength = flag.Int("qlength", 100, "txn-queue length for tests") - func (s *S) TestTxnQueueStressTest(c *C) { // This fails about 20% of the time on Mongo 3.2 (I haven't tried // other versions) with account balance being 3999 instead of @@ -925,11 +924,6 @@ func (s *S) TestTxnQueueBrokenPrepared(c *C) { c.Logf("%8.3fs to set up %d 'prepared' txns", time.Since(t).Seconds(), *txnQueueLength) t = time.Now() s.accounts.UpdateId(0, bson.M{"$pullAll": bson.M{"txn-queue": []string{badTxnToken}}}) - ops = []txn.Op{{ - C: "accounts", - Id: 0, - Update: M{"$inc": M{"balance": 100}}, - }} err = s.runner.ResumeAll() c.Assert(err, IsNil) c.Logf("%8.3fs to ResumeAll N=%d 'prepared' txns", @@ -975,4 +969,3 @@ func (s *S) TestTxnQueuePreparing(c *C) { } c.Check(len(qdoc.Queue), Equals, expectedCount) } -
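
For context when reviewing the txn changes: below is a minimal usage sketch (not part of the patch) of the Run/Resume flow described in the doc comments above, written with the explicit Name/Value form of bson.D that this patch adopts throughout. The mongod address and the "txns"/"accounts" collection names are illustrative assumptions only.

```
package main

import (
	"log"

	"github.com/globalsign/mgo"
	"github.com/globalsign/mgo/bson"
	"github.com/globalsign/mgo/txn"
)

func main() {
	// Assumes a reachable local mongod; names are illustrative.
	session, err := mgo.Dial("localhost:27017")
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	db := session.DB("mydb")
	runner := txn.NewRunner(db.C("txns"))

	// Transfer 100 between two accounts, asserting both documents exist.
	ops := []txn.Op{{
		C:      "accounts",
		Id:     0,
		Assert: txn.DocExists,
		Update: bson.D{{Name: "$inc", Value: bson.D{{Name: "balance", Value: -100}}}},
	}, {
		C:      "accounts",
		Id:     1,
		Assert: txn.DocExists,
		Update: bson.D{{Name: "$inc", Value: bson.D{{Name: "balance", Value: 100}}}},
	}}

	// Record the transaction id up front so the outcome can be verified
	// later with runner.Resume(id) if this process is interrupted.
	id := bson.NewObjectId()
	if err := runner.Run(ops, id, nil); err == txn.ErrAborted {
		log.Printf("transaction %s aborted: an assertion failed", id.Hex())
	} else if err != nil {
		log.Fatal(err)
	}
}
```

If Run returns txn.ErrAborted, an assertion failed and none of the operations were applied; with a pre-generated id, Resume(id) can later confirm whether an interrupted transaction was committed.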