summaryrefslogtreecommitdiff
path: root/vendor/github.com/oschwald/maxminddb-golang
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/oschwald/maxminddb-golang')
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/LICENSE15
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/README.md38
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/appveyor.yml19
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/decoder.go721
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/errors.go42
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go15
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go85
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/reader.go259
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go28
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/reader_other.go63
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/traverse.go108
-rw-r--r--vendor/github.com/oschwald/maxminddb-golang/verifier.go185
12 files changed, 1578 insertions, 0 deletions
diff --git a/vendor/github.com/oschwald/maxminddb-golang/LICENSE b/vendor/github.com/oschwald/maxminddb-golang/LICENSE
new file mode 100644
index 0000000..2969677
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/oschwald/maxminddb-golang/README.md b/vendor/github.com/oschwald/maxminddb-golang/README.md
new file mode 100644
index 0000000..cdd6bd1
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/README.md
@@ -0,0 +1,38 @@
+# MaxMind DB Reader for Go #
+
+[![Build Status](https://travis-ci.org/oschwald/maxminddb-golang.png?branch=master)](https://travis-ci.org/oschwald/maxminddb-golang)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/4j2f9oep8nnfrmov/branch/master?svg=true)](https://ci.appveyor.com/project/oschwald/maxminddb-golang/branch/master)
+[![GoDoc](https://godoc.org/github.com/oschwald/maxminddb-golang?status.png)](https://godoc.org/github.com/oschwald/maxminddb-golang)
+
+This is a Go reader for the MaxMind DB format. Although this can be used to
+read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
+[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
+[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
+API for doing so.
+
+This is not an official MaxMind API.
+
+## Installation ##
+
+```
+go get github.com/oschwald/maxminddb-golang
+```
+
+## Usage ##
+
+[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
+documentation and examples.
+
+## Examples ##
+
+See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
+`example_test.go` for examples.
+
+## Contributing ##
+
+Contributions welcome! Please fork the repository and open a pull request
+with your changes.
+
+## License ##
+
+This is free software, licensed under the ISC License.
diff --git a/vendor/github.com/oschwald/maxminddb-golang/appveyor.yml b/vendor/github.com/oschwald/maxminddb-golang/appveyor.yml
new file mode 100644
index 0000000..e2bb9dd
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/appveyor.yml
@@ -0,0 +1,19 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\oschwald\maxminddb-golang
+
+environment:
+ GOPATH: c:\gopath
+
+install:
+ - echo %PATH%
+ - echo %GOPATH%
+ - git submodule update --init --recursive
+ - go version
+ - go env
+ - go get -v -t ./...
+
+build_script:
+ - go test -v ./...
diff --git a/vendor/github.com/oschwald/maxminddb-golang/decoder.go b/vendor/github.com/oschwald/maxminddb-golang/decoder.go
new file mode 100644
index 0000000..6e4d7e5
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/decoder.go
@@ -0,0 +1,721 @@
+package maxminddb
+
+import (
+ "encoding/binary"
+ "math"
+ "math/big"
+ "reflect"
+ "sync"
+)
+
// decoder decodes values from the data section of a MaxMind DB.
// buffer holds the raw data-section bytes; all offsets taken and returned
// by its methods are relative to this slice.
type decoder struct {
	buffer []byte
}
+
// dataType enumerates the field types defined by the MaxMind DB format.
// The numeric order matches the on-disk type numbers: the top three bits of
// a control byte hold values 0-7, and 0 (_Extended) signals that the real
// type is stored in the following byte, biased by 7 (see decodeCtrlData).
type dataType int

const (
	_Extended dataType = iota
	_Pointer
	_String
	_Float64
	_Bytes
	_Uint16
	_Uint32
	_Map
	_Int32
	_Uint64
	_Uint128
	_Slice
	_Container
	_Marker
	_Bool
	_Float32
)
+
const (
	// maximumDataStructureDepth bounds recursion while decoding nested
	// maps/slices/pointers so a corrupt or malicious database cannot blow
	// the stack. This is the value used in libmaxminddb.
	maximumDataStructureDepth = 512
)
+
// decode decodes the value at offset into result and returns the offset of
// the value that follows it. depth guards against excessively nested (likely
// corrupt) data; see maximumDataStructureDepth.
func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
	if depth > maximumDataStructureDepth {
		return 0, newInvalidDatabaseError("exceeded maximum data structure depth; database is likely corrupt")
	}
	typeNum, size, newOffset, err := d.decodeCtrlData(offset)
	if err != nil {
		return 0, err
	}

	// Special case: a uintptr-typed result captures the value's offset
	// rather than the decoded value (see Reader.Decode), except for
	// pointers, which must be followed first so the recorded offset is
	// that of the pointed-to data.
	if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
		result.Set(reflect.ValueOf(uintptr(offset)))
		return d.nextValueOffset(offset, 1)
	}
	return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
}
+
// decodeCtrlData reads the control byte at offset and returns the field's
// type, its decoded size value, and the offset of the first payload byte.
func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
	newOffset := offset + 1
	if offset >= uint(len(d.buffer)) {
		return 0, 0, 0, newOffsetError()
	}
	ctrlByte := d.buffer[offset]

	// Top three bits carry the type. 0 (_Extended) means the actual type
	// is in the next byte, biased by 7.
	typeNum := dataType(ctrlByte >> 5)
	if typeNum == _Extended {
		if newOffset >= uint(len(d.buffer)) {
			return 0, 0, 0, newOffsetError()
		}
		typeNum = dataType(d.buffer[newOffset] + 7)
		newOffset++
	}

	var size uint
	size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
	return typeNum, size, newOffset, err
}
+
// sizeFromCtrlByte decodes the size encoded in the low five bits of the
// control byte. Per the MaxMind DB spec: values < 29 are the size itself;
// 29 means size is 29 + the next byte; 30 means 285 (29+256) + the next two
// bytes; 31 means 65821 (285+65536) + the next three bytes. For extended
// types the five bits are always the literal size.
func (d *decoder) sizeFromCtrlByte(ctrlByte byte, offset uint, typeNum dataType) (uint, uint, error) {
	size := uint(ctrlByte & 0x1f)
	if typeNum == _Extended {
		return size, offset, nil
	}

	var bytesToRead uint
	if size < 29 {
		return size, offset, nil
	}

	bytesToRead = size - 28
	newOffset := offset + bytesToRead
	if newOffset > uint(len(d.buffer)) {
		return 0, 0, newOffsetError()
	}
	if size == 29 {
		return 29 + uint(d.buffer[offset]), offset + 1, nil
	}

	sizeBytes := d.buffer[offset:newOffset]

	switch {
	case size == 30:
		size = 285 + uintFromBytes(0, sizeBytes)
	case size > 30:
		size = uintFromBytes(0, sizeBytes) + 65821
	}
	return size, newOffset, nil
}
+
// decodeFromType dispatches decoding of a value of the given type into
// result. For bool/map/pointer/slice the size field has a type-specific
// meaning; for the remaining types it is the payload length in bytes, which
// is bounds-checked here once so the per-type decoders can slice the buffer
// without re-checking.
func (d *decoder) decodeFromType(
	dtype dataType,
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	result = d.indirect(result)

	// For these types, size has a special meaning
	switch dtype {
	case _Bool:
		return d.unmarshalBool(size, offset, result)
	case _Map:
		return d.unmarshalMap(size, offset, result, depth)
	case _Pointer:
		return d.unmarshalPointer(size, offset, result, depth)
	case _Slice:
		return d.unmarshalSlice(size, offset, result, depth)
	}

	// For the remaining types, size is the byte size
	if offset+size > uint(len(d.buffer)) {
		return 0, newOffsetError()
	}
	switch dtype {
	case _Bytes:
		return d.unmarshalBytes(size, offset, result)
	case _Float32:
		return d.unmarshalFloat32(size, offset, result)
	case _Float64:
		return d.unmarshalFloat64(size, offset, result)
	case _Int32:
		return d.unmarshalInt32(size, offset, result)
	case _String:
		return d.unmarshalString(size, offset, result)
	case _Uint16:
		return d.unmarshalUint(size, offset, result, 16)
	case _Uint32:
		return d.unmarshalUint(size, offset, result, 32)
	case _Uint64:
		return d.unmarshalUint(size, offset, result, 64)
	case _Uint128:
		return d.unmarshalUint128(size, offset, result)
	default:
		return 0, newInvalidDatabaseError("unknown type: %d", dtype)
	}
}
+
+func (d *decoder) unmarshalBool(size uint, offset uint, result reflect.Value) (uint, error) {
+ if size > 1 {
+ return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (bool size of %v)", size)
+ }
+ value, newOffset, err := d.decodeBool(size, offset)
+ if err != nil {
+ return 0, err
+ }
+ switch result.Kind() {
+ case reflect.Bool:
+ result.SetBool(value)
+ return newOffset, nil
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
// indirect follows pointers and create values as necessary. This is
// heavily based on encoding/json as my original version had a subtle
// bug. This method should be considered to be licensed under
// https://golang.org/LICENSE
func (d *decoder) indirect(result reflect.Value) reflect.Value {
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if result.Kind() == reflect.Interface && !result.IsNil() {
			e := result.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() {
				result = e
				continue
			}
		}

		if result.Kind() != reflect.Ptr {
			break
		}

		// Allocate the pointed-to value so decoding has somewhere to write.
		if result.IsNil() {
			result.Set(reflect.New(result.Type().Elem()))
		}
		result = result.Elem()
	}
	return result
}
+
+var sliceType = reflect.TypeOf([]byte{})
+
// unmarshalBytes decodes a byte field into result, which must be a []byte
// (other slice types fall through to a type error) or an empty interface.
func (d *decoder) unmarshalBytes(size uint, offset uint, result reflect.Value) (uint, error) {
	value, newOffset, err := d.decodeBytes(size, offset)
	if err != nil {
		return 0, err
	}
	switch result.Kind() {
	case reflect.Slice:
		if result.Type() == sliceType {
			result.SetBytes(value)
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
+
+func (d *decoder) unmarshalFloat32(size uint, offset uint, result reflect.Value) (uint, error) {
+ if size != 4 {
+ return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float32 size of %v)", size)
+ }
+ value, newOffset, err := d.decodeFloat32(size, offset)
+ if err != nil {
+ return 0, err
+ }
+
+ switch result.Kind() {
+ case reflect.Float32, reflect.Float64:
+ result.SetFloat(float64(value))
+ return newOffset, nil
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalFloat64(size uint, offset uint, result reflect.Value) (uint, error) {
+
+ if size != 8 {
+ return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float 64 size of %v)", size)
+ }
+ value, newOffset, err := d.decodeFloat64(size, offset)
+ if err != nil {
+ return 0, err
+ }
+ switch result.Kind() {
+ case reflect.Float32, reflect.Float64:
+ if result.OverflowFloat(value) {
+ return 0, newUnmarshalTypeError(value, result.Type())
+ }
+ result.SetFloat(value)
+ return newOffset, nil
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
// unmarshalInt32 decodes a signed 32-bit field (stored in 0-4 bytes) into
// any integer kind that can hold it without overflow, or an empty interface.
func (d *decoder) unmarshalInt32(size uint, offset uint, result reflect.Value) (uint, error) {
	if size > 4 {
		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (int32 size of %v)", size)
	}
	value, newOffset, err := d.decodeInt(size, offset)
	if err != nil {
		return 0, err
	}

	switch result.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		n := int64(value)
		if !result.OverflowInt(n) {
			result.SetInt(n)
			return newOffset, nil
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		// Negative values convert modulo 2^64 here and then fail the
		// overflow check for smaller unsigned kinds.
		n := uint64(value)
		if !result.OverflowUint(n) {
			result.SetUint(n)
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
+
// unmarshalMap decodes a map field (size = number of key/value pairs) into a
// struct, a map, or an empty interface (which receives a
// map[string]interface{}).
func (d *decoder) unmarshalMap(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	result = d.indirect(result)
	switch result.Kind() {
	default:
		return 0, newUnmarshalTypeError("map", result.Type())
	case reflect.Struct:
		return d.decodeStruct(size, offset, result, depth)
	case reflect.Map:
		return d.decodeMap(size, offset, result, depth)
	case reflect.Interface:
		if result.NumMethod() == 0 {
			rv := reflect.ValueOf(make(map[string]interface{}, size))
			newOffset, err := d.decodeMap(size, offset, rv, depth)
			result.Set(rv)
			return newOffset, err
		}
		return 0, newUnmarshalTypeError("map", result.Type())
	}
}
+
// unmarshalPointer follows a pointer field to its target and decodes the
// target into result. Note the returned offset is the position after the
// pointer itself, not after the pointed-to value.
func (d *decoder) unmarshalPointer(size uint, offset uint, result reflect.Value, depth int) (uint, error) {
	pointer, newOffset, err := d.decodePointer(size, offset)
	if err != nil {
		return 0, err
	}
	_, err = d.decode(pointer, result, depth)
	return newOffset, err
}
+
// unmarshalSlice decodes an array field (size = element count) into a slice
// or an empty interface (which receives a []interface{}).
func (d *decoder) unmarshalSlice(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	switch result.Kind() {
	case reflect.Slice:
		return d.decodeSlice(size, offset, result, depth)
	case reflect.Interface:
		if result.NumMethod() == 0 {
			// Use an addressable []interface{} so decodeSlice can Set it.
			a := []interface{}{}
			rv := reflect.ValueOf(&a).Elem()
			newOffset, err := d.decodeSlice(size, offset, rv, depth)
			result.Set(rv)
			return newOffset, err
		}
	}
	return 0, newUnmarshalTypeError("array", result.Type())
}
+
+func (d *decoder) unmarshalString(size uint, offset uint, result reflect.Value) (uint, error) {
+ value, newOffset, err := d.decodeString(size, offset)
+
+ if err != nil {
+ return 0, err
+ }
+ switch result.Kind() {
+ case reflect.String:
+ result.SetString(value)
+ return newOffset, nil
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+
+}
+
// unmarshalUint decodes an unsigned field of uintType (16, 32 or 64) bits
// into any integer kind that can hold it, or an empty interface.
func (d *decoder) unmarshalUint(size uint, offset uint, result reflect.Value, uintType uint) (uint, error) {
	if size > uintType/8 {
		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint%v size of %v)", uintType, size)
	}

	value, newOffset, err := d.decodeUint(size, offset)
	if err != nil {
		return 0, err
	}

	switch result.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		n := int64(value)
		if !result.OverflowInt(n) {
			result.SetInt(n)
			return newOffset, nil
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		if !result.OverflowUint(value) {
			result.SetUint(value)
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
+
// bigIntType is cached so unmarshalUint128 can cheaply test for big.Int
// targets.
var bigIntType = reflect.TypeOf(big.Int{})

// unmarshalUint128 decodes a uint128 field into a big.Int value or an empty
// interface (which receives a *big.Int).
func (d *decoder) unmarshalUint128(size uint, offset uint, result reflect.Value) (uint, error) {
	if size > 16 {
		return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint128 size of %v)", size)
	}
	value, newOffset, err := d.decodeUint128(size, offset)
	if err != nil {
		return 0, err
	}

	switch result.Kind() {
	case reflect.Struct:
		if result.Type() == bigIntType {
			result.Set(reflect.ValueOf(*value))
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
+
// decodeBool reads a bool: the value lives in the size bits, so no payload
// bytes are consumed.
func (d *decoder) decodeBool(size uint, offset uint) (bool, uint, error) {
	return size != 0, offset, nil
}

// decodeBytes copies size bytes out of the buffer so the caller does not
// retain a view into the (possibly memory-mapped) database.
// Callers must have bounds-checked offset+size (see decodeFromType).
func (d *decoder) decodeBytes(size uint, offset uint) ([]byte, uint, error) {
	newOffset := offset + size
	bytes := make([]byte, size)
	copy(bytes, d.buffer[offset:newOffset])
	return bytes, newOffset, nil
}

// decodeFloat64 reads a big-endian IEEE-754 double. Bounds are checked by
// the caller (decodeFromType).
func (d *decoder) decodeFloat64(size uint, offset uint) (float64, uint, error) {
	newOffset := offset + size
	bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
	return math.Float64frombits(bits), newOffset, nil
}

// decodeFloat32 reads a big-endian IEEE-754 float. Bounds are checked by
// the caller (decodeFromType).
func (d *decoder) decodeFloat32(size uint, offset uint) (float32, uint, error) {
	newOffset := offset + size
	bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
	return math.Float32frombits(bits), newOffset, nil
}

// decodeInt reads a big-endian signed value of up to 4 bytes. Shorter
// encodings have the high bytes implicitly zero, so negative values always
// use the full 4 bytes.
func (d *decoder) decodeInt(size uint, offset uint) (int, uint, error) {
	newOffset := offset + size
	var val int32
	for _, b := range d.buffer[offset:newOffset] {
		val = (val << 8) | int32(b)
	}
	return int(val), newOffset, nil
}
+
// decodeMap decodes size key/value pairs into result, which must be an
// (addressable) map with string-compatible keys. A nil map is allocated.
func (d *decoder) decodeMap(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	if result.IsNil() {
		result.Set(reflect.MakeMap(result.Type()))
	}

	for i := uint(0); i < size; i++ {
		var key []byte
		var err error
		key, offset, err = d.decodeKey(offset)

		if err != nil {
			return 0, err
		}

		// Decode the value into a fresh element so SetMapIndex gets a
		// fully-formed value.
		value := reflect.New(result.Type().Elem())
		offset, err = d.decode(offset, value, depth)
		if err != nil {
			return 0, err
		}
		result.SetMapIndex(reflect.ValueOf(string(key)), value.Elem())
	}
	return offset, nil
}
+
// decodePointer decodes a pointer field and returns the target offset within
// the data section plus the offset just past the pointer. Per the spec, bits
// 3-4 of the size field select a pointer width of 1-4 bytes; for widths 1-3
// the low 3 bits contribute high-order value bits, and widths 2/3 carry
// fixed biases (2048 / 526336) so ranges do not overlap.
func (d *decoder) decodePointer(
	size uint,
	offset uint,
) (uint, uint, error) {
	pointerSize := ((size >> 3) & 0x3) + 1
	newOffset := offset + pointerSize
	if newOffset > uint(len(d.buffer)) {
		return 0, 0, newOffsetError()
	}
	pointerBytes := d.buffer[offset:newOffset]
	var prefix uint
	if pointerSize == 4 {
		prefix = 0
	} else {
		prefix = uint(size & 0x7)
	}
	unpacked := uintFromBytes(prefix, pointerBytes)

	var pointerValueOffset uint
	switch pointerSize {
	case 1:
		pointerValueOffset = 0
	case 2:
		pointerValueOffset = 2048
	case 3:
		pointerValueOffset = 526336
	case 4:
		pointerValueOffset = 0
	}

	pointer := unpacked + pointerValueOffset

	return pointer, newOffset, nil
}
+
+func (d *decoder) decodeSlice(
+ size uint,
+ offset uint,
+ result reflect.Value,
+ depth int,
+) (uint, error) {
+ result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
+ for i := 0; i < int(size); i++ {
+ var err error
+ offset, err = d.decode(offset, result.Index(i), depth)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return offset, nil
+}
+
// decodeString copies size bytes into a new string. Bounds are checked by
// the caller (decodeFromType).
func (d *decoder) decodeString(size uint, offset uint) (string, uint, error) {
	newOffset := offset + size
	return string(d.buffer[offset:newOffset]), newOffset, nil
}
+
// fieldsType caches, per struct type, the mapping from database key name to
// field index plus the indices of embedded (anonymous) fields.
type fieldsType struct {
	namedFields     map[string]int
	anonymousFields []int
}

var (
	// fieldMap caches reflection results across decodes; guarded by
	// fieldMapMu.
	fieldMap   = map[reflect.Type]*fieldsType{}
	fieldMapMu sync.RWMutex
)
+
// decodeStruct decodes size key/value pairs into the fields of result,
// matching keys by field name or `maxminddb` tag (a tag of "-" skips the
// field). Keys with no matching field are skipped cheaply via
// nextValueOffset. Embedded structs are filled by re-decoding the same map
// into each anonymous field.
func (d *decoder) decodeStruct(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	resultType := result.Type()

	// Check-then-build without re-checking under the write lock: two
	// goroutines may build the same entry concurrently; the results are
	// identical, so the duplicated work is benign.
	fieldMapMu.RLock()
	fields, ok := fieldMap[resultType]
	fieldMapMu.RUnlock()
	if !ok {
		numFields := resultType.NumField()
		namedFields := make(map[string]int, numFields)
		var anonymous []int
		for i := 0; i < numFields; i++ {
			field := resultType.Field(i)

			fieldName := field.Name
			if tag := field.Tag.Get("maxminddb"); tag != "" {
				if tag == "-" {
					continue
				}
				fieldName = tag
			}
			if field.Anonymous {
				anonymous = append(anonymous, i)
				continue
			}
			namedFields[fieldName] = i
		}
		fieldMapMu.Lock()
		fields = &fieldsType{namedFields, anonymous}
		fieldMap[resultType] = fields
		fieldMapMu.Unlock()
	}

	// This fills in embedded structs
	for _, i := range fields.anonymousFields {
		_, err := d.unmarshalMap(size, offset, result.Field(i), depth)
		if err != nil {
			return 0, err
		}
	}

	// This handles named fields
	for i := uint(0); i < size; i++ {
		var (
			err error
			key []byte
		)
		key, offset, err = d.decodeKey(offset)
		if err != nil {
			return 0, err
		}
		// The string() does not create a copy due to this compiler
		// optimization: https://github.com/golang/go/issues/3512
		j, ok := fields.namedFields[string(key)]
		if !ok {
			offset, err = d.nextValueOffset(offset, 1)
			if err != nil {
				return 0, err
			}
			continue
		}

		offset, err = d.decode(offset, result.Field(j), depth)
		if err != nil {
			return 0, err
		}
	}
	return offset, nil
}
+
// decodeUint reads a big-endian unsigned value of up to 8 bytes. Bounds are
// checked by the caller (decodeFromType).
func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint, error) {
	newOffset := offset + size
	bytes := d.buffer[offset:newOffset]

	var val uint64
	for _, b := range bytes {
		val = (val << 8) | uint64(b)
	}
	return val, newOffset, nil
}

// decodeUint128 reads a big-endian unsigned value of up to 16 bytes into a
// big.Int. Bounds are checked by the caller (decodeFromType).
func (d *decoder) decodeUint128(size uint, offset uint) (*big.Int, uint, error) {
	newOffset := offset + size
	val := new(big.Int)
	val.SetBytes(d.buffer[offset:newOffset])

	return val, newOffset, nil
}
+
// uintFromBytes folds big-endian bytes into an unsigned integer, seeding the
// accumulator with prefix (used where high-order bits come from elsewhere,
// e.g. pointer decoding and 28-bit search-tree records).
func uintFromBytes(prefix uint, uintBytes []byte) uint {
	result := prefix
	for i := 0; i < len(uintBytes); i++ {
		result = result<<8 | uint(uintBytes[i])
	}
	return result
}
+
// decodeKey decodes a map key into []byte slice. We use a []byte so that we
// can take advantage of https://github.com/golang/go/issues/3512 to avoid
// copying the bytes when decoding a struct. Previously, we achieved this by
// using unsafe.
//
// Keys may be stored behind a pointer; in that case the returned offset is
// the position after the pointer, while the key bytes come from the target.
// The returned slice aliases d.buffer and must not be retained or modified.
func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
	typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
	if err != nil {
		return nil, 0, err
	}
	if typeNum == _Pointer {
		pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
		if err != nil {
			return nil, 0, err
		}
		key, _, err := d.decodeKey(pointer)
		return key, ptrOffset, err
	}
	if typeNum != _String {
		return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
	}
	newOffset := dataOffset + size
	if newOffset > uint(len(d.buffer)) {
		return nil, 0, newOffsetError()
	}
	return d.buffer[dataOffset:newOffset], newOffset, nil
}
+
+// This function is used to skip ahead to the next value without decoding
+// the one at the offset passed in. The size bits have different meanings for
+// different data types
+func (d *decoder) nextValueOffset(offset uint, numberToSkip uint) (uint, error) {
+ if numberToSkip == 0 {
+ return offset, nil
+ }
+ typeNum, size, offset, err := d.decodeCtrlData(offset)
+ if err != nil {
+ return 0, err
+ }
+ switch typeNum {
+ case _Pointer:
+ _, offset, err = d.decodePointer(size, offset)
+ if err != nil {
+ return 0, err
+ }
+ case _Map:
+ numberToSkip += 2 * size
+ case _Slice:
+ numberToSkip += size
+ case _Bool:
+ default:
+ offset += size
+ }
+ return d.nextValueOffset(offset, numberToSkip-1)
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/errors.go b/vendor/github.com/oschwald/maxminddb-golang/errors.go
new file mode 100644
index 0000000..1327800
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/errors.go
@@ -0,0 +1,42 @@
+package maxminddb
+
+import (
+ "fmt"
+ "reflect"
+)
+
// InvalidDatabaseError is returned when the database contains invalid data
// and cannot be parsed.
type InvalidDatabaseError struct {
	message string
}

// newOffsetError reports an attempt to read past the end of the database
// buffer.
func newOffsetError() InvalidDatabaseError {
	return InvalidDatabaseError{"unexpected end of database"}
}

// newInvalidDatabaseError builds an InvalidDatabaseError with a
// Sprintf-formatted message.
func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError {
	return InvalidDatabaseError{fmt.Sprintf(format, args...)}
}

// Error implements the error interface.
func (e InvalidDatabaseError) Error() string {
	return e.message
}
+
// UnmarshalTypeError is returned when the value in the database cannot be
// assigned to the specified data type.
type UnmarshalTypeError struct {
	Value string       // stringified copy of the database value that caused the error
	Type  reflect.Type // type of the value that could not be assign to
}

// newUnmarshalTypeError stringifies value (via %v) and pairs it with the
// target type it could not be assigned to.
func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError {
	return UnmarshalTypeError{
		Value: fmt.Sprintf("%v", value),
		Type:  rType,
	}
}

// Error implements the error interface.
func (e UnmarshalTypeError) Error() string {
	return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String())
}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go b/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
new file mode 100644
index 0000000..d898d25
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
@@ -0,0 +1,15 @@
+// +build !windows,!appengine
+
+package maxminddb
+
+import (
+ "golang.org/x/sys/unix"
+)
+
// mmap maps length bytes of the file referred to by fd into memory,
// read-only and shared.
func mmap(fd int, length int) (data []byte, err error) {
	return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
}

// munmap releases a mapping previously returned by mmap.
func munmap(b []byte) (err error) {
	return unix.Munmap(b)
}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go b/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
new file mode 100644
index 0000000..661250e
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
@@ -0,0 +1,85 @@
+// +build windows,!appengine
+
+package maxminddb
+
+// Windows support largely borrowed from mmap-go.
+//
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+import (
+ "errors"
+ "os"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
// memoryMap is a []byte whose backing storage is a Windows file-mapping
// view; header() exposes its slice header for manual construction.
type memoryMap []byte

// handleMap tracks the file-mapping handle for each mapped base address so
// munmap can close it; guarded by handleLock.
var handleLock sync.Mutex
var handleMap = map[uintptr]windows.Handle{}
+
// mmap maps length bytes of the file whose descriptor is fd into memory
// read-only, recording the file-mapping handle so munmap can close it.
func mmap(fd int, length int) (data []byte, err error) {
	h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
		uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
	if h == 0 {
		return nil, os.NewSyscallError("CreateFileMapping", errno)
	}

	addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
		0, uintptr(length))
	if addr == 0 {
		return nil, os.NewSyscallError("MapViewOfFile", errno)
	}
	handleLock.Lock()
	handleMap[addr] = h
	handleLock.Unlock()

	// Build the []byte manually from the mapped address.
	m := memoryMap{}
	dh := m.header()
	dh.Data = addr
	dh.Len = length
	dh.Cap = dh.Len

	return m, nil
}
+
// header returns the mutable slice header underlying m.
func (m *memoryMap) header() *reflect.SliceHeader {
	return (*reflect.SliceHeader)(unsafe.Pointer(m))
}

// flush asks the OS to write back the view at addr; wraps FlushViewOfFile.
func flush(addr, len uintptr) error {
	errno := windows.FlushViewOfFile(addr, len)
	return os.NewSyscallError("FlushViewOfFile", errno)
}
+
+func munmap(b []byte) (err error) {
+ m := memoryMap(b)
+ dh := m.header()
+
+ addr := dh.Data
+ length := uintptr(dh.Len)
+
+ flush(addr, length)
+ err = windows.UnmapViewOfFile(addr)
+ if err != nil {
+ return err
+ }
+
+ handleLock.Lock()
+ defer handleLock.Unlock()
+ handle, ok := handleMap[addr]
+ if !ok {
+ // should be impossible; we would've errored above
+ return errors.New("unknown base address")
+ }
+ delete(handleMap, addr)
+
+ e := windows.CloseHandle(windows.Handle(handle))
+ return os.NewSyscallError("CloseHandle", e)
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader.go b/vendor/github.com/oschwald/maxminddb-golang/reader.go
new file mode 100644
index 0000000..97b9607
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader.go
@@ -0,0 +1,259 @@
+package maxminddb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "reflect"
+)
+
const (
	// NotFound is returned by LookupOffset when a matched root record offset
	// cannot be found.
	NotFound = ^uintptr(0)

	// dataSectionSeparatorSize is the length of the separator between the
	// search tree and the data section (16 bytes per the MaxMind DB spec).
	dataSectionSeparatorSize = 16
)

// metadataStartMarker precedes the metadata section; FromBytes scans the
// file backwards for its last occurrence.
var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")
+
// Reader holds the data corresponding to the MaxMind DB file. Its only public
// field is Metadata, which contains the metadata from the MaxMind DB file.
type Reader struct {
	hasMappedFile bool     // NOTE(review): presumably set when Open memory-maps; Open is defined in reader_other.go
	buffer        []byte   // the entire database file; nil after Close
	decoder       decoder  // decoder scoped to the data section only
	Metadata      Metadata // decoded metadata section
	ipv4Start     uint     // node where IPv4 lookups start (see startNode)
}
+
// Metadata holds the metadata decoded from the MaxMind DB file. In
// particular it has the format version, the build time as Unix epoch time,
// the database type and description, the IP version supported, and a slice
// of the natural languages included.
type Metadata struct {
	BinaryFormatMajorVersion uint              `maxminddb:"binary_format_major_version"`
	BinaryFormatMinorVersion uint              `maxminddb:"binary_format_minor_version"`
	BuildEpoch               uint              `maxminddb:"build_epoch"`
	DatabaseType             string            `maxminddb:"database_type"`
	Description              map[string]string `maxminddb:"description"`
	IPVersion                uint              `maxminddb:"ip_version"`
	Languages                []string          `maxminddb:"languages"`
	NodeCount                uint              `maxminddb:"node_count"`
	RecordSize               uint              `maxminddb:"record_size"`
}
+
+// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
+// a Reader structure or an error.
+func FromBytes(buffer []byte) (*Reader, error) {
+ metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
+
+ if metadataStart == -1 {
+ return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
+ }
+
+ metadataStart += len(metadataStartMarker)
+ metadataDecoder := decoder{buffer[metadataStart:]}
+
+ var metadata Metadata
+
+ rvMetdata := reflect.ValueOf(&metadata)
+ _, err := metadataDecoder.decode(0, rvMetdata, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
+ dataSectionStart := searchTreeSize + dataSectionSeparatorSize
+ dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
+ if dataSectionStart > dataSectionEnd {
+ return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
+ }
+ d := decoder{
+ buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
+ }
+
+ reader := &Reader{
+ buffer: buffer,
+ decoder: d,
+ Metadata: metadata,
+ ipv4Start: 0,
+ }
+
+ reader.ipv4Start, err = reader.startNode()
+
+ return reader, err
+}
+
// startNode returns the search-tree node where IPv4 lookups begin. For an
// IPv6 database it walks 96 zero bits from the root (the prefix under which
// IPv4 addresses are embedded); for any other IP version it is the root, 0.
func (r *Reader) startNode() (uint, error) {
	if r.Metadata.IPVersion != 6 {
		return 0, nil
	}

	nodeCount := r.Metadata.NodeCount

	node := uint(0)
	var err error
	for i := 0; i < 96 && node < nodeCount; i++ {
		node, err = r.readNode(node, 0)
		if err != nil {
			return 0, err
		}
	}
	return node, err
}
+
// Lookup takes an IP address as a net.IP structure and a pointer to the
// result value to Decode into. If the address has no record in the tree,
// result is left unmodified and a nil error is returned.
func (r *Reader) Lookup(ipAddress net.IP, result interface{}) error {
	if r.buffer == nil {
		return errors.New("cannot call Lookup on a closed database")
	}
	pointer, err := r.lookupPointer(ipAddress)
	if pointer == 0 || err != nil {
		return err
	}
	return r.retrieveData(pointer, result)
}
+
// LookupOffset maps an argument net.IP to a corresponding record offset in the
// database. NotFound is returned if no such record is found, and a record may
// otherwise be extracted by passing the returned offset to Decode. LookupOffset
// is an advanced API, which exists to provide clients with a means to cache
// previously-decoded records.
func (r *Reader) LookupOffset(ipAddress net.IP) (uintptr, error) {
	if r.buffer == nil {
		return 0, errors.New("cannot call LookupOffset on a closed database")
	}
	pointer, err := r.lookupPointer(ipAddress)
	if pointer == 0 || err != nil {
		// NotFound for the empty-record case; on error the offset is moot.
		return NotFound, err
	}
	return r.resolveDataPointer(pointer)
}
+
// Decode the record at |offset| into |result|. The result value pointed to
// must be a data value that corresponds to a record in the database. This may
// include a struct representation of the data, a map capable of holding the
// data or an empty interface{} value.
//
// If result is a pointer to a struct, the struct need not include a field
// for every value that may be in the database. If a field is not present in
// the structure, the decoder will not decode that field, reducing the time
// required to decode the record.
//
// As a special case, a struct field of type uintptr will be used to capture
// the offset of the value. Decode may later be used to extract the stored
// value from the offset. MaxMind DBs are highly normalized: for example in
// the City database, all records of the same country will reference a
// single representative record for that country. This uintptr behavior allows
// clients to leverage this normalization in their own sub-record caching.
func (r *Reader) Decode(offset uintptr, result interface{}) error {
	if r.buffer == nil {
		return errors.New("cannot call Decode on a closed database")
	}
	return r.decode(offset, result)
}
+
+func (r *Reader) decode(offset uintptr, result interface{}) error {
+ rv := reflect.ValueOf(result)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return errors.New("result param must be a pointer")
+ }
+
+ _, err := r.decoder.decode(uint(offset), reflect.ValueOf(result), 0)
+ return err
+}
+
// lookupPointer normalizes the address (4-byte form for IPv4-mapped
// addresses), rejects IPv6 lookups against IPv4-only databases, and walks
// the search tree. A returned pointer of 0 means no record exists.
func (r *Reader) lookupPointer(ipAddress net.IP) (uint, error) {
	if ipAddress == nil {
		return 0, errors.New("ipAddress passed to Lookup cannot be nil")
	}

	ipV4Address := ipAddress.To4()
	if ipV4Address != nil {
		ipAddress = ipV4Address
	}
	if len(ipAddress) == 16 && r.Metadata.IPVersion == 4 {
		return 0, fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ipAddress.String())
	}

	return r.findAddressInTree(ipAddress)
}
+
// findAddressInTree walks the binary search tree one address bit at a time
// (most significant bit first). Terminal values: node == NodeCount means no
// record; node > NodeCount is a data-section pointer; exhausting the bits
// while still below NodeCount indicates a corrupt tree.
func (r *Reader) findAddressInTree(ipAddress net.IP) (uint, error) {

	bitCount := uint(len(ipAddress) * 8)

	var node uint
	if bitCount == 32 {
		// IPv4 lookups in an IPv6 tree start below the ::/96 subtree.
		node = r.ipv4Start
	}

	nodeCount := r.Metadata.NodeCount

	for i := uint(0); i < bitCount && node < nodeCount; i++ {
		bit := uint(1) & (uint(ipAddress[i>>3]) >> (7 - (i % 8)))

		var err error
		node, err = r.readNode(node, bit)
		if err != nil {
			return 0, err
		}
	}
	if node == nodeCount {
		// Record is empty
		return 0, nil
	} else if node > nodeCount {
		return node, nil
	}

	return 0, newInvalidDatabaseError("invalid node in search tree")
}
+
// readNode returns the child record (index 0 = left/zero bit, 1 = right/one
// bit) of the given search-tree node. Each node stores two RecordSize-bit
// records; for 28-bit records the middle byte of the 7-byte node holds the
// high nibble of each side (high half for record 0, low half for record 1).
func (r *Reader) readNode(nodeNumber uint, index uint) (uint, error) {
	RecordSize := r.Metadata.RecordSize

	// Each node occupies RecordSize/4 bytes (two records of RecordSize bits).
	baseOffset := nodeNumber * RecordSize / 4

	var nodeBytes []byte
	var prefix uint
	switch RecordSize {
	case 24:
		offset := baseOffset + index*3
		nodeBytes = r.buffer[offset : offset+3]
	case 28:
		prefix = uint(r.buffer[baseOffset+3])
		if index != 0 {
			prefix &= 0x0F
		} else {
			prefix = (0xF0 & prefix) >> 4
		}
		offset := baseOffset + index*4
		nodeBytes = r.buffer[offset : offset+3]
	case 32:
		offset := baseOffset + index*4
		nodeBytes = r.buffer[offset : offset+4]
	default:
		return 0, newInvalidDatabaseError("unknown record size: %d", RecordSize)
	}
	return uintFromBytes(prefix, nodeBytes), nil
}
+
// retrieveData resolves a search-tree pointer to a data-section offset and
// decodes the record there into result.
func (r *Reader) retrieveData(pointer uint, result interface{}) error {
	offset, err := r.resolveDataPointer(pointer)
	if err != nil {
		return err
	}
	return r.decode(offset, result)
}
+
// resolveDataPointer converts a search-tree record value into an offset
// within the data section (tree values above NodeCount point past the node
// count plus the 16-byte separator). If a corrupt value is below that base,
// the unsigned subtraction wraps to a huge number, which the range check
// below rejects.
func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
	var resolved = uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)

	if resolved > uintptr(len(r.buffer)) {
		return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
	}
	return resolved, nil
}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go b/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
new file mode 100644
index 0000000..d200f9f
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
@@ -0,0 +1,28 @@
+// +build appengine
+
+package maxminddb
+
+import "io/ioutil"
+
+// Open takes a string path to a MaxMind DB file and returns a Reader
+// structure or an error. The database file is opened using a memory map,
+// except on Google App Engine where mmap is not supported; there the database
+// is loaded into memory. Use the Close method on the Reader object to return
+// the resources to the system.
+func Open(file string) (*Reader, error) {
+	// App Engine build: mmap is unavailable, so read the whole file into memory.
+	bytes, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+
+	return FromBytes(bytes)
+}
+
+// Close unmaps the database file from virtual memory and returns the
+// resources to the system. If called on a Reader opened using FromBytes
+// or Open on Google App Engine, this method sets the underlying buffer
+// to nil, returning the resources to the system.
+func (r *Reader) Close() error {
+	// Nothing is memory-mapped in this build; dropping the buffer reference
+	// is all that is required.
+	r.buffer = nil
+	return nil
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_other.go b/vendor/github.com/oschwald/maxminddb-golang/reader_other.go
new file mode 100644
index 0000000..2a89fa6
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader_other.go
@@ -0,0 +1,63 @@
+// +build !appengine
+
+package maxminddb
+
+import (
+ "os"
+ "runtime"
+)
+
+// Open takes a string path to a MaxMind DB file and returns a Reader
+// structure or an error. The database file is opened using a memory map,
+// except on Google App Engine where mmap is not supported; there the database
+// is loaded into memory. Use the Close method on the Reader object to return
+// the resources to the system.
+func Open(file string) (*Reader, error) {
+	mapFile, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	// NOTE(review): err is a plain local, not a named return value, so this
+	// deferred assignment cannot change what Open returns; a failure from
+	// mapFile.Close is effectively ignored. Confirm whether named results
+	// were intended.
+	defer func() {
+		if rerr := mapFile.Close(); rerr != nil {
+			err = rerr
+		}
+	}()
+
+	stats, err := mapFile.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	fileSize := int(stats.Size())
+	// Map the file into memory; the mapping is released by Close (or by the
+	// finalizer installed below).
+	mmap, err := mmap(int(mapFile.Fd()), fileSize)
+	if err != nil {
+		return nil, err
+	}
+
+	reader, err := FromBytes(mmap)
+	if err != nil {
+		if err2 := munmap(mmap); err2 != nil {
+			// failing to unmap the file is probably the more severe error
+			return nil, err2
+		}
+		return nil, err
+	}
+
+	// The finalizer guards against callers that never call Close; Close
+	// clears it again so the unmap cannot run twice.
+	reader.hasMappedFile = true
+	runtime.SetFinalizer(reader, (*Reader).Close)
+	return reader, err
+}
+
+// Close unmaps the database file from virtual memory and returns the
+// resources to the system. If called on a Reader opened using FromBytes
+// or Open on Google App Engine, this method does nothing.
+func (r *Reader) Close() error {
+	var err error
+	if r.hasMappedFile {
+		// Remove the finalizer first so it cannot trigger a second unmap.
+		runtime.SetFinalizer(r, nil)
+		r.hasMappedFile = false
+		err = munmap(r.buffer)
+	}
+	r.buffer = nil
+	return err
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/traverse.go b/vendor/github.com/oschwald/maxminddb-golang/traverse.go
new file mode 100644
index 0000000..f9b443c
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/traverse.go
@@ -0,0 +1,108 @@
+package maxminddb
+
+import "net"
+
+// Internal structure used to keep track of nodes we still need to visit.
+type netNode struct {
+	ip net.IP // network address accumulated along the path from the root
+	bit uint // number of leading bits of ip fixed so far (the prefix length)
+	pointer uint // search-tree node number or data-section record value
+}
+
+// Networks represents a set of subnets that we are iterating over.
+type Networks struct {
+	reader *Reader
+	nodes []netNode // Nodes we still have to visit.
+	lastNode netNode // Most recent data-bearing node found by Next.
+	err error // First error hit during iteration; surfaced via Err.
+}
+
+// Networks returns an iterator that can be used to traverse all networks in
+// the database.
+//
+// Please note that a MaxMind DB may map IPv4 networks into several locations
+// in an IPv6 database. This iterator will iterate over all of these
+// locations separately.
+func (r *Reader) Networks() *Networks {
+	// Seed the traversal stack with the root node and an all-zero address
+	// sized for the database's IP version (4 or 16 bytes).
+	s := 4
+	if r.Metadata.IPVersion == 6 {
+		s = 16
+	}
+	return &Networks{
+		reader: r,
+		nodes: []netNode{
+			{
+				ip: make(net.IP, s),
+			},
+		},
+	}
+}
+
+// Next prepares the next network for reading with the Network method. It
+// returns true if there is another network to be processed and false if there
+// are no more networks or if there is an error.
+func (n *Networks) Next() bool {
+	// Depth-first traversal: pop a pending node, walk its left edges, and
+	// push each right sibling onto the stack for a later visit.
+	for len(n.nodes) > 0 {
+		node := n.nodes[len(n.nodes)-1]
+		n.nodes = n.nodes[:len(n.nodes)-1]
+
+		for {
+			if node.pointer < n.reader.Metadata.NodeCount {
+				// Interior node: the right child's network is this network
+				// with the next bit set.
+				ipRight := make(net.IP, len(node.ip))
+				copy(ipRight, node.ip)
+				if len(ipRight) <= int(node.bit>>3) {
+					n.err = newInvalidDatabaseError(
+						"invalid search tree at %v/%v", ipRight, node.bit)
+					return false
+				}
+				ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))
+
+				rightPointer, err := n.reader.readNode(node.pointer, 1)
+				if err != nil {
+					n.err = err
+					return false
+				}
+
+				node.bit++
+				n.nodes = append(n.nodes, netNode{
+					pointer: rightPointer,
+					ip: ipRight,
+					bit: node.bit,
+				})
+
+				// Continue down the left child without pushing it.
+				node.pointer, err = n.reader.readNode(node.pointer, 0)
+				if err != nil {
+					n.err = err
+					return false
+				}
+
+			} else if node.pointer > n.reader.Metadata.NodeCount {
+				// Data record: remember it so Network can decode it.
+				n.lastNode = node
+				return true
+			} else {
+				// Pointer equals the node count: empty branch, nothing here.
+				break
+			}
+		}
+	}
+
+	return false
+}
+
+// Network returns the current network or an error if there is a problem
+// decoding the data for the network. It takes a pointer to a result value to
+// decode the network's data into.
+func (n *Networks) Network(result interface{}) (*net.IPNet, error) {
+	if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
+		return nil, err
+	}
+
+	// lastNode.bit is the number of fixed leading bits, i.e. the CIDR
+	// prefix length of the current network.
+	return &net.IPNet{
+		IP: n.lastNode.ip,
+		Mask: net.CIDRMask(int(n.lastNode.bit), len(n.lastNode.ip)*8),
+	}, nil
+}
+
+// Err returns an error, if any, that was encountered during iteration.
+func (n *Networks) Err() error {
+	return n.err
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/verifier.go b/vendor/github.com/oschwald/maxminddb-golang/verifier.go
new file mode 100644
index 0000000..ace9d35
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/verifier.go
@@ -0,0 +1,185 @@
+package maxminddb
+
+import "reflect"
+
+// verifier wraps the Reader whose database is being validated.
+type verifier struct {
+	reader *Reader
+}
+
+// Verify checks that the database is valid. It validates the search tree,
+// the data section, and the metadata section. This verifier is stricter than
+// the specification and may return errors on databases that are readable.
+func (r *Reader) Verify() error {
+	// Validate the metadata first: the database checks below rely on
+	// metadata fields such as NodeCount and RecordSize being sane.
+	v := verifier{r}
+	if err := v.verifyMetadata(); err != nil {
+		return err
+	}
+
+	return v.verifyDatabase()
+}
+
+// verifyMetadata checks each metadata field against the value (or value
+// range) this reader requires, returning a descriptive error on the first
+// mismatch.
+func (v *verifier) verifyMetadata() error {
+	metadata := v.reader.Metadata
+
+	if metadata.BinaryFormatMajorVersion != 2 {
+		return testError(
+			"binary_format_major_version",
+			2,
+			metadata.BinaryFormatMajorVersion,
+		)
+	}
+
+	if metadata.BinaryFormatMinorVersion != 0 {
+		return testError(
+			"binary_format_minor_version",
+			0,
+			metadata.BinaryFormatMinorVersion,
+		)
+	}
+
+	if metadata.DatabaseType == "" {
+		return testError(
+			"database_type",
+			"non-empty string",
+			metadata.DatabaseType,
+		)
+	}
+
+	if len(metadata.Description) == 0 {
+		return testError(
+			"description",
+			"non-empty slice",
+			metadata.Description,
+		)
+	}
+
+	if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
+		return testError(
+			"ip_version",
+			"4 or 6",
+			metadata.IPVersion,
+		)
+	}
+
+	if metadata.RecordSize != 24 &&
+		metadata.RecordSize != 28 &&
+		metadata.RecordSize != 32 {
+		return testError(
+			"record_size",
+			"24, 28, or 32",
+			metadata.RecordSize,
+		)
+	}
+
+	if metadata.NodeCount == 0 {
+		return testError(
+			"node_count",
+			"positive integer",
+			metadata.NodeCount,
+		)
+	}
+	return nil
+}
+
+// verifyDatabase validates the three sections after the metadata: the search
+// tree (collecting every data offset it references), the 16-byte separator,
+// and the data section itself (cross-checked against the collected offsets).
+func (v *verifier) verifyDatabase() error {
+	offsets, err := v.verifySearchTree()
+	if err != nil {
+		return err
+	}
+
+	if err := v.verifyDataSectionSeparator(); err != nil {
+		return err
+	}
+
+	return v.verifyDataSection(offsets)
+}
+
+// verifySearchTree walks every network in the search tree and returns the
+// set of data-section offsets the tree points at, so the data-section check
+// can confirm each record is reachable and each pointer resolves.
+func (v *verifier) verifySearchTree() (map[uint]bool, error) {
+	offsets := make(map[uint]bool)
+
+	it := v.reader.Networks()
+	for it.Next() {
+		offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
+		if err != nil {
+			return nil, err
+		}
+		offsets[uint(offset)] = true
+	}
+	if err := it.Err(); err != nil {
+		return nil, err
+	}
+	return offsets, nil
+}
+
+// verifyDataSectionSeparator checks that the separator between the search
+// tree and the data section consists entirely of zero bytes.
+func (v *verifier) verifyDataSectionSeparator() error {
+	// The search tree occupies NodeCount * RecordSize/4 bytes (two records
+	// per node); the separator immediately follows it.
+	separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
+
+	separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
+
+	for _, b := range separator {
+		if b != 0 {
+			return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
+		}
+	}
+	return nil
+}
+
+// verifyDataSection decodes every record in the data section in sequence and
+// cross-checks the record offsets against the set gathered from the search
+// tree: every record must be pointed to, and every pointer must have a
+// record. This is stricter than the format specification requires.
+func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
+	pointerCount := len(offsets)
+
+	decoder := v.reader.decoder
+
+	var offset uint
+	bufferLen := uint(len(decoder.buffer))
+	for offset < bufferLen {
+		var data interface{}
+		rv := reflect.ValueOf(&data)
+		newOffset, err := decoder.decode(offset, rv, 0)
+		if err != nil {
+			return newInvalidDatabaseError("received decoding error (%v) at offset of %v", err, offset)
+		}
+		// A decode that does not advance would loop forever; treat it as
+		// corruption.
+		if newOffset <= offset {
+			return newInvalidDatabaseError("data section offset unexpectedly went from %v to %v", offset, newOffset)
+		}
+
+		pointer := offset
+
+		// Consume the matching search-tree pointer; a record with no
+		// pointer is unreachable data.
+		if _, ok := offsets[pointer]; ok {
+			delete(offsets, pointer)
+		} else {
+			return newInvalidDatabaseError("found data (%v) at %v that the search tree does not point to", data, pointer)
+		}
+
+		offset = newOffset
+	}
+
+	if offset != bufferLen {
+		return newInvalidDatabaseError(
+			"unexpected data at the end of the data section (last offset: %v, end: %v)",
+			offset,
+			bufferLen,
+		)
+	}
+
+	// Anything left over is a tree pointer with no record behind it.
+	if len(offsets) != 0 {
+		return newInvalidDatabaseError(
+			"found %v pointers (of %v) in the search tree that we did not see in the data section",
+			len(offsets),
+			pointerCount,
+		)
+	}
+	return nil
+}
+
+// testError formats a metadata/validation mismatch for the named field as an
+// InvalidDatabaseError showing the expected and actual values.
+func testError(
+	field string,
+	expected interface{},
+	actual interface{},
+) error {
+	return newInvalidDatabaseError(
+		"%v - Expected: %v Actual: %v",
+		field,
+		expected,
+		actual,
+	)
+}