Vendored Deps

parent 9347c2e4aa
commit 34df547e09

Godeps/Godeps.json (generated, new file, 24 lines)
@@ -0,0 +1,24 @@
{
	"ImportPath": "git.bullercodeworks.com/brian/gime",
	"GoVersion": "go1.8",
	"GodepVersion": "v79",
	"Deps": [
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.3.0-21-gf0d0212",
			"Rev": "f0d021274dede8e672f17a2dbcb997c5f0760c41"
		},
		{
			"ImportPath": "github.com/br0xen/boltease",
			"Rev": "50a16b90567dacab0e724ba3620174a19d80489f"
		},
		{
			"ImportPath": "github.com/pborman/uuid",
			"Rev": "dee7705ef7b324f27ceb85a121c61f2c2e8ce988"
		},
		{
			"ImportPath": "golang.org/x/sys/unix",
			"Rev": "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
		}
	]
}

Godeps/Readme (generated, new file, 5 lines)
@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
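
To make the relationship concrete: application code keeps importing the vendored packages by their canonical paths, and the Go 1.8 toolchain resolves those imports against the `vendor/` tree added in this commit. A minimal sketch, with a made-up database filename for illustration:

```go
package main

import (
	"log"

	"github.com/boltdb/bolt" // satisfied by vendor/github.com/boltdb/bolt, not $GOPATH
)

func main() {
	// "gime.db" is a hypothetical filename; the point is that the import
	// above is resolved from the vendored copy of Bolt.
	db, err := bolt.Open("gime.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```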

vendor/github.com/boltdb/bolt/.gitignore (generated, vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
*.prof
*.test
*.swp
/bin/

vendor/github.com/boltdb/bolt/LICENSE (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013 Ben Johnson

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/boltdb/bolt/Makefile (generated, vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

default: build

race:
	@go test -v -race -test.run="TestSimulate_(100op|1000op)"

# go get github.com/kisielk/errcheck
errcheck:
	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt

test:
	@go test -v -cover .
	@go test -v ./cmd/bolt

.PHONY: fmt test

vendor/github.com/boltdb/bolt/README.md (generated, vendored, new file, 857 lines)
@@ -0,0 +1,857 @@
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
====

Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
fast, and reliable database for projects that don't require a full database
server such as Postgres or MySQL.

Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.

[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/

## Project Status

Bolt is stable, the API is fixed, and the file format is fixed. Full unit
test coverage and randomized black box testing are used to ensure database
consistency and thread safety. Bolt is currently in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.

## Table of Contents

- [Getting Started](#getting-started)
  - [Installing](#installing)
  - [Opening a database](#opening-a-database)
  - [Transactions](#transactions)
    - [Read-write transactions](#read-write-transactions)
    - [Read-only transactions](#read-only-transactions)
    - [Batch read-write transactions](#batch-read-write-transactions)
    - [Managing transactions manually](#managing-transactions-manually)
  - [Using buckets](#using-buckets)
  - [Using key/value pairs](#using-keyvalue-pairs)
  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
  - [Iterating over keys](#iterating-over-keys)
    - [Prefix scans](#prefix-scans)
    - [Range scans](#range-scans)
    - [ForEach()](#foreach)
  - [Nested buckets](#nested-buckets)
  - [Database backups](#database-backups)
  - [Statistics](#statistics)
  - [Read-Only Mode](#read-only-mode)
  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
  - [LevelDB, RocksDB](#leveldb-rocksdb)
  - [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)

## Getting Started

### Installing

To start using Bolt, install Go and run `go get`:

```sh
$ go get github.com/boltdb/bolt/...
```

This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.


### Opening a database

The top-level object in Bolt is a `DB`. It is represented as a single file on
your disk and represents a consistent snapshot of your data.

To open your database, simply use the `bolt.Open()` function:

```go
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open the my.db data file in your current directory.
	// It will be created if it doesn't exist.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	...
}
```

Please note that Bolt obtains a file lock on the data file so multiple processes
cannot open the same database at the same time. Opening an already open Bolt
database will cause it to hang until the other process closes it. To prevent
an indefinite wait you can pass a timeout option to the `Open()` function:

```go
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
```


### Transactions

Bolt allows only one read-write transaction at a time but allows as many
read-only transactions as you want at a time. Each transaction has a consistent
view of the data as it existed when the transaction started.

Individual transactions and all objects created from them (e.g. buckets, keys)
are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating a transaction from the `DB` is thread safe.

Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.


#### Read-write transactions

To start a read-write transaction, you can use the `DB.Update()` function:

```go
err := db.Update(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Inside the closure, you have a consistent view of the database. You commit the
transaction by returning `nil` at the end. You can also rollback the transaction
at any point by returning an error. All database operations are allowed inside
a read-write transaction.

Always check the return error as it will report any disk failures that can cause
your transaction to not complete. If you return an error within your closure
it will be passed through.


#### Read-only transactions

To start a read-only transaction, you can use the `DB.View()` function:

```go
err := db.View(func(tx *bolt.Tx) error {
	...
	return nil
})
```

You also get a consistent view of the database within this closure; however,
no mutating operations are allowed within a read-only transaction. You can only
retrieve buckets, retrieve values, and copy the database within a read-only
transaction.


#### Batch read-write transactions

Each `DB.Update()` waits for disk to commit the writes. This overhead
can be minimized by combining multiple updates with the `DB.Batch()`
function:

```go
err := db.Batch(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Concurrent Batch calls are opportunistically combined into larger
transactions. Batch is only useful when there are multiple goroutines
calling it.

The trade-off is that `Batch` can call the given
function multiple times, if parts of the transaction fail. The
function must be idempotent and side effects must take effect only
after a successful return from `DB.Batch()`.

For example: don't display messages from inside the function, instead
set variables in the enclosing scope:

```go
var id uint64
err := db.Batch(func(tx *bolt.Tx) error {
	// Find last key in bucket, decode as bigendian uint64, increment
	// by one, encode back to []byte, and add new key.
	...
	id = newValue
	return nil
})
if err != nil {
	return ...
}
fmt.Printf("Allocated ID %d\n", id)
```


#### Managing transactions manually

The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
function. These helper functions will start the transaction, execute a function,
and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.

However, sometimes you may want to manually start and end your transactions.
You can use the `DB.Begin()` function directly but **please** be sure to close
the transaction.

```go
// Start a writable transaction.
tx, err := db.Begin(true)
if err != nil {
	return err
}
defer tx.Rollback()

// Use the transaction...
_, err = tx.CreateBucket([]byte("MyBucket"))
if err != nil {
	return err
}

// Commit the transaction and check for error.
if err := tx.Commit(); err != nil {
	return err
}
```

The first argument to `DB.Begin()` is a boolean stating if the transaction
should be writable.


### Using buckets

Buckets are collections of key/value pairs within the database. All keys in a
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
function:

```go
db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucket([]byte("MyBucket"))
	if err != nil {
		return fmt.Errorf("create bucket: %s", err)
	}
	return nil
})
```

You can also create a bucket only if it doesn't exist by using the
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
function for all your top-level buckets after you open your database so you can
guarantee that they exist for future transactions.
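
A rough sketch of that startup pattern (the bucket names here are hypothetical, not taken from this repository):

```go
func setupBuckets(db *bolt.DB) error {
	return db.Update(func(tx *bolt.Tx) error {
		// Create the top-level buckets once so later transactions can
		// assume they exist.
		for _, name := range []string{"users", "events"} {
			if _, err := tx.CreateBucketIfNotExists([]byte(name)); err != nil {
				return fmt.Errorf("create bucket %s: %s", name, err)
			}
		}
		return nil
	})
}
```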

To delete a bucket, simply call the `Tx.DeleteBucket()` function.


### Using key/value pairs

To save a key/value pair to a bucket, use the `Bucket.Put()` function:

```go
db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	err := b.Put([]byte("answer"), []byte("42"))
	return err
})
```

This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
bucket. To retrieve this value, we can use the `Bucket.Get()` function:

```go
db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	v := b.Get([]byte("answer"))
	fmt.Printf("The answer is: %s\n", v)
	return nil
})
```

The `Get()` function does not return an error because its operation is
guaranteed to work (unless there is some kind of system failure). If the key
exists then it will return its byte slice value. If it doesn't exist then it
will return `nil`. It's important to note that you can have a zero-length value
set to a key, which is different from the key not existing at all.

Use the `Bucket.Delete()` function to delete a key from the bucket.

Please note that values returned from `Get()` are only valid while the
transaction is open. If you need to use a value outside of the transaction
then you must use `copy()` to copy it to another byte slice.
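
For example, a value can be copied out like this (a sketch reusing the `MyBucket`/`answer` pair from above):

```go
var answer []byte
err := db.View(func(tx *bolt.Tx) error {
	if v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer")); v != nil {
		// Copy the value so it remains valid after the transaction closes.
		answer = make([]byte, len(v))
		copy(answer, v)
	}
	return nil
})
```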


### Autoincrementing integer for the bucket
By using the `NextSequence()` function, you can let Bolt determine a sequence
which can be used as the unique identifier for your key/value pairs. See the
example below.

```go
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
func (s *Store) CreateUser(u *User) error {
	return s.db.Update(func(tx *bolt.Tx) error {
		// Retrieve the users bucket.
		// This should be created when the DB is first opened.
		b := tx.Bucket([]byte("users"))

		// Generate ID for the user.
		// This returns an error only if the Tx is closed or not writeable.
		// That can't happen in an Update() call so I ignore the error check.
		id, _ := b.NextSequence()
		u.ID = int(id)

		// Marshal user data into bytes.
		buf, err := json.Marshal(u)
		if err != nil {
			return err
		}

		// Persist bytes to users bucket.
		return b.Put(itob(u.ID), buf)
	})
}

// itob returns an 8-byte big endian representation of v.
func itob(v int) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(v))
	return b
}

type User struct {
	ID int
	...
}
```

### Iterating over keys

Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
iteration over these keys extremely fast. To iterate over keys we'll use a
`Cursor`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	c := b.Cursor()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

The cursor allows you to move to a specific point in the list of keys and move
forward or backward through the keys one at a time.

The following functions are available on the cursor:

```
First()  Move to the first key.
Last()   Move to the last key.
Seek()   Move to a specific key.
Next()   Move to the next key.
Prev()   Move to the previous key.
```

Each of those functions has a return signature of `(key []byte, value []byte)`.
When you have iterated to the end of the cursor then `Next()` will return a
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
before calling `Next()` or `Prev()`. If you do not seek to a position then
these functions will return a `nil` key.

During iteration, if the key is non-`nil` but the value is `nil`, that means
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
access the sub-bucket.
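
As a further illustration, the same bucket can be walked in reverse by seeking to the end with `Last()` and stepping back with `Prev()` (a sketch reusing `MyBucket` from above):

```go
db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	// Iterate from the last key back to the first.
	for k, v := c.Last(); k != nil; k, v = c.Prev() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}
	return nil
})
```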


#### Prefix scans

To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	prefix := []byte("1234")
	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

#### Range scans

Another common use case is scanning over a range such as a time range. If you
use a sortable time encoding such as RFC3339 then you can query a specific
date range like this:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume our events bucket exists and has RFC3339 encoded time keys.
	c := tx.Bucket([]byte("Events")).Cursor()

	// Our time range spans the 90's decade.
	min := []byte("1990-01-01T00:00:00Z")
	max := []byte("2000-01-01T00:00:00Z")

	// Iterate over the 90's.
	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
		fmt.Printf("%s: %s\n", k, v)
	}

	return nil
})
```

Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.


#### ForEach()

You can also use the function `ForEach()` if you know you'll be iterating over
all the keys in a bucket:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	b.ForEach(func(k, v []byte) error {
		fmt.Printf("key=%s, value=%s\n", k, v)
		return nil
	})
	return nil
})
```

Please note that keys and values in `ForEach()` are only valid while
the transaction is open. If you need to use a key or value outside of
the transaction, you must use `copy()` to copy it to another byte
slice.

### Nested buckets

You can also store a bucket in a key to create nested buckets. The API is the
same as the bucket management API on the `DB` object:

```go
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```
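
A brief sketch of how nesting might be used (both bucket names are hypothetical):

```go
db.Update(func(tx *bolt.Tx) error {
	// Create (or fetch) a parent bucket, then a child bucket inside it.
	parent, err := tx.CreateBucketIfNotExists([]byte("accounts"))
	if err != nil {
		return err
	}
	settings, err := parent.CreateBucketIfNotExists([]byte("settings"))
	if err != nil {
		return err
	}
	return settings.Put([]byte("theme"), []byte("dark"))
})
```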


### Database backups

Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
function to write a consistent view of the database to a writer. If you call
this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.

By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.

One common use case is to backup over HTTP so you can use tools like `cURL` to
do database backups:

```go
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
	err := db.View(func(tx *bolt.Tx) error {
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
		_, err := tx.WriteTo(w)
		return err
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```

Then you can backup using this command:

```sh
$ curl http://localhost/backup > my.db
```

Or you can open your browser to `http://localhost/backup` and it will download
automatically.

If you want to backup to another file you can use the `Tx.CopyFile()` helper
function.
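
For instance, a file-to-file backup from inside a read-only transaction might look like this (the destination path is arbitrary):

```go
err := db.View(func(tx *bolt.Tx) error {
	// Write a consistent copy of the database to a separate file.
	return tx.CopyFile("my.db.backup", 0600)
})
```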


### Statistics

The database keeps a running count of many of the internal operations it
performs so you can better understand what's going on. By grabbing a snapshot
of these stats at two points in time we can see what operations were performed
in that time range.

For example, we could start a goroutine to log stats every 10 seconds:

```go
go func() {
	// Grab the initial stats.
	prev := db.Stats()

	for {
		// Wait for 10s.
		time.Sleep(10 * time.Second)

		// Grab the current stats and diff them.
		stats := db.Stats()
		diff := stats.Sub(&prev)

		// Encode stats to JSON and print to STDERR.
		json.NewEncoder(os.Stderr).Encode(diff)

		// Save stats for the next loop.
		prev = stats
	}
}()
```

It's also useful to pipe these stats to a service such as statsd for monitoring
or to provide an HTTP endpoint that will perform a fixed-length sample.
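
A minimal snapshot endpoint might look like the following sketch; it serves the raw counters rather than the fixed-length sample described above, and assumes a package-level `db` as in the backup example:

```go
func statsHandler(w http.ResponseWriter, req *http.Request) {
	// Serve a point-in-time snapshot of the internal counters as JSON.
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(db.Stats())
}
```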


### Read-Only Mode

Sometimes it is useful to create a shared, read-only Bolt database. To do this,
set the `Options.ReadOnly` flag when opening your database. Read-only mode
uses a shared lock to allow multiple processes to read from the database but
it will block any processes from opening the database in read-write mode.

```go
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
if err != nil {
	log.Fatal(err)
}
```

### Mobile Use (iOS/Android)

Bolt is able to run on mobile devices by leveraging the binding feature of the
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
contain your database logic and a reference to a `*bolt.DB` with an initializing
constructor that takes in a filepath where the database file will be stored.
Neither Android nor iOS require extra permissions or cleanup from using this method.

```go
func NewBoltDB(filepath string) *BoltDB {
	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}

	return &BoltDB{db}
}

type BoltDB struct {
	db *bolt.DB
	...
}

func (b *BoltDB) Path() string {
	return b.db.Path()
}

func (b *BoltDB) Close() {
	b.db.Close()
}
```

Database logic should be defined as methods on this wrapper struct.

To initialize this struct from the native language (both platforms now sync
their local storage to the cloud; these snippets disable that functionality for the
database file):

#### Android

```java
String path;
if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP){
    path = getNoBackupFilesDir().getAbsolutePath();
} else {
    path = getFilesDir().getAbsolutePath();
}
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
```

#### iOS

```objc
- (void)demo {
    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
                                                          NSUserDomainMask,
                                                          YES) objectAtIndex:0];
    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
    [self addSkipBackupAttributeToItemAtPath:demo.path];
    //Some DB Logic would go here
    [demo close];
}

- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
{
    NSURL* URL= [NSURL fileURLWithPath: filePathString];
    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);

    NSError *error = nil;
    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
    if(!success){
        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
    }
    return success;
}

```

## Resources

For more information on getting started with Bolt, check out the following articles:

* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville


## Comparison with other databases

### Postgres, MySQL, & other relational databases

Relational databases structure data into rows and are only accessible through
the use of SQL. This approach provides flexibility in how you store and query
your data but also incurs overhead in parsing and planning SQL statements. Bolt
accesses all data by a byte slice key. This makes Bolt fast to read and write
data by key but provides no built-in support for joining values together.

Most relational databases (with the exception of SQLite) are standalone servers
that run separately from your application. This gives your systems
flexibility to connect multiple application servers to a single database
server but also adds overhead in serializing and transporting data over the
network. Bolt runs as a library included in your application so all data access
has to go through your application's process. This brings data closer to your
application but limits multi-process access to the data.


### LevelDB, RocksDB

LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
they are libraries bundled into the application, however, their underlying
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
random writes by using a write ahead log and multi-tiered, sorted files called
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
have trade-offs.

If you require a high random write throughput (>10,000 w/sec) or you need to use
spinning disks then LevelDB could be a good choice. If your application is
read-heavy or does a lot of range scans then Bolt could be a good choice.

One other important consideration is that LevelDB does not have transactions.
It supports batch writing of key/value pairs and it supports read snapshots
but it will not give you the ability to do a compare-and-swap operation safely.
Bolt supports fully serializable ACID transactions.


### LMDB

Bolt was originally a port of LMDB so it is architecturally similar. Both use
a B+tree, have ACID semantics with fully serializable transactions, and support
lock-free MVCC using a single writer and multiple readers.

The two projects have somewhat diverged. LMDB heavily focuses on raw performance
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
several unsafe actions such as direct writes for the sake of performance. Bolt
opts to disallow actions which can leave the database in a corrupted state. The
only exception to this in Bolt is `DB.NoSync`.

There are also a few differences in API. LMDB requires a maximum mmap size when
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
automatically. LMDB overloads the getter and setter functions with multiple
flags whereas Bolt splits these specialized cases into their own functions.


## Caveats & Limitations

It's important to pick the right tool for the job and Bolt is no exception.
Here are a few things to note when evaluating and using Bolt:

* Bolt is good for read intensive workloads. Sequential write performance is
  also fast but random writes can be slow. You can use `DB.Batch()` or add a
  write-ahead log to help mitigate this issue.

* Bolt uses a B+tree internally so there can be a lot of random page access.
  SSDs provide a significant performance boost over spinning disks.

* Try to avoid long running read transactions. Bolt uses copy-on-write so
  old pages cannot be reclaimed while an old transaction is using them.

* Byte slices returned from Bolt are only valid during a transaction. Once the
  transaction has been committed or rolled back then the memory they point to
  can be reused by a new page or can be unmapped from virtual memory and you'll
  see an `unexpected fault address` panic when accessing it.

* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
  buckets that have random inserts will cause your database to have very poor
  page utilization.

* Use larger buckets in general. Smaller buckets cause poor page utilization
  once they become larger than the page size (typically 4KB).

* Bulk loading a lot of random writes into a new bucket can be slow as the
  page will not split until the transaction is committed. Randomly inserting
  more than 100,000 key/value pairs into a single new bucket in a single
  transaction is not advised.

* Bolt uses a memory-mapped file so the underlying operating system handles the
  caching of the data. Typically, the OS will cache as much of the file as it
  can in memory and will release memory as needed to other processes. This means
  that Bolt can show very high memory usage when working with large databases.
  However, this is expected and the OS will release memory as needed. Bolt can
  handle databases much larger than the available physical RAM, provided its
  memory-map fits in the process virtual address space. It may be problematic
  on 32-bit systems.

* The data structures in the Bolt database are memory mapped so the data file
  will be endian specific. This means that you cannot copy a Bolt file from a
  little endian machine to a big endian machine and have it work. For most
  users this is not a concern since most modern CPUs are little endian.

* Because of the way pages are laid out on disk, Bolt cannot truncate data files
  and return free pages back to the disk. Instead, Bolt maintains a free list
  of unused pages within its data file. These free pages can be reused by later
  transactions. This works well for many use cases as databases generally tend
  to grow. However, it's important to note that deleting large chunks of data
  will not allow you to reclaim that space on disk.

  For more information on page allocation, [see this comment][page-allocation].

[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638


## Reading the Source

Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.

The best places to start are the main entry points into Bolt:

- `Open()` - Initializes the reference to the database. It's responsible for
  creating the database if it doesn't exist, obtaining an exclusive lock on the
  file, reading the meta pages, & memory-mapping the file.

- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
  value of the `writable` argument. This requires briefly obtaining the "meta"
  lock to keep track of open transactions. Only one read-write transaction can
  exist at a time so the "rwlock" is acquired during the life of a read-write
  transaction.

- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
  arguments, a cursor is used to traverse the B+tree to the page and position
  where the key & value will be written. Once the position is found, the bucket
  materializes the underlying page and the page's parent pages into memory as
  "nodes". These nodes are where mutations occur during read-write transactions.
  These changes get flushed to disk during commit.

- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
  to move to the page & position of a key/value pair. During a read-only
  transaction, the key and value data is returned as a direct reference to the
  underlying mmap file so there's no allocation overhead. For read-write
  transactions, this data may reference the mmap file or one of the in-memory
  node values.

- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
  or in-memory nodes. It can seek to a specific key, move to the first or last
  value, or it can move forward or backward. The cursor handles the movement up
  and down the B+tree transparently to the end user.

- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
  into pages to be written to disk. Writing to disk then occurs in two phases.
  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
  new meta page with an incremented transaction ID is written and another
  `fsync()` occurs. This two phase write ensures that partially written data
  pages are ignored in the event of a crash since the meta page pointing to them
  is never written. Partially written meta pages are invalidated because they
  are written with a checksum.

If you have additional notes that could be helpful for others, please submit
them via pull request.


## Other Projects Using Bolt

Below is a list of public, open source projects that use Bolt:

* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, provides a JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.

If you are using Bolt in a project please send a pull request to add it to the list.

vendor/github.com/boltdb/bolt/appveyor.yml (generated, vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\boltdb\bolt

environment:
  GOPATH: c:\gopath

install:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go get -v -t ./...

build_script:
  - go test -v ./...

vendor/github.com/boltdb/bolt/bolt_386.go (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_amd64.go (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_arm.go (generated, vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
package bolt

import "unsafe"

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool

func init() {
	// Simple check to see whether this arch handles unaligned load/stores
	// correctly.

	// ARM9 and older devices require load/stores to be from/to aligned
	// addresses. If not, the lower 2 bits are cleared and that address is
	// read in a jumbled up order.

	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html

	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))

	brokenUnaligned = val != 0x11222211
}

vendor/github.com/boltdb/bolt/bolt_arm64.go (generated, vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
// +build arm64

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_linux.go (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
package bolt

import (
	"syscall"
)

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return syscall.Fdatasync(int(db.file.Fd()))
}

vendor/github.com/boltdb/bolt/bolt_openbsd.go (generated, vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
package bolt

import (
	"syscall"
	"unsafe"
)

const (
	msAsync      = 1 << iota // perform asynchronous writes
	msSync                   // perform synchronous writes
	msInvalidate             // invalidate cached data
)

func msync(db *DB) error {
	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
	if errno != 0 {
		return errno
	}
	return nil
}

func fdatasync(db *DB) error {
	if db.data != nil {
		return msync(db)
	}
	return db.file.Sync()
}

vendor/github.com/boltdb/bolt/bolt_ppc.go (generated, vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
// +build ppc

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

vendor/github.com/boltdb/bolt/bolt_ppc64.go (generated, vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
// +build ppc64

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

vendor/github.com/boltdb/bolt/bolt_ppc64le.go (generated, vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
// +build ppc64le

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_s390x.go (generated, vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
// +build s390x

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_unix.go (generated, vendored, new file, 89 lines)
@@ -0,0 +1,89 @@
// +build !windows,!plan9,!solaris

package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		flag := syscall.LOCK_SH
		if exclusive {
			flag = syscall.LOCK_EX
		}

		// Otherwise attempt to obtain an exclusive lock.
		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
		if err == nil {
			return nil
		} else if err != syscall.EWOULDBLOCK {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}

	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}

	// Unmap using the original byte slice.
	err := syscall.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
	if e1 != 0 {
		err = e1
	}
	return
}

vendor/github.com/boltdb/bolt/bolt_unix_solaris.go (generated, vendored, new file, 90 lines)
@@ -0,0 +1,90 @@
package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		var lock syscall.Flock_t
		lock.Start = 0
		lock.Len = 0
		lock.Pid = 0
		lock.Whence = 0
		lock.Pid = 0
		if exclusive {
			lock.Type = syscall.F_WRLCK
		} else {
			lock.Type = syscall.F_RDLCK
		}
		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Type = syscall.F_UNLCK
	lock.Whence = 0
	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}

	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}

	// Unmap using the original byte slice.
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}
144
vendor/github.com/boltdb/bolt/bolt_windows.go
generated
vendored
Normal file
144
vendor/github.com/boltdb/bolt/bolt_windows.go
generated
vendored
Normal file
@ -0,0 +1,144 @@
package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"
)

// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
	procLockFileEx   = modkernel32.NewProc("LockFileEx")
	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)

const (
	lockExt = ".lock"

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
	flagLockExclusive       = 2
	flagLockFailImmediately = 1

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
	errLockViolation syscall.Errno = 0x21
)

func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
	if r == 0 {
		return err
	}
	return nil
}

func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
	if r == 0 {
		return err
	}
	return nil
}

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return db.file.Sync()
}

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	// Create a separate lock file on windows because a process
	// cannot share an exclusive lock on the same file. This is
	// needed during Tx.WriteTo().
	f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
	if err != nil {
		return err
	}
	db.lockfile = f

	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}

		var flag uint32 = flagLockFailImmediately
		if exclusive {
			flag |= flagLockExclusive
		}

		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
		if err == nil {
			return nil
		} else if err != errLockViolation {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
	db.lockfile.Close()
	os.Remove(db.path + lockExt)
	return err
}

// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
	if !db.readOnly {
		// Truncate the database to the size of the mmap.
		if err := db.file.Truncate(int64(sz)); err != nil {
			return fmt.Errorf("truncate: %s", err)
		}
	}

	// Open a file mapping handle.
	sizelo := uint32(sz >> 32)
	sizehi := uint32(sz) & 0xffffffff
	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
	if h == 0 {
		return os.NewSyscallError("CreateFileMapping", errno)
	}

	// Create the memory map.
	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
	if addr == 0 {
		return os.NewSyscallError("MapViewOfFile", errno)
	}

	// Close mapping handle.
	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
		return os.NewSyscallError("CloseHandle", err)
	}

	// Convert to a byte array.
	db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
	db.datasz = sz

	return nil
}

// munmap unmaps a pointer from a file.
// Based on: https://github.com/edsrzf/mmap-go
func munmap(db *DB) error {
	if db.data == nil {
		return nil
	}

	addr := (uintptr)(unsafe.Pointer(&db.data[0]))
	if err := syscall.UnmapViewOfFile(addr); err != nil {
		return os.NewSyscallError("UnmapViewOfFile", err)
	}
	return nil
}
8 vendor/github.com/boltdb/bolt/boltsync_unix.go (generated, vendored, new file)
@@ -0,0 +1,8 @@
// +build !windows,!plan9,!linux,!openbsd

package bolt

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return db.file.Sync()
}
778 vendor/github.com/boltdb/bolt/bucket.go (generated, vendored, new file)
@@ -0,0 +1,778 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaxKeySize is the maximum length of a key, in bytes.
|
||||
MaxKeySize = 32768
|
||||
|
||||
// MaxValueSize is the maximum length of a value, in bytes.
|
||||
MaxValueSize = (1 << 31) - 2
|
||||
)
|
||||
|
||||
const (
|
||||
maxUint = ^uint(0)
|
||||
minUint = 0
|
||||
maxInt = int(^uint(0) >> 1)
|
||||
minInt = -maxInt - 1
|
||||
)
|
||||
|
||||
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
|
||||
|
||||
const (
|
||||
minFillPercent = 0.1
|
||||
maxFillPercent = 1.0
|
||||
)
|
||||
|
||||
// DefaultFillPercent is the percentage that split pages are filled.
|
||||
// This value can be changed by setting Bucket.FillPercent.
|
||||
const DefaultFillPercent = 0.5
|
||||
|
||||
// Bucket represents a collection of key/value pairs inside the database.
|
||||
type Bucket struct {
|
||||
*bucket
|
||||
tx *Tx // the associated transaction
|
||||
buckets map[string]*Bucket // subbucket cache
|
||||
page *page // inline page reference
|
||||
rootNode *node // materialized node for the root page.
|
||||
nodes map[pgid]*node // node cache
|
||||
|
||||
// Sets the threshold for filling nodes when they split. By default,
|
||||
// the bucket will fill to 50% but it can be useful to increase this
|
||||
// amount if you know that your write workloads are mostly append-only.
|
||||
//
|
||||
// This value is not persisted across transactions, so it must be set in every Tx.
|
||||
FillPercent float64
|
||||
}
|
||||
|
||||
// bucket represents the on-file representation of a bucket.
|
||||
// This is stored as the "value" of a bucket key. If the bucket is small enough,
|
||||
// then its root page can be stored inline in the "value", after the bucket
|
||||
// header. In the case of inline buckets, the "root" will be 0.
|
||||
type bucket struct {
|
||||
root pgid // page id of the bucket's root-level page
|
||||
sequence uint64 // monotonically incrementing, used by NextSequence()
|
||||
}
|
||||
|
||||
// newBucket returns a new bucket associated with a transaction.
|
||||
func newBucket(tx *Tx) Bucket {
|
||||
var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
|
||||
if tx.writable {
|
||||
b.buckets = make(map[string]*Bucket)
|
||||
b.nodes = make(map[pgid]*node)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Tx returns the tx of the bucket.
|
||||
func (b *Bucket) Tx() *Tx {
|
||||
return b.tx
|
||||
}
|
||||
|
||||
// Root returns the root of the bucket.
|
||||
func (b *Bucket) Root() pgid {
|
||||
return b.root
|
||||
}
|
||||
|
||||
// Writable returns whether the bucket is writable.
|
||||
func (b *Bucket) Writable() bool {
|
||||
return b.tx.writable
|
||||
}
|
||||
|
||||
// Cursor creates a cursor associated with the bucket.
|
||||
// The cursor is only valid as long as the transaction is open.
|
||||
// Do not use a cursor after the transaction is closed.
|
||||
func (b *Bucket) Cursor() *Cursor {
|
||||
// Update transaction statistics.
|
||||
b.tx.stats.CursorCount++
|
||||
|
||||
// Allocate and return a cursor.
|
||||
return &Cursor{
|
||||
bucket: b,
|
||||
stack: make([]elemRef, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Bucket retrieves a nested bucket by name.
|
||||
// Returns nil if the bucket does not exist.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (b *Bucket) Bucket(name []byte) *Bucket {
|
||||
if b.buckets != nil {
|
||||
if child := b.buckets[string(name)]; child != nil {
|
||||
return child
|
||||
}
|
||||
}
|
||||
|
||||
// Move cursor to key.
|
||||
c := b.Cursor()
|
||||
k, v, flags := c.seek(name)
|
||||
|
||||
// Return nil if the key doesn't exist or it is not a bucket.
|
||||
if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise create a bucket and cache it.
|
||||
var child = b.openBucket(v)
|
||||
if b.buckets != nil {
|
||||
b.buckets[string(name)] = child
|
||||
}
|
||||
|
||||
return child
|
||||
}
|
||||
|
||||
// Helper method that re-interprets a sub-bucket value
|
||||
// from a parent into a Bucket
|
||||
func (b *Bucket) openBucket(value []byte) *Bucket {
|
||||
var child = newBucket(b.tx)
|
||||
|
||||
// If unaligned load/stores are broken on this arch and value is
|
||||
// unaligned simply clone to an aligned byte array.
|
||||
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
|
||||
|
||||
if unaligned {
|
||||
value = cloneBytes(value)
|
||||
}
|
||||
|
||||
// If this is a writable transaction then we need to copy the bucket entry.
|
||||
// Read-only transactions can point directly at the mmap entry.
|
||||
if b.tx.writable && !unaligned {
|
||||
child.bucket = &bucket{}
|
||||
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
|
||||
} else {
|
||||
child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
|
||||
}
|
||||
|
||||
// Save a reference to the inline page if the bucket is inline.
|
||||
if child.root == 0 {
|
||||
child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
|
||||
}
|
||||
|
||||
return &child
|
||||
}
|
||||
|
||||
// CreateBucket creates a new bucket at the given key and returns the new bucket.
|
||||
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
|
||||
if b.tx.db == nil {
|
||||
return nil, ErrTxClosed
|
||||
} else if !b.tx.writable {
|
||||
return nil, ErrTxNotWritable
|
||||
} else if len(key) == 0 {
|
||||
return nil, ErrBucketNameRequired
|
||||
}
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
k, _, flags := c.seek(key)
|
||||
|
||||
// Return an error if there is an existing key.
|
||||
if bytes.Equal(key, k) {
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return nil, ErrBucketExists
|
||||
} else {
|
||||
return nil, ErrIncompatibleValue
|
||||
}
|
||||
}
|
||||
|
||||
// Create empty, inline bucket.
|
||||
var bucket = Bucket{
|
||||
bucket: &bucket{},
|
||||
rootNode: &node{isLeaf: true},
|
||||
FillPercent: DefaultFillPercent,
|
||||
}
|
||||
var value = bucket.write()
|
||||
|
||||
// Insert into node.
|
||||
key = cloneBytes(key)
|
||||
c.node().put(key, key, value, 0, bucketLeafFlag)
|
||||
|
||||
// Since subbuckets are not allowed on inline buckets, we need to
|
||||
// dereference the inline page, if it exists. This will cause the bucket
|
||||
// to be treated as a regular, non-inline bucket for the rest of the tx.
|
||||
b.page = nil
|
||||
|
||||
return b.Bucket(key), nil
|
||||
}
|
||||
|
||||
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
|
||||
// Returns an error if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
|
||||
child, err := b.CreateBucket(key)
|
||||
if err == ErrBucketExists {
|
||||
return b.Bucket(key), nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return child, nil
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket at the given key.
|
||||
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
|
||||
func (b *Bucket) DeleteBucket(key []byte) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
k, _, flags := c.seek(key)
|
||||
|
||||
// Return an error if bucket doesn't exist or is not a bucket.
|
||||
if !bytes.Equal(key, k) {
|
||||
return ErrBucketNotFound
|
||||
} else if (flags & bucketLeafFlag) == 0 {
|
||||
return ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Recursively delete all child buckets.
|
||||
child := b.Bucket(key)
|
||||
err := child.ForEach(func(k, v []byte) error {
|
||||
if v == nil {
|
||||
if err := child.DeleteBucket(k); err != nil {
|
||||
return fmt.Errorf("delete bucket: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove cached copy.
|
||||
delete(b.buckets, string(key))
|
||||
|
||||
// Release all bucket pages to freelist.
|
||||
child.nodes = nil
|
||||
child.rootNode = nil
|
||||
child.free()
|
||||
|
||||
// Delete the node if we have a matching key.
|
||||
c.node().del(key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves the value for a key in the bucket.
|
||||
// Returns a nil value if the key does not exist or if the key is a nested bucket.
|
||||
// The returned value is only valid for the life of the transaction.
|
||||
func (b *Bucket) Get(key []byte) []byte {
|
||||
k, v, flags := b.Cursor().seek(key)
|
||||
|
||||
// Return nil if this is a bucket.
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If our target node isn't the same key as what's passed in then return nil.
|
||||
if !bytes.Equal(key, k) {
|
||||
return nil
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Put sets the value for a key in the bucket.
|
||||
// If the key exists then its previous value will be overwritten.
|
||||
// Supplied value must remain valid for the life of the transaction.
|
||||
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
|
||||
func (b *Bucket) Put(key []byte, value []byte) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
} else if len(key) == 0 {
|
||||
return ErrKeyRequired
|
||||
} else if len(key) > MaxKeySize {
|
||||
return ErrKeyTooLarge
|
||||
} else if int64(len(value)) > MaxValueSize {
|
||||
return ErrValueTooLarge
|
||||
}
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
k, _, flags := c.seek(key)
|
||||
|
||||
// Return an error if there is an existing key with a bucket value.
|
||||
if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
|
||||
return ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Insert into node.
|
||||
key = cloneBytes(key)
|
||||
c.node().put(key, key, value, 0, 0)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes a key from the bucket.
|
||||
// If the key does not exist then nothing is done and a nil error is returned.
|
||||
// Returns an error if the bucket was created from a read-only transaction.
|
||||
func (b *Bucket) Delete(key []byte) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
_, _, flags := c.seek(key)
|
||||
|
||||
// Return an error if there is an existing bucket value.
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Delete the node if we have a matching key.
|
||||
c.node().del(key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sequence returns the current integer for the bucket without incrementing it.
|
||||
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
|
||||
|
||||
// SetSequence updates the sequence number for the bucket.
|
||||
func (b *Bucket) SetSequence(v uint64) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
// Materialize the root node if it hasn't been already so that the
|
||||
// bucket will be saved during commit.
|
||||
if b.rootNode == nil {
|
||||
_ = b.node(b.root, nil)
|
||||
}
|
||||
|
||||
// Increment and return the sequence.
|
||||
b.bucket.sequence = v
|
||||
return nil
|
||||
}
|
||||
|
||||
// NextSequence returns an autoincrementing integer for the bucket.
|
||||
func (b *Bucket) NextSequence() (uint64, error) {
|
||||
if b.tx.db == nil {
|
||||
return 0, ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return 0, ErrTxNotWritable
|
||||
}
|
||||
|
||||
// Materialize the root node if it hasn't been already so that the
|
||||
// bucket will be saved during commit.
|
||||
if b.rootNode == nil {
|
||||
_ = b.node(b.root, nil)
|
||||
}
|
||||
|
||||
// Increment and return the sequence.
|
||||
b.bucket.sequence++
|
||||
return b.bucket.sequence, nil
|
||||
}
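For reference, a minimal caller-side sketch of how NextSequence is typically combined with Put to assign auto-incrementing keys. The function name storeEvent, the bucket name "events", and the assumed imports (encoding/binary plus this package imported as bolt) are illustrative examples, not part of this file.

// storeEvent sketches Bucket.NextSequence usage: each stored value gets a
// monotonically increasing key, encoded big-endian so keys sort numerically.
// Assumes: import "encoding/binary" and the bolt package.
func storeEvent(db *bolt.DB, payload []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("events"))
		if err != nil {
			return err
		}
		// Update always supplies a writable transaction, so NextSequence
		// only fails if the transaction has already been closed.
		id, err := b.NextSequence()
		if err != nil {
			return err
		}
		key := make([]byte, 8)
		binary.BigEndian.PutUint64(key, id)
		return b.Put(key, payload)
	})
}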
|
||||
|
||||
// ForEach executes a function for each key/value pair in a bucket.
|
||||
// If the provided function returns an error then the iteration is stopped and
|
||||
// the error is returned to the caller. The provided function must not modify
|
||||
// the bucket; this will result in undefined behavior.
|
||||
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
}
|
||||
c := b.Cursor()
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
if err := fn(k, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
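A short caller-side sketch of ForEach, mirroring the contract in the comment above: nested buckets arrive with v == nil, and the callback must not modify the bucket. The helper name, bucket lookup, and fmt-based printing are illustrative only.

// dumpBucket sketches read-only iteration with ForEach inside db.View.
// Assumes: import "fmt" and the bolt package.
func dumpBucket(db *bolt.DB, name []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(name)
		if b == nil {
			return bolt.ErrBucketNotFound
		}
		return b.ForEach(func(k, v []byte) error {
			if v == nil {
				fmt.Printf("%s (nested bucket)\n", k)
				return nil
			}
			fmt.Printf("%s = %s\n", k, v)
			return nil
		})
	})
}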
|
||||
|
||||
// Stats returns stats on a bucket.
|
||||
func (b *Bucket) Stats() BucketStats {
|
||||
var s, subStats BucketStats
|
||||
pageSize := b.tx.db.pageSize
|
||||
s.BucketN += 1
|
||||
if b.root == 0 {
|
||||
s.InlineBucketN += 1
|
||||
}
|
||||
b.forEachPage(func(p *page, depth int) {
|
||||
if (p.flags & leafPageFlag) != 0 {
|
||||
s.KeyN += int(p.count)
|
||||
|
||||
// used totals the used bytes for the page
|
||||
used := pageHeaderSize
|
||||
|
||||
if p.count != 0 {
|
||||
// If page has any elements, add all element headers.
|
||||
used += leafPageElementSize * int(p.count-1)
|
||||
|
||||
// Add all element key, value sizes.
|
||||
// The computation takes advantage of the fact that the position
|
||||
// of the last element's key/value equals to the total of the sizes
|
||||
// of all previous elements' keys and values.
|
||||
// It also includes the last element's header.
|
||||
lastElement := p.leafPageElement(p.count - 1)
|
||||
used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
|
||||
}
|
||||
|
||||
if b.root == 0 {
|
||||
// For inlined bucket just update the inline stats
|
||||
s.InlineBucketInuse += used
|
||||
} else {
|
||||
// For non-inlined bucket update all the leaf stats
|
||||
s.LeafPageN++
|
||||
s.LeafInuse += used
|
||||
s.LeafOverflowN += int(p.overflow)
|
||||
|
||||
// Collect stats from sub-buckets.
|
||||
// Do that by iterating over all element headers
|
||||
// looking for the ones with the bucketLeafFlag.
|
||||
for i := uint16(0); i < p.count; i++ {
|
||||
e := p.leafPageElement(i)
|
||||
if (e.flags & bucketLeafFlag) != 0 {
|
||||
// For any bucket element, open the element value
|
||||
// and recursively call Stats on the contained bucket.
|
||||
subStats.Add(b.openBucket(e.value()).Stats())
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (p.flags & branchPageFlag) != 0 {
|
||||
s.BranchPageN++
|
||||
lastElement := p.branchPageElement(p.count - 1)
|
||||
|
||||
// used totals the used bytes for the page
|
||||
// Add header and all element headers.
|
||||
used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
|
||||
|
||||
// Add size of all keys and values.
|
||||
// Again, use the fact that last element's position equals to
|
||||
// the total of key, value sizes of all previous elements.
|
||||
used += int(lastElement.pos + lastElement.ksize)
|
||||
s.BranchInuse += used
|
||||
s.BranchOverflowN += int(p.overflow)
|
||||
}
|
||||
|
||||
// Keep track of maximum page depth.
|
||||
if depth+1 > s.Depth {
|
||||
s.Depth = (depth + 1)
|
||||
}
|
||||
})
|
||||
|
||||
// Alloc stats can be computed from page counts and pageSize.
|
||||
s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
|
||||
s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
|
||||
|
||||
// Add the max depth of sub-buckets to get total nested depth.
|
||||
s.Depth += subStats.Depth
|
||||
// Add the stats for all sub-buckets
|
||||
s.Add(subStats)
|
||||
return s
|
||||
}
|
||||
|
||||
// forEachPage iterates over every page in a bucket, including inline pages.
|
||||
func (b *Bucket) forEachPage(fn func(*page, int)) {
|
||||
// If we have an inline page then just use that.
|
||||
if b.page != nil {
|
||||
fn(b.page, 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise traverse the page hierarchy.
|
||||
b.tx.forEachPage(b.root, 0, fn)
|
||||
}
|
||||
|
||||
// forEachPageNode iterates over every page (or node) in a bucket.
|
||||
// This also includes inline pages.
|
||||
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
|
||||
// If we have an inline page or root node then just use that.
|
||||
if b.page != nil {
|
||||
fn(b.page, nil, 0)
|
||||
return
|
||||
}
|
||||
b._forEachPageNode(b.root, 0, fn)
|
||||
}
|
||||
|
||||
func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
|
||||
var p, n = b.pageNode(pgid)
|
||||
|
||||
// Execute function.
|
||||
fn(p, n, depth)
|
||||
|
||||
// Recursively loop over children.
|
||||
if p != nil {
|
||||
if (p.flags & branchPageFlag) != 0 {
|
||||
for i := 0; i < int(p.count); i++ {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
b._forEachPageNode(elem.pgid, depth+1, fn)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !n.isLeaf {
|
||||
for _, inode := range n.inodes {
|
||||
b._forEachPageNode(inode.pgid, depth+1, fn)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// spill writes all the nodes for this bucket to dirty pages.
|
||||
func (b *Bucket) spill() error {
|
||||
// Spill all child buckets first.
|
||||
for name, child := range b.buckets {
|
||||
// If the child bucket is small enough and it has no child buckets then
|
||||
// write it inline into the parent bucket's page. Otherwise spill it
|
||||
// like a normal bucket and make the parent value a pointer to the page.
|
||||
var value []byte
|
||||
if child.inlineable() {
|
||||
child.free()
|
||||
value = child.write()
|
||||
} else {
|
||||
if err := child.spill(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the child bucket header in this bucket.
|
||||
value = make([]byte, unsafe.Sizeof(bucket{}))
|
||||
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
|
||||
*bucket = *child.bucket
|
||||
}
|
||||
|
||||
// Skip writing the bucket if there are no materialized nodes.
|
||||
if child.rootNode == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Update parent node.
|
||||
var c = b.Cursor()
|
||||
k, _, flags := c.seek([]byte(name))
|
||||
if !bytes.Equal([]byte(name), k) {
|
||||
panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
|
||||
}
|
||||
if flags&bucketLeafFlag == 0 {
|
||||
panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
|
||||
}
|
||||
c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
|
||||
}
|
||||
|
||||
// Ignore if there's not a materialized root node.
|
||||
if b.rootNode == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Spill nodes.
|
||||
if err := b.rootNode.spill(); err != nil {
|
||||
return err
|
||||
}
|
||||
b.rootNode = b.rootNode.root()
|
||||
|
||||
// Update the root node for this bucket.
|
||||
if b.rootNode.pgid >= b.tx.meta.pgid {
|
||||
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
|
||||
}
|
||||
b.root = b.rootNode.pgid
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// inlineable returns true if a bucket is small enough to be written inline
|
||||
// and if it contains no subbuckets. Otherwise returns false.
|
||||
func (b *Bucket) inlineable() bool {
|
||||
var n = b.rootNode
|
||||
|
||||
// Bucket must only contain a single leaf node.
|
||||
if n == nil || !n.isLeaf {
|
||||
return false
|
||||
}
|
||||
|
||||
// Bucket is not inlineable if it contains subbuckets or if it goes beyond
|
||||
// our threshold for inline bucket size.
|
||||
var size = pageHeaderSize
|
||||
for _, inode := range n.inodes {
|
||||
size += leafPageElementSize + len(inode.key) + len(inode.value)
|
||||
|
||||
if inode.flags&bucketLeafFlag != 0 {
|
||||
return false
|
||||
} else if size > b.maxInlineBucketSize() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns the maximum total size of a bucket to make it a candidate for inlining.
|
||||
func (b *Bucket) maxInlineBucketSize() int {
|
||||
return b.tx.db.pageSize / 4
|
||||
}
|
||||
|
||||
// write allocates and writes a bucket to a byte slice.
|
||||
func (b *Bucket) write() []byte {
|
||||
// Allocate the appropriate size.
|
||||
var n = b.rootNode
|
||||
var value = make([]byte, bucketHeaderSize+n.size())
|
||||
|
||||
// Write a bucket header.
|
||||
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
|
||||
*bucket = *b.bucket
|
||||
|
||||
// Convert byte slice to a fake page and write the root node.
|
||||
var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
|
||||
n.write(p)
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
// rebalance attempts to balance all nodes.
|
||||
func (b *Bucket) rebalance() {
|
||||
for _, n := range b.nodes {
|
||||
n.rebalance()
|
||||
}
|
||||
for _, child := range b.buckets {
|
||||
child.rebalance()
|
||||
}
|
||||
}
|
||||
|
||||
// node creates a node from a page and associates it with a given parent.
|
||||
func (b *Bucket) node(pgid pgid, parent *node) *node {
|
||||
_assert(b.nodes != nil, "nodes map expected")
|
||||
|
||||
// Retrieve node if it's already been created.
|
||||
if n := b.nodes[pgid]; n != nil {
|
||||
return n
|
||||
}
|
||||
|
||||
// Otherwise create a node and cache it.
|
||||
n := &node{bucket: b, parent: parent}
|
||||
if parent == nil {
|
||||
b.rootNode = n
|
||||
} else {
|
||||
parent.children = append(parent.children, n)
|
||||
}
|
||||
|
||||
// Use the inline page if this is an inline bucket.
|
||||
var p = b.page
|
||||
if p == nil {
|
||||
p = b.tx.page(pgid)
|
||||
}
|
||||
|
||||
// Read the page into the node and cache it.
|
||||
n.read(p)
|
||||
b.nodes[pgid] = n
|
||||
|
||||
// Update statistics.
|
||||
b.tx.stats.NodeCount++
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
// free recursively frees all pages in the bucket.
|
||||
func (b *Bucket) free() {
|
||||
if b.root == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var tx = b.tx
|
||||
b.forEachPageNode(func(p *page, n *node, _ int) {
|
||||
if p != nil {
|
||||
tx.db.freelist.free(tx.meta.txid, p)
|
||||
} else {
|
||||
n.free()
|
||||
}
|
||||
})
|
||||
b.root = 0
|
||||
}
|
||||
|
||||
// dereference removes all references to the old mmap.
|
||||
func (b *Bucket) dereference() {
|
||||
if b.rootNode != nil {
|
||||
b.rootNode.root().dereference()
|
||||
}
|
||||
|
||||
for _, child := range b.buckets {
|
||||
child.dereference()
|
||||
}
|
||||
}
|
||||
|
||||
// pageNode returns the in-memory node, if it exists.
|
||||
// Otherwise returns the underlying page.
|
||||
func (b *Bucket) pageNode(id pgid) (*page, *node) {
|
||||
// Inline buckets have a fake page embedded in their value so treat them
|
||||
// differently. We'll return the rootNode (if available) or the fake page.
|
||||
if b.root == 0 {
|
||||
if id != 0 {
|
||||
panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
|
||||
}
|
||||
if b.rootNode != nil {
|
||||
return nil, b.rootNode
|
||||
}
|
||||
return b.page, nil
|
||||
}
|
||||
|
||||
// Check the node cache for non-inline buckets.
|
||||
if b.nodes != nil {
|
||||
if n := b.nodes[id]; n != nil {
|
||||
return nil, n
|
||||
}
|
||||
}
|
||||
|
||||
// Finally lookup the page from the transaction if no node is materialized.
|
||||
return b.tx.page(id), nil
|
||||
}
|
||||
|
||||
// BucketStats records statistics about resources used by a bucket.
|
||||
type BucketStats struct {
|
||||
// Page count statistics.
|
||||
BranchPageN int // number of logical branch pages
|
||||
BranchOverflowN int // number of physical branch overflow pages
|
||||
LeafPageN int // number of logical leaf pages
|
||||
LeafOverflowN int // number of physical leaf overflow pages
|
||||
|
||||
// Tree statistics.
|
||||
KeyN int // number of keys/value pairs
|
||||
Depth int // number of levels in B+tree
|
||||
|
||||
// Page size utilization.
|
||||
BranchAlloc int // bytes allocated for physical branch pages
|
||||
BranchInuse int // bytes actually used for branch data
|
||||
LeafAlloc int // bytes allocated for physical leaf pages
|
||||
LeafInuse int // bytes actually used for leaf data
|
||||
|
||||
// Bucket statistics
|
||||
BucketN int // total number of buckets including the top bucket
|
||||
InlineBucketN int // total number of inlined buckets
|
||||
InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
|
||||
}
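As an illustration of how these fields are typically consumed, a caller-side sketch that reads a bucket's BucketStats inside a read-only transaction. The helper name and printed fields are examples; note that Stats walks every page in the bucket, so it is intended for occasional diagnostics rather than per-request use.

// reportStats sketches reading BucketStats from a View transaction.
// Assumes: import "fmt" and the bolt package.
func reportStats(db *bolt.DB, name []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		st := tx.Bucket(name).Stats()
		fmt.Printf("keys=%d depth=%d leaf-inuse=%dB leaf-alloc=%dB\n",
			st.KeyN, st.Depth, st.LeafInuse, st.LeafAlloc)
		return nil
	})
}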
|
||||
|
||||
func (s *BucketStats) Add(other BucketStats) {
|
||||
s.BranchPageN += other.BranchPageN
|
||||
s.BranchOverflowN += other.BranchOverflowN
|
||||
s.LeafPageN += other.LeafPageN
|
||||
s.LeafOverflowN += other.LeafOverflowN
|
||||
s.KeyN += other.KeyN
|
||||
if s.Depth < other.Depth {
|
||||
s.Depth = other.Depth
|
||||
}
|
||||
s.BranchAlloc += other.BranchAlloc
|
||||
s.BranchInuse += other.BranchInuse
|
||||
s.LeafAlloc += other.LeafAlloc
|
||||
s.LeafInuse += other.LeafInuse
|
||||
|
||||
s.BucketN += other.BucketN
|
||||
s.InlineBucketN += other.InlineBucketN
|
||||
s.InlineBucketInuse += other.InlineBucketInuse
|
||||
}
|
||||
|
||||
// cloneBytes returns a copy of a given slice.
|
||||
func cloneBytes(v []byte) []byte {
|
||||
var clone = make([]byte, len(v))
|
||||
copy(clone, v)
|
||||
return clone
|
||||
}
400 vendor/github.com/boltdb/bolt/cursor.go (generated, vendored, new file)
@@ -0,0 +1,400 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
|
||||
// Cursors see nested buckets with value == nil.
|
||||
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
|
||||
//
|
||||
// Keys and values returned from the cursor are only valid for the life of the transaction.
|
||||
//
|
||||
// Changing data while traversing with a cursor may cause it to be invalidated
|
||||
// and return unexpected keys and/or values. You must reposition your cursor
|
||||
// after mutating data.
|
||||
type Cursor struct {
|
||||
bucket *Bucket
|
||||
stack []elemRef
|
||||
}
|
||||
|
||||
// Bucket returns the bucket that this cursor was created from.
|
||||
func (c *Cursor) Bucket() *Bucket {
|
||||
return c.bucket
|
||||
}
|
||||
|
||||
// First moves the cursor to the first item in the bucket and returns its key and value.
|
||||
// If the bucket is empty then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) First() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
c.stack = c.stack[:0]
|
||||
p, n := c.bucket.pageNode(c.bucket.root)
|
||||
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
|
||||
c.first()
|
||||
|
||||
// If we land on an empty page then move to the next value.
|
||||
// https://github.com/boltdb/bolt/issues/450
|
||||
if c.stack[len(c.stack)-1].count() == 0 {
|
||||
c.next()
|
||||
}
|
||||
|
||||
k, v, flags := c.keyValue()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
|
||||
}
|
||||
|
||||
// Last moves the cursor to the last item in the bucket and returns its key and value.
|
||||
// If the bucket is empty then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Last() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
c.stack = c.stack[:0]
|
||||
p, n := c.bucket.pageNode(c.bucket.root)
|
||||
ref := elemRef{page: p, node: n}
|
||||
ref.index = ref.count() - 1
|
||||
c.stack = append(c.stack, ref)
|
||||
c.last()
|
||||
k, v, flags := c.keyValue()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
// Next moves the cursor to the next item in the bucket and returns its key and value.
|
||||
// If the cursor is at the end of the bucket then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Next() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
k, v, flags := c.next()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
// Prev moves the cursor to the previous item in the bucket and returns its key and value.
|
||||
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Prev() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
|
||||
// Attempt to move back one element until we're successful.
|
||||
// Move up the stack as we hit the beginning of each page in our stack.
|
||||
for i := len(c.stack) - 1; i >= 0; i-- {
|
||||
elem := &c.stack[i]
|
||||
if elem.index > 0 {
|
||||
elem.index--
|
||||
break
|
||||
}
|
||||
c.stack = c.stack[:i]
|
||||
}
|
||||
|
||||
// If we've hit the end then return nil.
|
||||
if len(c.stack) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Move down the stack to find the last element of the last leaf under this branch.
|
||||
c.last()
|
||||
k, v, flags := c.keyValue()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
// Seek moves the cursor to a given key and returns it.
|
||||
// If the key does not exist then the next key is used. If no keys
|
||||
// follow, a nil key is returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
|
||||
k, v, flags := c.seek(seek)
|
||||
|
||||
// If we ended up after the last element of a page then move to the next one.
|
||||
if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
|
||||
k, v, flags = c.next()
|
||||
}
|
||||
|
||||
if k == nil {
|
||||
return nil, nil
|
||||
} else if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
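To show how Seek is typically used for range queries, a caller-side sketch of a prefix scan: position the cursor at the first key at or after the prefix, then advance with Next while keys still carry the prefix. The helper name and the assumed "bytes" import are illustrative, not part of this file.

// prefixScan sketches the Seek + Next pattern over keys sharing a prefix.
// Assumes: import "bytes" and the bolt package.
func prefixScan(b *bolt.Bucket, prefix []byte, fn func(k, v []byte) error) error {
	c := b.Cursor()
	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
		if err := fn(k, v); err != nil {
			return err
		}
	}
	return nil
}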
|
||||
|
||||
// Delete removes the current key/value under the cursor from the bucket.
|
||||
// Delete fails if current key/value is a bucket or if the transaction is not writable.
|
||||
func (c *Cursor) Delete() error {
|
||||
if c.bucket.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !c.bucket.Writable() {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
key, _, flags := c.keyValue()
|
||||
// Return an error if current value is a bucket.
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return ErrIncompatibleValue
|
||||
}
|
||||
c.node().del(key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// seek moves the cursor to a given key and returns it.
|
||||
// If the key does not exist then the next key is used.
|
||||
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
|
||||
// Start from root page/node and traverse to correct page.
|
||||
c.stack = c.stack[:0]
|
||||
c.search(seek, c.bucket.root)
|
||||
ref := &c.stack[len(c.stack)-1]
|
||||
|
||||
// If the cursor is pointing to the end of page/node then return nil.
|
||||
if ref.index >= ref.count() {
|
||||
return nil, nil, 0
|
||||
}
|
||||
|
||||
// If this is a bucket then return a nil value.
|
||||
return c.keyValue()
|
||||
}
|
||||
|
||||
// first moves the cursor to the first leaf element under the last page in the stack.
|
||||
func (c *Cursor) first() {
|
||||
for {
|
||||
// Exit when we hit a leaf page.
|
||||
var ref = &c.stack[len(c.stack)-1]
|
||||
if ref.isLeaf() {
|
||||
break
|
||||
}
|
||||
|
||||
// Keep adding pages pointing to the first element to the stack.
|
||||
var pgid pgid
|
||||
if ref.node != nil {
|
||||
pgid = ref.node.inodes[ref.index].pgid
|
||||
} else {
|
||||
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
|
||||
}
|
||||
p, n := c.bucket.pageNode(pgid)
|
||||
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
|
||||
}
|
||||
}
|
||||
|
||||
// last moves the cursor to the last leaf element under the last page in the stack.
|
||||
func (c *Cursor) last() {
|
||||
for {
|
||||
// Exit when we hit a leaf page.
|
||||
ref := &c.stack[len(c.stack)-1]
|
||||
if ref.isLeaf() {
|
||||
break
|
||||
}
|
||||
|
||||
// Keep adding pages pointing to the last element in the stack.
|
||||
var pgid pgid
|
||||
if ref.node != nil {
|
||||
pgid = ref.node.inodes[ref.index].pgid
|
||||
} else {
|
||||
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
|
||||
}
|
||||
p, n := c.bucket.pageNode(pgid)
|
||||
|
||||
var nextRef = elemRef{page: p, node: n}
|
||||
nextRef.index = nextRef.count() - 1
|
||||
c.stack = append(c.stack, nextRef)
|
||||
}
|
||||
}
|
||||
|
||||
// next moves to the next leaf element and returns the key and value.
|
||||
// If the cursor is at the last leaf element then it stays there and returns nil.
|
||||
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
|
||||
for {
|
||||
// Attempt to move over one element until we're successful.
|
||||
// Move up the stack as we hit the end of each page in our stack.
|
||||
var i int
|
||||
for i = len(c.stack) - 1; i >= 0; i-- {
|
||||
elem := &c.stack[i]
|
||||
if elem.index < elem.count()-1 {
|
||||
elem.index++
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If we've hit the root page then stop and return. This will leave the
|
||||
// cursor on the last element of the last page.
|
||||
if i == -1 {
|
||||
return nil, nil, 0
|
||||
}
|
||||
|
||||
// Otherwise start from where we left off in the stack and find the
|
||||
// first element of the first leaf page.
|
||||
c.stack = c.stack[:i+1]
|
||||
c.first()
|
||||
|
||||
// If this is an empty page then restart and move back up the stack.
|
||||
// https://github.com/boltdb/bolt/issues/450
|
||||
if c.stack[len(c.stack)-1].count() == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
return c.keyValue()
|
||||
}
|
||||
}
|
||||
|
||||
// search recursively performs a binary search against a given page/node until it finds a given key.
|
||||
func (c *Cursor) search(key []byte, pgid pgid) {
|
||||
p, n := c.bucket.pageNode(pgid)
|
||||
if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
|
||||
panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
|
||||
}
|
||||
e := elemRef{page: p, node: n}
|
||||
c.stack = append(c.stack, e)
|
||||
|
||||
// If we're on a leaf page/node then find the specific node.
|
||||
if e.isLeaf() {
|
||||
c.nsearch(key)
|
||||
return
|
||||
}
|
||||
|
||||
if n != nil {
|
||||
c.searchNode(key, n)
|
||||
return
|
||||
}
|
||||
c.searchPage(key, p)
|
||||
}
|
||||
|
||||
func (c *Cursor) searchNode(key []byte, n *node) {
|
||||
var exact bool
|
||||
index := sort.Search(len(n.inodes), func(i int) bool {
|
||||
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
|
||||
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
|
||||
ret := bytes.Compare(n.inodes[i].key, key)
|
||||
if ret == 0 {
|
||||
exact = true
|
||||
}
|
||||
return ret != -1
|
||||
})
|
||||
if !exact && index > 0 {
|
||||
index--
|
||||
}
|
||||
c.stack[len(c.stack)-1].index = index
|
||||
|
||||
// Recursively search to the next page.
|
||||
c.search(key, n.inodes[index].pgid)
|
||||
}
|
||||
|
||||
func (c *Cursor) searchPage(key []byte, p *page) {
|
||||
// Binary search for the correct range.
|
||||
inodes := p.branchPageElements()
|
||||
|
||||
var exact bool
|
||||
index := sort.Search(int(p.count), func(i int) bool {
|
||||
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
|
||||
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
|
||||
ret := bytes.Compare(inodes[i].key(), key)
|
||||
if ret == 0 {
|
||||
exact = true
|
||||
}
|
||||
return ret != -1
|
||||
})
|
||||
if !exact && index > 0 {
|
||||
index--
|
||||
}
|
||||
c.stack[len(c.stack)-1].index = index
|
||||
|
||||
// Recursively search to the next page.
|
||||
c.search(key, inodes[index].pgid)
|
||||
}
|
||||
|
||||
// nsearch searches the leaf node on the top of the stack for a key.
|
||||
func (c *Cursor) nsearch(key []byte) {
|
||||
e := &c.stack[len(c.stack)-1]
|
||||
p, n := e.page, e.node
|
||||
|
||||
// If we have a node then search its inodes.
|
||||
if n != nil {
|
||||
index := sort.Search(len(n.inodes), func(i int) bool {
|
||||
return bytes.Compare(n.inodes[i].key, key) != -1
|
||||
})
|
||||
e.index = index
|
||||
return
|
||||
}
|
||||
|
||||
// If we have a page then search its leaf elements.
|
||||
inodes := p.leafPageElements()
|
||||
index := sort.Search(int(p.count), func(i int) bool {
|
||||
return bytes.Compare(inodes[i].key(), key) != -1
|
||||
})
|
||||
e.index = index
|
||||
}
|
||||
|
||||
// keyValue returns the key and value of the current leaf element.
|
||||
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
|
||||
ref := &c.stack[len(c.stack)-1]
|
||||
if ref.count() == 0 || ref.index >= ref.count() {
|
||||
return nil, nil, 0
|
||||
}
|
||||
|
||||
// Retrieve value from node.
|
||||
if ref.node != nil {
|
||||
inode := &ref.node.inodes[ref.index]
|
||||
return inode.key, inode.value, inode.flags
|
||||
}
|
||||
|
||||
// Or retrieve value from page.
|
||||
elem := ref.page.leafPageElement(uint16(ref.index))
|
||||
return elem.key(), elem.value(), elem.flags
|
||||
}
|
||||
|
||||
// node returns the node that the cursor is currently positioned on.
|
||||
func (c *Cursor) node() *node {
|
||||
_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
|
||||
|
||||
// If the top of the stack is a leaf node then just return it.
|
||||
if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
|
||||
return ref.node
|
||||
}
|
||||
|
||||
// Start from root and traverse down the hierarchy.
|
||||
var n = c.stack[0].node
|
||||
if n == nil {
|
||||
n = c.bucket.node(c.stack[0].page.id, nil)
|
||||
}
|
||||
for _, ref := range c.stack[:len(c.stack)-1] {
|
||||
_assert(!n.isLeaf, "expected branch node")
|
||||
n = n.childAt(int(ref.index))
|
||||
}
|
||||
_assert(n.isLeaf, "expected leaf node")
|
||||
return n
|
||||
}
|
||||
|
||||
// elemRef represents a reference to an element on a given page/node.
|
||||
type elemRef struct {
|
||||
page *page
|
||||
node *node
|
||||
index int
|
||||
}
|
||||
|
||||
// isLeaf returns whether the ref is pointing at a leaf page/node.
|
||||
func (r *elemRef) isLeaf() bool {
|
||||
if r.node != nil {
|
||||
return r.node.isLeaf
|
||||
}
|
||||
return (r.page.flags & leafPageFlag) != 0
|
||||
}
|
||||
|
||||
// count returns the number of inodes or page elements.
|
||||
func (r *elemRef) count() int {
|
||||
if r.node != nil {
|
||||
return len(r.node.inodes)
|
||||
}
|
||||
return int(r.page.count)
|
||||
}
1036 vendor/github.com/boltdb/bolt/db.go (generated, vendored, new file)
File diff suppressed because it is too large.
44 vendor/github.com/boltdb/bolt/doc.go (generated, vendored, new file)
@@ -0,0 +1,44 @@
/*
Package bolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
Postgres or MySQL.

Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
optimized for fast read access and does not require recovery in the event of a
system crash. Transactions which have not finished committing will simply be
rolled back in the event of a crash.

The design of Bolt is based on Howard Chu's LMDB database project.

Bolt currently works on Windows, Mac OS X, and Linux.


Basics

There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
a collection of unique keys that are associated with values.

Transactions provide either read-only or read-write access to the database.
Read-only transactions can retrieve key/value pairs and can use Cursors to
iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.


Caveats

The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database, however, this means that keys and
values returned from Bolt cannot be changed. Writing to a read-only byte slice
will cause Go to panic.

Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.


*/
package bolt
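To make the Basics and Caveats sections above concrete, here is a minimal, self-contained usage sketch of the API this documentation describes: open a DB, write inside an Update transaction, and read inside a View transaction. The file name "my.db" and bucket name "widgets" are arbitrary examples, not anything defined by the package.

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open creates the data file if it does not exist.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction: create a bucket and store one key.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read-only transaction: the returned slice is only valid inside View,
	// so copy it if it needs to outlive the transaction.
	if err := db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		fmt.Printf("foo = %s\n", v)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}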
71 vendor/github.com/boltdb/bolt/errors.go (generated, vendored, new file)
@@ -0,0 +1,71 @@
package bolt

import "errors"

// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	ErrDatabaseNotOpen = errors.New("database not open")

	// ErrDatabaseOpen is returned when opening a database that is
	// already open.
	ErrDatabaseOpen = errors.New("database already open")

	// ErrInvalid is returned when both meta pages on a database are invalid.
	// This typically occurs when a file is not a bolt database.
	ErrInvalid = errors.New("invalid database")

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	ErrVersionMismatch = errors.New("version mismatch")

	// ErrChecksum is returned when either meta page checksum does not match.
	ErrChecksum = errors.New("checksum error")

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	ErrTimeout = errors.New("timeout")
)

// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	ErrTxClosed = errors.New("tx closed")

	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
	// read-only database.
	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)

// These errors can occur when putting or deleting a value or a bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	ErrBucketNotFound = errors.New("bucket not found")

	// ErrBucketExists is returned when creating a bucket that already exists.
	ErrBucketExists = errors.New("bucket already exists")

	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	ErrBucketNameRequired = errors.New("bucket name required")

	// ErrKeyRequired is returned when inserting a zero-length key.
	ErrKeyRequired = errors.New("key required")

	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	ErrKeyTooLarge = errors.New("key too large")

	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	ErrValueTooLarge = errors.New("value too large")

	// ErrIncompatibleValue is returned when trying to create or delete a bucket
	// on an existing non-bucket key or when trying to create or delete a
	// non-bucket key on an existing bucket key.
	ErrIncompatibleValue = errors.New("incompatible value")
)
248 vendor/github.com/boltdb/bolt/freelist.go (generated, vendored, new file)
@@ -0,0 +1,248 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// freelist represents a list of all pages that are available for allocation.
|
||||
// It also tracks pages that have been freed but are still in use by open transactions.
|
||||
type freelist struct {
|
||||
ids []pgid // all free and available free page ids.
|
||||
pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
|
||||
cache map[pgid]bool // fast lookup of all free and pending page ids.
|
||||
}
|
||||
|
||||
// newFreelist returns an empty, initialized freelist.
|
||||
func newFreelist() *freelist {
|
||||
return &freelist{
|
||||
pending: make(map[txid][]pgid),
|
||||
cache: make(map[pgid]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// size returns the size of the page after serialization.
|
||||
func (f *freelist) size() int {
|
||||
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
|
||||
}
|
||||
|
||||
// count returns count of pages on the freelist
|
||||
func (f *freelist) count() int {
|
||||
return f.free_count() + f.pending_count()
|
||||
}
|
||||
|
||||
// free_count returns count of free pages
|
||||
func (f *freelist) free_count() int {
|
||||
return len(f.ids)
|
||||
}
|
||||
|
||||
// pending_count returns count of pending pages
|
||||
func (f *freelist) pending_count() int {
|
||||
var count int
|
||||
for _, list := range f.pending {
|
||||
count += len(list)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// all returns a list of all free ids and all pending ids in one sorted list.
|
||||
func (f *freelist) all() []pgid {
|
||||
m := make(pgids, 0)
|
||||
|
||||
for _, list := range f.pending {
|
||||
m = append(m, list...)
|
||||
}
|
||||
|
||||
sort.Sort(m)
|
||||
return pgids(f.ids).merge(m)
|
||||
}
|
||||
|
||||
// allocate returns the starting page id of a contiguous list of pages of a given size.
|
||||
// If a contiguous block cannot be found then 0 is returned.
|
||||
func (f *freelist) allocate(n int) pgid {
|
||||
if len(f.ids) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var initial, previd pgid
|
||||
for i, id := range f.ids {
|
||||
if id <= 1 {
|
||||
panic(fmt.Sprintf("invalid page allocation: %d", id))
|
||||
}
|
||||
|
||||
// Reset initial page if this is not contiguous.
|
||||
if previd == 0 || id-previd != 1 {
|
||||
initial = id
|
||||
}
|
||||
|
||||
// If we found a contiguous block then remove it and return it.
|
||||
if (id-initial)+1 == pgid(n) {
|
||||
// If we're allocating off the beginning then take the fast path
|
||||
// and just adjust the existing slice. This will use extra memory
|
||||
// temporarily but the append() in free() will realloc the slice
|
||||
// as is necessary.
|
||||
if (i + 1) == n {
|
||||
f.ids = f.ids[i+1:]
|
||||
} else {
|
||||
copy(f.ids[i-n+1:], f.ids[i+1:])
|
||||
f.ids = f.ids[:len(f.ids)-n]
|
||||
}
|
||||
|
||||
// Remove from the free cache.
|
||||
for i := pgid(0); i < pgid(n); i++ {
|
||||
delete(f.cache, initial+i)
|
||||
}
|
||||
|
||||
return initial
|
||||
}
|
||||
|
||||
previd = id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// free releases a page and its overflow for a given transaction id.
|
||||
// If the page is already free then a panic will occur.
|
||||
func (f *freelist) free(txid txid, p *page) {
|
||||
if p.id <= 1 {
|
||||
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
|
||||
}
|
||||
|
||||
// Free page and all its overflow pages.
|
||||
var ids = f.pending[txid]
|
||||
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
|
||||
// Verify that page is not already free.
|
||||
if f.cache[id] {
|
||||
panic(fmt.Sprintf("page %d already freed", id))
|
||||
}
|
||||
|
||||
// Add to the freelist and cache.
|
||||
ids = append(ids, id)
|
||||
f.cache[id] = true
|
||||
}
|
||||
f.pending[txid] = ids
|
||||
}
|
||||
|
||||
// release moves all page ids for a transaction id (or older) to the freelist.
|
||||
func (f *freelist) release(txid txid) {
|
||||
m := make(pgids, 0)
|
||||
for tid, ids := range f.pending {
|
||||
if tid <= txid {
|
||||
// Move transaction's pending pages to the available freelist.
|
||||
// Don't remove from the cache since the page is still free.
|
||||
m = append(m, ids...)
|
||||
delete(f.pending, tid)
|
||||
}
|
||||
}
|
||||
sort.Sort(m)
|
||||
f.ids = pgids(f.ids).merge(m)
|
||||
}
|
||||
|
||||
// rollback removes the pages from a given pending tx.
|
||||
func (f *freelist) rollback(txid txid) {
|
||||
// Remove page ids from cache.
|
||||
for _, id := range f.pending[txid] {
|
||||
delete(f.cache, id)
|
||||
}
|
||||
|
||||
// Remove pages from pending list.
|
||||
delete(f.pending, txid)
|
||||
}
|
||||
|
||||
// freed returns whether a given page is in the free list.
|
||||
func (f *freelist) freed(pgid pgid) bool {
|
||||
return f.cache[pgid]
|
||||
}
|
||||
|
||||
// read initializes the freelist from a freelist page.
|
||||
func (f *freelist) read(p *page) {
|
||||
// If the page.count is at the max uint16 value (64k) then it's considered
|
||||
// an overflow and the size of the freelist is stored as the first element.
|
||||
idx, count := 0, int(p.count)
|
||||
if count == 0xFFFF {
|
||||
idx = 1
|
||||
count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
|
||||
}
|
||||
|
||||
// Copy the list of page ids from the freelist.
|
||||
if count == 0 {
|
||||
f.ids = nil
|
||||
} else {
|
||||
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
|
||||
f.ids = make([]pgid, len(ids))
|
||||
copy(f.ids, ids)
|
||||
|
||||
// Make sure they're sorted.
|
||||
sort.Sort(pgids(f.ids))
|
||||
}
|
||||
|
||||
// Rebuild the page cache.
|
||||
f.reindex()
|
||||
}
|
||||
|
||||
// write writes the page ids onto a freelist page. All free and pending ids are
|
||||
// saved to disk since in the event of a program crash, all pending ids will
|
||||
// become free.
|
||||
func (f *freelist) write(p *page) error {
|
||||
// Combine the old free pgids and pgids waiting on an open transaction.
|
||||
ids := f.all()
|
||||
|
||||
// Update the header flag.
|
||||
p.flags |= freelistPageFlag
|
||||
|
||||
// The page.count can only hold up to 64k elements so if we overflow that
|
||||
// number then we handle it by putting the size in the first element.
|
||||
if len(ids) == 0 {
|
||||
p.count = uint16(len(ids))
|
||||
} else if len(ids) < 0xFFFF {
|
||||
p.count = uint16(len(ids))
|
||||
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
|
||||
} else {
|
||||
p.count = 0xFFFF
|
||||
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
|
||||
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// reload reads the freelist from a page and filters out pending items.
|
||||
func (f *freelist) reload(p *page) {
|
||||
f.read(p)
|
||||
|
||||
// Build a cache of only pending pages.
|
||||
pcache := make(map[pgid]bool)
|
||||
for _, pendingIDs := range f.pending {
|
||||
for _, pendingID := range pendingIDs {
|
||||
pcache[pendingID] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Check each page in the freelist and build a new available freelist
|
||||
// with any pages not in the pending lists.
|
||||
var a []pgid
|
||||
for _, id := range f.ids {
|
||||
if !pcache[id] {
|
||||
a = append(a, id)
|
||||
}
|
||||
}
|
||||
f.ids = a
|
||||
|
||||
// Once the available list is rebuilt then rebuild the free cache so that
|
||||
// it includes the available and pending free pages.
|
||||
f.reindex()
|
||||
}
|
||||
|
||||
// reindex rebuilds the free cache based on available and pending free lists.
|
||||
func (f *freelist) reindex() {
|
||||
f.cache = make(map[pgid]bool, len(f.ids))
|
||||
for _, id := range f.ids {
|
||||
f.cache[id] = true
|
||||
}
|
||||
for _, pendingIDs := range f.pending {
|
||||
for _, pendingID := range pendingIDs {
|
||||
f.cache[pendingID] = true
|
||||
}
|
||||
}
|
||||
}
604 vendor/github.com/boltdb/bolt/node.go (generated, vendored, new file)
@@ -0,0 +1,604 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// node represents an in-memory, deserialized page.
|
||||
type node struct {
|
||||
bucket *Bucket
|
||||
isLeaf bool
|
||||
unbalanced bool
|
||||
spilled bool
|
||||
key []byte
|
||||
pgid pgid
|
||||
parent *node
|
||||
children nodes
|
||||
inodes inodes
|
||||
}
|
||||
|
||||
// root returns the top-level node this node is attached to.
|
||||
func (n *node) root() *node {
|
||||
if n.parent == nil {
|
||||
return n
|
||||
}
|
||||
return n.parent.root()
|
||||
}
|
||||
|
||||
// minKeys returns the minimum number of inodes this node should have.
|
||||
func (n *node) minKeys() int {
|
||||
if n.isLeaf {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}
|
||||
|
||||
// size returns the size of the node after serialization.
|
||||
func (n *node) size() int {
|
||||
sz, elsz := pageHeaderSize, n.pageElementSize()
|
||||
for i := 0; i < len(n.inodes); i++ {
|
||||
item := &n.inodes[i]
|
||||
sz += elsz + len(item.key) + len(item.value)
|
||||
}
|
||||
return sz
|
||||
}
|
||||
|
||||
// sizeLessThan returns true if the node is less than a given size.
|
||||
// This is an optimization to avoid calculating a large node when we only need
|
||||
// to know if it fits inside a certain page size.
|
||||
func (n *node) sizeLessThan(v int) bool {
|
||||
sz, elsz := pageHeaderSize, n.pageElementSize()
|
||||
for i := 0; i < len(n.inodes); i++ {
|
||||
item := &n.inodes[i]
|
||||
sz += elsz + len(item.key) + len(item.value)
|
||||
if sz >= v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// pageElementSize returns the size of each page element based on the type of node.
|
||||
func (n *node) pageElementSize() int {
|
||||
if n.isLeaf {
|
||||
return leafPageElementSize
|
||||
}
|
||||
return branchPageElementSize
|
||||
}
|
||||
|
||||
// childAt returns the child node at a given index.
|
||||
func (n *node) childAt(index int) *node {
|
||||
if n.isLeaf {
|
||||
panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
|
||||
}
|
||||
return n.bucket.node(n.inodes[index].pgid, n)
|
||||
}
|
||||
|
||||
// childIndex returns the index of a given child node.
|
||||
func (n *node) childIndex(child *node) int {
|
||||
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
|
||||
return index
|
||||
}
|
||||
|
||||
// numChildren returns the number of children.
|
||||
func (n *node) numChildren() int {
|
||||
return len(n.inodes)
|
||||
}
|
||||
|
||||
// nextSibling returns the next node with the same parent.
|
||||
func (n *node) nextSibling() *node {
|
||||
if n.parent == nil {
|
||||
return nil
|
||||
}
|
||||
index := n.parent.childIndex(n)
|
||||
if index >= n.parent.numChildren()-1 {
|
||||
return nil
|
||||
}
|
||||
return n.parent.childAt(index + 1)
|
||||
}
|
||||
|
||||
// prevSibling returns the previous node with the same parent.
|
||||
func (n *node) prevSibling() *node {
|
||||
if n.parent == nil {
|
||||
return nil
|
||||
}
|
||||
index := n.parent.childIndex(n)
|
||||
if index == 0 {
|
||||
return nil
|
||||
}
|
||||
return n.parent.childAt(index - 1)
|
||||
}
|
||||
|
||||
// put inserts a key/value.
|
||||
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
|
||||
if pgid >= n.bucket.tx.meta.pgid {
|
||||
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
|
||||
} else if len(oldKey) <= 0 {
|
||||
panic("put: zero-length old key")
|
||||
} else if len(newKey) <= 0 {
|
||||
panic("put: zero-length new key")
|
||||
}
|
||||
|
||||
// Find insertion index.
|
||||
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
|
||||
|
||||
// Add capacity and shift nodes if we don't have an exact match and need to insert.
|
||||
exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
|
||||
if !exact {
|
||||
n.inodes = append(n.inodes, inode{})
|
||||
copy(n.inodes[index+1:], n.inodes[index:])
|
||||
}
|
||||
|
||||
inode := &n.inodes[index]
|
||||
inode.flags = flags
|
||||
inode.key = newKey
|
||||
inode.value = value
|
||||
inode.pgid = pgid
|
||||
_assert(len(inode.key) > 0, "put: zero-length inode key")
|
||||
}
|
||||
|
||||
// del removes a key from the node.
|
||||
func (n *node) del(key []byte) {
|
||||
// Find index of key.
|
||||
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
|
||||
|
||||
// Exit if the key isn't found.
|
||||
if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
|
||||
return
|
||||
}
|
||||
|
||||
// Delete inode from the node.
|
||||
n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
|
||||
|
||||
// Mark the node as needing rebalancing.
|
||||
n.unbalanced = true
|
||||
}
|
||||
|
||||
// read initializes the node from a page.
|
||||
func (n *node) read(p *page) {
|
||||
n.pgid = p.id
|
||||
n.isLeaf = ((p.flags & leafPageFlag) != 0)
|
||||
n.inodes = make(inodes, int(p.count))
|
||||
|
||||
for i := 0; i < int(p.count); i++ {
|
||||
inode := &n.inodes[i]
|
||||
if n.isLeaf {
|
||||
elem := p.leafPageElement(uint16(i))
|
||||
inode.flags = elem.flags
|
||||
inode.key = elem.key()
|
||||
inode.value = elem.value()
|
||||
} else {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
inode.pgid = elem.pgid
|
||||
inode.key = elem.key()
|
||||
}
|
||||
_assert(len(inode.key) > 0, "read: zero-length inode key")
|
||||
}
|
||||
|
||||
// Save first key so we can find the node in the parent when we spill.
|
||||
if len(n.inodes) > 0 {
|
||||
n.key = n.inodes[0].key
|
||||
_assert(len(n.key) > 0, "read: zero-length node key")
|
||||
} else {
|
||||
n.key = nil
|
||||
}
|
||||
}
|
||||
|
||||
// write writes the items onto one or more pages.
|
||||
func (n *node) write(p *page) {
|
||||
// Initialize page.
|
||||
if n.isLeaf {
|
||||
p.flags |= leafPageFlag
|
||||
} else {
|
||||
p.flags |= branchPageFlag
|
||||
}
|
||||
|
||||
if len(n.inodes) >= 0xFFFF {
|
||||
panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
|
||||
}
|
||||
p.count = uint16(len(n.inodes))
|
||||
|
||||
// Stop here if there are no items to write.
|
||||
if p.count == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Loop over each item and write it to the page.
|
||||
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
|
||||
for i, item := range n.inodes {
|
||||
_assert(len(item.key) > 0, "write: zero-length inode key")
|
||||
|
||||
// Write the page element.
|
||||
if n.isLeaf {
|
||||
elem := p.leafPageElement(uint16(i))
|
||||
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
|
||||
elem.flags = item.flags
|
||||
elem.ksize = uint32(len(item.key))
|
||||
elem.vsize = uint32(len(item.value))
|
||||
} else {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
|
||||
elem.ksize = uint32(len(item.key))
|
||||
elem.pgid = item.pgid
|
||||
_assert(elem.pgid != p.id, "write: circular dependency occurred")
|
||||
}
|
||||
|
||||
// If the length of key+value is larger than the max allocation size
|
||||
// then we need to reallocate the byte array pointer.
|
||||
//
|
||||
// See: https://github.com/boltdb/bolt/pull/335
|
||||
klen, vlen := len(item.key), len(item.value)
|
||||
if len(b) < klen+vlen {
|
||||
b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
|
||||
}
|
||||
|
||||
// Write data for the element to the end of the page.
|
||||
copy(b[0:], item.key)
|
||||
b = b[klen:]
|
||||
copy(b[0:], item.value)
|
||||
b = b[vlen:]
|
||||
}
|
||||
|
||||
// DEBUG ONLY: n.dump()
|
||||
}
|
||||
|
||||
// split breaks up a node into multiple smaller nodes, if appropriate.
|
||||
// This should only be called from the spill() function.
|
||||
func (n *node) split(pageSize int) []*node {
|
||||
var nodes []*node
|
||||
|
||||
node := n
|
||||
for {
|
||||
// Split node into two.
|
||||
a, b := node.splitTwo(pageSize)
|
||||
nodes = append(nodes, a)
|
||||
|
||||
// If we can't split then exit the loop.
|
||||
if b == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Set node to b so it gets split on the next iteration.
|
||||
node = b
|
||||
}
|
||||
|
||||
return nodes
|
||||
}
|
||||
|
||||
// splitTwo breaks up a node into two smaller nodes, if appropriate.
|
||||
// This should only be called from the split() function.
|
||||
func (n *node) splitTwo(pageSize int) (*node, *node) {
|
||||
// Ignore the split if the page doesn't have at least enough nodes for
|
||||
// two pages or if the nodes can fit in a single page.
|
||||
if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Determine the threshold before starting a new node.
|
||||
var fillPercent = n.bucket.FillPercent
|
||||
if fillPercent < minFillPercent {
|
||||
fillPercent = minFillPercent
|
||||
} else if fillPercent > maxFillPercent {
|
||||
fillPercent = maxFillPercent
|
||||
}
|
||||
threshold := int(float64(pageSize) * fillPercent)
|
||||
|
||||
// Determine split position and sizes of the two pages.
|
||||
splitIndex, _ := n.splitIndex(threshold)
|
||||
|
||||
// Split node into two separate nodes.
|
||||
// If there's no parent then we'll need to create one.
|
||||
if n.parent == nil {
|
||||
n.parent = &node{bucket: n.bucket, children: []*node{n}}
|
||||
}
|
||||
|
||||
// Create a new node and add it to the parent.
|
||||
next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
|
||||
n.parent.children = append(n.parent.children, next)
|
||||
|
||||
// Split inodes across two nodes.
|
||||
next.inodes = n.inodes[splitIndex:]
|
||||
n.inodes = n.inodes[:splitIndex]
|
||||
|
||||
// Update the statistics.
|
||||
n.bucket.tx.stats.Split++
|
||||
|
||||
return n, next
|
||||
}
|
||||
|
||||
// splitIndex finds the position where a page will fill a given threshold.
|
||||
// It returns the index as well as the size of the first page.
|
||||
// This is only called from split().
|
||||
func (n *node) splitIndex(threshold int) (index, sz int) {
|
||||
sz = pageHeaderSize
|
||||
|
||||
// Loop until we only have the minimum number of keys required for the second page.
|
||||
for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
|
||||
index = i
|
||||
inode := n.inodes[i]
|
||||
elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
|
||||
|
||||
// If we have at least the minimum number of keys and adding another
|
||||
// node would put us over the threshold then exit and return.
|
||||
if i >= minKeysPerPage && sz+elsize > threshold {
|
||||
break
|
||||
}
|
||||
|
||||
// Add the element size to the total size.
|
||||
sz += elsize
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// spill writes the nodes to dirty pages and splits nodes as it goes.
|
||||
// Returns an error if dirty pages cannot be allocated.
|
||||
func (n *node) spill() error {
|
||||
var tx = n.bucket.tx
|
||||
if n.spilled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Spill child nodes first. Child nodes can materialize sibling nodes in
|
||||
// the case of split-merge so we cannot use a range loop. We have to check
|
||||
// the children size on every loop iteration.
|
||||
sort.Sort(n.children)
|
||||
for i := 0; i < len(n.children); i++ {
|
||||
if err := n.children[i].spill(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// We no longer need the child list because it's only used for spill tracking.
|
||||
n.children = nil
|
||||
|
||||
// Split nodes into appropriate sizes. The first node will always be n.
|
||||
var nodes = n.split(tx.db.pageSize)
|
||||
for _, node := range nodes {
|
||||
// Add node's page to the freelist if it's not new.
|
||||
if node.pgid > 0 {
|
||||
tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
|
||||
node.pgid = 0
|
||||
}
|
||||
|
||||
// Allocate contiguous space for the node.
|
||||
p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write the node.
|
||||
if p.id >= tx.meta.pgid {
|
||||
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
|
||||
}
|
||||
node.pgid = p.id
|
||||
node.write(p)
|
||||
node.spilled = true
|
||||
|
||||
// Insert into parent inodes.
|
||||
if node.parent != nil {
|
||||
var key = node.key
|
||||
if key == nil {
|
||||
key = node.inodes[0].key
|
||||
}
|
||||
|
||||
node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
|
||||
node.key = node.inodes[0].key
|
||||
_assert(len(node.key) > 0, "spill: zero-length node key")
|
||||
}
|
||||
|
||||
// Update the statistics.
|
||||
tx.stats.Spill++
|
||||
}
|
||||
|
||||
// If the root node split and created a new root then we need to spill that
|
||||
// as well. We'll clear out the children to make sure it doesn't try to respill.
|
||||
if n.parent != nil && n.parent.pgid == 0 {
|
||||
n.children = nil
|
||||
return n.parent.spill()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// rebalance attempts to combine the node with sibling nodes if the node fill
|
||||
// size is below a threshold or if there are not enough keys.
|
||||
func (n *node) rebalance() {
|
||||
if !n.unbalanced {
|
||||
return
|
||||
}
|
||||
n.unbalanced = false
|
||||
|
||||
// Update statistics.
|
||||
n.bucket.tx.stats.Rebalance++
|
||||
|
||||
// Ignore if node is above threshold (25%) and has enough keys.
|
||||
var threshold = n.bucket.tx.db.pageSize / 4
|
||||
if n.size() > threshold && len(n.inodes) > n.minKeys() {
|
||||
return
|
||||
}
|
||||
|
||||
// Root node has special handling.
|
||||
if n.parent == nil {
|
||||
// If root node is a branch and only has one node then collapse it.
|
||||
if !n.isLeaf && len(n.inodes) == 1 {
|
||||
// Move root's child up.
|
||||
child := n.bucket.node(n.inodes[0].pgid, n)
|
||||
n.isLeaf = child.isLeaf
|
||||
n.inodes = child.inodes[:]
|
||||
n.children = child.children
|
||||
|
||||
// Reparent all child nodes being moved.
|
||||
for _, inode := range n.inodes {
|
||||
if child, ok := n.bucket.nodes[inode.pgid]; ok {
|
||||
child.parent = n
|
||||
}
|
||||
}
|
||||
|
||||
// Remove old child.
|
||||
child.parent = nil
|
||||
delete(n.bucket.nodes, child.pgid)
|
||||
child.free()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// If node has no keys then just remove it.
|
||||
if n.numChildren() == 0 {
|
||||
n.parent.del(n.key)
|
||||
n.parent.removeChild(n)
|
||||
delete(n.bucket.nodes, n.pgid)
|
||||
n.free()
|
||||
n.parent.rebalance()
|
||||
return
|
||||
}
|
||||
|
||||
_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
|
||||
|
||||
// Destination node is right sibling if idx == 0, otherwise left sibling.
|
||||
var target *node
|
||||
var useNextSibling = (n.parent.childIndex(n) == 0)
|
||||
if useNextSibling {
|
||||
target = n.nextSibling()
|
||||
} else {
|
||||
target = n.prevSibling()
|
||||
}
|
||||
|
||||
// If both this node and the target node are too small then merge them.
|
||||
if useNextSibling {
|
||||
// Reparent all child nodes being moved.
|
||||
for _, inode := range target.inodes {
|
||||
if child, ok := n.bucket.nodes[inode.pgid]; ok {
|
||||
child.parent.removeChild(child)
|
||||
child.parent = n
|
||||
child.parent.children = append(child.parent.children, child)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy over inodes from target and remove target.
|
||||
n.inodes = append(n.inodes, target.inodes...)
|
||||
n.parent.del(target.key)
|
||||
n.parent.removeChild(target)
|
||||
delete(n.bucket.nodes, target.pgid)
|
||||
target.free()
|
||||
} else {
|
||||
// Reparent all child nodes being moved.
|
||||
for _, inode := range n.inodes {
|
||||
if child, ok := n.bucket.nodes[inode.pgid]; ok {
|
||||
child.parent.removeChild(child)
|
||||
child.parent = target
|
||||
child.parent.children = append(child.parent.children, child)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy over inodes to target and remove node.
|
||||
target.inodes = append(target.inodes, n.inodes...)
|
||||
n.parent.del(n.key)
|
||||
n.parent.removeChild(n)
|
||||
delete(n.bucket.nodes, n.pgid)
|
||||
n.free()
|
||||
}
|
||||
|
||||
// Either this node or the target node was deleted from the parent so rebalance it.
|
||||
n.parent.rebalance()
|
||||
}
|
||||
|
||||
// removeChild removes a node from the list of in-memory children.
|
||||
// This does not affect the inodes.
|
||||
func (n *node) removeChild(target *node) {
|
||||
for i, child := range n.children {
|
||||
if child == target {
|
||||
n.children = append(n.children[:i], n.children[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dereference causes the node to copy all its inode key/value references to heap memory.
|
||||
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
|
||||
func (n *node) dereference() {
|
||||
if n.key != nil {
|
||||
key := make([]byte, len(n.key))
|
||||
copy(key, n.key)
|
||||
n.key = key
|
||||
_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
|
||||
}
|
||||
|
||||
for i := range n.inodes {
|
||||
inode := &n.inodes[i]
|
||||
|
||||
key := make([]byte, len(inode.key))
|
||||
copy(key, inode.key)
|
||||
inode.key = key
|
||||
_assert(len(inode.key) > 0, "dereference: zero-length inode key")
|
||||
|
||||
value := make([]byte, len(inode.value))
|
||||
copy(value, inode.value)
|
||||
inode.value = value
|
||||
}
|
||||
|
||||
// Recursively dereference children.
|
||||
for _, child := range n.children {
|
||||
child.dereference()
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
n.bucket.tx.stats.NodeDeref++
|
||||
}
|
||||
|
||||
// free adds the node's underlying page to the freelist.
|
||||
func (n *node) free() {
|
||||
if n.pgid != 0 {
|
||||
n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
|
||||
n.pgid = 0
|
||||
}
|
||||
}
|
||||
|
||||
// dump writes the contents of the node to STDERR for debugging purposes.
|
||||
/*
|
||||
func (n *node) dump() {
|
||||
// Write node header.
|
||||
var typ = "branch"
|
||||
if n.isLeaf {
|
||||
typ = "leaf"
|
||||
}
|
||||
warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
|
||||
|
||||
// Write out abbreviated version of each item.
|
||||
for _, item := range n.inodes {
|
||||
if n.isLeaf {
|
||||
if item.flags&bucketLeafFlag != 0 {
|
||||
bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
|
||||
warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
|
||||
} else {
|
||||
warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
|
||||
}
|
||||
} else {
|
||||
warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
|
||||
}
|
||||
}
|
||||
warn("")
|
||||
}
|
||||
*/
|
||||
|
||||
type nodes []*node
|
||||
|
||||
func (s nodes) Len() int { return len(s) }
|
||||
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
|
||||
|
||||
// inode represents an internal node inside of a node.
|
||||
// It can be used to point to elements in a page or point
|
||||
// to an element which hasn't been added to a page yet.
|
||||
type inode struct {
|
||||
flags uint32
|
||||
pgid pgid
|
||||
key []byte
|
||||
value []byte
|
||||
}
|
||||
|
||||
type inodes []inode
|
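The inode bookkeeping above (node.put and node.del) boils down to maintaining a slice sorted by key: binary-search for the position, replace on an exact match, otherwise shift and insert. A rough standalone sketch of that pattern, using plain strings instead of inodes (not part of the vendored code):

package main

import (
	"fmt"
	"sort"
)

type entry struct{ key, value string }

// put inserts or replaces an entry, keeping entries sorted by key,
// mirroring the sort.Search plus shift logic in node.put above.
func put(entries []entry, e entry) []entry {
	i := sort.Search(len(entries), func(i int) bool { return entries[i].key >= e.key })
	if i < len(entries) && entries[i].key == e.key {
		entries[i] = e // exact match: replace in place
		return entries
	}
	entries = append(entries, entry{}) // grow by one
	copy(entries[i+1:], entries[i:])   // shift the tail right
	entries[i] = e
	return entries
}

// del removes the entry with the given key if present, as in node.del.
func del(entries []entry, key string) []entry {
	i := sort.Search(len(entries), func(i int) bool { return entries[i].key >= key })
	if i >= len(entries) || entries[i].key != key {
		return entries
	}
	return append(entries[:i], entries[i+1:]...)
}

func main() {
	es := []entry{}
	for _, k := range []string{"b", "a", "c", "b"} {
		es = put(es, entry{k, "v-" + k})
	}
	es = del(es, "a")
	fmt.Println(es) // [{b v-b} {c v-c}]
}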
178
vendor/github.com/boltdb/bolt/page.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
|
||||
|
||||
const minKeysPerPage = 2
|
||||
|
||||
const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
|
||||
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
|
||||
|
||||
const (
|
||||
branchPageFlag = 0x01
|
||||
leafPageFlag = 0x02
|
||||
metaPageFlag = 0x04
|
||||
freelistPageFlag = 0x10
|
||||
)
|
||||
|
||||
const (
|
||||
bucketLeafFlag = 0x01
|
||||
)
|
||||
|
||||
type pgid uint64
|
||||
|
||||
type page struct {
|
||||
id pgid
|
||||
flags uint16
|
||||
count uint16
|
||||
overflow uint32
|
||||
ptr uintptr
|
||||
}
|
||||
|
||||
// typ returns a human readable page type string used for debugging.
|
||||
func (p *page) typ() string {
|
||||
if (p.flags & branchPageFlag) != 0 {
|
||||
return "branch"
|
||||
} else if (p.flags & leafPageFlag) != 0 {
|
||||
return "leaf"
|
||||
} else if (p.flags & metaPageFlag) != 0 {
|
||||
return "meta"
|
||||
} else if (p.flags & freelistPageFlag) != 0 {
|
||||
return "freelist"
|
||||
}
|
||||
return fmt.Sprintf("unknown<%02x>", p.flags)
|
||||
}
|
||||
|
||||
// meta returns a pointer to the metadata section of the page.
|
||||
func (p *page) meta() *meta {
|
||||
return (*meta)(unsafe.Pointer(&p.ptr))
|
||||
}
|
||||
|
||||
// leafPageElement retrieves the leaf node by index
|
||||
func (p *page) leafPageElement(index uint16) *leafPageElement {
|
||||
n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
|
||||
return n
|
||||
}
|
||||
|
||||
// leafPageElements retrieves a list of leaf nodes.
|
||||
func (p *page) leafPageElements() []leafPageElement {
|
||||
if p.count == 0 {
|
||||
return nil
|
||||
}
|
||||
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||
}
|
||||
|
||||
// branchPageElement retrieves the branch node by index
|
||||
func (p *page) branchPageElement(index uint16) *branchPageElement {
|
||||
return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
|
||||
}
|
||||
|
||||
// branchPageElements retrieves a list of branch nodes.
|
||||
func (p *page) branchPageElements() []branchPageElement {
|
||||
if p.count == 0 {
|
||||
return nil
|
||||
}
|
||||
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||
}
|
||||
|
||||
// dump writes n bytes of the page to STDERR as hex output.
|
||||
func (p *page) hexdump(n int) {
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
|
||||
fmt.Fprintf(os.Stderr, "%x\n", buf)
|
||||
}
|
||||
|
||||
type pages []*page
|
||||
|
||||
func (s pages) Len() int { return len(s) }
|
||||
func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
|
||||
|
||||
// branchPageElement represents a node on a branch page.
|
||||
type branchPageElement struct {
|
||||
pos uint32
|
||||
ksize uint32
|
||||
pgid pgid
|
||||
}
|
||||
|
||||
// key returns a byte slice of the node key.
|
||||
func (n *branchPageElement) key() []byte {
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
|
||||
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
|
||||
}
|
||||
|
||||
// leafPageElement represents a node on a leaf page.
|
||||
type leafPageElement struct {
|
||||
flags uint32
|
||||
pos uint32
|
||||
ksize uint32
|
||||
vsize uint32
|
||||
}
|
||||
|
||||
// key returns a byte slice of the node key.
|
||||
func (n *leafPageElement) key() []byte {
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
|
||||
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
|
||||
}
|
||||
|
||||
// value returns a byte slice of the node value.
|
||||
func (n *leafPageElement) value() []byte {
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
|
||||
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
|
||||
}
|
||||
|
||||
// PageInfo represents human readable information about a page.
|
||||
type PageInfo struct {
|
||||
ID int
|
||||
Type string
|
||||
Count int
|
||||
OverflowCount int
|
||||
}
|
||||
|
||||
type pgids []pgid
|
||||
|
||||
func (s pgids) Len() int { return len(s) }
|
||||
func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
|
||||
|
||||
// merge returns the sorted union of a and b.
|
||||
func (a pgids) merge(b pgids) pgids {
|
||||
// Return the opposite slice if one is nil.
|
||||
if len(a) == 0 {
|
||||
return b
|
||||
} else if len(b) == 0 {
|
||||
return a
|
||||
}
|
||||
|
||||
// Create a list to hold all elements from both lists.
|
||||
merged := make(pgids, 0, len(a)+len(b))
|
||||
|
||||
// Assign lead to the slice with a lower starting value, follow to the higher value.
|
||||
lead, follow := a, b
|
||||
if b[0] < a[0] {
|
||||
lead, follow = b, a
|
||||
}
|
||||
|
||||
// Continue while there are elements in the lead.
|
||||
for len(lead) > 0 {
|
||||
// Merge largest prefix of lead that is ahead of follow[0].
|
||||
n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
|
||||
merged = append(merged, lead[:n]...)
|
||||
if n >= len(lead) {
|
||||
break
|
||||
}
|
||||
|
||||
// Swap lead and follow.
|
||||
lead, follow = follow, lead[n:]
|
||||
}
|
||||
|
||||
// Append what's left in follow.
|
||||
merged = append(merged, follow...)
|
||||
|
||||
return merged
|
||||
}
|
682
vendor/github.com/boltdb/bolt/tx.go
generated
vendored
Normal file
@ -0,0 +1,682 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// txid represents the internal transaction identifier.
|
||||
type txid uint64
|
||||
|
||||
// Tx represents a read-only or read/write transaction on the database.
|
||||
// Read-only transactions can be used for retrieving values for keys and creating cursors.
|
||||
// Read/write transactions can create and remove buckets and create and remove keys.
|
||||
//
|
||||
// IMPORTANT: You must commit or rollback transactions when you are done with
|
||||
// them. Pages can not be reclaimed by the writer until no more transactions
|
||||
// are using them. A long running read transaction can cause the database to
|
||||
// quickly grow.
|
||||
type Tx struct {
|
||||
writable bool
|
||||
managed bool
|
||||
db *DB
|
||||
meta *meta
|
||||
root Bucket
|
||||
pages map[pgid]*page
|
||||
stats TxStats
|
||||
commitHandlers []func()
|
||||
|
||||
// WriteFlag specifies the flag for write-related methods like WriteTo().
|
||||
// Tx opens the database file with the specified flag to copy the data.
|
||||
//
|
||||
// By default, the flag is unset, which works well for mostly in-memory
|
||||
// workloads. For databases that are much larger than available RAM,
|
||||
// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
|
||||
WriteFlag int
|
||||
}
|
||||
|
||||
// init initializes the transaction.
|
||||
func (tx *Tx) init(db *DB) {
|
||||
tx.db = db
|
||||
tx.pages = nil
|
||||
|
||||
// Copy the meta page since it can be changed by the writer.
|
||||
tx.meta = &meta{}
|
||||
db.meta().copy(tx.meta)
|
||||
|
||||
// Copy over the root bucket.
|
||||
tx.root = newBucket(tx)
|
||||
tx.root.bucket = &bucket{}
|
||||
*tx.root.bucket = tx.meta.root
|
||||
|
||||
// Increment the transaction id and add a page cache for writable transactions.
|
||||
if tx.writable {
|
||||
tx.pages = make(map[pgid]*page)
|
||||
tx.meta.txid += txid(1)
|
||||
}
|
||||
}
|
||||
|
||||
// ID returns the transaction id.
|
||||
func (tx *Tx) ID() int {
|
||||
return int(tx.meta.txid)
|
||||
}
|
||||
|
||||
// DB returns a reference to the database that created the transaction.
|
||||
func (tx *Tx) DB() *DB {
|
||||
return tx.db
|
||||
}
|
||||
|
||||
// Size returns current database size in bytes as seen by this transaction.
|
||||
func (tx *Tx) Size() int64 {
|
||||
return int64(tx.meta.pgid) * int64(tx.db.pageSize)
|
||||
}
|
||||
|
||||
// Writable returns whether the transaction can perform write operations.
|
||||
func (tx *Tx) Writable() bool {
|
||||
return tx.writable
|
||||
}
|
||||
|
||||
// Cursor creates a cursor associated with the root bucket.
|
||||
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
|
||||
// The cursor is only valid as long as the transaction is open.
|
||||
// Do not use a cursor after the transaction is closed.
|
||||
func (tx *Tx) Cursor() *Cursor {
|
||||
return tx.root.Cursor()
|
||||
}
|
||||
|
||||
// Stats retrieves a copy of the current transaction statistics.
|
||||
func (tx *Tx) Stats() TxStats {
|
||||
return tx.stats
|
||||
}
|
||||
|
||||
// Bucket retrieves a bucket by name.
|
||||
// Returns nil if the bucket does not exist.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (tx *Tx) Bucket(name []byte) *Bucket {
|
||||
return tx.root.Bucket(name)
|
||||
}
|
||||
|
||||
// CreateBucket creates a new bucket.
|
||||
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
|
||||
return tx.root.CreateBucket(name)
|
||||
}
|
||||
|
||||
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
|
||||
// Returns an error if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
|
||||
return tx.root.CreateBucketIfNotExists(name)
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket.
|
||||
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
|
||||
func (tx *Tx) DeleteBucket(name []byte) error {
|
||||
return tx.root.DeleteBucket(name)
|
||||
}
|
||||
|
||||
// ForEach executes a function for each bucket in the root.
|
||||
// If the provided function returns an error then the iteration is stopped and
|
||||
// the error is returned to the caller.
|
||||
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
|
||||
return tx.root.ForEach(func(k, v []byte) error {
|
||||
if err := fn(k, tx.root.Bucket(k)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// OnCommit adds a handler function to be executed after the transaction successfully commits.
|
||||
func (tx *Tx) OnCommit(fn func()) {
|
||||
tx.commitHandlers = append(tx.commitHandlers, fn)
|
||||
}
|
||||
|
||||
// Commit writes all changes to disk and updates the meta page.
|
||||
// Returns an error if a disk write error occurs, or if Commit is
|
||||
// called on a read-only transaction.
|
||||
func (tx *Tx) Commit() error {
|
||||
_assert(!tx.managed, "managed tx commit not allowed")
|
||||
if tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !tx.writable {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
|
||||
|
||||
// Rebalance nodes which have had deletions.
|
||||
var startTime = time.Now()
|
||||
tx.root.rebalance()
|
||||
if tx.stats.Rebalance > 0 {
|
||||
tx.stats.RebalanceTime += time.Since(startTime)
|
||||
}
|
||||
|
||||
// spill data onto dirty pages.
|
||||
startTime = time.Now()
|
||||
if err := tx.root.spill(); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
tx.stats.SpillTime += time.Since(startTime)
|
||||
|
||||
// Free the old root bucket.
|
||||
tx.meta.root.root = tx.root.root
|
||||
|
||||
opgid := tx.meta.pgid
|
||||
|
||||
// Free the freelist and allocate new pages for it. This will overestimate
|
||||
// the size of the freelist but not underestimate the size (which would be bad).
|
||||
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
|
||||
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
|
||||
if err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
if err := tx.db.freelist.write(p); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
tx.meta.freelist = p.id
|
||||
|
||||
// If the high water mark has moved up then attempt to grow the database.
|
||||
if tx.meta.pgid > opgid {
|
||||
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Write dirty pages to disk.
|
||||
startTime = time.Now()
|
||||
if err := tx.write(); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
// If strict mode is enabled then perform a consistency check.
|
||||
// Only the first consistency error is reported in the panic.
|
||||
if tx.db.StrictMode {
|
||||
ch := tx.Check()
|
||||
var errs []string
|
||||
for {
|
||||
err, ok := <-ch
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
errs = append(errs, err.Error())
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
panic("check fail: " + strings.Join(errs, "\n"))
|
||||
}
|
||||
}
|
||||
|
||||
// Write meta to disk.
|
||||
if err := tx.writeMeta(); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
tx.stats.WriteTime += time.Since(startTime)
|
||||
|
||||
// Finalize the transaction.
|
||||
tx.close()
|
||||
|
||||
// Execute commit handlers now that the locks have been removed.
|
||||
for _, fn := range tx.commitHandlers {
|
||||
fn()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rollback closes the transaction and ignores all previous updates. Read-only
|
||||
// transactions must be rolled back and not committed.
|
||||
func (tx *Tx) Rollback() error {
|
||||
_assert(!tx.managed, "managed tx rollback not allowed")
|
||||
if tx.db == nil {
|
||||
return ErrTxClosed
|
||||
}
|
||||
tx.rollback()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tx *Tx) rollback() {
|
||||
if tx.db == nil {
|
||||
return
|
||||
}
|
||||
if tx.writable {
|
||||
tx.db.freelist.rollback(tx.meta.txid)
|
||||
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
|
||||
}
|
||||
tx.close()
|
||||
}
|
||||
|
||||
func (tx *Tx) close() {
|
||||
if tx.db == nil {
|
||||
return
|
||||
}
|
||||
if tx.writable {
|
||||
// Grab freelist stats.
|
||||
var freelistFreeN = tx.db.freelist.free_count()
|
||||
var freelistPendingN = tx.db.freelist.pending_count()
|
||||
var freelistAlloc = tx.db.freelist.size()
|
||||
|
||||
// Remove transaction ref & writer lock.
|
||||
tx.db.rwtx = nil
|
||||
tx.db.rwlock.Unlock()
|
||||
|
||||
// Merge statistics.
|
||||
tx.db.statlock.Lock()
|
||||
tx.db.stats.FreePageN = freelistFreeN
|
||||
tx.db.stats.PendingPageN = freelistPendingN
|
||||
tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
|
||||
tx.db.stats.FreelistInuse = freelistAlloc
|
||||
tx.db.stats.TxStats.add(&tx.stats)
|
||||
tx.db.statlock.Unlock()
|
||||
} else {
|
||||
tx.db.removeTx(tx)
|
||||
}
|
||||
|
||||
// Clear all references.
|
||||
tx.db = nil
|
||||
tx.meta = nil
|
||||
tx.root = Bucket{tx: tx}
|
||||
tx.pages = nil
|
||||
}
|
||||
|
||||
// Copy writes the entire database to a writer.
|
||||
// This function exists for backwards compatibility. Use WriteTo() instead.
|
||||
func (tx *Tx) Copy(w io.Writer) error {
|
||||
_, err := tx.WriteTo(w)
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteTo writes the entire database to a writer.
|
||||
// If err == nil then exactly tx.Size() bytes will be written into the writer.
|
||||
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
|
||||
// Attempt to open reader with WriteFlag
|
||||
f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
// Generate a meta page. We use the same page data for both meta pages.
|
||||
buf := make([]byte, tx.db.pageSize)
|
||||
page := (*page)(unsafe.Pointer(&buf[0]))
|
||||
page.flags = metaPageFlag
|
||||
*page.meta() = *tx.meta
|
||||
|
||||
// Write meta 0.
|
||||
page.id = 0
|
||||
page.meta().checksum = page.meta().sum64()
|
||||
nn, err := w.Write(buf)
|
||||
n += int64(nn)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("meta 0 copy: %s", err)
|
||||
}
|
||||
|
||||
// Write meta 1 with a lower transaction id.
|
||||
page.id = 1
|
||||
page.meta().txid -= 1
|
||||
page.meta().checksum = page.meta().sum64()
|
||||
nn, err = w.Write(buf)
|
||||
n += int64(nn)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("meta 1 copy: %s", err)
|
||||
}
|
||||
|
||||
// Move past the meta pages in the file.
|
||||
if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
|
||||
return n, fmt.Errorf("seek: %s", err)
|
||||
}
|
||||
|
||||
// Copy data pages.
|
||||
wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
|
||||
n += wn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
return n, f.Close()
|
||||
}
|
||||
|
||||
// CopyFile copies the entire database to file at the given path.
|
||||
// A reader transaction is maintained during the copy so it is safe to continue
|
||||
// using the database while a copy is in progress.
|
||||
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
|
||||
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = tx.Copy(f)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
// Check performs several consistency checks on the database for this transaction.
|
||||
// An error is returned if any inconsistency is found.
|
||||
//
|
||||
// It can be safely run concurrently on a writable transaction. However, this
|
||||
// incurs a high cost for large databases and databases with a lot of subbuckets
|
||||
// because of caching. This overhead can be removed if running on a read-only
|
||||
// transaction; however, it is not safe to execute other writer transactions at
|
||||
// the same time.
|
||||
func (tx *Tx) Check() <-chan error {
|
||||
ch := make(chan error)
|
||||
go tx.check(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (tx *Tx) check(ch chan error) {
|
||||
// Check if any pages are double freed.
|
||||
freed := make(map[pgid]bool)
|
||||
for _, id := range tx.db.freelist.all() {
|
||||
if freed[id] {
|
||||
ch <- fmt.Errorf("page %d: already freed", id)
|
||||
}
|
||||
freed[id] = true
|
||||
}
|
||||
|
||||
// Track every reachable page.
|
||||
reachable := make(map[pgid]*page)
|
||||
reachable[0] = tx.page(0) // meta0
|
||||
reachable[1] = tx.page(1) // meta1
|
||||
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
|
||||
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
|
||||
}
|
||||
|
||||
// Recursively check buckets.
|
||||
tx.checkBucket(&tx.root, reachable, freed, ch)
|
||||
|
||||
// Ensure all pages below high water mark are either reachable or freed.
|
||||
for i := pgid(0); i < tx.meta.pgid; i++ {
|
||||
_, isReachable := reachable[i]
|
||||
if !isReachable && !freed[i] {
|
||||
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
|
||||
}
|
||||
}
|
||||
|
||||
// Close the channel to signal completion.
|
||||
close(ch)
|
||||
}
|
||||
|
||||
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
|
||||
// Ignore inline buckets.
|
||||
if b.root == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Check every page used by this bucket.
|
||||
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
|
||||
if p.id > tx.meta.pgid {
|
||||
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
|
||||
}
|
||||
|
||||
// Ensure each page is only referenced once.
|
||||
for i := pgid(0); i <= pgid(p.overflow); i++ {
|
||||
var id = p.id + i
|
||||
if _, ok := reachable[id]; ok {
|
||||
ch <- fmt.Errorf("page %d: multiple references", int(id))
|
||||
}
|
||||
reachable[id] = p
|
||||
}
|
||||
|
||||
// We should only encounter un-freed leaf and branch pages.
|
||||
if freed[p.id] {
|
||||
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
|
||||
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
|
||||
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
|
||||
}
|
||||
})
|
||||
|
||||
// Check each bucket within this bucket.
|
||||
_ = b.ForEach(func(k, v []byte) error {
|
||||
if child := b.Bucket(k); child != nil {
|
||||
tx.checkBucket(child, reachable, freed, ch)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// allocate returns a contiguous block of memory starting at a given page.
|
||||
func (tx *Tx) allocate(count int) (*page, error) {
|
||||
p, err := tx.db.allocate(count)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Save to our page cache.
|
||||
tx.pages[p.id] = p
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.PageCount++
|
||||
tx.stats.PageAlloc += count * tx.db.pageSize
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// write writes any dirty pages to disk.
|
||||
func (tx *Tx) write() error {
|
||||
// Sort pages by id.
|
||||
pages := make(pages, 0, len(tx.pages))
|
||||
for _, p := range tx.pages {
|
||||
pages = append(pages, p)
|
||||
}
|
||||
// Clear out page cache early.
|
||||
tx.pages = make(map[pgid]*page)
|
||||
sort.Sort(pages)
|
||||
|
||||
// Write pages to disk in order.
|
||||
for _, p := range pages {
|
||||
size := (int(p.overflow) + 1) * tx.db.pageSize
|
||||
offset := int64(p.id) * int64(tx.db.pageSize)
|
||||
|
||||
// Write out page in "max allocation" sized chunks.
|
||||
ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
|
||||
for {
|
||||
// Limit our write to our max allocation size.
|
||||
sz := size
|
||||
if sz > maxAllocSize-1 {
|
||||
sz = maxAllocSize - 1
|
||||
}
|
||||
|
||||
// Write chunk to disk.
|
||||
buf := ptr[:sz]
|
||||
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.Write++
|
||||
|
||||
// Exit inner for loop if we've written all the chunks.
|
||||
size -= sz
|
||||
if size == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Otherwise move offset forward and move pointer to next chunk.
|
||||
offset += int64(sz)
|
||||
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
|
||||
}
|
||||
}
|
||||
|
||||
// Ignore file sync if flag is set on DB.
|
||||
if !tx.db.NoSync || IgnoreNoSync {
|
||||
if err := fdatasync(tx.db); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Put small pages back to page pool.
|
||||
for _, p := range pages {
|
||||
// Ignore page sizes over 1 page.
|
||||
// These are allocated using make() instead of the page pool.
|
||||
if int(p.overflow) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
|
||||
|
||||
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
|
||||
for i := range buf {
|
||||
buf[i] = 0
|
||||
}
|
||||
tx.db.pagePool.Put(buf)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeMeta writes the meta to the disk.
|
||||
func (tx *Tx) writeMeta() error {
|
||||
// Create a temporary buffer for the meta page.
|
||||
buf := make([]byte, tx.db.pageSize)
|
||||
p := tx.db.pageInBuffer(buf, 0)
|
||||
tx.meta.write(p)
|
||||
|
||||
// Write the meta page to file.
|
||||
if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
|
||||
return err
|
||||
}
|
||||
if !tx.db.NoSync || IgnoreNoSync {
|
||||
if err := fdatasync(tx.db); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.Write++
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// page returns a reference to the page with a given id.
|
||||
// If page has been written to then a temporary buffered page is returned.
|
||||
func (tx *Tx) page(id pgid) *page {
|
||||
// Check the dirty pages first.
|
||||
if tx.pages != nil {
|
||||
if p, ok := tx.pages[id]; ok {
|
||||
return p
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise return directly from the mmap.
|
||||
return tx.db.page(id)
|
||||
}
|
||||
|
||||
// forEachPage iterates over every page within a given page and executes a function.
|
||||
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
|
||||
p := tx.page(pgid)
|
||||
|
||||
// Execute function.
|
||||
fn(p, depth)
|
||||
|
||||
// Recursively loop over children.
|
||||
if (p.flags & branchPageFlag) != 0 {
|
||||
for i := 0; i < int(p.count); i++ {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
tx.forEachPage(elem.pgid, depth+1, fn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Page returns page information for a given page number.
|
||||
// This is only safe for concurrent use when used by a writable transaction.
|
||||
func (tx *Tx) Page(id int) (*PageInfo, error) {
|
||||
if tx.db == nil {
|
||||
return nil, ErrTxClosed
|
||||
} else if pgid(id) >= tx.meta.pgid {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Build the page info.
|
||||
p := tx.db.page(pgid(id))
|
||||
info := &PageInfo{
|
||||
ID: id,
|
||||
Count: int(p.count),
|
||||
OverflowCount: int(p.overflow),
|
||||
}
|
||||
|
||||
// Determine the type (or if it's free).
|
||||
if tx.db.freelist.freed(pgid(id)) {
|
||||
info.Type = "free"
|
||||
} else {
|
||||
info.Type = p.typ()
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// TxStats represents statistics about the actions performed by the transaction.
|
||||
type TxStats struct {
|
||||
// Page statistics.
|
||||
PageCount int // number of page allocations
|
||||
PageAlloc int // total bytes allocated
|
||||
|
||||
// Cursor statistics.
|
||||
CursorCount int // number of cursors created
|
||||
|
||||
// Node statistics
|
||||
NodeCount int // number of node allocations
|
||||
NodeDeref int // number of node dereferences
|
||||
|
||||
// Rebalance statistics.
|
||||
Rebalance int // number of node rebalances
|
||||
RebalanceTime time.Duration // total time spent rebalancing
|
||||
|
||||
// Split/Spill statistics.
|
||||
Split int // number of nodes split
|
||||
Spill int // number of nodes spilled
|
||||
SpillTime time.Duration // total time spent spilling
|
||||
|
||||
// Write statistics.
|
||||
Write int // number of writes performed
|
||||
WriteTime time.Duration // total time spent writing to disk
|
||||
}
|
||||
|
||||
func (s *TxStats) add(other *TxStats) {
|
||||
s.PageCount += other.PageCount
|
||||
s.PageAlloc += other.PageAlloc
|
||||
s.CursorCount += other.CursorCount
|
||||
s.NodeCount += other.NodeCount
|
||||
s.NodeDeref += other.NodeDeref
|
||||
s.Rebalance += other.Rebalance
|
||||
s.RebalanceTime += other.RebalanceTime
|
||||
s.Split += other.Split
|
||||
s.Spill += other.Spill
|
||||
s.SpillTime += other.SpillTime
|
||||
s.Write += other.Write
|
||||
s.WriteTime += other.WriteTime
|
||||
}
|
||||
|
||||
// Sub calculates and returns the difference between two sets of transaction stats.
|
||||
// This is useful when obtaining stats at two different points in time and
|
||||
// you need the performance counters that occurred within that time span.
|
||||
func (s *TxStats) Sub(other *TxStats) TxStats {
|
||||
var diff TxStats
|
||||
diff.PageCount = s.PageCount - other.PageCount
|
||||
diff.PageAlloc = s.PageAlloc - other.PageAlloc
|
||||
diff.CursorCount = s.CursorCount - other.CursorCount
|
||||
diff.NodeCount = s.NodeCount - other.NodeCount
|
||||
diff.NodeDeref = s.NodeDeref - other.NodeDeref
|
||||
diff.Rebalance = s.Rebalance - other.Rebalance
|
||||
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
|
||||
diff.Split = s.Split - other.Split
|
||||
diff.Spill = s.Spill - other.Spill
|
||||
diff.SpillTime = s.SpillTime - other.SpillTime
|
||||
diff.Write = s.Write - other.Write
|
||||
diff.WriteTime = s.WriteTime - other.WriteTime
|
||||
return diff
|
||||
}
|
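As a rough illustration of how the Tx API above is normally driven (the same pattern the boltease wrapper below uses): writes go through db.Update, which commits the transaction on a nil return and rolls back on error, while reads go through db.View. The file name, bucket, and key here are made up for the example.

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open (or create) a database file; 0600 and nil options are typical defaults.
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read/write transaction: Update commits on a nil return, rolls back on error.
	err = db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("settings"))
		if err != nil {
			return err
		}
		return bkt.Put([]byte("greeting"), []byte("hello"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read-only transaction.
	err = db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("settings")).Get([]byte("greeting"))
		fmt.Println(string(v))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}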
22
vendor/github.com/br0xen/boltease/.gitignore
generated
vendored
Executable file
@ -0,0 +1,22 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
4
vendor/github.com/br0xen/boltease/README.md
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
boltease
|
||||
========
|
||||
|
||||
A library for easing the use of bolt databases
|
361
vendor/github.com/br0xen/boltease/boltease.go
generated
vendored
Normal file
@ -0,0 +1,361 @@
|
||||
package boltease
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// This is a library for easing the use of bolt dbs
|
||||
|
||||
// DB is a struct for accomplishing this
|
||||
type DB struct {
|
||||
filename string
|
||||
boltDB *bolt.DB
|
||||
mode os.FileMode
|
||||
options *bolt.Options
|
||||
dbIsOpen bool
|
||||
}
|
||||
|
||||
// Create makes sure we can open the file and returns the DB object
|
||||
func Create(fn string, m os.FileMode, opts *bolt.Options) (*DB, error) {
|
||||
var err error
|
||||
b := DB{filename: fn, mode: m, options: opts}
|
||||
b.boltDB, err = bolt.Open(fn, m, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer b.boltDB.Close()
|
||||
return &b, nil
|
||||
}
|
||||
|
||||
func (b *DB) OpenDB() error {
|
||||
if b.dbIsOpen {
|
||||
// DB is already open, that's fine.
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
if b.boltDB, err = bolt.Open(b.filename, b.mode, b.options); err != nil {
|
||||
return err
|
||||
}
|
||||
b.dbIsOpen = true
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *DB) CloseDB() error {
|
||||
if !b.dbIsOpen {
|
||||
// DB is already closed, that's fine.
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
if err = b.boltDB.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
b.dbIsOpen = false
|
||||
return err
|
||||
}
|
||||
|
||||
// MkBucketPath builds all buckets in the string slice
|
||||
func (b *DB) MkBucketPath(path []string) error {
|
||||
var err error
|
||||
if !b.dbIsOpen {
|
||||
if err = b.OpenDB(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer b.CloseDB()
|
||||
}
|
||||
|
||||
err = b.boltDB.Update(func(tx *bolt.Tx) error {
|
||||
var err error
|
||||
bkt := tx.Bucket([]byte(path[0]))
|
||||
if bkt == nil {
|
||||
// Create it
|
||||
bkt, err = tx.CreateBucket([]byte(path[0]))
|
||||
if err != nil {
|
||||
// error creating
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(path) > 1 {
|
||||
path = path[1:]
|
||||
for i := range path {
|
||||
nextBkt := bkt.Bucket([]byte(path[i]))
|
||||
if nextBkt == nil {
|
||||
// Create it
|
||||
nextBkt, err = bkt.CreateBucket([]byte(path[i]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
bkt = nextBkt
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// GetValue returns the value at path
|
||||
// path is a slice of strings
|
||||
// key is the key to get
|
||||
func (b *DB) GetValue(path []string, key string) (string, error) {
|
||||
var err error
|
||||
var ret string
|
||||
if !b.dbIsOpen {
|
||||
if err = b.OpenDB(); err != nil {
|
||||
return ret, err
|
||||
}
|
||||
defer b.CloseDB()
|
||||
}
|
||||
err = b.boltDB.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(path[0]))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + path[0])
|
||||
}
|
||||
for idx := 1; idx < len(path); idx++ {
|
||||
bkt = bkt.Bucket([]byte(path[idx]))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + strings.Join(path[:idx], "/"))
|
||||
}
|
||||
}
|
||||
// bkt should have the last bucket in the path
|
||||
ret = string(bkt.Get([]byte(key)))
|
||||
return nil
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// SetValue sets the value of key at path to val
|
||||
// path is a slice of tokens
|
||||
func (b *DB) SetValue(path []string, key, val string) error {
|
||||
var err error
|
||||
if !b.dbIsOpen {
|
||||
if err = b.OpenDB(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer b.CloseDB()
|
||||
}
|
||||
|
||||
err = b.MkBucketPath(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = b.boltDB.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(path[0]))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + path[0])
|
||||
}
|
||||
for idx := 1; idx < len(path); idx++ {
|
||||
bkt, err = bkt.CreateBucketIfNotExists([]byte(path[idx]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// bkt should have the last bucket in the path
|
||||
return bkt.Put([]byte(key), []byte(val))
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// GetInt returns the value at path
|
||||
// If the value cannot be parsed as an int, error
|
||||
func (b *DB) GetInt(path []string, key string) (int, error) {
|
||||
var ret int
|
||||
r, err := b.GetValue(path, key)
|
||||
if err == nil {
|
||||
ret, err = strconv.Atoi(r)
|
||||
}
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// SetInt Sets an integer value
|
||||
func (b *DB) SetInt(path []string, key string, val int) error {
|
||||
return b.SetValue(path, key, strconv.Itoa(val))
|
||||
}
|
||||
|
||||
// GetBool returns the value at 'path'
|
||||
// If the value cannot be parsed as a bool, error
|
||||
// We check 'true/false' and '1/0', else error
|
||||
func (b *DB) GetBool(path []string, key string) (bool, error) {
|
||||
var ret bool
|
||||
r, err := b.GetValue(path, key)
|
||||
if err == nil {
|
||||
if r == "true" || r == "1" {
|
||||
ret = true
|
||||
} else if r != "false" && r != "0" {
|
||||
err = fmt.Errorf("Cannot parse as a boolean")
|
||||
}
|
||||
}
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// SetBool Sets a boolean value
|
||||
func (b *DB) SetBool(path []string, key string, val bool) error {
|
||||
if val {
|
||||
return b.SetValue(path, key, "true")
|
||||
}
|
||||
return b.SetValue(path, key, "false")
|
||||
}
|
||||
|
||||
// GetTimestamp returns the value at 'path'
|
||||
// If the value cannot be parsed as an RFC3339 timestamp, error
|
||||
func (b *DB) GetTimestamp(path []string, key string) (time.Time, error) {
|
||||
r, err := b.GetValue(path, key)
|
||||
if err == nil {
|
||||
return time.Parse(time.RFC3339, r)
|
||||
}
|
||||
return time.Unix(0, 0), err
|
||||
}
|
||||
|
||||
// SetTimestamp saves a timestamp into the db
|
||||
func (b *DB) SetTimestamp(path []string, key string, val time.Time) error {
|
||||
return b.SetValue(path, key, val.Format(time.RFC3339))
|
||||
}
|
||||
|
||||
// GetBucketList returns a list of all sub-buckets at path
|
||||
func (b *DB) GetBucketList(path []string) ([]string, error) {
|
||||
var err error
|
||||
var ret []string
|
||||
if !b.dbIsOpen {
|
||||
if err = b.OpenDB(); err != nil {
|
||||
return ret, err
|
||||
}
|
||||
defer b.CloseDB()
|
||||
}
|
||||
|
||||
err = b.boltDB.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(path[0]))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + path[0])
|
||||
}
|
||||
var berr error
|
||||
if len(path) > 1 {
|
||||
for idx := 1; idx < len(path); idx++ {
|
||||
bkt = bkt.Bucket([]byte(path[idx]))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + strings.Join(path[:idx], " / "))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// bkt should have the last bucket in the path
|
||||
berr = bkt.ForEach(func(k, v []byte) error {
|
||||
if v == nil {
|
||||
// Must be a bucket
|
||||
ret = append(ret, string(k))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return berr
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// GetKeyList returns a list of all keys at path
|
||||
func (b *DB) GetKeyList(path []string) ([]string, error) {
|
||||
var err error
|
||||
var ret []string
|
||||
if !b.dbIsOpen {
|
||||
if err = b.OpenDB(); err != nil {
|
||||
return ret, err
|
||||
}
|
||||
defer b.CloseDB()
|
||||
}
|
||||
|
||||
err = b.boltDB.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(path[0]))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + path[0])
|
||||
}
|
||||
if len(path) > 1 {
|
||||
var newBkt *bolt.Bucket
|
||||
for idx := 1; idx < len(path); idx++ {
|
||||
newBkt = bkt.Bucket([]byte(path[idx]))
|
||||
if newBkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + strings.Join(path[:idx], "/"))
|
||||
}
|
||||
}
|
||||
bkt = newBkt
|
||||
}
|
||||
// newBkt should have the last bucket in the path
|
||||
berr := bkt.ForEach(func(k, v []byte) error {
|
||||
if v != nil {
|
||||
// Not a bucket
|
||||
ret = append(ret, string(k))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return berr
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// DeletePair deletes the pair with key at path
|
||||
func (b *DB) DeletePair(path []string, key string) error {
|
||||
var err error
|
||||
if !b.dbIsOpen {
|
||||
if err = b.OpenDB(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer b.CloseDB()
|
||||
}
|
||||
|
||||
err = b.boltDB.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(path[0]))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + path[0])
|
||||
}
|
||||
if len(path) > 1 {
|
||||
var newBkt *bolt.Bucket
|
||||
for idx := 1; idx < len(path); idx++ {
|
||||
newBkt = bkt.Bucket([]byte(path[idx]))
|
||||
if newBkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + strings.Join(path[:idx], "/"))
|
||||
}
|
||||
}
|
||||
bkt = newBkt
|
||||
}
|
||||
// bkt should have the last bucket in the path
|
||||
// Test to make sure that key is a pair, if so, delete it
|
||||
if tst := bkt.Bucket([]byte(key)); tst == nil {
|
||||
return bkt.Delete([]byte(key))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteBucket deletes the bucket key at path
|
||||
func (b *DB) DeleteBucket(path []string, key string) error {
|
||||
var err error
|
||||
if !b.dbIsOpen {
|
||||
if err = b.OpenDB(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer b.CloseDB()
|
||||
}
|
||||
|
||||
err = b.boltDB.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(path[0]))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + path[0])
|
||||
}
|
||||
for idx := 1; idx < len(path); idx++ {
|
||||
bkt = bkt.Bucket([]byte(path[idx]))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("Couldn't find bucket " + strings.Join(path[:idx], "/"))
|
||||
}
|
||||
}
|
||||
// bkt should have the last bucket in the path
|
||||
// Test to make sure that key is a bucket, if so, delete it
|
||||
if tst := bkt.Bucket([]byte(key)); tst != nil {
|
||||
return bkt.DeleteBucket([]byte(key))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
}
|
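Taken together, these helpers give gime a small typed layer (booleans, RFC3339 timestamps, key and bucket listings, deletions) over Bolt buckets addressed by a path of bucket names. A minimal usage sketch follows; how the *boltease.DB is opened is left to the package's constructor (not shown in this diff), and the bucket and key names are placeholders:

package example

import (
	"fmt"
	"time"

	"github.com/br0xen/boltease"
)

// demo exercises the helpers above against an already-opened *boltease.DB.
func demo(db *boltease.DB) error {
	path := []string{"settings"} // placeholder bucket path
	if err := db.SetBool(path, "enabled", true); err != nil {
		return err
	}
	if err := db.SetTimestamp(path, "last-run", time.Now()); err != nil {
		return err
	}
	enabled, err := db.GetBool(path, "enabled")
	if err != nil {
		return err
	}
	last, err := db.GetTimestamp(path, "last-run")
	if err != nil {
		return err
	}
	keys, err := db.GetKeyList(path)
	if err != nil {
		return err
	}
	fmt.Println(enabled, last.Format(time.RFC3339), keys)
	return db.DeletePair(path, "enabled")
}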
1
vendor/github.com/pborman/uuid/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
Paul Borman <borman@google.com>
|
27
vendor/github.com/pborman/uuid/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009,2014 Google Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
84
vendor/github.com/pborman/uuid/dce.go
generated
vendored
Executable file
@ -0,0 +1,84 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// A Domain represents a Version 2 domain
|
||||
type Domain byte
|
||||
|
||||
// Domain constants for DCE Security (Version 2) UUIDs.
|
||||
const (
|
||||
Person = Domain(0)
|
||||
Group = Domain(1)
|
||||
Org = Domain(2)
|
||||
)
|
||||
|
||||
// NewDCESecurity returns a DCE Security (Version 2) UUID.
|
||||
//
|
||||
// The domain should be one of Person, Group or Org.
|
||||
// On a POSIX system the id should be the user's UID for the Person
|
||||
// domain and the user's GID for the Group. The meaning of id for
|
||||
// the domain Org or on non-POSIX systems is site defined.
|
||||
//
|
||||
// For a given domain/id pair the same token may be returned for up to
|
||||
// 7 minutes and 10 seconds.
|
||||
func NewDCESecurity(domain Domain, id uint32) UUID {
|
||||
uuid := NewUUID()
|
||||
if uuid != nil {
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
|
||||
uuid[9] = byte(domain)
|
||||
binary.BigEndian.PutUint32(uuid[0:], id)
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
|
||||
// domain with the id returned by os.Getuid.
|
||||
//
|
||||
// NewDCEPerson(Person, uint32(os.Getuid()))
|
||||
func NewDCEPerson() UUID {
|
||||
return NewDCESecurity(Person, uint32(os.Getuid()))
|
||||
}
|
||||
|
||||
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
|
||||
// domain with the id returned by os.Getgid.
|
||||
//
|
||||
// NewDCEGroup(Group, uint32(os.Getgid()))
|
||||
func NewDCEGroup() UUID {
|
||||
return NewDCESecurity(Group, uint32(os.Getgid()))
|
||||
}
|
||||
|
||||
// Domain returns the domain for a Version 2 UUID or false.
|
||||
func (uuid UUID) Domain() (Domain, bool) {
|
||||
if v, _ := uuid.Version(); v != 2 {
|
||||
return 0, false
|
||||
}
|
||||
return Domain(uuid[9]), true
|
||||
}
|
||||
|
||||
// Id returns the id for a Version 2 UUID or false.
|
||||
func (uuid UUID) Id() (uint32, bool) {
|
||||
if v, _ := uuid.Version(); v != 2 {
|
||||
return 0, false
|
||||
}
|
||||
return binary.BigEndian.Uint32(uuid[0:4]), true
|
||||
}
|
||||
|
||||
func (d Domain) String() string {
|
||||
switch d {
|
||||
case Person:
|
||||
return "Person"
|
||||
case Group:
|
||||
return "Group"
|
||||
case Org:
|
||||
return "Org"
|
||||
}
|
||||
return fmt.Sprintf("Domain%d", int(d))
|
||||
}
|
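For reference, a short sketch of exercising the DCE Security (Version 2) constructors above; the values depend on the local UID, and nil is returned if the underlying Version 1 generation fails:

package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.NewDCEPerson() // Person-domain UUID built from os.Getuid()
	if u == nil {
		fmt.Println("could not generate a DCE UUID")
		return
	}
	d, _ := u.Domain() // Person
	id, _ := u.Id()    // the encoded uid
	fmt.Println(u.String(), d, id)
}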
8
vendor/github.com/pborman/uuid/doc.go
generated
vendored
Executable file
@ -0,0 +1,8 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The uuid package generates and inspects UUIDs.
|
||||
//
|
||||
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
|
||||
package uuid
|
53
vendor/github.com/pborman/uuid/hash.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// Well known Name Space IDs and UUIDs
|
||||
var (
|
||||
NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
|
||||
NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
|
||||
NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
|
||||
NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
|
||||
NIL = Parse("00000000-0000-0000-0000-000000000000")
|
||||
)
|
||||
|
||||
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
|
||||
// first 16 bytes of the hash are used to form the UUID. The version of the
|
||||
// UUID will be the lower 4 bits of version. NewHash is used to implement
|
||||
// NewMD5 and NewSHA1.
|
||||
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
|
||||
h.Reset()
|
||||
h.Write(space)
|
||||
h.Write([]byte(data))
|
||||
s := h.Sum(nil)
|
||||
uuid := make([]byte, 16)
|
||||
copy(uuid, s)
|
||||
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
|
||||
return uuid
|
||||
}
|
||||
|
||||
// NewMD5 returns a new MD5 (Version 3) UUID based on the
|
||||
// supplied name space and data.
|
||||
//
|
||||
// NewHash(md5.New(), space, data, 3)
|
||||
func NewMD5(space UUID, data []byte) UUID {
|
||||
return NewHash(md5.New(), space, data, 3)
|
||||
}
|
||||
|
||||
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
|
||||
// supplied name space and data.
|
||||
//
|
||||
// NewHash(sha1.New(), space, data, 5)
|
||||
func NewSHA1(space UUID, data []byte) UUID {
|
||||
return NewHash(sha1.New(), space, data, 5)
|
||||
}
|
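Name-based UUIDs from NewMD5 and NewSHA1 are deterministic: the same namespace and name always produce the same UUID. A minimal sketch using the well-known namespaces defined above (the names are arbitrary examples):

package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	u3 := uuid.NewMD5(uuid.NameSpace_DNS, []byte("example.com"))          // Version 3
	u5 := uuid.NewSHA1(uuid.NameSpace_URL, []byte("https://example.com")) // Version 5
	fmt.Println(u3.String())
	fmt.Println(u5.String()) // identical on every run for the same inputs
}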
30
vendor/github.com/pborman/uuid/json.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// Copyright 2014 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "errors"
|
||||
|
||||
func (u UUID) MarshalJSON() ([]byte, error) {
|
||||
if len(u) == 0 {
|
||||
return []byte(`""`), nil
|
||||
}
|
||||
return []byte(`"` + u.String() + `"`), nil
|
||||
}
|
||||
|
||||
func (u *UUID) UnmarshalJSON(data []byte) error {
|
||||
if len(data) == 0 || string(data) == `""` {
|
||||
return nil
|
||||
}
|
||||
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
|
||||
return errors.New("invalid UUID format")
|
||||
}
|
||||
data = data[1 : len(data)-1]
|
||||
uu := Parse(string(data))
|
||||
if uu == nil {
|
||||
return errors.New("invalid UUID format")
|
||||
}
|
||||
*u = uu
|
||||
return nil
|
||||
}
|
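Because UUID implements MarshalJSON and UnmarshalJSON above, it round-trips through encoding/json as a quoted string. A minimal sketch (the record type is illustrative only):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/pborman/uuid"
)

type record struct {
	ID uuid.UUID `json:"id"`
}

func main() {
	in := record{ID: uuid.NewRandom()}
	b, _ := json.Marshal(in) // {"id":"xxxxxxxx-xxxx-..."}

	var out record
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(uuid.Equal(in.ID, out.ID)) // true
}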
101
vendor/github.com/pborman/uuid/node.go
generated
vendored
Executable file
@ -0,0 +1,101 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "net"
|
||||
|
||||
var (
|
||||
interfaces []net.Interface // cached list of interfaces
|
||||
ifname string // name of interface being used
|
||||
nodeID []byte // hardware for version 1 UUIDs
|
||||
)
|
||||
|
||||
// NodeInterface returns the name of the interface from which the NodeID was
|
||||
// derived. The interface "user" is returned if the NodeID was set by
|
||||
// SetNodeID.
|
||||
func NodeInterface() string {
|
||||
return ifname
|
||||
}
|
||||
|
||||
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
|
||||
// If name is "" then the first usable interface found will be used or a random
|
||||
// Node ID will be generated. If a named interface cannot be found then false
|
||||
// is returned.
|
||||
//
|
||||
// SetNodeInterface never fails when name is "".
|
||||
func SetNodeInterface(name string) bool {
|
||||
if interfaces == nil {
|
||||
var err error
|
||||
interfaces, err = net.Interfaces()
|
||||
if err != nil && name != "" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, ifs := range interfaces {
|
||||
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
|
||||
if setNodeID(ifs.HardwareAddr) {
|
||||
ifname = ifs.Name
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We found no interfaces with a valid hardware address. If name
|
||||
// does not specify a specific interface generate a random Node ID
|
||||
// (section 4.1.6)
|
||||
if name == "" {
|
||||
if nodeID == nil {
|
||||
nodeID = make([]byte, 6)
|
||||
}
|
||||
randomBits(nodeID)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
|
||||
// if not already set.
|
||||
func NodeID() []byte {
|
||||
if nodeID == nil {
|
||||
SetNodeInterface("")
|
||||
}
|
||||
nid := make([]byte, 6)
|
||||
copy(nid, nodeID)
|
||||
return nid
|
||||
}
|
||||
|
||||
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
|
||||
// of id are used. If id is less than 6 bytes then false is returned and the
|
||||
// Node ID is not set.
|
||||
func SetNodeID(id []byte) bool {
|
||||
if setNodeID(id) {
|
||||
ifname = "user"
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func setNodeID(id []byte) bool {
|
||||
if len(id) < 6 {
|
||||
return false
|
||||
}
|
||||
if nodeID == nil {
|
||||
nodeID = make([]byte, 6)
|
||||
}
|
||||
copy(nodeID, id)
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
|
||||
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) NodeID() []byte {
|
||||
if len(uuid) != 16 {
|
||||
return nil
|
||||
}
|
||||
node := make([]byte, 6)
|
||||
copy(node, uuid[10:])
|
||||
return node
|
||||
}
|
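A small sketch of pinning the node ID with SetNodeID instead of relying on a hardware address; the 6-byte value here is an arbitrary locally administered address chosen only for illustration:

package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	uuid.SetNodeID([]byte{0x02, 0x00, 0x5e, 0x10, 0x00, 0x01})
	fmt.Println(uuid.NodeInterface()) // "user"

	u := uuid.NewUUID() // Version 1 UUID carrying the pinned node ID
	if u != nil {
		fmt.Printf("% x\n", u.NodeID())
	}
}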
48
vendor/github.com/pborman/uuid/sql.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
|
||||
// Currently, database types that map to string and []byte are supported. Please
|
||||
// consult database-specific driver documentation for matching types.
|
||||
func (uuid *UUID) Scan(src interface{}) error {
|
||||
switch src.(type) {
|
||||
case string:
|
||||
// see uuid.Parse for required string format
|
||||
parsed := Parse(src.(string))
|
||||
|
||||
if parsed == nil {
|
||||
return errors.New("Scan: invalid UUID format")
|
||||
}
|
||||
|
||||
*uuid = parsed
|
||||
case []byte:
|
||||
b := src.([]byte)
|
||||
|
||||
// assumes a simple slice of bytes if 16 bytes
|
||||
// otherwise attempts to parse
|
||||
if len(b) == 16 {
|
||||
*uuid = UUID(b)
|
||||
} else {
|
||||
u := Parse(string(b))
|
||||
|
||||
if u == nil {
|
||||
return errors.New("Scan: invalid UUID format")
|
||||
}
|
||||
|
||||
*uuid = u
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
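Since *UUID implements sql.Scanner above, a UUID column can be scanned directly from database/sql. A minimal sketch; the table and column names are placeholders and the *sql.DB is assumed to be opened elsewhere with whatever driver the application registers:

package example

import (
	"database/sql"

	"github.com/pborman/uuid"
)

// lookupID reads one UUID-valued column using the Scan method defined above.
func lookupID(db *sql.DB) (uuid.UUID, error) {
	var id uuid.UUID
	err := db.QueryRow(`SELECT id FROM sessions LIMIT 1`).Scan(&id)
	return id, err
}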
132
vendor/github.com/pborman/uuid/time.go
generated
vendored
Executable file
@ -0,0 +1,132 @@
|
||||
// Copyright 2014 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Time represents a time as the number of 100s of nanoseconds since 15 Oct
|
||||
// 1582.
|
||||
type Time int64
|
||||
|
||||
const (
|
||||
lillian = 2299160 // Julian day of 15 Oct 1582
|
||||
unix = 2440587 // Julian day of 1 Jan 1970
|
||||
epoch = unix - lillian // Days between epochs
|
||||
g1582 = epoch * 86400 // seconds between epochs
|
||||
g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
|
||||
)
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
lasttime uint64 // last time we returned
|
||||
clock_seq uint16 // clock sequence for this run
|
||||
|
||||
timeNow = time.Now // for testing
|
||||
)
|
||||
|
||||
// UnixTime converts t to the number of seconds and nanoseconds using the Unix
|
||||
// epoch of 1 Jan 1970.
|
||||
func (t Time) UnixTime() (sec, nsec int64) {
|
||||
sec = int64(t - g1582ns100)
|
||||
nsec = (sec % 10000000) * 100
|
||||
sec /= 10000000
|
||||
return sec, nsec
|
||||
}
|
||||
|
||||
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
|
||||
// clock sequence as well as adjusting the clock sequence as needed. An error
|
||||
// is returned if the current time cannot be determined.
|
||||
func GetTime() (Time, uint16, error) {
|
||||
defer mu.Unlock()
|
||||
mu.Lock()
|
||||
return getTime()
|
||||
}
|
||||
|
||||
func getTime() (Time, uint16, error) {
|
||||
t := timeNow()
|
||||
|
||||
// If we don't have a clock sequence already, set one.
|
||||
if clock_seq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
now := uint64(t.UnixNano()/100) + g1582ns100
|
||||
|
||||
// If time has gone backwards with this clock sequence then we
|
||||
// increment the clock sequence
|
||||
if now <= lasttime {
|
||||
clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
|
||||
}
|
||||
lasttime = now
|
||||
return Time(now), clock_seq, nil
|
||||
}
|
||||
|
||||
// ClockSequence returns the current clock sequence, generating one if not
|
||||
// already set. The clock sequence is only used for Version 1 UUIDs.
|
||||
//
|
||||
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
|
||||
func ClockSequence() int {
|
||||
defer mu.Unlock()
|
||||
mu.Lock()
|
||||
return clockSequence()
|
||||
}
|
||||
|
||||
func clockSequence() int {
|
||||
if clock_seq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
return int(clock_seq & 0x3fff)
|
||||
}
|
||||
|
||||
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
|
||||
// -1 causes a new sequence to be generated.
|
||||
func SetClockSequence(seq int) {
|
||||
defer mu.Unlock()
|
||||
mu.Lock()
|
||||
setClockSequence(seq)
|
||||
}
|
||||
|
||||
func setClockSequence(seq int) {
|
||||
if seq == -1 {
|
||||
var b [2]byte
|
||||
randomBits(b[:]) // clock sequence
|
||||
seq = int(b[0])<<8 | int(b[1])
|
||||
}
|
||||
old_seq := clock_seq
|
||||
clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
|
||||
if old_seq != clock_seq {
|
||||
lasttime = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
|
||||
// uuid. It returns false if uuid is not valid. The time is only well defined
|
||||
// for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) Time() (Time, bool) {
|
||||
if len(uuid) != 16 {
|
||||
return 0, false
|
||||
}
|
||||
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
|
||||
return Time(time), true
|
||||
}
|
||||
|
||||
// ClockSequence returns the clock sequence encoded in uuid. It returns false
|
||||
// if uuid is not valid. The clock sequence is only well defined for version 1
|
||||
// and 2 UUIDs.
|
||||
func (uuid UUID) ClockSequence() (int, bool) {
|
||||
if len(uuid) != 16 {
|
||||
return 0, false
|
||||
}
|
||||
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
|
||||
}
|
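The timestamp and clock sequence embedded in a Version 1 UUID can be recovered with the Time, UnixTime, and ClockSequence helpers above. A minimal sketch:

package main

import (
	"fmt"
	"time"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.NewUUID() // Version 1: embeds a 60-bit timestamp
	if u == nil {
		fmt.Println("could not generate a Version 1 UUID")
		return
	}
	if t, ok := u.Time(); ok {
		sec, nsec := t.UnixTime()
		fmt.Println(time.Unix(sec, nsec)) // roughly the current time
	}
	if seq, ok := u.ClockSequence(); ok {
		fmt.Println(seq)
	}
}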
43
vendor/github.com/pborman/uuid/util.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// randomBits completely fills slice b with random data.
|
||||
func randomBits(b []byte) {
|
||||
if _, err := io.ReadFull(rander, b); err != nil {
|
||||
panic(err.Error()) // rand should never fail
|
||||
}
|
||||
}
|
||||
|
||||
// xvalues returns the value of a byte as a hexadecimal digit or 255.
|
||||
var xvalues = []byte{
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
}
|
||||
|
||||
// xtob converts the first two hex bytes of x into a byte.
|
||||
func xtob(x string) (byte, bool) {
|
||||
b1 := xvalues[x[0]]
|
||||
b2 := xvalues[x[1]]
|
||||
return (b1 << 4) | b2, b1 != 255 && b2 != 255
|
||||
}
|
163
vendor/github.com/pborman/uuid/uuid.go
generated
vendored
Executable file
@ -0,0 +1,163 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
|
||||
// 4122.
|
||||
type UUID []byte
|
||||
|
||||
// A Version represents a UUID's version.
|
||||
type Version byte
|
||||
|
||||
// A Variant represents a UUID's variant.
|
||||
type Variant byte
|
||||
|
||||
// Constants returned by Variant.
|
||||
const (
|
||||
Invalid = Variant(iota) // Invalid UUID
|
||||
RFC4122 // The variant specified in RFC4122
|
||||
Reserved // Reserved, NCS backward compatibility.
|
||||
Microsoft // Reserved, Microsoft Corporation backward compatibility.
|
||||
Future // Reserved for future definition.
|
||||
)
|
||||
|
||||
var rander = rand.Reader // random function
|
||||
|
||||
// New returns a new random (version 4) UUID as a string. It is a convenience
|
||||
// function for NewRandom().String().
|
||||
func New() string {
|
||||
return NewRandom().String()
|
||||
}
|
||||
|
||||
// Parse decodes s into a UUID or returns nil. Both the UUID form of
|
||||
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
|
||||
func Parse(s string) UUID {
|
||||
if len(s) == 36+9 {
|
||||
if strings.ToLower(s[:9]) != "urn:uuid:" {
|
||||
return nil
|
||||
}
|
||||
s = s[9:]
|
||||
} else if len(s) != 36 {
|
||||
return nil
|
||||
}
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return nil
|
||||
}
|
||||
uuid := make([]byte, 16)
|
||||
for i, x := range []int{
|
||||
0, 2, 4, 6,
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34} {
|
||||
if v, ok := xtob(s[x:]); !ok {
|
||||
return nil
|
||||
} else {
|
||||
uuid[i] = v
|
||||
}
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// Equal returns true if uuid1 and uuid2 are equal.
|
||||
func Equal(uuid1, uuid2 UUID) bool {
|
||||
return bytes.Equal(uuid1, uuid2)
|
||||
}
|
||||
|
||||
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
// or "" if uuid is invalid.
|
||||
func (uuid UUID) String() string {
|
||||
if uuid == nil || len(uuid) != 16 {
|
||||
return ""
|
||||
}
|
||||
b := []byte(uuid)
|
||||
return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
|
||||
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
|
||||
}
|
||||
|
||||
// URN returns the RFC 2141 URN form of uuid,
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
|
||||
func (uuid UUID) URN() string {
|
||||
if uuid == nil || len(uuid) != 16 {
|
||||
return ""
|
||||
}
|
||||
b := []byte(uuid)
|
||||
return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
|
||||
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
|
||||
}
|
||||
|
||||
// Variant returns the variant encoded in uuid. It returns Invalid if
|
||||
// uuid is invalid.
|
||||
func (uuid UUID) Variant() Variant {
|
||||
if len(uuid) != 16 {
|
||||
return Invalid
|
||||
}
|
||||
switch {
|
||||
case (uuid[8] & 0xc0) == 0x80:
|
||||
return RFC4122
|
||||
case (uuid[8] & 0xe0) == 0xc0:
|
||||
return Microsoft
|
||||
case (uuid[8] & 0xe0) == 0xe0:
|
||||
return Future
|
||||
default:
|
||||
return Reserved
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Version returns the version of uuid. It returns false if uuid is not
|
||||
// valid.
|
||||
func (uuid UUID) Version() (Version, bool) {
|
||||
if len(uuid) != 16 {
|
||||
return 0, false
|
||||
}
|
||||
return Version(uuid[6] >> 4), true
|
||||
}
|
||||
|
||||
func (v Version) String() string {
|
||||
if v > 15 {
|
||||
return fmt.Sprintf("BAD_VERSION_%d", v)
|
||||
}
|
||||
return fmt.Sprintf("VERSION_%d", v)
|
||||
}
|
||||
|
||||
func (v Variant) String() string {
|
||||
switch v {
|
||||
case RFC4122:
|
||||
return "RFC4122"
|
||||
case Reserved:
|
||||
return "Reserved"
|
||||
case Microsoft:
|
||||
return "Microsoft"
|
||||
case Future:
|
||||
return "Future"
|
||||
case Invalid:
|
||||
return "Invalid"
|
||||
}
|
||||
return fmt.Sprintf("BadVariant%d", int(v))
|
||||
}
|
||||
|
||||
// SetRand sets the random number generator to r, which implements io.Reader.
|
||||
// If r.Read returns an error when the package requests random data then
|
||||
// a panic will be issued.
|
||||
//
|
||||
// Calling SetRand with nil sets the random number generator to the default
|
||||
// generator.
|
||||
func SetRand(r io.Reader) {
|
||||
if r == nil {
|
||||
rander = rand.Reader
|
||||
return
|
||||
}
|
||||
rander = r
|
||||
}
|
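A short sketch of the parsing and formatting helpers above; Parse accepts both the plain and the urn:uuid: forms and returns nil on anything else:

package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	s := uuid.New() // random (Version 4) UUID as a string
	u := uuid.Parse(s)
	if u == nil {
		panic("unexpected parse failure")
	}
	v, _ := u.Version()
	fmt.Println(u.String(), u.URN(), v, u.Variant())

	fmt.Println(uuid.Parse("urn:uuid:"+s) != nil) // true
	fmt.Println(uuid.Parse("not-a-uuid") == nil)  // true
}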
41
vendor/github.com/pborman/uuid/version1.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
|
||||
// sequence, and the current time. If the NodeID has not been set by SetNodeID
|
||||
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
|
||||
// be set NewUUID returns nil. If clock sequence has not been set by
|
||||
// SetClockSequence then it will be set automatically. If GetTime fails to
|
||||
// return the current time, NewUUID returns nil.
|
||||
func NewUUID() UUID {
|
||||
if nodeID == nil {
|
||||
SetNodeInterface("")
|
||||
}
|
||||
|
||||
now, seq, err := GetTime()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
uuid := make([]byte, 16)
|
||||
|
||||
time_low := uint32(now & 0xffffffff)
|
||||
time_mid := uint16((now >> 32) & 0xffff)
|
||||
time_hi := uint16((now >> 48) & 0x0fff)
|
||||
time_hi |= 0x1000 // Version 1
|
||||
|
||||
binary.BigEndian.PutUint32(uuid[0:], time_low)
|
||||
binary.BigEndian.PutUint16(uuid[4:], time_mid)
|
||||
binary.BigEndian.PutUint16(uuid[6:], time_hi)
|
||||
binary.BigEndian.PutUint16(uuid[8:], seq)
|
||||
copy(uuid[10:], nodeID)
|
||||
|
||||
return uuid
|
||||
}
|
25
vendor/github.com/pborman/uuid/version4.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
// NewRandom returns a Random (Version 4) UUID or panics.
|
||||
//
|
||||
// The strength of the UUIDs is based on the strength of the crypto/rand
|
||||
// package.
|
||||
//
|
||||
// A note about uniqueness derived from the UUID Wikipedia entry:
|
||||
//
|
||||
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
|
||||
// hit by a meteorite is estimated to be one chance in 17 billion, that
|
||||
// means the probability is about 0.00000000006 (6 × 10^-11),
|
||||
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
|
||||
// year and having one duplicate.
|
||||
func NewRandom() UUID {
|
||||
uuid := make([]byte, 16)
|
||||
randomBits([]byte(uuid))
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
||||
return uuid
|
||||
}
|
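A minimal sketch of generating a random UUID and inspecting the version and variant bits set above:

package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.NewRandom()
	v, _ := u.Version()
	fmt.Println(v)           // VERSION_4
	fmt.Println(u.Variant()) // RFC4122
}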
3
vendor/golang.org/x/sys/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
3
vendor/golang.org/x/sys/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
27
vendor/golang.org/x/sys/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/sys/PATENTS
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
1
vendor/golang.org/x/sys/unix/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
_obj/
|
10
vendor/golang.org/x/sys/unix/asm.s
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
TEXT ·use(SB),NOSPLIT,$0
|
||||
RET
|
29
vendor/golang.org/x/sys/unix/asm_darwin_386.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for 386, Darwin
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-52
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for AMD64, Darwin
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-104
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·RawSyscall6(SB)
|
30
vendor/golang.org/x/sys/unix/asm_darwin_arm.s
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
// +build arm,darwin
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for ARM, Darwin
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
B syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
B syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-52
|
||||
B syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
B syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
B syscall·RawSyscall6(SB)
|
30
vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
// +build arm64,darwin
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for ARM64, Darwin
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
B syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
B syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-104
|
||||
B syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
B syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
B syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for AMD64, DragonFly
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-64
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-88
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-112
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-64
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-88
|
||||
JMP syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_freebsd_386.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for 386, FreeBSD
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-52
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for AMD64, FreeBSD
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-104
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for ARM, FreeBSD
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
B syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
B syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-52
|
||||
B syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
B syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
B syscall·RawSyscall6(SB)
|
35
vendor/golang.org/x/sys/unix/asm_linux_386.s
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for 386, Linux
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·RawSyscall6(SB)
|
||||
|
||||
TEXT ·socketcall(SB),NOSPLIT,$0-36
|
||||
JMP syscall·socketcall(SB)
|
||||
|
||||
TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
|
||||
JMP syscall·rawsocketcall(SB)
|
||||
|
||||
TEXT ·seek(SB),NOSPLIT,$0-28
|
||||
JMP syscall·seek(SB)
|
29
vendor/golang.org/x/sys/unix/asm_linux_amd64.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for AMD64, Linux
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·RawSyscall6(SB)
|
||||
|
||||
TEXT ·gettimeofday(SB),NOSPLIT,$0-16
|
||||
JMP syscall·gettimeofday(SB)
|
29
vendor/golang.org/x/sys/unix/asm_linux_arm.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for arm, Linux
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
B syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
B syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
B syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
B syscall·RawSyscall6(SB)
|
||||
|
||||
TEXT ·seek(SB),NOSPLIT,$0-32
|
||||
B syscall·seek(SB)
|
24
vendor/golang.org/x/sys/unix/asm_linux_arm64.s
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
// +build arm64
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
B syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
B syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
B syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
B syscall·RawSyscall6(SB)
|
28
vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
// +build mips64 mips64le
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for mips64, Linux
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·RawSyscall6(SB)
|
28
vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
// +build ppc64 ppc64le
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for ppc64, Linux
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
BR syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
BR syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
BR syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
BR syscall·RawSyscall6(SB)
|
28
vendor/golang.org/x/sys/unix/asm_linux_s390x.s
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x
|
||||
// +build linux
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for s390x, Linux
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
BR syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
BR syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
BR syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
BR syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_netbsd_386.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for 386, NetBSD
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-52
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for AMD64, NetBSD
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-104
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for ARM, NetBSD
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
B syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
B syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-52
|
||||
B syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
B syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
B syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_openbsd_386.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for 386, OpenBSD
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-52
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·RawSyscall6(SB)
|
29
vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for AMD64, OpenBSD
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-104
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·RawSyscall6(SB)
|
17
vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
|
||||
//
|
||||
|
||||
TEXT ·sysvicall6(SB),NOSPLIT,$0-64
|
||||
JMP syscall·sysvicall6(SB)
|
||||
|
||||
TEXT ·rawSysvicall6(SB),NOSPLIT,$0-64
|
||||
JMP syscall·rawSysvicall6(SB)
|
35
vendor/golang.org/x/sys/unix/bluetooth_linux.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Bluetooth sockets and messages

package unix

// Bluetooth Protocols
const (
	BTPROTO_L2CAP  = 0
	BTPROTO_HCI    = 1
	BTPROTO_SCO    = 2
	BTPROTO_RFCOMM = 3
	BTPROTO_BNEP   = 4
	BTPROTO_CMTP   = 5
	BTPROTO_HIDP   = 6
	BTPROTO_AVDTP  = 7
)

const (
	HCI_CHANNEL_RAW     = 0
	HCI_CHANNEL_USER    = 1
	HCI_CHANNEL_MONITOR = 2
	HCI_CHANNEL_CONTROL = 3
)

// Socketoption Level
const (
	SOL_BLUETOOTH = 0x112
	SOL_HCI       = 0x0
	SOL_L2CAP     = 0x6
	SOL_RFCOMM    = 0x12
	SOL_SCO       = 0x11
)
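These constants pair with the AF_BLUETOOTH socket family defined elsewhere in the package. A minimal sketch, assuming Linux, sufficient privileges, and the SockaddrHCI type provided by this package, of opening a raw HCI socket on Bluetooth device 0:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_RAW, unix.BTPROTO_HCI)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Bind to HCI device 0 on the raw channel defined above.
	sa := &unix.SockaddrHCI{Dev: 0, Channel: unix.HCI_CHANNEL_RAW}
	if err := unix.Bind(fd, sa); err != nil {
		log.Fatal(err)
	}
}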
13
vendor/golang.org/x/sys/unix/constants.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package unix

const (
	R_OK = 0x4
	W_OK = 0x2
	X_OK = 0x1
)
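R_OK, W_OK and X_OK are the access-mode bits accepted by access(2). A minimal sketch of checking them through the package's Access wrapper; the path used here is an illustrative assumption.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Check whether the current process may read and write /tmp.
	if err := unix.Access("/tmp", unix.R_OK|unix.W_OK); err != nil {
		fmt.Println("no read/write access:", err)
		return
	}
	fmt.Println("/tmp is readable and writable")
}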
27
vendor/golang.org/x/sys/unix/env_unix.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd solaris

// Unix environment variables.

package unix

import "syscall"

func Getenv(key string) (value string, found bool) {
	return syscall.Getenv(key)
}

func Setenv(key, value string) error {
	return syscall.Setenv(key, value)
}

func Clearenv() {
	syscall.Clearenv()
}

func Environ() []string {
	return syscall.Environ()
}
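These helpers simply forward to package syscall so that code importing only golang.org/x/sys/unix can manage the environment. A minimal sketch; the variable name is an illustrative assumption.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	if err := unix.Setenv("EXAMPLE_VAR", "1"); err != nil {
		fmt.Println("setenv failed:", err)
		return
	}
	if v, ok := unix.Getenv("EXAMPLE_VAR"); ok {
		fmt.Println("EXAMPLE_VAR =", v)
	}
}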
14
vendor/golang.org/x/sys/unix/env_unset.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.4

package unix

import "syscall"

func Unsetenv(key string) error {
	// This was added in Go 1.4.
	return syscall.Unsetenv(key)
}
24
vendor/golang.org/x/sys/unix/flock.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
// +build linux darwin freebsd openbsd netbsd dragonfly

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd

package unix

import "unsafe"

// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
// systems by flock_linux_32bit.go to be SYS_FCNTL64.
var fcntl64Syscall uintptr = SYS_FCNTL

// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
	_, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
	if errno == 0 {
		return nil
	}
	return errno
}
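FcntlFlock issues fcntl(2) with a Flock_t describing the byte range to lock. A minimal sketch, assuming Linux and an illustrative lock-file path, of taking a non-blocking write lock over a whole file:

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("/tmp/example.lock", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// F_SETLK fails immediately if another process holds a conflicting lock.
	lk := unix.Flock_t{
		Type:   unix.F_WRLCK,
		Whence: 0, // offsets relative to the start of the file
		Start:  0,
		Len:    0, // 0 means "to the end of the file"
	}
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk); err != nil {
		log.Fatal("could not lock file: ", err)
	}
}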
13
vendor/golang.org/x/sys/unix/flock_linux_32bit.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
// +build linux,386 linux,arm

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package unix

func init() {
	// On 32-bit Linux systems, the fcntl syscall that matches Go's
	// Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
	fcntl64Syscall = SYS_FCNTL64
}
46
vendor/golang.org/x/sys/unix/gccgo.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build gccgo

package unix

import "syscall"

// We can't use the gc-syntax .s files for gccgo. On the plus side
// much of the functionality can be written directly in Go.

//extern gccgoRealSyscall
func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)

func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
	syscall.Entersyscall()
	r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
	syscall.Exitsyscall()
	return r, 0, syscall.Errno(errno)
}

func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
	syscall.Entersyscall()
	r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
	syscall.Exitsyscall()
	return r, 0, syscall.Errno(errno)
}

func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
	syscall.Entersyscall()
	r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
	syscall.Exitsyscall()
	return r, 0, syscall.Errno(errno)
}

func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
	r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
	return r, 0, syscall.Errno(errno)
}

func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
	r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
	return r, 0, syscall.Errno(errno)
}
41
vendor/golang.org/x/sys/unix/gccgo_c.c
generated
vendored
Normal file
@ -0,0 +1,41 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build gccgo

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

#define _STRINGIFY2_(x) #x
#define _STRINGIFY_(x) _STRINGIFY2_(x)
#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)

// Call syscall from C code because the gccgo support for calling from
// Go to C does not support varargs functions.

struct ret {
	uintptr_t r;
	uintptr_t err;
};

struct ret
gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
{
	struct ret r;

	errno = 0;
	r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
	r.err = errno;
	return r;
}

// Define the use function in C so that it is not inlined.

extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline));

void
use(void *p __attribute__ ((unused)))
{
}
20
vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build gccgo,linux,amd64

package unix

import "syscall"

//extern gettimeofday
func realGettimeofday(*Timeval, *byte) int32

func gettimeofday(tv *Timeval) (err syscall.Errno) {
	r := realGettimeofday(tv, nil)
	if r < 0 {
		return syscall.GetErrno()
	}
	return 0
}
285
vendor/golang.org/x/sys/unix/mkall.sh
generated
vendored
Executable file
@ -0,0 +1,285 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright 2009 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# The unix package provides access to the raw system call
|
||||
# interface of the underlying operating system. Porting Go to
|
||||
# a new architecture/operating system combination requires
|
||||
# some manual effort, though there are tools that automate
|
||||
# much of the process. The auto-generated files have names
|
||||
# beginning with z.
|
||||
#
|
||||
# This script runs or (given -n) prints suggested commands to generate z files
|
||||
# for the current system. Running those commands is not automatic.
|
||||
# This script is documentation more than anything else.
|
||||
#
|
||||
# * asm_${GOOS}_${GOARCH}.s
|
||||
#
|
||||
# This hand-written assembly file implements system call dispatch.
|
||||
# There are three entry points:
|
||||
#
|
||||
# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
|
||||
# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
|
||||
# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
|
||||
#
|
||||
# The first and second are the standard ones; they differ only in
|
||||
# how many arguments can be passed to the kernel.
|
||||
# The third is for low-level use by the ForkExec wrapper;
|
||||
# unlike the first two, it does not call into the scheduler to
|
||||
# let it know that a system call is running.
|
||||
#
|
||||
# * syscall_${GOOS}.go
|
||||
#
|
||||
# This hand-written Go file implements system calls that need
|
||||
# special handling and lists "//sys" comments giving prototypes
|
||||
# for ones that can be auto-generated. Mksyscall reads those
|
||||
# comments to generate the stubs.
|
||||
#
|
||||
# * syscall_${GOOS}_${GOARCH}.go
|
||||
#
|
||||
# Same as syscall_${GOOS}.go except that it contains code specific
|
||||
# to ${GOOS} on one particular architecture.
|
||||
#
|
||||
# * types_${GOOS}.c
|
||||
#
|
||||
# This hand-written C file includes standard C headers and then
|
||||
# creates typedef or enum names beginning with a dollar sign
|
||||
# (use of $ in variable names is a gcc extension). The hardest
|
||||
# part about preparing this file is figuring out which headers to
|
||||
# include and which symbols need to be #defined to get the
|
||||
# actual data structures that pass through to the kernel system calls.
|
||||
# Some C libraries present alternate versions for binary compatibility
|
||||
# and translate them on the way in and out of system calls, but
|
||||
# there is almost always a #define that can get the real ones.
|
||||
# See types_darwin.c and types_linux.c for examples.
|
||||
#
|
||||
# * zerror_${GOOS}_${GOARCH}.go
|
||||
#
|
||||
# This machine-generated file defines the system's error numbers,
|
||||
# error strings, and signal numbers. The generator is "mkerrors.sh".
|
||||
# Usually no arguments are needed, but mkerrors.sh will pass its
|
||||
# arguments on to godefs.
|
||||
#
|
||||
# * zsyscall_${GOOS}_${GOARCH}.go
|
||||
#
|
||||
# Generated by mksyscall.pl; see syscall_${GOOS}.go above.
|
||||
#
|
||||
# * zsysnum_${GOOS}_${GOARCH}.go
|
||||
#
|
||||
# Generated by mksysnum_${GOOS}.
|
||||
#
|
||||
# * ztypes_${GOOS}_${GOARCH}.go
|
||||
#
|
||||
# Generated by godefs; see types_${GOOS}.c above.
|
||||
|
||||
GOOSARCH="${GOOS}_${GOARCH}"
|
||||
|
||||
# defaults
|
||||
mksyscall="./mksyscall.pl"
|
||||
mkerrors="./mkerrors.sh"
|
||||
zerrors="zerrors_$GOOSARCH.go"
|
||||
mksysctl=""
|
||||
zsysctl="zsysctl_$GOOSARCH.go"
|
||||
mksysnum=
|
||||
mktypes=
|
||||
run="sh"
|
||||
|
||||
case "$1" in
|
||||
-syscalls)
|
||||
for i in zsyscall*go
|
||||
do
|
||||
sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
|
||||
rm _$i
|
||||
done
|
||||
exit 0
|
||||
;;
|
||||
-n)
|
||||
run="cat"
|
||||
shift
|
||||
esac
|
||||
|
||||
case "$#" in
|
||||
0)
|
||||
;;
|
||||
*)
|
||||
echo 'usage: mkall.sh [-n]' 1>&2
|
||||
exit 2
|
||||
esac
|
||||
|
||||
GOOSARCH_in=syscall_$GOOSARCH.go
|
||||
case "$GOOSARCH" in
|
||||
_* | *_ | _)
|
||||
echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
|
||||
exit 1
|
||||
;;
|
||||
darwin_386)
|
||||
mkerrors="$mkerrors -m32"
|
||||
mksyscall="./mksyscall.pl -l32"
|
||||
mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
darwin_amd64)
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
darwin_arm)
|
||||
mkerrors="$mkerrors"
|
||||
mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
darwin_arm64)
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
dragonfly_386)
|
||||
mkerrors="$mkerrors -m32"
|
||||
mksyscall="./mksyscall.pl -l32 -dragonfly"
|
||||
mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
dragonfly_amd64)
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksyscall="./mksyscall.pl -dragonfly"
|
||||
mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
freebsd_386)
|
||||
mkerrors="$mkerrors -m32"
|
||||
mksyscall="./mksyscall.pl -l32"
|
||||
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
freebsd_amd64)
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
freebsd_arm)
|
||||
mkerrors="$mkerrors"
|
||||
mksyscall="./mksyscall.pl -l32 -arm"
|
||||
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
|
||||
# Let the type of C char be signed for making the bare syscall
|
||||
# API consistent across over platforms.
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
|
||||
;;
|
||||
linux_386)
|
||||
mkerrors="$mkerrors -m32"
|
||||
mksyscall="./mksyscall.pl -l32"
|
||||
mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
linux_amd64)
|
||||
unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1)
|
||||
if [ "$unistd_h" = "" ]; then
|
||||
echo >&2 cannot find unistd_64.h
|
||||
exit 1
|
||||
fi
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksysnum="./mksysnum_linux.pl $unistd_h"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
linux_arm)
|
||||
mkerrors="$mkerrors"
|
||||
mksyscall="./mksyscall.pl -l32 -arm"
|
||||
mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
linux_arm64)
|
||||
unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1)
|
||||
if [ "$unistd_h" = "" ]; then
|
||||
echo >&2 cannot find unistd_64.h
|
||||
exit 1
|
||||
fi
|
||||
mksysnum="./mksysnum_linux.pl $unistd_h"
|
||||
# Let the type of C char be signed for making the bare syscall
|
||||
# API consistent across over platforms.
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
|
||||
;;
|
||||
linux_ppc64)
|
||||
GOOSARCH_in=syscall_linux_ppc64x.go
|
||||
unistd_h=/usr/include/asm/unistd.h
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksysnum="./mksysnum_linux.pl $unistd_h"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
linux_ppc64le)
|
||||
GOOSARCH_in=syscall_linux_ppc64x.go
|
||||
unistd_h=/usr/include/powerpc64le-linux-gnu/asm/unistd.h
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksysnum="./mksysnum_linux.pl $unistd_h"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
linux_s390x)
|
||||
GOOSARCH_in=syscall_linux_s390x.go
|
||||
unistd_h=/usr/include/asm/unistd.h
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksysnum="./mksysnum_linux.pl $unistd_h"
|
||||
# Let the type of C char be signed to make the bare sys
|
||||
# API more consistent between platforms.
|
||||
# This is a deliberate departure from the way the syscall
|
||||
# package generates its version of the types file.
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
|
||||
;;
|
||||
netbsd_386)
|
||||
mkerrors="$mkerrors -m32"
|
||||
mksyscall="./mksyscall.pl -l32 -netbsd"
|
||||
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
netbsd_amd64)
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksyscall="./mksyscall.pl -netbsd"
|
||||
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
openbsd_386)
|
||||
mkerrors="$mkerrors -m32"
|
||||
mksyscall="./mksyscall.pl -l32 -openbsd"
|
||||
mksysctl="./mksysctl_openbsd.pl"
|
||||
zsysctl="zsysctl_openbsd.go"
|
||||
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
openbsd_amd64)
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksyscall="./mksyscall.pl -openbsd"
|
||||
mksysctl="./mksysctl_openbsd.pl"
|
||||
zsysctl="zsysctl_openbsd.go"
|
||||
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
solaris_amd64)
|
||||
mksyscall="./mksyscall_solaris.pl"
|
||||
mkerrors="$mkerrors -m64"
|
||||
mksysnum=
|
||||
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
|
||||
;;
|
||||
*)
|
||||
echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
(
|
||||
if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
|
||||
case "$GOOS" in
|
||||
*)
|
||||
syscall_goos="syscall_$GOOS.go"
|
||||
case "$GOOS" in
|
||||
darwin | dragonfly | freebsd | netbsd | openbsd)
|
||||
syscall_goos="syscall_bsd.go $syscall_goos"
|
||||
;;
|
||||
esac
|
||||
if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
|
||||
;;
|
||||
esac
|
||||
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
|
||||
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
|
||||
if [ -n "$mktypes" ]; then
|
||||
echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go";
|
||||
echo "$mktypes types_$GOOS.go | go run mkpost.go >>ztypes_$GOOSARCH.go";
|
||||
fi
|
||||
) | $run
|
476
vendor/golang.org/x/sys/unix/mkerrors.sh
generated
vendored
Executable file
@ -0,0 +1,476 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright 2009 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# Generate Go code listing errors and other #defined constant
|
||||
# values (ENAMETOOLONG etc.), by asking the preprocessor
|
||||
# about the definitions.
|
||||
|
||||
unset LANG
|
||||
export LC_ALL=C
|
||||
export LC_CTYPE=C
|
||||
|
||||
if test -z "$GOARCH" -o -z "$GOOS"; then
|
||||
echo 1>&2 "GOARCH or GOOS not defined in environment"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CC=${CC:-cc}
|
||||
|
||||
if [[ "$GOOS" -eq "solaris" ]]; then
|
||||
# Assumes GNU versions of utilities in PATH.
|
||||
export PATH=/usr/gnu/bin:$PATH
|
||||
fi
|
||||
|
||||
uname=$(uname)
|
||||
|
||||
includes_Darwin='
|
||||
#define _DARWIN_C_SOURCE
|
||||
#define KERNEL
|
||||
#define _DARWIN_USE_64_BIT_INODE
|
||||
#include <sys/types.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/ptrace.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/wait.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_types.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/ip.h>
|
||||
#include <termios.h>
|
||||
'
|
||||
|
||||
includes_DragonFly='
|
||||
#include <sys/types.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/wait.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_types.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <termios.h>
|
||||
#include <netinet/ip.h>
|
||||
#include <net/ip_mroute/ip_mroute.h>
|
||||
'
|
||||
|
||||
includes_FreeBSD='
|
||||
#include <sys/param.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/wait.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_types.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <termios.h>
|
||||
#include <netinet/ip.h>
|
||||
#include <netinet/ip_mroute.h>
|
||||
#include <sys/extattr.h>
|
||||
|
||||
#if __FreeBSD__ >= 10
|
||||
#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
|
||||
#undef SIOCAIFADDR
|
||||
#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
|
||||
#undef SIOCSIFPHYADDR
|
||||
#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
|
||||
#endif
|
||||
'
|
||||
|
||||
includes_Linux='
|
||||
#define _LARGEFILE_SOURCE
|
||||
#define _LARGEFILE64_SOURCE
|
||||
#ifndef __LP64__
|
||||
#define _FILE_OFFSET_BITS 64
|
||||
#endif
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include <bits/sockaddr.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <sys/inotify.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/prctl.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/socket.h>
|
||||
#include <linux/if.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/if_tun.h>
|
||||
#include <linux/if_packet.h>
|
||||
#include <linux/if_addr.h>
|
||||
#include <linux/filter.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/icmpv6.h>
|
||||
#include <net/route.h>
|
||||
#include <asm/termbits.h>
|
||||
|
||||
#ifndef MSG_FASTOPEN
|
||||
#define MSG_FASTOPEN 0x20000000
|
||||
#endif
|
||||
|
||||
#ifndef PTRACE_GETREGS
|
||||
#define PTRACE_GETREGS 0xc
|
||||
#endif
|
||||
|
||||
#ifndef PTRACE_SETREGS
|
||||
#define PTRACE_SETREGS 0xd
|
||||
#endif
|
||||
'
|
||||
|
||||
includes_NetBSD='
|
||||
#include <sys/types.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/termios.h>
|
||||
#include <sys/ttycom.h>
|
||||
#include <sys/wait.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_types.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/in_systm.h>
|
||||
#include <netinet/ip.h>
|
||||
#include <netinet/ip_mroute.h>
|
||||
#include <netinet/if_ether.h>
|
||||
|
||||
// Needed since <sys/param.h> refers to it...
|
||||
#define schedppq 1
|
||||
'
|
||||
|
||||
includes_OpenBSD='
|
||||
#include <sys/types.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/termios.h>
|
||||
#include <sys/ttycom.h>
|
||||
#include <sys/wait.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_types.h>
|
||||
#include <net/if_var.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/in_systm.h>
|
||||
#include <netinet/ip.h>
|
||||
#include <netinet/ip_mroute.h>
|
||||
#include <netinet/if_ether.h>
|
||||
#include <net/if_bridge.h>
|
||||
|
||||
// We keep some constants not supported in OpenBSD 5.5 and beyond for
|
||||
// the promise of compatibility.
|
||||
#define EMUL_ENABLED 0x1
|
||||
#define EMUL_NATIVE 0x2
|
||||
#define IPV6_FAITH 0x1d
|
||||
#define IPV6_OPTIONS 0x1
|
||||
#define IPV6_RTHDR_STRICT 0x1
|
||||
#define IPV6_SOCKOPT_RESERVED1 0x3
|
||||
#define SIOCGIFGENERIC 0xc020693a
|
||||
#define SIOCSIFGENERIC 0x80206939
|
||||
#define WALTSIG 0x4
|
||||
'
|
||||
|
||||
includes_SunOS='
|
||||
#include <limits.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/wait.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <net/bpf.h>
|
||||
#include <net/if.h>
|
||||
#include <net/if_arp.h>
|
||||
#include <net/if_types.h>
|
||||
#include <net/route.h>
|
||||
#include <netinet/in.h>
|
||||
#include <termios.h>
|
||||
#include <netinet/ip.h>
|
||||
#include <netinet/ip_mroute.h>
|
||||
'
|
||||
|
||||
|
||||
includes='
|
||||
#include <sys/types.h>
|
||||
#include <sys/file.h>
|
||||
#include <fcntl.h>
|
||||
#include <dirent.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/ip.h>
|
||||
#include <netinet/ip6.h>
|
||||
#include <netinet/tcp.h>
|
||||
#include <errno.h>
|
||||
#include <sys/signal.h>
|
||||
#include <signal.h>
|
||||
#include <sys/resource.h>
|
||||
#include <time.h>
|
||||
'
|
||||
ccflags="$@"
|
||||
|
||||
# Write go tool cgo -godefs input.
|
||||
(
|
||||
echo package unix
|
||||
echo
|
||||
echo '/*'
|
||||
indirect="includes_$(uname)"
|
||||
echo "${!indirect} $includes"
|
||||
echo '*/'
|
||||
echo 'import "C"'
|
||||
echo 'import "syscall"'
|
||||
echo
|
||||
echo 'const ('
|
||||
|
||||
# The gcc command line prints all the #defines
|
||||
# it encounters while processing the input
|
||||
echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags |
|
||||
awk '
|
||||
$1 != "#define" || $2 ~ /\(/ || $3 == "" {next}
|
||||
|
||||
$2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers
|
||||
$2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next}
|
||||
$2 ~ /^(SCM_SRCRT)$/ {next}
|
||||
$2 ~ /^(MAP_FAILED)$/ {next}
|
||||
$2 ~ /^ELF_.*$/ {next}# <asm/elf.h> contains ELF_ARCH, etc.
|
||||
|
||||
$2 ~ /^EXTATTR_NAMESPACE_NAMES/ ||
|
||||
$2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next}
|
||||
|
||||
$2 !~ /^ETH_/ &&
|
||||
$2 !~ /^EPROC_/ &&
|
||||
$2 !~ /^EQUIV_/ &&
|
||||
$2 !~ /^EXPR_/ &&
|
||||
$2 ~ /^E[A-Z0-9_]+$/ ||
|
||||
$2 ~ /^B[0-9_]+$/ ||
|
||||
$2 == "BOTHER" ||
|
||||
$2 ~ /^CI?BAUD(EX)?$/ ||
|
||||
$2 == "IBSHIFT" ||
|
||||
$2 ~ /^V[A-Z0-9]+$/ ||
|
||||
$2 ~ /^CS[A-Z0-9]/ ||
|
||||
$2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
|
||||
$2 ~ /^IGN/ ||
|
||||
$2 ~ /^IX(ON|ANY|OFF)$/ ||
|
||||
$2 ~ /^IN(LCR|PCK)$/ ||
|
||||
$2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
|
||||
$2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ ||
|
||||
$2 == "BRKINT" ||
|
||||
$2 == "HUPCL" ||
|
||||
$2 == "PENDIN" ||
|
||||
$2 == "TOSTOP" ||
|
||||
$2 == "XCASE" ||
|
||||
$2 == "ALTWERASE" ||
|
||||
$2 == "NOKERNINFO" ||
|
||||
$2 ~ /^PAR/ ||
|
||||
$2 ~ /^SIG[^_]/ ||
|
||||
$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
|
||||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
|
||||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
|
||||
$2 ~ /^O?XTABS$/ ||
|
||||
$2 ~ /^TC[IO](ON|OFF)$/ ||
|
||||
$2 ~ /^IN_/ ||
|
||||
$2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
|
||||
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
|
||||
$2 == "ICMPV6_FILTER" ||
|
||||
$2 == "SOMAXCONN" ||
|
||||
$2 == "NAME_MAX" ||
|
||||
$2 == "IFNAMSIZ" ||
|
||||
$2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ ||
|
||||
$2 ~ /^SYSCTL_VERS/ ||
|
||||
$2 ~ /^(MS|MNT)_/ ||
|
||||
$2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
|
||||
$2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ ||
|
||||
$2 ~ /^LINUX_REBOOT_CMD_/ ||
|
||||
$2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
|
||||
$2 !~ "NLA_TYPE_MASK" &&
|
||||
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ ||
|
||||
$2 ~ /^SIOC/ ||
|
||||
$2 ~ /^TIOC/ ||
|
||||
$2 ~ /^TCGET/ ||
|
||||
$2 ~ /^TCSET/ ||
|
||||
$2 ~ /^TC(FLSH|SBRKP?|XONC)$/ ||
|
||||
$2 !~ "RTF_BITS" &&
|
||||
$2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
|
||||
$2 ~ /^BIOC/ ||
|
||||
$2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
|
||||
$2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ ||
|
||||
$2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
|
||||
$2 ~ /^CLONE_[A-Z_]+/ ||
|
||||
$2 !~ /^(BPF_TIMEVAL)$/ &&
|
||||
$2 ~ /^(BPF|DLT)_/ ||
|
||||
$2 ~ /^CLOCK_/ ||
|
||||
$2 !~ "WMESGLEN" &&
|
||||
$2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)}
|
||||
$2 ~ /^__WCOREFLAG$/ {next}
|
||||
$2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
|
||||
|
||||
{next}
|
||||
' | sort
|
||||
|
||||
echo ')'
|
||||
) >_const.go
|
||||
|
||||
# Pull out the error names for later.
|
||||
errors=$(
|
||||
echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
|
||||
awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' |
|
||||
sort
|
||||
)
|
||||
|
||||
# Pull out the signal names for later.
|
||||
signals=$(
|
||||
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
|
||||
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
|
||||
egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
|
||||
sort
|
||||
)
|
||||
|
||||
# Again, writing regexps to a file.
|
||||
echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
|
||||
awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' |
|
||||
sort >_error.grep
|
||||
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
|
||||
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
|
||||
egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
|
||||
sort >_signal.grep
|
||||
|
||||
echo '// mkerrors.sh' "$@"
|
||||
echo '// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT'
|
||||
echo
|
||||
echo "// +build ${GOARCH},${GOOS}"
|
||||
echo
|
||||
go tool cgo -godefs -- "$@" _const.go >_error.out
|
||||
cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
|
||||
echo
|
||||
echo '// Errors'
|
||||
echo 'const ('
|
||||
cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/'
|
||||
echo ')'
|
||||
|
||||
echo
|
||||
echo '// Signals'
|
||||
echo 'const ('
|
||||
cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/'
|
||||
echo ')'
|
||||
|
||||
# Run C program to print error and syscall strings.
|
||||
(
|
||||
echo -E "
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <ctype.h>
|
||||
#include <string.h>
|
||||
#include <signal.h>
|
||||
|
||||
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
|
||||
|
||||
enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below
|
||||
|
||||
int errors[] = {
|
||||
"
|
||||
for i in $errors
|
||||
do
|
||||
echo -E ' '$i,
|
||||
done
|
||||
|
||||
echo -E "
|
||||
};
|
||||
|
||||
int signals[] = {
|
||||
"
|
||||
for i in $signals
|
||||
do
|
||||
echo -E ' '$i,
|
||||
done
|
||||
|
||||
# Use -E because on some systems bash builtin interprets \n itself.
|
||||
echo -E '
|
||||
};
|
||||
|
||||
static int
|
||||
intcmp(const void *a, const void *b)
|
||||
{
|
||||
return *(int*)a - *(int*)b;
|
||||
}
|
||||
|
||||
int
|
||||
main(void)
|
||||
{
|
||||
int i, j, e;
|
||||
char buf[1024], *p;
|
||||
|
||||
printf("\n\n// Error table\n");
|
||||
printf("var errors = [...]string {\n");
|
||||
qsort(errors, nelem(errors), sizeof errors[0], intcmp);
|
||||
for(i=0; i<nelem(errors); i++) {
|
||||
e = errors[i];
|
||||
if(i > 0 && errors[i-1] == e)
|
||||
continue;
|
||||
strcpy(buf, strerror(e));
|
||||
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
|
||||
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
|
||||
buf[0] += a - A;
|
||||
printf("\t%d: \"%s\",\n", e, buf);
|
||||
}
|
||||
printf("}\n\n");
|
||||
|
||||
printf("\n\n// Signal table\n");
|
||||
printf("var signals = [...]string {\n");
|
||||
qsort(signals, nelem(signals), sizeof signals[0], intcmp);
|
||||
for(i=0; i<nelem(signals); i++) {
|
||||
e = signals[i];
|
||||
if(i > 0 && signals[i-1] == e)
|
||||
continue;
|
||||
strcpy(buf, strsignal(e));
|
||||
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
|
||||
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
|
||||
buf[0] += a - A;
|
||||
// cut trailing : number.
|
||||
p = strrchr(buf, ":"[0]);
|
||||
if(p)
|
||||
*p = '\0';
|
||||
printf("\t%d: \"%s\",\n", e, buf);
|
||||
}
|
||||
printf("}\n\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
'
|
||||
) >_errors.c
|
||||
|
||||
$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out
|
62
vendor/golang.org/x/sys/unix/mkpost.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// mkpost processes the output of cgo -godefs to
// modify the generated types. It is used to clean up
// the sys API in an architecture specific manner.
//
// mkpost is run after cgo -godefs by mkall.sh.
package main

import (
	"fmt"
	"go/format"
	"io/ioutil"
	"log"
	"os"
	"regexp"
)

func main() {
	b, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	s := string(b)

	goarch := os.Getenv("GOARCH")
	goos := os.Getenv("GOOS")
	if goarch == "s390x" && goos == "linux" {
		// Export the types of PtraceRegs fields.
		re := regexp.MustCompile("ptrace(Psw|Fpregs|Per)")
		s = re.ReplaceAllString(s, "Ptrace$1")

		// Replace padding fields inserted by cgo with blank identifiers.
		re = regexp.MustCompile("Pad_cgo[A-Za-z0-9_]*")
		s = re.ReplaceAllString(s, "_")

		// Replace other unwanted fields with blank identifiers.
		re = regexp.MustCompile("X_[A-Za-z0-9_]*")
		s = re.ReplaceAllString(s, "_")

		// Replace the control_regs union with a blank identifier for now.
		re = regexp.MustCompile("(Control_regs)\\s+\\[0\\]uint64")
		s = re.ReplaceAllString(s, "_ [0]uint64")
	}

	// gofmt
	b, err = format.Source([]byte(s))
	if err != nil {
		log.Fatal(err)
	}

	// Append this command to the header to show where the new file
	// came from.
	re := regexp.MustCompile("(cgo -godefs [a-zA-Z0-9_]+\\.go.*)")
	b = re.ReplaceAll(b, []byte("$1 | go run mkpost.go"))

	fmt.Printf("%s", b)
}
323
vendor/golang.org/x/sys/unix/mksyscall.pl
generated
vendored
Executable file
@ -0,0 +1,323 @@
|
||||
#!/usr/bin/env perl
|
||||
# Copyright 2009 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# This program reads a file containing function prototypes
|
||||
# (like syscall_darwin.go) and generates system call bodies.
|
||||
# The prototypes are marked by lines beginning with "//sys"
|
||||
# and read like func declarations if //sys is replaced by func, but:
|
||||
# * The parameter lists must give a name for each argument.
|
||||
# This includes return parameters.
|
||||
# * The parameter lists must give a type for each argument:
|
||||
# the (x, y, z int) shorthand is not allowed.
|
||||
# * If the return parameter is an error number, it must be named errno.
|
||||
|
||||
# A line beginning with //sysnb is like //sys, except that the
|
||||
# goroutine will not be suspended during the execution of the system
|
||||
# call. This must only be used for system calls which can never
|
||||
# block, as otherwise the system call could cause all goroutines to
|
||||
# hang.
|
||||
|
||||
use strict;
|
||||
|
||||
my $cmdline = "mksyscall.pl " . join(' ', @ARGV);
|
||||
my $errors = 0;
|
||||
my $_32bit = "";
|
||||
my $plan9 = 0;
|
||||
my $openbsd = 0;
|
||||
my $netbsd = 0;
|
||||
my $dragonfly = 0;
|
||||
my $arm = 0; # 64-bit value should use (even, odd)-pair
|
||||
|
||||
if($ARGV[0] eq "-b32") {
|
||||
$_32bit = "big-endian";
|
||||
shift;
|
||||
} elsif($ARGV[0] eq "-l32") {
|
||||
$_32bit = "little-endian";
|
||||
shift;
|
||||
}
|
||||
if($ARGV[0] eq "-plan9") {
|
||||
$plan9 = 1;
|
||||
shift;
|
||||
}
|
||||
if($ARGV[0] eq "-openbsd") {
|
||||
$openbsd = 1;
|
||||
shift;
|
||||
}
|
||||
if($ARGV[0] eq "-netbsd") {
|
||||
$netbsd = 1;
|
||||
shift;
|
||||
}
|
||||
if($ARGV[0] eq "-dragonfly") {
|
||||
$dragonfly = 1;
|
||||
shift;
|
||||
}
|
||||
if($ARGV[0] eq "-arm") {
|
||||
$arm = 1;
|
||||
shift;
|
||||
}
|
||||
|
||||
if($ARGV[0] =~ /^-/) {
|
||||
print STDERR "usage: mksyscall.pl [-b32 | -l32] [file ...]\n";
|
||||
exit 1;
|
||||
}
|
||||
|
||||
if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
|
||||
print STDERR "GOARCH or GOOS not defined in environment\n";
|
||||
exit 1;
|
||||
}
|
||||
|
||||
sub parseparamlist($) {
|
||||
my ($list) = @_;
|
||||
$list =~ s/^\s*//;
|
||||
$list =~ s/\s*$//;
|
||||
if($list eq "") {
|
||||
return ();
|
||||
}
|
||||
return split(/\s*,\s*/, $list);
|
||||
}
|
||||
|
||||
sub parseparam($) {
|
||||
my ($p) = @_;
|
||||
if($p !~ /^(\S*) (\S*)$/) {
|
||||
print STDERR "$ARGV:$.: malformed parameter: $p\n";
|
||||
$errors = 1;
|
||||
return ("xx", "int");
|
||||
}
|
||||
return ($1, $2);
|
||||
}
|
||||
|
||||
my $text = "";
|
||||
while(<>) {
|
||||
chomp;
|
||||
s/\s+/ /g;
|
||||
s/^\s+//;
|
||||
s/\s+$//;
|
||||
my $nonblock = /^\/\/sysnb /;
|
||||
next if !/^\/\/sys / && !$nonblock;
|
||||
|
||||
# Line must be of the form
|
||||
# func Open(path string, mode int, perm int) (fd int, errno error)
|
||||
# Split into name, in params, out params.
|
||||
if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) {
|
||||
print STDERR "$ARGV:$.: malformed //sys declaration\n";
|
||||
$errors = 1;
|
||||
next;
|
||||
}
|
||||
my ($func, $in, $out, $sysname) = ($2, $3, $4, $5);
|
||||
|
||||
# Split argument lists on comma.
|
||||
my @in = parseparamlist($in);
|
||||
my @out = parseparamlist($out);
|
||||
|
||||
# Try in vain to keep people from editing this file.
|
||||
# The theory is that they jump into the middle of the file
|
||||
# without reading the header.
|
||||
$text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
|
||||
|
||||
# Go function header.
|
||||
my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : "";
|
||||
$text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl;
|
||||
|
||||
# Check if err return available
|
||||
my $errvar = "";
|
||||
foreach my $p (@out) {
|
||||
my ($name, $type) = parseparam($p);
|
||||
if($type eq "error") {
|
||||
$errvar = $name;
|
||||
last;
|
||||
}
|
||||
}
|
||||
|
||||
# Prepare arguments to Syscall.
|
||||
my @args = ();
|
||||
my @uses = ();
|
||||
my $n = 0;
|
||||
foreach my $p (@in) {
|
||||
my ($name, $type) = parseparam($p);
|
||||
if($type =~ /^\*/) {
|
||||
push @args, "uintptr(unsafe.Pointer($name))";
|
||||
} elsif($type eq "string" && $errvar ne "") {
|
||||
$text .= "\tvar _p$n *byte\n";
|
||||
$text .= "\t_p$n, $errvar = BytePtrFromString($name)\n";
|
||||
$text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
|
||||
push @args, "uintptr(unsafe.Pointer(_p$n))";
|
||||
push @uses, "use(unsafe.Pointer(_p$n))";
|
||||
$n++;
|
||||
} elsif($type eq "string") {
|
||||
print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
|
||||
$text .= "\tvar _p$n *byte\n";
|
||||
$text .= "\t_p$n, _ = BytePtrFromString($name)\n";
|
||||
push @args, "uintptr(unsafe.Pointer(_p$n))";
|
||||
push @uses, "use(unsafe.Pointer(_p$n))";
|
||||
$n++;
|
||||
} elsif($type =~ /^\[\](.*)/) {
|
||||
# Convert slice into pointer, length.
|
||||
# Have to be careful not to take address of &a[0] if len == 0:
|
||||
# pass dummy pointer in that case.
|
||||
# Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
|
||||
$text .= "\tvar _p$n unsafe.Pointer\n";
|
||||
$text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}";
|
||||
$text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}";
|
||||
$text .= "\n";
|
||||
push @args, "uintptr(_p$n)", "uintptr(len($name))";
|
||||
$n++;
|
||||
} elsif($type eq "int64" && ($openbsd || $netbsd)) {
|
||||
push @args, "0";
|
||||
if($_32bit eq "big-endian") {
|
||||
push @args, "uintptr($name>>32)", "uintptr($name)";
|
||||
} elsif($_32bit eq "little-endian") {
|
||||
push @args, "uintptr($name)", "uintptr($name>>32)";
|
||||
} else {
|
||||
push @args, "uintptr($name)";
|
||||
}
|
||||
} elsif($type eq "int64" && $dragonfly) {
|
||||
if ($func !~ /^extp(read|write)/i) {
|
||||
push @args, "0";
|
||||
}
|
||||
if($_32bit eq "big-endian") {
|
||||
push @args, "uintptr($name>>32)", "uintptr($name)";
|
||||
} elsif($_32bit eq "little-endian") {
|
||||
push @args, "uintptr($name)", "uintptr($name>>32)";
|
||||
} else {
|
||||
push @args, "uintptr($name)";
|
||||
}
|
||||
} elsif($type eq "int64" && $_32bit ne "") {
|
||||
if(@args % 2 && $arm) {
|
||||
# arm abi specifies 64-bit argument uses
|
||||
# (even, odd) pair
|
||||
push @args, "0"
|
||||
}
|
||||
if($_32bit eq "big-endian") {
|
||||
push @args, "uintptr($name>>32)", "uintptr($name)";
|
||||
} else {
|
||||
push @args, "uintptr($name)", "uintptr($name>>32)";
|
||||
}
|
||||
} else {
|
||||
push @args, "uintptr($name)";
|
||||
}
|
||||
}
|
||||
|
||||
# Determine which form to use; pad args with zeros.
|
||||
my $asm = "Syscall";
|
||||
if ($nonblock) {
|
||||
$asm = "RawSyscall";
|
||||
}
|
||||
if(@args <= 3) {
|
||||
while(@args < 3) {
|
||||
push @args, "0";
|
||||
}
|
||||
} elsif(@args <= 6) {
|
||||
$asm .= "6";
|
||||
while(@args < 6) {
|
||||
push @args, "0";
|
||||
}
|
||||
} elsif(@args <= 9) {
|
||||
$asm .= "9";
|
||||
while(@args < 9) {
|
||||
push @args, "0";
|
||||
}
|
||||
} else {
|
||||
print STDERR "$ARGV:$.: too many arguments to system call\n";
|
||||
}
|
||||
|
||||
# System call number.
|
||||
if($sysname eq "") {
|
||||
$sysname = "SYS_$func";
|
||||
$sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar
|
||||
$sysname =~ y/a-z/A-Z/;
|
||||
}
|
||||
|
||||
# Actual call.
|
||||
my $args = join(', ', @args);
|
||||
my $call = "$asm($sysname, $args)";
|
||||
|
||||
# Assign return values.
|
||||
my $body = "";
|
||||
my @ret = ("_", "_", "_");
|
||||
my $do_errno = 0;
|
||||
for(my $i=0; $i<@out; $i++) {
|
||||
my $p = $out[$i];
|
||||
my ($name, $type) = parseparam($p);
|
||||
my $reg = "";
|
||||
if($name eq "err" && !$plan9) {
|
||||
$reg = "e1";
|
||||
$ret[2] = $reg;
|
||||
$do_errno = 1;
|
||||
} elsif($name eq "err" && $plan9) {
|
||||
$ret[0] = "r0";
|
||||
$ret[2] = "e1";
|
||||
next;
|
||||
} else {
|
||||
$reg = sprintf("r%d", $i);
|
||||
$ret[$i] = $reg;
|
||||
}
|
||||
if($type eq "bool") {
|
||||
$reg = "$reg != 0";
|
||||
}
|
||||
if($type eq "int64" && $_32bit ne "") {
|
||||
# 64-bit number in r1:r0 or r0:r1.
|
||||
if($i+2 > @out) {
|
||||
print STDERR "$ARGV:$.: not enough registers for int64 return\n";
|
||||
}
|
||||
if($_32bit eq "big-endian") {
|
||||
$reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1);
|
||||
} else {
|
||||
$reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i);
|
||||
}
|
||||
$ret[$i] = sprintf("r%d", $i);
|
||||
$ret[$i+1] = sprintf("r%d", $i+1);
|
||||
}
|
||||
if($reg ne "e1" || $plan9) {
|
||||
$body .= "\t$name = $type($reg)\n";
|
||||
}
|
||||
}
|
||||
if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") {
|
||||
$text .= "\t$call\n";
|
||||
} else {
|
||||
$text .= "\t$ret[0], $ret[1], $ret[2] := $call\n";
|
||||
}
|
||||
foreach my $use (@uses) {
|
||||
$text .= "\t$use\n";
|
||||
}
|
||||
$text .= $body;
|
||||
|
||||
if ($plan9 && $ret[2] eq "e1") {
|
||||
$text .= "\tif int32(r0) == -1 {\n";
|
||||
$text .= "\t\terr = e1\n";
|
||||
$text .= "\t}\n";
|
||||
} elsif ($do_errno) {
|
||||
$text .= "\tif e1 != 0 {\n";
|
||||
$text .= "\t\terr = errnoErr(e1)\n";
|
||||
$text .= "\t}\n";
|
||||
}
|
||||
$text .= "\treturn\n";
|
||||
$text .= "}\n\n";
|
||||
}
|
||||
|
||||
chomp $text;
|
||||
chomp $text;
|
||||
|
||||
if($errors) {
|
||||
exit 1;
|
||||
}
|
||||
|
||||
print <<EOF;
|
||||
// $cmdline
|
||||
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
|
||||
|
||||
// +build $ENV{'GOARCH'},$ENV{'GOOS'}
|
||||
|
||||
package unix
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var _ syscall.Errno
|
||||
|
||||
$text
|
||||
EOF
|
||||
exit 0;
|
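The header comments of mksyscall.pl above describe the generator's input format. For reference, a hedged illustration of the kind of //sys and //sysnb prototypes it reads from a hand-written syscall_${GOOS}.go file; these two lines mirror declarations that exist upstream, and the exact set varies by OS.

//sys	Mkdirat(dirfd int, path string, mode uint32) (err error)
//sysnb	Getpid() (pid int)

Each prototype is expanded into a z-file stub that marshals its arguments and dispatches through Syscall or RawSyscall, depending on whether the //sysnb form was used.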
294
vendor/golang.org/x/sys/unix/mksyscall_solaris.pl
generated
vendored
Executable file
@ -0,0 +1,294 @@
|
||||
#!/usr/bin/env perl
|
||||
# Copyright 2009 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
# This program reads a file containing function prototypes
|
||||
# (like syscall_solaris.go) and generates system call bodies.
|
||||
# The prototypes are marked by lines beginning with "//sys"
|
||||
# and read like func declarations if //sys is replaced by func, but:
|
||||
# * The parameter lists must give a name for each argument.
|
||||
# This includes return parameters.
|
||||
# * The parameter lists must give a type for each argument:
|
||||
# the (x, y, z int) shorthand is not allowed.
|
||||
# * If the return parameter is an error number, it must be named err.
|
||||
# * If go func name needs to be different than its libc name,
|
||||
# * or the function is not in libc, name could be specified
|
||||
# * at the end, after "=" sign, like
|
||||
# //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
|
||||
|
||||
use strict;
|
||||
|
||||
my $cmdline = "mksyscall_solaris.pl " . join(' ', @ARGV);
|
||||
my $errors = 0;
|
||||
my $_32bit = "";
|
||||
|
||||
binmode STDOUT;
|
||||
|
||||
if($ARGV[0] eq "-b32") {
|
||||
$_32bit = "big-endian";
|
||||
shift;
|
||||
} elsif($ARGV[0] eq "-l32") {
|
||||
$_32bit = "little-endian";
|
||||
shift;
|
||||
}
|
||||
|
||||
if($ARGV[0] =~ /^-/) {
|
||||
print STDERR "usage: mksyscall_solaris.pl [-b32 | -l32] [file ...]\n";
|
||||
exit 1;
|
||||
}
|
||||
|
||||
if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
|
||||
print STDERR "GOARCH or GOOS not defined in environment\n";
|
||||
exit 1;
|
||||
}
|
||||
|
||||
sub parseparamlist($) {
|
||||
my ($list) = @_;
|
||||
$list =~ s/^\s*//;
|
||||
$list =~ s/\s*$//;
|
||||
if($list eq "") {
|
||||
return ();
|
||||
}
|
||||
return split(/\s*,\s*/, $list);
|
||||
}
|
||||
|
||||
sub parseparam($) {
|
||||
my ($p) = @_;
|
||||
if($p !~ /^(\S*) (\S*)$/) {
|
||||
print STDERR "$ARGV:$.: malformed parameter: $p\n";
|
||||
$errors = 1;
|
||||
return ("xx", "int");
|
||||
}
|
||||
return ($1, $2);
|
||||
}
|
||||
|
||||
my $package = "";
|
||||
my $text = "";
|
||||
my $dynimports = "";
|
||||
my $linknames = "";
|
||||
my @vars = ();
|
||||
while(<>) {
|
||||
chomp;
|
||||
s/\s+/ /g;
|
||||
s/^\s+//;
|
||||
s/\s+$//;
|
||||
$package = $1 if !$package && /^package (\S+)$/;
|
||||
my $nonblock = /^\/\/sysnb /;
|
||||
next if !/^\/\/sys / && !$nonblock;
|
||||
|
||||
# Line must be of the form
|
||||
# func Open(path string, mode int, perm int) (fd int, err error)
|
||||
# Split into name, in params, out params.
|
||||
if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) {
|
||||
print STDERR "$ARGV:$.: malformed //sys declaration\n";
|
||||
$errors = 1;
|
||||
next;
|
||||
}
|
||||
my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6);
|
||||
|
||||
# Split argument lists on comma.
|
||||
my @in = parseparamlist($in);
|
||||
my @out = parseparamlist($out);
|
||||
|
||||
# So file name.
|
||||
if($modname eq "") {
|
||||
$modname = "libc";
|
||||
}
|
||||
|
||||
# System call name.
|
||||
if($sysname eq "") {
|
||||
$sysname = "$func";
|
||||
}
|
||||
|
||||
# System call pointer variable name.
|
||||
my $sysvarname = "proc$sysname";
|
||||
|
||||
my $strconvfunc = "BytePtrFromString";
|
||||
my $strconvtype = "*byte";
|
||||
|
||||
$sysname =~ y/A-Z/a-z/; # All libc functions are lowercase.
|
||||
|
||||
# Runtime import of function to allow cross-platform builds.
|
||||
$dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n";
|
||||
# Link symbol to proc address variable.
|
||||
$linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n";
|
||||
# Library proc address variable.
|
||||
push @vars, $sysvarname;
|
||||
|
||||
# Go function header.
|
||||
$out = join(', ', @out);
|
||||
if($out ne "") {
|
||||
$out = " ($out)";
|
||||
}
|
||||
if($text ne "") {
|
||||
$text .= "\n"
|
||||
}
|
||||
$text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out;
|
||||
|
||||
# Check if err return available
|
||||
my $errvar = "";
|
||||
foreach my $p (@out) {
|
||||
my ($name, $type) = parseparam($p);
|
||||
if($type eq "error") {
|
||||
$errvar = $name;
|
||||
last;
|
||||
}
|
||||
}
|
||||
|
||||
# Prepare arguments to Syscall.
|
||||
my @args = ();
|
||||
my @uses = ();
|
||||
my $n = 0;
|
||||
foreach my $p (@in) {
|
||||
my ($name, $type) = parseparam($p);
|
||||
if($type =~ /^\*/) {
|
||||
push @args, "uintptr(unsafe.Pointer($name))";
|
||||
} elsif($type eq "string" && $errvar ne "") {
|
||||
$text .= "\tvar _p$n $strconvtype\n";
|
||||
$text .= "\t_p$n, $errvar = $strconvfunc($name)\n";
|
||||
$text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
|
||||
push @args, "uintptr(unsafe.Pointer(_p$n))";
|
||||
push @uses, "use(unsafe.Pointer(_p$n))";
|
||||
$n++;
|
||||
} elsif($type eq "string") {
|
||||
print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
|
||||
$text .= "\tvar _p$n $strconvtype\n";
|
||||
$text .= "\t_p$n, _ = $strconvfunc($name)\n";
|
||||
push @args, "uintptr(unsafe.Pointer(_p$n))";
|
||||
push @uses, "use(unsafe.Pointer(_p$n))";
|
||||
$n++;
|
||||
} elsif($type =~ /^\[\](.*)/) {
|
||||
# Convert slice into pointer, length.
|
||||
# Have to be careful not to take address of &a[0] if len == 0:
|
||||
# pass nil in that case.
|
||||
$text .= "\tvar _p$n *$1\n";
|
||||
$text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n";
|
||||
push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))";
|
||||
$n++;
|
||||
} elsif($type eq "int64" && $_32bit ne "") {
|
||||
if($_32bit eq "big-endian") {
|
||||
push @args, "uintptr($name >> 32)", "uintptr($name)";
|
||||
} else {
|
||||
push @args, "uintptr($name)", "uintptr($name >> 32)";
|
||||
}
|
||||
} elsif($type eq "bool") {
|
||||
$text .= "\tvar _p$n uint32\n";
|
||||
$text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n";
|
||||
push @args, "uintptr(_p$n)";
|
||||
$n++;
|
||||
} else {
|
||||
push @args, "uintptr($name)";
|
||||
}
|
||||
}
|
||||
my $nargs = @args;
|
||||
|
||||
# Determine which form to use; pad args with zeros.
|
||||
my $asm = "sysvicall6";
|
||||
if ($nonblock) {
|
||||
$asm = "rawSysvicall6";
|
||||
}
|
||||
if(@args <= 6) {
|
||||
while(@args < 6) {
|
||||
push @args, "0";
|
||||
}
|
||||
} else {
|
||||
print STDERR "$ARGV:$.: too many arguments to system call\n";
|
||||
}
|
||||
|
||||
# Actual call.
|
||||
my $args = join(', ', @args);
|
||||
my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)";
|
||||
|
||||
# Assign return values.
|
||||
my $body = "";
|
||||
my $failexpr = "";
|
||||
my @ret = ("_", "_", "_");
|
||||
my @pout= ();
|
||||
my $do_errno = 0;
|
||||
for(my $i=0; $i<@out; $i++) {
|
||||
my $p = $out[$i];
|
||||
my ($name, $type) = parseparam($p);
|
||||
my $reg = "";
|
||||
if($name eq "err") {
|
||||
$reg = "e1";
|
||||
$ret[2] = $reg;
|
||||
$do_errno = 1;
|
||||
} else {
|
||||
$reg = sprintf("r%d", $i);
|
||||
$ret[$i] = $reg;
|
||||
}
|
||||
if($type eq "bool") {
|
||||
$reg = "$reg != 0";
|
||||
}
|
||||
if($type eq "int64" && $_32bit ne "") {
|
||||
# 64-bit number in r1:r0 or r0:r1.
|
||||
if($i+2 > @out) {
|
||||
print STDERR "$ARGV:$.: not enough registers for int64 return\n";
|
||||
}
|
||||
if($_32bit eq "big-endian") {
|
||||
$reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1);
|
||||
} else {
|
||||
$reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i);
|
||||
}
|
||||
$ret[$i] = sprintf("r%d", $i);
|
||||
$ret[$i+1] = sprintf("r%d", $i+1);
|
||||
}
|
||||
if($reg ne "e1") {
|
||||
$body .= "\t$name = $type($reg)\n";
|
||||
}
|
||||
}
|
||||
if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") {
|
||||
$text .= "\t$call\n";
|
||||
} else {
|
||||
$text .= "\t$ret[0], $ret[1], $ret[2] := $call\n";
|
||||
}
|
||||
foreach my $use (@uses) {
|
||||
$text .= "\t$use\n";
|
||||
}
|
||||
$text .= $body;
|
||||
|
||||
if ($do_errno) {
|
||||
$text .= "\tif e1 != 0 {\n";
|
||||
$text .= "\t\terr = e1\n";
|
||||
$text .= "\t}\n";
|
||||
}
|
||||
$text .= "\treturn\n";
|
||||
$text .= "}\n";
|
||||
}
|
||||
|
||||
if($errors) {
|
||||
exit 1;
|
||||
}
|
||||
|
||||
print <<EOF;
|
||||
// $cmdline
|
||||
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
|
||||
|
||||
// +build $ENV{'GOARCH'},$ENV{'GOOS'}
|
||||
|
||||
package $package
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
EOF
|
||||
|
||||
print "import \"golang.org/x/sys/unix\"\n" if $package ne "unix";
|
||||
|
||||
my $vardecls = "\t" . join(",\n\t", @vars);
|
||||
$vardecls .= " syscallFunc";
|
||||
|
||||
chomp($_=<<EOF);
|
||||
|
||||
$dynimports
|
||||
$linknames
|
||||
var (
|
||||
$vardecls
|
||||
)
|
||||
|
||||
$text
|
||||
EOF
|
||||
print $_;
|
||||
exit 0;
|
264
vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl
generated
vendored
Executable file
@ -0,0 +1,264 @@
#!/usr/bin/env perl

# Copyright 2011 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

#
# Parse the header files for OpenBSD and generate a Go usable sysctl MIB.
#
# Build a MIB with each entry being an array containing the level, type and
# a hash that will contain additional entries if the current entry is a node.
# We then walk this MIB and create a flattened sysctl name to OID hash.
#

use strict;

if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
	print STDERR "GOARCH or GOOS not defined in environment\n";
	exit 1;
}

my $debug = 0;
my %ctls = ();

my @headers = qw (
	sys/sysctl.h
	sys/socket.h
	sys/tty.h
	sys/malloc.h
	sys/mount.h
	sys/namei.h
	sys/sem.h
	sys/shm.h
	sys/vmmeter.h
	uvm/uvm_param.h
	uvm/uvm_swap_encrypt.h
	ddb/db_var.h
	net/if.h
	net/if_pfsync.h
	net/pipex.h
	netinet/in.h
	netinet/icmp_var.h
	netinet/igmp_var.h
	netinet/ip_ah.h
	netinet/ip_carp.h
	netinet/ip_divert.h
	netinet/ip_esp.h
	netinet/ip_ether.h
	netinet/ip_gre.h
	netinet/ip_ipcomp.h
	netinet/ip_ipip.h
	netinet/pim_var.h
	netinet/tcp_var.h
	netinet/udp_var.h
	netinet6/in6.h
	netinet6/ip6_divert.h
	netinet6/pim6_var.h
	netinet/icmp6.h
	netmpls/mpls.h
);

my @ctls = qw (
	kern
	vm
	fs
	net
	#debug				# Special handling required
	hw
	#machdep			# Arch specific
	user
	ddb
	#vfs				# Special handling required
	fs.posix
	kern.forkstat
	kern.intrcnt
	kern.malloc
	kern.nchstats
	kern.seminfo
	kern.shminfo
	kern.timecounter
	kern.tty
	kern.watchdog
	net.bpf
	net.ifq
	net.inet
	net.inet.ah
	net.inet.carp
	net.inet.divert
	net.inet.esp
	net.inet.etherip
	net.inet.gre
	net.inet.icmp
	net.inet.igmp
	net.inet.ip
	net.inet.ip.ifq
	net.inet.ipcomp
	net.inet.ipip
	net.inet.mobileip
	net.inet.pfsync
	net.inet.pim
	net.inet.tcp
	net.inet.udp
	net.inet6
	net.inet6.divert
	net.inet6.ip6
	net.inet6.icmp6
	net.inet6.pim6
	net.inet6.tcp6
	net.inet6.udp6
	net.mpls
	net.mpls.ifq
	net.key
	net.pflow
	net.pfsync
	net.pipex
	net.rt
	vm.swapencrypt
	#vfsgenctl			# Special handling required
);

# Node name "fixups"
my %ctl_map = (
	"ipproto" => "net.inet",
	"net.inet.ipproto" => "net.inet",
	"net.inet6.ipv6proto" => "net.inet6",
	"net.inet6.ipv6" => "net.inet6.ip6",
	"net.inet.icmpv6" => "net.inet6.icmp6",
	"net.inet6.divert6" => "net.inet6.divert",
	"net.inet6.tcp6" => "net.inet.tcp",
	"net.inet6.udp6" => "net.inet.udp",
	"mpls" => "net.mpls",
	"swpenc" => "vm.swapencrypt"
);

# Node mappings
my %node_map = (
	"net.inet.ip.ifq" => "net.ifq",
	"net.inet.pfsync" => "net.pfsync",
	"net.mpls.ifq" => "net.ifq"
);

my $ctlname;
my %mib = ();
my %sysctl = ();
my $node;

sub debug() {
	print STDERR "$_[0]\n" if $debug;
}

# Walk the MIB and build a sysctl name to OID mapping.
sub build_sysctl() {
	my ($node, $name, $oid) = @_;
	my %node = %{$node};
	my @oid = @{$oid};

	foreach my $key (sort keys %node) {
		my @node = @{$node{$key}};
		my $nodename = $name.($name ne '' ? '.' : '').$key;
		my @nodeoid = (@oid, $node[0]);
		if ($node[1] eq 'CTLTYPE_NODE') {
			if (exists $node_map{$nodename}) {
				$node = \%mib;
				$ctlname = $node_map{$nodename};
				foreach my $part (split /\./, $ctlname) {
					$node = \%{@{$$node{$part}}[2]};
				}
			} else {
				$node = $node[2];
			}
			&build_sysctl($node, $nodename, \@nodeoid);
		} elsif ($node[1] ne '') {
			$sysctl{$nodename} = \@nodeoid;
		}
	}
}

foreach my $ctl (@ctls) {
	$ctls{$ctl} = $ctl;
}

# Build MIB
foreach my $header (@headers) {
	&debug("Processing $header...");
	open HEADER, "/usr/include/$header" ||
	    print STDERR "Failed to open $header\n";
	while (<HEADER>) {
		if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ ||
		    $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ ||
		    $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) {
			if ($1 eq 'CTL_NAMES') {
				# Top level.
				$node = \%mib;
			} else {
				# Node.
				my $nodename = lc($2);
				if ($header =~ /^netinet\//) {
					$ctlname = "net.inet.$nodename";
				} elsif ($header =~ /^netinet6\//) {
					$ctlname = "net.inet6.$nodename";
				} elsif ($header =~ /^net\//) {
					$ctlname = "net.$nodename";
				} else {
					$ctlname = "$nodename";
					$ctlname =~ s/^(fs|net|kern)_/$1\./;
				}
				if (exists $ctl_map{$ctlname}) {
					$ctlname = $ctl_map{$ctlname};
				}
				if (not exists $ctls{$ctlname}) {
					&debug("Ignoring $ctlname...");
					next;
				}

				# Walk down from the top of the MIB.
				$node = \%mib;
				foreach my $part (split /\./, $ctlname) {
					if (not exists $$node{$part}) {
						&debug("Missing node $part");
						$$node{$part} = [ 0, '', {} ];
					}
					$node = \%{@{$$node{$part}}[2]};
				}
			}

			# Populate current node with entries.
			my $i = -1;
			while (defined($_) && $_ !~ /^}/) {
				$_ = <HEADER>;
				$i++ if $_ =~ /{.*}/;
				next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/;
				$$node{$1} = [ $i, $2, {} ];
			}
		}
	}
	close HEADER;
}

&build_sysctl(\%mib, "", []);

print <<EOF;
// mksysctl_openbsd.pl
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT

// +build $ENV{'GOARCH'},$ENV{'GOOS'}

package unix;

type mibentry struct {
	ctlname string
	ctloid []_C_int
}

var sysctlMib = []mibentry {
EOF

foreach my $name (sort keys %sysctl) {
	my @oid = @{$sysctl{$name}};
	print "\t{ \"$name\", []_C_int{ ", join(', ', @oid), " } }, \n";
}

print <<EOF;
}
EOF
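The table printed here gives the package a flattened sysctl name to OID mapping. A minimal sketch of how such a table can be consumed, assuming the sysctlMib slice and _C_int type from the generated file; the lookup helper itself is hypothetical and not part of this diff:

// lookupOID scans the generated table for a dotted sysctl name.
func lookupOID(name string) ([]_C_int, bool) {
	for _, e := range sysctlMib {
		if e.ctlname == name {
			return e.ctloid, true
		}
	}
	return nil, false
}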
39
vendor/golang.org/x/sys/unix/mksysnum_darwin.pl
generated
vendored
Executable file
@ -0,0 +1,39 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for Darwin from sys/syscall.h

use strict;

if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
	print STDERR "GOARCH or GOOS not defined in environment\n";
	exit 1;
}

my $command = "mksysnum_darwin.pl " . join(' ', @ARGV);

print <<EOF;
// $command
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT

// +build $ENV{'GOARCH'},$ENV{'GOOS'}

package unix

const (
EOF

while(<>){
	if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){
		my $name = $1;
		my $num = $2;
		$name =~ y/a-z/A-Z/;
		print " SYS_$name = $num;"
	}
}

print <<EOF;
)
EOF
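The loop above rewrites each `#define SYS_x n` from sys/syscall.h into an upper-cased Go constant. A hedged usage sketch, assuming the generated SYS_GETPID constant and the package's raw Syscall entry point on darwin; typed wrappers in the package are normally preferable:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Raw call through a generated syscall number.
	pid, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
	if errno != 0 {
		log.Fatal(errno)
	}
	fmt.Println("pid:", pid)
}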
50
vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl
generated
vendored
Executable file
@ -0,0 +1,50 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for DragonFly from master list
# (for example, /usr/src/sys/kern/syscalls.master).

use strict;

if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
	print STDERR "GOARCH or GOOS not defined in environment\n";
	exit 1;
}

my $command = "mksysnum_dragonfly.pl " . join(' ', @ARGV);

print <<EOF;
// $command
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT

// +build $ENV{'GOARCH'},$ENV{'GOOS'}

package unix

const (
EOF

while(<>){
	if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){
		my $num = $1;
		my $proto = $2;
		my $name = "SYS_$3";
		$name =~ y/a-z/A-Z/;

		# There are multiple entries for enosys and nosys, so comment them out.
		if($name =~ /^SYS_E?NOSYS$/){
			$name = "// $name";
		}
		if($name eq 'SYS_SYS_EXIT'){
			$name = 'SYS_EXIT';
		}

		print " $name = $num; // $proto\n";
	}
}

print <<EOF;
)
EOF
63
vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl
generated
vendored
Executable file
@ -0,0 +1,63 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for FreeBSD from master list
# (for example, /usr/src/sys/kern/syscalls.master).

use strict;

if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
	print STDERR "GOARCH or GOOS not defined in environment\n";
	exit 1;
}

my $command = "mksysnum_freebsd.pl " . join(' ', @ARGV);

print <<EOF;
// $command
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT

// +build $ENV{'GOARCH'},$ENV{'GOOS'}

package unix

const (
EOF

while(<>){
	if(/^([0-9]+)\s+\S+\s+STD\s+({ \S+\s+(\w+).*)$/){
		my $num = $1;
		my $proto = $2;
		my $name = "SYS_$3";
		$name =~ y/a-z/A-Z/;

		# There are multiple entries for enosys and nosys, so comment them out.
		if($name =~ /^SYS_E?NOSYS$/){
			$name = "// $name";
		}
		if($name eq 'SYS_SYS_EXIT'){
			$name = 'SYS_EXIT';
		}
		if($name =~ /^SYS_CAP_+/ || $name =~ /^SYS___CAP_+/){
			next
		}

		print " $name = $num; // $proto\n";

		# We keep Capsicum syscall numbers for FreeBSD
		# 9-STABLE here because we are not sure whether they
		# are mature and stable.
		if($num == 513){
			print " SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); }\n";
			print " SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \\\n";
			print " SYS_CAP_ENTER = 516 // { int cap_enter(void); }\n";
			print " SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }\n";
		}
	}
}

print <<EOF;
)
EOF
58
vendor/golang.org/x/sys/unix/mksysnum_linux.pl
generated
vendored
Executable file
@ -0,0 +1,58 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

use strict;

if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
	print STDERR "GOARCH or GOOS not defined in environment\n";
	exit 1;
}

my $command = "mksysnum_linux.pl ". join(' ', @ARGV);

print <<EOF;
// $command
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT

// +build $ENV{'GOARCH'},$ENV{'GOOS'}

package unix

const(
EOF

sub fmt {
	my ($name, $num) = @_;
	if($num > 999){
		# ignore deprecated syscalls that are no longer implemented
		# https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/include/uapi/asm-generic/unistd.h?id=refs/heads/master#n716
		return;
	}
	$name =~ y/a-z/A-Z/;
	print " SYS_$name = $num;\n";
}

my $prev;
open(GCC, "gcc -E -dD $ARGV[0] |") || die "can't run gcc";
while(<GCC>){
	if(/^#define __NR_syscalls\s+/) {
		# ignore redefinitions of __NR_syscalls
	}
	elsif(/^#define __NR_(\w+)\s+([0-9]+)/){
		$prev = $2;
		fmt($1, $2);
	}
	elsif(/^#define __NR3264_(\w+)\s+([0-9]+)/){
		$prev = $2;
		fmt($1, $2);
	}
	elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){
		fmt($1, $prev+$2)
	}
}

print <<EOF;
)
EOF
58
vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl
generated
vendored
Executable file
@ -0,0 +1,58 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for OpenBSD from master list
# (for example, /usr/src/sys/kern/syscalls.master).

use strict;

if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
	print STDERR "GOARCH or GOOS not defined in environment\n";
	exit 1;
}

my $command = "mksysnum_netbsd.pl " . join(' ', @ARGV);

print <<EOF;
// $command
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT

// +build $ENV{'GOARCH'},$ENV{'GOOS'}

package unix

const (
EOF

my $line = '';
while(<>){
	if($line =~ /^(.*)\\$/) {
		# Handle continuation
		$line = $1;
		$_ =~ s/^\s+//;
		$line .= $_;
	} else {
		# New line
		$line = $_;
	}
	next if $line =~ /\\$/;
	if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) {
		my $num = $1;
		my $proto = $6;
		my $compat = $8;
		my $name = "$7_$9";

		$name = "$7_$11" if $11 ne '';
		$name =~ y/a-z/A-Z/;

		if($compat eq '' || $compat eq '30' || $compat eq '50') {
			print " $name = $num; // $proto\n";
		}
	}
}

print <<EOF;
)
EOF
50
vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl
generated
vendored
Executable file
@ -0,0 +1,50 @@
#!/usr/bin/env perl
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Generate system call table for OpenBSD from master list
# (for example, /usr/src/sys/kern/syscalls.master).

use strict;

if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
	print STDERR "GOARCH or GOOS not defined in environment\n";
	exit 1;
}

my $command = "mksysnum_openbsd.pl " . join(' ', @ARGV);

print <<EOF;
// $command
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT

// +build $ENV{'GOARCH'},$ENV{'GOOS'}

package unix

const (
EOF

while(<>){
	if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){
		my $num = $1;
		my $proto = $3;
		my $name = $4;
		$name =~ y/a-z/A-Z/;

		# There are multiple entries for enosys and nosys, so comment them out.
		if($name =~ /^SYS_E?NOSYS$/){
			$name = "// $name";
		}
		if($name eq 'SYS_SYS_EXIT'){
			$name = 'SYS_EXIT';
		}

		print " $name = $num; // $proto\n";
	}
}

print <<EOF;
)
EOF
30
vendor/golang.org/x/sys/unix/race.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin,race linux,race freebsd,race

package unix

import (
	"runtime"
	"unsafe"
)

const raceenabled = true

func raceAcquire(addr unsafe.Pointer) {
	runtime.RaceAcquire(addr)
}

func raceReleaseMerge(addr unsafe.Pointer) {
	runtime.RaceReleaseMerge(addr)
}

func raceReadRange(addr unsafe.Pointer, len int) {
	runtime.RaceReadRange(addr, len)
}

func raceWriteRange(addr unsafe.Pointer, len int) {
	runtime.RaceWriteRange(addr, len)
}
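These hooks let the package tell the race detector about memory that the kernel reads or writes on the caller's behalf. A hedged sketch of the calling pattern inside the package (simplified; the real call sites live in syscall wrappers that are not part of this hunk):

// After a successful read(2)-style call that filled p[:n], report the write:
if raceenabled && n > 0 {
	raceWriteRange(unsafe.Pointer(&p[0]), n)
}

// Before a write(2)-style call that will consume p[:n], report the read:
if raceenabled && n > 0 {
	raceReadRange(unsafe.Pointer(&p[0]), n)
}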
25
vendor/golang.org/x/sys/unix/race0.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly

package unix

import (
	"unsafe"
)

const raceenabled = false

func raceAcquire(addr unsafe.Pointer) {
}

func raceReleaseMerge(addr unsafe.Pointer) {
}

func raceReadRange(addr unsafe.Pointer, len int) {
}

func raceWriteRange(addr unsafe.Pointer, len int) {
}
36
vendor/golang.org/x/sys/unix/sockcmsg_linux.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Socket control messages

package unix

import "unsafe"

// UnixCredentials encodes credentials into a socket control message
// for sending to another process. This can be used for
// authentication.
func UnixCredentials(ucred *Ucred) []byte {
	b := make([]byte, CmsgSpace(SizeofUcred))
	h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
	h.Level = SOL_SOCKET
	h.Type = SCM_CREDENTIALS
	h.SetLen(CmsgLen(SizeofUcred))
	*((*Ucred)(cmsgData(h))) = *ucred
	return b
}

// ParseUnixCredentials decodes a socket control message that contains
// credentials in a Ucred structure. To receive such a message, the
// SO_PASSCRED option must be enabled on the socket.
func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) {
	if m.Header.Level != SOL_SOCKET {
		return nil, EINVAL
	}
	if m.Header.Type != SCM_CREDENTIALS {
		return nil, EINVAL
	}
	ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
	return &ucred, nil
}
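UnixCredentials and ParseUnixCredentials are the two halves of SCM_CREDENTIALS passing. A hedged, Linux-only usage sketch over a socketpair; the identifiers are all from golang.org/x/sys/unix, but the program itself is illustrative:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Connected pair of Unix datagram sockets.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	// The receiver must opt in to credential passing.
	if err := unix.SetsockoptInt(fds[1], unix.SOL_SOCKET, unix.SO_PASSCRED, 1); err != nil {
		panic(err)
	}

	// Send our own credentials as ancillary data alongside a payload.
	cred := unix.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
	oob := unix.UnixCredentials(&cred)
	if err := unix.Sendmsg(fds[0], []byte("hi"), oob, nil, 0); err != nil {
		panic(err)
	}

	// Receive and decode the control message on the other end.
	buf := make([]byte, 16)
	oobBuf := make([]byte, 128)
	_, oobn, _, _, err := unix.Recvmsg(fds[1], buf, oobBuf, 0)
	if err != nil {
		panic(err)
	}
	scms, err := unix.ParseSocketControlMessage(oobBuf[:oobn])
	if err != nil || len(scms) == 0 {
		panic("no control message")
	}
	got, err := unix.ParseUnixCredentials(&scms[0])
	if err != nil {
		panic(err)
	}
	fmt.Printf("peer pid=%d uid=%d gid=%d\n", got.Pid, got.Uid, got.Gid)
}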
103
vendor/golang.org/x/sys/unix/sockcmsg_unix.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd solaris

// Socket control messages

package unix

import "unsafe"

// Round the length of a raw sockaddr up to align it properly.
func cmsgAlignOf(salen int) int {
	salign := sizeofPtr
	// NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels
	// still require 32-bit aligned access to network subsystem.
	if darwin64Bit || dragonfly64Bit {
		salign = 4
	}
	return (salen + salign - 1) & ^(salign - 1)
}

// CmsgLen returns the value to store in the Len field of the Cmsghdr
// structure, taking into account any necessary alignment.
func CmsgLen(datalen int) int {
	return cmsgAlignOf(SizeofCmsghdr) + datalen
}

// CmsgSpace returns the number of bytes an ancillary element with
// payload of the passed data length occupies.
func CmsgSpace(datalen int) int {
	return cmsgAlignOf(SizeofCmsghdr) + cmsgAlignOf(datalen)
}

func cmsgData(h *Cmsghdr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(SizeofCmsghdr)))
}

// SocketControlMessage represents a socket control message.
type SocketControlMessage struct {
	Header Cmsghdr
	Data   []byte
}

// ParseSocketControlMessage parses b as an array of socket control
// messages.
func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) {
	var msgs []SocketControlMessage
	i := 0
	for i+CmsgLen(0) <= len(b) {
		h, dbuf, err := socketControlMessageHeaderAndData(b[i:])
		if err != nil {
			return nil, err
		}
		m := SocketControlMessage{Header: *h, Data: dbuf}
		msgs = append(msgs, m)
		i += cmsgAlignOf(int(h.Len))
	}
	return msgs, nil
}

func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) {
	h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
	if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) {
		return nil, nil, EINVAL
	}
	return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil
}

// UnixRights encodes a set of open file descriptors into a socket
// control message for sending to another process.
func UnixRights(fds ...int) []byte {
	datalen := len(fds) * 4
	b := make([]byte, CmsgSpace(datalen))
	h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
	h.Level = SOL_SOCKET
	h.Type = SCM_RIGHTS
	h.SetLen(CmsgLen(datalen))
	data := cmsgData(h)
	for _, fd := range fds {
		*(*int32)(data) = int32(fd)
		data = unsafe.Pointer(uintptr(data) + 4)
	}
	return b
}

// ParseUnixRights decodes a socket control message that contains an
// integer array of open file descriptors from another process.
func ParseUnixRights(m *SocketControlMessage) ([]int, error) {
	if m.Header.Level != SOL_SOCKET {
		return nil, EINVAL
	}
	if m.Header.Type != SCM_RIGHTS {
		return nil, EINVAL
	}
	fds := make([]int, len(m.Data)>>2)
	for i, j := 0, 0; i < len(m.Data); i += 4 {
		fds[j] = int(*(*int32)(unsafe.Pointer(&m.Data[i])))
		j++
	}
	return fds, nil
}
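UnixRights and ParseUnixRights round-trip file descriptors through an SCM_RIGHTS control message. A small hedged sketch that encodes two descriptors and decodes them again locally; in real use the oob bytes would travel across an AF_UNIX socket via Sendmsg and Recvmsg:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Encode stdin and stdout into a control message.
	oob := unix.UnixRights(int(os.Stdin.Fd()), int(os.Stdout.Fd()))

	// Decode it again, as a receiver would after Recvmsg.
	scms, err := unix.ParseSocketControlMessage(oob)
	if err != nil {
		panic(err)
	}
	fds, err := unix.ParseUnixRights(&scms[0])
	if err != nil {
		panic(err)
	}
	fmt.Println("descriptors in the message:", fds)
}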
26
vendor/golang.org/x/sys/unix/str.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package unix

func itoa(val int) string { // do it here rather than with fmt to avoid dependency
	if val < 0 {
		return "-" + uitoa(uint(-val))
	}
	return uitoa(uint(val))
}

func uitoa(val uint) string {
	var buf [32]byte // big enough for int64
	i := len(buf) - 1
	for val >= 10 {
		buf[i] = byte(val%10 + '0')
		i--
		val /= 10
	}
	buf[i] = byte(val + '0')
	return string(buf[i:])
}
76
vendor/golang.org/x/sys/unix/syscall.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd solaris

// Package unix contains an interface to the low-level operating system
// primitives. OS details vary depending on the underlying system, and
// by default, godoc will display OS-specific documentation for the current
// system. If you want godoc to display OS documentation for another
// system, set $GOOS and $GOARCH to the desired system. For example, if
// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
// to freebsd and $GOARCH to arm.
// The primary use of this package is inside other packages that provide a more
// portable interface to the system, such as "os", "time" and "net". Use
// those packages rather than this one if you can.
// For details of the functions and data types in this package consult
// the manuals for the appropriate operating system.
// These calls return err == nil to indicate success; otherwise
// err represents an operating system error describing the failure and
// holds a value of type syscall.Errno.
package unix

import "unsafe"

// ByteSliceFromString returns a NUL-terminated slice of bytes
// containing the text of s. If s contains a NUL byte at any
// location, it returns (nil, EINVAL).
func ByteSliceFromString(s string) ([]byte, error) {
	for i := 0; i < len(s); i++ {
		if s[i] == 0 {
			return nil, EINVAL
		}
	}
	a := make([]byte, len(s)+1)
	copy(a, s)
	return a, nil
}

// BytePtrFromString returns a pointer to a NUL-terminated array of
// bytes containing the text of s. If s contains a NUL byte at any
// location, it returns (nil, EINVAL).
func BytePtrFromString(s string) (*byte, error) {
	a, err := ByteSliceFromString(s)
	if err != nil {
		return nil, err
	}
	return &a[0], nil
}

// Single-word zero for use when we need a valid pointer to 0 bytes.
// See mkunix.pl.
var _zero uintptr

func (ts *Timespec) Unix() (sec int64, nsec int64) {
	return int64(ts.Sec), int64(ts.Nsec)
}

func (tv *Timeval) Unix() (sec int64, nsec int64) {
	return int64(tv.Sec), int64(tv.Usec) * 1000
}

func (ts *Timespec) Nano() int64 {
	return int64(ts.Sec)*1e9 + int64(ts.Nsec)
}

func (tv *Timeval) Nano() int64 {
	return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
}

func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }

// use is a no-op, but the compiler cannot see that it is.
// Calling use(p) ensures that p is kept live until that point.
//go:noescape
func use(p unsafe.Pointer)
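A short hedged sketch of the string and time helpers declared above; the values noted in the comments are what these calls evaluate to:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	b, err := unix.ByteSliceFromString("/tmp") // []byte{'/', 't', 'm', 'p', 0}
	fmt.Println(b, err)

	_, err = unix.ByteSliceFromString("a\x00b") // embedded NUL -> unix.EINVAL
	fmt.Println(err)

	tv := unix.Timeval{Sec: 1, Usec: 500000}
	fmt.Println(tv.Nano(), unix.TimevalToNsec(tv)) // both print 1500000000
}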
Some files were not shown because too many files have changed in this diff.