Compare commits


No commits in common. "master" and "v0.6.0" have entirely different histories.

25 changed files with 86 additions and 547 deletions

View File

@ -4,7 +4,6 @@ Replicated in-memory database and file store.
## TODO
* [ ] mdb: Tests for using `nil` snapshots ?
* [ ] mdb: tests for sanitize and validate functions
* [ ] Test: lib/wal iterator w/ corrupt file (random corruptions)
* [ ] Test: lib/wal io.go

go.mod (3 changed lines)
View File

@ -1,10 +1,9 @@
module git.crumpington.com/public/jldb
go 1.22
go 1.21.1
require (
github.com/google/btree v1.1.2
go.uber.org/goleak v1.3.0
golang.org/x/net v0.15.0
golang.org/x/sys v0.12.0
)

go.sum (10 changed lines)
View File

@ -1,16 +1,6 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -8,27 +8,26 @@ import (
)
type Error struct {
Code int64
Msg string
StackTrace string
msg string
code int64
collection string
index string
stackTrace string
err error // Wrapped error
}
func NewErr(code int64, msg string) *Error {
return &Error{
Code: code,
Msg: msg,
msg: msg,
code: code,
}
}
func (e *Error) Error() string {
if e.collection != "" || e.index != "" {
return fmt.Sprintf(`[%d] (%s/%s) %s`, e.Code, e.collection, e.index, e.Msg)
return fmt.Sprintf(`[%d] (%s/%s) %s`, e.code, e.collection, e.index, e.msg)
} else {
return fmt.Sprintf("[%d] %s", e.Code, e.Msg)
return fmt.Sprintf("[%d] %s", e.code, e.msg)
}
}
@ -37,15 +36,11 @@ func (e *Error) Is(rhs error) bool {
if !ok {
return false
}
return e.Code == e2.Code
}
func (e *Error) Unwrap() error {
return e.err
return e.code == e2.code
}
func (e *Error) WithErr(err error) *Error {
if e2, ok := err.(*Error); ok && e2.Code == e.Code {
if e2, ok := err.(*Error); ok && e2.code == e.code {
return e2
}
@ -54,11 +49,18 @@ func (e *Error) WithErr(err error) *Error {
return e2
}
func (e *Error) Unwrap() error {
if e.err != nil {
return e.err
}
return e
}
func (e *Error) WithMsg(msg string, args ...any) *Error {
err := *e
err.Msg += ": " + fmt.Sprintf(msg, args...)
if len(err.StackTrace) == 0 {
err.StackTrace = string(debug.Stack())
err.msg += ": " + fmt.Sprintf(msg, args...)
if len(err.stackTrace) == 0 {
err.stackTrace = string(debug.Stack())
}
return &err
}
@ -76,16 +78,16 @@ func (e *Error) WithIndex(s string) *Error {
}
func (e *Error) msgTruncacted() string {
if len(e.Msg) > 255 {
return e.Msg[:255]
if len(e.msg) > 255 {
return e.msg[:255]
}
return e.Msg
return e.msg
}
func (e *Error) Write(w io.Writer) error {
msg := e.msgTruncacted()
if err := binary.Write(w, binary.LittleEndian, e.Code); err != nil {
if err := binary.Write(w, binary.LittleEndian, e.code); err != nil {
return IO.WithErr(err)
}
@ -101,7 +103,7 @@ func (e *Error) Read(r io.Reader) error {
size uint8
)
if err := binary.Read(r, binary.LittleEndian, &e.Code); err != nil {
if err := binary.Read(r, binary.LittleEndian, &e.code); err != nil {
return IO.WithErr(err)
}
@ -114,6 +116,6 @@ func (e *Error) Read(r io.Reader) error {
return IO.WithErr(err)
}
e.Msg = string(msgBuf)
e.msg = string(msgBuf)
return nil
}
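The hunks above make the error's code, message, and stack trace unexported, so callers work only through the constructor and the `With*` helpers, while `Is` keeps matching on the code. A minimal usage sketch under those assumptions; the import path (`lib/errs`) and the example code/message are guesses, not taken from the repository:

```go
package main

import (
	"errors"
	"fmt"

	"git.crumpington.com/public/jldb/lib/errs" // assumed import path for package errs
)

func main() {
	// Hypothetical application error; fields are unexported, so context is
	// attached via WithMsg rather than by setting Msg directly.
	base := errs.NewErr(100, "user lookup failed")
	err := base.WithMsg("id=%d", 42)

	// (*Error).Is compares error codes, so errors.Is still matches the base error.
	if errors.Is(err, base) {
		fmt.Println(err) // prints "[100] user lookup failed: id=42"
	}
}
```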

View File

@ -10,12 +10,12 @@ func FmtDetails(err error) string {
var s string
if e.collection != "" || e.index != "" {
s = fmt.Sprintf(`[%d] (%s/%s) %s`, e.Code, e.collection, e.index, e.Msg)
s = fmt.Sprintf(`[%d] (%s/%s) %s`, e.code, e.collection, e.index, e.msg)
} else {
s = fmt.Sprintf("[%d] %s", e.Code, e.Msg)
s = fmt.Sprintf("[%d] %s", e.code, e.msg)
}
if len(e.StackTrace) != 0 {
s += "\n\nStack Trace:\n" + e.StackTrace + "\n"
if len(e.stackTrace) != 0 {
s += "\n\nStack Trace:\n" + e.stackTrace + "\n"
}
return s

View File

@ -1,11 +0,0 @@
package rep
import (
"testing"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}

View File

@ -15,7 +15,7 @@ func (rep *Replicator) runWALGC() {
select {
case <-ticker.C:
state := rep.getState()
before := time.Now().Unix() - rep.conf.WALSegGCAgeSec
before := time.Now().Unix() - rep.conf.WALSegMaxAgeSec
if err := rep.wal.DeleteBefore(before, state.SeqNum); err != nil {
log.Printf("[WAL-GC] failed to delete wal segments: %v", err)
}

View File

@ -36,8 +36,8 @@ type App struct {
// SendState: The primary may need to send storage state to a secondary node.
SendState func(conn net.Conn) error
// (1) RecvState: Secondary nodes may need to load state from the primary if
// the WAL is too far behind.
// (1) RecvState: Secondary nodes may need to load state from the primary if the
// WAL is too far behind.
RecvState func(conn net.Conn) error
// (2) InitStorage: Prepare application storage for possible calls to
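The comments above describe the two state-transfer hooks an application hands to the replicator: the primary serializes its storage onto a connection, and a secondary whose WAL has fallen too far behind restores from that stream. A rough sketch of wiring them, assuming the application keeps its state in a single file; the import path, `statePath`, and the whole file-copy approach are illustrative, not part of the package:

```go
package main

import (
	"io"
	"net"
	"os"

	"git.crumpington.com/public/jldb/lib/rep" // assumed import path for package rep
)

const statePath = "/var/lib/myapp/state.bin" // hypothetical application state file

func appCallbacks() rep.App {
	return rep.App{
		// SendState: the primary streams its current state file to a secondary.
		SendState: func(conn net.Conn) error {
			f, err := os.Open(statePath)
			if err != nil {
				return err
			}
			defer f.Close()
			_, err = io.Copy(conn, f)
			return err
		},
		// RecvState: a lagging secondary rebuilds local state from the
		// primary's stream, then swaps it into place atomically.
		RecvState: func(conn net.Conn) error {
			tmp := statePath + ".tmp"
			f, err := os.Create(tmp)
			if err != nil {
				return err
			}
			if _, err := io.Copy(f, conn); err != nil {
				f.Close()
				return err
			}
			if err := f.Close(); err != nil {
				return err
			}
			return os.Rename(tmp, statePath)
		},
	}
}
```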

View File

@ -56,7 +56,6 @@ func (h TestAppHarness) Run(t *testing.T) {
WALSegMaxAgeSec: 1,
WALSegGCAgeSec: 1,
})
defer app2.Close()
val.MethodByName(method.Name).Call([]reflect.Value{
reflect.ValueOf(t),

View File

@ -1,11 +0,0 @@
package wal
import (
"testing"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}

View File

@ -1,6 +1,7 @@
package mdb
import (
"bytes"
"encoding/json"
"errors"
"hash/crc64"
@ -24,6 +25,8 @@ type Collection[T any] struct {
uniqueIndices []*Index[T]
ByID *Index[T]
buf *bytes.Buffer
}
type CollectionConfig[T any] struct {
@ -64,6 +67,7 @@ func NewCollection[T any](db *Database, name string, conf *CollectionConfig[T])
validate: conf.Validate,
indices: []*Index[T]{},
uniqueIndices: []*Index[T]{},
buf: &bytes.Buffer{},
}
db.addCollection(c.collectionID, c, &collectionState[T]{
@ -155,15 +159,6 @@ func (c *Collection[T]) Get(tx *Snapshot, id uint64) *T {
return c.ByID.Get(tx, item)
}
func (c *Collection[T]) Has(tx *Snapshot, id uint64) bool {
if tx == nil {
tx = c.db.Snapshot()
}
item := new(T)
c.setID(item, id)
return c.ByID.Has(tx, item)
}
func (c *Collection[T]) Insert(tx *Snapshot, userItem *T) error {
if tx == nil {
return c.db.Update(func(tx *Snapshot) error {
@ -242,27 +237,6 @@ func (c *Collection[T]) update(tx *Snapshot, userItem *T) error {
return nil
}
func (c *Collection[T]) UpdateFunc(tx *Snapshot, id uint64, update func(item *T) error) error {
if tx == nil {
return c.db.Update(func(tx *Snapshot) error {
return c.updateFunc(tx, id, update)
})
}
return c.updateFunc(tx, id, update)
}
func (c *Collection[T]) updateFunc(tx *Snapshot, id uint64, update func(item *T) error) error {
item := c.Get(tx, id)
if item == nil {
return errs.NotFound
}
if err := update(item); err != nil {
return err
}
c.setID(item, id) // Don't allow the ID to change.
return c.update(tx, item)
}
func (c *Collection[T]) Upsert(tx *Snapshot, item *T) error {
if tx == nil {
return c.db.Update(func(tx *Snapshot) error {
@ -283,36 +257,6 @@ func (c *Collection[T]) upsert(tx *Snapshot, item *T) error {
return err
}
func (c *Collection[T]) UpsertFunc(tx *Snapshot, id uint64, update func(item *T) error) error {
if tx == nil {
c.db.Update(func(tx *Snapshot) error {
return c.upsertFunc(tx, id, update)
})
}
return c.upsertFunc(tx, id, update)
}
func (c *Collection[T]) upsertFunc(tx *Snapshot, id uint64, update func(item *T) error) error {
insert := false
item := c.Get(tx, id)
if item == nil {
item = new(T)
insert = true
}
if err := update(item); err != nil {
return err
}
c.setID(item, id) // Don't allow the ID to change.
if insert {
return c.insert(tx, item)
}
return c.update(tx, item)
}
func (c *Collection[T]) Delete(tx *Snapshot, itemID uint64) error {
if tx == nil {
return c.db.Update(func(tx *Snapshot) error {
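With `Has`, `UpdateFunc`, and `UpsertFunc` dropped in these hunks, a read-modify-write is written directly against `Get` and `Update` inside a `db.Update` transaction, which is the same Get-mutate-Update sequence the deleted `updateFunc` performed internally. A condensed sketch using the package's test fixtures (`NewTestDBPrimary`, the `Users` collection, and the `User` type); `id` stands for the ID of an existing item:

```go
// Sketch only: db, Users, and User come from the mdb test fixtures; id is
// assumed to reference an existing user.
err := db.Update(func(tx *Snapshot) error {
	u := db.Users.Get(tx, id)
	if u == nil {
		return errs.NotFound
	}
	u.Name = "User1 (renamed)" // illustrative mutation
	return db.Users.Update(tx, u)
})
if err != nil {
	// handle the failed transaction
}
```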

View File

@ -1,67 +0,0 @@
package mdb
/*
func (db *Database) openPrimary() (err error) {
wal, err := cwal.Open(db.walRootDir, cwal.Config{
SegMinCount: db.conf.WALSegMinCount,
SegMaxAgeSec: db.conf.WALSegMaxAgeSec,
})
pFile, err := pfile.Open(db.pageFilePath,
pFile, err := openPageFileAndReplayWAL(db.rootDir)
if err != nil {
return err
}
defer pFile.Close()
pfHeader, err := pFile.ReadHeader()
if err != nil {
return err
}
tx := db.Snapshot()
tx.seqNum = pfHeader.SeqNum
tx.updatedAt = pfHeader.UpdatedAt
pIndex, err := pagefile.NewIndex(pFile)
if err != nil {
return err
}
err = pFile.IterateAllocated(pIndex, func(cID, iID uint64, data []byte) error {
return db.loadItem(tx, cID, iID, data)
})
if err != nil {
return err
}
w, err := cwal.OpenWriter(db.walRootDir, &cwal.WriterConfig{
SegMinCount: db.conf.WALSegMinCount,
SegMaxAgeSec: db.conf.WALSegMaxAgeSec,
})
if err != nil {
return err
}
db.done.Add(1)
go txAggregator{
Stop: db.stop,
Done: db.done,
ModChan: db.modChan,
W: w,
Index: pIndex,
Snapshot: db.snapshot,
}.Run()
db.done.Add(1)
go (&fileWriter{
Stop: db.stop,
Done: db.done,
PageFilePath: db.pageFilePath,
WALRootDir: db.walRootDir,
}).Run()
return nil
}
*/

View File

@ -99,7 +99,6 @@ func (db *Database) repApply(rec wal.Record) (err error) {
}
tx.seqNum = rec.SeqNum
tx.timestampMS = rec.TimestampMS
tx.setReadOnly()
db.snapshot.Store(tx)
return nil
}

View File

@ -1,129 +0,0 @@
package mdb
/*
func (db *Database) openSecondary() (err error) {
if db.shouldLoadFromPrimary() {
if err := db.loadFromPrimary(); err != nil {
return err
}
}
log.Printf("Opening page-file...")
pFile, err := openPageFileAndReplayWAL(db.rootDir)
if err != nil {
return err
}
defer pFile.Close()
pfHeader, err := pFile.ReadHeader()
if err != nil {
return err
}
log.Printf("Building page-file index...")
pIndex, err := pagefile.NewIndex(pFile)
if err != nil {
return err
}
tx := db.Snapshot()
tx.seqNum = pfHeader.SeqNum
tx.updatedAt = pfHeader.UpdatedAt
log.Printf("Loading data into memory...")
err = pFile.IterateAllocated(pIndex, func(cID, iID uint64, data []byte) error {
return db.loadItem(tx, cID, iID, data)
})
if err != nil {
return err
}
log.Printf("Creating writer...")
w, err := cswal.OpenWriter(db.walRootDir, &cswal.WriterConfig{
SegMinCount: db.conf.WALSegMinCount,
SegMaxAgeSec: db.conf.WALSegMaxAgeSec,
})
if err != nil {
return err
}
db.done.Add(1)
go (&walFollower{
Stop: db.stop,
Done: db.done,
W: w,
Client: NewClient(db.conf.PrimaryURL, db.conf.ReplicationPSK, db.conf.NetTimeout),
}).Run()
db.done.Add(1)
go (&follower{
Stop: db.stop,
Done: db.done,
WALRootDir: db.walRootDir,
SeqNum: pfHeader.SeqNum,
ApplyChanges: db.applyChanges,
}).Run()
db.done.Add(1)
go (&fileWriter{
Stop: db.stop,
Done: db.done,
PageFilePath: db.pageFilePath,
WALRootDir: db.walRootDir,
}).Run()
return nil
}
func (db *Database) shouldLoadFromPrimary() bool {
if _, err := os.Stat(db.walRootDir); os.IsNotExist(err) {
log.Printf("WAL doesn't exist.")
return true
}
if _, err := os.Stat(db.pageFilePath); os.IsNotExist(err) {
log.Printf("Page-file doesn't exist.")
return true
}
return false
}
func (db *Database) loadFromPrimary() error {
client := NewClient(db.conf.PrimaryURL, db.conf.ReplicationPSK, db.conf.NetTimeout)
defer client.Disconnect()
log.Printf("Loading data from primary...")
if err := os.RemoveAll(db.pageFilePath); err != nil {
log.Printf("Failed to remove page-file: %s", err)
return errs.IO.WithErr(err) // Caller can retry.
}
if err := os.RemoveAll(db.walRootDir); err != nil {
log.Printf("Failed to remove WAL: %s", err)
return errs.IO.WithErr(err) // Caller can retry.
}
err := client.DownloadPageFile(db.pageFilePath+".tmp", db.pageFilePath)
if err != nil {
log.Printf("Failed to get page-file from primary: %s", err)
return err // Caller can retry.
}
pfHeader, err := pagefile.ReadHeader(db.pageFilePath)
if err != nil {
log.Printf("Failed to read page-file sequence number: %s", err)
return err // Caller can retry.
}
if err = cswal.CreateEx(db.walRootDir, pfHeader.SeqNum+1); err != nil {
log.Printf("Failed to initialize WAL: %s", err)
return err // Caller can retry.
}
return nil
}
*/

View File

@ -743,25 +743,29 @@ var testDBTestCases = []DBTestCase{{
first := true
pivot := User{Name: "User1"}
for u := range db.Users.ByName.AscendAfter(tx, &pivot) {
db.Users.ByName.AscendAfter(tx, &pivot, func(u *User) bool {
u.Name += "Mod"
if err = db.Users.Update(tx, u); err != nil {
return err
return false
}
if first {
first = false
continue
return true
}
prev := db.Users.ByID.Get(tx, &User{ID: u.ID - 1})
if prev == nil {
return errors.New("Previous user not found")
err = errors.New("Previous user not found")
return false
}
if !strings.HasSuffix(prev.Name, "Mod") {
return errors.New("Incorrect user name: " + prev.Name)
err = errors.New("Incorrect user name: " + prev.Name)
return false
}
}
return true
})
return nil
},
@ -797,26 +801,29 @@ var testDBTestCases = []DBTestCase{{
}
first := true
for u := range db.Users.ByName.DescendAfter(tx, &User{Name: "User5Mod"}) {
db.Users.ByName.DescendAfter(tx, &User{Name: "User5Mod"}, func(u *User) bool {
u.Name = strings.TrimSuffix(u.Name, "Mod")
if err = db.Users.Update(tx, u); err != nil {
return err
return false
}
if first {
first = false
continue
return true
}
prev := db.Users.ByID.Get(tx, &User{ID: u.ID + 1})
if prev == nil {
return errors.New("Previous user not found")
err = errors.New("Previous user not found")
return false
}
if strings.HasSuffix(prev.Name, "Mod") {
return errors.New("Incorrect user name: " + prev.Name)
err = errors.New("Incorrect user name: " + prev.Name)
return false
}
}
return true
})
return nil
},

View File

@ -1,138 +0,0 @@
package mdb
import (
"fmt"
"reflect"
"testing"
)
func TestDBList(t *testing.T) {
db := NewTestDBPrimary(t, t.TempDir())
var (
user1 = User{
ID: NewID(),
Name: "User1",
Email: "user1@gmail.com",
}
user2 = User{
ID: NewID(),
Name: "User2",
Email: "user2@gmail.com",
}
user3 = User{
ID: NewID(),
Name: "User3",
Email: "user3@gmail.com",
}
user1Data = make([]UserDataItem, 10)
user2Data = make([]UserDataItem, 4)
user3Data = make([]UserDataItem, 8)
)
err := db.Update(func(tx *Snapshot) error {
if err := db.Users.Insert(tx, &user1); err != nil {
return err
}
if err := db.Users.Insert(tx, &user2); err != nil {
return err
}
for i := range user1Data {
user1Data[i] = UserDataItem{
ID: NewID(),
UserID: user1.ID,
Name: fmt.Sprintf("Name1: %d", i),
Data: fmt.Sprintf("Data: %d", i),
}
if err := db.UserData.Insert(tx, &user1Data[i]); err != nil {
return err
}
}
for i := range user2Data {
user2Data[i] = UserDataItem{
ID: NewID(),
UserID: user2.ID,
Name: fmt.Sprintf("Name2: %d", i),
Data: fmt.Sprintf("Data: %d", i),
}
if err := db.UserData.Insert(tx, &user2Data[i]); err != nil {
return err
}
}
for i := range user3Data {
user3Data[i] = UserDataItem{
ID: NewID(),
UserID: user3.ID,
Name: fmt.Sprintf("Name3: %d", i),
Data: fmt.Sprintf("Data: %d", i),
}
if err := db.UserData.Insert(tx, &user3Data[i]); err != nil {
return err
}
}
return nil
})
if err != nil {
t.Fatal(err)
}
type TestCase struct {
Name string
Args ListArgs[UserDataItem]
Expected []UserDataItem
}
cases := []TestCase{
{
Name: "User1 all",
Args: ListArgs[UserDataItem]{
After: &UserDataItem{
UserID: user1.ID,
},
While: func(item *UserDataItem) bool {
return item.UserID == user1.ID
},
},
Expected: user1Data,
}, {
Name: "User1 limited",
Args: ListArgs[UserDataItem]{
After: &UserDataItem{
UserID: user1.ID,
},
While: func(item *UserDataItem) bool {
return item.UserID == user1.ID
},
Limit: 4,
},
Expected: user1Data[:4],
},
}
for _, tc := range cases {
t.Run(tc.Name, func(t *testing.T) {
tx := db.Snapshot()
l := db.UserData.ByName.List(tx, tc.Args, nil)
if len(l) != len(tc.Expected) {
t.Fatal(tc.Name, l)
}
for i := range l {
if !reflect.DeepEqual(*l[i], tc.Expected[i]) {
t.Fatal(tc.Name, l)
}
}
})
}
}

View File

@ -72,7 +72,7 @@ func testRunner_testCase(t *testing.T, testCase DBTestCase) {
}
// TODO: Why is this necessary?
//time.Sleep(time.Second)
time.Sleep(time.Second)
finalStep := testCase.Steps[len(testCase.Steps)-1]
secondarySnapshot := db2.Snapshot()

View File

@ -73,10 +73,6 @@ type Database struct {
}
func New(conf Config) *Database {
if conf.NetTimeout <= 0 {
conf.NetTimeout = time.Minute
}
if conf.MaxConcurrentUpdates <= 0 {
conf.MaxConcurrentUpdates = 32
}

View File

@ -1,6 +1,7 @@
package mdb
import (
"fmt"
"reflect"
"testing"
)
@ -19,16 +20,18 @@ func (i Index[T]) AssertEqual(t *testing.T, tx1, tx2 *Snapshot) {
}
errStr := ""
iter := i.Ascend(tx1)
for item1 := range iter {
i.Ascend(tx1, func(item1 *T) bool {
item2 := i.Get(tx2, item1)
if item2 == nil {
t.Fatalf("Indices don't match. %v not found.", item1)
errStr = fmt.Sprintf("Indices don't match. %v not found.", item1)
return false
}
if !reflect.DeepEqual(item1, item2) {
t.Fatalf("%v != %v", item1, item2)
errStr = fmt.Sprintf("%v != %v", item1, item2)
return false
}
}
return true
})
if errStr != "" {
t.Fatal(errStr)

View File

@ -1,7 +1,6 @@
package mdb
import (
"iter"
"unsafe"
"github.com/google/btree"
@ -112,40 +111,32 @@ func (i *Index[T]) Max(tx *Snapshot) *T {
return nil
}
func (i *Index[T]) Ascend(tx *Snapshot) iter.Seq[*T] {
func (i *Index[T]) Ascend(tx *Snapshot, each func(*T) bool) {
tx = i.ensureSnapshot(tx)
return func(yield func(*T) bool) {
i.btreeForIter(tx).Ascend(func(t *T) bool {
return yield(i.copy(t))
})
}
i.btreeForIter(tx).Ascend(func(t *T) bool {
return each(i.copy(t))
})
}
func (i *Index[T]) AscendAfter(tx *Snapshot, after *T) iter.Seq[*T] {
func (i *Index[T]) AscendAfter(tx *Snapshot, after *T, each func(*T) bool) {
tx = i.ensureSnapshot(tx)
return func(yield func(*T) bool) {
i.btreeForIter(tx).AscendGreaterOrEqual(after, func(t *T) bool {
return yield(i.copy(t))
})
}
i.btreeForIter(tx).AscendGreaterOrEqual(after, func(t *T) bool {
return each(i.copy(t))
})
}
func (i *Index[T]) Descend(tx *Snapshot) iter.Seq[*T] {
func (i *Index[T]) Descend(tx *Snapshot, each func(*T) bool) {
tx = i.ensureSnapshot(tx)
return func(yield func(*T) bool) {
i.btreeForIter(tx).Descend(func(t *T) bool {
return yield(i.copy(t))
})
}
i.btreeForIter(tx).Descend(func(t *T) bool {
return each(i.copy(t))
})
}
func (i *Index[T]) DescendAfter(tx *Snapshot, after *T) iter.Seq[*T] {
func (i *Index[T]) DescendAfter(tx *Snapshot, after *T, each func(*T) bool) {
tx = i.ensureSnapshot(tx)
return func(yield func(*T) bool) {
i.btreeForIter(tx).DescendLessOrEqual(after, func(t *T) bool {
return yield(i.copy(t))
})
}
i.btreeForIter(tx).DescendLessOrEqual(after, func(t *T) bool {
return each(i.copy(t))
})
}
func (i *Index[T]) Count(tx *Snapshot) int {
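These hunks drop the `iter.Seq[*T]` iterators in favor of explicit callbacks, matching the go.mod change to Go 1.21.1 in this comparison. Callers now pass an `each func(*T) bool` and carry any early-exit error out themselves, as the updated test cases above do. A condensed sketch of the pattern; `db`, `tx`, and `User` are assumed from the test fixtures:

```go
// Walk users by name starting at a pivot; returning false stops the walk.
// The error is carried out through the enclosing variable, mirroring the
// updated test cases earlier in this comparison.
var iterErr error
db.Users.ByName.AscendAfter(tx, &User{Name: "User1"}, func(u *User) bool {
	u.Name += " (checked)" // illustrative mutation
	if err := db.Users.Update(tx, u); err != nil {
		iterErr = err
		return false // stop iterating on the first failure
	}
	return true
})
if iterErr != nil {
	// handle the failure observed during iteration
}
```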

View File

@ -1,8 +1,9 @@
package mdb
func (i Index[T]) Dump(tx *Snapshot) (l []T) {
for t := range i.Ascend(tx) {
i.Ascend(tx, func(t *T) bool {
l = append(l, *t)
}
return true
})
return l
}

View File

@ -1,11 +0,0 @@
package mdb
import (
"testing"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}

View File

@ -51,10 +51,6 @@ func (f *freeList) Push(pages ...uint64) {
}
}
func (f *freeList) SetNextPage(nextPage uint64) {
f.nextPage = nextPage
}
func (f *freeList) Pop(count int, out []uint64) []uint64 {
out = out[:0]

View File

@ -13,19 +13,14 @@ type Index struct {
}
func NewIndex(f *File) (*Index, error) {
firstPage, err := f.pageCount()
if err != nil {
return nil, err
}
idx := &Index{
fList: newFreeList(firstPage),
fList: newFreeList(0),
aList: *newAllocList(),
seen: map[[2]uint64]struct{}{},
mask: []bool{},
}
err = f.iterate(func(pageID uint64, page dataPage) error {
err := f.iterate(func(pageID uint64, page dataPage) error {
header := page.Header()
switch header.PageType {
case pageTypeHead:

View File

@ -134,21 +134,6 @@ func (pf *File) writePage(page dataPage, id uint64) error {
// Reading
// ----------------------------------------------------------------------------
func (pf *File) pageCount() (uint64, error) {
fi, err := pf.f.Stat()
if err != nil {
return 0, errs.IO.WithErr(err)
}
fileSize := fi.Size()
if fileSize%pageSize != 0 {
return 0, errs.Corrupt.WithMsg("File size isn't a multiple of page size.")
}
maxPage := uint64(fileSize / pageSize)
return maxPage, nil
}
func (pf *File) iterate(each func(pageID uint64, page dataPage) error) error {
pf.lock.RLock()
defer pf.lock.RUnlock()