Changing meta dataSize to blockSize to make dead-space more accurate
fredli74 committed Jun 20, 2020
1 parent cfafbae commit a29680a
Showing 2 changed files with 31 additions and 27 deletions.
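
Why the rename matters: the meta entry used to record dataSize, the length of a block's Data payload, and killMetaEntry credited only that many bytes as dead space in the data file when a block was removed. The block as written to disk is larger than its payload alone; the new code takes the size from data.Len() when writing and from offset - blockOffset when recovering, and stores it in the renamed blockSize field, so the dead-space counters match the bytes a compaction can actually reclaim. Below is a minimal Go sketch of the accounting difference; the overhead value and the function itself are hypothetical illustrations, not code from the repository.

package main

import "fmt"

// deadSpaceFreed sketches how many bytes of a .dat file get counted as dead
// when a block is killed. The "overhead" argument stands in for whatever the
// serialized block carries beyond its raw payload (an assumed value here,
// not the real hashbox framing).
func deadSpaceFreed(payloadLen, overhead int, useBlockSize bool) int64 {
	if useBlockSize {
		return int64(payloadLen + overhead) // new: the whole on-disk block is credited
	}
	return int64(payloadLen) // old: the overhead bytes stayed invisible to the dead-space totals
}

func main() {
	fmt.Println(deadSpaceFreed(1000, 72, false)) // 1000 bytes credited before this commit
	fmt.Println(deadSpaceFreed(1000, 72, true))  // 1072 bytes credited after it
}
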
server/account.go (33 changes: 17 additions & 16 deletions)
@@ -8,13 +8,14 @@ package main
import (
"crypto/md5"
"encoding/base64"
"github.com/fredli74/hashbox/core"
"io"
"os"
"path/filepath"
"sort"
"sync"
"time"

"github.com/fredli74/hashbox/core"
)

type AccountInfo struct {
@@ -50,11 +51,11 @@ type AccountHandler struct {
}

const (
accounthandler_setinfo = iota
accounthandler_getinfo
accounthandler_listset
accounthandler_addset
accounthandler_removeset
accounthandlerSetinfo = iota
accounthandlerGetinfo
accounthandlerListset
accounthandlerAddset
accounthandlerRemoveset
)

func (handler *AccountHandler) dispatcher() {
@@ -80,22 +81,22 @@ func (handler *AccountHandler) dispatcher() {
func() {
defer close(q.result) // Always close the result channel after returning
switch q.query {
case accounthandler_getinfo:
case accounthandlerGetinfo:
accountNameH := q.data.(core.Byte128)
q.result <- readInfoFile(accountNameH)

case accounthandler_setinfo:
case accounthandlerSetinfo:
accountInfo := q.data.(AccountInfo)
accountNameH := core.Hash([]byte(accountInfo.AccountName))

writeInfoFile(accountNameH, accountInfo)
q.result <- true

case accounthandler_listset:
case accounthandlerListset:
list := q.data.(queryListDataset)
q.result <- readDBFile(list.AccountNameH, list.DatasetName)

case accounthandler_addset:
case accounthandlerAddset:
add := q.data.(queryAddDatasetState)

result := appendDatasetTx(add.AccountNameH, add.DatasetName, dbTx{timestamp: time.Now().UnixNano(), txType: dbTxTypeAdd, data: add.State})
@@ -120,7 +121,7 @@ func (handler *AccountHandler) dispatcher() {

q.result <- result

case accounthandler_removeset:
case accounthandlerRemoveset:
del := q.data.(queryRemoveDatasetState)

result := appendDatasetTx(del.AccountNameH, del.DatasetName, dbTx{timestamp: time.Now().UnixNano(), txType: dbTxTypeDel, data: del.StateID})
@@ -178,33 +179,33 @@ func (handler *AccountHandler) doCommand(q ChannelQuery) interface{} {
}

func (handler *AccountHandler) ListDataset(a core.Byte128, set core.String) *dbStateCollection {
q := ChannelQuery{accounthandler_listset, queryListDataset{a, set}, make(chan interface{}, 1)}
q := ChannelQuery{accounthandlerListset, queryListDataset{a, set}, make(chan interface{}, 1)}
return handler.doCommand(q).(*dbStateCollection)
}

func (handler *AccountHandler) AddDatasetState(a core.Byte128, set core.String, state core.DatasetState) error {
q := ChannelQuery{accounthandler_addset, queryAddDatasetState{a, set, state}, make(chan interface{}, 1)}
q := ChannelQuery{accounthandlerAddset, queryAddDatasetState{a, set, state}, make(chan interface{}, 1)}
r := handler.doCommand(q)
if r != nil {
return r.(error)
}
return nil
}
func (handler *AccountHandler) RemoveDatasetState(a core.Byte128, set core.String, stateID core.Byte128) error {
q := ChannelQuery{accounthandler_removeset, queryRemoveDatasetState{a, set, stateID}, make(chan interface{}, 1)}
q := ChannelQuery{accounthandlerRemoveset, queryRemoveDatasetState{a, set, stateID}, make(chan interface{}, 1)}
r := handler.doCommand(q)
if r != nil {
return r.(error)
}
return nil
}
func (handler *AccountHandler) GetInfo(a core.Byte128) *AccountInfo {
q := ChannelQuery{accounthandler_getinfo, a, make(chan interface{}, 1)}
q := ChannelQuery{accounthandlerGetinfo, a, make(chan interface{}, 1)}
return handler.doCommand(q).(*AccountInfo)
// ToDO: test this with a read-error
}
func (handler *AccountHandler) SetInfo(a AccountInfo) bool {
q := ChannelQuery{accounthandler_setinfo, a, make(chan interface{}, 1)}
q := ChannelQuery{accounthandlerSetinfo, a, make(chan interface{}, 1)}
return handler.doCommand(q).(bool)
// ToDO: test this with a write-error
}
server/storage.go (25 changes: 14 additions & 11 deletions)
@@ -320,7 +320,7 @@ type storageMetaEntry struct {
datamarker uint32 // = storageDataMarker, used to find / align blocks in case of recovery
blockID core.Byte128 // 16 bytes
location sixByteLocation // 6 bytes
dataSize uint32 // Size of hashboxBlock Data
blockSize uint32 // Size of hashboxBlock
links []core.Byte128 // Array of BlockIDs
}

@@ -332,7 +332,7 @@ func (e *storageMetaEntry) Serialize(w io.Writer) (size int) {
size += core.WriteUint32(w, storageDataMarker)
size += e.blockID.Serialize(w)
size += e.location.Serialize(w)
size += core.WriteUint32(w, e.dataSize)
size += core.WriteUint32(w, e.blockSize)
size += core.WriteUint32(w, uint32(len(e.links)))
for i := range e.links {
size += e.links[i].Serialize(w)
@@ -346,7 +346,7 @@ func (e *storageMetaEntry) Unserialize(r io.Reader) (size int) {
}
size += e.blockID.Unserialize(r)
size += e.location.Unserialize(r)
size += core.ReadUint32(r, &e.dataSize)
size += core.ReadUint32(r, &e.blockSize)
var n uint32
size += core.ReadUint32(r, &n)
e.links = make([]core.Byte128, n)
@@ -619,8 +619,8 @@ func (handler *StorageHandler) killMetaEntry(blockID core.Byte128, metaFileNumbe
size += int64(entrySize)

dataFileNumber, _ := entry.location.Get()
handler.setDeadSpace(storageFileTypeData, dataFileNumber, int64(entry.dataSize), true)
size += int64(entry.dataSize)
handler.setDeadSpace(storageFileTypeData, dataFileNumber, int64(entry.blockSize), true)
size += int64(entry.blockSize)
} else {
abort("Incorrect block %x (should be %x) read on metadata location %x:%x", entry.blockID[:], blockID[:], metaFileNumber, metaOffset)
}
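
killMetaEntry above adds both the entry's own size (entrySize) and the renamed blockSize to its running total, and the entry's footprint follows directly from the Serialize method earlier in this diff: a fixed 34 bytes plus 16 bytes per link. A small sketch of that arithmetic, assuming nothing beyond the listed fields is written:

package main

import "fmt"

// metaEntrySize mirrors storageMetaEntry.Serialize as shown in this diff:
// 4 (datamarker) + 16 (blockID) + 6 (location) + 4 (blockSize) + 4 (link count)
// plus 16 bytes per core.Byte128 link.
func metaEntrySize(numLinks int) int {
	return 4 + 16 + 6 + 4 + 4 + numLinks*16
}

func main() {
	fmt.Println(metaEntrySize(0)) // 34 bytes for a block with no links
	fmt.Println(metaEntrySize(3)) // 82 bytes for a block with three links
}
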
@@ -685,7 +685,7 @@ func (handler *StorageHandler) writeBlockFile(block *core.HashboxBlock) bool {
// flush notice: manually flush datFile before creating the meta entry
datFile.Sync()

metaEntry := storageMetaEntry{blockID: block.BlockID, dataSize: uint32(block.Data.Len()), links: block.Links}
metaEntry := storageMetaEntry{blockID: block.BlockID, blockSize: uint32(data.Len()), links: block.Links}
metaEntry.location.Set(datFileNumber, datOffset)
// flush notice: writeMetaEntry always flushes meta file
metaFileNumber, metaOffset := handler.writeMetaEntry(0, 0, &metaEntry)
@@ -1323,6 +1323,9 @@ func (handler *StorageHandler) RecoverData(startfile int32, endfile int32) (repa
break
}
}

blockSize := offset - blockOffset

if blockOffset > storageOffsetLimit {
core.Log(core.LogError, "Offset %x for block %x is beyond offset limit, forcing a move", blockOffset, dataEntry.block.BlockID[:])
brokenSpot = blockOffset
@@ -1331,10 +1334,10 @@
// Cannot fit a Cgap marker, so we need to move the block
moveFileNum, moveOffset, moveFile := handler.findFreeOffset(storageFileTypeData, -1)
core.Log(core.LogDebug, "Rewriting block %x at (%x:%x)", dataEntry.block.BlockID[:], moveFileNum, moveOffset)
dataEntry.Serialize(moveFile.Writer)
moveSize := dataEntry.Serialize(moveFile.Writer)

core.Log(core.LogTrace, "Creating new meta for block %x", dataEntry.block.BlockID[:])
metaEntry := storageMetaEntry{blockID: dataEntry.block.BlockID, dataSize: uint32(dataEntry.block.Data.Len()), links: dataEntry.block.Links}
metaEntry := storageMetaEntry{blockID: dataEntry.block.BlockID, blockSize: uint32(moveSize), links: dataEntry.block.Links}
metaEntry.location.Set(moveFileNum, moveOffset)
// flush notice: force flush before writing meta entry
moveFile.Sync()
@@ -1375,8 +1378,8 @@ func (handler *StorageHandler) RecoverData(startfile int32, endfile int32) (repa
core.Log(core.LogWarning, "Metadata cache location error for block %x (%x:%x != %x:%x)", dataEntry.block.BlockID[:], f, o, datFileNumber, blockOffset)
rewriteIX = true
}
if int(metaEntry.dataSize) != dataEntry.block.Data.Len() {
core.Log(core.LogWarning, "Metadata cache size error for block %x (%x != %x)", dataEntry.block.BlockID[:], metaEntry.dataSize, dataEntry.block.Data.Len())
if int64(metaEntry.blockSize) != blockSize {
core.Log(core.LogWarning, "Metadata cache size error for block %x (%x != %x)", dataEntry.block.BlockID[:], metaEntry.blockSize, blockSize)
rewriteIX = true
}
linksOk := true
@@ -1413,7 +1416,7 @@ func (handler *StorageHandler) RecoverData(startfile int32, endfile int32) (repa
core.Log(core.LogTrace, "Block %x (%x:%x) verified", dataEntry.block.BlockID[:], datFileNumber, blockOffset)
} else {
core.Log(core.LogTrace, "REPAIRING meta for block %x", dataEntry.block.BlockID[:])
metaEntry := storageMetaEntry{blockID: dataEntry.block.BlockID, dataSize: uint32(dataEntry.block.Data.Len()), links: dataEntry.block.Links}
metaEntry := storageMetaEntry{blockID: dataEntry.block.BlockID, blockSize: uint32(blockSize), links: dataEntry.block.Links}
metaEntry.location.Set(datFileNumber, blockOffset)
metaFileNumber, metaOffset := handler.writeMetaEntry(0, 0, &metaEntry)
