Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2025-10-21 00:58:51 +08:00)
volume server: support tcp direct put/get/delete
@@ -52,7 +52,7 @@ func (df *DiskFile) WriteAt(p []byte, off int64) (n int, err error) {
     return
 }
 
-func (df *DiskFile) Append(p []byte) (n int, err error) {
+func (df *DiskFile) Write(p []byte) (n int, err error) {
     return df.WriteAt(p, df.fileSize)
 }
 
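The one-line change above renames DiskFile.Append to DiskFile.Write, so the disk backend satisfies io.Writer and can be handed directly to io.Copy and needle.NewCRCwriter in the new streaming path below. A minimal, hypothetical sketch of the same pattern (the appendFile type and its fileSize bookkeeping are illustrative, not taken from backend.DiskFile):

package main

import (
    "fmt"
    "io"
    "os"
)

// appendFile is a hypothetical stand-in for backend.DiskFile: it tracks the
// current end-of-file offset and implements io.Writer by delegating to
// WriteAt at that offset, which is the shape of the renamed Write method.
type appendFile struct {
    f        *os.File
    fileSize int64
}

func (af *appendFile) WriteAt(p []byte, off int64) (n int, err error) {
    n, err = af.f.WriteAt(p, off)
    if end := off + int64(n); end > af.fileSize {
        af.fileSize = end // remember the new end so the next Write appends after it
    }
    return
}

func (af *appendFile) Write(p []byte) (n int, err error) {
    return af.WriteAt(p, af.fileSize) // append at the current end, like DiskFile.Write
}

func main() {
    f, _ := os.CreateTemp("", "appendfile-*")
    defer os.Remove(f.Name())
    defer f.Close()

    af := &appendFile{f: f}
    var w io.Writer = af // implementing io.Writer is what io.Copy and needle.NewCRCwriter need
    w.Write([]byte("first"))
    w.Write([]byte(" second"))
    fmt.Println(af.fileSize, "bytes appended")
}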
@@ -2,6 +2,8 @@ package needle
 
 import (
     "fmt"
+    "hash"
+    "io"
 
     "github.com/klauspost/crc32"
@@ -29,3 +31,25 @@ func (n *Needle) Etag() string {
     util.Uint32toBytes(bits, uint32(n.Checksum))
     return fmt.Sprintf("%x", bits)
 }
+
+func NewCRCwriter(w io.Writer) *CRCwriter {
+
+    return &CRCwriter{
+        h: crc32.New(table),
+        w: w,
+    }
+
+}
+
+type CRCwriter struct {
+    h hash.Hash32
+    w io.Writer
+}
+
+func (c *CRCwriter) Write(p []byte) (n int, err error) {
+    n, err = c.w.Write(p) // with each write ...
+    c.h.Write(p)          // ... update the hash
+    return
+}
+
+func (c *CRCwriter) Sum() uint32 { return c.h.Sum32() } // final hash
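CRCwriter computes the needle checksum while the payload streams through to the underlying writer, so StreamWrite below never has to buffer the whole body just to hash it. A standalone sketch of the same wrapper using the standard library hash/crc32; the Castagnoli table here is an assumption standing in for the needle package's `table`:

package main

import (
    "bytes"
    "fmt"
    "hash"
    "hash/crc32"
    "io"
)

// castagnoliTable stands in for the needle package's `table`
// (assumed to be a Castagnoli CRC-32 table).
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)

// crcWriter mirrors CRCwriter: it forwards every Write to the wrapped writer
// and folds the same bytes into a running CRC-32.
type crcWriter struct {
    h hash.Hash32
    w io.Writer
}

func (c *crcWriter) Write(p []byte) (n int, err error) {
    n, err = c.w.Write(p) // pass the bytes through ...
    c.h.Write(p)          // ... and update the hash as a side effect
    return
}

func main() {
    var sink bytes.Buffer
    cw := &crcWriter{h: crc32.New(castagnoliTable), w: &sink}
    io.Copy(cw, bytes.NewReader([]byte("needle payload")))
    fmt.Printf("wrote %d bytes, crc32c=%08x\n", sink.Len(), cw.h.Sum32())
}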
weed/storage/volume_stream_write.go (new normal file, 106 lines)
@@ -0,0 +1,106 @@
+package storage
+
+import (
+    "bufio"
+    "fmt"
+    "github.com/chrislusf/seaweedfs/weed/util"
+    "io"
+    "time"
+
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/storage/backend"
+    "github.com/chrislusf/seaweedfs/weed/storage/needle"
+    . "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+func (v *Volume) StreamWrite(n *needle.Needle, data io.Reader, dataSize uint32) (err error) {
+
+    v.dataFileAccessLock.Lock()
+    defer v.dataFileAccessLock.Unlock()
+
+    df, ok := v.DataBackend.(*backend.DiskFile)
+    if !ok {
+        return fmt.Errorf("unexpected volume backend")
+    }
+    offset, _, _ := v.DataBackend.GetStat()
+
+    header := make([]byte, NeedleHeaderSize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation
+    CookieToBytes(header[0:CookieSize], n.Cookie)
+    NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id)
+    n.Size = 4 + Size(dataSize) + 1
+    SizeToBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size)
+
+    n.DataSize = dataSize
+
+    // needle header
+    df.Write(header[0:NeedleHeaderSize])
+
+    // data size and data
+    util.Uint32toBytes(header[0:4], n.DataSize)
+    df.Write(header[0:4])
+    // write and calculate CRC
+    crcWriter := needle.NewCRCwriter(df)
+    io.Copy(crcWriter, io.LimitReader(data, int64(dataSize)))
+
+    // flags
+    util.Uint8toBytes(header[0:1], n.Flags)
+    df.Write(header[0:1])
+
+    // data checksum
+    util.Uint32toBytes(header[0:needle.NeedleChecksumSize], crcWriter.Sum())
+    df.Write(header[0:needle.NeedleChecksumSize])
+
+    // write timestamp, padding
+    n.AppendAtNs = uint64(time.Now().UnixNano())
+    util.Uint64toBytes(header[needle.NeedleChecksumSize:needle.NeedleChecksumSize+TimestampSize], n.AppendAtNs)
+    padding := needle.PaddingLength(n.Size, needle.Version3)
+    df.Write(header[0 : needle.NeedleChecksumSize+TimestampSize+padding])
+
+    // add to needle map
+    if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
+        glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
+    }
+    return
+}
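The sizes StreamWrite juggles: n.Size counts the 4-byte DataSize field, the payload, and the 1-byte Flags, while PaddingLength pads the record (header + body + checksum + timestamp) out to an alignment boundary. A small sketch of that arithmetic, with the layout constants hardcoded as assumptions about the usual SeaweedFS needle format rather than values taken from this diff:

package main

import "fmt"

// Assumed layout constants: cookie 4 + id 8 + size 4 = 16-byte header,
// 4-byte CRC, 8-byte timestamp, records padded to 8-byte boundaries.
const (
    needleHeaderSize   = 16
    needleChecksumSize = 4
    timestampSize      = 8
    needlePaddingSize  = 8
)

// recordLayout mirrors the arithmetic in StreamWrite: n.Size covers the
// 4-byte DataSize field, the payload itself, and the 1-byte Flags.
func recordLayout(dataSize uint32) (needleSize, padding, total uint32) {
    needleSize = 4 + dataSize + 1
    unpadded := needleHeaderSize + needleSize + needleChecksumSize + timestampSize
    padding = (needlePaddingSize - unpadded%needlePaddingSize) % needlePaddingSize
    return needleSize, padding, unpadded + padding
}

func main() {
    for _, ds := range []uint32{0, 100, 1023} {
        size, pad, total := recordLayout(ds)
        fmt.Printf("dataSize=%4d  n.Size=%4d  padding=%d  record=%4d bytes\n", ds, size, pad, total)
    }
}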
+
+func (v *Volume) StreamRead(n *needle.Needle, writer io.Writer) (err error) {
+
+    v.dataFileAccessLock.Lock()
+    defer v.dataFileAccessLock.Unlock()
+
+    nv, ok := v.nm.Get(n.Id)
+    if !ok || nv.Offset.IsZero() {
+        return ErrorNotFound
+    }
+
+    sr := &StreamReader{
+        readerAt: v.DataBackend,
+        offset:   nv.Offset.ToActualOffset(),
+    }
+    bufReader := bufio.NewReader(sr)
+    bufReader.Discard(NeedleHeaderSize)
+    sizeBuf := make([]byte, 4)
+    bufReader.Read(sizeBuf)
+    if _, err = writer.Write(sizeBuf); err != nil {
+        return err
+    }
+    dataSize := util.BytesToUint32(sizeBuf)
+
+    _, err = io.Copy(writer, io.LimitReader(bufReader, int64(dataSize)))
+
+    return
+}
+
+type StreamReader struct {
+    offset   int64
+    readerAt io.ReaderAt
+}
+
+func (sr *StreamReader) Read(p []byte) (n int, err error) {
+    n, err = sr.readerAt.ReadAt(p, sr.offset)
+    if err != nil {
+        return
+    }
+    sr.offset += int64(n)
+    return
+}
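StreamRead streams the payload out without loading it: StreamReader turns the volume's io.ReaderAt backend into a sequential io.Reader positioned at the needle's offset, bufio.Reader skips the fixed header, and io.LimitReader bounds the copy to DataSize bytes. The adapter plays roughly the role of the standard library's io.NewSectionReader; a toy sketch of the same read path (the offsets, 16-byte header, and 1 MiB span are illustrative, not taken from the diff):

package main

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
)

func main() {
    // Toy stand-in for the volume file: an io.ReaderAt with one record at offset 32.
    // Illustrative layout: 16 filler bytes, 4-byte big-endian data size, payload.
    record := append(bytes.Repeat([]byte{'.'}, 16), 0, 0, 0, 5)
    record = append(record, []byte("hello")...)
    volume := bytes.NewReader(append(make([]byte, 32), record...))

    // Sequential view over the ReaderAt starting at the record's offset,
    // the same role StreamReader plays for v.DataBackend.
    section := io.NewSectionReader(volume, 32, 1<<20) // span length is an arbitrary upper bound

    br := bufio.NewReader(section)
    br.Discard(16) // skip the fixed-size header, as StreamRead does with NeedleHeaderSize

    sizeBuf := make([]byte, 4)
    io.ReadFull(br, sizeBuf) // 4-byte big-endian data size
    dataSize := uint32(sizeBuf[0])<<24 | uint32(sizeBuf[1])<<16 | uint32(sizeBuf[2])<<8 | uint32(sizeBuf[3])

    var out bytes.Buffer
    io.Copy(&out, io.LimitReader(br, int64(dataSize))) // bounded copy of just the payload
    fmt.Printf("dataSize=%d payload=%q\n", dataSize, out.String())
}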
@@ -286,7 +286,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
             if err != nil {
                 return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size, err)
             }
-            dstDatBackend.Append(needleBytes)
+            dstDatBackend.Write(needleBytes)
             util.Uint32toBytes(idxEntryBytes[8:12], uint32(offset/NeedlePaddingSize))
         } else { //deleted needle
             //fakeDelNeedle 's default Data field is nil