Merge branch 'main' into middleware-compatibility
Commit e1565f60d6

Makefile | 1
@@ -15,6 +15,7 @@ proto:
	$(eval PKGMAP := $$(P_TREE_CHANGES),$$(P_ACL_RECORDS))
	protoc --gogofaster_out=$(PKGMAP):. --go-drpc_out=protolib=github.com/gogo/protobuf:. commonspace/spacesyncproto/protos/*.proto
	protoc --gogofaster_out=$(PKGMAP):. --go-drpc_out=protolib=github.com/gogo/protobuf:. commonfile/fileproto/protos/*.proto
+	protoc --gogofaster_out=$(PKGMAP):. --go-drpc_out=protolib=github.com/gogo/protobuf:. net/streampool/testservice/protos/*.proto

deps:
	go mod download
app/logger/ctxfiled.go | 55 (new file)
@@ -0,0 +1,55 @@
package logger

import (
	"context"

	"go.uber.org/zap"
)

type ctxKey uint

const (
	ctxKeyFields ctxKey = iota
)

// WithCtx returns a logger enriched with the fields stored in ctx.
func WithCtx(ctx context.Context, l *zap.Logger) *zap.Logger {
	return l.With(CtxGetFields(ctx)...)
}

// CtxWithFields stores fields in the context, appending to any fields already present.
func CtxWithFields(ctx context.Context, fields ...zap.Field) context.Context {
	existingFields := CtxGetFields(ctx)
	if existingFields != nil {
		fields = append(existingFields, fields...)
	}
	return context.WithValue(ctx, ctxKeyFields, fields)
}

func CtxGetFields(ctx context.Context) (fields []zap.Field) {
	if v := ctx.Value(ctxKeyFields); v != nil {
		return v.([]zap.Field)
	}
	return
}

// CtxLogger wraps *zap.Logger with context-aware methods that automatically
// attach the fields stored in the context.
type CtxLogger struct {
	*zap.Logger
}

func (cl CtxLogger) DebugCtx(ctx context.Context, msg string, fields ...zap.Field) {
	cl.Logger.Debug(msg, append(CtxGetFields(ctx), fields...)...)
}

func (cl CtxLogger) InfoCtx(ctx context.Context, msg string, fields ...zap.Field) {
	cl.Logger.Info(msg, append(CtxGetFields(ctx), fields...)...)
}

func (cl CtxLogger) WarnCtx(ctx context.Context, msg string, fields ...zap.Field) {
	cl.Logger.Warn(msg, append(CtxGetFields(ctx), fields...)...)
}

func (cl CtxLogger) ErrorCtx(ctx context.Context, msg string, fields ...zap.Field) {
	cl.Logger.Error(msg, append(CtxGetFields(ctx), fields...)...)
}

func (cl CtxLogger) With(fields ...zap.Field) CtxLogger {
	return CtxLogger{cl.Logger.With(fields...)}
}
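Note: a minimal usage sketch of the new context-aware logger (hypothetical caller code, not part of this commit; NewNamed is changed below in this commit to return CtxLogger):

package main

import (
	"context"

	"github.com/anytypeio/any-sync/app/logger"
	"go.uber.org/zap"
)

func main() {
	log := logger.NewNamed("example")
	// Attach peerId once; every subsequent *Ctx call on this context sees it.
	ctx := logger.CtxWithFields(context.Background(), zap.String("peerId", "p1"))
	// Logs peerId (from ctx) together with the explicit treeId field.
	log.InfoCtx(ctx, "handling message", zap.String("treeId", "t1"))
}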
@@ -9,7 +9,7 @@ var (
	mu            sync.Mutex
	defaultLogger *zap.Logger
	levels        = make(map[string]zap.AtomicLevel)
-	loggers       = make(map[string]*zap.Logger)
+	loggers       = make(map[string]CtxLogger)
)

func init() {
@@ -22,7 +22,7 @@ func SetDefault(l *zap.Logger) {
	defer mu.Unlock()
	*defaultLogger = *l
	for name, l := range loggers {
-		*l = *defaultLogger.Named(name)
+		*l.Logger = *defaultLogger.Named(name)
	}
}

@@ -38,13 +38,14 @@ func Default() *zap.Logger {
	return defaultLogger
}

-func NewNamed(name string, fields ...zap.Field) *zap.Logger {
+func NewNamed(name string, fields ...zap.Field) CtxLogger {
	mu.Lock()
	defer mu.Unlock()
	l := defaultLogger.Named(name)
	if len(fields) > 0 {
		l = l.With(fields...)
	}
-	loggers[name] = l
-	return l
+	ctxL := CtxLogger{l}
+	loggers[name] = ctxL
+	return ctxL
}
@@ -5,7 +5,6 @@ import (
	"github.com/anytypeio/any-sync/commonspace/object/syncobjectgetter"
	"github.com/anytypeio/any-sync/commonspace/object/tree/objecttree"
	"github.com/anytypeio/any-sync/commonspace/object/treegetter"
-	"golang.org/x/exp/slices"
)

type commonGetter struct {
@@ -33,13 +32,12 @@ func (c *commonGetter) GetTree(ctx context.Context, spaceId, treeId string) (obj
}

func (c *commonGetter) getReservedObject(id string) syncobjectgetter.SyncObject {
-	pos := slices.IndexFunc(c.reservedObjects, func(object syncobjectgetter.SyncObject) bool {
-		return object.Id() == id
-	})
-	if pos == -1 {
-		return nil
+	for _, obj := range c.reservedObjects {
+		if obj != nil && obj.Id() == id {
+			return obj
+		}
	}
-	return c.reservedObjects[pos]
+	return nil
}

func (c *commonGetter) GetObject(ctx context.Context, objectId string) (obj syncobjectgetter.SyncObject, err error) {
@@ -2,7 +2,9 @@ package headsync

import (
	"context"
+	"fmt"
	"github.com/anytypeio/any-sync/app/ldiff"
+	"github.com/anytypeio/any-sync/app/logger"
	"github.com/anytypeio/any-sync/commonspace/confconnector"
	"github.com/anytypeio/any-sync/commonspace/object/tree/synctree"
	"github.com/anytypeio/any-sync/commonspace/object/treegetter"
@@ -31,7 +33,7 @@ func newDiffSyncer(
	storage spacestorage.SpaceStorage,
	clientFactory spacesyncproto.ClientFactory,
	syncStatus syncstatus.StatusUpdater,
-	log *zap.Logger) DiffSyncer {
+	log logger.CtxLogger) DiffSyncer {
	return &diffSyncer{
		diff:    diff,
		spaceId: spaceId,
@@ -51,7 +53,7 @@ type diffSyncer struct {
	cache         treegetter.TreeGetter
	storage       spacestorage.SpaceStorage
	clientFactory spacesyncproto.ClientFactory
-	log           *zap.Logger
+	log           logger.CtxLogger
	deletionState deletionstate.DeletionState
	syncStatus    syncstatus.StatusUpdater
}
@@ -90,16 +92,22 @@ func (d *diffSyncer) Sync(ctx context.Context) error {
	if err != nil {
		return err
	}
+	var peerIds = make([]string, 0, len(peers))
	for _, p := range peers {
-		if err := d.syncWithPeer(ctx, p); err != nil {
-			d.log.Error("can't sync with peer", zap.String("peer", p.Id()), zap.Error(err))
+		peerIds = append(peerIds, p.Id())
+	}
+	d.log.DebugCtx(ctx, "start diffsync", zap.Strings("peerIds", peerIds))
+	for _, p := range peers {
+		if err = d.syncWithPeer(peer.CtxWithPeerId(ctx, p.Id()), p); err != nil {
+			d.log.ErrorCtx(ctx, "can't sync with peer", zap.String("peer", p.Id()), zap.Error(err))
		}
	}
-	d.log.Info("synced", zap.String("spaceId", d.spaceId), zap.Duration("dur", time.Since(st)))
+	d.log.InfoCtx(ctx, "diff done", zap.String("spaceId", d.spaceId), zap.Duration("dur", time.Since(st)))
	return nil
}

func (d *diffSyncer) syncWithPeer(ctx context.Context, p peer.Peer) (err error) {
+	ctx = logger.CtxWithFields(ctx, zap.String("peerId", p.Id()))
	var (
		cl    = d.clientFactory.Client(p)
		rdiff = NewRemoteDiff(d.spaceId, cl)
@@ -110,7 +118,7 @@ func (d *diffSyncer) syncWithPeer(ctx context.Context, p peer.Peer) (err error)
	err = rpcerr.Unwrap(err)
	if err != nil && err != spacesyncproto.ErrSpaceMissing {
		d.syncStatus.SetNodesOnline(p.Id(), false)
-		return err
+		return fmt.Errorf("diff error: %v", err)
	}
	d.syncStatus.SetNodesOnline(p.Id(), true)

@@ -124,13 +132,14 @@ func (d *diffSyncer) syncWithPeer(ctx context.Context, p peer.Peer) (err error)

	d.syncStatus.RemoveAllExcept(p.Id(), filteredIds, stateCounter)

-	ctx = peer.CtxWithPeerId(ctx, p.Id())
	d.pingTreesInCache(ctx, filteredIds)

	d.log.Info("sync done:", zap.Int("newIds", len(newIds)),
		zap.Int("changedIds", len(changedIds)),
		zap.Int("removedIds", len(removedIds)),
-		zap.Int("already deleted ids", totalLen-len(filteredIds)))
+		zap.Int("already deleted ids", totalLen-len(filteredIds)),
+		zap.String("peerId", p.Id()),
+	)
	return
}
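Note: the pattern above threads peer identity through the context twice, once for the transport and once for logging. A hedged sketch of how the two compose (the helper below is illustrative only, not part of this commit):

// example shows how peer.CtxWithPeerId (transport routing) composes with
// logger.CtxWithFields (log enrichment); both tags survive down the call chain.
func example(ctx context.Context, p peer.Peer, log logger.CtxLogger) {
	ctx = peer.CtxWithPeerId(ctx, p.Id())
	ctx = logger.CtxWithFields(ctx, zap.String("peerId", p.Id()))
	log.DebugCtx(ctx, "syncing") // logs peerId without restating it
}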
@@ -138,17 +147,23 @@ func (d *diffSyncer) pingTreesInCache(ctx context.Context, trees []string) {
	for _, tId := range trees {
		tree, err := d.cache.GetTree(ctx, d.spaceId, tId)
		if err != nil {
+			d.log.InfoCtx(ctx, "can't load tree", zap.Error(err))
			continue
		}
		syncTree, ok := tree.(synctree.SyncTree)
		if !ok {
+			d.log.InfoCtx(ctx, "not a sync tree", zap.String("objectId", tId))
			continue
		}
		// the idea why we call it directly is that if we try to get it from cache
		// it may be already there (i.e. loaded)
		// and build func will not be called, thus we won't sync the tree
		// therefore we just do it manually
-		syncTree.Ping()
+		if err = syncTree.Ping(ctx); err != nil {
+			d.log.WarnCtx(ctx, "synctree.Ping error", zap.Error(err), zap.String("treeId", tId))
+		} else {
+			d.log.DebugCtx(ctx, "success tree ping", zap.String("treeId", tId))
+		}
	}
}
@@ -4,6 +4,7 @@ package headsync

import (
	"context"
	"github.com/anytypeio/any-sync/app/ldiff"
+	"github.com/anytypeio/any-sync/app/logger"
	"github.com/anytypeio/any-sync/commonspace/confconnector"
	"github.com/anytypeio/any-sync/commonspace/object/treegetter"
	"github.com/anytypeio/any-sync/commonspace/settings/deletionstate"
@@ -38,7 +39,7 @@ type headSync struct {
	periodicSync periodicsync.PeriodicSync
	storage      spacestorage.SpaceStorage
	diff         ldiff.Diff
-	log          *zap.Logger
+	log          logger.CtxLogger
	syncer       DiffSyncer

	syncPeriod int
@@ -51,13 +52,13 @@ func NewHeadSync(
	confConnector confconnector.ConfConnector,
	cache treegetter.TreeGetter,
	syncStatus syncstatus.StatusUpdater,
-	log *zap.Logger) HeadSync {
+	log logger.CtxLogger) HeadSync {

	diff := ldiff.New(16, 16)
	l := log.With(zap.String("spaceId", spaceId))
	factory := spacesyncproto.ClientFactoryFunc(spacesyncproto.NewDRPCSpaceSyncClient)
	syncer := newDiffSyncer(spaceId, diff, confConnector, cache, storage, factory, syncStatus, l)
-	periodicSync := periodicsync.NewPeriodicSync(syncPeriod, time.Minute, syncer.Sync, l)
+	periodicSync := periodicsync.NewPeriodicSync(syncPeriod, time.Minute*10, syncer.Sync, l)

	return &headSync{
		spaceId: spaceId,
@@ -9,13 +9,13 @@ import (
type SyncAcl struct {
	list.AclList
	synchandler.SyncHandler
-	streamPool objectsync.StreamPool
+	messagePool objectsync.MessagePool
}

-func NewSyncAcl(aclList list.AclList, streamPool objectsync.StreamPool) *SyncAcl {
+func NewSyncAcl(aclList list.AclList, messagePool objectsync.MessagePool) *SyncAcl {
	return &SyncAcl{
		AclList:     aclList,
		SyncHandler: nil,
-		streamPool:  streamPool,
+		messagePool: messagePool,
	}
}

@@ -40,32 +40,32 @@ func (m *MockSyncClient) EXPECT() *MockSyncClientMockRecorder {
	return m.recorder
}

-// BroadcastAsync mocks base method.
-func (m *MockSyncClient) BroadcastAsync(arg0 *treechangeproto.TreeSyncMessage) error {
+// Broadcast mocks base method.
+func (m *MockSyncClient) Broadcast(arg0 context.Context, arg1 *treechangeproto.TreeSyncMessage) error {
	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "BroadcastAsync", arg0)
+	ret := m.ctrl.Call(m, "Broadcast", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

-// BroadcastAsync indicates an expected call of BroadcastAsync.
-func (mr *MockSyncClientMockRecorder) BroadcastAsync(arg0 interface{}) *gomock.Call {
+// Broadcast indicates an expected call of Broadcast.
+func (mr *MockSyncClientMockRecorder) Broadcast(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BroadcastAsync", reflect.TypeOf((*MockSyncClient)(nil).BroadcastAsync), arg0)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Broadcast", reflect.TypeOf((*MockSyncClient)(nil).Broadcast), arg0, arg1)
}

// BroadcastAsyncOrSendResponsible mocks base method.
-func (m *MockSyncClient) BroadcastAsyncOrSendResponsible(arg0 *treechangeproto.TreeSyncMessage) error {
+func (m *MockSyncClient) BroadcastAsyncOrSendResponsible(arg0 context.Context, arg1 *treechangeproto.TreeSyncMessage) error {
	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "BroadcastAsyncOrSendResponsible", arg0)
+	ret := m.ctrl.Call(m, "BroadcastAsyncOrSendResponsible", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// BroadcastAsyncOrSendResponsible indicates an expected call of BroadcastAsyncOrSendResponsible.
-func (mr *MockSyncClientMockRecorder) BroadcastAsyncOrSendResponsible(arg0 interface{}) *gomock.Call {
+func (mr *MockSyncClientMockRecorder) BroadcastAsyncOrSendResponsible(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BroadcastAsyncOrSendResponsible", reflect.TypeOf((*MockSyncClient)(nil).BroadcastAsyncOrSendResponsible), arg0)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BroadcastAsyncOrSendResponsible", reflect.TypeOf((*MockSyncClient)(nil).BroadcastAsyncOrSendResponsible), arg0, arg1)
}

// CreateFullSyncRequest mocks base method.
@@ -126,18 +126,18 @@ func (mr *MockSyncClientMockRecorder) CreateNewTreeRequest() *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNewTreeRequest", reflect.TypeOf((*MockSyncClient)(nil).CreateNewTreeRequest))
}

-// SendAsync mocks base method.
-func (m *MockSyncClient) SendAsync(arg0 string, arg1 *treechangeproto.TreeSyncMessage, arg2 string) error {
+// SendWithReply mocks base method.
+func (m *MockSyncClient) SendWithReply(arg0 context.Context, arg1 string, arg2 *treechangeproto.TreeSyncMessage, arg3 string) error {
	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "SendAsync", arg0, arg1, arg2)
+	ret := m.ctrl.Call(m, "SendWithReply", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(error)
	return ret0
}

-// SendAsync indicates an expected call of SendAsync.
-func (mr *MockSyncClientMockRecorder) SendAsync(arg0, arg1, arg2 interface{}) *gomock.Call {
+// SendWithReply indicates an expected call of SendWithReply.
+func (mr *MockSyncClientMockRecorder) SendWithReply(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAsync", reflect.TypeOf((*MockSyncClient)(nil).SendAsync), arg0, arg1, arg2)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendWithReply", reflect.TypeOf((*MockSyncClient)(nil).SendWithReply), arg0, arg1, arg2, arg3)
}

// MockSyncTree is a mock of SyncTree interface.
@@ -395,17 +395,17 @@ func (mr *MockSyncTreeMockRecorder) Lock() *gomock.Call {
}

// Ping mocks base method.
-func (m *MockSyncTree) Ping() error {
+func (m *MockSyncTree) Ping(arg0 context.Context) error {
	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Ping")
+	ret := m.ctrl.Call(m, "Ping", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// Ping indicates an expected call of Ping.
-func (mr *MockSyncTreeMockRecorder) Ping() *gomock.Call {
+func (mr *MockSyncTreeMockRecorder) Ping(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockSyncTree)(nil).Ping))
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockSyncTree)(nil).Ping), arg0)
}

// RLock mocks base method.
@@ -1,36 +0,0 @@ (file removed)
package synctree

import (
	"github.com/anytypeio/any-sync/commonspace/object/tree/treechangeproto"
	"github.com/anytypeio/any-sync/commonspace/objectsync"
)

type queuedClient struct {
	SyncClient
	queue objectsync.ActionQueue
}

func newQueuedClient(client SyncClient, queue objectsync.ActionQueue) SyncClient {
	return &queuedClient{
		SyncClient: client,
		queue:      queue,
	}
}

func (q *queuedClient) BroadcastAsync(message *treechangeproto.TreeSyncMessage) (err error) {
	return q.queue.Send(func() error {
		return q.SyncClient.BroadcastAsync(message)
	})
}

func (q *queuedClient) SendAsync(peerId string, message *treechangeproto.TreeSyncMessage, replyId string) (err error) {
	return q.queue.Send(func() error {
		return q.SyncClient.SendAsync(peerId, message, replyId)
	})
}

func (q *queuedClient) BroadcastAsyncOrSendResponsible(message *treechangeproto.TreeSyncMessage) (err error) {
	return q.queue.Send(func() error {
		return q.SyncClient.BroadcastAsyncOrSendResponsible(message)
	})
}
@@ -2,6 +2,7 @@
package synctree

import (
+	"context"
	"github.com/anytypeio/any-sync/commonspace/confconnector"
	"github.com/anytypeio/any-sync/commonspace/object/tree/treechangeproto"
	"github.com/anytypeio/any-sync/commonspace/objectsync"
@@ -11,72 +12,61 @@ import (

type SyncClient interface {
	RequestFactory
-	BroadcastAsync(message *treechangeproto.TreeSyncMessage) (err error)
-	BroadcastAsyncOrSendResponsible(message *treechangeproto.TreeSyncMessage) (err error)
-	SendAsync(peerId string, message *treechangeproto.TreeSyncMessage, replyId string) (err error)
+	Broadcast(ctx context.Context, msg *treechangeproto.TreeSyncMessage) (err error)
+	BroadcastAsyncOrSendResponsible(ctx context.Context, msg *treechangeproto.TreeSyncMessage) (err error)
+	SendWithReply(ctx context.Context, peerId string, msg *treechangeproto.TreeSyncMessage, replyId string) (err error)
}

type syncClient struct {
-	objectsync.StreamPool
+	objectsync.MessagePool
	RequestFactory
	spaceId       string
	connector     confconnector.ConfConnector
	configuration nodeconf.Configuration
-
-	checker objectsync.StreamChecker
}

func newSyncClient(
	spaceId string,
-	pool objectsync.StreamPool,
+	pool objectsync.MessagePool,
	factory RequestFactory,
-	configuration nodeconf.Configuration,
-	checker objectsync.StreamChecker) SyncClient {
+	configuration nodeconf.Configuration) SyncClient {
	return &syncClient{
-		StreamPool:     pool,
+		MessagePool:    pool,
		RequestFactory: factory,
		configuration:  configuration,
-		checker:        checker,
		spaceId:        spaceId,
	}
}

-func (s *syncClient) BroadcastAsync(message *treechangeproto.TreeSyncMessage) (err error) {
-	objMsg, err := marshallTreeMessage(message, message.RootChange.Id, "")
+func (s *syncClient) Broadcast(ctx context.Context, msg *treechangeproto.TreeSyncMessage) (err error) {
+	objMsg, err := marshallTreeMessage(msg, s.spaceId, msg.RootChange.Id, "")
	if err != nil {
		return
	}
-	s.checker.CheckResponsiblePeers()
-	return s.StreamPool.BroadcastAsync(objMsg)
+	return s.MessagePool.Broadcast(ctx, objMsg)
}

-func (s *syncClient) SendAsync(peerId string, message *treechangeproto.TreeSyncMessage, replyId string) (err error) {
-	err = s.checker.CheckPeerConnection(peerId)
+func (s *syncClient) SendWithReply(ctx context.Context, peerId string, msg *treechangeproto.TreeSyncMessage, replyId string) (err error) {
+	objMsg, err := marshallTreeMessage(msg, s.spaceId, msg.RootChange.Id, replyId)
	if err != nil {
		return
	}
-
-	objMsg, err := marshallTreeMessage(message, message.RootChange.Id, replyId)
-	if err != nil {
-		return
-	}
-	return s.StreamPool.SendAsync([]string{peerId}, objMsg)
+	return s.MessagePool.SendPeer(ctx, peerId, objMsg)
}

-func (s *syncClient) BroadcastAsyncOrSendResponsible(message *treechangeproto.TreeSyncMessage) (err error) {
-	objMsg, err := marshallTreeMessage(message, message.RootChange.Id, "")
+func (s *syncClient) BroadcastAsyncOrSendResponsible(ctx context.Context, message *treechangeproto.TreeSyncMessage) (err error) {
+	objMsg, err := marshallTreeMessage(message, s.spaceId, message.RootChange.Id, "")
	if err != nil {
		return
	}

	if s.configuration.IsResponsible(s.spaceId) {
-		s.checker.CheckResponsiblePeers()
-		return s.StreamPool.SendAsync(s.configuration.NodeIds(s.spaceId), objMsg)
+		return s.MessagePool.SendResponsible(ctx, objMsg)
	}
-	return s.BroadcastAsync(message)
+	return s.Broadcast(ctx, message)
}

-func marshallTreeMessage(message *treechangeproto.TreeSyncMessage, id, replyId string) (objMsg *spacesyncproto.ObjectSyncMessage, err error) {
+func marshallTreeMessage(message *treechangeproto.TreeSyncMessage, spaceId, objectId, replyId string) (objMsg *spacesyncproto.ObjectSyncMessage, err error) {
	payload, err := message.Marshal()
	if err != nil {
		return
@@ -84,7 +74,8 @@ func marshallTreeMessage(message *treechangeproto.TreeSyncMessage, id, replyId s
	objMsg = &spacesyncproto.ObjectSyncMessage{
		ReplyId:  replyId,
		Payload:  payload,
-		ObjectId: id,
+		ObjectId: objectId,
+		SpaceId:  spaceId,
	}
	return
}

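Note: with SpaceId now on the envelope, a receiver can demultiplex messages arriving on a single shared stream. A hypothetical routing sketch (the registry map and function below are assumptions for illustration, not part of this commit):

// route looks up the handler for the message's space; this lookup is only
// possible because every ObjectSyncMessage now carries its SpaceId.
func route(ctx context.Context, senderId string, msg *spacesyncproto.ObjectSyncMessage, spaces map[string]synchandler.SyncHandler) error {
	h, ok := spaces[msg.SpaceId]
	if !ok {
		return fmt.Errorf("unknown space %q", msg.SpaceId)
	}
	return h.HandleMessage(ctx, senderId, msg)
}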
@@ -38,7 +38,7 @@ type SyncTree interface {
	objecttree.ObjectTree
	synchandler.SyncHandler
	ListenerSetter
-	Ping() (err error)
+	Ping(ctx context.Context) (err error)
}

// SyncTree sends head updates to sync service and also sends new changes to update listener
@@ -54,10 +54,10 @@ type syncTree struct {
	isDeleted bool
}

-var log = logger.NewNamed("commonspace.synctree").Sugar()
+var log = logger.NewNamed("commonspace.synctree")

var buildObjectTree = objecttree.BuildObjectTree
-var createSyncClient = newWrappedSyncClient
+var createSyncClient = newSyncClient

type BuildDeps struct {
	SpaceId string
@@ -73,39 +73,21 @@ type BuildDeps struct {
	WaitTreeRemoteSync bool
}

-func newWrappedSyncClient(
-	spaceId string,
-	factory RequestFactory,
-	objectSync objectsync.ObjectSync,
-	configuration nodeconf.Configuration) SyncClient {
-	syncClient := newSyncClient(spaceId, objectSync.StreamPool(), factory, configuration, objectSync.StreamChecker())
-	return newQueuedClient(syncClient, objectSync.ActionQueue())
-}
-
func BuildSyncTreeOrGetRemote(ctx context.Context, id string, deps BuildDeps) (t SyncTree, err error) {
	getTreeRemote := func() (msg *treechangeproto.TreeSyncMessage, err error) {
-		streamChecker := deps.ObjectSync.StreamChecker()
		peerId, err := peer.CtxPeerId(ctx)
		if err != nil {
-			streamChecker.CheckResponsiblePeers()
-			peerId, err = streamChecker.FirstResponsiblePeer()
-			if err != nil {
-				return
-			}
+			log.WarnCtx(ctx, "peer not found in context, use first responsible")
+			peerId = deps.Configuration.NodeIds(deps.SpaceId)[0]
		}

		newTreeRequest := GetRequestFactory().CreateNewTreeRequest()
-		objMsg, err := marshallTreeMessage(newTreeRequest, id, "")
+		objMsg, err := marshallTreeMessage(newTreeRequest, deps.SpaceId, id, "")
		if err != nil {
			return
		}

-		err = deps.ObjectSync.StreamChecker().CheckPeerConnection(peerId)
-		if err != nil {
-			return
-		}
-
-		resp, err := deps.ObjectSync.StreamPool().SendSync(peerId, objMsg)
+		resp, err := deps.ObjectSync.MessagePool().SendSync(ctx, peerId, objMsg)
		if err != nil {
			return
		}
@@ -169,7 +151,7 @@ func BuildSyncTreeOrGetRemote(ctx context.Context, id string, deps BuildDeps) (t
	}

	// basically building tree with in-memory storage and validating that it was without errors
-	log.With(zap.String("id", id)).Debug("validating tree")
+	log.With(zap.String("id", id)).DebugCtx(ctx, "validating tree")
	err = objecttree.ValidateRawTree(payload, deps.AclList)
	if err != nil {
		return
@@ -197,8 +179,8 @@ func buildSyncTree(ctx context.Context, isFirstBuild bool, deps BuildDeps) (t Sy
	}
	syncClient := createSyncClient(
		deps.SpaceId,
+		deps.ObjectSync.MessagePool(),
		sharedFactory,
-		deps.ObjectSync,
		deps.Configuration)
	syncTree := &syncTree{
		ObjectTree: objTree,
@@ -218,7 +200,9 @@ func buildSyncTree(ctx context.Context, isFirstBuild bool, deps BuildDeps) (t Sy
	if isFirstBuild {
		headUpdate := syncTree.syncClient.CreateHeadUpdate(t, nil)
		// send to everybody, because everybody should know that the node or client got new tree
-		syncTree.syncClient.BroadcastAsync(headUpdate)
+		if e := syncTree.syncClient.Broadcast(ctx, headUpdate); e != nil {
+			log.ErrorCtx(ctx, "broadcast error", zap.Error(e))
+		}
	}
	return
}
@@ -255,7 +239,7 @@ func (s *syncTree) AddContent(ctx context.Context, content objecttree.SignableCh
	}
	s.syncStatus.HeadsChange(s.Id(), res.Heads)
	headUpdate := s.syncClient.CreateHeadUpdate(s, res.Added)
-	err = s.syncClient.BroadcastAsync(headUpdate)
+	err = s.syncClient.Broadcast(ctx, headUpdate)
	return
}

@@ -282,13 +266,13 @@ func (s *syncTree) AddRawChanges(ctx context.Context, changesPayload objecttree.
			s.notifiable.UpdateHeads(s.Id(), res.Heads)
		}
		headUpdate := s.syncClient.CreateHeadUpdate(s, res.Added)
-		err = s.syncClient.BroadcastAsync(headUpdate)
+		err = s.syncClient.Broadcast(ctx, headUpdate)
	}
	return
}

func (s *syncTree) Delete() (err error) {
-	log.With("id", s.Id()).Debug("deleting sync tree")
+	log.With(zap.String("id", s.Id())).Debug("deleting sync tree")
	s.Lock()
	defer s.Unlock()
	if err = s.checkAlive(); err != nil {
@@ -303,7 +287,7 @@ func (s *syncTree) Delete() (err error) {
}

func (s *syncTree) Close() (err error) {
-	log.With("id", s.Id()).Debug("closing sync tree")
+	log.With(zap.String("id", s.Id())).Debug("closing sync tree")
	s.Lock()
	defer s.Unlock()
	if s.isClosed {
@@ -324,11 +308,11 @@ func (s *syncTree) checkAlive() (err error) {
	return
}

-func (s *syncTree) Ping() (err error) {
+func (s *syncTree) Ping(ctx context.Context) (err error) {
	s.Lock()
	defer s.Unlock()
	headUpdate := s.syncClient.CreateHeadUpdate(s, nil)
-	return s.syncClient.BroadcastAsyncOrSendResponsible(headUpdate)
+	return s.syncClient.BroadcastAsyncOrSendResponsible(ctx, headUpdate)
}

func (s *syncTree) afterBuild() {
@@ -73,7 +73,7 @@ func Test_BuildSyncTree(t *testing.T) {
	updateListenerMock.EXPECT().Update(tr)

	syncClientMock.EXPECT().CreateHeadUpdate(gomock.Eq(tr), gomock.Eq(changes)).Return(headUpdate)
-	syncClientMock.EXPECT().BroadcastAsync(gomock.Eq(headUpdate)).Return(nil)
+	syncClientMock.EXPECT().Broadcast(gomock.Any(), gomock.Eq(headUpdate)).Return(nil)
	res, err := tr.AddRawChanges(ctx, payload)
	require.NoError(t, err)
	require.Equal(t, expectedRes, res)

@@ -95,7 +95,7 @@ func Test_BuildSyncTree(t *testing.T) {
	updateListenerMock.EXPECT().Rebuild(tr)

	syncClientMock.EXPECT().CreateHeadUpdate(gomock.Eq(tr), gomock.Eq(changes)).Return(headUpdate)
-	syncClientMock.EXPECT().BroadcastAsync(gomock.Eq(headUpdate)).Return(nil)
+	syncClientMock.EXPECT().Broadcast(gomock.Any(), gomock.Eq(headUpdate)).Return(nil)
	res, err := tr.AddRawChanges(ctx, payload)
	require.NoError(t, err)
	require.Equal(t, expectedRes, res)

@@ -133,7 +133,7 @@ func Test_BuildSyncTree(t *testing.T) {
		Return(expectedRes, nil)

	syncClientMock.EXPECT().CreateHeadUpdate(gomock.Eq(tr), gomock.Eq(changes)).Return(headUpdate)
-	syncClientMock.EXPECT().BroadcastAsync(gomock.Eq(headUpdate)).Return(nil)
+	syncClientMock.EXPECT().Broadcast(gomock.Any(), gomock.Eq(headUpdate)).Return(nil)
	res, err := tr.AddContent(ctx, content)
	require.NoError(t, err)
	require.Equal(t, expectedRes, res)
@@ -41,7 +41,7 @@ func (s *syncTreeHandler) HandleMessage(ctx context.Context, senderId string, ms

	s.syncStatus.HeadsReceive(senderId, msg.ObjectId, treechangeproto.GetHeads(unmarshalled))

-	queueFull := s.queue.AddMessage(senderId, unmarshalled, msg.ReplyId)
+	queueFull := s.queue.AddMessage(senderId, unmarshalled, msg.RequestId)
	if queueFull {
		return
	}
@@ -82,36 +82,37 @@ func (s *syncTreeHandler) handleHeadUpdate(
		objTree = s.objTree
	)

-	log := log.With("senderId", senderId).
-		With("heads", objTree.Heads()).
-		With("treeId", objTree.Id())
-	log.Debug("received head update message")
+	log := log.With(zap.Strings("heads", objTree.Heads()), zap.String("treeId", objTree.Id()))
+	log.DebugCtx(ctx, "received head update message")

	defer func() {
		if err != nil {
			log.With(zap.Error(err)).Debug("head update finished with error")
		} else if fullRequest != nil {
-			log.Debug("sending full sync request")
+			log.DebugCtx(ctx, "sending full sync request")
		} else {
			if !isEmptyUpdate {
-				log.Debug("head update finished correctly")
+				log.DebugCtx(ctx, "head update finished correctly")
			}
		}
	}()

	// isEmptyUpdate is sent when the tree is brought up from cache
	if isEmptyUpdate {
-		log.With("treeId", objTree.Id()).Debug("is empty update")
-		if slice.UnsortedEquals(objTree.Heads(), update.Heads) {

+		headEquals := slice.UnsortedEquals(objTree.Heads(), update.Heads)
+		log.DebugCtx(ctx, "is empty update", zap.String("treeId", objTree.Id()), zap.Bool("headEquals", headEquals))
+		if headEquals {
			return
		}

		// we need to sync in any case
		fullRequest, err = s.syncClient.CreateFullSyncRequest(objTree, update.Heads, update.SnapshotPath)
		if err != nil {
			return
		}

-		return s.syncClient.SendAsync(senderId, fullRequest, replyId)
+		return s.syncClient.SendWithReply(ctx, senderId, fullRequest, replyId)
	}

	if s.alreadyHasHeads(objTree, update.Heads) {
@@ -135,7 +136,7 @@ func (s *syncTreeHandler) handleHeadUpdate(
		return
	}

-	return s.syncClient.SendAsync(senderId, fullRequest, replyId)
+	return s.syncClient.SendWithReply(ctx, senderId, fullRequest, replyId)
}

func (s *syncTreeHandler) handleFullSyncRequest(
@@ -149,20 +150,17 @@ func (s *syncTreeHandler) handleFullSyncRequest(
		objTree = s.objTree
	)

-	log := log.With("senderId", senderId).
-		With("heads", request.Heads).
-		With("treeId", s.objTree.Id()).
-		With("replyId", replyId)
-	log.Debug("received full sync request message")
+	log := log.With(zap.String("senderId", senderId), zap.Strings("heads", request.Heads), zap.String("treeId", s.objTree.Id()), zap.String("replyId", replyId))
+	log.DebugCtx(ctx, "received full sync request message")

	defer func() {
		if err != nil {
-			log.With(zap.Error(err)).Debug("full sync request finished with error")
+			log.With(zap.Error(err)).DebugCtx(ctx, "full sync request finished with error")

-			s.syncClient.SendAsync(senderId, treechangeproto.WrapError(err, header), replyId)
+			s.syncClient.SendWithReply(ctx, senderId, treechangeproto.WrapError(err, header), replyId)
			return
		} else if fullResponse != nil {
-			log.Debug("full sync response sent")
+			log.DebugCtx(ctx, "full sync response sent")
		}
	}()

@@ -180,7 +178,7 @@ func (s *syncTreeHandler) handleFullSyncRequest(
		return
	}

-	return s.syncClient.SendAsync(senderId, fullResponse, replyId)
+	return s.syncClient.SendWithReply(ctx, senderId, fullResponse, replyId)
}

func (s *syncTreeHandler) handleFullSyncResponse(
@@ -190,16 +188,14 @@ func (s *syncTreeHandler) handleFullSyncResponse(
	var (
		objTree = s.objTree
	)
-	log := log.With("senderId", senderId).
-		With("heads", response.Heads).
-		With("treeId", s.objTree.Id())
-	log.Debug("received full sync response message")
+	log := log.With(zap.Strings("heads", response.Heads), zap.String("treeId", s.objTree.Id()))
+	log.DebugCtx(ctx, "received full sync response message")

	defer func() {
		if err != nil {
-			log.With(zap.Error(err)).Debug("full sync response failed")
+			log.With(zap.Error(err)).DebugCtx(ctx, "full sync response failed")
		} else {
-			log.Debug("full sync response succeeded")
+			log.DebugCtx(ctx, "full sync response succeeded")
		}
	}()

@@ -3,6 +3,7 @@ package synctree

import (
	"context"
	"fmt"
+	"github.com/anytypeio/any-sync/app/logger"
	"github.com/anytypeio/any-sync/commonspace/object/tree/objecttree"
	"github.com/anytypeio/any-sync/commonspace/object/tree/objecttree/mock_objecttree"
	"github.com/anytypeio/any-sync/commonspace/object/tree/synctree/mock_synctree"
@@ -38,7 +39,7 @@ type syncHandlerFixture struct {
	ctrl             *gomock.Controller
	syncClientMock   *mock_synctree.MockSyncClient
	objectTreeMock   *testObjTreeMock
-	receiveQueueMock *mock_synctree.MockReceiveQueue
+	receiveQueueMock ReceiveQueue

	syncHandler *syncTreeHandler
}
@@ -47,19 +48,19 @@ func newSyncHandlerFixture(t *testing.T) *syncHandlerFixture {
	ctrl := gomock.NewController(t)
	syncClientMock := mock_synctree.NewMockSyncClient(ctrl)
	objectTreeMock := newTestObjMock(mock_objecttree.NewMockObjectTree(ctrl))
-	receiveQueueMock := mock_synctree.NewMockReceiveQueue(ctrl)
+	receiveQueue := newReceiveQueue(5)

	syncHandler := &syncTreeHandler{
		objTree:    objectTreeMock,
		syncClient: syncClientMock,
-		queue:      receiveQueueMock,
+		queue:      receiveQueue,
		syncStatus: syncstatus.NewNoOpSyncStatus(),
	}
	return &syncHandlerFixture{
		ctrl:           ctrl,
		syncClientMock: syncClientMock,
		objectTreeMock: objectTreeMock,
-		receiveQueueMock: receiveQueueMock,
+		receiveQueueMock: receiveQueue,
		syncHandler: syncHandler,
	}
}
@@ -70,7 +71,7 @@ func (fx *syncHandlerFixture) stop() {

func TestSyncHandler_HandleHeadUpdate(t *testing.T) {
	ctx := context.Background()
-	log = zap.NewNop().Sugar()
+	log = logger.CtxLogger{Logger: zap.NewNop()}

	t.Run("head update non empty all heads added", func(t *testing.T) {
		fx := newSyncHandlerFixture(t)
@@ -84,10 +85,7 @@ func TestSyncHandler_HandleHeadUpdate(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapHeadUpdate(headUpdate, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, "")
-
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), "").Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, "", nil)
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, "")

		fx.objectTreeMock.EXPECT().Id().AnyTimes().Return(treeId)
		fx.objectTreeMock.EXPECT().Heads().Return([]string{"h2"}).Times(2)
@@ -101,7 +99,6 @@ func TestSyncHandler_HandleHeadUpdate(t *testing.T) {
		fx.objectTreeMock.EXPECT().Heads().Return([]string{"h2", "h1"})
		fx.objectTreeMock.EXPECT().HasChanges(gomock.Eq([]string{"h1"})).Return(true)

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -118,10 +115,8 @@ func TestSyncHandler_HandleHeadUpdate(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapHeadUpdate(headUpdate, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, "")
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, "")
		fullRequest := &treechangeproto.TreeSyncMessage{}
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), "").Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, "", nil)

		fx.objectTreeMock.EXPECT().Id().AnyTimes().Return(treeId)
		fx.objectTreeMock.EXPECT().Heads().Return([]string{"h2"}).AnyTimes()
@@ -136,9 +131,8 @@ func TestSyncHandler_HandleHeadUpdate(t *testing.T) {
		fx.syncClientMock.EXPECT().
			CreateFullSyncRequest(gomock.Eq(fx.objectTreeMock), gomock.Eq([]string{"h1"}), gomock.Eq([]string{"h1"})).
			Return(fullRequest, nil)
-		fx.syncClientMock.EXPECT().SendAsync(gomock.Eq(senderId), gomock.Eq(fullRequest), gomock.Eq(""))
+		fx.syncClientMock.EXPECT().SendWithReply(gomock.Any(), gomock.Eq(senderId), gomock.Eq(fullRequest), gomock.Eq(""))

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -155,14 +149,11 @@ func TestSyncHandler_HandleHeadUpdate(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapHeadUpdate(headUpdate, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, "")
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), "").Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, "", nil)
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, "")

		fx.objectTreeMock.EXPECT().Id().AnyTimes().Return(treeId)
		fx.objectTreeMock.EXPECT().Heads().Return([]string{"h1"}).AnyTimes()

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -179,19 +170,16 @@ func TestSyncHandler_HandleHeadUpdate(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapHeadUpdate(headUpdate, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, "")
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, "")
		fullRequest := &treechangeproto.TreeSyncMessage{}
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), "").Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, "", nil)

		fx.objectTreeMock.EXPECT().Id().AnyTimes().Return(treeId)
		fx.objectTreeMock.EXPECT().Heads().Return([]string{"h2"}).AnyTimes()
		fx.syncClientMock.EXPECT().
			CreateFullSyncRequest(gomock.Eq(fx.objectTreeMock), gomock.Eq([]string{"h1"}), gomock.Eq([]string{"h1"})).
			Return(fullRequest, nil)
-		fx.syncClientMock.EXPECT().SendAsync(gomock.Eq(senderId), gomock.Eq(fullRequest), gomock.Eq(""))
+		fx.syncClientMock.EXPECT().SendWithReply(gomock.Any(), gomock.Eq(senderId), gomock.Eq(fullRequest), gomock.Eq(""))

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -208,14 +196,11 @@ func TestSyncHandler_HandleHeadUpdate(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapHeadUpdate(headUpdate, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, "")
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), "").Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, "", nil)
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, "")

		fx.objectTreeMock.EXPECT().Id().AnyTimes().Return(treeId)
		fx.objectTreeMock.EXPECT().Heads().Return([]string{"h1"}).AnyTimes()

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -223,7 +208,7 @@ func TestSyncHandler_HandleHeadUpdate(t *testing.T) {

func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {
	ctx := context.Background()
-	log = zap.NewNop().Sugar()
+	log = logger.CtxLogger{Logger: zap.NewNop()}

	t.Run("full sync request with change", func(t *testing.T) {
		fx := newSyncHandlerFixture(t)
@@ -237,10 +222,8 @@ func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapFullRequest(fullSyncRequest, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, "")
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, "")
		fullResponse := &treechangeproto.TreeSyncMessage{}
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), "").Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, "", nil)

		fx.objectTreeMock.EXPECT().Id().AnyTimes().Return(treeId)
		fx.objectTreeMock.EXPECT().Header().Return(nil)
@@ -255,9 +238,8 @@ func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {
		fx.syncClientMock.EXPECT().
			CreateFullSyncResponse(gomock.Eq(fx.objectTreeMock), gomock.Eq([]string{"h1"}), gomock.Eq([]string{"h1"})).
			Return(fullResponse, nil)
-		fx.syncClientMock.EXPECT().SendAsync(gomock.Eq(senderId), gomock.Eq(fullResponse), gomock.Eq(""))
+		fx.syncClientMock.EXPECT().SendWithReply(gomock.Any(), gomock.Eq(senderId), gomock.Eq(fullResponse), gomock.Eq(""))

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -274,10 +256,8 @@ func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapFullRequest(fullSyncRequest, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, "")
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, "")
		fullResponse := &treechangeproto.TreeSyncMessage{}
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), "").Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, "", nil)

		fx.objectTreeMock.EXPECT().
			Id().AnyTimes().Return(treeId)
@@ -288,9 +268,8 @@ func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {
		fx.syncClientMock.EXPECT().
			CreateFullSyncResponse(gomock.Eq(fx.objectTreeMock), gomock.Eq([]string{"h1"}), gomock.Eq([]string{"h1"})).
			Return(fullResponse, nil)
-		fx.syncClientMock.EXPECT().SendAsync(gomock.Eq(senderId), gomock.Eq(fullResponse), gomock.Eq(""))
+		fx.syncClientMock.EXPECT().SendWithReply(gomock.Any(), gomock.Eq(senderId), gomock.Eq(fullResponse), gomock.Eq(""))

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -307,10 +286,9 @@ func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapFullRequest(fullSyncRequest, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, replyId)
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, "")
+		objectMsg.RequestId = replyId
		fullResponse := &treechangeproto.TreeSyncMessage{}
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), replyId).Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, replyId, nil)

		fx.objectTreeMock.EXPECT().
			Id().AnyTimes().Return(treeId)
@@ -318,9 +296,8 @@ func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {
		fx.syncClientMock.EXPECT().
			CreateFullSyncResponse(gomock.Eq(fx.objectTreeMock), gomock.Eq([]string{"h1"}), gomock.Eq([]string{"h1"})).
			Return(fullResponse, nil)
-		fx.syncClientMock.EXPECT().SendAsync(gomock.Eq(senderId), gomock.Eq(fullResponse), gomock.Eq(replyId))
+		fx.syncClientMock.EXPECT().SendWithReply(gomock.Any(), gomock.Eq(senderId), gomock.Eq(fullResponse), gomock.Eq(replyId))

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -337,9 +314,7 @@ func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapFullRequest(fullSyncRequest, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, "")
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), "").Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, "", nil)
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, "")

		fx.objectTreeMock.EXPECT().
			Id().AnyTimes().Return(treeId)
@@ -356,9 +331,8 @@ func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {
			RawChanges: []*treechangeproto.RawTreeChangeWithId{chWithId},
		})).
		Return(objecttree.AddResult{}, fmt.Errorf(""))
-		fx.syncClientMock.EXPECT().SendAsync(gomock.Eq(senderId), gomock.Any(), gomock.Eq(""))
+		fx.syncClientMock.EXPECT().SendWithReply(gomock.Any(), gomock.Eq(senderId), gomock.Any(), gomock.Eq(""))

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.Error(t, err)
	})
@@ -366,7 +340,7 @@ func TestSyncHandler_HandleFullSyncRequest(t *testing.T) {

func TestSyncHandler_HandleFullSyncResponse(t *testing.T) {
	ctx := context.Background()
-	log = zap.NewNop().Sugar()
+	log = logger.CtxLogger{Logger: zap.NewNop()}

	t.Run("full sync response with change", func(t *testing.T) {
		fx := newSyncHandlerFixture(t)
@@ -381,9 +355,7 @@ func TestSyncHandler_HandleFullSyncResponse(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapFullResponse(fullSyncResponse, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, replyId)
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), replyId).Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, replyId, nil)
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, replyId)

		fx.objectTreeMock.EXPECT().Id().AnyTimes().Return(treeId)
		fx.objectTreeMock.EXPECT().
@@ -399,7 +371,6 @@ func TestSyncHandler_HandleFullSyncResponse(t *testing.T) {
		})).
		Return(objecttree.AddResult{}, nil)

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -417,16 +388,13 @@ func TestSyncHandler_HandleFullSyncResponse(t *testing.T) {
			SnapshotPath: []string{"h1"},
		}
		treeMsg := treechangeproto.WrapFullResponse(fullSyncResponse, chWithId)
-		objectMsg, _ := marshallTreeMessage(treeMsg, treeId, replyId)
-		fx.receiveQueueMock.EXPECT().AddMessage(senderId, gomock.Eq(treeMsg), replyId).Return(false)
-		fx.receiveQueueMock.EXPECT().GetMessage(senderId).Return(treeMsg, replyId, nil)
+		objectMsg, _ := marshallTreeMessage(treeMsg, "spaceId", treeId, replyId)

		fx.objectTreeMock.EXPECT().Id().AnyTimes().Return(treeId)
		fx.objectTreeMock.EXPECT().
			Heads().
			Return([]string{"h1"}).AnyTimes()

-		fx.receiveQueueMock.EXPECT().ClearQueue(senderId)
		err := fx.syncHandler.HandleMessage(ctx, senderId, objectMsg)
		require.NoError(t, err)
	})
@@ -1,78 +0,0 @@ (file removed)
package objectsync

import (
	"context"
	"github.com/cheggaaa/mb/v3"
	"go.uber.org/zap"
)

type ActionFunc func() error

type ActionQueue interface {
	Send(action ActionFunc) (err error)
	Run()
	Close()
}

type actionQueue struct {
	batcher     *mb.MB[ActionFunc]
	maxReaders  int
	maxQueueLen int
	readers     chan struct{}
}

func NewDefaultActionQueue() ActionQueue {
	return NewActionQueue(10, 200)
}

func NewActionQueue(maxReaders int, maxQueueLen int) ActionQueue {
	return &actionQueue{
		batcher:     mb.New[ActionFunc](maxQueueLen),
		maxReaders:  maxReaders,
		maxQueueLen: maxQueueLen,
	}
}

func (q *actionQueue) Send(action ActionFunc) (err error) {
	log.Debug("adding action to batcher")
	err = q.batcher.TryAdd(action)
	if err == nil {
		return
	}
	log.With(zap.Error(err)).Debug("queue returned error")
	actions := q.batcher.GetAll()
	actions = append(actions[len(actions)/2:], action)
	return q.batcher.Add(context.Background(), actions...)
}

func (q *actionQueue) Run() {
	log.Debug("running the queue")
	q.readers = make(chan struct{}, q.maxReaders)
	for i := 0; i < q.maxReaders; i++ {
		go q.startReading()
	}
}

func (q *actionQueue) startReading() {
	defer func() {
		q.readers <- struct{}{}
	}()
	for {
		action, err := q.batcher.WaitOne(context.Background())
		if err != nil {
			return
		}
		err = action()
		if err != nil {
			log.With(zap.Error(err)).Debug("action errored out")
		}
	}
}

func (q *actionQueue) Close() {
	log.Debug("closing the queue")
	q.batcher.Close()
	for i := 0; i < q.maxReaders; i++ {
		<-q.readers
	}
}
@@ -1,54 +0,0 @@ (file removed)
package objectsync

import (
	"fmt"
	"github.com/stretchr/testify/require"
	"sync/atomic"
	"testing"
)

func TestActionQueue_Send(t *testing.T) {
	maxReaders := 41
	maxLen := 93

	queue := NewActionQueue(maxReaders, maxLen).(*actionQueue)
	counter := atomic.Int32{}
	expectedCounter := int32(maxReaders + (maxLen+1)/2 + 1)
	blocker := make(chan struct{}, expectedCounter)
	waiter := make(chan struct{}, expectedCounter)
	increase := func() error {
		counter.Add(1)
		waiter <- struct{}{}
		<-blocker
		return nil
	}

	queue.Run()
	// sending maxReaders messages, so the goroutines will block on `blocker` channel
	for i := 0; i < maxReaders; i++ {
		queue.Send(increase)
	}
	// waiting until they all make progress
	for i := 0; i < maxReaders; i++ {
		<-waiter
	}
	fmt.Println(counter.Load())
	// check that queue is empty
	require.Equal(t, queue.batcher.Len(), 0)
	// making queue to overflow while readers are blocked
	for i := 0; i < maxLen+1; i++ {
		queue.Send(increase)
	}
	// check that queue was halved after overflow
	require.Equal(t, (maxLen+1)/2+1, queue.batcher.Len())
	// unblocking maxReaders waiting + then we should also unblock the new readers to do a bit more readings
	for i := 0; i < int(expectedCounter); i++ {
		blocker <- struct{}{}
	}
	// waiting for all readers to finish adding
	for i := 0; i < int(expectedCounter)-maxReaders; i++ {
		<-waiter
	}
	queue.Close()
	require.Equal(t, expectedCounter, counter.Load())
}
@@ -1,73 +0,0 @@ (file removed)
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/anytypeio/any-sync/commonspace/objectsync (interfaces: ActionQueue)

// Package mock_objectsync is a generated GoMock package.
package mock_objectsync

import (
	reflect "reflect"

	objectsync "github.com/anytypeio/any-sync/commonspace/objectsync"
	gomock "github.com/golang/mock/gomock"
)

// MockActionQueue is a mock of ActionQueue interface.
type MockActionQueue struct {
	ctrl     *gomock.Controller
	recorder *MockActionQueueMockRecorder
}

// MockActionQueueMockRecorder is the mock recorder for MockActionQueue.
type MockActionQueueMockRecorder struct {
	mock *MockActionQueue
}

// NewMockActionQueue creates a new mock instance.
func NewMockActionQueue(ctrl *gomock.Controller) *MockActionQueue {
	mock := &MockActionQueue{ctrl: ctrl}
	mock.recorder = &MockActionQueueMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockActionQueue) EXPECT() *MockActionQueueMockRecorder {
	return m.recorder
}

// Close mocks base method.
func (m *MockActionQueue) Close() {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "Close")
}

// Close indicates an expected call of Close.
func (mr *MockActionQueueMockRecorder) Close() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockActionQueue)(nil).Close))
}

// Run mocks base method.
func (m *MockActionQueue) Run() {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "Run")
}

// Run indicates an expected call of Run.
func (mr *MockActionQueueMockRecorder) Run() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockActionQueue)(nil).Run))
}

// Send mocks base method.
func (m *MockActionQueue) Send(arg0 objectsync.ActionFunc) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Send", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// Send indicates an expected call of Send.
func (mr *MockActionQueueMockRecorder) Send(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockActionQueue)(nil).Send), arg0)
}
145
commonspace/objectsync/msgpool.go
Normal file
145
commonspace/objectsync/msgpool.go
Normal file
@ -0,0 +1,145 @@
package objectsync

import (
    "context"
    "fmt"
    "github.com/anytypeio/any-sync/app/ocache"
    "github.com/anytypeio/any-sync/commonspace/objectsync/synchandler"
    "github.com/anytypeio/any-sync/commonspace/spacesyncproto"
    "go.uber.org/zap"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"
)

type StreamManager interface {
    SendPeer(ctx context.Context, peerId string, msg *spacesyncproto.ObjectSyncMessage) (err error)
    SendResponsible(ctx context.Context, msg *spacesyncproto.ObjectSyncMessage) (err error)
    Broadcast(ctx context.Context, msg *spacesyncproto.ObjectSyncMessage) (err error)
}

// MessagePool can be made generic to work with different streams
type MessagePool interface {
    ocache.ObjectLastUsage
    synchandler.SyncHandler
    StreamManager
    SendSync(ctx context.Context, peerId string, message *spacesyncproto.ObjectSyncMessage) (reply *spacesyncproto.ObjectSyncMessage, err error)
}

type MessageHandler func(ctx context.Context, senderId string, message *spacesyncproto.ObjectSyncMessage) (err error)

type responseWaiter struct {
    ch chan *spacesyncproto.ObjectSyncMessage
}

type messagePool struct {
    sync.Mutex
    StreamManager
    messageHandler MessageHandler
    waiters        map[string]responseWaiter
    waitersMx      sync.Mutex
    counter        atomic.Uint64
    lastUsage      atomic.Int64
}

func newMessagePool(streamManager StreamManager, messageHandler MessageHandler) MessagePool {
    s := &messagePool{
        StreamManager:  streamManager,
        messageHandler: messageHandler,
        waiters:        make(map[string]responseWaiter),
    }
    return s
}

func (s *messagePool) SendSync(ctx context.Context, peerId string, msg *spacesyncproto.ObjectSyncMessage) (reply *spacesyncproto.ObjectSyncMessage, err error) {
    s.updateLastUsage()
    var cancel context.CancelFunc
    ctx, cancel = context.WithTimeout(ctx, time.Second*10)
    defer cancel()
    newCounter := s.counter.Add(1)
    msg.RequestId = genReplyKey(peerId, msg.ObjectId, newCounter)
    log.InfoCtx(ctx, "mpool sendSync", zap.String("requestId", msg.RequestId))
    s.waitersMx.Lock()
    waiter := responseWaiter{
        ch: make(chan *spacesyncproto.ObjectSyncMessage, 1),
    }
    s.waiters[msg.RequestId] = waiter
    s.waitersMx.Unlock()

    err = s.SendPeer(ctx, peerId, msg)
    if err != nil {
        return
    }
    select {
    case <-ctx.Done():
        s.waitersMx.Lock()
        delete(s.waiters, msg.RequestId)
        s.waitersMx.Unlock()

        log.With(zap.String("requestId", msg.RequestId)).WarnCtx(ctx, "timed out waiting for reply")
        err = fmt.Errorf("sendSync context error: %v", ctx.Err())
    case reply = <-waiter.ch:
        // success
    }
    return
}

func (s *messagePool) SendPeer(ctx context.Context, peerId string, msg *spacesyncproto.ObjectSyncMessage) (err error) {
    s.updateLastUsage()
    return s.StreamManager.SendPeer(ctx, peerId, msg)
}

func (s *messagePool) SendResponsible(ctx context.Context, msg *spacesyncproto.ObjectSyncMessage) (err error) {
    s.updateLastUsage()
    return s.StreamManager.SendResponsible(ctx, msg)
}

func (s *messagePool) Broadcast(ctx context.Context, msg *spacesyncproto.ObjectSyncMessage) (err error) {
    s.updateLastUsage()
    return s.StreamManager.Broadcast(ctx, msg)
}

func (s *messagePool) HandleMessage(ctx context.Context, senderId string, msg *spacesyncproto.ObjectSyncMessage) (err error) {
    s.updateLastUsage()
    if msg.ReplyId != "" {
        log.InfoCtx(ctx, "mpool receive reply", zap.String("replyId", msg.ReplyId))
        // we got a reply, deliver it to the pending waiter
        if s.stopWaiter(msg) {
            return
        }
        log.WarnCtx(ctx, "reply id does not exist", zap.String("replyId", msg.ReplyId))
    }
    return s.messageHandler(ctx, senderId, msg)
}

func (s *messagePool) LastUsage() time.Time {
    return time.Unix(s.lastUsage.Load(), 0)
}

func (s *messagePool) updateLastUsage() {
    s.lastUsage.Store(time.Now().Unix())
}

func (s *messagePool) stopWaiter(msg *spacesyncproto.ObjectSyncMessage) bool {
    s.waitersMx.Lock()
    waiter, exists := s.waiters[msg.ReplyId]
    if exists {
        delete(s.waiters, msg.ReplyId)
        s.waitersMx.Unlock()
        waiter.ch <- msg
        return true
    }
    s.waitersMx.Unlock()
    return false
}

func genReplyKey(peerId, treeId string, counter uint64) string {
    b := &strings.Builder{}
    b.WriteString(peerId)
    b.WriteString(".")
    b.WriteString(treeId)
    b.WriteString(".")
    b.WriteString(strconv.FormatUint(counter, 36))
    return b.String()
}
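The request/reply correlation above hinges on genReplyKey: SendSync stamps the outgoing message with a key such as "peer1.tree1.a" (the counter is base36-encoded), parks a waiter under that key, and HandleMessage releases the waiter when a message arrives whose ReplyId matches it. A minimal sketch of that round trip with an in-process loopback StreamManager — the loopback type and the wiring are illustrative only, and it assumes synchandler.SyncHandler exposes HandleMessage with the signature used here:

// loopbackManager answers every message immediately, echoing the
// RequestId back as the ReplyId, so SendSync completes without a network.
type loopbackManager struct {
    pool MessagePool // set after newMessagePool returns
}

func (l *loopbackManager) SendPeer(ctx context.Context, peerId string, msg *spacesyncproto.ObjectSyncMessage) error {
    reply := &spacesyncproto.ObjectSyncMessage{
        ObjectId: msg.ObjectId,
        ReplyId:  msg.RequestId, // matches the waiter stored under RequestId
    }
    go func() { _ = l.pool.HandleMessage(ctx, peerId, reply) }()
    return nil
}

func (l *loopbackManager) SendResponsible(ctx context.Context, msg *spacesyncproto.ObjectSyncMessage) error {
    return nil
}

func (l *loopbackManager) Broadcast(ctx context.Context, msg *spacesyncproto.ObjectSyncMessage) error {
    return nil
}

Usage would then be: construct the pool with a no-op MessageHandler, point the loopback at it, and SendSync returns the echoed reply instead of hitting the ten-second context timeout.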
@ -1,11 +1,9 @@
//go:generate mockgen -destination mock_objectsync/mock_objectsync.go github.com/anytypeio/any-sync/commonspace/objectsync ActionQueue
package objectsync

import (
    "context"
    "github.com/anytypeio/any-sync/app/logger"
    "github.com/anytypeio/any-sync/app/ocache"
    "github.com/anytypeio/any-sync/commonspace/confconnector"
    "github.com/anytypeio/any-sync/commonspace/object/syncobjectgetter"
    "github.com/anytypeio/any-sync/commonspace/objectsync/synchandler"
    "github.com/anytypeio/any-sync/commonspace/spacesyncproto"
@ -18,9 +16,7 @@ var log = logger.NewNamed("commonspace.objectsync")
type ObjectSync interface {
    ocache.ObjectLastUsage
    synchandler.SyncHandler
    StreamPool() StreamPool
    StreamChecker() StreamChecker
    ActionQueue() ActionQueue
    MessagePool() MessagePool

    Init()
    Close() (err error)
@ -29,10 +25,8 @@ type ObjectSync interface {
type objectSync struct {
    spaceId string

    streamPool   StreamPool
    checker      StreamChecker
    messagePool  MessagePool
    objectGetter syncobjectgetter.SyncObjectGetter
    actionQueue  ActionQueue

    syncCtx    context.Context
    cancelSync context.CancelFunc
@ -40,82 +34,61 @@ type objectSync struct {

func NewObjectSync(
    spaceId string,
    confConnector confconnector.ConfConnector,
    objectGetter syncobjectgetter.SyncObjectGetter) (objectSync ObjectSync) {
    streamPool := newStreamPool(func(ctx context.Context, senderId string, message *spacesyncproto.ObjectSyncMessage) (err error) {
        return objectSync.HandleMessage(ctx, senderId, message)
    })
    clientFactory := spacesyncproto.ClientFactoryFunc(spacesyncproto.NewDRPCSpaceSyncClient)
    syncLog := log.With(zap.String("id", spaceId))
    streamManager StreamManager,
    objectGetter syncobjectgetter.SyncObjectGetter) ObjectSync {
    syncCtx, cancel := context.WithCancel(context.Background())
    checker := NewStreamChecker(
    os := newObjectSync(
        spaceId,
        confConnector,
        streamPool,
        clientFactory,
        syncCtx,
        syncLog)
    objectSync = newObjectSync(
        spaceId,
        streamPool,
        checker,
        objectGetter,
        syncCtx,
        cancel)
    return
    msgPool := newMessagePool(streamManager, os.handleMessage)
    os.messagePool = msgPool
    return os
}

func newObjectSync(
    spaceId string,
    streamPool StreamPool,
    checker StreamChecker,
    objectGetter syncobjectgetter.SyncObjectGetter,
    syncCtx context.Context,
    cancel context.CancelFunc,
) *objectSync {
    return &objectSync{
        objectGetter: objectGetter,
        streamPool:   streamPool,
        spaceId:      spaceId,
        checker:      checker,
        syncCtx:      syncCtx,
        cancelSync:   cancel,
        actionQueue:  NewDefaultActionQueue(),
        //actionQueue: NewDefaultActionQueue(),
    }
}

func (s *objectSync) Init() {
    s.actionQueue.Run()
    go s.checker.CheckResponsiblePeers()
    //s.actionQueue.Run()
}

func (s *objectSync) Close() (err error) {
    s.actionQueue.Close()
    //s.actionQueue.Close()
    s.cancelSync()
    return s.streamPool.Close()
    return
}

func (s *objectSync) LastUsage() time.Time {
    return s.streamPool.LastUsage()
    return s.messagePool.LastUsage()
}

func (s *objectSync) HandleMessage(ctx context.Context, senderId string, message *spacesyncproto.ObjectSyncMessage) (err error) {
    log.With(zap.String("peerId", senderId), zap.String("objectId", message.ObjectId)).Debug("handling message")
    obj, err := s.objectGetter.GetObject(ctx, message.ObjectId)
    return s.messagePool.HandleMessage(ctx, senderId, message)
}

func (s *objectSync) handleMessage(ctx context.Context, senderId string, msg *spacesyncproto.ObjectSyncMessage) (err error) {
    log.With(zap.String("objectId", msg.ObjectId), zap.String("replyId", msg.ReplyId)).DebugCtx(ctx, "handling message")
    obj, err := s.objectGetter.GetObject(ctx, msg.ObjectId)
    if err != nil {
        return
    }
    return obj.HandleMessage(ctx, senderId, message)
    return obj.HandleMessage(ctx, senderId, msg)
}

func (s *objectSync) StreamPool() StreamPool {
    return s.streamPool
}

func (s *objectSync) StreamChecker() StreamChecker {
    return s.checker
}

func (s *objectSync) ActionQueue() ActionQueue {
    return s.actionQueue
func (s *objectSync) MessagePool() MessagePool {
    return s.messagePool
}

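One detail worth calling out in the new constructor: newObjectSync is built first and the message pool second, because the pool needs os.handleMessage as its MessageHandler while objectSync delegates LastUsage and HandleMessage back to the pool; assigning os.messagePool afterwards breaks that cycle without the self-referential closure the old streamPool version relied on. A hedged sketch of constructing an ObjectSync in isolation with a no-op manager (the stub type and names are illustrative):

type nopManager struct{}

func (nopManager) SendPeer(ctx context.Context, peerId string, msg *spacesyncproto.ObjectSyncMessage) error {
    return nil
}
func (nopManager) SendResponsible(ctx context.Context, msg *spacesyncproto.ObjectSyncMessage) error {
    return nil
}
func (nopManager) Broadcast(ctx context.Context, msg *spacesyncproto.ObjectSyncMessage) error {
    return nil
}

func newIsolatedObjectSync(getter syncobjectgetter.SyncObjectGetter) ObjectSync {
    // no network: every send is a no-op, but HandleMessage still routes to objects
    return NewObjectSync("test.space", nopManager{}, getter)
}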
@ -1,146 +0,0 @@
package objectsync

import (
    "context"
    "fmt"
    "github.com/anytypeio/any-sync/commonspace/confconnector"
    "github.com/anytypeio/any-sync/commonspace/spacesyncproto"
    "github.com/anytypeio/any-sync/net/peer"
    "github.com/anytypeio/any-sync/net/rpc/rpcerr"
    "go.uber.org/atomic"
    "go.uber.org/zap"
    "golang.org/x/exp/slices"
    "time"
)

type StreamChecker interface {
    CheckResponsiblePeers()
    CheckPeerConnection(peerId string) (err error)
    FirstResponsiblePeer() (peerId string, err error)
}

type streamChecker struct {
    spaceId       string
    connector     confconnector.ConfConnector
    streamPool    StreamPool
    clientFactory spacesyncproto.ClientFactory
    log           *zap.Logger
    syncCtx       context.Context
    lastCheck     *atomic.Time
}

const streamCheckerInterval = time.Second * 5

func NewStreamChecker(
    spaceId string,
    connector confconnector.ConfConnector,
    streamPool StreamPool,
    clientFactory spacesyncproto.ClientFactory,
    syncCtx context.Context,
    log *zap.Logger) StreamChecker {
    return &streamChecker{
        spaceId:       spaceId,
        connector:     connector,
        streamPool:    streamPool,
        clientFactory: clientFactory,
        log:           log,
        syncCtx:       syncCtx,
        lastCheck:     atomic.NewTime(time.Time{}),
    }
}

func (s *streamChecker) CheckResponsiblePeers() {
    lastCheck := s.lastCheck.Load()
    now := time.Now()
    if lastCheck.Add(streamCheckerInterval).After(now) {
        return
    }
    s.lastCheck.Store(now)

    var (
        activeNodeIds []string
        configuration = s.connector.Configuration()
    )
    nodeIds := configuration.NodeIds(s.spaceId)
    for _, nodeId := range nodeIds {
        if s.streamPool.HasActiveStream(nodeId) {
            s.log.Debug("has active stream for", zap.String("id", nodeId))
            activeNodeIds = append(activeNodeIds, nodeId)
            continue
        }
    }
    s.log.Debug("total streams", zap.Int("total", len(activeNodeIds)))
    newPeers, err := s.connector.DialInactiveResponsiblePeers(s.syncCtx, s.spaceId, activeNodeIds)
    if err != nil {
        s.log.Error("failed to dial peers", zap.Error(err))
        return
    }

    for _, p := range newPeers {
        err := s.createStream(p)
        if err != nil {
            log.With(zap.Error(err)).Error("failed to create stream")
            continue
        }
        s.log.Debug("reading stream for", zap.String("id", p.Id()))
    }
    return
}

func (s *streamChecker) CheckPeerConnection(peerId string) (err error) {
    if s.streamPool.HasActiveStream(peerId) {
        return
    }

    var (
        configuration = s.connector.Configuration()
        pool          = s.connector.Pool()
    )
    nodeIds := configuration.NodeIds(s.spaceId)
    // we don't know the address of the peer
    if !slices.Contains(nodeIds, peerId) {
        err = fmt.Errorf("don't know the address of peer %s", peerId)
        return
    }

    newPeer, err := pool.Dial(s.syncCtx, peerId)
    if err != nil {
        return
    }
    return s.createStream(newPeer)
}

func (s *streamChecker) createStream(p peer.Peer) (err error) {
    stream, err := s.clientFactory.Client(p).ObjectSyncStream(s.syncCtx)
    if err != nil {
        // the request most likely failed because the space does not exist yet,
        // but diffService should handle such cases by sending pushSpace
        err = fmt.Errorf("failed to open stream: %w", rpcerr.Unwrap(err))
        return
    }

    // send an empty message so the server knows which space this stream belongs to
    err = stream.Send(&spacesyncproto.ObjectSyncMessage{SpaceId: s.spaceId})
    if err != nil {
        err = fmt.Errorf("failed to send first message to stream: %w", rpcerr.Unwrap(err))
        return
    }
    err = s.streamPool.AddAndReadStreamAsync(stream)
    if err != nil {
        err = fmt.Errorf("failed to read from stream async: %w", err)
        return
    }
    return
}

func (s *streamChecker) FirstResponsiblePeer() (peerId string, err error) {
    nodeIds := s.connector.Configuration().NodeIds(s.spaceId)
    for _, nodeId := range nodeIds {
        if s.streamPool.HasActiveStream(nodeId) {
            peerId = nodeId
            return
        }
    }
    err = fmt.Errorf("no responsible peers are connected")
    return
}
@ -1,332 +0,0 @@
package objectsync

import (
    "context"
    "errors"
    "fmt"
    "github.com/anytypeio/any-sync/app/ocache"
    "github.com/anytypeio/any-sync/commonspace/spacesyncproto"
    "github.com/anytypeio/any-sync/net/peer"
    "go.uber.org/zap"
    "sync"
    "sync/atomic"
    "time"
)

var ErrEmptyPeer = errors.New("don't have such a peer")
var ErrStreamClosed = errors.New("stream is already closed")

var maxStreamReaders = 10
var syncWaitPeriod = 2 * time.Second

var ErrSyncTimeout = errors.New("too long wait on sync receive")

// StreamPool can be made generic to work with different streams
type StreamPool interface {
    ocache.ObjectLastUsage
    AddAndReadStreamSync(stream spacesyncproto.ObjectSyncStream) (err error)
    AddAndReadStreamAsync(stream spacesyncproto.ObjectSyncStream) (err error)

    SendSync(peerId string, message *spacesyncproto.ObjectSyncMessage) (reply *spacesyncproto.ObjectSyncMessage, err error)
    SendAsync(peers []string, message *spacesyncproto.ObjectSyncMessage) (err error)
    BroadcastAsync(message *spacesyncproto.ObjectSyncMessage) (err error)

    HasActiveStream(peerId string) bool
    Close() (err error)
}

type MessageHandler func(ctx context.Context, senderId string, message *spacesyncproto.ObjectSyncMessage) (err error)

type responseWaiter struct {
    ch chan *spacesyncproto.ObjectSyncMessage
}

type streamPool struct {
    sync.Mutex
    peerStreams    map[string]spacesyncproto.ObjectSyncStream
    messageHandler MessageHandler
    wg             *sync.WaitGroup
    waiters        map[string]responseWaiter
    waitersMx      sync.Mutex
    counter        atomic.Uint64
    lastUsage      atomic.Int64
}

func newStreamPool(messageHandler MessageHandler) StreamPool {
    s := &streamPool{
        peerStreams:    make(map[string]spacesyncproto.ObjectSyncStream),
        messageHandler: messageHandler,
        waiters:        make(map[string]responseWaiter),
        wg:             &sync.WaitGroup{},
    }
    s.lastUsage.Store(time.Now().Unix())
    return s
}

func (s *streamPool) LastUsage() time.Time {
    return time.Unix(s.lastUsage.Load(), 0)
}

func (s *streamPool) HasActiveStream(peerId string) (res bool) {
    s.Lock()
    defer s.Unlock()
    _, err := s.getOrDeleteStream(peerId)
    return err == nil
}

func (s *streamPool) SendSync(
    peerId string,
    msg *spacesyncproto.ObjectSyncMessage) (reply *spacesyncproto.ObjectSyncMessage, err error) {
    newCounter := s.counter.Add(1)
    msg.ReplyId = genStreamPoolKey(peerId, msg.ObjectId, newCounter)

    s.waitersMx.Lock()
    waiter := responseWaiter{
        ch: make(chan *spacesyncproto.ObjectSyncMessage, 1),
    }
    s.waiters[msg.ReplyId] = waiter
    s.waitersMx.Unlock()

    err = s.SendAsync([]string{peerId}, msg)
    if err != nil {
        return
    }
    delay := time.NewTimer(syncWaitPeriod)
    select {
    case <-delay.C:
        s.waitersMx.Lock()
        delete(s.waiters, msg.ReplyId)
        s.waitersMx.Unlock()

        log.With(zap.String("replyId", msg.ReplyId)).Error("timed out waiting for reply")
        err = ErrSyncTimeout
    case reply = <-waiter.ch:
        if !delay.Stop() {
            <-delay.C
        }
    }
    return
}

func (s *streamPool) SendAsync(peers []string, message *spacesyncproto.ObjectSyncMessage) (err error) {
    s.lastUsage.Store(time.Now().Unix())
    getStreams := func() (streams []spacesyncproto.ObjectSyncStream) {
        for _, pId := range peers {
            stream, err := s.getOrDeleteStream(pId)
            if err != nil {
                continue
            }
            streams = append(streams, stream)
        }
        return streams
    }

    s.Lock()
    streams := getStreams()
    s.Unlock()

    log.With(zap.String("objectId", message.ObjectId), zap.Int("existing peers len", len(streams)), zap.Strings("wanted peers", peers)).
        Debug("sending message to peers")
    for _, stream := range streams {
        err = stream.Send(message)
        if err != nil {
            log.Debug("error sending message to stream", zap.Error(err))
        }
    }
    if len(peers) != 1 {
        err = nil
    }
    return err
}

func (s *streamPool) getOrDeleteStream(id string) (stream spacesyncproto.ObjectSyncStream, err error) {
    stream, exists := s.peerStreams[id]
    if !exists {
        err = ErrEmptyPeer
        return
    }

    select {
    case <-stream.Context().Done():
        delete(s.peerStreams, id)
        err = ErrStreamClosed
    default:
    }

    return
}

func (s *streamPool) getAllStreams() (streams []spacesyncproto.ObjectSyncStream) {
    s.Lock()
    defer s.Unlock()
Loop:
    for id, stream := range s.peerStreams {
        select {
        case <-stream.Context().Done():
            delete(s.peerStreams, id)
            continue Loop
        default:
            break
        }
        log.With(zap.String("id", id)).Debug("getting peer stream")
        streams = append(streams, stream)
    }

    return
}

func (s *streamPool) BroadcastAsync(message *spacesyncproto.ObjectSyncMessage) (err error) {
    streams := s.getAllStreams()
    log.With(zap.String("objectId", message.ObjectId), zap.Int("peers", len(streams))).
        Debug("broadcasting message to peers")
    for _, stream := range streams {
        if err = stream.Send(message); err != nil {
            log.Debug("error sending message to stream", zap.Error(err))
        }
    }

    return nil
}

func (s *streamPool) AddAndReadStreamAsync(stream spacesyncproto.ObjectSyncStream) (err error) {
    peerId, err := s.addStream(stream)
    if err != nil {
        return
    }
    go s.readPeerLoop(peerId, stream)
    return
}

func (s *streamPool) AddAndReadStreamSync(stream spacesyncproto.ObjectSyncStream) (err error) {
    peerId, err := s.addStream(stream)
    if err != nil {
        return
    }
    return s.readPeerLoop(peerId, stream)
}

func (s *streamPool) addStream(stream spacesyncproto.ObjectSyncStream) (peerId string, err error) {
    s.Lock()
    peerId, err = peer.CtxPeerId(stream.Context())
    if err != nil {
        s.Unlock()
        return
    }
    log.With(zap.String("peer id", peerId)).Debug("adding stream")

    if oldStream, ok := s.peerStreams[peerId]; ok {
        s.Unlock()
        oldStream.Close()
        s.Lock()
        log.With(zap.String("peer id", peerId)).Debug("closed old stream before adding")
    }

    s.peerStreams[peerId] = stream
    s.wg.Add(1)
    s.Unlock()
    return
}

func (s *streamPool) Close() (err error) {
    s.Lock()
    wg := s.wg
    s.Unlock()
    streams := s.getAllStreams()

    log.Debug("closing streams on lock")
    for _, stream := range streams {
        stream.Close()
    }
    log.Debug("closed streams")

    if wg != nil {
        wg.Wait()
    }
    return nil
}

func (s *streamPool) readPeerLoop(peerId string, stream spacesyncproto.ObjectSyncStream) (err error) {
    var (
        log   = log.With(zap.String("peerId", peerId))
        queue = NewDefaultActionQueue()
    )
    queue.Run()

    defer func() {
        log.Debug("stopped reading stream from peer")
        s.removePeer(peerId, stream)
        queue.Close()
        s.wg.Done()
    }()

    log.Debug("started reading stream from peer")

    stopWaiter := func(msg *spacesyncproto.ObjectSyncMessage) bool {
        s.waitersMx.Lock()
        waiter, exists := s.waiters[msg.ReplyId]
        if exists {
            delete(s.waiters, msg.ReplyId)
            s.waitersMx.Unlock()
            waiter.ch <- msg
            return true
        }
        s.waitersMx.Unlock()
        return false
    }

    process := func(msg *spacesyncproto.ObjectSyncMessage) error {
        log := log.With(zap.String("replyId", msg.ReplyId), zap.String("object id", msg.ObjectId))
        log.Debug("getting message with reply id")
        err = s.messageHandler(stream.Context(), peerId, msg)
        if err != nil {
            log.With(zap.Error(err)).Debug("message handling failed")
        }
        return nil
    }

    for {
        select {
        case <-stream.Context().Done():
            return
        default:
        }
        var msg *spacesyncproto.ObjectSyncMessage
        msg, err = stream.Recv()
        s.lastUsage.Store(time.Now().Unix())
        if err != nil {
            stream.Close()
            return
        }

        if msg.ReplyId != "" {
            // then we can send it directly to waiters without adding to queue or starting a reader
            if stopWaiter(msg) {
                continue
            }
            log.With(zap.String("replyId", msg.ReplyId)).Debug("reply id does not exist")
        }

        queue.Send(func() error {
            return process(msg)
        })
    }
}

func (s *streamPool) removePeer(peerId string, stream spacesyncproto.ObjectSyncStream) (err error) {
    s.Lock()
    defer s.Unlock()
    mapStream, ok := s.peerStreams[peerId]
    if !ok {
        return ErrEmptyPeer
    }

    // it can be the case that the stream was already replaced
    if mapStream == stream {
        delete(s.peerStreams, peerId)
    }
    return
}

func genStreamPoolKey(peerId, treeId string, counter uint64) string {
    return fmt.Sprintf("%s.%s.%d", peerId, treeId, counter)
}
@ -1,299 +0,0 @@
package objectsync

import (
    "context"
    "github.com/anytypeio/any-sync/commonspace/spacesyncproto"
    "github.com/anytypeio/any-sync/net/peer"
    "github.com/anytypeio/any-sync/net/rpc/rpctest"
    "github.com/stretchr/testify/require"
    "testing"
    "time"
)

type testServer struct {
    stream        chan spacesyncproto.DRPCSpaceSync_ObjectSyncStreamStream
    releaseStream chan error
    watchErrOnce  bool
}

func (t *testServer) HeadSync(ctx context.Context, request *spacesyncproto.HeadSyncRequest) (*spacesyncproto.HeadSyncResponse, error) {
    panic("implement me")
}

func (t *testServer) SpacePush(ctx context.Context, request *spacesyncproto.SpacePushRequest) (*spacesyncproto.SpacePushResponse, error) {
    panic("implement me")
}

func (t *testServer) SpacePull(ctx context.Context, request *spacesyncproto.SpacePullRequest) (*spacesyncproto.SpacePullResponse, error) {
    panic("implement me")
}

func (t *testServer) ObjectSyncStream(stream spacesyncproto.DRPCSpaceSync_ObjectSyncStreamStream) error {
    t.stream <- stream
    return <-t.releaseStream
}

func (t *testServer) waitStream(test *testing.T) spacesyncproto.DRPCSpaceSync_ObjectSyncStreamStream {
    select {
    case <-time.After(time.Second * 5):
        test.Fatalf("waitStream timeout")
    case st := <-t.stream:
        return st
    }
    return nil
}

type fixture struct {
    testServer   *testServer
    drpcTS       *rpctest.TesServer
    client       spacesyncproto.DRPCSpaceSyncClient
    clientStream spacesyncproto.DRPCSpaceSync_ObjectSyncStreamStream
    serverStream spacesyncproto.DRPCSpaceSync_ObjectSyncStreamStream
    pool         *streamPool
    clientId     string
    serverId     string
}

func newFixture(t *testing.T, clientId, serverId string, handler MessageHandler) *fixture {
    fx := &fixture{
        testServer: &testServer{},
        drpcTS:     rpctest.NewTestServer(),
        clientId:   clientId,
        serverId:   serverId,
    }
    fx.testServer.stream = make(chan spacesyncproto.DRPCSpaceSync_ObjectSyncStreamStream, 1)
    require.NoError(t, spacesyncproto.DRPCRegisterSpaceSync(fx.drpcTS.Mux, fx.testServer))
    fx.client = spacesyncproto.NewDRPCSpaceSyncClient(fx.drpcTS.Dial(peer.CtxWithPeerId(context.Background(), clientId)))

    var err error
    fx.clientStream, err = fx.client.ObjectSyncStream(peer.CtxWithPeerId(context.Background(), serverId))
    require.NoError(t, err)
    fx.serverStream = fx.testServer.waitStream(t)
    fx.pool = newStreamPool(handler).(*streamPool)

    return fx
}

func (fx *fixture) run(t *testing.T) chan error {
    waitCh := make(chan error)
    go func() {
        err := fx.pool.AddAndReadStreamSync(fx.clientStream)
        waitCh <- err
    }()

    time.Sleep(time.Millisecond * 10)
    fx.pool.Lock()
    require.Equal(t, fx.pool.peerStreams[fx.serverId], fx.clientStream)
    fx.pool.Unlock()

    return waitCh
}

func TestStreamPool_AddAndReadStreamAsync(t *testing.T) {
    remId := "remoteId"

    t.Run("client close", func(t *testing.T) {
        fx := newFixture(t, "", remId, nil)
        waitCh := fx.run(t)

        err := fx.clientStream.Close()
        require.NoError(t, err)
        err = <-waitCh

        require.Error(t, err)
        require.Nil(t, fx.pool.peerStreams[remId])
    })
    t.Run("server close", func(t *testing.T) {
        fx := newFixture(t, "", remId, nil)
        waitCh := fx.run(t)

        err := fx.serverStream.Close()
        require.NoError(t, err)

        err = <-waitCh
        require.Error(t, err)
        require.Nil(t, fx.pool.peerStreams[remId])
    })
}

func TestStreamPool_Close(t *testing.T) {
    remId := "remoteId"

    t.Run("close", func(t *testing.T) {
        fx := newFixture(t, "", remId, nil)
        fx.run(t)
        fx.pool.Close()
        select {
        case <-fx.clientStream.Context().Done():
            break
        case <-time.After(time.Millisecond * 100):
            t.Fatal("context should be closed")
        }
    })
}

func TestStreamPool_ReceiveMessage(t *testing.T) {
    remId := "remoteId"
    t.Run("pool receive message from server", func(t *testing.T) {
        objectId := "objectId"
        msg := &spacesyncproto.ObjectSyncMessage{
            ObjectId: objectId,
        }
        recvChan := make(chan struct{})
        fx := newFixture(t, "", remId, func(ctx context.Context, senderId string, message *spacesyncproto.ObjectSyncMessage) (err error) {
            require.Equal(t, msg, message)
            recvChan <- struct{}{}
            return nil
        })
        waitCh := fx.run(t)

        err := fx.serverStream.Send(msg)
        require.NoError(t, err)
        <-recvChan
        err = fx.clientStream.Close()
        require.NoError(t, err)
        err = <-waitCh

        require.Error(t, err)
        require.Nil(t, fx.pool.peerStreams[remId])
    })
}

func TestStreamPool_HasActiveStream(t *testing.T) {
    remId := "remoteId"
    t.Run("pool has active stream", func(t *testing.T) {
        fx := newFixture(t, "", remId, nil)
        waitCh := fx.run(t)
        require.True(t, fx.pool.HasActiveStream(remId))

        err := fx.clientStream.Close()
        require.NoError(t, err)
        err = <-waitCh

        require.Error(t, err)
        require.Nil(t, fx.pool.peerStreams[remId])
    })
    t.Run("pool has no active stream", func(t *testing.T) {
        fx := newFixture(t, "", remId, nil)
        waitCh := fx.run(t)
        err := fx.clientStream.Close()
        require.NoError(t, err)
        err = <-waitCh
        require.Error(t, err)
        require.False(t, fx.pool.HasActiveStream(remId))
        require.Nil(t, fx.pool.peerStreams[remId])
    })
}

func TestStreamPool_SendAsync(t *testing.T) {
    remId := "remoteId"
    t.Run("pool send async to server", func(t *testing.T) {
        objectId := "objectId"
        msg := &spacesyncproto.ObjectSyncMessage{
            ObjectId: objectId,
        }
        fx := newFixture(t, "", remId, nil)
        recvChan := make(chan struct{})
        go func() {
            message, err := fx.serverStream.Recv()
            require.NoError(t, err)
            require.Equal(t, msg, message)
            recvChan <- struct{}{}
        }()
        waitCh := fx.run(t)

        err := fx.pool.SendAsync([]string{remId}, msg)
        require.NoError(t, err)
        <-recvChan
        err = fx.clientStream.Close()
        require.NoError(t, err)
        err = <-waitCh

        require.Error(t, err)
        require.Nil(t, fx.pool.peerStreams[remId])
    })
}

func TestStreamPool_SendSync(t *testing.T) {
    remId := "remoteId"
    t.Run("pool send sync to server", func(t *testing.T) {
        objectId := "objectId"
        payload := []byte("payload")
        msg := &spacesyncproto.ObjectSyncMessage{
            ObjectId: objectId,
        }
        fx := newFixture(t, "", remId, nil)
        go func() {
            message, err := fx.serverStream.Recv()
            require.NoError(t, err)
            require.Equal(t, msg.ObjectId, message.ObjectId)
            require.NotEmpty(t, message.ReplyId)
            message.Payload = payload
            err = fx.serverStream.Send(message)
            require.NoError(t, err)
        }()
        waitCh := fx.run(t)
        res, err := fx.pool.SendSync(remId, msg)
        require.NoError(t, err)
        require.Equal(t, payload, res.Payload)
        err = fx.clientStream.Close()
        require.NoError(t, err)
        err = <-waitCh

        require.Error(t, err)
        require.Nil(t, fx.pool.peerStreams[remId])
    })

    t.Run("pool send sync timeout", func(t *testing.T) {
        objectId := "objectId"
        msg := &spacesyncproto.ObjectSyncMessage{
            ObjectId: objectId,
        }
        fx := newFixture(t, "", remId, nil)
        syncWaitPeriod = time.Millisecond * 30
        go func() {
            message, err := fx.serverStream.Recv()
            require.NoError(t, err)
            require.Equal(t, msg.ObjectId, message.ObjectId)
            require.NotEmpty(t, message.ReplyId)
        }()
        waitCh := fx.run(t)
        _, err := fx.pool.SendSync(remId, msg)
        require.Equal(t, ErrSyncTimeout, err)
        err = fx.clientStream.Close()
        require.NoError(t, err)
        err = <-waitCh

        require.Error(t, err)
        require.Nil(t, fx.pool.peerStreams[remId])
    })
}

func TestStreamPool_BroadcastAsync(t *testing.T) {
    remId := "remoteId"
    t.Run("pool broadcast async to server", func(t *testing.T) {
        objectId := "objectId"
        msg := &spacesyncproto.ObjectSyncMessage{
            ObjectId: objectId,
        }
        fx := newFixture(t, "", remId, nil)
        recvChan := make(chan struct{})
        go func() {
            message, err := fx.serverStream.Recv()
            require.NoError(t, err)
            require.Equal(t, msg, message)
            recvChan <- struct{}{}
        }()
        waitCh := fx.run(t)

        err := fx.pool.BroadcastAsync(msg)
        require.NoError(t, err)
        <-recvChan
        err = fx.clientStream.Close()
        require.NoError(t, err)
        err = <-waitCh

        require.Error(t, err)
        require.Nil(t, fx.pool.peerStreams[remId])
    })
}
@ -1,24 +0,0 @@
package commonspace

import (
    "context"
    "github.com/anytypeio/any-sync/commonspace/spacesyncproto"
)

type RpcHandler interface {
    HeadSync(ctx context.Context, req *spacesyncproto.HeadSyncRequest) (*spacesyncproto.HeadSyncResponse, error)
    Stream(stream spacesyncproto.DRPCSpaceSync_ObjectSyncStreamStream) error
}

type rpcHandler struct {
    s *space
}

func (r *rpcHandler) HeadSync(ctx context.Context, req *spacesyncproto.HeadSyncRequest) (*spacesyncproto.HeadSyncResponse, error) {
    return r.s.HeadSync().HandleRangeRequest(ctx, req)
}

func (r *rpcHandler) Stream(stream spacesyncproto.DRPCSpaceSync_ObjectSyncStreamStream) (err error) {
    // TODO: if needed we can launch full sync here
    return r.s.ObjectSync().StreamPool().AddAndReadStreamSync(stream)
}
@ -5,6 +5,7 @@ import (
    "errors"
    "fmt"
    "github.com/anytypeio/any-sync/accountservice"
    "github.com/anytypeio/any-sync/app/logger"
    "github.com/anytypeio/any-sync/app/ocache"
    "github.com/anytypeio/any-sync/commonspace/headsync"
    "github.com/anytypeio/any-sync/commonspace/object/acl/list"
@ -20,9 +21,11 @@ import (
    "github.com/anytypeio/any-sync/commonspace/spacestorage"
    "github.com/anytypeio/any-sync/commonspace/spacesyncproto"
    "github.com/anytypeio/any-sync/commonspace/syncstatus"
    "github.com/anytypeio/any-sync/net/peer"
    "github.com/anytypeio/any-sync/nodeconf"
    "github.com/anytypeio/any-sync/util/keys/asymmetric/encryptionkey"
    "github.com/anytypeio/any-sync/util/keys/asymmetric/signingkey"
    "github.com/anytypeio/any-sync/util/multiqueue"
    "github.com/anytypeio/any-sync/util/slice"
    "github.com/zeebo/errs"
    "go.uber.org/zap"
@ -50,6 +53,13 @@ type SpaceCreatePayload struct {
    ReplicationKey uint64
}

type HandleMessage struct {
    Id       uint64
    Deadline time.Time
    SenderId string
    Message  *spacesyncproto.ObjectSyncMessage
}

const SpaceTypeDerived = "derived.space"

type SpaceDerivePayload struct {
@ -80,8 +90,6 @@ type Space interface {
    DebugAllHeads() []headsync.TreeHeads
    Description() (SpaceDescription, error)

    SpaceSyncRpc() RpcHandler

    DeriveTree(ctx context.Context, payload objecttree.ObjectTreeCreatePayload) (res treestorage.TreeStorageCreatePayload, err error)
    CreateTree(ctx context.Context, payload objecttree.ObjectTreeCreatePayload) (res treestorage.TreeStorageCreatePayload, err error)
    PutTree(ctx context.Context, payload treestorage.TreeStorageCreatePayload, listener updatelistener.UpdateListener) (t objecttree.ObjectTree, err error)
@ -90,9 +98,12 @@ type Space interface {
    BuildHistoryTree(ctx context.Context, id string, opts HistoryTreeOpts) (t objecttree.HistoryTree, err error)

    HeadSync() headsync.HeadSync
    ObjectSync() objectsync.ObjectSync
    SyncStatus() syncstatus.StatusUpdater
    Storage() spacestorage.SpaceStorage

    HandleMessage(ctx context.Context, msg HandleMessage) (err error)

    Close() error
}

@ -101,8 +112,6 @@ type space struct {
    mu     sync.RWMutex
    header *spacesyncproto.RawSpaceHeaderWithId

    rpc *rpcHandler

    objectSync objectsync.ObjectSync
    headSync   headsync.HeadSync
    syncStatus syncstatus.StatusUpdater
@ -113,6 +122,8 @@ type space struct {
    configuration  nodeconf.Configuration
    settingsObject settings.SettingsObject

    handleQueue multiqueue.MultiQueue[HandleMessage]

    isClosed  atomic.Bool
    treesUsed atomic.Int32
}
@ -161,7 +172,6 @@ func (s *space) Init(ctx context.Context) (err error) {
        return
    }
    s.header = header
    s.rpc = &rpcHandler{s: s}
    initialIds, err := s.storage.StoredIds()
    if err != nil {
        return
@ -174,7 +184,7 @@ func (s *space) Init(ctx context.Context) (err error) {
    if err != nil {
        return
    }
    s.aclList = syncacl.NewSyncAcl(aclList, s.objectSync.StreamPool())
    s.aclList = syncacl.NewSyncAcl(aclList, s.objectSync.MessagePool())
    s.cache.AddObject(s.aclList)

    deletionState := deletionstate.NewDeletionState(s.storage)
@ -196,22 +206,18 @@ func (s *space) Init(ctx context.Context) (err error) {
        DeletionState: deletionState,
    }
    s.settingsObject = settings.NewSettingsObject(deps, s.id)
    s.cache.AddObject(s.settingsObject)
    s.objectSync.Init()
    s.headSync.Init(initialIds, deletionState)
    err = s.settingsObject.Init(ctx)
    if err != nil {
        return
    }
    s.cache.AddObject(s.settingsObject)
    s.syncStatus.Run()

    s.handleQueue = multiqueue.New[HandleMessage](s.handleMessage, 100)
    return nil
}

func (s *space) SpaceSyncRpc() RpcHandler {
    return s.rpc
}

func (s *space) ObjectSync() objectsync.ObjectSync {
    return s.objectSync
}
@ -345,13 +351,47 @@ func (s *space) DeleteTree(ctx context.Context, id string) (err error) {
    return s.settingsObject.DeleteObject(id)
}

func (s *space) Close() error {
    log.With(zap.String("id", s.id)).Debug("space is closing")
func (s *space) HandleMessage(ctx context.Context, hm HandleMessage) (err error) {
    threadId := hm.Message.ObjectId
    if hm.Message.ReplyId != "" {
        threadId += hm.Message.ReplyId
        defer func() {
            s.isClosed.Store(true)
            log.With(zap.String("id", s.id)).Debug("space closed")
            _ = s.handleQueue.CloseThread(threadId)
        }()
    }
    return s.handleQueue.Add(ctx, threadId, hm)
}

func (s *space) handleMessage(msg HandleMessage) {
    ctx := peer.CtxWithPeerId(context.Background(), msg.SenderId)
    ctx = logger.CtxWithFields(ctx, zap.Uint64("msgId", msg.Id), zap.String("senderId", msg.SenderId))
    if !msg.Deadline.IsZero() {
        now := time.Now()
        if now.After(msg.Deadline) {
            log.InfoCtx(ctx, "skip message: deadline exceeded")
            return
        }
        var cancel context.CancelFunc
        ctx, cancel = context.WithDeadline(ctx, msg.Deadline)
        defer cancel()
    }

    if err := s.objectSync.HandleMessage(ctx, msg.SenderId, msg.Message); err != nil {
        log.InfoCtx(ctx, "handleMessage error", zap.Error(err))
    }
}

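The handleQueue keys each message onto a thread id derived from the object id (plus the reply id for replies), so handling stays serialized per object while distinct objects proceed in parallel, and expired messages are dropped before any work is done. A sketch of feeding an inbound message into a space from a caller's perspective; sp and the literal values are illustrative, only the HandleMessage struct and method come from this change:

hm := commonspace.HandleMessage{
    Id:       42,
    Deadline: time.Now().Add(30 * time.Second), // skipped if still queued past this
    SenderId: "peer1",
    Message:  &spacesyncproto.ObjectSyncMessage{ObjectId: "obj1"},
}
if err := sp.HandleMessage(ctx, hm); err != nil {
    // the space (or its handle queue) is already closed
}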
func (s *space) Close() error {
    if s.isClosed.Swap(true) {
        log.Warn("call space.Close on closed space", zap.String("id", s.id))
        return nil
    }
    log.With(zap.String("id", s.id)).Debug("space is closing")

    var mError errs.Group
    if err := s.handleQueue.Close(); err != nil {
        mError.Add(err)
    }
    if err := s.headSync.Close(); err != nil {
        mError.Add(err)
    }
@ -370,6 +410,6 @@ func (s *space) Close() error {
    if err := s.syncStatus.Close(); err != nil {
        mError.Add(err)
    }

    log.With(zap.String("id", s.id)).Debug("space closed")
    return mError.Err()
}

@ -13,6 +13,7 @@ import (
    "github.com/anytypeio/any-sync/commonspace/objectsync"
    "github.com/anytypeio/any-sync/commonspace/spacestorage"
    "github.com/anytypeio/any-sync/commonspace/spacesyncproto"
    "github.com/anytypeio/any-sync/commonspace/streammanager"
    "github.com/anytypeio/any-sync/commonspace/syncstatus"
    "github.com/anytypeio/any-sync/net/peer"
    "github.com/anytypeio/any-sync/net/pool"
@ -43,6 +44,7 @@ type spaceService struct {
    account               accountservice.Service
    configurationService  nodeconf.Service
    storageProvider       spacestorage.SpaceStorageProvider
    streamManagerProvider streammanager.StreamManagerProvider
    treeGetter            treegetter.TreeGetter
    pool                  pool.Pool
}
@ -53,6 +55,7 @@ func (s *spaceService) Init(a *app.App) (err error) {
    s.storageProvider = a.MustComponent(spacestorage.CName).(spacestorage.SpaceStorageProvider)
    s.configurationService = a.MustComponent(nodeconf.CName).(nodeconf.Service)
    s.treeGetter = a.MustComponent(treegetter.CName).(treegetter.TreeGetter)
    s.streamManagerProvider = a.MustComponent(streammanager.CName).(streammanager.StreamManagerProvider)
    s.pool = a.MustComponent(pool.CName).(pool.Pool)
    return nil
}
@ -94,7 +97,7 @@ func (s *spaceService) DeriveSpace(ctx context.Context, payload SpaceDerivePaylo
}

func (s *spaceService) NewSpace(ctx context.Context, id string) (Space, error) {
    st, err := s.storageProvider.SpaceStorage(id)
    st, err := s.storageProvider.WaitSpaceStorage(ctx, id)
    if err != nil {
        if err != spacestorage.ErrSpaceStorageMissing {
            return nil, err
@ -124,7 +127,13 @@ func (s *spaceService) NewSpace(ctx context.Context, id string) (Space, error) {
    }

    headSync := headsync.NewHeadSync(id, s.config.SyncPeriod, st, confConnector, getter, syncStatus, log)
    objectSync := objectsync.NewObjectSync(id, confConnector, getter)

    streamManager, err := s.streamManagerProvider.NewStreamManager(ctx, id)
    if err != nil {
        return nil, err
    }

    objectSync := objectsync.NewObjectSync(id, streamManager, getter)
    sp := &space{
        id:         id,
        objectSync: objectSync,

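Only NewStreamManager(ctx, id) is visible of the provider here; a trivial provider that hands every space the same shared manager might look like the sketch below. This is an assumption about the streammanager package's shapes, not its actual API — in particular the manager type returned is guessed to be compatible with objectsync.StreamManager:

// singleManagerProvider returns one shared manager for every space (illustrative).
type singleManagerProvider struct {
    manager objectsync.StreamManager
}

func (p singleManagerProvider) NewStreamManager(ctx context.Context, spaceId string) (objectsync.StreamManager, error) {
    return p.manager, nil
}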
@ -2,6 +2,7 @@
package spacestorage

import (
    "context"
    "errors"
    "github.com/anytypeio/any-sync/app"
    "github.com/anytypeio/any-sync/commonspace/object/acl/aclrecordproto"
@ -51,7 +52,7 @@ type SpaceStorageCreatePayload struct {

type SpaceStorageProvider interface {
    app.Component
    SpaceStorage(id string) (SpaceStorage, error)
    WaitSpaceStorage(ctx context.Context, id string) (SpaceStorage, error)
    SpaceExists(id string) bool
    CreateSpaceStorage(payload SpaceStorageCreatePayload) (SpaceStorage, error)
}

@ -56,9 +56,10 @@ message HeadSyncResponse {
// ObjectSyncMessage is a message sent on object sync
message ObjectSyncMessage {
    string spaceId = 1;
    string replyId = 2;
    bytes payload = 3;
    string objectId = 4;
    string requestId = 2;
    string replyId = 3;
    bytes payload = 4;
    string objectId = 5;
    // string identity = 5;
    // string peerSignature = 6;
}
@ -134,3 +135,13 @@ message SettingsData {
    SpaceSettingsSnapshot snapshot = 2;
}

// SpaceSubscription is carried in ObjectSyncMessage.Payload and indicates whether the current stream should be subscribed to or unsubscribed from the given spaces
enum SpaceSubscriptionAction {
    Subscribe = 0;
    Unsubscribe = 1;
}

message SpaceSubscription {
    repeated string spaceIds = 1;
    SpaceSubscriptionAction action = 2;
}

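Two things follow from this hunk: requestId takes field tag 2 and pushes replyId/payload/objectId down to tags 3–5, which is a wire-incompatible renumbering (old and new peers cannot decode each other's messages, so both sides have to move together), and SpaceSubscription rides inside ObjectSyncMessage.Payload rather than getting its own RPC. A short sketch of building a subscription message with the gogo-generated Go types; the values are illustrative, and Marshal is the serializer gogofaster generates for each message:

sub := &spacesyncproto.SpaceSubscription{
    SpaceIds: []string{"space1"},
    Action:   spacesyncproto.SpaceSubscriptionAction_Subscribe,
}
payload, err := sub.Marshal()
if err != nil {
    return err
}
msg := &spacesyncproto.ObjectSyncMessage{
    Payload: payload, // the receiver unmarshals based on context, not a type tag
}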
@ -56,6 +56,32 @@ func (ErrCodes) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_80e49f1f4ac27799, []int{0}
}

// SpaceSubscription is carried in ObjectSyncMessage.Payload and indicates whether the current stream should be subscribed to or unsubscribed from the given spaces
type SpaceSubscriptionAction int32

const (
    SpaceSubscriptionAction_Subscribe   SpaceSubscriptionAction = 0
    SpaceSubscriptionAction_Unsubscribe SpaceSubscriptionAction = 1
)

var SpaceSubscriptionAction_name = map[int32]string{
    0: "Subscribe",
    1: "Unsubscribe",
}

var SpaceSubscriptionAction_value = map[string]int32{
    "Subscribe":   0,
    "Unsubscribe": 1,
}

func (x SpaceSubscriptionAction) String() string {
    return proto.EnumName(SpaceSubscriptionAction_name, int32(x))
}

func (SpaceSubscriptionAction) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_80e49f1f4ac27799, []int{1}
}

// HeadSyncRange presenting a request for one range
type HeadSyncRange struct {
    From uint64 `protobuf:"varint,1,opt,name=from,proto3" json:"from,omitempty"`
@ -332,9 +358,10 @@ func (m *HeadSyncResponse) GetResults() []*HeadSyncResult {
// ObjectSyncMessage is a message sent on object sync
type ObjectSyncMessage struct {
    SpaceId string `protobuf:"bytes,1,opt,name=spaceId,proto3" json:"spaceId,omitempty"`
    ReplyId string `protobuf:"bytes,2,opt,name=replyId,proto3" json:"replyId,omitempty"`
    Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
    ObjectId string `protobuf:"bytes,4,opt,name=objectId,proto3" json:"objectId,omitempty"`
    RequestId string `protobuf:"bytes,2,opt,name=requestId,proto3" json:"requestId,omitempty"`
    ReplyId   string `protobuf:"bytes,3,opt,name=replyId,proto3" json:"replyId,omitempty"`
    Payload   []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"`
    ObjectId  string `protobuf:"bytes,5,opt,name=objectId,proto3" json:"objectId,omitempty"`
}

func (m *ObjectSyncMessage) Reset() { *m = ObjectSyncMessage{} }
@ -377,6 +404,13 @@ func (m *ObjectSyncMessage) GetSpaceId() string {
    return ""
}

func (m *ObjectSyncMessage) GetRequestId() string {
    if m != nil {
        return m.RequestId
    }
    return ""
}

func (m *ObjectSyncMessage) GetReplyId() string {
    if m != nil {
        return m.ReplyId
@ -1047,8 +1081,61 @@ func (m *SettingsData) GetSnapshot() *SpaceSettingsSnapshot {
    return nil
}

type SpaceSubscription struct {
    SpaceIds []string                `protobuf:"bytes,1,rep,name=spaceIds,proto3" json:"spaceIds,omitempty"`
    Action   SpaceSubscriptionAction `protobuf:"varint,2,opt,name=action,proto3,enum=spacesync.SpaceSubscriptionAction" json:"action,omitempty"`
}

func (m *SpaceSubscription) Reset()         { *m = SpaceSubscription{} }
func (m *SpaceSubscription) String() string { return proto.CompactTextString(m) }
func (*SpaceSubscription) ProtoMessage()    {}
func (*SpaceSubscription) Descriptor() ([]byte, []int) {
    return fileDescriptor_80e49f1f4ac27799, []int{18}
}
func (m *SpaceSubscription) XXX_Unmarshal(b []byte) error {
    return m.Unmarshal(b)
}
func (m *SpaceSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    if deterministic {
        return xxx_messageInfo_SpaceSubscription.Marshal(b, m, deterministic)
    } else {
        b = b[:cap(b)]
        n, err := m.MarshalToSizedBuffer(b)
        if err != nil {
            return nil, err
        }
        return b[:n], nil
    }
}
func (m *SpaceSubscription) XXX_Merge(src proto.Message) {
    xxx_messageInfo_SpaceSubscription.Merge(m, src)
}
func (m *SpaceSubscription) XXX_Size() int {
    return m.Size()
}
func (m *SpaceSubscription) XXX_DiscardUnknown() {
    xxx_messageInfo_SpaceSubscription.DiscardUnknown(m)
}

var xxx_messageInfo_SpaceSubscription proto.InternalMessageInfo

func (m *SpaceSubscription) GetSpaceIds() []string {
    if m != nil {
        return m.SpaceIds
    }
    return nil
}

func (m *SpaceSubscription) GetAction() SpaceSubscriptionAction {
    if m != nil {
        return m.Action
    }
    return SpaceSubscriptionAction_Subscribe
}

func init() {
    proto.RegisterEnum("spacesync.ErrCodes", ErrCodes_name, ErrCodes_value)
    proto.RegisterEnum("spacesync.SpaceSubscriptionAction", SpaceSubscriptionAction_name, SpaceSubscriptionAction_value)
    proto.RegisterType((*HeadSyncRange)(nil), "spacesync.HeadSyncRange")
    proto.RegisterType((*HeadSyncResult)(nil), "spacesync.HeadSyncResult")
    proto.RegisterType((*HeadSyncResultElement)(nil), "spacesync.HeadSyncResultElement")
@ -1067,6 +1154,7 @@ func init() {
    proto.RegisterType((*ObjectDelete)(nil), "spacesync.ObjectDelete")
    proto.RegisterType((*SpaceSettingsSnapshot)(nil), "spacesync.SpaceSettingsSnapshot")
    proto.RegisterType((*SettingsData)(nil), "spacesync.SettingsData")
    proto.RegisterType((*SpaceSubscription)(nil), "spacesync.SpaceSubscription")
}

func init() {
@ -1074,64 +1162,68 @@ func init() {
}

var fileDescriptor_80e49f1f4ac27799 = []byte{
    // 903 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x4f, 0x6f, 0x1b, 0x45,
    0x14, 0xf7, 0x6e, 0x9c, 0x26, 0x7e, 0xd9, 0x3a, 0xdb, 0x69, 0x0a, 0x8b, 0x1b, 0xb9, 0xd6, 0x1e,
    0x50, 0xc4, 0xa1, 0x7f, 0x52, 0x04, 0x42, 0xc0, 0x81, 0x26, 0x2e, 0x5d, 0xa1, 0x92, 0x6a, 0x0c,
    0x42, 0x42, 0x02, 0x69, 0xba, 0xfb, 0x62, 0x2f, 0x5a, 0xcf, 0x2c, 0x3b, 0x63, 0x1a, 0x1f, 0x38,
    0x70, 0xe2, 0xca, 0x57, 0xe0, 0x3b, 0xf0, 0x21, 0x38, 0xf6, 0xc8, 0x11, 0x25, 0x5f, 0x04, 0xcd,
    0xec, 0x5f, 0xdb, 0x9b, 0x1c, 0xb8, 0x38, 0x33, 0xef, 0xcf, 0xef, 0xfd, 0xde, 0x9b, 0x99, 0xdf,
    0x06, 0x9e, 0x84, 0x62, 0x3e, 0x17, 0x5c, 0xa6, 0x2c, 0xc4, 0x47, 0xe6, 0x57, 0x2e, 0x79, 0x98,
    0x66, 0x42, 0x89, 0x47, 0xe6, 0x57, 0xd6, 0xd6, 0x87, 0xc6, 0x40, 0x7a, 0x95, 0xc1, 0x0f, 0xe0,
    0xf6, 0x0b, 0x64, 0xd1, 0x64, 0xc9, 0x43, 0xca, 0xf8, 0x14, 0x09, 0x81, 0xee, 0x79, 0x26, 0xe6,
    0x9e, 0x35, 0xb2, 0x8e, 0xba, 0xd4, 0xac, 0x49, 0x1f, 0x6c, 0x25, 0x3c, 0xdb, 0x58, 0x6c, 0x25,
    0xc8, 0x01, 0x6c, 0x27, 0xf1, 0x3c, 0x56, 0xde, 0xd6, 0xc8, 0x3a, 0xba, 0x4d, 0xf3, 0x8d, 0x7f,
    0x01, 0xfd, 0x0a, 0x0a, 0xe5, 0x22, 0x51, 0x1a, 0x6b, 0xc6, 0xe4, 0xcc, 0x60, 0x39, 0xd4, 0xac,
    0xc9, 0x67, 0xb0, 0x8b, 0x09, 0xce, 0x91, 0x2b, 0xe9, 0xd9, 0xa3, 0xad, 0xa3, 0xbd, 0xe3, 0xd1,
    0xc3, 0x9a, 0xdf, 0x2a, 0xc0, 0x38, 0x0f, 0xa4, 0x55, 0x86, 0xae, 0x1c, 0x8a, 0x05, 0xaf, 0x2a,
    0x9b, 0x8d, 0xff, 0x29, 0xdc, 0x6b, 0x4d, 0xd4, 0xc4, 0xe3, 0xc8, 0x94, 0xef, 0x51, 0x3b, 0x8e,
    0x0c, 0x21, 0x64, 0x91, 0x69, 0xa5, 0x47, 0xcd, 0xda, 0xff, 0x01, 0xf6, 0xeb, 0xe4, 0x9f, 0x17,
    0x28, 0x15, 0xf1, 0x60, 0xc7, 0x50, 0x0a, 0xca, 0xdc, 0x72, 0x4b, 0x1e, 0xc3, 0xad, 0x4c, 0x8f,
    0xa9, 0xe4, 0xee, 0xb5, 0x71, 0xd7, 0x01, 0xb4, 0x88, 0xf3, 0xbf, 0x04, 0xb7, 0xc1, 0x2d, 0x15,
    0x5c, 0x22, 0x79, 0x0a, 0x3b, 0x99, 0xe1, 0x29, 0x3d, 0xcb, 0xc0, 0xbc, 0x77, 0xed, 0x08, 0x68,
    0x19, 0xe9, 0xff, 0x0a, 0x77, 0xce, 0x5e, 0xff, 0x84, 0xa1, 0xd2, 0xce, 0x97, 0x28, 0x25, 0x9b,
    0xe2, 0x0d, 0x4c, 0x3d, 0x5d, 0x23, 0x4d, 0x96, 0x41, 0xd9, 0x6d, 0xb9, 0xd5, 0x9e, 0x94, 0x2d,
    0x13, 0xc1, 0x22, 0x33, 0x45, 0x87, 0x96, 0x5b, 0x32, 0x80, 0x5d, 0x61, 0x4a, 0x04, 0x91, 0xd7,
    0x35, 0x49, 0xd5, 0xde, 0x1f, 0x83, 0x3b, 0xd1, 0xd0, 0xaf, 0x16, 0x72, 0x56, 0xce, 0xe9, 0x49,
    0x8d, 0xa4, 0xab, 0xef, 0x1d, 0xbf, 0xdb, 0xe8, 0x23, 0x8f, 0xce, 0xdd, 0x55, 0x09, 0xff, 0x2e,
    0xdc, 0x69, 0xc0, 0xe4, 0xf3, 0xf0, 0xfd, 0x0a, 0x3b, 0x49, 0x4a, 0xec, 0xb5, 0xa3, 0xf3, 0x9f,
    0x57, 0x89, 0x3a, 0xa6, 0x18, 0xe4, 0xff, 0x20, 0xf0, 0x9b, 0x0d, 0x4e, 0xd3, 0x43, 0xbe, 0x80,
    0x3d, 0x93, 0xa3, 0xe7, 0x8e, 0x59, 0x81, 0xf3, 0xa0, 0x81, 0x43, 0xd9, 0x9b, 0x49, 0x1d, 0xf0,
    0x5d, 0xac, 0x66, 0x41, 0x44, 0x9b, 0x39, 0x64, 0x08, 0xc0, 0xc2, 0xa4, 0x00, 0x34, 0xe3, 0x76,
    0x68, 0xc3, 0x42, 0x7c, 0x70, 0xea, 0x5d, 0x90, 0x8f, 0xbd, 0x47, 0x57, 0x6c, 0xe4, 0x18, 0x0e,
    0x0c, 0xe4, 0x04, 0x95, 0x8a, 0xf9, 0x54, 0x96, 0x68, 0x5d, 0x83, 0xd6, 0xea, 0x23, 0x1f, 0xc1,
    0x3b, 0x6d, 0xf6, 0x20, 0xf2, 0xb6, 0x4d, 0x85, 0x6b, 0xbc, 0xfe, 0x9f, 0x16, 0xec, 0x35, 0x5a,
    0xd2, 0xe7, 0x1e, 0x47, 0xc8, 0x55, 0xac, 0x96, 0xc5, 0x5b, 0xad, 0xf6, 0xe4, 0x10, 0x7a, 0x2a,
    0x9e, 0xa3, 0x54, 0x6c, 0x9e, 0x9a, 0xd6, 0xb6, 0x68, 0x6d, 0xd0, 0x5e, 0x53, 0xe3, 0x9b, 0x65,
    0x8a, 0x45, 0x5b, 0xb5, 0x81, 0xbc, 0x0f, 0x7d, 0x7d, 0xe9, 0xe2, 0x90, 0xa9, 0x58, 0xf0, 0xaf,
    0x70, 0x69, 0xba, 0xe9, 0xd2, 0x35, 0xab, 0x7e, 0x96, 0x12, 0x31, 0x67, 0xed, 0x50, 0xb3, 0xf6,
    0x5f, 0x41, 0x7f, 0x75, 0xf0, 0x64, 0xb4, 0x79, 0x50, 0xce, 0xea, 0x39, 0x68, 0x36, 0xf1, 0x94,
    0x33, 0xb5, 0xc8, 0xb0, 0x38, 0x86, 0xda, 0xe0, 0x9f, 0xc2, 0x41, 0xdb, 0x51, 0xea, 0xac, 0x8c,
    0xbd, 0x59, 0x41, 0xad, 0x0d, 0xc5, 0x3d, 0xb4, 0xab, 0x7b, 0xf8, 0x23, 0x1c, 0x4c, 0x9a, 0x53,
    0x3d, 0x11, 0x5c, 0x69, 0xa9, 0xf9, 0x1c, 0x9c, 0xfc, 0xad, 0x9c, 0x62, 0x82, 0x0a, 0x5b, 0xee,
    0xe3, 0x59, 0xc3, 0xfd, 0xa2, 0x43, 0x57, 0xc2, 0x9f, 0xed, 0xc0, 0xf6, 0x2f, 0x2c, 0x59, 0xa0,
    0x3f, 0x04, 0xa7, 0x19, 0xb8, 0xf1, 0x0e, 0x3e, 0x86, 0x7b, 0x2b, 0xf5, 0x27, 0x9c, 0xa5, 0x72,
    0x26, 0x94, 0xbe, 0x84, 0x91, 0x49, 0x89, 0x82, 0x28, 0xd7, 0x95, 0x1e, 0x6d, 0x58, 0xfc, 0xdf,
    0x2d, 0x70, 0xca, 0xa4, 0x53, 0xa6, 0x18, 0xf9, 0x04, 0x76, 0xc2, 0x9c, 0x7c, 0xa1, 0x42, 0x0f,
    0xd6, 0x1f, 0xcf, 0x5a, 0x8f, 0xb4, 0x8c, 0xd7, 0x22, 0x2e, 0x8b, 0xba, 0x66, 0x34, 0xab, 0x22,
    0xde, 0xca, 0x8f, 0x56, 0x19, 0x1f, 0x84, 0xb0, 0x3b, 0xce, 0xb2, 0x13, 0x11, 0xa1, 0x24, 0x7d,
    0x80, 0x6f, 0x39, 0x5e, 0xa4, 0x18, 0x2a, 0x8c, 0xdc, 0x0e, 0x71, 0x8b, 0xd7, 0xf9, 0x32, 0x96,
    0x32, 0xe6, 0x53, 0xd7, 0x22, 0xfb, 0xc5, 0x5d, 0x1d, 0x5f, 0xc4, 0x52, 0x49, 0xd7, 0x26, 0x77,
    0x61, 0xdf, 0x18, 0xbe, 0x16, 0x2a, 0xe0, 0x27, 0x2c, 0x9c, 0xa1, 0xbb, 0xa5, 0xa3, 0xc6, 0x59,
    0x26, 0xb2, 0xb3, 0xf3, 0x73, 0x89, 0xca, 0x8d, 0x8e, 0xff, 0xb2, 0xa1, 0x97, 0x13, 0x59, 0xf2,
    0x90, 0x9c, 0xc0, 0x6e, 0xa9, 0xab, 0x64, 0xd0, 0x2a, 0xb6, 0x46, 0x75, 0x06, 0xf7, 0xdb, 0x85,
    0x38, 0x57, 0x9b, 0xe7, 0x05, 0xa2, 0xd6, 0x2e, 0x72, 0x7f, 0x43, 0x69, 0x6a, 0x61, 0x1c, 0x1c,
    0xb6, 0x3b, 0x37, 0x70, 0x92, 0xa4, 0x0d, 0xa7, 0x12, 0xc1, 0x36, 0x9c, 0x86, 0xfa, 0x51, 0x70,
    0xeb, 0x2f, 0xc2, 0x44, 0x65, 0xc8, 0xe6, 0xe4, 0x70, 0xe3, 0xc2, 0x35, 0x3e, 0x17, 0x83, 0x1b,
    0xbd, 0x47, 0xd6, 0x63, 0xeb, 0xd9, 0x87, 0x7f, 0x5f, 0x0e, 0xad, 0xb7, 0x97, 0x43, 0xeb, 0xdf,
    0xcb, 0xa1, 0xf5, 0xc7, 0xd5, 0xb0, 0xf3, 0xf6, 0x6a, 0xd8, 0xf9, 0xe7, 0x6a, 0xd8, 0xf9, 0x7e,
    0x70, 0xfd, 0xff, 0x19, 0xaf, 0x6f, 0x99, 0x3f, 0x4f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x24,
    0x0c, 0x9c, 0xee, 0x8c, 0x08, 0x00, 0x00,
    // 971 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x4b, 0x6f, 0xdb, 0xc6,
    0x13, 0x17, 0xe5, 0xa7, 0xc6, 0xb4, 0xcc, 0x6c, 0x9c, 0x7f, 0xf8, 0x57, 0x0c, 0x45, 0xd8, 0x43,
    0x61, 0xe4, 0x90, 0x87, 0x52, 0xb4, 0x48, 0x1f, 0x87, 0xc4, 0x56, 0x1a, 0xa1, 0x48, 0x6d, 0xac,
    0x1a, 0x14, 0x28, 0xd0, 0x02, 0x6b, 0x72, 0x2c, 0xb1, 0xa5, 0x96, 0x2c, 0x77, 0xd5, 0x58, 0xc7,
    0x9e, 0x7a, 0xed, 0xbd, 0xa7, 0x7e, 0x87, 0x7e, 0x88, 0x1e, 0x73, 0xec, 0xb1, 0xb0, 0xbf, 0x48,
    0xb1, 0xcb, 0xb7, 0x45, 0xe7, 0xd0, 0x8b, 0xcc, 0x9d, 0xc7, 0x6f, 0x7e, 0x33, 0xb3, 0x33, 0x6b,
    0x78, 0xe2, 0x45, 0xf3, 0x79, 0x24, 0x64, 0xcc, 0x3d, 0x7c, 0x64, 0x7e, 0xe5, 0x52, 0x78, 0x71,
    0x12, 0xa9, 0xe8, 0x91, 0xf9, 0x95, 0xa5, 0xf4, 0xa1, 0x11, 0x90, 0x4e, 0x21, 0xa0, 0x63, 0xd8,
    0x7d, 0x85, 0xdc, 0x9f, 0x2c, 0x85, 0xc7, 0xb8, 0x98, 0x22, 0x21, 0xb0, 0x7e, 0x9e, 0x44, 0x73,
    0xd7, 0x1a, 0x58, 0x87, 0xeb, 0xcc, 0x7c, 0x93, 0x2e, 0xb4, 0x55, 0xe4, 0xb6, 0x8d, 0xa4, 0xad,
    0x22, 0xb2, 0x0f, 0x1b, 0x61, 0x30, 0x0f, 0x94, 0xbb, 0x36, 0xb0, 0x0e, 0x77, 0x59, 0x7a, 0xa0,
    0x17, 0xd0, 0x2d, 0xa0, 0x50, 0x2e, 0x42, 0xa5, 0xb1, 0x66, 0x5c, 0xce, 0x0c, 0x96, 0xcd, 0xcc,
    0x37, 0xf9, 0x0c, 0xb6, 0x31, 0xc4, 0x39, 0x0a, 0x25, 0xdd, 0xf6, 0x60, 0xed, 0x70, 0x67, 0x38,
    0x78, 0x58, 0xf2, 0xab, 0x03, 0x8c, 0x52, 0x43, 0x56, 0x78, 0xe8, 0xc8, 0x5e, 0xb4, 0x10, 0x45,
    0x64, 0x73, 0xa0, 0x9f, 0xc2, 0x9d, 0x46, 0x47, 0x4d, 0x3c, 0xf0, 0x4d, 0xf8, 0x0e, 0x6b, 0x07,
    0xbe, 0x21, 0x84, 0xdc, 0x37, 0xa9, 0x74, 0x98, 0xf9, 0xa6, 0xdf, 0xc1, 0x5e, 0xe9, 0xfc, 0xd3,
    0x02, 0xa5, 0x22, 0x2e, 0x6c, 0x19, 0x4a, 0xe3, 0xdc, 0x37, 0x3f, 0x92, 0xc7, 0xb0, 0x99, 0xe8,
    0x32, 0xe5, 0xdc, 0xdd, 0x26, 0xee, 0xda, 0x80, 0x65, 0x76, 0xf4, 0x0b, 0x70, 0x2a, 0xdc, 0xe2,
    0x48, 0x48, 0x24, 0x4f, 0x61, 0x2b, 0x31, 0x3c, 0xa5, 0x6b, 0x19, 0x98, 0xff, 0xdf, 0x58, 0x02,
    0x96, 0x5b, 0xd2, 0xdf, 0x2d, 0xb8, 0x75, 0x72, 0xf6, 0x03, 0x7a, 0x4a, 0x6b, 0x5f, 0xa3, 0x94,
    0x7c, 0x8a, 0xef, 0xa1, 0x7a, 0x00, 0x9d, 0x24, 0xcd, 0x67, 0x9c, 0x27, 0x5c, 0x0a, 0xb4, 0x5f,
    0x82, 0x71, 0xb8, 0x1c, 0xfb, 0xa6, 0x94, 0x1d, 0x96, 0x1f, 0xb5, 0x26, 0xe6, 0xcb, 0x30, 0xe2,
    0xbe, 0xbb, 0x6e, 0xfa, 0x96, 0x1f, 0x49, 0x0f, 0xb6, 0x23, 0x43, 0x60, 0xec, 0xbb, 0x1b, 0xc6,
    0xa9, 0x38, 0xd3, 0x11, 0x38, 0x13, 0x1d, 0xf8, 0x74, 0x21, 0x67, 0x79, 0x19, 0x9f, 0x94, 0x48,
    0x9a, 0xdb, 0xce, 0xf0, 0x6e, 0x25, 0xcd, 0xd4, 0x3a, 0x55, 0x17, 0x21, 0xe8, 0x6d, 0xb8, 0x55,
    0x81, 0x49, 0xcb, 0x45, 0x69, 0x81, 0x1d, 0x86, 0x39, 0xf6, 0xb5, 0xce, 0xd2, 0x97, 0x85, 0xa3,
    0xb6, 0xc9, 0xea, 0xfc, 0x1f, 0x08, 0xfc, 0xd2, 0x06, 0xbb, 0xaa, 0x21, 0xcf, 0x61, 0xc7, 0xf8,
    0xe8, 0xb6, 0x60, 0x92, 0xe1, 0xdc, 0xaf, 0xe0, 0x30, 0xfe, 0x76, 0x52, 0x1a, 0x7c, 0x13, 0xa8,
    0xd9, 0xd8, 0x67, 0x55, 0x1f, 0xd2, 0x07, 0xe0, 0x5e, 0x98, 0x01, 0x9a, 0x56, 0xd8, 0xac, 0x22,
    0x21, 0x14, 0xec, 0xf2, 0x54, 0x34, 0xa4, 0x26, 0x23, 0x43, 0xd8, 0x37, 0x90, 0x13, 0x54, 0x2a,
    0x10, 0x53, 0x79, 0x5a, 0x6b, 0x51, 0xa3, 0x8e, 0x7c, 0x04, 0xff, 0x6b, 0x92, 0x17, 0xdd, 0xbb,
    0x41, 0x4b, 0xff, 0xb0, 0x60, 0xa7, 0x92, 0x92, 0xee, 0x7b, 0xe0, 0xa3, 0x50, 0x81, 0x5a, 0x66,
    0xa3, 0x5c, 0x9c, 0xf5, 0x2d, 0x53, 0xc1, 0x1c, 0xa5, 0xe2, 0xf3, 0xd8, 0xa4, 0xb6, 0xc6, 0x4a,
    0x81, 0xd6, 0x9a, 0x18, 0x5f, 0x2f, 0x63, 0xcc, 0xd2, 0x2a, 0x05, 0xe4, 0x03, 0xe8, 0xea, 0x4b,
    0x17, 0x78, 0x5c, 0x05, 0x91, 0xf8, 0x12, 0x97, 0x26, 0x9b, 0x75, 0x76, 0x4d, 0xaa, 0xa7, 0x56,
    0x22, 0xa6, 0xac, 0x6d, 0x66, 0xbe, 0xe9, 0x29, 0x74, 0xeb, 0x85, 0x27, 0x83, 0xd5, 0x46, 0xd9,
    0xf5, 0x3e, 0x68, 0x36, 0xc1, 0x54, 0x70, 0xb5, 0x48, 0x30, 0x6b, 0x43, 0x29, 0xa0, 0xc7, 0xb0,
    0xdf, 0xd4, 0x4a, 0x33, 0x47, 0xfc, 0x6d, 0x0d, 0xb5, 0x14, 0x64, 0xf7, 0xb0, 0x5d, 0xdc, 0xc3,
    0xef, 0x61, 0x7f, 0x52, 0xad, 0xea, 0x51, 0x24, 0x94, 0xde, 0x44, 0x9f, 0x83, 0x9d, 0xce, 0xca,
    0x31, 0x86, 0xa8, 0xb0, 0xe1, 0x3e, 0x9e, 0x54, 0xd4, 0xaf, 0x5a, 0xac, 0x66, 0xfe, 0x62, 0x0b,
    0x36, 0x7e, 0xe6, 0xe1, 0x02, 0x69, 0x1f, 0xec, 0xaa, 0xe1, 0xca, 0x1c, 0x7c, 0x0c, 0x77, 0x6a,
    0xf1, 0x27, 0x82, 0xc7, 0x72, 0x16, 0x29, 0x7d, 0x09, 0x7d, 0xe3, 0xe2, 0x8f, 0xfd, 0x74, 0xed,
    0x74, 0x58, 0x45, 0x42, 0x7f, 0xb5, 0xc0, 0xce, 0x9d, 0x8e, 0xb9, 0xe2, 0xe4, 0x19, 0x6c, 0x79,
    0x29, 0xf9, 0x6c, 0x49, 0xdd, 0xbf, 0x3e, 0x3c, 0xd7, 0x72, 0x64, 0xb9, 0xbd, 0xde, 0xf1, 0x32,
|
||||
0x8b, 0x6b, 0x4a, 0x53, 0xdf, 0xf1, 0x8d, 0xfc, 0x58, 0xe1, 0x41, 0x7f, 0xcc, 0x46, 0x79, 0xb2,
|
||||
0x38, 0x93, 0x5e, 0x12, 0xc4, 0xfa, 0x1a, 0xe8, 0x3b, 0x98, 0x2d, 0xb6, 0x9c, 0x7c, 0x71, 0x26,
|
||||
0x9f, 0xc0, 0x26, 0xf7, 0xb4, 0x95, 0x09, 0xd6, 0x1d, 0xd2, 0x95, 0x60, 0x15, 0xa4, 0xe7, 0xc6,
|
||||
0x92, 0x65, 0x1e, 0x0f, 0x3c, 0xd8, 0x1e, 0x25, 0xc9, 0x51, 0xe4, 0xa3, 0x24, 0x5d, 0x80, 0x37,
|
||||
0x02, 0x2f, 0x62, 0xf4, 0x14, 0xfa, 0x4e, 0x8b, 0x38, 0xd9, 0x2a, 0x78, 0x1d, 0x48, 0x19, 0x88,
|
||||
0xa9, 0x63, 0x91, 0xbd, 0x6c, 0x30, 0x46, 0x17, 0x81, 0x54, 0xd2, 0x69, 0x93, 0xdb, 0xb0, 0x67,
|
||||
0x04, 0x5f, 0x45, 0x6a, 0x2c, 0x8e, 0xb8, 0x37, 0x43, 0x67, 0x4d, 0x5b, 0x8d, 0x92, 0x24, 0x4a,
|
||||
0x4e, 0xce, 0xcf, 0x25, 0x2a, 0xc7, 0x7f, 0xf0, 0x0c, 0xee, 0xde, 0xc0, 0x83, 0xec, 0x42, 0x27,
|
||||
0x93, 0x9e, 0xa1, 0xd3, 0xd2, 0xae, 0x6f, 0x84, 0x2c, 0x04, 0xd6, 0xf0, 0xcf, 0x36, 0x74, 0x52,
|
||||
0xdf, 0xa5, 0xf0, 0xc8, 0x11, 0x6c, 0xe7, 0xcf, 0x03, 0xe9, 0x35, 0xbe, 0x19, 0x66, 0x3b, 0xf6,
|
||||
0xee, 0x35, 0xbf, 0x27, 0xe9, 0x56, 0x7c, 0x99, 0x21, 0xea, 0x1d, 0x4b, 0xee, 0xad, 0x6c, 0xc4,
|
||||
0x72, 0x81, 0xf7, 0x0e, 0x9a, 0x95, 0x2b, 0x38, 0x61, 0xd8, 0x84, 0x53, 0x2c, 0xeb, 0x26, 0x9c,
|
||||
0xca, 0x96, 0x66, 0xe0, 0x94, 0xef, 0xda, 0x44, 0x25, 0xc8, 0xe7, 0xe4, 0x60, 0x65, 0x30, 0x2a,
|
||||
0x8f, 0x5e, 0xef, 0xbd, 0xda, 0x43, 0xeb, 0xb1, 0xf5, 0xe2, 0xc3, 0xbf, 0x2e, 0xfb, 0xd6, 0xbb,
|
||||
0xcb, 0xbe, 0xf5, 0xcf, 0x65, 0xdf, 0xfa, 0xed, 0xaa, 0xdf, 0x7a, 0x77, 0xd5, 0x6f, 0xfd, 0x7d,
|
||||
0xd5, 0x6f, 0x7d, 0xdb, 0xbb, 0xf9, 0xdf, 0xa5, 0xb3, 0x4d, 0xf3, 0xe7, 0xe9, 0xbf, 0x01, 0x00,
|
||||
0x00, 0xff, 0xff, 0x23, 0x56, 0x6f, 0xd6, 0x53, 0x09, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *HeadSyncRange) Marshal() (dAtA []byte, err error) {
@ -1364,20 +1456,27 @@ func (m *ObjectSyncMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
        copy(dAtA[i:], m.ObjectId)
        i = encodeVarintSpacesync(dAtA, i, uint64(len(m.ObjectId)))
        i--
        dAtA[i] = 0x22
        dAtA[i] = 0x2a
    }
    if len(m.Payload) > 0 {
        i -= len(m.Payload)
        copy(dAtA[i:], m.Payload)
        i = encodeVarintSpacesync(dAtA, i, uint64(len(m.Payload)))
        i--
        dAtA[i] = 0x1a
        dAtA[i] = 0x22
    }
    if len(m.ReplyId) > 0 {
        i -= len(m.ReplyId)
        copy(dAtA[i:], m.ReplyId)
        i = encodeVarintSpacesync(dAtA, i, uint64(len(m.ReplyId)))
        i--
        dAtA[i] = 0x1a
    }
    if len(m.RequestId) > 0 {
        i -= len(m.RequestId)
        copy(dAtA[i:], m.RequestId)
        i = encodeVarintSpacesync(dAtA, i, uint64(len(m.RequestId)))
        i--
        dAtA[i] = 0x12
    }
    if len(m.SpaceId) > 0 {
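// Editorial note on the tag bytes above (protobuf wire format; field numbers
// reconstructed from the Unmarshal cases further down): a single-byte tag
// encodes fieldNum<<3 | wireType. Inserting requestId and replyId into
// ObjectSyncMessage shifts the later fields, which is why the constants change:
//
//     0x12 = 2<<3 | 2 // requestId, length-delimited (new)
//     0x1a = 3<<3 | 2 // replyId (previously payload's tag)
//     0x22 = 4<<3 | 2 // payload (previously objectId's tag)
//     0x2a = 5<<3 | 2 // objectId
//
// MarshalToSizedBuffer writes fields back to front, so objectId appears first.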
@ -1868,6 +1967,43 @@ func (m *SettingsData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    return len(dAtA) - i, nil
}

func (m *SpaceSubscription) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    dAtA = make([]byte, size)
    n, err := m.MarshalToSizedBuffer(dAtA[:size])
    if err != nil {
        return nil, err
    }
    return dAtA[:n], nil
}

func (m *SpaceSubscription) MarshalTo(dAtA []byte) (int, error) {
    size := m.Size()
    return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *SpaceSubscription) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    i := len(dAtA)
    _ = i
    var l int
    _ = l
    if m.Action != 0 {
        i = encodeVarintSpacesync(dAtA, i, uint64(m.Action))
        i--
        dAtA[i] = 0x10
    }
    if len(m.SpaceIds) > 0 {
        for iNdEx := len(m.SpaceIds) - 1; iNdEx >= 0; iNdEx-- {
            i -= len(m.SpaceIds[iNdEx])
            copy(dAtA[i:], m.SpaceIds[iNdEx])
            i = encodeVarintSpacesync(dAtA, i, uint64(len(m.SpaceIds[iNdEx])))
            i--
            dAtA[i] = 0xa
        }
    }
    return len(dAtA) - i, nil
}

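// For reference, the message shape implied by the generated code above and by
// the Unmarshal cases below (an editorial reconstruction, not a quote from the
// .proto file):
//
//     message SpaceSubscription {
//         repeated string spaceIds = 1;
//         SpaceSubscriptionAction action = 2;
//     }
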
func encodeVarintSpacesync(dAtA []byte, offset int, v uint64) int {
    offset -= sovSpacesync(v)
    base := offset
@ -1980,6 +2116,10 @@ func (m *ObjectSyncMessage) Size() (n int) {
    if l > 0 {
        n += 1 + l + sovSpacesync(uint64(l))
    }
    l = len(m.RequestId)
    if l > 0 {
        n += 1 + l + sovSpacesync(uint64(l))
    }
    l = len(m.ReplyId)
    if l > 0 {
        n += 1 + l + sovSpacesync(uint64(l))
@ -2204,6 +2344,24 @@ func (m *SettingsData) Size() (n int) {
    return n
}

func (m *SpaceSubscription) Size() (n int) {
    if m == nil {
        return 0
    }
    var l int
    _ = l
    if len(m.SpaceIds) > 0 {
        for _, s := range m.SpaceIds {
            l = len(s)
            n += 1 + l + sovSpacesync(uint64(l))
        }
    }
    if m.Action != 0 {
        n += 1 + sovSpacesync(uint64(m.Action))
    }
    return n
}

func sovSpacesync(x uint64) (n int) {
    return (math_bits.Len64(x|1) + 6) / 7
}
@ -2830,6 +2988,38 @@ func (m *ObjectSyncMessage) Unmarshal(dAtA []byte) error {
            m.SpaceId = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 2:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowSpacesync
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= uint64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthSpacesync
            }
            postIndex := iNdEx + intStringLen
            if postIndex < 0 {
                return ErrInvalidLengthSpacesync
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.RequestId = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 3:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field ReplyId", wireType)
            }
@ -2861,7 +3051,7 @@ func (m *ObjectSyncMessage) Unmarshal(dAtA []byte) error {
            }
            m.ReplyId = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 3:
        case 4:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType)
            }
@ -2895,7 +3085,7 @@ func (m *ObjectSyncMessage) Unmarshal(dAtA []byte) error {
                m.Payload = []byte{}
            }
            iNdEx = postIndex
        case 4:
        case 5:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field ObjectId", wireType)
            }
@ -4261,6 +4451,107 @@ func (m *SettingsData) Unmarshal(dAtA []byte) error {
    }
    return nil
}
func (m *SpaceSubscription) Unmarshal(dAtA []byte) error {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        preIndex := iNdEx
        var wire uint64
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return ErrIntOverflowSpacesync
            }
            if iNdEx >= l {
                return io.ErrUnexpectedEOF
            }
            b := dAtA[iNdEx]
            iNdEx++
            wire |= uint64(b&0x7F) << shift
            if b < 0x80 {
                break
            }
        }
        fieldNum := int32(wire >> 3)
        wireType := int(wire & 0x7)
        if wireType == 4 {
            return fmt.Errorf("proto: SpaceSubscription: wiretype end group for non-group")
        }
        if fieldNum <= 0 {
            return fmt.Errorf("proto: SpaceSubscription: illegal tag %d (wire type %d)", fieldNum, wire)
        }
        switch fieldNum {
        case 1:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field SpaceIds", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowSpacesync
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= uint64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthSpacesync
            }
            postIndex := iNdEx + intStringLen
            if postIndex < 0 {
                return ErrInvalidLengthSpacesync
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.SpaceIds = append(m.SpaceIds, string(dAtA[iNdEx:postIndex]))
            iNdEx = postIndex
        case 2:
            if wireType != 0 {
                return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
            }
            m.Action = 0
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowSpacesync
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                m.Action |= SpaceSubscriptionAction(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
        default:
            iNdEx = preIndex
            skippy, err := skipSpacesync(dAtA[iNdEx:])
            if err != nil {
                return err
            }
            if (skippy < 0) || (iNdEx+skippy) < 0 {
                return ErrInvalidLengthSpacesync
            }
            if (iNdEx + skippy) > l {
                return io.ErrUnexpectedEOF
            }
            iNdEx += skippy
        }
    }

    if iNdEx > l {
        return io.ErrUnexpectedEOF
    }
    return nil
}
func skipSpacesync(dAtA []byte) (n int, err error) {
    l := len(dAtA)
    iNdEx := 0

14
commonspace/streammanager/streammanager.go
Normal file
@ -0,0 +1,14 @@
package streammanager

import (
    "context"
    "github.com/anytypeio/any-sync/app"
    "github.com/anytypeio/any-sync/commonspace/objectsync"
)

const CName = "common.commonspace.streammanager"

type StreamManagerProvider interface {
    app.Component
    NewStreamManager(ctx context.Context, spaceId string) (sm objectsync.StreamManager, err error)
}
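// An editorial sketch of a StreamManagerProvider implementation; exampleProvider
// and newSpaceStreamManager are assumptions for illustration, not part of this
// commit. A provider is an app component that hands out one StreamManager per
// space:
type exampleProvider struct{}

func (e *exampleProvider) Init(a *app.App) (err error) { return nil }
func (e *exampleProvider) Name() string                { return CName }

func (e *exampleProvider) NewStreamManager(ctx context.Context, spaceId string) (objectsync.StreamManager, error) {
    // bind a manager to the space id, e.g. on top of a stream pool
    return newSpaceStreamManager(spaceId), nil // newSpaceStreamManager is hypothetical
}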
1
go.mod
@ -31,6 +31,7 @@ require (
    go.uber.org/atomic v1.10.0
    go.uber.org/zap v1.24.0
    golang.org/x/exp v0.0.0-20230105202349-8879d0199aa3
    golang.org/x/net v0.3.0
    gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22
    gopkg.in/yaml.v3 v3.0.1
    storj.io/drpc v0.0.32

1
go.sum
@ -574,6 +574,7 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@ -65,6 +65,7 @@ func (d *dialer) UpdateAddrs(addrs map[string][]string) {
func (d *dialer) Dial(ctx context.Context, peerId string) (p peer.Peer, err error) {
    d.mu.RLock()
    defer d.mu.RUnlock()

    addrs, ok := d.peerAddrs[peerId]
    if !ok || len(addrs) == 0 {
        return nil, ErrArrdsNotFound
@ -73,10 +74,11 @@ func (d *dialer) Dial(ctx context.Context, peerId string) (p peer.Peer, err erro
        conn drpc.Conn
        sc   sec.SecureConn
    )
    log.InfoCtx(ctx, "dial", zap.String("peerId", peerId), zap.Strings("addrs", addrs))
    for _, addr := range addrs {
        conn, sc, err = d.handshake(ctx, addr)
        if err != nil {
            log.Info("can't connect to host", zap.String("addr", addr), zap.Error(err))
            log.InfoCtx(ctx, "can't connect to host", zap.String("addr", addr), zap.Error(err))
        } else {
            break
        }

@ -2,12 +2,16 @@ package peer

import (
    "context"
    "github.com/anytypeio/any-sync/app/logger"
    "github.com/libp2p/go-libp2p/core/sec"
    "go.uber.org/zap"
    "storj.io/drpc"
    "sync/atomic"
    "time"
)

var log = logger.NewNamed("peer")

func NewPeer(sc sec.SecureConn, conn drpc.Conn) Peer {
    return &peer{
        id: sc.RemotePeer().String(),
@ -54,6 +58,25 @@ func (p *peer) NewStream(ctx context.Context, rpc string, enc drpc.Encoding) (dr
    return p.Conn.NewStream(ctx, rpc, enc)
}

func (p *peer) Read(b []byte) (n int, err error) {
    if n, err = p.sc.Read(b); err != nil {
        p.UpdateLastUsage()
    }
    return
}

func (p *peer) Write(b []byte) (n int, err error) {
    if n, err = p.sc.Write(b); err != nil {
        p.UpdateLastUsage()
    }
    return
}

func (p *peer) UpdateLastUsage() {
    atomic.StoreInt64(&p.lastUsage, time.Now().Unix())
}

func (p *peer) Close() (err error) {
    log.Debug("peer close", zap.String("peerId", p.id))
    return p.Conn.Close()
}

@ -3,31 +3,16 @@ package pool
import (
    "context"
    "errors"
    "github.com/anytypeio/any-sync/app"
    "github.com/anytypeio/any-sync/app/logger"
    "github.com/anytypeio/any-sync/app/ocache"
    "github.com/anytypeio/any-sync/metric"
    "github.com/anytypeio/any-sync/net/dialer"
    "github.com/anytypeio/any-sync/net/peer"
    "github.com/prometheus/client_golang/prometheus"
    "math/rand"
    "time"
)

const (
    CName = "common.net.pool"
)

var log = logger.NewNamed(CName)

var (
    ErrUnableToConnect = errors.New("unable to connect")
)

func New() Pool {
    return &pool{}
}

// Pool creates and caches outgoing connections
type Pool interface {
    // Get looks up the peer among existing connections, or creates and caches a new one
@ -38,8 +23,6 @@ type Pool interface {
    GetOneOf(ctx context.Context, peerIds []string) (peer.Peer, error)

    DialOneOf(ctx context.Context, peerIds []string) (peer.Peer, error)

    app.ComponentRunnable
}

type pool struct {
@ -47,24 +30,6 @@ type pool struct {
    dialer dialer.Dialer
}

func (p *pool) Init(a *app.App) (err error) {
    p.dialer = a.MustComponent(dialer.CName).(dialer.Dialer)
    var reg *prometheus.Registry
    if m := a.Component(metric.CName); m != nil {
        reg = m.(metric.Metric).Registry()
    }
    p.cache = ocache.New(
        func(ctx context.Context, id string) (value ocache.Object, err error) {
            return p.dialer.Dial(ctx, id)
        },
        ocache.WithLogger(log.Sugar()),
        ocache.WithGCPeriod(time.Minute),
        ocache.WithTTL(time.Minute*5),
        ocache.WithPrometheus(reg, "netpool", "cache"),
    )
    return nil
}

func (p *pool) Name() (name string) {
    return CName
}
@ -84,7 +49,7 @@ func (p *pool) Get(ctx context.Context, id string) (peer.Peer, error) {
    default:
        return pr, nil
    }
    p.cache.Remove(id)
    _, _ = p.cache.Remove(id)
    return p.Get(ctx, id)
}

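// Editorial note on the Get change above: when the cached entry turns out to
// be unusable (the guarding condition sits outside this hunk), the pool now
// explicitly discards ocache.Remove's return values, evicts the entry, and
// re-dials through a recursive Get, so callers receive a usable peer.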
@ -123,11 +123,11 @@ func TestPool_GetOneOf(t *testing.T) {

func newFixture(t *testing.T) *fixture {
    fx := &fixture{
        Pool:    New(),
        Service: New(),
        Dialer:  &dialerMock{},
    }
    a := new(app.App)
    a.Register(fx.Pool)
    a.Register(fx.Service)
    a.Register(fx.Dialer)
    require.NoError(t, a.Start(context.Background()))
    fx.a = a
@ -140,7 +140,7 @@ func (fx *fixture) Finish() {
}

type fixture struct {
    Pool
    Service
    Dialer *dialerMock
    a      *app.App
    t      *testing.T

68
net/pool/poolservice.go
Normal file
@ -0,0 +1,68 @@
package pool

import (
    "context"
    "github.com/anytypeio/any-sync/app"
    "github.com/anytypeio/any-sync/app/logger"
    "github.com/anytypeio/any-sync/app/ocache"
    "github.com/anytypeio/any-sync/metric"
    "github.com/anytypeio/any-sync/net/dialer"
    "github.com/prometheus/client_golang/prometheus"
    "time"
)

const (
    CName = "common.net.pool"
)

var log = logger.NewNamed(CName)

func New() Service {
    return &poolService{}
}

type Service interface {
    Pool
    NewPool(name string) Pool
    app.ComponentRunnable
}

type poolService struct {
    // default pool
    *pool
    dialer    dialer.Dialer
    metricReg *prometheus.Registry
}

func (p *poolService) Init(a *app.App) (err error) {
    p.pool = &pool{}
    p.dialer = a.MustComponent(dialer.CName).(dialer.Dialer)
    if m := a.Component(metric.CName); m != nil {
        p.metricReg = m.(metric.Metric).Registry()
    }
    p.pool.cache = ocache.New(
        func(ctx context.Context, id string) (value ocache.Object, err error) {
            return p.dialer.Dial(ctx, id)
        },
        ocache.WithLogger(log.Sugar()),
        ocache.WithGCPeriod(time.Minute),
        ocache.WithTTL(time.Minute*5),
        ocache.WithPrometheus(p.metricReg, "netpool", "default"),
    )
    return nil
}

func (p *poolService) NewPool(name string) Pool {
    return &pool{
        dialer: p.dialer,
        cache: ocache.New(
            func(ctx context.Context, id string) (value ocache.Object, err error) {
                return p.dialer.Dial(ctx, id)
            },
            ocache.WithLogger(log.Sugar()),
            ocache.WithGCPeriod(time.Minute),
            ocache.WithTTL(time.Minute*5),
            ocache.WithPrometheus(p.metricReg, "netpool", name),
        ),
    }
}
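// An editorial usage sketch: after the app has started, a component can use
// the default pool or carve out an isolated, separately metered one. The pool
// name and peer id below are illustrative assumptions.
func examplePoolUsage(ctx context.Context, a *app.App) (peer.Peer, error) {
    svc := a.MustComponent(CName).(Service)
    streamsPool := svc.NewPool("streams") // cache metrics registered as "netpool"/"streams"
    return streamsPool.Get(ctx, "somePeerId")
}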
@ -78,8 +78,8 @@ func (s *BaseDrpcServer) serve(ctx context.Context, lis secureservice.ContextLis
            }
            continue
        }
        if _, ok := err.(secureservice.HandshakeError); ok {
            l.Warn("listener handshake error", zap.Error(err))
        if herr, ok := err.(secureservice.HandshakeError); ok {
            l.Warn("listener handshake error", zap.Error(herr), zap.String("remoteAddr", herr.RemoteAddr()))
            continue
        }
        l.Error("listener accept error", zap.Error(err))

@ -12,7 +12,18 @@ import (
    "net"
)

type HandshakeError error
type HandshakeError struct {
    remoteAddr string
    err        error
}

func (he HandshakeError) RemoteAddr() string {
    return he.remoteAddr
}

func (he HandshakeError) Error() string {
    return he.err.Error()
}

const CName = "common.net.secure"

@ -49,7 +49,10 @@ func (p *tlsListener) Accept(ctx context.Context) (context.Context, net.Conn, er
func (p *tlsListener) upgradeConn(ctx context.Context, conn net.Conn) (context.Context, net.Conn, error) {
    secure, err := p.tr.SecureInbound(ctx, conn, "")
    if err != nil {
        return nil, nil, HandshakeError(err)
        return nil, nil, HandshakeError{
            remoteAddr: conn.RemoteAddr().String(),
            err:        err,
        }
    }
    ctx = peer.CtxWithPeerId(ctx, secure.RemotePeer().String())
    return ctx, secure, nil

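// An editorial sketch (the function name is assumed, and the "errors" import
// is required): because HandshakeError is now a struct implementing error, it
// can also be matched with errors.As, not only the type assertion used in
// serve() above.
func isHandshakeFailure(err error) (remoteAddr string, ok bool) {
    var he HandshakeError
    if errors.As(err, &he) {
        return he.RemoteAddr(), true
    }
    return "", false
}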
17
net/streampool/context.go
Normal file
@ -0,0 +1,17 @@
package streampool

import (
    "context"
    "github.com/anytypeio/any-sync/net/peer"
)

type streamCtxKey uint

const (
    streamCtxKeyStreamId streamCtxKey = iota
)

func streamCtx(ctx context.Context, streamId uint32, peerId string) context.Context {
    ctx = peer.CtxWithPeerId(ctx, peerId)
    return context.WithValue(ctx, streamCtxKeyStreamId, streamId)
}
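// A small editorial sketch of reading the stream id back out of a context
// produced by streamCtx; the pool does the same inline in AddTagsCtx and
// RemoveTagsCtx below. The helper name is an assumption:
func streamIdFromCtx(ctx context.Context) (streamId uint32, ok bool) {
    streamId, ok = ctx.Value(streamCtxKeyStreamId).(uint32)
    return
}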
34
net/streampool/encoding.go
Normal file
@ -0,0 +1,34 @@
package streampool

import (
    "errors"
    "github.com/gogo/protobuf/proto"
    "storj.io/drpc"
)

var (
    // EncodingProto is a drpc.Encoding implementation for gogo protobuf messages
    EncodingProto drpc.Encoding = protoEncoding{}
)

var (
    errNotAProtoMsg = errors.New("encoding: not a proto message")
)

type protoEncoding struct{}

func (p protoEncoding) Marshal(msg drpc.Message) ([]byte, error) {
    pmsg, ok := msg.(proto.Message)
    if !ok {
        return nil, errNotAProtoMsg
    }
    return proto.Marshal(pmsg)
}

func (p protoEncoding) Unmarshal(buf []byte, msg drpc.Message) error {
    pmsg, ok := msg.(proto.Message)
    if !ok {
        return errNotAProtoMsg
    }
    return proto.Unmarshal(buf, pmsg)
}
24
net/streampool/encoding_test.go
Normal file
@ -0,0 +1,24 @@
package streampool

import (
    "github.com/anytypeio/any-sync/net/streampool/testservice"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "testing"
)

func TestProtoEncoding(t *testing.T) {
    t.Run("not a proto err", func(t *testing.T) {
        _, err := EncodingProto.Marshal("string")
        assert.Error(t, err)
        err = EncodingProto.Unmarshal(nil, "sss")
        assert.Error(t, err)
    })
    t.Run("encode", func(t *testing.T) {
        data, err := EncodingProto.Marshal(&testservice.StreamMessage{ReqData: "1"})
        require.NoError(t, err)
        msg := &testservice.StreamMessage{}
        require.NoError(t, EncodingProto.Unmarshal(data, msg))
        assert.Equal(t, "1", msg.ReqData)
    })
}
44
net/streampool/sendpool.go
Normal file
@ -0,0 +1,44 @@
package streampool

import (
    "context"
    "github.com/cheggaaa/mb/v3"
    "go.uber.org/zap"
)

// newStreamSender creates a new sendPool.
// workers: how many goroutines execute queued tasks
// maxSize: the queue size limit
func newStreamSender(workers, maxSize int) *sendPool {
    ss := &sendPool{
        batch: mb.New[func()](maxSize),
    }
    for i := 0; i < workers; i++ {
        go ss.sendLoop()
    }
    return ss
}

// sendPool executes incoming send tasks in parallel
type sendPool struct {
    batch *mb.MB[func()]
}

func (ss *sendPool) Add(ctx context.Context, f ...func()) (err error) {
    return ss.batch.Add(ctx, f...)
}

func (ss *sendPool) sendLoop() {
    for {
        f, err := ss.batch.WaitOne(context.Background())
        if err != nil {
            log.Debug("close send loop", zap.Error(err))
            return
        }
        f()
    }
}

func (ss *sendPool) Close() (err error) {
    return ss.batch.Close()
}
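// An editorial usage sketch: tasks queued via Add are drained concurrently by
// the worker goroutines, and Close shuts the queue down, which makes WaitOne
// fail and every sendLoop exit.
func exampleSendPool(ctx context.Context) error {
    sp := newStreamSender(4, 100) // 4 workers, queue capped at 100 tasks
    defer sp.Close()
    return sp.Add(ctx, func() {
        // a queued send task; in the stream pool this writes one message to one stream
    })
}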
53
net/streampool/stream.go
Normal file
@ -0,0 +1,53 @@
package streampool

import (
    "context"
    "github.com/anytypeio/any-sync/app/logger"
    "go.uber.org/zap"
    "storj.io/drpc"
    "sync/atomic"
)

type stream struct {
    peerId   string
    stream   drpc.Stream
    pool     *streamPool
    streamId uint32
    closed   atomic.Bool
    l        logger.CtxLogger
    tags     []string
}

func (sr *stream) write(msg drpc.Message) (err error) {
    if err = sr.stream.MsgSend(msg, EncodingProto); err != nil {
        sr.streamClose()
    }
    return err
}

func (sr *stream) readLoop() error {
    defer func() {
        sr.streamClose()
    }()
    sr.l.Debug("stream read started")
    for {
        msg := sr.pool.handler.NewReadMessage()
        if err := sr.stream.MsgRecv(msg, EncodingProto); err != nil {
            sr.l.Info("msg receive error", zap.Error(err))
            return err
        }
        ctx := streamCtx(context.Background(), sr.streamId, sr.peerId)
        ctx = logger.CtxWithFields(ctx, zap.String("peerId", sr.peerId))
        if err := sr.pool.handler.HandleMessage(ctx, sr.peerId, msg); err != nil {
            sr.l.Info("msg handle error", zap.Error(err))
            return err
        }
    }
}

func (sr *stream) streamClose() {
    if !sr.closed.Swap(true) {
        _ = sr.stream.Close()
        sr.pool.removeStream(sr.streamId)
    }
}
347
net/streampool/streampool.go
Normal file
@ -0,0 +1,347 @@
package streampool

import (
    "fmt"
    "github.com/anytypeio/any-sync/net/peer"
    "github.com/anytypeio/any-sync/net/pool"
    "go.uber.org/zap"
    "golang.org/x/exp/slices"
    "golang.org/x/net/context"
    "storj.io/drpc"
    "sync"
)

// StreamHandler handles incoming messages from streams
type StreamHandler interface {
    // OpenStream opens a stream with the given peer
    OpenStream(ctx context.Context, p peer.Peer) (stream drpc.Stream, tags []string, err error)
    // HandleMessage handles an incoming message
    HandleMessage(ctx context.Context, peerId string, msg drpc.Message) (err error)
    // NewReadMessage creates a new empty message to unmarshal incoming data into
    NewReadMessage() drpc.Message
}

// StreamPool keeps and reads streams
type StreamPool interface {
    // AddStream adds a new outgoing stream to the pool
    AddStream(peerId string, stream drpc.Stream, tags ...string)
    // ReadStream adds a new incoming stream and reads it synchronously
    ReadStream(peerId string, stream drpc.Stream, tags ...string) (err error)
    // Send sends a message to the given peers. A stream is opened if none is cached yet. Works async.
    Send(ctx context.Context, msg drpc.Message, peers ...peer.Peer) (err error)
    // SendById sends a message to the given peerIds. Works only if a stream already exists
    SendById(ctx context.Context, msg drpc.Message, peerIds ...string) (err error)
    // Broadcast sends a message to all peers with the given tags. Works async.
    Broadcast(ctx context.Context, msg drpc.Message, tags ...string) (err error)
    // AddTagsCtx adds tags to the stream extracted from ctx
    AddTagsCtx(ctx context.Context, tags ...string) error
    // RemoveTagsCtx removes tags from the stream extracted from ctx
    RemoveTagsCtx(ctx context.Context, tags ...string) error
    // Close closes all streams
    Close() error
}

type streamPool struct {
    handler         StreamHandler
    streamIdsByPeer map[string][]uint32
    streamIdsByTag  map[string][]uint32
    streams         map[uint32]*stream
    opening         map[string]*openingProcess
    exec            *sendPool
    mu              sync.RWMutex
    lastStreamId    uint32
}

type openingProcess struct {
    ch  chan struct{}
    err error
}
type handleMessage struct {
    ctx    context.Context
    msg    drpc.Message
    peerId string
}

func (s *streamPool) ReadStream(peerId string, drpcStream drpc.Stream, tags ...string) error {
    st := s.addStream(peerId, drpcStream, tags...)
    return st.readLoop()
}

func (s *streamPool) AddStream(peerId string, drpcStream drpc.Stream, tags ...string) {
    st := s.addStream(peerId, drpcStream, tags...)
    go func() {
        _ = st.readLoop()
    }()
}

func (s *streamPool) addStream(peerId string, drpcStream drpc.Stream, tags ...string) *stream {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.lastStreamId++
    streamId := s.lastStreamId
    st := &stream{
        peerId:   peerId,
        stream:   drpcStream,
        pool:     s,
        streamId: streamId,
        l:        log.With(zap.String("peerId", peerId), zap.Uint32("streamId", streamId)),
        tags:     tags,
    }
    s.streams[streamId] = st
    s.streamIdsByPeer[peerId] = append(s.streamIdsByPeer[peerId], streamId)
    for _, tag := range tags {
        s.streamIdsByTag[tag] = append(s.streamIdsByTag[tag], streamId)
    }
    return st
}

func (s *streamPool) Send(ctx context.Context, msg drpc.Message, peers ...peer.Peer) (err error) {
    var sendOneFunc = func(sp peer.Peer) func() {
        return func() {
            if e := s.sendOne(ctx, sp, msg); e != nil {
                log.InfoCtx(ctx, "send peer error", zap.Error(e), zap.String("peerId", sp.Id()))
            } else {
                log.DebugCtx(ctx, "send success", zap.String("peerId", sp.Id()))
            }
        }
    }

    for _, p := range peers {
        if err = s.exec.Add(ctx, sendOneFunc(p)); err != nil {
            return
        }
    }
    return
}

func (s *streamPool) SendById(ctx context.Context, msg drpc.Message, peerIds ...string) (err error) {
    s.mu.Lock()
    var streamsByPeer [][]*stream
    for _, peerId := range peerIds {
        var streams []*stream
        for _, streamId := range s.streamIdsByPeer[peerId] {
            streams = append(streams, s.streams[streamId])
        }
        if len(streams) != 0 {
            streamsByPeer = append(streamsByPeer, streams)
        }
    }
    s.mu.Unlock()

    var sendStreamsFunc = func(streams []*stream) func() {
        return func() {
            for _, st := range streams {
                if e := st.write(msg); e != nil {
                    st.l.Debug("sendById write error", zap.Error(e))
                } else {
                    st.l.DebugCtx(ctx, "sendById success")
                    return
                }
            }
        }
    }

    for _, streams := range streamsByPeer {
        if err = s.exec.Add(ctx, sendStreamsFunc(streams)); err != nil {
            return
        }
    }
    if len(streamsByPeer) == 0 {
        return pool.ErrUnableToConnect
    }
    return
}

func (s *streamPool) sendOne(ctx context.Context, p peer.Peer, msg drpc.Message) (err error) {
    // get all streams related to the peer
    streams, err := s.getStreams(ctx, p)
    if err != nil {
        return
    }
    for _, st := range streams {
        if err = st.write(msg); err != nil {
            st.l.InfoCtx(ctx, "sendOne write error", zap.Error(err), zap.Int("streams", len(streams)))
            // continue with the next stream
            continue
        } else {
            st.l.DebugCtx(ctx, "sendOne success")
            // stop sending on success
            break
        }
    }
    return
}

func (s *streamPool) getStreams(ctx context.Context, p peer.Peer) (streams []*stream, err error) {
    s.mu.Lock()
    // check cached streams
    streamIds := s.streamIdsByPeer[p.Id()]
    for _, streamId := range streamIds {
        streams = append(streams, s.streams[streamId])
    }
    var op *openingProcess
    // no cached streams found
    if len(streams) == 0 {
        // start an opening process
        op = s.openStream(ctx, p)
    }
    s.mu.Unlock()

    // a non-nil op means we should wait for the stream opening and then try again
    if op != nil {
        select {
        case <-op.ch:
            if op.err != nil {
                return nil, op.err
            }
            return s.getStreams(ctx, p)
        case <-ctx.Done():
            return nil, ctx.Err()
        }
    }
    return streams, nil
}

func (s *streamPool) openStream(ctx context.Context, p peer.Peer) *openingProcess {
    if op, ok := s.opening[p.Id()]; ok {
        // an opening process for this peer already exists; return it
        return op
    }
    op := &openingProcess{
        ch: make(chan struct{}),
    }
    s.opening[p.Id()] = op
    go func() {
        // open the stream in a separate goroutine to avoid locking the whole pool
        defer func() {
            s.mu.Lock()
            defer s.mu.Unlock()
            close(op.ch)
            delete(s.opening, p.Id())
        }()
        // open a new stream and add it to the pool
        st, tags, err := s.handler.OpenStream(ctx, p)
        if err != nil {
            op.err = err
            return
        }
        s.AddStream(p.Id(), st, tags...)
    }()
    return op
}

func (s *streamPool) Broadcast(ctx context.Context, msg drpc.Message, tags ...string) (err error) {
    s.mu.Lock()
    var streams []*stream
    for _, tag := range tags {
        for _, streamId := range s.streamIdsByTag[tag] {
            streams = append(streams, s.streams[streamId])
        }
    }
    s.mu.Unlock()
    var sendStreamFunc = func(st *stream) func() {
        return func() {
            if e := st.write(msg); e != nil {
                st.l.InfoCtx(ctx, "broadcast write error", zap.Error(e))
            } else {
                st.l.DebugCtx(ctx, "broadcast success")
            }
        }
    }
    for _, st := range streams {
        if st == nil {
            panic("nil stream")
        }
        if err = s.exec.Add(ctx, sendStreamFunc(st)); err != nil {
            return err
        }
    }
    return
}

func (s *streamPool) AddTagsCtx(ctx context.Context, tags ...string) error {
    streamId, ok := ctx.Value(streamCtxKeyStreamId).(uint32)
    if !ok {
        return fmt.Errorf("context without streamId")
    }
    s.mu.Lock()
    defer s.mu.Unlock()
    st, ok := s.streams[streamId]
    if !ok {
        return fmt.Errorf("stream not found")
    }
    var newTags = make([]string, 0, len(tags))
    for _, newTag := range tags {
        if !slices.Contains(st.tags, newTag) {
            st.tags = append(st.tags, newTag)
            newTags = append(newTags, newTag)
        }
    }
    for _, newTag := range newTags {
        s.streamIdsByTag[newTag] = append(s.streamIdsByTag[newTag], streamId)
    }
    return nil
}

func (s *streamPool) RemoveTagsCtx(ctx context.Context, tags ...string) error {
    streamId, ok := ctx.Value(streamCtxKeyStreamId).(uint32)
    if !ok {
        return fmt.Errorf("context without streamId")
    }
    s.mu.Lock()
    defer s.mu.Unlock()
    st, ok := s.streams[streamId]
    if !ok {
        return fmt.Errorf("stream not found")
    }

    var filtered = st.tags[:0]
    var toRemove = make([]string, 0, len(tags))
    for _, t := range st.tags {
        if slices.Contains(tags, t) {
            toRemove = append(toRemove, t)
        } else {
            filtered = append(filtered, t)
        }
    }
    st.tags = filtered
    for _, t := range toRemove {
        removeStream(s.streamIdsByTag, t, streamId)
    }
    return nil
}

func (s *streamPool) removeStream(streamId uint32) {
    s.mu.Lock()
    defer s.mu.Unlock()
    st := s.streams[streamId]
    if st == nil {
        log.Fatal("removeStream: stream does not exist", zap.Uint32("streamId", streamId))
    }

    removeStream(s.streamIdsByPeer, st.peerId, streamId)
    for _, tag := range st.tags {
        removeStream(s.streamIdsByTag, tag, streamId)
    }

    delete(s.streams, streamId)
    st.l.Debug("stream removed", zap.Strings("tags", st.tags))
}

func (s *streamPool) Close() (err error) {
    return s.exec.Close()
}

func removeStream(m map[string][]uint32, key string, streamId uint32) {
    streamIds := m[key]
    idx := slices.Index(streamIds, streamId)
    if idx == -1 {
        log.Fatal("removeStream: streamId does not exist", zap.Uint32("streamId", streamId))
    }
    streamIds = slices.Delete(streamIds, idx, idx+1)
    if len(streamIds) == 0 {
        delete(m, key)
    } else {
        m[key] = streamIds
    }
}
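// An editorial wiring sketch; exampleServeStream is an assumed name. Outgoing
// traffic needs no explicit stream management: Send opens streams lazily via
// StreamHandler.OpenStream. For incoming traffic, a drpc RPC method hands its
// server stream to ReadStream, which blocks until the stream dies:
func exampleServeStream(sp StreamPool, peerId string, st drpc.Stream) error {
    // tag the stream so Broadcast(ctx, msg, "space1") can reach it later
    return sp.ReadStream(peerId, st, "space1")
}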
262
net/streampool/streampool_test.go
Normal file
@ -0,0 +1,262 @@
package streampool

import (
    "fmt"
    "github.com/anytypeio/any-sync/net/peer"
    "github.com/anytypeio/any-sync/net/rpc/rpctest"
    "github.com/anytypeio/any-sync/net/streampool/testservice"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "golang.org/x/net/context"
    "sort"
    "storj.io/drpc"
    "sync"
    "sync/atomic"
    "testing"
    "time"
)

var ctx = context.Background()

func newClientStream(t *testing.T, fx *fixture, peerId string) (st testservice.DRPCTest_TestStreamClient, p peer.Peer) {
    p, err := fx.tp.Dial(ctx, peerId)
    require.NoError(t, err)
    s, err := testservice.NewDRPCTestClient(p).TestStream(ctx)
    require.NoError(t, err)
    return s, p
}

func TestStreamPool_AddStream(t *testing.T) {

    t.Run("broadcast incoming", func(t *testing.T) {
        fx := newFixture(t)
        defer fx.Finish(t)

        s1, _ := newClientStream(t, fx, "p1")
        fx.AddStream("p1", s1, "space1", "common")
        s2, _ := newClientStream(t, fx, "p2")
        fx.AddStream("p2", s2, "space2", "common")

        require.NoError(t, fx.Broadcast(ctx, &testservice.StreamMessage{ReqData: "space1"}, "space1"))
        require.NoError(t, fx.Broadcast(ctx, &testservice.StreamMessage{ReqData: "space2"}, "space2"))
        require.NoError(t, fx.Broadcast(ctx, &testservice.StreamMessage{ReqData: "common"}, "common"))

        var serverResults []string
        for i := 0; i < 4; i++ {
            select {
            case msg := <-fx.tsh.receiveCh:
                serverResults = append(serverResults, msg.ReqData)
            case <-time.After(time.Second):
                require.NoError(t, fmt.Errorf("timeout"))
            }
        }

        sort.Strings(serverResults)
        assert.Equal(t, []string{"common", "common", "space1", "space2"}, serverResults)

        assert.NoError(t, s1.Close())
        assert.NoError(t, s2.Close())
    })

    t.Run("send incoming", func(t *testing.T) {
        fx := newFixture(t)
        defer fx.Finish(t)

        s1, p1 := newClientStream(t, fx, "p1")
        defer s1.Close()
        fx.AddStream("p1", s1, "space1", "common")

        require.NoError(t, fx.Send(ctx, &testservice.StreamMessage{ReqData: "test"}, p1))
        var msg *testservice.StreamMessage
        select {
        case msg = <-fx.tsh.receiveCh:
        case <-time.After(time.Second):
            require.NoError(t, fmt.Errorf("timeout"))
        }
        assert.Equal(t, "test", msg.ReqData)
    })
}

func TestStreamPool_Send(t *testing.T) {
    t.Run("open stream", func(t *testing.T) {
        fx := newFixture(t)
        defer fx.Finish(t)

        p, err := fx.tp.Dial(ctx, "p1")
        require.NoError(t, err)

        require.NoError(t, fx.Send(ctx, &testservice.StreamMessage{ReqData: "should open stream"}, p))

        var msg *testservice.StreamMessage
        select {
        case msg = <-fx.tsh.receiveCh:
        case <-time.After(time.Second):
            require.NoError(t, fmt.Errorf("timeout"))
        }
        assert.Equal(t, "should open stream", msg.ReqData)
    })
    t.Run("parallel open stream", func(t *testing.T) {
        fx := newFixture(t)
        defer fx.Finish(t)

        p, err := fx.tp.Dial(ctx, "p1")
        require.NoError(t, err)

        fx.th.streamOpenDelay = time.Second / 3

        var numMsgs = 5

        for i := 0; i < numMsgs; i++ {
            go require.NoError(t, fx.Send(ctx, &testservice.StreamMessage{ReqData: "should open stream"}, p))
        }

        var msgs []*testservice.StreamMessage
        for i := 0; i < numMsgs; i++ {
            select {
            case msg := <-fx.tsh.receiveCh:
                msgs = append(msgs, msg)
            case <-time.After(time.Second):
                require.NoError(t, fmt.Errorf("timeout"))
            }
        }
        assert.Len(t, msgs, numMsgs)
        // make sure that we have only one stream
        assert.Equal(t, int32(1), fx.tsh.streamsCount.Load())
    })
    t.Run("parallel open stream error", func(t *testing.T) {
        fx := newFixture(t)
        defer fx.Finish(t)

        p, err := fx.tp.Dial(ctx, "p1")
        require.NoError(t, err)
        _ = p.Close()

        fx.th.streamOpenDelay = time.Second / 3

        var numMsgs = 5

        var wg sync.WaitGroup
        for i := 0; i < numMsgs; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                assert.Error(t, fx.StreamPool.(*streamPool).sendOne(ctx, p, &testservice.StreamMessage{ReqData: "should open stream"}))
            }()
        }
        wg.Wait()
    })
}

func TestStreamPool_SendById(t *testing.T) {
    fx := newFixture(t)
    defer fx.Finish(t)

    s1, _ := newClientStream(t, fx, "p1")
    defer s1.Close()
    fx.AddStream("p1", s1, "space1", "common")

    require.NoError(t, fx.SendById(ctx, &testservice.StreamMessage{ReqData: "test"}, "p1"))
    var msg *testservice.StreamMessage
    select {
    case msg = <-fx.tsh.receiveCh:
    case <-time.After(time.Second):
        require.NoError(t, fmt.Errorf("timeout"))
    }
    assert.Equal(t, "test", msg.ReqData)
}

func TestStreamPool_Tags(t *testing.T) {
    fx := newFixture(t)
    defer fx.Finish(t)

    s1, _ := newClientStream(t, fx, "p1")
    defer s1.Close()
    fx.AddStream("p1", s1, "t1")

    s2, _ := newClientStream(t, fx, "p2")
    defer s2.Close()
    fx.AddStream("p2", s2, "t2")

    err := fx.AddTagsCtx(streamCtx(ctx, 1, "p1"), "t3", "t3")
    require.NoError(t, err)
    assert.Equal(t, []uint32{1}, fx.StreamPool.(*streamPool).streamIdsByTag["t3"])

    err = fx.RemoveTagsCtx(streamCtx(ctx, 2, "p2"), "t2")
    require.NoError(t, err)
    assert.Len(t, fx.StreamPool.(*streamPool).streamIdsByTag["t2"], 0)

}

func newFixture(t *testing.T) *fixture {
    fx := &fixture{}
    ts := rpctest.NewTestServer()
    fx.tsh = &testServerHandler{receiveCh: make(chan *testservice.StreamMessage, 100)}
    require.NoError(t, testservice.DRPCRegisterTest(ts, fx.tsh))
    fx.tp = rpctest.NewTestPool().WithServer(ts)
    fx.th = &testHandler{}
    fx.StreamPool = New().NewStreamPool(fx.th)
    return fx
}

type fixture struct {
    StreamPool
    tp  *rpctest.TestPool
    th  *testHandler
    tsh *testServerHandler
}

func (fx *fixture) Finish(t *testing.T) {
    require.NoError(t, fx.Close())
    require.NoError(t, fx.tp.Close(ctx))
}

type testHandler struct {
    streamOpenDelay  time.Duration
    incomingMessages []drpc.Message
    mu               sync.Mutex
}

func (t *testHandler) OpenStream(ctx context.Context, p peer.Peer) (stream drpc.Stream, tags []string, err error) {
    if t.streamOpenDelay > 0 {
        time.Sleep(t.streamOpenDelay)
    }
    stream, err = testservice.NewDRPCTestClient(p).TestStream(ctx)
    return
}

func (t *testHandler) HandleMessage(ctx context.Context, peerId string, msg drpc.Message) (err error) {
    t.mu.Lock()
    defer t.mu.Unlock()
    t.incomingMessages = append(t.incomingMessages, msg)
    return nil
}

func (t *testHandler) DRPCEncoding() drpc.Encoding {
    return EncodingProto
}

func (t *testHandler) NewReadMessage() drpc.Message {
    return new(testservice.StreamMessage)
}

type testServerHandler struct {
    receiveCh    chan *testservice.StreamMessage
    streamsCount atomic.Int32
    mu           sync.Mutex
}

func (t *testServerHandler) TestStream(st testservice.DRPCTest_TestStreamStream) error {
    t.streamsCount.Add(1)
    defer t.streamsCount.Add(-1)
    for {
        msg, err := st.Recv()
        if err != nil {
            return err
        }
        t.receiveCh <- msg
        if err = st.Send(msg); err != nil {
            return err
        }
    }
}
42
net/streampool/streampoolservice.go
Normal file
@ -0,0 +1,42 @@
package streampool

import (
    "github.com/anytypeio/any-sync/app"
    "github.com/anytypeio/any-sync/app/logger"
)

const CName = "common.net.streampool"

var log = logger.NewNamed(CName)

func New() Service {
    return new(service)
}

type Service interface {
    NewStreamPool(h StreamHandler) StreamPool
    app.Component
}

type service struct {
}

func (s *service) NewStreamPool(h StreamHandler) StreamPool {
    sp := &streamPool{
        handler:         h,
        streamIdsByPeer: map[string][]uint32{},
        streamIdsByTag:  map[string][]uint32{},
        streams:         map[uint32]*stream{},
        opening:         map[string]*openingProcess{},
        exec:            newStreamSender(10, 100),
    }
    return sp
}

func (s *service) Init(a *app.App) (err error) {
    return nil
}

func (s *service) Name() (name string) {
    return CName
}
13
net/streampool/testservice/protos/testservice.proto
Normal file
@ -0,0 +1,13 @@
syntax = "proto3";
package testService;

option go_package = "net/streampool/testservice";

service Test {
    rpc TestStream(stream StreamMessage) returns (stream StreamMessage);
}

message StreamMessage {
    string reqData = 1;
}
317
net/streampool/testservice/testservice.pb.go
Normal file
317
net/streampool/testservice/testservice.pb.go
Normal file
@ -0,0 +1,317 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: net/streampool/testservice/protos/testservice.proto
|
||||
|
||||
package testservice
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type StreamMessage struct {
|
||||
ReqData string `protobuf:"bytes,1,opt,name=reqData,proto3" json:"reqData,omitempty"`
|
||||
}
|
||||
|
||||
func (m *StreamMessage) Reset() { *m = StreamMessage{} }
|
||||
func (m *StreamMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamMessage) ProtoMessage() {}
|
||||
func (*StreamMessage) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_1c28d5a3a78be18f, []int{0}
|
||||
}
|
||||
func (m *StreamMessage) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *StreamMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_StreamMessage.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *StreamMessage) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamMessage.Merge(m, src)
|
||||
}
|
||||
func (m *StreamMessage) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *StreamMessage) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamMessage.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StreamMessage proto.InternalMessageInfo
|
||||
|
||||
func (m *StreamMessage) GetReqData() string {
|
||||
if m != nil {
|
||||
return m.ReqData
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*StreamMessage)(nil), "testService.StreamMessage")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("net/streampool/testservice/protos/testservice.proto", fileDescriptor_1c28d5a3a78be18f)
|
||||
}
|
||||
|
||||
var fileDescriptor_1c28d5a3a78be18f = []byte{
|
||||
// 173 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xce, 0x4b, 0x2d, 0xd1,
|
||||
0x2f, 0x2e, 0x29, 0x4a, 0x4d, 0xcc, 0x2d, 0xc8, 0xcf, 0xcf, 0xd1, 0x2f, 0x49, 0x2d, 0x2e, 0x29,
|
||||
0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x2f, 0x46, 0x16, 0xd2,
|
||||
	0x03, 0x0b, 0x09, 0x71, 0x83, 0x84, 0x82, 0x21, 0x42, 0x4a, 0x9a, 0x5c, 0xbc, 0xc1, 0x60, 0xfd,
	0xbe, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0xa9, 0x42, 0x12, 0x5c, 0xec, 0x45, 0xa9, 0x85, 0x2e, 0x89,
	0x25, 0x89, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0x51, 0x00, 0x17, 0x4b, 0x48,
	0x6a, 0x71, 0x89, 0x90, 0x07, 0x17, 0x17, 0x88, 0x86, 0x68, 0x13, 0x92, 0xd2, 0x43, 0x32, 0x4e,
	0x0f, 0xc5, 0x2c, 0x29, 0x3c, 0x72, 0x1a, 0x8c, 0x06, 0x8c, 0x4e, 0x26, 0x27, 0x1e, 0xc9, 0x31,
	0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb,
	0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x25, 0x85, 0xdb, 0x63, 0x49, 0x6c, 0x60, 0x6f, 0x18, 0x03,
	0x02, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x59, 0x8d, 0x93, 0xfd, 0x00, 0x00, 0x00,
}

func (m *StreamMessage) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *StreamMessage) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *StreamMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.ReqData) > 0 {
		i -= len(m.ReqData)
		copy(dAtA[i:], m.ReqData)
		i = encodeVarintTestservice(dAtA, i, uint64(len(m.ReqData)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

func encodeVarintTestservice(dAtA []byte, offset int, v uint64) int {
	offset -= sovTestservice(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
func (m *StreamMessage) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.ReqData)
	if l > 0 {
		n += 1 + l + sovTestservice(uint64(l))
	}
	return n
}

func sovTestservice(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
func sozTestservice(x uint64) (n int) {
	return sovTestservice(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *StreamMessage) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTestservice
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: StreamMessage: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: StreamMessage: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReqData", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTestservice
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthTestservice
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthTestservice
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ReqData = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipTestservice(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthTestservice
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipTestservice(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowTestservice
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowTestservice
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			iNdEx += 8
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowTestservice
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthTestservice
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupTestservice
			}
			depth--
		case 5:
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthTestservice
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

var (
	ErrInvalidLengthTestservice        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowTestservice          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupTestservice = fmt.Errorf("proto: unexpected end of group")
)
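For orientation, a minimal round-trip sketch using the generated methods above. The test file name and the module-style import path are assumptions for illustration; only Marshal, Unmarshal and the ReqData field come from the generated code.

package testservice_test

import (
	"testing"

	// assumed import path for the generated package above
	"github.com/anytypeio/any-sync/net/streampool/testservice"
)

// Marshal emits ReqData as field 1 (tag byte 0xa, varint length, then the
// bytes); Unmarshal reads it back.
func TestStreamMessageRoundTrip(t *testing.T) {
	msg := &testservice.StreamMessage{ReqData: "ping"}
	data, err := msg.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	var decoded testservice.StreamMessage
	if err := decoded.Unmarshal(data); err != nil {
		t.Fatal(err)
	}
	if decoded.ReqData != "ping" {
		t.Fatalf("ReqData = %q, want %q", decoded.ReqData, "ping")
	}
}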
148
net/streampool/testservice/testservice_drpc.pb.go
Normal file
@ -0,0 +1,148 @@
// Code generated by protoc-gen-go-drpc. DO NOT EDIT.
// protoc-gen-go-drpc version: v0.0.32
// source: net/streampool/testservice/protos/testservice.proto

package testservice

import (
	bytes "bytes"
	context "context"
	errors "errors"
	jsonpb "github.com/gogo/protobuf/jsonpb"
	proto "github.com/gogo/protobuf/proto"
	drpc "storj.io/drpc"
	drpcerr "storj.io/drpc/drpcerr"
)

type drpcEncoding_File_net_streampool_testservice_protos_testservice_proto struct{}

func (drpcEncoding_File_net_streampool_testservice_protos_testservice_proto) Marshal(msg drpc.Message) ([]byte, error) {
	return proto.Marshal(msg.(proto.Message))
}

func (drpcEncoding_File_net_streampool_testservice_protos_testservice_proto) Unmarshal(buf []byte, msg drpc.Message) error {
	return proto.Unmarshal(buf, msg.(proto.Message))
}

func (drpcEncoding_File_net_streampool_testservice_protos_testservice_proto) JSONMarshal(msg drpc.Message) ([]byte, error) {
	var buf bytes.Buffer
	err := new(jsonpb.Marshaler).Marshal(&buf, msg.(proto.Message))
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (drpcEncoding_File_net_streampool_testservice_protos_testservice_proto) JSONUnmarshal(buf []byte, msg drpc.Message) error {
	return jsonpb.Unmarshal(bytes.NewReader(buf), msg.(proto.Message))
}

type DRPCTestClient interface {
	DRPCConn() drpc.Conn

	TestStream(ctx context.Context) (DRPCTest_TestStreamClient, error)
}

type drpcTestClient struct {
	cc drpc.Conn
}

func NewDRPCTestClient(cc drpc.Conn) DRPCTestClient {
	return &drpcTestClient{cc}
}

func (c *drpcTestClient) DRPCConn() drpc.Conn { return c.cc }

func (c *drpcTestClient) TestStream(ctx context.Context) (DRPCTest_TestStreamClient, error) {
	stream, err := c.cc.NewStream(ctx, "/testService.Test/TestStream", drpcEncoding_File_net_streampool_testservice_protos_testservice_proto{})
	if err != nil {
		return nil, err
	}
	x := &drpcTest_TestStreamClient{stream}
	return x, nil
}

type DRPCTest_TestStreamClient interface {
	drpc.Stream
	Send(*StreamMessage) error
	Recv() (*StreamMessage, error)
}

type drpcTest_TestStreamClient struct {
	drpc.Stream
}

func (x *drpcTest_TestStreamClient) Send(m *StreamMessage) error {
	return x.MsgSend(m, drpcEncoding_File_net_streampool_testservice_protos_testservice_proto{})
}

func (x *drpcTest_TestStreamClient) Recv() (*StreamMessage, error) {
	m := new(StreamMessage)
	if err := x.MsgRecv(m, drpcEncoding_File_net_streampool_testservice_protos_testservice_proto{}); err != nil {
		return nil, err
	}
	return m, nil
}

func (x *drpcTest_TestStreamClient) RecvMsg(m *StreamMessage) error {
	return x.MsgRecv(m, drpcEncoding_File_net_streampool_testservice_protos_testservice_proto{})
}

type DRPCTestServer interface {
	TestStream(DRPCTest_TestStreamStream) error
}

type DRPCTestUnimplementedServer struct{}

func (s *DRPCTestUnimplementedServer) TestStream(DRPCTest_TestStreamStream) error {
	return drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
}

type DRPCTestDescription struct{}

func (DRPCTestDescription) NumMethods() int { return 1 }

func (DRPCTestDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
	switch n {
	case 0:
		return "/testService.Test/TestStream", drpcEncoding_File_net_streampool_testservice_protos_testservice_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return nil, srv.(DRPCTestServer).
					TestStream(
						&drpcTest_TestStreamStream{in1.(drpc.Stream)},
					)
			}, DRPCTestServer.TestStream, true
	default:
		return "", nil, nil, nil, false
	}
}

func DRPCRegisterTest(mux drpc.Mux, impl DRPCTestServer) error {
	return mux.Register(impl, DRPCTestDescription{})
}

type DRPCTest_TestStreamStream interface {
	drpc.Stream
	Send(*StreamMessage) error
	Recv() (*StreamMessage, error)
}

type drpcTest_TestStreamStream struct {
	drpc.Stream
}

func (x *drpcTest_TestStreamStream) Send(m *StreamMessage) error {
	return x.MsgSend(m, drpcEncoding_File_net_streampool_testservice_protos_testservice_proto{})
}

func (x *drpcTest_TestStreamStream) Recv() (*StreamMessage, error) {
	m := new(StreamMessage)
	if err := x.MsgRecv(m, drpcEncoding_File_net_streampool_testservice_protos_testservice_proto{}); err != nil {
		return nil, err
	}
	return m, nil
}

func (x *drpcTest_TestStreamStream) RecvMsg(m *StreamMessage) error {
	return x.MsgRecv(m, drpcEncoding_File_net_streampool_testservice_protos_testservice_proto{})
}
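A sketch of how these bindings could be wired up, written as an extra file in the same package. The echo handler and the pre-established drpc.Conn are illustrative assumptions; DRPCRegisterTest, NewDRPCTestClient and the stream types come from the generated file above.

package testservice

import (
	"context"

	"storj.io/drpc"
)

// echoServer returns every received StreamMessage to the sender.
type echoServer struct{}

func (echoServer) TestStream(stream DRPCTest_TestStreamStream) error {
	for {
		msg, err := stream.Recv()
		if err != nil {
			return err
		}
		if err = stream.Send(msg); err != nil {
			return err
		}
	}
}

// Server side: register on any drpc.Mux implementation, e.g.
//   err := DRPCRegisterTest(mux, echoServer{})

// Client side, given an established drpc.Conn:
func ping(ctx context.Context, conn drpc.Conn) (string, error) {
	stream, err := NewDRPCTestClient(conn).TestStream(ctx)
	if err != nil {
		return "", err
	}
	if err = stream.Send(&StreamMessage{ReqData: "ping"}); err != nil {
		return "", err
	}
	reply, err := stream.Recv()
	if err != nil {
		return "", err
	}
	return reply.ReqData, nil
}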
@ -2,11 +2,15 @@ package timeoutconn

import (
	"errors"
	"github.com/anytypeio/any-sync/app/logger"
	"go.uber.org/zap"
	"net"
	"os"
	"time"
)

var log = logger.NewNamed("net.timeoutconn")

type Conn struct {
	net.Conn
	timeout time.Duration

@ -17,22 +21,32 @@ func NewConn(conn net.Conn, timeout time.Duration) *Conn {
}

func (c *Conn) Write(p []byte) (n int, err error) {
	for {
		if c.timeout != 0 {
			if e := c.Conn.SetWriteDeadline(time.Now().Add(c.timeout)); e != nil {
				log.Warn("can't set write deadline", zap.String("remoteAddr", c.RemoteAddr().String()))
			}
		}
		nn, err := c.Conn.Write(p[n:])
		n += nn
		if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
			// Keep extending the deadline so long as we're making progress.
			log.Debug("keep extending the deadline so long as we're making progress", zap.String("remoteAddr", c.RemoteAddr().String()))
			continue
		}
		if c.timeout != 0 {
			if e := c.Conn.SetWriteDeadline(time.Time{}); e != nil {
				log.Warn("can't set write deadline", zap.String("remoteAddr", c.RemoteAddr().String()))
			}
		}
		if err != nil {
			// the connection timed out, so close it
			if e := c.Conn.Close(); e != nil {
				log.Warn("connection close error", zap.String("remoteAddr", c.RemoteAddr().String()))
			}
			log.Debug("connection timed out", zap.String("remoteAddr", c.RemoteAddr().String()))
		}
		return n, err
	}
}
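A sketch of the intended use: wrap a dialed (or accepted) connection so a stalled peer cannot block Write forever. The import path and the address are assumptions for illustration; NewConn and the Write behaviour come from the hunk above.

package main

import (
	"net"
	"time"

	// assumed import path for the package shown above
	"github.com/anytypeio/any-sync/net/timeoutconn"
)

func main() {
	raw, err := net.Dial("tcp", "127.0.0.1:4242")
	if err != nil {
		panic(err)
	}
	// Every Write gets a rolling 10s deadline: it is extended while bytes
	// keep flowing, and the connection is closed once a write stalls.
	conn := timeoutconn.NewConn(raw, 10*time.Second)
	defer conn.Close()
	if _, err = conn.Write([]byte("hello")); err != nil {
		panic(err)
	}
}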
100
util/multiqueue/multiqueue.go
Normal file
@ -0,0 +1,100 @@
package multiqueue

import (
	"context"
	"errors"
	"github.com/cheggaaa/mb/v3"
	"sync"
)

var (
	ErrThreadNotExists = errors.New("multiQueue: thread not exists")
	ErrClosed          = errors.New("multiQueue: closed")
)

// New creates a MultiQueue that dispatches every message to h. Each threadId
// gets its own queue of up to maxThreadSize messages, drained by a dedicated
// goroutine, so messages within one thread are handled in order.
func New[T any](h HandleFunc[T], maxThreadSize int) MultiQueue[T] {
	return &multiQueue[T]{
		handler:      h,
		threads:      make(map[string]*mb.MB[T]),
		queueMaxSize: maxThreadSize,
	}
}

type HandleFunc[T any] func(msg T)

type MultiQueue[T any] interface {
	Add(ctx context.Context, threadId string, msg T) (err error)
	CloseThread(threadId string) (err error)
	Close() (err error)
}

type multiQueue[T any] struct {
	handler      HandleFunc[T]
	queueMaxSize int
	threads      map[string]*mb.MB[T]
	mu           sync.Mutex
	closed       bool
}

func (m *multiQueue[T]) Add(ctx context.Context, threadId string, msg T) (err error) {
	m.mu.Lock()
	if m.closed {
		m.mu.Unlock()
		return ErrClosed
	}
	q, ok := m.threads[threadId]
	if !ok {
		q = m.startThread(threadId)
	}
	m.mu.Unlock()
	return q.TryAdd(msg)
}

func (m *multiQueue[T]) startThread(id string) *mb.MB[T] {
	q := mb.New[T](m.queueMaxSize)
	m.threads[id] = q
	go m.threadLoop(q)
	return q
}

// threadLoop drains one thread's queue until the queue is closed.
func (m *multiQueue[T]) threadLoop(q *mb.MB[T]) {
	for {
		msg, err := q.WaitOne(context.Background())
		if err != nil {
			return
		}
		m.handler(msg)
	}
}

func (m *multiQueue[T]) CloseThread(threadId string) (err error) {
	m.mu.Lock()
	if m.closed {
		m.mu.Unlock()
		return ErrClosed
	}
	q, ok := m.threads[threadId]
	if ok {
		delete(m.threads, threadId)
	}
	m.mu.Unlock()
	if !ok {
		return ErrThreadNotExists
	}
	return q.Close()
}

func (m *multiQueue[T]) Close() (err error) {
	m.mu.Lock()
	if m.closed {
		m.mu.Unlock()
		return ErrClosed
	}
	m.closed = true
	threads := m.threads
	m.mu.Unlock()
	for _, q := range threads {
		_ = q.Close()
	}
	return nil
}
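The test file below exercises this; as a compact usage sketch of the per-thread ordering guarantee (the handler, thread ids and import path are illustrative assumptions):

package main

import (
	"context"
	"fmt"
	"time"

	// assumed import path for the package above
	"github.com/anytypeio/any-sync/util/multiqueue"
)

func main() {
	q := multiqueue.New[string](func(msg string) {
		fmt.Println("handled:", msg)
	}, 100)
	defer q.Close()

	// "peerA" and "peerB" each get a dedicated queue and goroutine: "a1" is
	// always handled before "a2", but may interleave with "b1".
	_ = q.Add(context.Background(), "peerA", "a1")
	_ = q.Add(context.Background(), "peerA", "a2")
	_ = q.Add(context.Background(), "peerB", "b1")

	// give the handler goroutines a moment before Close (illustration only)
	time.Sleep(50 * time.Millisecond)
}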
65
util/multiqueue/multiqueue_test.go
Normal file
@ -0,0 +1,65 @@
package multiqueue

import (
	"context"
	"fmt"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"testing"
	"time"
)

func TestMultiQueue_Add(t *testing.T) {
	t.Run("process", func(t *testing.T) {
		var msgsCh = make(chan string)
		var h HandleFunc[string] = func(msg string) {
			msgsCh <- msg
		}
		q := New[string](h, 10)
		defer func() {
			require.NoError(t, q.Close())
		}()

		for i := 0; i < 5; i++ {
			for j := 0; j < 5; j++ {
				assert.NoError(t, q.Add(context.Background(), fmt.Sprint(i), fmt.Sprint(i, j)))
			}
		}
		var msgs []string
		for i := 0; i < 5*5; i++ {
			select {
			case <-time.After(time.Second / 4):
				require.Fail(t, "timeout")
			case msg := <-msgsCh:
				msgs = append(msgs, msg)
			}
		}
		assert.Len(t, msgs, 25)
	})
	t.Run("add to closed", func(t *testing.T) {
		q := New[string](func(msg string) {}, 10)
		require.NoError(t, q.Close())
		assert.Equal(t, ErrClosed, q.Add(context.Background(), "1", "1"))
	})
}

func TestMultiQueue_CloseThread(t *testing.T) {
	var msgsCh = make(chan string)
	var h HandleFunc[string] = func(msg string) {
		msgsCh <- msg
	}
	q := New[string](h, 10)
	defer func() {
		require.NoError(t, q.Close())
	}()
	require.NoError(t, q.Add(context.Background(), "1", "1"))
	require.NoError(t, q.Add(context.Background(), "1", "2"))
	require.NoError(t, q.CloseThread("1"))
	for i := 0; i < 2; i++ {
		select {
		case <-msgsCh:
		case <-time.After(time.Second / 4):
			require.Fail(t, "timeout")
		}
	}
}
@ -3,6 +3,7 @@ package periodicsync

import (
	"context"
	"github.com/anytypeio/any-sync/app/logger"
	"go.uber.org/zap"
	"time"
)

@ -14,8 +15,9 @@ type PeriodicSync interface {

type SyncerFunc func(ctx context.Context) error

func NewPeriodicSync(periodSeconds int, timeout time.Duration, syncer SyncerFunc, l logger.CtxLogger) PeriodicSync {
	ctx, cancel := context.WithCancel(context.Background())
	ctx = logger.CtxWithFields(ctx, zap.String("rootOp", "periodicSync"))
	return &periodicSync{
		syncer: syncer,
		log:    l,

@ -28,7 +30,7 @@ func NewPeriodicSync(periodSeconds int, timeout time.Duration, syncer SyncerFunc

type periodicSync struct {
	log        logger.CtxLogger
	syncer     SyncerFunc
	syncCtx    context.Context
	syncCancel context.CancelFunc