swarm/storage/mru: Renamed all comments to Feeds

pull/17796/head
Javier Peletier 6 years ago
parent f1e86ad9cf
commit b35622cf3c
  1. 4
      swarm/storage/mru/cacheentry.go
  2. 29
      swarm/storage/mru/doc.go
  3. 2
      swarm/storage/mru/error.go
  4. 57
      swarm/storage/mru/handler.go
  5. 54
      swarm/storage/mru/handler_test.go
  6. 4
      swarm/storage/mru/id.go
  7. 2
      swarm/storage/mru/lookup/lookup.go
  8. 2
      swarm/storage/mru/query.go
  9. 12
      swarm/storage/mru/request.go
  10. 16
      swarm/storage/mru/request_test.go
  11. 4
      swarm/storage/mru/sign.go
  12. 2
      swarm/storage/mru/topic.go
  13. 4
      swarm/storage/mru/update.go
  14. 6
      swarm/storage/mru/view.go

@ -30,7 +30,7 @@ const (
defaultRetrieveTimeout = 100 * time.Millisecond defaultRetrieveTimeout = 100 * time.Millisecond
) )
// cacheEntry caches resource data and the metadata of its root chunk. // cacheEntry caches the last known update of a specific Feed.
type cacheEntry struct { type cacheEntry struct {
Update Update
*bytes.Reader *bytes.Reader
@ -42,7 +42,7 @@ func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) {
return int64(len(r.Update.data)), nil return int64(len(r.Update.data)), nil
} }
//returns the resource's topic //returns the Feed's topic
func (r *cacheEntry) Topic() Topic { func (r *cacheEntry) Topic() Topic {
return r.Feed.Topic return r.Feed.Topic
} }

@ -1,43 +1,42 @@
/* /*
Package feeds defines Swarm Feeds. Package feeds defines Swarm Feeds.
A Mutable Resource is an entity which allows updates to a resource Swarm Feeds allows a user to build an update feed about a particular topic
without resorting to ENS on each update. without resorting to ENS on each update.
The update scheme is built on swarm chunks with chunk keys following The update scheme is built on swarm chunks with chunk keys following
a predictable, versionable pattern. a predictable, versionable pattern.
A Resource is tied to a unique identifier that is deterministically generated out of A Feed is tied to a unique identifier that is deterministically generated out of
the chosen topic. the chosen topic.
A Resource View is defined as a specific user's point of view about a particular resource. A Feed is defined as the series of updates of a specific user about a particular topic
Thus, a View is a Topic + the user's address (userAddr)
Actual data updates are also made in the form of swarm chunks. The keys Actual data updates are also made in the form of swarm chunks. The keys
of the updates are the hash of a concatenation of properties as follows: of the updates are the hash of a concatenation of properties as follows:
updateAddr = H(View, Epoch ID) updateAddr = H(Feed, Epoch ID)
where H is the SHA3 hash function where H is the SHA3 hash function
View is the combination of Topic and the user address Feed is the combination of Topic and the user address
Epoch ID is a time slot. See the lookup package for more information. Epoch ID is a time slot. See the lookup package for more information.
A user looking up a resource would only need to know the View in order to A user looking up the latest update in a Feed only needs to know the Topic
another user's updates and the other user's address.
The resource update data is: The Feed Update data is:
resourcedata = View|Epoch|data updatedata = Feed|Epoch|data
the full update data that goes in the chunk payload is: The full update data that goes in the chunk payload is:
resourcedata|sign(resourcedata) resourcedata|sign(resourcedata)
Structure Summary: Structure Summary:
Request: Resource update with signature Request: Feed Update with signature
ResourceUpdate: headers + data Update: headers + data
Header: Protocol version and reserved for future use placeholders Header: Protocol version and reserved for future use placeholders
ID: Information about how to locate a specific update ID: Information about how to locate a specific update
View: Author of the update and what is updating Feed: Represents a user's series of publications about a specific Topic
Topic: Item that the updates are about Topic: Item that the updates are about
User: User who updates the resource User: User who updates the Feed
Epoch: time slot where the update is stored Epoch: time slot where the update is stored
*/ */

@ -52,7 +52,7 @@ func (e *Error) Code() int {
return e.code return e.code
} }
// NewError creates a new Mutable Resource Error object with the specified code and custom error message // NewError creates a new Swarm Feeds Error object with the specified code and custom error message
func NewError(code int, s string) error { func NewError(code int, s string) error {
if code < 0 || code >= ErrCnt { if code < 0 || code >= ErrCnt {
panic("no such error code!") panic("no such error code!")

@ -57,7 +57,7 @@ func init() {
} }
} }
// NewHandler creates a new Mutable Resource API // NewHandler creates a new Swarm Feeds API
func NewHandler(params *HandlerParams) *Handler { func NewHandler(params *HandlerParams) *Handler {
fh := &Handler{ fh := &Handler{
cache: make(map[uint64]*cacheEntry), cache: make(map[uint64]*cacheEntry),
@ -74,13 +74,13 @@ func NewHandler(params *HandlerParams) *Handler {
return fh return fh
} }
// SetStore sets the store backend for the Mutable Resource API // SetStore sets the store backend for the Swarm Feeds API
func (h *Handler) SetStore(store *storage.NetStore) { func (h *Handler) SetStore(store *storage.NetStore) {
h.chunkStore = store h.chunkStore = store
} }
// Validate is a chunk validation method // Validate is a chunk validation method
// If it looks like a resource update, the chunk address is checked against the userAddr of the update's signature // If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature
// It implements the storage.ChunkValidator interface // It implements the storage.ChunkValidator interface
func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
dataLength := len(data) dataLength := len(data)
@ -89,7 +89,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
} }
// check if it is a properly formatted update chunk with // check if it is a properly formatted update chunk with
// valid signature and proof of ownership of the resource it is trying // valid signature and proof of ownership of the feed it is trying
// to update // to update
// First, deserialize the chunk // First, deserialize the chunk
@ -99,9 +99,9 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
return false return false
} }
// Verify signatures and that the signer actually owns the resource // Verify signatures and that the signer actually owns the feed
// If it fails, it means either the signature is not valid, data is corrupted // If it fails, it means either the signature is not valid, data is corrupted
// or someone is trying to update someone else's resource. // or someone is trying to update someone else's feed.
if err := r.Verify(); err != nil { if err := r.Verify(); err != nil {
log.Debug("Invalid feed update signature", "err", err) log.Debug("Invalid feed update signature", "err", err)
return false return false
@ -110,14 +110,14 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
return true return true
} }
// GetContent retrieves the data payload of the last synced update of the Mutable Resource // GetContent retrieves the data payload of the last synced update of the Feed
func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) { func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) {
if feed == nil { if feed == nil {
return nil, nil, NewError(ErrInvalidValue, "view is nil") return nil, nil, NewError(ErrInvalidValue, "feed is nil")
} }
feedUpdate := h.get(feed) feedUpdate := h.get(feed)
if feedUpdate == nil { if feedUpdate == nil {
return nil, nil, NewError(ErrNotFound, "resource does not exist") return nil, nil, NewError(ErrNotFound, "feed update not cached")
} }
return feedUpdate.lastKey, feedUpdate.data, nil return feedUpdate.lastKey, feedUpdate.data, nil
} }
@ -142,7 +142,7 @@ func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request,
return nil, err return nil, err
} }
// not finding updates means that there is a network error // not finding updates means that there is a network error
// or that the resource really does not have updates // or that the feed really does not have updates
} }
request.Feed = *feed request.Feed = *feed
@ -157,13 +157,10 @@ func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request,
return request, nil return request, nil
} }
// Lookup retrieves a specific or latest version of the resource // Lookup retrieves a specific or latest feed update
// Lookup works differently depending on the configuration of `ID` // Lookup works differently depending on the configuration of `query`
// See the `ID` documentation and helper functions: // See the `query` documentation and helper functions:
// `LookupLatest` and `LookupBefore` // `NewQueryLatest` and `NewQuery`
// When looking for the latest update, it starts at the next period after the current time.
// upon failure tries the corresponding keys of each previous period until one is found
// (or startTime is reached, in which case there are no updates).
func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) { func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) {
timeLimit := query.TimeLimit timeLimit := query.TimeLimit
@ -213,17 +210,17 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
return nil, err return nil, err
} }
log.Info(fmt.Sprintf("Resource lookup finished in %d lookups", readCount)) log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount))
request, _ := requestPtr.(*Request) request, _ := requestPtr.(*Request)
if request == nil { if request == nil {
return nil, NewError(ErrNotFound, "no updates found") return nil, NewError(ErrNotFound, "no feed updates found")
} }
return h.updateCache(request) return h.updateCache(request)
} }
// update mutable resource cache map with specified content // update feed updates cache with specified content
func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { func (h *Handler) updateCache(request *Request) (*cacheEntry, error) {
updateAddr := request.Addr() updateAddr := request.Addr()
@ -242,10 +239,10 @@ func (h *Handler) updateCache(request *Request) (*cacheEntry, error) {
return feedUpdate, nil return feedUpdate, nil
} }
// Update adds an actual data update // Update publishes a feed update
// Uses the Mutable Resource metadata currently loaded in the resources map entry. // Note that a Feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature.
// It is the caller's responsibility to make sure that this data is not stale. // This results in a max payload of `maxUpdateDataLength` (check update.go for more details)
// Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit. // An error will be returned if the total length of the chunk payload will exceed this limit.
// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update // Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update
// on the network. // on the network.
func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) { func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) {
@ -280,18 +277,18 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad
return r.idAddr, nil return r.idAddr, nil
} }
// Retrieves the resource cache value for the given nameHash // Retrieves the feed update cache value for the given nameHash
func (h *Handler) get(view *Feed) *cacheEntry { func (h *Handler) get(feed *Feed) *cacheEntry {
mapKey := view.mapKey() mapKey := feed.mapKey()
h.cacheLock.RLock() h.cacheLock.RLock()
defer h.cacheLock.RUnlock() defer h.cacheLock.RUnlock()
feedUpdate := h.cache[mapKey] feedUpdate := h.cache[mapKey]
return feedUpdate return feedUpdate
} }
// Sets the resource cache value for the given View // Sets the feed update cache value for the given Feed
func (h *Handler) set(view *Feed, feedUpdate *cacheEntry) { func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) {
mapKey := view.mapKey() mapKey := feed.mapKey()
h.cacheLock.Lock() h.cacheLock.Lock()
defer h.cacheLock.Unlock() defer h.cacheLock.Unlock()
h.cache[mapKey] = feedUpdate h.cache[mapKey] = feedUpdate

@ -89,12 +89,12 @@ func TestFeedsHandler(t *testing.T) {
} }
defer teardownTest() defer teardownTest()
// create a new resource // create a new Feed
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil) topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil)
view := Feed{ feed := Feed{
Topic: topic, Topic: topic,
User: signer.Address(), User: signer.Address(),
} }
@ -107,7 +107,7 @@ func TestFeedsHandler(t *testing.T) {
"clyde", // t=4285 "clyde", // t=4285
} }
request := NewFirstRequest(view.Topic) // this timestamps the update at t = 4200 (start time) request := NewFirstRequest(feed.Topic) // this timestamps the update at t = 4200 (start time)
chunkAddress := make(map[string]storage.Address) chunkAddress := make(map[string]storage.Address)
data := []byte(updates[0]) data := []byte(updates[0])
request.SetData(data) request.SetData(data)
@ -205,38 +205,38 @@ func TestFeedsHandler(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
rsrc2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue)) update2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// last update should be "clyde" // last update should be "clyde"
if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) { if !bytes.Equal(update2.data, []byte(updates[len(updates)-1])) {
t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) t.Fatalf("feed update data was %v, expected %v", string(update2.data), updates[len(updates)-1])
} }
if rsrc2.Level != 22 { if update2.Level != 22 {
t.Fatalf("resource epoch level was %d, expected 22", rsrc2.Level) t.Fatalf("feed update epoch level was %d, expected 22", update2.Level)
} }
if rsrc2.Base() != 0 { if update2.Base() != 0 {
t.Fatalf("resource epoch base time was %d, expected 0", rsrc2.Base()) t.Fatalf("feed update epoch base time was %d, expected 0", update2.Base())
} }
log.Debug("Latest lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) log.Debug("Latest lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data)
// specific point in time // specific point in time
rsrc, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue)) update, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// check data // check data
if !bytes.Equal(rsrc.data, []byte(updates[2])) { if !bytes.Equal(update.data, []byte(updates[2])) {
t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2]) t.Fatalf("feed update data (historical) was %v, expected %v", string(update2.data), updates[2])
} }
log.Debug("Historical lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) log.Debug("Historical lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data)
// beyond the first should yield an error // beyond the first should yield an error
rsrc, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue)) update, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue))
if err == nil { if err == nil {
t.Fatalf("expected previous to fail, returned epoch %s data %v", rsrc.Epoch.String(), rsrc.data) t.Fatalf("expected previous to fail, returned epoch %s data %v", update.Epoch.String(), update.data)
} }
} }
@ -266,11 +266,11 @@ func TestSparseUpdates(t *testing.T) {
defer teardownTest() defer teardownTest()
defer os.RemoveAll(datadir) defer os.RemoveAll(datadir)
// create a new resource // create a new Feed
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
topic, _ := NewTopic("Very slow updates", nil) topic, _ := NewTopic("Very slow updates", nil)
view := Feed{ feed := Feed{
Topic: topic, Topic: topic,
User: signer.Address(), User: signer.Address(),
} }
@ -280,7 +280,7 @@ func TestSparseUpdates(t *testing.T) {
var epoch lookup.Epoch var epoch lookup.Epoch
var lastUpdateTime uint64 var lastUpdateTime uint64
for T := uint64(0); T < today; T += 5 * Year { for T := uint64(0); T < today; T += 5 * Year {
request := NewFirstRequest(view.Topic) request := NewFirstRequest(feed.Topic)
request.Epoch = lookup.GetNextEpoch(epoch, T) request.Epoch = lookup.GetNextEpoch(epoch, T)
request.data = generateData(T) // this generates some data that depends on T, so we can check later request.data = generateData(T) // this generates some data that depends on T, so we can check later
request.Sign(signer) request.Sign(signer)
@ -295,14 +295,14 @@ func TestSparseUpdates(t *testing.T) {
lastUpdateTime = T lastUpdateTime = T
} }
query := NewQuery(&view, today, lookup.NoClue) query := NewQuery(&feed, today, lookup.NoClue)
_, err = rh.Lookup(ctx, query) _, err = rh.Lookup(ctx, query)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, content, err := rh.GetContent(&view) _, content, err := rh.GetContent(&feed)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -321,7 +321,7 @@ func TestSparseUpdates(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
_, content, err = rh.GetContent(&view) _, content, err = rh.GetContent(&feed)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -348,7 +348,7 @@ func TestValidator(t *testing.T) {
} }
defer teardownTest() defer teardownTest()
// create new resource // create new Feed
topic, _ := NewTopic(subtopicName, nil) topic, _ := NewTopic(subtopicName, nil)
feed := Feed{ feed := Feed{
Topic: topic, Topic: topic,
@ -382,7 +382,7 @@ func TestValidator(t *testing.T) {
} }
// tests that the content address validator correctly checks the data // tests that the content address validator correctly checks the data
// tests that resource update chunks are passed through content address validator // tests that Feed update chunks are passed through content address validator
// there is some redundancy in this test as it also tests content addressed chunks, // there is some redundancy in this test as it also tests content addressed chunks,
// which should be evaluated as invalid chunks by this validator // which should be evaluated as invalid chunks by this validator
func TestValidatorInStore(t *testing.T) { func TestValidatorInStore(t *testing.T) {
@ -409,7 +409,7 @@ func TestValidatorInStore(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
// set up resource handler and add it as a validator to the localstore // set up Swarm Feeds handler and add it as a validator to the localstore
fhParams := &HandlerParams{} fhParams := &HandlerParams{}
fh := NewHandler(fhParams) fh := NewHandler(fhParams)
store.Validators = append(store.Validators, fh) store.Validators = append(store.Validators, fh)
@ -425,7 +425,7 @@ func TestValidatorInStore(t *testing.T) {
User: signer.Address(), User: signer.Address(),
} }
// create a resource update chunk with correct publickey // create a feed update chunk with correct publickey
id := ID{ id := ID{
Epoch: lookup.Epoch{Time: 42, Epoch: lookup.Epoch{Time: 42,
Level: 1, Level: 1,

@ -29,7 +29,7 @@ import (
// ID uniquely identifies an update on the network. // ID uniquely identifies an update on the network.
type ID struct { type ID struct {
Feed `json:"view"` Feed `json:"feed"`
lookup.Epoch `json:"epoch"` lookup.Epoch `json:"epoch"`
} }
@ -38,7 +38,7 @@ type ID struct {
// Epoch EpochLength // Epoch EpochLength
const idLength = feedLength + lookup.EpochLength const idLength = feedLength + lookup.EpochLength
// Addr calculates the resource update chunk address corresponding to this ID // Addr calculates the feed update chunk address corresponding to this ID
func (u *ID) Addr() (updateAddr storage.Address) { func (u *ID) Addr() (updateAddr storage.Address) {
serializedData := make([]byte, idLength) serializedData := make([]byte, idLength)
var cursor int var cursor int

@ -15,7 +15,7 @@
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
/* /*
Package lookup defines resource lookup algorithms and provides tools to place updates Package lookup defines Feed lookup algorithms and provides tools to place updates
so they can be found so they can be found
*/ */
package lookup package lookup

@ -72,7 +72,7 @@ func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query {
} }
} }
// NewQueryLatest generates lookup parameters that look for the latest version of a resource // NewQueryLatest generates lookup parameters that look for the latest update to a feed
func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query { func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query {
return NewQuery(feed, 0, hint) return NewQuery(feed, 0, hint)
} }

@ -27,7 +27,7 @@ import (
"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
) )
// Request represents an update and/or resource create message // Request represents a request to sign or signed Feed Update message
type Request struct { type Request struct {
Update // actual content that will be put on the chunk, less signature Update // actual content that will be put on the chunk, less signature
Signature *Signature Signature *Signature
@ -62,7 +62,7 @@ func NewFirstRequest(topic Topic) *Request {
return request return request
} }
// SetData stores the payload data the resource will be updated with // SetData stores the payload data the feed update will be updated with
func (r *Request) SetData(data []byte) { func (r *Request) SetData(data []byte) {
r.data = data r.data = data
r.Signature = nil r.Signature = nil
@ -73,7 +73,7 @@ func (r *Request) IsUpdate() bool {
return r.Signature != nil return r.Signature != nil
} }
// Verify checks that signatures are valid and that the signer owns the resource to be updated // Verify checks that signatures are valid
func (r *Request) Verify() (err error) { func (r *Request) Verify() (err error) {
if len(r.data) == 0 { if len(r.data) == 0 {
return NewError(ErrInvalidValue, "Update does not contain data") return NewError(ErrInvalidValue, "Update does not contain data")
@ -103,7 +103,7 @@ func (r *Request) Verify() (err error) {
return nil return nil
} }
// Sign executes the signature to validate the resource // Sign executes the signature to validate the update message
func (r *Request) Sign(signer Signer) error { func (r *Request) Sign(signer Signer) error {
r.Feed.User = signer.Address() r.Feed.User = signer.Address()
r.binaryData = nil //invalidate serialized data r.binaryData = nil //invalidate serialized data
@ -133,7 +133,7 @@ func (r *Request) Sign(signer Signer) error {
return nil return nil
} }
// GetDigest creates the resource update digest used in signatures // GetDigest creates the feed update digest used in signatures
// the serialized payload is cached in .binaryData // the serialized payload is cached in .binaryData
func (r *Request) GetDigest() (result common.Hash, err error) { func (r *Request) GetDigest() (result common.Hash, err error) {
hasher := hashPool.Get().(hash.Hash) hasher := hashPool.Get().(hash.Hash)
@ -174,7 +174,7 @@ func (r *Request) toChunk() (storage.Chunk, error) {
func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error { func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error {
// for update chunk layout see Request definition // for update chunk layout see Request definition
//deserialize the resource update portion //deserialize the feed update portion
if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil {
return err return err
} }

@ -47,7 +47,7 @@ func areEqualJSON(s1, s2 string) (bool, error) {
} }
// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly // TestEncodingDecodingUpdateRequests ensures that requests are serialized properly
// while also checking cryptographically that only the owner of a resource can update it. // while also checking cryptographically that only the owner of a Feed can update it.
func TestEncodingDecodingUpdateRequests(t *testing.T) { func TestEncodingDecodingUpdateRequests(t *testing.T) {
charlie := newCharlieSigner() //Charlie charlie := newCharlieSigner() //Charlie
@ -75,12 +75,10 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) {
t.Fatal("Expected Verify to fail since the message is not signed") t.Fatal("Expected Verify to fail since the message is not signed")
} }
// We now assume that the resource was created and propagated. With rootAddr we can retrieve the resource metadata // We now assume that the feed update was created and propagated.
// and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct
// proof of ownership
const expectedSignature = "0x32c2d2c7224e24e4d3ae6a10595fc6e945f1b3ecdf548a04d8247c240a50c9240076aa7730abad6c8a46dfea00cfb8f43b6211f02db5c4cc5ed8584cb0212a4d00" const expectedSignature = "0x7235b27a68372ddebcf78eba48543fa460864b0b0e99cb533fcd3664820e603312d29426dd00fb39628f5299480a69bf6e462838d78de49ce0704c754c9deb2601"
const expectedJSON = `{"view":{"topic":"0x6120676f6f64207265736f75726365206e616d65000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}` const expectedJSON = `{"feed":{"topic":"0x6120676f6f6420746f706963206e616d65000000000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}`
//Put together an unsigned update request that we will serialize to send it to the signer. //Put together an unsigned update request that we will serialize to send it to the signer.
data := []byte("This hour's update: Swarm 99.0 has been released!") data := []byte("This hour's update: Swarm 99.0 has been released!")
@ -138,7 +136,7 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) {
t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature")
} }
// Now imagine Bob wants to create an update of his own about the same resource, // Now imagine Bob wants to create an update of his own about the same Feed,
// signing a message with his private key // signing a message with his private key
if err := request.Sign(bob); err != nil { if err := request.Sign(bob); err != nil {
t.Fatalf("Error signing: %s", err) t.Fatalf("Error signing: %s", err)
@ -258,7 +256,7 @@ func TestReverse(t *testing.T) {
defer teardownTest() defer teardownTest()
topic, _ := NewTopic("Cervantes quotes", nil) topic, _ := NewTopic("Cervantes quotes", nil)
view := Feed{ feed := Feed{
Topic: topic, Topic: topic,
User: signer.Address(), User: signer.Address(),
} }
@ -266,7 +264,7 @@ func TestReverse(t *testing.T) {
data := []byte("Donde una puerta se cierra, otra se abre") data := []byte("Donde una puerta se cierra, otra se abre")
request := new(Request) request := new(Request)
request.Feed = view request.Feed = feed
request.Epoch = epoch request.Epoch = epoch
request.data = data request.data = data

@ -28,7 +28,7 @@ const signatureLength = 65
// Signature is an alias for a static byte array with the size of a signature // Signature is an alias for a static byte array with the size of a signature
type Signature [signatureLength]byte type Signature [signatureLength]byte
// Signer signs Mutable Resource update payloads // Signer signs Feed update payloads
type Signer interface { type Signer interface {
Sign(common.Hash) (Signature, error) Sign(common.Hash) (Signature, error)
Address() common.Address Address() common.Address
@ -65,7 +65,7 @@ func (s *GenericSigner) Address() common.Address {
return s.address return s.address
} }
// getUserAddr extracts the address of the resource update signer // getUserAddr extracts the address of the Feed update signer
func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) {
pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) pub, err := crypto.SigToPub(digest.Bytes(), signature[:])
if err != nil { if err != nil {

@ -74,7 +74,7 @@ func (t *Topic) FromHex(hex string) error {
return nil return nil
} }
// Name will try to extract the resource name out of the topic // Name will try to extract the topic name out of the Topic
func (t *Topic) Name(relatedContent []byte) string { func (t *Topic) Name(relatedContent []byte) string {
nameBytes := *t nameBytes := *t
if relatedContent != nil { if relatedContent != nil {

@ -37,7 +37,7 @@ type Header struct {
// Update encapsulates the information sent as part of a feed update // Update encapsulates the information sent as part of a feed update
type Update struct { type Update struct {
Header Header // Header Header //
ID // Resource update identifying information ID // Feed Update identifying information
data []byte // actual data payload data []byte // actual data payload
} }
@ -86,7 +86,7 @@ func (r *Update) binaryLength() int {
// binaryGet populates this instance from the information contained in the passed byte slice // binaryGet populates this instance from the information contained in the passed byte slice
func (r *Update) binaryGet(serializedData []byte) error { func (r *Update) binaryGet(serializedData []byte) error {
if len(serializedData) < minimumUpdateDataLength { if len(serializedData) < minimumUpdateDataLength {
return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength) return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a feed update chunk", minimumUpdateDataLength)
} }
dataLength := len(serializedData) - idLength - headerLength dataLength := len(serializedData) - idLength - headerLength
// at this point we can be satisfied that we have the correct data length to read // at this point we can be satisfied that we have the correct data length to read

@ -25,13 +25,13 @@ import (
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
) )
// Feed represents a particular user's view of a resource // Feed represents a particular user's stream of updates on a Topic
type Feed struct { type Feed struct {
Topic Topic `json:"topic"` Topic Topic `json:"topic"`
User common.Address `json:"user"` User common.Address `json:"user"`
} }
// View layout: // Feed layout:
// TopicLength bytes // TopicLength bytes
// userAddr common.AddressLength bytes // userAddr common.AddressLength bytes
const feedLength = TopicLength + common.AddressLength const feedLength = TopicLength + common.AddressLength
@ -51,7 +51,7 @@ func (u *Feed) mapKey() uint64 {
// binaryPut serializes this Feed instance into the provided slice // binaryPut serializes this Feed instance into the provided slice
func (u *Feed) binaryPut(serializedData []byte) error { func (u *Feed) binaryPut(serializedData []byte) error {
if len(serializedData) != feedLength { if len(serializedData) != feedLength {
return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", feedLength, len(serializedData)) return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize Feed. Expected %d, got %d", feedLength, len(serializedData))
} }
var cursor int var cursor int
copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength]) copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength])

Loading…
Cancel
Save