mirror of https://github.com/ethereum/go-ethereum
swarm: code cleanup, move to ethersphere/swarm (#19661)
parent 15f24ff189
commit 42b81f94ad
@@ -1,5 +0,0 @@
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low

  * git build of {{.Env.Commit}}

 -- {{.Author}} {{.Time}}
@@ -1,19 +0,0 @@
Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.11
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
Vcs-Browser: https://github.com/ethereum/go-ethereum

{{range .Executables}}
Package: {{$.ExeName .}}
Conflicts: {{$.ExeConflicts .}}
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Built-Using: ${misc:Built-Using}
Description: {{.Description}}
 {{.Description}}
{{end}}
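The {{range .Executables}} / {{$.ExeName .}} constructs in the control file above are Go text/template actions evaluated by the build tooling. As a minimal, self-contained sketch of how such a template is rendered (the deployment/executable types and the ExeName naming scheme below are hypothetical stand-ins, not the real data passed in by build/ci.go):

package main

import (
	"os"
	"text/template"
)

type executable struct {
	BinaryName  string
	Description string
}

type deployment struct {
	Name        string
	Executables []executable
}

// ExeName mirrors the {{$.ExeName .}} call in the template: it derives a
// package name from an executable entry (assumed naming scheme).
func (d deployment) ExeName(e executable) string { return d.Name + "-" + e.BinaryName }

func main() {
	const controlTmpl = "Source: {{.Name}}\n{{range .Executables}}Package: {{$.ExeName .}}\nDescription: {{.Description}}\n{{end}}"
	t := template.Must(template.New("control").Parse(controlTmpl))
	d := deployment{
		Name:        "ethereum-swarm",
		Executables: []executable{{BinaryName: "swarm", Description: "Ethereum Swarm client"}},
	}
	if err := t.Execute(os.Stdout, d); err != nil {
		panic(err)
	}
}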
@@ -1,14 +0,0 @@
Copyright 2018 The go-ethereum Authors

go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
@@ -1 +0,0 @@
AUTHORS
@@ -1 +0,0 @@
build/bin/{{.BinaryName}} usr/bin
@@ -1,16 +0,0 @@
#!/usr/bin/make -f
# -*- makefile -*-

# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1

# Launchpad rejects Go's access to $HOME/.cache, use custom folder
export GOCACHE=/tmp/go-build

override_dh_auto_build:
	build/env.sh /usr/lib/go-1.11/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}

override_dh_auto_test:

%:
	dh $@
@@ -1,297 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"crypto/rand"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/api/client"
	"gopkg.in/urfave/cli.v1"
)

var (
	salt          = make([]byte, 32)
	accessCommand = cli.Command{
		CustomHelpTemplate: helpTemplate,
		Name:               "access",
		Usage:              "encrypts a reference and embeds it into a root manifest",
		ArgsUsage:          "<ref>",
		Description:        "encrypts a reference and embeds it into a root manifest",
		Subcommands: []cli.Command{
			{
				CustomHelpTemplate: helpTemplate,
				Name:               "new",
				Usage:              "encrypts a reference and embeds it into a root manifest",
				ArgsUsage:          "<ref>",
				Description:        "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
				Subcommands: []cli.Command{
					{
						Action:             accessNewPass,
						CustomHelpTemplate: helpTemplate,
						Flags: []cli.Flag{
							utils.PasswordFileFlag,
							SwarmDryRunFlag,
						},
						Name:        "pass",
						Usage:       "encrypts a reference with a password and embeds it into a root manifest",
						ArgsUsage:   "<ref>",
						Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
					},
					{
						Action:             accessNewPK,
						CustomHelpTemplate: helpTemplate,
						Flags: []cli.Flag{
							utils.PasswordFileFlag,
							SwarmDryRunFlag,
							SwarmAccessGrantKeyFlag,
						},
						Name:        "pk",
						Usage:       "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
						ArgsUsage:   "<ref>",
						Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
					},
					{
						Action:             accessNewACT,
						CustomHelpTemplate: helpTemplate,
						Flags: []cli.Flag{
							SwarmAccessGrantKeysFlag,
							SwarmDryRunFlag,
							utils.PasswordFileFlag,
						},
						Name:        "act",
						Usage:       "encrypts a reference with the node's private key and a list of grantee public keys and/or passwords and embeds it into a root manifest",
						ArgsUsage:   "<ref>",
						Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
					},
				},
			},
		},
	}
)
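For readers unfamiliar with gopkg.in/urfave/cli.v1, the nested Subcommands fields above mean the three actions are reached as "access new pass", "access new pk" and "access new act". A minimal, standalone sketch of the same dispatch pattern (toy command name and a trivial action, not the real swarm wiring):

package main

import (
	"fmt"
	"log"
	"os"

	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Name = "toy"
	// A two-level command tree mirroring access -> new -> pass.
	app.Commands = []cli.Command{
		{
			Name: "access",
			Subcommands: []cli.Command{
				{
					Name: "new",
					Subcommands: []cli.Command{
						{
							Name:      "pass",
							ArgsUsage: "<ref>",
							Flags:     []cli.Flag{cli.BoolFlag{Name: "dry-run"}},
							Action: func(ctx *cli.Context) error {
								// ctx.Args() holds the <ref> argument; ctx.Bool reads the subcommand flag.
								fmt.Println("ref:", ctx.Args().First(), "dry-run:", ctx.Bool("dry-run"))
								return nil
							},
						},
					},
				},
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}

Invoked as "toy access new pass --dry-run <ref>", this prints the parsed argument and flag, which is the same routing the real accessNewPass/accessNewPK/accessNewACT actions rely on.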

func init() {
	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
		panic("reading from crypto/rand failed: " + err.Error())
	}
}

func accessNewPass(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 1 {
		utils.Fatalf("Expected 1 argument - the ref")
	}

	var (
		ae        *api.AccessEntry
		accessKey []byte
		err       error
		ref       = args[0]
		password  = getPassPhrase("", 0, makePasswordList(ctx))
		dryRun    = ctx.Bool(SwarmDryRunFlag.Name)
	)
	accessKey, ae, err = api.DoPassword(ctx, password, salt)
	if err != nil {
		utils.Fatalf("error getting session key: %v", err)
	}
	m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
	if err != nil {
		utils.Fatalf("had an error generating the manifest: %v", err)
	}
	if dryRun {
		err = printManifests(m, nil)
		if err != nil {
			utils.Fatalf("had an error printing the manifests: %v", err)
		}
	} else {
		err = uploadManifests(ctx, m, nil)
		if err != nil {
			utils.Fatalf("had an error uploading the manifests: %v", err)
		}
	}
}

func accessNewPK(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 1 {
		utils.Fatalf("Expected 1 argument - the ref")
	}

	var (
		ae               *api.AccessEntry
		sessionKey       []byte
		err              error
		ref              = args[0]
		privateKey       = getPrivKey(ctx)
		granteePublicKey = ctx.String(SwarmAccessGrantKeyFlag.Name)
		dryRun           = ctx.Bool(SwarmDryRunFlag.Name)
	)
	sessionKey, ae, err = api.DoPK(ctx, privateKey, granteePublicKey, salt)
	if err != nil {
		utils.Fatalf("error getting session key: %v", err)
	}
	m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae)
	if err != nil {
		utils.Fatalf("had an error generating the manifest: %v", err)
	}
	if dryRun {
		err = printManifests(m, nil)
		if err != nil {
			utils.Fatalf("had an error printing the manifests: %v", err)
		}
	} else {
		err = uploadManifests(ctx, m, nil)
		if err != nil {
			utils.Fatalf("had an error uploading the manifests: %v", err)
		}
	}
}

func accessNewACT(ctx *cli.Context) {
	args := ctx.Args()
	if len(args) != 1 {
		utils.Fatalf("Expected 1 argument - the ref")
	}

	var (
		ae                   *api.AccessEntry
		actManifest          *api.Manifest
		accessKey            []byte
		err                  error
		ref                  = args[0]
		pkGrantees           []string
		passGrantees         []string
		pkGranteesFilename   = ctx.String(SwarmAccessGrantKeysFlag.Name)
		passGranteesFilename = ctx.String(utils.PasswordFileFlag.Name)
		privateKey           = getPrivKey(ctx)
		dryRun               = ctx.Bool(SwarmDryRunFlag.Name)
	)
	if pkGranteesFilename == "" && passGranteesFilename == "" {
		utils.Fatalf("you have to provide either a grantee public-keys file or an encryption passwords file (or both)")
	}

	if pkGranteesFilename != "" {
		bytes, err := ioutil.ReadFile(pkGranteesFilename)
		if err != nil {
			utils.Fatalf("had an error reading the grantee public key list")
		}
		pkGrantees = strings.Split(strings.Trim(string(bytes), "\n"), "\n")
	}

	if passGranteesFilename != "" {
		bytes, err := ioutil.ReadFile(passGranteesFilename)
		if err != nil {
			utils.Fatalf("could not read password filename: %v", err)
		}
		passGrantees = strings.Split(strings.Trim(string(bytes), "\n"), "\n")
	}
	accessKey, ae, actManifest, err = api.DoACT(ctx, privateKey, salt, pkGrantees, passGrantees)
	if err != nil {
		utils.Fatalf("error generating ACT manifest: %v", err)
	}

	m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
	if err != nil {
		utils.Fatalf("error generating root access manifest: %v", err)
	}

	if dryRun {
		err = printManifests(m, actManifest)
		if err != nil {
			utils.Fatalf("had an error printing the manifests: %v", err)
		}
	} else {
		err = uploadManifests(ctx, m, actManifest)
		if err != nil {
			utils.Fatalf("had an error uploading the manifests: %v", err)
		}
	}
}

func printManifests(rootAccessManifest, actManifest *api.Manifest) error {
	js, err := json.Marshal(rootAccessManifest)
	if err != nil {
		return err
	}
	fmt.Println(string(js))

	if actManifest != nil {
		js, err := json.Marshal(actManifest)
		if err != nil {
			return err
		}
		fmt.Println(string(js))
	}
	return nil
}

func uploadManifests(ctx *cli.Context, rootAccessManifest, actManifest *api.Manifest) error {
	bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
	client := client.NewClient(bzzapi)

	var (
		key string
		err error
	)
	if actManifest != nil {
		key, err = client.UploadManifest(actManifest, false)
		if err != nil {
			return err
		}

		rootAccessManifest.Entries[0].Access.Act = key
	}
	key, err = client.UploadManifest(rootAccessManifest, false)
	if err != nil {
		return err
	}
	fmt.Println(key)
	return nil
}

// makePasswordList reads password lines from the file specified by the global --password flag
// and also by the same subcommand-level --password flag.
// This function is a fork of utils.MakePasswordList that also looks up the cli context of the subcommand,
// because ctx.SetGlobal does not set a global flag value that can later be read
// via ctx.GlobalString with the current version of the cli package.
func makePasswordList(ctx *cli.Context) []string {
	path := ctx.GlobalString(utils.PasswordFileFlag.Name)
	if path == "" {
		path = ctx.String(utils.PasswordFileFlag.Name)
		if path == "" {
			return nil
		}
	}
	text, err := ioutil.ReadFile(path)
	if err != nil {
		utils.Fatalf("Failed to read password file: %v", err)
	}
	lines := strings.Split(string(text), "\n")
	// Sanitise DOS line endings.
	for i := range lines {
		lines[i] = strings.TrimRight(lines[i], "\r")
	}
	return lines
}
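With --dry-run the commands above print the root access manifest (and, for act, the ACT manifest) as JSON instead of uploading it. A minimal sketch of a consumer that decodes that output and inspects the access entry, using only the api.Manifest fields that the tests below also assert on; reading from stdin here is purely illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	var m api.Manifest
	// Decode the JSON manifest printed by `swarm access new ... --dry-run`.
	if err := json.NewDecoder(os.Stdin).Decode(&m); err != nil {
		fmt.Fprintln(os.Stderr, "decode manifest:", err)
		os.Exit(1)
	}
	for _, e := range m.Entries {
		if e.Access == nil {
			continue
		}
		// Type is "pass", "pk" or "act"; Publisher is only set for pk/act manifests.
		fmt.Printf("access type=%s salt=%d bytes publisher=%q\n",
			e.Access.Type, len(e.Access.Salt), e.Access.Publisher)
	}
}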
@ -1,617 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"crypto/rand" |
||||
"encoding/hex" |
||||
"encoding/json" |
||||
"io" |
||||
"io/ioutil" |
||||
gorand "math/rand" |
||||
"net/http" |
||||
"os" |
||||
"runtime" |
||||
"strings" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/crypto/ecies" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
swarmapi "github.com/ethereum/go-ethereum/swarm/api/client" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
"golang.org/x/crypto/sha3" |
||||
) |
||||
|
||||
const ( |
||||
hashRegexp = `[a-f\d]{128}` |
||||
data = "notsorandomdata" |
||||
) |
||||
|
||||
var DefaultCurve = crypto.S256() |
||||
|
||||
func TestACT(t *testing.T) { |
||||
if runtime.GOOS == "windows" { |
||||
t.Skip() |
||||
} |
||||
|
||||
cluster := newTestCluster(t, clusterSize) |
||||
defer cluster.Shutdown() |
||||
|
||||
cases := []struct { |
||||
name string |
||||
f func(t *testing.T, cluster *testCluster) |
||||
}{ |
||||
{"Password", testPassword}, |
||||
{"PK", testPK}, |
||||
{"ACTWithoutBogus", testACTWithoutBogus}, |
||||
{"ACTWithBogus", testACTWithBogus}, |
||||
} |
||||
|
||||
for _, tc := range cases { |
||||
t.Run(tc.name, func(t *testing.T) { |
||||
tc.f(t, cluster) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
// testPassword tests the correct creation of an ACT manifest protected by a password.
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry.
// The publisher node uploads to the second node and then disappears. The uploaded content
// is then fetched through the second node; since the tested code is not key-aware, we can simply
// fetch it from the second node using HTTP Basic Auth.
||||
func testPassword(t *testing.T, cluster *testCluster) { |
||||
dataFilename := testutil.TempFileWithContent(t, data) |
||||
defer os.RemoveAll(dataFilename) |
||||
|
||||
// upload the file with 'swarm up' and expect a hash
|
||||
up := runSwarm(t, |
||||
"--bzzapi", |
||||
cluster.Nodes[0].URL, |
||||
"up", |
||||
"--encrypt", |
||||
dataFilename) |
||||
_, matches := up.ExpectRegexp(hashRegexp) |
||||
up.ExpectExit() |
||||
|
||||
if len(matches) < 1 { |
||||
t.Fatal("no matches found") |
||||
} |
||||
|
||||
ref := matches[0] |
||||
tmp, err := ioutil.TempDir("", "swarm-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmp) |
||||
password := "smth" |
||||
passwordFilename := testutil.TempFileWithContent(t, "smth") |
||||
defer os.RemoveAll(passwordFilename) |
||||
|
||||
up = runSwarm(t, |
||||
"access", |
||||
"new", |
||||
"pass", |
||||
"--dry-run", |
||||
"--password", |
||||
passwordFilename, |
||||
ref, |
||||
) |
||||
|
||||
_, matches = up.ExpectRegexp(".+") |
||||
up.ExpectExit() |
||||
|
||||
if len(matches) == 0 { |
||||
t.Fatalf("stdout not matched") |
||||
} |
||||
|
||||
var m api.Manifest |
||||
|
||||
err = json.Unmarshal([]byte(matches[0]), &m) |
||||
if err != nil { |
||||
t.Fatalf("unmarshal manifest: %v", err) |
||||
} |
||||
|
||||
if len(m.Entries) != 1 { |
||||
t.Fatalf("expected one manifest entry, got %v", len(m.Entries)) |
||||
} |
||||
|
||||
e := m.Entries[0] |
||||
|
||||
ct := "application/bzz-manifest+json" |
||||
if e.ContentType != ct { |
||||
t.Errorf("expected %q content type, got %q", ct, e.ContentType) |
||||
} |
||||
|
||||
if e.Access == nil { |
||||
t.Fatal("manifest access is nil") |
||||
} |
||||
|
||||
a := e.Access |
||||
|
||||
if a.Type != "pass" { |
||||
t.Errorf(`got access type %q, expected "pass"`, a.Type) |
||||
} |
||||
if len(a.Salt) < 32 { |
||||
t.Errorf(`got salt with length %v, expected not less than 32 bytes`, len(a.Salt))
||||
} |
||||
if a.KdfParams == nil { |
||||
t.Fatal("manifest access kdf params is nil") |
||||
} |
||||
if a.Publisher != "" { |
||||
t.Fatal("should be empty") |
||||
} |
||||
|
||||
client := swarmapi.NewClient(cluster.Nodes[0].URL) |
||||
|
||||
hash, err := client.UploadManifest(&m, false) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash |
||||
|
||||
httpClient := &http.Client{} |
||||
response, err := httpClient.Get(url) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if response.StatusCode != http.StatusUnauthorized { |
||||
t.Fatal("should be a 401") |
||||
} |
||||
authHeader := response.Header.Get("WWW-Authenticate") |
||||
if authHeader == "" { |
||||
t.Fatal("should be something here") |
||||
} |
||||
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
req.SetBasicAuth("", password) |
||||
|
||||
response, err = http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer response.Body.Close() |
||||
|
||||
if response.StatusCode != http.StatusOK { |
||||
t.Errorf("expected status %v, got %v", http.StatusOK, response.StatusCode) |
||||
} |
||||
d, err := ioutil.ReadAll(response.Body) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if string(d) != data { |
||||
t.Errorf("expected decrypted data %q, got %q", data, string(d)) |
||||
} |
||||
|
||||
wrongPasswordFilename := testutil.TempFileWithContent(t, "just wr0ng") |
||||
defer os.RemoveAll(wrongPasswordFilename) |
||||
|
||||
//download file with 'swarm down' with wrong password
|
||||
up = runSwarm(t, |
||||
"--bzzapi", |
||||
cluster.Nodes[0].URL, |
||||
"down", |
||||
"bzz:/"+hash, |
||||
tmp, |
||||
"--password", |
||||
wrongPasswordFilename) |
||||
|
||||
_, matches = up.ExpectRegexp("unauthorized") |
||||
if len(matches) != 1 && matches[0] != "unauthorized" { |
||||
t.Fatal(`"unauthorized" not found in output`)
||||
} |
||||
up.ExpectExit() |
||||
} |
||||
|
||||
// testPK tests the correct creation of an ACT manifest between two parties (publisher and grantee).
// The test creates bogus content, uploads it encrypted, then creates the wrapping manifest with the Access entry.
// The publisher node uploads to the second node (which is also the grantee) and then disappears.
// The uploaded content is then fetched through the grantee's http proxy. Since the tested code is private-key aware,
// the test will fail if the proxy's given private key is not granted on the ACT.
||||
func testPK(t *testing.T, cluster *testCluster) { |
||||
dataFilename := testutil.TempFileWithContent(t, data) |
||||
defer os.RemoveAll(dataFilename) |
||||
|
||||
// upload the file with 'swarm up' and expect a hash
|
||||
up := runSwarm(t, |
||||
"--bzzapi", |
||||
cluster.Nodes[0].URL, |
||||
"up", |
||||
"--encrypt", |
||||
dataFilename) |
||||
_, matches := up.ExpectRegexp(hashRegexp) |
||||
up.ExpectExit() |
||||
|
||||
if len(matches) < 1 { |
||||
t.Fatal("no matches found") |
||||
} |
||||
|
||||
ref := matches[0] |
||||
pk := cluster.Nodes[0].PrivateKey |
||||
granteePubKey := crypto.CompressPubkey(&pk.PublicKey) |
||||
|
||||
publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
passwordFilename := testutil.TempFileWithContent(t, testPassphrase) |
||||
defer os.RemoveAll(passwordFilename) |
||||
|
||||
_, publisherAccount := getTestAccount(t, publisherDir) |
||||
up = runSwarm(t, |
||||
"--bzzaccount", |
||||
publisherAccount.Address.String(), |
||||
"--password", |
||||
passwordFilename, |
||||
"--datadir", |
||||
publisherDir, |
||||
"--bzzapi", |
||||
cluster.Nodes[0].URL, |
||||
"access", |
||||
"new", |
||||
"pk", |
||||
"--dry-run", |
||||
"--grant-key", |
||||
hex.EncodeToString(granteePubKey), |
||||
ref, |
||||
) |
||||
|
||||
_, matches = up.ExpectRegexp(".+") |
||||
up.ExpectExit() |
||||
|
||||
if len(matches) == 0 { |
||||
t.Fatalf("stdout not matched") |
||||
} |
||||
|
||||
//get the public key from the publisher directory
|
||||
publicKeyFromDataDir := runSwarm(t, |
||||
"--bzzaccount", |
||||
publisherAccount.Address.String(), |
||||
"--password", |
||||
passwordFilename, |
||||
"--datadir", |
||||
publisherDir, |
||||
"print-keys", |
||||
"--compressed", |
||||
) |
||||
_, publicKeyString := publicKeyFromDataDir.ExpectRegexp(".+") |
||||
publicKeyFromDataDir.ExpectExit() |
||||
pkComp := strings.Split(publicKeyString[0], "=")[1] |
||||
var m api.Manifest |
||||
|
||||
err = json.Unmarshal([]byte(matches[0]), &m) |
||||
if err != nil { |
||||
t.Fatalf("unmarshal manifest: %v", err) |
||||
} |
||||
|
||||
if len(m.Entries) != 1 { |
||||
t.Fatalf("expected one manifest entry, got %v", len(m.Entries)) |
||||
} |
||||
|
||||
e := m.Entries[0] |
||||
|
||||
ct := "application/bzz-manifest+json" |
||||
if e.ContentType != ct { |
||||
t.Errorf("expected %q content type, got %q", ct, e.ContentType) |
||||
} |
||||
|
||||
if e.Access == nil { |
||||
t.Fatal("manifest access is nil") |
||||
} |
||||
|
||||
a := e.Access |
||||
|
||||
if a.Type != "pk" { |
||||
t.Errorf(`got access type %q, expected "pk"`, a.Type) |
||||
} |
||||
if len(a.Salt) < 32 { |
||||
t.Errorf(`got salt with length %v, expected not less than 32 bytes`, len(a.Salt))
||||
} |
||||
if a.KdfParams != nil { |
||||
t.Fatal("manifest access kdf params should be nil") |
||||
} |
||||
if a.Publisher != pkComp { |
||||
t.Fatal("publisher key did not match") |
||||
} |
||||
client := swarmapi.NewClient(cluster.Nodes[0].URL) |
||||
|
||||
hash, err := client.UploadManifest(&m, false) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
httpClient := &http.Client{} |
||||
|
||||
url := cluster.Nodes[0].URL + "/" + "bzz:/" + hash |
||||
response, err := httpClient.Get(url) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if response.StatusCode != http.StatusOK { |
||||
t.Fatal("should be a 200") |
||||
} |
||||
d, err := ioutil.ReadAll(response.Body) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if string(d) != data { |
||||
t.Errorf("expected decrypted data %q, got %q", data, string(d)) |
||||
} |
||||
} |
||||
|
||||
// testACTWithoutBogus tests the creation of the ACT manifest end-to-end, without any bogus entries (i.e. default scenario = 3 nodes 1 unauthorized)
|
||||
func testACTWithoutBogus(t *testing.T, cluster *testCluster) { |
||||
testACT(t, cluster, 0) |
||||
} |
||||
|
||||
// testACTWithBogus tests the creation of the ACT manifest end-to-end, with 100 bogus entries (i.e. 100 EC keys + default scenario = 3 nodes 1 unauthorized = 103 keys in the ACT manifest)
|
||||
func testACTWithBogus(t *testing.T, cluster *testCluster) { |
||||
testACT(t, cluster, 100) |
||||
} |
||||
|
||||
// testACT tests the e2e creation, uploading and downloading of an ACT access control with both EC keys AND password protection.
// The test fires up a 3-node cluster, then randomly picks 2 nodes which will act as grantees to the data,
// and also protects the ACT with a password. The third node should fail to decode the reference as it is not granted access.
// The third node then tries to download using the correct password (and succeeds), then uses a wrong password and fails.
// The publisher uploads through one of the nodes and then disappears.
||||
func testACT(t *testing.T, cluster *testCluster, bogusEntries int) { |
||||
var uploadThroughNode = cluster.Nodes[0] |
||||
client := swarmapi.NewClient(uploadThroughNode.URL) |
||||
|
||||
r1 := gorand.New(gorand.NewSource(time.Now().UnixNano())) |
||||
nodeToSkip := r1.Intn(clusterSize) // a number between 0 and 2 (node indices in `cluster`)
|
||||
dataFilename := testutil.TempFileWithContent(t, data) |
||||
defer os.RemoveAll(dataFilename) |
||||
|
||||
// upload the file with 'swarm up' and expect a hash
|
||||
up := runSwarm(t, |
||||
"--bzzapi", |
||||
cluster.Nodes[0].URL, |
||||
"up", |
||||
"--encrypt", |
||||
dataFilename) |
||||
_, matches := up.ExpectRegexp(hashRegexp) |
||||
up.ExpectExit() |
||||
|
||||
if len(matches) < 1 { |
||||
t.Fatal("no matches found") |
||||
} |
||||
|
||||
ref := matches[0] |
||||
var grantees []string |
||||
for i, v := range cluster.Nodes { |
||||
if i == nodeToSkip { |
||||
continue |
||||
} |
||||
pk := v.PrivateKey |
||||
granteePubKey := crypto.CompressPubkey(&pk.PublicKey) |
||||
grantees = append(grantees, hex.EncodeToString(granteePubKey)) |
||||
} |
||||
|
||||
if bogusEntries > 0 { |
||||
var bogusGrantees []string |
||||
|
||||
for i := 0; i < bogusEntries; i++ { |
||||
prv, err := ecies.GenerateKey(rand.Reader, DefaultCurve, nil) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
bogusGrantees = append(bogusGrantees, hex.EncodeToString(crypto.CompressPubkey(&prv.ExportECDSA().PublicKey))) |
||||
} |
||||
r2 := gorand.New(gorand.NewSource(time.Now().UnixNano())) |
||||
for i := 0; i < len(grantees); i++ { |
||||
insertAtIdx := r2.Intn(len(bogusGrantees)) |
||||
bogusGrantees = append(bogusGrantees[:insertAtIdx], append([]string{grantees[i]}, bogusGrantees[insertAtIdx:]...)...) |
||||
} |
||||
grantees = bogusGrantees |
||||
} |
||||
granteesPubkeyListFile := testutil.TempFileWithContent(t, strings.Join(grantees, "\n")) |
||||
defer os.RemoveAll(granteesPubkeyListFile) |
||||
|
||||
publisherDir, err := ioutil.TempDir("", "swarm-account-dir-temp") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(publisherDir) |
||||
|
||||
passwordFilename := testutil.TempFileWithContent(t, testPassphrase) |
||||
defer os.RemoveAll(passwordFilename) |
||||
actPasswordFilename := testutil.TempFileWithContent(t, "smth") |
||||
defer os.RemoveAll(actPasswordFilename) |
||||
_, publisherAccount := getTestAccount(t, publisherDir) |
||||
up = runSwarm(t, |
||||
"--bzzaccount", |
||||
publisherAccount.Address.String(), |
||||
"--password", |
||||
passwordFilename, |
||||
"--datadir", |
||||
publisherDir, |
||||
"--bzzapi", |
||||
cluster.Nodes[0].URL, |
||||
"access", |
||||
"new", |
||||
"act", |
||||
"--grant-keys", |
||||
granteesPubkeyListFile, |
||||
"--password", |
||||
actPasswordFilename, |
||||
ref, |
||||
) |
||||
|
||||
_, matches = up.ExpectRegexp(`[a-f\d]{64}`) |
||||
up.ExpectExit() |
||||
|
||||
if len(matches) == 0 { |
||||
t.Fatalf("stdout not matched") |
||||
} |
||||
|
||||
//get the public key from the publisher directory
|
||||
publicKeyFromDataDir := runSwarm(t, |
||||
"--bzzaccount", |
||||
publisherAccount.Address.String(), |
||||
"--password", |
||||
passwordFilename, |
||||
"--datadir", |
||||
publisherDir, |
||||
"print-keys", |
||||
"--compressed", |
||||
) |
||||
_, publicKeyString := publicKeyFromDataDir.ExpectRegexp(".+") |
||||
publicKeyFromDataDir.ExpectExit() |
||||
pkComp := strings.Split(publicKeyString[0], "=")[1] |
||||
|
||||
hash := matches[0] |
||||
m, _, err := client.DownloadManifest(hash) |
||||
if err != nil { |
||||
t.Fatalf("unmarshal manifest: %v", err) |
||||
} |
||||
|
||||
if len(m.Entries) != 1 { |
||||
t.Fatalf("expected one manifest entry, got %v", len(m.Entries)) |
||||
} |
||||
|
||||
e := m.Entries[0] |
||||
|
||||
ct := "application/bzz-manifest+json" |
||||
if e.ContentType != ct { |
||||
t.Errorf("expected %q content type, got %q", ct, e.ContentType) |
||||
} |
||||
|
||||
if e.Access == nil { |
||||
t.Fatal("manifest access is nil") |
||||
} |
||||
|
||||
a := e.Access |
||||
|
||||
if a.Type != "act" { |
||||
t.Fatalf(`got access type %q, expected "act"`, a.Type) |
||||
} |
||||
if len(a.Salt) < 32 { |
||||
t.Fatalf(`got salt with length %v, expected not less than 32 bytes`, len(a.Salt))
||||
} |
||||
|
||||
if a.Publisher != pkComp { |
||||
t.Fatal("publisher key did not match") |
||||
} |
||||
httpClient := &http.Client{} |
||||
|
||||
// all nodes except the skipped node should be able to decrypt the content
|
||||
for i, node := range cluster.Nodes { |
||||
log.Debug("trying to fetch from node", "node index", i) |
||||
|
||||
url := node.URL + "/" + "bzz:/" + hash |
||||
response, err := httpClient.Get(url) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
log.Debug("got response from node", "response code", response.StatusCode) |
||||
|
||||
if i == nodeToSkip { |
||||
log.Debug("reached node to skip", "status code", response.StatusCode) |
||||
|
||||
if response.StatusCode != http.StatusUnauthorized { |
||||
t.Fatalf("should be a 401") |
||||
} |
||||
|
||||
// try downloading using a password instead, using the unauthorized node
|
||||
passwordUrl := strings.Replace(url, "http://", "http://:smth@", -1) |
||||
response, err = httpClient.Get(passwordUrl) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if response.StatusCode != http.StatusOK { |
||||
t.Fatal("should be a 200") |
||||
} |
||||
|
||||
// now try with the wrong password, expect 401
|
||||
passwordUrl = strings.Replace(url, "http://", "http://:smthWrong@", -1) |
||||
response, err = httpClient.Get(passwordUrl) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if response.StatusCode != http.StatusUnauthorized { |
||||
t.Fatal("should be a 401") |
||||
} |
||||
continue |
||||
} |
||||
|
||||
if response.StatusCode != http.StatusOK { |
||||
t.Fatal("should be a 200") |
||||
} |
||||
d, err := ioutil.ReadAll(response.Body) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if string(d) != data { |
||||
t.Errorf("expected decrypted data %q, got %q", data, string(d)) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// TestKeypairSanity is a sanity test for the ACT crypto scheme. It asserts the correct shared secret according to
// the specs at https://github.com/ethersphere/swarm-docs/blob/eb857afda906c6e7bb90d37f3f334ccce5eef230/act.md
||||
func TestKeypairSanity(t *testing.T) { |
||||
salt := make([]byte, 32) |
||||
if _, err := io.ReadFull(rand.Reader, salt); err != nil { |
||||
t.Fatalf("reading from crypto/rand failed: %v", err.Error()) |
||||
} |
||||
sharedSecret := "a85586744a1ddd56a7ed9f33fa24f40dd745b3a941be296a0d60e329dbdb896d" |
||||
|
||||
for i, v := range []struct { |
||||
publisherPriv string |
||||
granteePub string |
||||
}{ |
||||
{ |
||||
publisherPriv: "ec5541555f3bc6376788425e9d1a62f55a82901683fd7062c5eddcc373a73459", |
||||
granteePub: "0226f213613e843a413ad35b40f193910d26eb35f00154afcde9ded57479a6224a", |
||||
}, |
||||
{ |
||||
publisherPriv: "70c7a73011aa56584a0009ab874794ee7e5652fd0c6911cd02f8b6267dd82d2d", |
||||
granteePub: "02e6f8d5e28faaa899744972bb847b6eb805a160494690c9ee7197ae9f619181db", |
||||
}, |
||||
} { |
||||
b, _ := hex.DecodeString(v.granteePub) |
||||
granteePub, _ := crypto.DecompressPubkey(b) |
||||
publisherPrivate, _ := crypto.HexToECDSA(v.publisherPriv) |
||||
|
||||
ssKey, err := api.NewSessionKeyPK(publisherPrivate, granteePub, salt) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
hasher := sha3.NewLegacyKeccak256() |
||||
hasher.Write(salt) |
||||
shared, err := hex.DecodeString(sharedSecret) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
hasher.Write(shared) |
||||
sum := hasher.Sum(nil) |
||||
|
||||
if !bytes.Equal(ssKey, sum) { |
||||
t.Fatalf("%d: got a session key mismatch", i) |
||||
} |
||||
} |
||||
} |
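TestKeypairSanity above checks that api.NewSessionKeyPK(publisherPriv, granteePub, salt) equals Keccak256(salt || sharedSecret) for fixed vectors. As a heavily hedged sketch of where such a 32-byte shared secret can come from: one plausible derivation is a plain ECIES/ECDH shared key between the publisher's private key and the grantee's public key (the parameters below are an assumption; the authoritative derivation lives in swarm/api and may differ):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/ecies"
)

func main() {
	// First vector from TestKeypairSanity.
	priv, err := crypto.HexToECDSA("ec5541555f3bc6376788425e9d1a62f55a82901683fd7062c5eddcc373a73459")
	if err != nil {
		panic(err)
	}
	pubBytes, err := hex.DecodeString("0226f213613e843a413ad35b40f193910d26eb35f00154afcde9ded57479a6224a")
	if err != nil {
		panic(err)
	}
	pub, err := crypto.DecompressPubkey(pubBytes)
	if err != nil {
		panic(err)
	}

	// ECDH via the ecies package: a 16-byte key plus 16-byte MAC gives 32 bytes,
	// matching the length of the sharedSecret constant in the test.
	// (Assumption: these are the parameters swarm used; verify against swarm/api.)
	shared, err := ecies.ImportECDSA(priv).GenerateShared(ecies.ImportECDSAPublic(pub), 16, 16)
	if err != nil {
		panic(err)
	}
	fmt.Println("derived shared secret:", hex.EncodeToString(shared))
}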
@@ -1,24 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

var SwarmBootnodes = []string{
	// EF Swarm Bootnode - AWS - eu-central-1
	"enode://4c113504601930bf2000c29bcd98d1716b6167749f58bad703bae338332fe93cc9d9204f08afb44100dc7bea479205f5d162df579f9a8f76f8b402d339709023@3.122.203.99:30301",
	// EF Swarm Bootnode - AWS - us-west-2
	"enode://89f2ede3371bff1ad9f2088f2012984e280287a4e2b68007c2a6ad994909c51886b4a8e9e2ecc97f9910aca538398e0a5804b0ee80a187fde1ba4f32626322ba@52.35.212.179:30301",
}
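The bootnode entries above are enode:// URLs (node public key, IP address, TCP port). A small illustrative sketch, not part of the removed file, checking that they parse with go-ethereum's p2p/enode package:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	bootnodes := []string{
		"enode://4c113504601930bf2000c29bcd98d1716b6167749f58bad703bae338332fe93cc9d9204f08afb44100dc7bea479205f5d162df579f9a8f76f8b402d339709023@3.122.203.99:30301",
		"enode://89f2ede3371bff1ad9f2088f2012984e280287a4e2b68007c2a6ad994909c51886b4a8e9e2ecc97f9910aca538398e0a5804b0ee80a187fde1ba4f32626322ba@52.35.212.179:30301",
	}
	for _, url := range bootnodes {
		n, err := enode.ParseV4(url)
		if err != nil {
			panic(err)
		}
		// The node ID is derived from the public key encoded in the URL;
		// IP and TCP port are recovered from the host:port part.
		fmt.Printf("%s -> ip=%s tcp=%d\n", n.ID().TerminalString(), n.IP(), n.TCP())
	}
}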
@@ -1,451 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"errors"
	"fmt"
	"io"
	"os"
	"reflect"
	"strconv"
	"strings"
	"time"
	"unicode"

	cli "gopkg.in/urfave/cli.v1"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/naoina/toml"

	bzzapi "github.com/ethereum/go-ethereum/swarm/api"
)

var (
	//flag definition for the dumpconfig command
	DumpConfigCommand = cli.Command{
		Action:      utils.MigrateFlags(dumpConfig),
		Name:        "dumpconfig",
		Usage:       "Show configuration values",
		ArgsUsage:   "",
		Flags:       app.Flags,
		Category:    "MISCELLANEOUS COMMANDS",
		Description: `The dumpconfig command shows configuration values.`,
	}

	//flag definition for the config file command
	SwarmTomlConfigPathFlag = cli.StringFlag{
		Name:  "config",
		Usage: "TOML configuration file",
	}
)

//constants for environment variables
const (
	SwarmEnvChequebookAddr       = "SWARM_CHEQUEBOOK_ADDR"
	SwarmEnvAccount              = "SWARM_ACCOUNT"
	SwarmEnvListenAddr           = "SWARM_LISTEN_ADDR"
	SwarmEnvPort                 = "SWARM_PORT"
	SwarmEnvNetworkID            = "SWARM_NETWORK_ID"
	SwarmEnvSwapEnable           = "SWARM_SWAP_ENABLE"
	SwarmEnvSwapAPI              = "SWARM_SWAP_API"
	SwarmEnvSyncDisable          = "SWARM_SYNC_DISABLE"
	SwarmEnvSyncUpdateDelay      = "SWARM_ENV_SYNC_UPDATE_DELAY"
	SwarmEnvMaxStreamPeerServers = "SWARM_ENV_MAX_STREAM_PEER_SERVERS"
	SwarmEnvLightNodeEnable      = "SWARM_LIGHT_NODE_ENABLE"
	SwarmEnvDeliverySkipCheck    = "SWARM_DELIVERY_SKIP_CHECK"
	SwarmEnvENSAPI               = "SWARM_ENS_API"
	SwarmEnvENSAddr              = "SWARM_ENS_ADDR"
	SwarmEnvCORS                 = "SWARM_CORS"
	SwarmEnvBootnodes            = "SWARM_BOOTNODES"
	SwarmEnvPSSEnable            = "SWARM_PSS_ENABLE"
	SwarmEnvStorePath            = "SWARM_STORE_PATH"
	SwarmEnvStoreCapacity        = "SWARM_STORE_CAPACITY"
	SwarmEnvStoreCacheCapacity   = "SWARM_STORE_CACHE_CAPACITY"
	SwarmEnvBootnodeMode         = "SWARM_BOOTNODE_MODE"
	SwarmAccessPassword          = "SWARM_ACCESS_PASSWORD"
	SwarmAutoDefaultPath         = "SWARM_AUTO_DEFAULTPATH"
	SwarmGlobalstoreAPI          = "SWARM_GLOBALSTORE_API"
	GethEnvDataDir               = "GETH_DATADIR"
)

// These settings ensure that TOML keys use the same names as Go struct fields.
var tomlSettings = toml.Config{
	NormFieldName: func(rt reflect.Type, key string) string {
		return key
	},
	FieldToKey: func(rt reflect.Type, field string) string {
		return field
	},
	MissingField: func(rt reflect.Type, field string) error {
		link := ""
		if unicode.IsUpper(rune(rt.Name()[0])) && rt.PkgPath() != "main" {
			link = fmt.Sprintf(", check github.com/ethereum/go-ethereum/swarm/api/config.go for available fields")
		}
		return fmt.Errorf("field '%s' is not defined in %s%s", field, rt.String(), link)
	},
}
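Because NormFieldName and FieldToKey above are identity functions, TOML keys in a --config file must match the Go field names of bzzapi.Config exactly (e.g. Port, not port). A tiny self-contained illustration of that behaviour with a stand-in struct (toyConfig and its fields are hypothetical, not the real bzzapi.Config):

package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/naoina/toml"
)

// toyConfig stands in for a small slice of the real config struct.
type toyConfig struct {
	Port      string
	NetworkID uint64
}

func main() {
	settings := toml.Config{
		NormFieldName: func(rt reflect.Type, key string) string { return key },
		FieldToKey:    func(rt reflect.Type, field string) string { return field },
		MissingField: func(rt reflect.Type, field string) error {
			return fmt.Errorf("field %q is not defined in %s", field, rt.String())
		},
	}

	// Keys use the exact Go field names, as enforced by the identity mappings above.
	src := "Port = \"8500\"\nNetworkID = 42\n"
	var cfg toyConfig
	if err := settings.NewDecoder(strings.NewReader(src)).Decode(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}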

//before booting the swarm node, build the configuration
func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
	//start by creating a default config
	config = bzzapi.NewConfig()
	//first load settings from config file (if provided)
	config, err = configFileOverride(config, ctx)
	if err != nil {
		return nil, err
	}
	//override settings provided by environment variables
	config = envVarsOverride(config)
	//override settings provided by command line
	config = cmdLineOverride(config, ctx)
	//validate configuration parameters
	err = validateConfig(config)

	return
}

//finally, after the configuration build phase is finished, initialize
func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context, nodeconfig *node.Config) error {
	//at this point, all vars should be set in the Config
	//get the account for the provided swarm account
	prvkey := getAccount(config.BzzAccount, ctx, stack)
	//set the resolved config path (geth --datadir)
	config.Path = expandPath(stack.InstanceDir())
	//finally, initialize the configuration
	err := config.Init(prvkey, nodeconfig.NodeKey())
	if err != nil {
		return err
	}
	//configuration phase completed here
	log.Debug("Starting Swarm with the following parameters:")
	//after having created the config, print it to screen
	log.Debug(printConfig(config))
	return nil
}

//configFileOverride overrides the current config with the config file, if a config file has been provided
func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config, error) {
	var err error

	//only do something if the -config flag has been set
	if ctx.GlobalIsSet(SwarmTomlConfigPathFlag.Name) {
		var filepath string
		if filepath = ctx.GlobalString(SwarmTomlConfigPathFlag.Name); filepath == "" {
			utils.Fatalf("Config file flag provided with invalid file path")
		}
		var f *os.File
		f, err = os.Open(filepath)
		if err != nil {
			return nil, err
		}
		defer f.Close()

		//decode the TOML file into a Config struct
		//note that we are decoding into the existing defaultConfig;
		//if an entry is not present in the file, the default entry is kept
		err = tomlSettings.NewDecoder(f).Decode(&config)
		// Add file name to errors that have a line number.
		if _, ok := err.(*toml.LineError); ok {
			err = errors.New(filepath + ", " + err.Error())
		}
	}
	return config, err
}

// cmdLineOverride overrides the current config with whatever is provided through the command line;
// most values may not be a zero value (empty string) unless otherwise noted
func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config {
	if keyid := ctx.GlobalString(SwarmAccountFlag.Name); keyid != "" {
		currentConfig.BzzAccount = keyid
	}

	if chbookaddr := ctx.GlobalString(ChequebookAddrFlag.Name); chbookaddr != "" {
		currentConfig.Contract = common.HexToAddress(chbookaddr)
	}

	if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
		id, err := strconv.ParseUint(networkid, 10, 64)
		if err != nil {
			utils.Fatalf("invalid cli flag %s: %v", SwarmNetworkIdFlag.Name, err)
		}
		if id != 0 {
			currentConfig.NetworkID = id
		}
	}

	if ctx.GlobalIsSet(utils.DataDirFlag.Name) {
		if datadir := ctx.GlobalString(utils.DataDirFlag.Name); datadir != "" {
			currentConfig.Path = expandPath(datadir)
		}
	}

	bzzport := ctx.GlobalString(SwarmPortFlag.Name)
	if len(bzzport) > 0 {
		currentConfig.Port = bzzport
	}

	if bzzaddr := ctx.GlobalString(SwarmListenAddrFlag.Name); bzzaddr != "" {
		currentConfig.ListenAddr = bzzaddr
	}

	if ctx.GlobalIsSet(SwarmSwapEnabledFlag.Name) {
		currentConfig.SwapEnabled = true
	}

	if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) {
		currentConfig.SyncEnabled = false
	}

	if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 {
		currentConfig.SyncUpdateDelay = d
	}

	// any value including 0 is acceptable
	currentConfig.MaxStreamPeerServers = ctx.GlobalInt(SwarmMaxStreamPeerServersFlag.Name)

	if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) {
		currentConfig.LightNodeEnabled = true
	}

	if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
		currentConfig.DeliverySkipCheck = true
	}

	currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name)
	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
		utils.Fatalf(SwarmErrSwapSetNoAPI)
	}

	if ctx.GlobalIsSet(EnsAPIFlag.Name) {
		ensAPIs := ctx.GlobalStringSlice(EnsAPIFlag.Name)
		// preserve backward compatibility to disable ENS with --ens-api=""
		if len(ensAPIs) == 1 && ensAPIs[0] == "" {
			ensAPIs = nil
		}
		for i := range ensAPIs {
			ensAPIs[i] = expandPath(ensAPIs[i])
		}

		currentConfig.EnsAPIs = ensAPIs
	}

	if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
		currentConfig.Cors = cors
	}

	if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
		currentConfig.ChunkDbPath = storePath
	}

	if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
		currentConfig.DbCapacity = storeCapacity
	}

	if ctx.GlobalIsSet(SwarmStoreCacheCapacity.Name) {
		currentConfig.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
	}

	if ctx.GlobalIsSet(SwarmBootnodeModeFlag.Name) {
		currentConfig.BootnodeMode = ctx.GlobalBool(SwarmBootnodeModeFlag.Name)
	}

	if ctx.GlobalIsSet(SwarmGlobalStoreAPIFlag.Name) {
		currentConfig.GlobalStoreAPI = ctx.GlobalString(SwarmGlobalStoreAPIFlag.Name)
	}

	return currentConfig
}

// envVarsOverride overrides the current config with whatever is provided in environment variables;
// most values may not be a zero value (empty string) unless otherwise noted
func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
	if keyid := os.Getenv(SwarmEnvAccount); keyid != "" {
		currentConfig.BzzAccount = keyid
	}

	if chbookaddr := os.Getenv(SwarmEnvChequebookAddr); chbookaddr != "" {
		currentConfig.Contract = common.HexToAddress(chbookaddr)
	}

	if networkid := os.Getenv(SwarmEnvNetworkID); networkid != "" {
		id, err := strconv.ParseUint(networkid, 10, 64)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SwarmEnvNetworkID, err)
		}
		if id != 0 {
			currentConfig.NetworkID = id
		}
	}

	if datadir := os.Getenv(GethEnvDataDir); datadir != "" {
		currentConfig.Path = expandPath(datadir)
	}

	bzzport := os.Getenv(SwarmEnvPort)
	if len(bzzport) > 0 {
		currentConfig.Port = bzzport
	}

	if bzzaddr := os.Getenv(SwarmEnvListenAddr); bzzaddr != "" {
		currentConfig.ListenAddr = bzzaddr
	}

	if swapenable := os.Getenv(SwarmEnvSwapEnable); swapenable != "" {
		swap, err := strconv.ParseBool(swapenable)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SwarmEnvSwapEnable, err)
		}
		currentConfig.SwapEnabled = swap
	}

	if syncdisable := os.Getenv(SwarmEnvSyncDisable); syncdisable != "" {
		sync, err := strconv.ParseBool(syncdisable)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SwarmEnvSyncDisable, err)
		}
		currentConfig.SyncEnabled = !sync
	}

	if v := os.Getenv(SwarmEnvDeliverySkipCheck); v != "" {
		skipCheck, err := strconv.ParseBool(v)
		// only apply the value if it parsed cleanly
		if err == nil {
			currentConfig.DeliverySkipCheck = skipCheck
		}
	}

	if v := os.Getenv(SwarmEnvSyncUpdateDelay); v != "" {
		d, err := time.ParseDuration(v)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SwarmEnvSyncUpdateDelay, err)
		}
		currentConfig.SyncUpdateDelay = d
	}

	if max := os.Getenv(SwarmEnvMaxStreamPeerServers); max != "" {
		m, err := strconv.Atoi(max)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SwarmEnvMaxStreamPeerServers, err)
		}
		currentConfig.MaxStreamPeerServers = m
	}

	if lne := os.Getenv(SwarmEnvLightNodeEnable); lne != "" {
		lightnode, err := strconv.ParseBool(lne)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SwarmEnvLightNodeEnable, err)
		}
		currentConfig.LightNodeEnabled = lightnode
	}

	if swapapi := os.Getenv(SwarmEnvSwapAPI); swapapi != "" {
		currentConfig.SwapAPI = swapapi
	}

	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
		utils.Fatalf(SwarmErrSwapSetNoAPI)
	}

	if ensapi := os.Getenv(SwarmEnvENSAPI); ensapi != "" {
		currentConfig.EnsAPIs = strings.Split(ensapi, ",")
	}

	if ensaddr := os.Getenv(SwarmEnvENSAddr); ensaddr != "" {
		currentConfig.EnsRoot = common.HexToAddress(ensaddr)
	}

	if cors := os.Getenv(SwarmEnvCORS); cors != "" {
		currentConfig.Cors = cors
	}

	if bm := os.Getenv(SwarmEnvBootnodeMode); bm != "" {
		bootnodeMode, err := strconv.ParseBool(bm)
		if err != nil {
			utils.Fatalf("invalid environment variable %s: %v", SwarmEnvBootnodeMode, err)
		}
		currentConfig.BootnodeMode = bootnodeMode
	}

	if api := os.Getenv(SwarmGlobalstoreAPI); api != "" {
		currentConfig.GlobalStoreAPI = api
	}

	return currentConfig
}

// dumpConfig is the dumpconfig command.
// writes a default config to STDOUT
func dumpConfig(ctx *cli.Context) error {
	cfg, err := buildConfig(ctx)
	if err != nil {
		utils.Fatalf(fmt.Sprintf("Uh oh - dumpconfig triggered an error %v", err))
	}
	comment := ""
	out, err := tomlSettings.Marshal(&cfg)
	if err != nil {
		return err
	}
	io.WriteString(os.Stdout, comment)
	os.Stdout.Write(out)
	return nil
}

//validate configuration parameters
func validateConfig(cfg *bzzapi.Config) (err error) {
	for _, ensAPI := range cfg.EnsAPIs {
		if ensAPI != "" {
			if err := validateEnsAPIs(ensAPI); err != nil {
				return fmt.Errorf("invalid format [tld:][contract-addr@]url for ENS API endpoint configuration %q: %v", ensAPI, err)
			}
		}
	}
	return nil
}

//validate EnsAPIs configuration parameter
func validateEnsAPIs(s string) (err error) {
	// missing contract address
	if strings.HasPrefix(s, "@") {
		return errors.New("missing contract address")
	}
	// missing url
	if strings.HasSuffix(s, "@") {
		return errors.New("missing url")
	}
	// missing tld
	if strings.HasPrefix(s, ":") {
		return errors.New("missing tld")
	}
	// missing url
	if strings.HasSuffix(s, ":") {
		return errors.New("missing url")
	}
	return nil
}
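validateEnsAPIs only rejects obviously malformed endpoints; the accepted shape is [tld:][contract-addr@]url. A few illustrative values (the TLD, contract address and RPC host below are made-up placeholders) run through a small validator that mirrors the same prefix/suffix checks as the function above:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// validate mirrors the prefix/suffix checks in validateEnsAPIs above.
func validate(s string) error {
	switch {
	case strings.HasPrefix(s, "@"):
		return errors.New("missing contract address")
	case strings.HasSuffix(s, "@"):
		return errors.New("missing url")
	case strings.HasPrefix(s, ":"):
		return errors.New("missing tld")
	case strings.HasSuffix(s, ":"):
		return errors.New("missing url")
	}
	return nil
}

func main() {
	examples := []string{
		"http://127.0.0.1:8545",     // url only
		"eth:http://127.0.0.1:8545", // tld:url
		"eth:0x0000000000000000000000000000000000000042@http://127.0.0.1:8545", // tld:contract-addr@url
		"@http://127.0.0.1:8545", // rejected: missing contract address
	}
	for _, e := range examples {
		fmt.Printf("%-75s -> %v\n", e, validate(e))
	}
}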

//print a Config as string
func printConfig(config *bzzapi.Config) string {
	out, err := tomlSettings.Marshal(&config)
	if err != nil {
		return fmt.Sprintf("Something is not right with the configuration: %v", err)
	}
	return string(out)
}
@ -1,575 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"net" |
||||
"os" |
||||
"os/exec" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/docker/docker/pkg/reexec" |
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm" |
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
) |
||||
|
||||
func TestConfigDump(t *testing.T) { |
||||
swarm := runSwarm(t, "dumpconfig") |
||||
defaultConf := api.NewConfig() |
||||
out, err := tomlSettings.Marshal(&defaultConf) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
swarm.Expect(string(out)) |
||||
swarm.ExpectExit() |
||||
} |
||||
|
||||
func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) { |
||||
flags := []string{ |
||||
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", |
||||
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545", |
||||
fmt.Sprintf("--%s", SwarmSwapEnabledFlag.Name), |
||||
} |
||||
|
||||
swarm := runSwarm(t, flags...) |
||||
swarm.Expect("Fatal: " + SwarmErrSwapSetNoAPI + "\n") |
||||
swarm.ExpectExit() |
||||
} |
||||
|
||||
func TestConfigFailsNoBzzAccount(t *testing.T) { |
||||
flags := []string{ |
||||
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", |
||||
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545", |
||||
} |
||||
|
||||
swarm := runSwarm(t, flags...) |
||||
swarm.Expect("Fatal: " + SwarmErrNoBZZAccount + "\n") |
||||
swarm.ExpectExit() |
||||
} |
||||
|
||||
func TestConfigCmdLineOverrides(t *testing.T) { |
||||
dir, err := ioutil.TempDir("", "bzztest") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(dir) |
||||
|
||||
conf, account := getTestAccount(t, dir) |
||||
node := &testNode{Dir: dir} |
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
flags := []string{ |
||||
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", |
||||
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort, |
||||
fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name), |
||||
fmt.Sprintf("--%s", CorsStringFlag.Name), "*", |
||||
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), |
||||
fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name), |
||||
fmt.Sprintf("--%s", EnsAPIFlag.Name), "", |
||||
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir, |
||||
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath, |
||||
} |
||||
node.Cmd = runSwarm(t, flags...) |
||||
node.Cmd.InputLine(testPassphrase) |
||||
defer func() { |
||||
if t.Failed() { |
||||
node.Shutdown() |
||||
} |
||||
}() |
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { |
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint()) |
||||
if err == nil { |
||||
break |
||||
} |
||||
} |
||||
if node.Client == nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// load info
|
||||
var info swarm.Info |
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if info.Port != httpPort { |
||||
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) |
||||
} |
||||
|
||||
if info.NetworkID != 42 { |
||||
t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID) |
||||
} |
||||
|
||||
if info.SyncEnabled { |
||||
t.Fatal("Expected Sync to be disabled, but is true") |
||||
} |
||||
|
||||
if !info.DeliverySkipCheck { |
||||
t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not") |
||||
} |
||||
|
||||
if info.Cors != "*" { |
||||
t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors) |
||||
} |
||||
|
||||
node.Shutdown() |
||||
} |
||||
|
||||
func TestConfigFileOverrides(t *testing.T) { |
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
//create a config file
|
||||
//first, create a default conf
|
||||
defaultConf := api.NewConfig() |
||||
//change some values in order to test if they have been loaded
|
||||
defaultConf.SyncEnabled = false |
||||
defaultConf.DeliverySkipCheck = true |
||||
defaultConf.NetworkID = 54 |
||||
defaultConf.Port = httpPort |
||||
defaultConf.DbCapacity = 9000000 |
||||
defaultConf.HiveParams.KeepAliveInterval = 6000000000 |
||||
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second |
||||
//defaultConf.SyncParams.KeyBufferSize = 512
|
||||
//create a TOML string
|
||||
out, err := tomlSettings.Marshal(&defaultConf) |
||||
if err != nil { |
||||
t.Fatalf("Error creating TOML file in TestFileOverride: %v", err) |
||||
} |
||||
//create file
|
||||
f, err := ioutil.TempFile("", "testconfig.toml") |
||||
if err != nil { |
||||
t.Fatalf("Error writing TOML file in TestFileOverride: %v", err) |
||||
} |
||||
//write file
|
||||
_, err = f.WriteString(string(out)) |
||||
if err != nil { |
||||
t.Fatalf("Error writing TOML file in TestFileOverride: %v", err) |
||||
} |
||||
f.Sync() |
||||
|
||||
dir, err := ioutil.TempDir("", "bzztest") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(dir) |
||||
conf, account := getTestAccount(t, dir) |
||||
node := &testNode{Dir: dir} |
||||
|
||||
flags := []string{ |
||||
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(), |
||||
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), |
||||
fmt.Sprintf("--%s", EnsAPIFlag.Name), "", |
||||
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir, |
||||
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath, |
||||
} |
||||
node.Cmd = runSwarm(t, flags...) |
||||
node.Cmd.InputLine(testPassphrase) |
||||
defer func() { |
||||
if t.Failed() { |
||||
node.Shutdown() |
||||
} |
||||
}() |
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { |
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint()) |
||||
if err == nil { |
||||
break |
||||
} |
||||
} |
||||
if node.Client == nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// load info
|
||||
var info swarm.Info |
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if info.Port != httpPort { |
||||
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) |
||||
} |
||||
|
||||
if info.NetworkID != 54 { |
||||
t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID) |
||||
} |
||||
|
||||
if info.SyncEnabled { |
||||
t.Fatal("Expected Sync to be disabled, but is true") |
||||
} |
||||
|
||||
if !info.DeliverySkipCheck { |
||||
t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not") |
||||
} |
||||
|
||||
if info.DbCapacity != 9000000 { |
||||
t.Fatalf("Expected DbCapacity to be %d, got %d", 9000000, info.DbCapacity) |
||||
} |
||||
|
||||
if info.HiveParams.KeepAliveInterval != 6000000000 { |
||||
t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval)) |
||||
} |
||||
|
||||
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second { |
||||
t.Fatalf("Expected SwapParams AutoCashInterval to be %v, got %v", 600*time.Second, info.Swap.Params.Strategy.AutoCashInterval) |
||||
} |
||||
|
||||
// if info.SyncParams.KeyBufferSize != 512 {
|
||||
// t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
|
||||
// }
|
||||
|
||||
node.Shutdown() |
||||
} |
||||
|
||||
func TestConfigEnvVars(t *testing.T) { |
||||
// assign ports
|
||||
httpPort, err := assignTCPPort() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
envVars := os.Environ() |
||||
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort)) |
||||
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999")) |
||||
envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*")) |
||||
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true")) |
||||
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true")) |
||||
|
||||
dir, err := ioutil.TempDir("", "bzztest") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(dir) |
||||
conf, account := getTestAccount(t, dir) |
||||
node := &testNode{Dir: dir} |
||||
flags := []string{ |
||||
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), |
||||
"--ens-api", "", |
||||
"--datadir", dir, |
||||
"--ipcpath", conf.IPCPath, |
||||
} |
||||
|
||||
//node.Cmd = runSwarm(t,flags...)
|
||||
//node.Cmd.cmd.Env = envVars
|
||||
//the above assignment does not work, so we need a custom Cmd here in order to pass envVars:
|
||||
cmd := &exec.Cmd{ |
||||
Path: reexec.Self(), |
||||
Args: append([]string{"swarm-test"}, flags...), |
||||
Stderr: os.Stderr, |
||||
Stdout: os.Stdout, |
||||
} |
||||
cmd.Env = envVars |
||||
//stdout, err := cmd.StdoutPipe()
|
||||
//if err != nil {
|
||||
// t.Fatal(err)
|
||||
//}
|
||||
//stdout = bufio.NewReader(stdout)
|
||||
var stdin io.WriteCloser |
||||
if stdin, err = cmd.StdinPipe(); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if err := cmd.Start(); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
//cmd.InputLine(testPassphrase)
|
||||
io.WriteString(stdin, testPassphrase+"\n") |
||||
defer func() { |
||||
if t.Failed() { |
||||
node.Shutdown() |
||||
cmd.Process.Kill() |
||||
} |
||||
}() |
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { |
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint()) |
||||
if err == nil { |
||||
break |
||||
} |
||||
} |
||||
|
||||
if node.Client == nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// load info
|
||||
var info swarm.Info |
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if info.Port != httpPort { |
||||
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) |
||||
} |
||||
|
||||
if info.NetworkID != 999 { |
||||
t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID) |
||||
} |
||||
|
||||
if info.Cors != "*" { |
||||
t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors) |
||||
} |
||||
|
||||
if info.SyncEnabled { |
||||
t.Fatal("Expected Sync to be disabled, but is true") |
||||
} |
||||
|
||||
if !info.DeliverySkipCheck { |
||||
t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not") |
||||
} |
||||
|
||||
node.Shutdown() |
||||
cmd.Process.Kill() |
||||
} |
||||
|
||||
func TestConfigCmdLineOverridesFile(t *testing.T) { |
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
//create a config file
|
||||
//first, create a default conf
|
||||
defaultConf := api.NewConfig() |
||||
//change some values in order to test if they have been loaded
|
||||
defaultConf.SyncEnabled = true |
||||
defaultConf.NetworkID = 54 |
||||
defaultConf.Port = "8588" |
||||
defaultConf.DbCapacity = 9000000 |
||||
defaultConf.HiveParams.KeepAliveInterval = 6000000000 |
||||
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second |
||||
//defaultConf.SyncParams.KeyBufferSize = 512
|
||||
//create a TOML file
|
||||
out, err := tomlSettings.Marshal(&defaultConf) |
||||
if err != nil { |
||||
t.Fatalf("Error marshaling TOML config in TestConfigCmdLineOverridesFile: %v", err) |
||||
} |
||||
//write file
|
||||
fname := "testconfig.toml" |
||||
f, err := ioutil.TempFile("", fname) |
||||
if err != nil { |
||||
t.Fatalf("Error creating temporary TOML file in TestConfigCmdLineOverridesFile: %v", err) |
||||
} |
||||
defer os.Remove(f.Name()) |
||||
//write file
|
||||
_, err = f.WriteString(string(out)) |
||||
if err != nil { |
||||
t.Fatalf("Error writing TOML file in TestConfigCmdLineOverridesFile: %v", err) |
||||
} |
||||
f.Sync() |
||||
|
||||
dir, err := ioutil.TempDir("", "bzztest") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(dir) |
||||
conf, account := getTestAccount(t, dir) |
||||
node := &testNode{Dir: dir} |
||||
|
||||
expectNetworkId := uint64(77) |
||||
|
||||
flags := []string{ |
||||
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77", |
||||
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort, |
||||
fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name), |
||||
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(), |
||||
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), |
||||
fmt.Sprintf("--%s", EnsAPIFlag.Name), "", |
||||
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir, |
||||
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath, |
||||
} |
||||
node.Cmd = runSwarm(t, flags...) |
||||
node.Cmd.InputLine(testPassphrase) |
||||
defer func() { |
||||
if t.Failed() { |
||||
node.Shutdown() |
||||
} |
||||
}() |
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { |
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint()) |
||||
if err == nil { |
||||
break |
||||
} |
||||
} |
||||
if node.Client == nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// load info
|
||||
var info swarm.Info |
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if info.Port != httpPort { |
||||
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) |
||||
} |
||||
|
||||
if info.NetworkID != expectNetworkId { |
||||
t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID) |
||||
} |
||||
|
||||
if info.SyncEnabled { |
||||
t.Fatal("Expected Sync to be disabled, but is true") |
||||
} |
||||
|
||||
if info.DbCapacity != 9000000 { |
||||
t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.DbCapacity) |
||||
} |
||||
|
||||
if info.HiveParams.KeepAliveInterval != 6000000000 { |
||||
t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval)) |
||||
} |
||||
|
||||
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second { |
||||
t.Fatalf("Expected SwapParams AutoCashInterval to be %v, got %v", 600*time.Second, info.Swap.Params.Strategy.AutoCashInterval) |
||||
} |
||||
|
||||
// if info.SyncParams.KeyBufferSize != 512 {
|
||||
// t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
|
||||
// }
|
||||
|
||||
node.Shutdown() |
||||
} |
||||
|
||||
func TestConfigValidate(t *testing.T) { |
||||
for _, c := range []struct { |
||||
cfg *api.Config |
||||
err string |
||||
}{ |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"/data/testnet/geth.ipc", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"http://127.0.0.1:1234", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"ws://127.0.0.1:1234", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"test:/data/testnet/geth.ipc", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"test:ws://127.0.0.1:1234", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"test:314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"eth:314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"eth:314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:12344", |
||||
}}, |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"eth:", |
||||
}}, |
||||
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"eth:\": missing url", |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"314159265dD8dbb310642f98f50C066173C1259b@", |
||||
}}, |
||||
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"314159265dD8dbb310642f98f50C066173C1259b@\": missing url", |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
":314159265dD8dbb310642f98f50C066173C1259", |
||||
}}, |
||||
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \":314159265dD8dbb310642f98f50C066173C1259\": missing tld", |
||||
}, |
||||
{ |
||||
cfg: &api.Config{EnsAPIs: []string{ |
||||
"@/data/testnet/geth.ipc", |
||||
}}, |
||||
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"@/data/testnet/geth.ipc\": missing contract address", |
||||
}, |
||||
} { |
||||
err := validateConfig(c.cfg) |
||||
if c.err != "" && err.Error() != c.err { |
||||
t.Errorf("expected error %q, got %q", c.err, err) |
||||
} |
||||
if c.err == "" && err != nil { |
||||
t.Errorf("unexpected error %q", err) |
||||
} |
||||
} |
||||
} |
||||
|
||||
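// assignTCPPort asks the OS for a free TCP port by binding to 127.0.0.1:0,
// immediately closes the listener and returns the chosen port number as a
// string. The port is not reserved, so it is only very likely (not
// guaranteed) to still be free when the test reuses it.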
func assignTCPPort() (string, error) { |
||||
l, err := net.Listen("tcp", "127.0.0.1:0") |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
l.Close() |
||||
_, port, err := net.SplitHostPort(l.Addr().String()) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
return port, nil |
||||
} |
@ -1,239 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"archive/tar" |
||||
"bytes" |
||||
"encoding/binary" |
||||
"encoding/hex" |
||||
"fmt" |
||||
"io" |
||||
"os" |
||||
"path/filepath" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/swarm/chunk" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/localstore" |
||||
"github.com/syndtr/goleveldb/leveldb" |
||||
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
"gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var legacyKeyIndex = byte(0) |
||||
var keyData = byte(6) |
||||
|
||||
type dpaDBIndex struct { |
||||
Idx uint64 |
||||
Access uint64 |
||||
} |
||||
|
||||
var dbCommand = cli.Command{ |
||||
Name: "db", |
||||
CustomHelpTemplate: helpTemplate, |
||||
Usage: "manage the local chunk database", |
||||
ArgsUsage: "db COMMAND", |
||||
Description: "Manage the local chunk database", |
||||
Subcommands: []cli.Command{ |
||||
{ |
||||
Action: dbExport, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "export", |
||||
Usage: "export a local chunk database as a tar archive (use - to send to stdout)", |
||||
ArgsUsage: "<chunkdb> <file>", |
||||
Description: ` |
||||
Export a local chunk database as a tar archive (use - to send to stdout). |
||||
|
||||
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar |
||||
|
||||
The export may be quite large, consider piping the output through the Unix |
||||
pv(1) tool to get a progress bar: |
||||
|
||||
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar |
||||
`, |
||||
}, |
||||
{ |
||||
Action: dbImport, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "import", |
||||
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", |
||||
ArgsUsage: "<chunkdb> <file>", |
||||
Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin). |
||||
|
||||
swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar |
||||
|
||||
The import may be quite large, consider piping the input through the Unix |
||||
pv(1) tool to get a progress bar: |
||||
|
||||
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`, |
||||
Flags: []cli.Flag{ |
||||
SwarmLegacyFlag, |
||||
}, |
||||
}, |
||||
}, |
||||
} |
||||
|
||||
func dbExport(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
if len(args) != 3 { |
||||
utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key") |
||||
} |
||||
|
||||
var out io.Writer |
||||
if args[1] == "-" { |
||||
out = os.Stdout |
||||
} else { |
||||
f, err := os.Create(args[1]) |
||||
if err != nil { |
||||
utils.Fatalf("error opening output file: %s", err) |
||||
} |
||||
defer f.Close() |
||||
out = f |
||||
} |
||||
|
||||
isLegacy := localstore.IsLegacyDatabase(args[0]) |
||||
if isLegacy { |
||||
count, err := exportLegacy(args[0], common.Hex2Bytes(args[2]), out) |
||||
if err != nil { |
||||
utils.Fatalf("error exporting legacy local chunk database: %s", err) |
||||
} |
||||
|
||||
log.Info(fmt.Sprintf("successfully exported %d chunks from legacy db", count)) |
||||
return |
||||
} |
||||
|
||||
store, err := openLDBStore(args[0], common.Hex2Bytes(args[2])) |
||||
if err != nil { |
||||
utils.Fatalf("error opening local chunk database: %s", err) |
||||
} |
||||
defer store.Close() |
||||
|
||||
count, err := store.Export(out) |
||||
if err != nil { |
||||
utils.Fatalf("error exporting local chunk database: %s", err) |
||||
} |
||||
|
||||
log.Info(fmt.Sprintf("successfully exported %d chunks", count)) |
||||
} |
||||
|
||||
func dbImport(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
if len(args) != 3 { |
||||
utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key") |
||||
} |
||||
|
||||
legacy := ctx.IsSet(SwarmLegacyFlag.Name) |
||||
|
||||
store, err := openLDBStore(args[0], common.Hex2Bytes(args[2])) |
||||
if err != nil { |
||||
utils.Fatalf("error opening local chunk database: %s", err) |
||||
} |
||||
defer store.Close() |
||||
|
||||
var in io.Reader |
||||
if args[1] == "-" { |
||||
in = os.Stdin |
||||
} else { |
||||
f, err := os.Open(args[1]) |
||||
if err != nil { |
||||
utils.Fatalf("error opening input file: %s", err) |
||||
} |
||||
defer f.Close() |
||||
in = f |
||||
} |
||||
|
||||
count, err := store.Import(in, legacy) |
||||
if err != nil { |
||||
utils.Fatalf("error importing local chunk database: %s", err) |
||||
} |
||||
|
||||
log.Info(fmt.Sprintf("successfully imported %d chunks", count)) |
||||
} |
||||
|
||||
func openLDBStore(path string, basekey []byte) (*localstore.DB, error) { |
||||
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { |
||||
return nil, fmt.Errorf("invalid chunkdb path: %s", err) |
||||
} |
||||
|
||||
return localstore.New(path, basekey, nil) |
||||
} |
||||
|
||||
func decodeIndex(data []byte, index *dpaDBIndex) error { |
||||
dec := rlp.NewStream(bytes.NewReader(data), 0) |
||||
return dec.Decode(index) |
||||
} |
||||
|
||||
func getDataKey(idx uint64, po uint8) []byte { |
||||
key := make([]byte, 10) |
||||
key[0] = keyData |
||||
key[1] = po |
||||
binary.BigEndian.PutUint64(key[2:], idx) |
||||
|
||||
return key |
||||
} |
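// Illustration only (example values, not taken from any fixture): for
// idx=1 and po=3, getDataKey returns the 10-byte legacy data key
//
//	[keyData, po, 8-byte big-endian idx] = 06 03 00 00 00 00 00 00 00 01
//
// i.e. a one-byte key-type prefix (keyData = 6), one proximity-order byte
// and the chunk index encoded big-endian.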
||||
|
||||
func exportLegacy(path string, basekey []byte, out io.Writer) (int64, error) { |
||||
tw := tar.NewWriter(out) |
||||
defer tw.Close() |
||||
db, err := leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 128}) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
defer db.Close() |
||||
|
||||
it := db.NewIterator(nil, nil) |
||||
defer it.Release() |
||||
var count int64 |
||||
for ok := it.Seek([]byte{legacyKeyIndex}); ok; ok = it.Next() { |
||||
key := it.Key() |
||||
if key == nil || key[0] != legacyKeyIndex { |
||||
break |
||||
} |
||||
|
||||
var index dpaDBIndex |
||||
|
||||
hash := key[1:] |
||||
if err := decodeIndex(it.Value(), &index); err != nil { |
return count, err |
} |
||||
|
||||
po := uint8(chunk.Proximity(basekey, hash)) |
||||
|
||||
datakey := getDataKey(index.Idx, po) |
||||
data, err := db.Get(datakey, nil) |
||||
if err != nil { |
||||
log.Error(fmt.Sprintf("Chunk %x found but could not be accessed: %v, %x", key, err, datakey)) |
||||
continue |
||||
} |
||||
|
||||
hdr := &tar.Header{ |
||||
Name: hex.EncodeToString(hash), |
||||
Mode: 0644, |
||||
Size: int64(len(data)), |
||||
} |
||||
if err := tw.WriteHeader(hdr); err != nil { |
||||
return count, err |
||||
} |
||||
if _, err := tw.Write(data); err != nil { |
||||
return count, err |
||||
} |
||||
count++ |
||||
} |
||||
|
||||
return count, nil |
||||
} |
@ -1,112 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
"path/filepath" |
||||
"strings" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
swarm "github.com/ethereum/go-ethereum/swarm/api/client" |
||||
"gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var downloadCommand = cli.Command{ |
||||
Action: download, |
||||
Name: "down", |
||||
Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag}, |
||||
Usage: "downloads a swarm manifest or a file inside a manifest", |
||||
ArgsUsage: " <uri> [<dir>]", |
||||
Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, the current working directory is assumed. The --recursive flag is expected when downloading a manifest with multiple entries.`, |
||||
} |
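// Illustrative usage only (hash and paths are placeholders):
//
//	swarm --bzzapi http://127.0.0.1:8500 down bzz:/<manifest hash>/file.txt ./out
//	swarm down --recursive bzz:/<manifest hash> ./site
//
// The first form fetches a single file from a manifest, the second mirrors
// a whole manifest into a local directory.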
||||
|
||||
func download(ctx *cli.Context) { |
||||
log.Debug("downloading content using swarm down") |
||||
args := ctx.Args() |
||||
dest := "." |
||||
|
||||
switch len(args) { |
||||
case 0: |
||||
utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]") |
||||
case 1: |
||||
log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir")) |
||||
default: |
||||
log.Trace(fmt.Sprintf("destination path arg: %s", args[1])) |
||||
if absDest, err := filepath.Abs(args[1]); err == nil { |
||||
dest = absDest |
||||
} else { |
||||
utils.Fatalf("could not get download path: %v", err) |
||||
} |
||||
} |
||||
|
||||
var ( |
||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") |
||||
isRecursive = ctx.Bool(SwarmRecursiveFlag.Name) |
||||
client = swarm.NewClient(bzzapi) |
||||
) |
||||
|
||||
if fi, err := os.Stat(dest); err == nil { |
||||
if isRecursive && !fi.Mode().IsDir() { |
||||
utils.Fatalf("destination path is not a directory!") |
||||
} |
||||
} else { |
||||
if !os.IsNotExist(err) { |
||||
utils.Fatalf("could not stat path: %v", err) |
||||
} |
||||
} |
||||
|
||||
uri, err := api.Parse(args[0]) |
||||
if err != nil { |
||||
utils.Fatalf("could not parse uri argument: %v", err) |
||||
} |
||||
|
||||
dl := func(credentials string) error { |
||||
// assume behaviour according to --recursive switch
|
||||
if isRecursive { |
||||
if err := client.DownloadDirectory(uri.Addr, uri.Path, dest, credentials); err != nil { |
||||
if err == swarm.ErrUnauthorized { |
||||
return err |
||||
} |
||||
return fmt.Errorf("directory %s: %v", uri.Path, err) |
||||
} |
||||
} else { |
||||
// we are downloading a file
|
||||
log.Debug("downloading file/path from a manifest", "uri.Addr", uri.Addr, "uri.Path", uri.Path) |
||||
|
||||
err := client.DownloadFile(uri.Addr, uri.Path, dest, credentials) |
||||
if err != nil { |
||||
if err == swarm.ErrUnauthorized { |
||||
return err |
||||
} |
||||
return fmt.Errorf("file %s from address: %s: %v", uri.Path, uri.Addr, err) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
if passwords := makePasswordList(ctx); passwords != nil { |
||||
password := getPassPhrase(fmt.Sprintf("Downloading %s is restricted", uri), 0, passwords) |
||||
err = dl(password) |
||||
} else { |
||||
err = dl("") |
||||
} |
||||
if err != nil { |
||||
utils.Fatalf("download: %v", err) |
||||
} |
||||
} |
@ -1,60 +0,0 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Command bzzhash computes a swarm tree hash.
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"os" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/swarm/chunk" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
"gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var hashesCommand = cli.Command{ |
||||
Action: hashes, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "hashes", |
||||
Usage: "print all hashes of a file to STDOUT", |
||||
ArgsUsage: "<file>", |
||||
Description: "Prints all hashes of a file to STDOUT", |
||||
} |
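// Illustrative usage only (the file name is a placeholder):
//
//	swarm hashes ./document.txt
//
// prints the swarm chunk references computed for document.txt, one per line,
// without uploading anything (the command runs over a fake chunk store).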
||||
|
||||
func hashes(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
if len(args) < 1 { |
||||
utils.Fatalf("Usage: swarm hashes <file name>") |
||||
} |
||||
f, err := os.Open(args[0]) |
||||
if err != nil { |
||||
utils.Fatalf("Error opening file %s: %v", args[0], err) |
||||
} |
||||
defer f.Close() |
||||
|
||||
fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags()) |
||||
refs, err := fileStore.GetAllReferences(context.TODO(), f, false) |
||||
if err != nil { |
||||
utils.Fatalf("%v\n", err) |
||||
} else { |
||||
for _, r := range refs { |
||||
fmt.Println(r.String()) |
||||
} |
||||
} |
||||
} |
@ -1,287 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"archive/tar" |
||||
"bytes" |
||||
"compress/gzip" |
||||
"crypto/md5" |
||||
"encoding/base64" |
||||
"encoding/hex" |
||||
"io" |
||||
"io/ioutil" |
||||
"net/http" |
||||
"os" |
||||
"path" |
||||
"runtime" |
||||
"strings" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/swarm/testdata" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/swarm" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
) |
||||
|
||||
const ( |
||||
DATABASE_FIXTURE_BZZ_ACCOUNT = "0aa159029fa13ffa8fa1c6fff6ebceface99d6a4" |
||||
DATABASE_FIXTURE_PASSWORD = "pass" |
||||
FIXTURE_DATADIR_PREFIX = "swarm/bzz-0aa159029fa13ffa8fa1c6fff6ebceface99d6a4" |
||||
FixtureBaseKey = "a9f22b3d77b4bdf5f3eefce995d6c8e7cecf2636f20956f08a0d1ed95adb52ad" |
||||
) |
||||
|
||||
// TestCLISwarmExportImport performs the following test:
|
||||
// 1. runs swarm node
|
||||
// 2. uploads a random file
|
||||
// 3. runs an export of the local datastore
|
||||
// 4. runs a second swarm node
|
||||
// 5. imports the exported datastore
|
||||
// 6. fetches the uploaded random file from the second node
|
||||
func TestCLISwarmExportImport(t *testing.T) { |
||||
if runtime.GOOS == "windows" { |
||||
t.Skip() |
||||
} |
||||
cluster := newTestCluster(t, 1) |
||||
|
||||
// generate random 1mb file
|
||||
content := testutil.RandomBytes(1, 1000000) |
||||
fileName := testutil.TempFileWithContent(t, string(content)) |
||||
defer os.Remove(fileName) |
||||
|
||||
// upload the file with 'swarm up' and expect a hash
|
||||
up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", fileName) |
||||
_, matches := up.ExpectRegexp(`[a-f\d]{64}`) |
||||
up.ExpectExit() |
||||
hash := matches[0] |
||||
|
||||
var info swarm.Info |
||||
if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
cluster.Stop() |
||||
defer cluster.Cleanup() |
||||
|
||||
// generate an export.tar
|
||||
exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x")) |
||||
exportCmd.ExpectExit() |
||||
|
||||
// start second cluster
|
||||
cluster2 := newTestCluster(t, 1) |
||||
|
||||
var info2 swarm.Info |
||||
if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// stop second cluster, so that we close LevelDB
|
||||
cluster2.Stop() |
||||
defer cluster2.Cleanup() |
||||
|
||||
// import the export.tar
|
||||
importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x")) |
||||
importCmd.ExpectExit() |
||||
|
||||
// spin second cluster back up
|
||||
cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x")) |
||||
|
||||
// try to fetch imported file
|
||||
res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if res.StatusCode != 200 { |
||||
t.Fatalf("expected HTTP status %d, got %s", 200, res.Status) |
||||
} |
||||
|
||||
// compare downloaded file with the generated random file
|
||||
mustEqualFiles(t, bytes.NewReader(content), res.Body) |
||||
} |
||||
|
||||
// TestExportLegacyToNew checks that an old database gets imported correctly into the new localstore structure
|
||||
// The test sequence is as follows:
|
||||
// 1. unpack database fixture to tmp dir
|
||||
// 2. try to open with new swarm binary that should complain about old database
|
||||
// 3. export from old database
|
||||
// 4. remove the chunks folder
|
||||
// 5. import the dump
|
||||
// 6. file should be accessible
|
||||
func TestExportLegacyToNew(t *testing.T) { |
||||
if runtime.GOOS == "windows" { |
||||
t.Skip() // this should be re-enabled once the underlying AppVeyor issue is fixed
|
||||
} |
||||
/* |
||||
fixture bzz account 0aa159029fa13ffa8fa1c6fff6ebceface99d6a4 |
||||
*/ |
||||
const UPLOADED_FILE_MD5_HASH = "a001fdae53ba50cae584b8b02b06f821" |
||||
const UPLOADED_HASH = "67a86082ee0ea1bc7dd8d955bb1e14d04f61d55ae6a4b37b3d0296a3a95e454a" |
||||
tmpdir, err := ioutil.TempDir("", "swarm-test") |
||||
log.Trace("running legacy datastore migration test", "temp dir", tmpdir) |
||||
defer os.RemoveAll(tmpdir) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
inflateBase64Gzip(t, testdata.DATADIR_MIGRATION_FIXTURE, tmpdir) |
||||
|
||||
tmpPassword := testutil.TempFileWithContent(t, DATABASE_FIXTURE_PASSWORD) |
||||
defer os.Remove(tmpPassword) |
||||
|
||||
flags := []string{ |
||||
"--datadir", tmpdir, |
||||
"--bzzaccount", DATABASE_FIXTURE_BZZ_ACCOUNT, |
||||
"--password", tmpPassword, |
||||
} |
||||
|
||||
newSwarmOldDb := runSwarm(t, flags...) |
||||
_, matches := newSwarmOldDb.ExpectRegexp(".+") |
||||
newSwarmOldDb.ExpectExit() |
||||
|
||||
if len(matches) == 0 { |
||||
t.Fatalf("stdout not matched") |
||||
} |
||||
|
||||
if newSwarmOldDb.ExitStatus() == 0 { |
||||
t.Fatal("should error") |
||||
} |
||||
t.Log("exporting legacy database") |
||||
actualDataDir := path.Join(tmpdir, FIXTURE_DATADIR_PREFIX) |
||||
exportCmd := runSwarm(t, "--verbosity", "5", "db", "export", actualDataDir+"/chunks", tmpdir+"/export.tar", FixtureBaseKey) |
||||
exportCmd.ExpectExit() |
||||
|
||||
stat, err := os.Stat(tmpdir + "/export.tar") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// make some silly size assumption
|
||||
if stat.Size() < 90000 { |
||||
t.Fatal("export size too small") |
||||
} |
||||
log.Info("removing chunk datadir") |
||||
err = os.RemoveAll(path.Join(actualDataDir, "chunks")) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// start second cluster
|
||||
cluster2 := newTestCluster(t, 1) |
||||
var info2 swarm.Info |
||||
if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// stop second cluster, so that we close LevelDB
|
||||
cluster2.Stop() |
||||
defer cluster2.Cleanup() |
||||
|
||||
// import the export.tar
|
||||
importCmd := runSwarm(t, "db", "import", "--legacy", info2.Path+"/chunks", tmpdir+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x")) |
||||
importCmd.ExpectExit() |
||||
|
||||
// spin second cluster back up
|
||||
cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x")) |
||||
t.Log("trying to http get the file") |
||||
// try to fetch imported file
|
||||
res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + UPLOADED_HASH) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if res.StatusCode != 200 { |
||||
t.Fatalf("expected HTTP status %d, got %s", 200, res.Status) |
||||
} |
||||
h := md5.New() |
||||
if _, err := io.Copy(h, res.Body); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
sum := h.Sum(nil) |
||||
|
||||
b, err := hex.DecodeString(UPLOADED_FILE_MD5_HASH) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if !bytes.Equal(sum, b) { |
||||
t.Fatal("should be equal") |
||||
} |
||||
} |
||||
|
||||
func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) { |
||||
h := md5.New() |
||||
upLen, err := io.Copy(h, up) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
upHash := h.Sum(nil) |
||||
h.Reset() |
||||
downLen, err := io.Copy(h, down) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
downHash := h.Sum(nil) |
||||
|
||||
if !bytes.Equal(upHash, downHash) || upLen != downLen { |
||||
t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one md5=%x (length %v)", downHash, downLen, upHash, upLen) |
||||
} |
||||
} |
||||
|
||||
func inflateBase64Gzip(t *testing.T, base64File, directory string) { |
||||
t.Helper() |
||||
|
||||
f := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64File)) |
||||
gzf, err := gzip.NewReader(f) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
tarReader := tar.NewReader(gzf) |
||||
|
||||
for { |
||||
header, err := tarReader.Next() |
||||
if err == io.EOF { |
||||
break |
||||
} |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
name := header.Name |
||||
|
||||
switch header.Typeflag { |
||||
case tar.TypeDir: |
||||
err := os.Mkdir(path.Join(directory, name), os.ModePerm) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
case tar.TypeReg: |
||||
file, err := os.Create(path.Join(directory, name)) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if _, err := io.Copy(file, tarReader); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
file.Close() |
||||
default: |
||||
t.Fatalf("unexpected tar entry type %d for %q", header.Typeflag, name) |
||||
} |
||||
} |
||||
} |
@ -1,238 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Command feed allows the user to create and update signed Swarm feeds
|
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"strings" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
swarm "github.com/ethereum/go-ethereum/swarm/api/client" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed" |
||||
"gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var feedCommand = cli.Command{ |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "feed", |
||||
Usage: "(Advanced) Create and update Swarm Feeds", |
||||
ArgsUsage: "<create|update|info>", |
||||
Description: "Works with Swarm Feeds", |
||||
Subcommands: []cli.Command{ |
||||
{ |
||||
Action: feedCreateManifest, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "create", |
||||
Usage: "creates and publishes a new feed manifest", |
||||
Description: `creates and publishes a new feed manifest pointing to a specified user's updates about a particular topic. |
||||
The feed topic can be built in the following ways: |
||||
* use --topic to set the topic to an arbitrary binary hex string. |
||||
* use --name to set the topic to a human-readable name. |
||||
For example --name could be set to "profile-picture", meaning this feed can be used to get this user's current profile picture. |
||||
* use both --topic and --name to create named subtopics.
|
||||
For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning |
||||
this feed tracks a discussion about that contract. |
||||
The --user flag allows this manifest to refer to a user other than yourself. If not specified, |
||||
it will then default to your local account (--bzzaccount)`, |
||||
Flags: []cli.Flag{SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag}, |
||||
}, |
||||
{ |
||||
Action: feedUpdate, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "update", |
||||
Usage: "updates the content of an existing Swarm Feed", |
||||
ArgsUsage: "<0x Hex data>", |
||||
Description: `publishes a new update on the specified topic |
||||
The feed topic can be built in the following ways: |
||||
* use --topic to set the topic to an arbitrary binary hex string. |
||||
* use --name to set the topic to a human-readable name. |
||||
For example --name could be set to "profile-picture", meaning this feed can be used to get this user's current profile picture. |
||||
* use both --topic and --name to create named subtopics.
|
||||
For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning |
||||
this feed tracks a discussion about that contract. |
||||
|
||||
If you have a manifest, you can specify it with --manifest to refer to the feed, |
||||
instead of using --topic / --name |
||||
`, |
||||
Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag}, |
||||
}, |
||||
{ |
||||
Action: feedInfo, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "info", |
||||
Usage: "obtains information about an existing Swarm feed", |
||||
Description: `obtains information about an existing Swarm feed |
||||
The topic can be specified directly with the --topic flag as a hex string |
||||
If no topic is specified, the default topic (zero) will be used |
||||
The --name flag can be used to specify subtopics with a specific name. |
||||
The --user flag allows you to refer to a user other than yourself. If not specified, |
||||
it will then default to your local account (--bzzaccount) |
||||
If you have a manifest, you can specify it with --manifest instead of --topic / --name / --user |
||||
to refer to the feed`, |
||||
Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag}, |
||||
}, |
||||
}, |
||||
} |
||||
|
||||
func NewGenericSigner(ctx *cli.Context) feed.Signer { |
||||
return feed.NewGenericSigner(getPrivKey(ctx)) |
||||
} |
||||
|
||||
func getTopic(ctx *cli.Context) (topic feed.Topic) { |
||||
var name = ctx.String(SwarmFeedNameFlag.Name) |
||||
var relatedTopic = ctx.String(SwarmFeedTopicFlag.Name) |
||||
var relatedTopicBytes []byte |
||||
var err error |
||||
|
||||
if relatedTopic != "" { |
||||
relatedTopicBytes, err = hexutil.Decode(relatedTopic) |
||||
if err != nil { |
||||
utils.Fatalf("Error parsing topic: %s", err) |
||||
} |
||||
} |
||||
|
||||
topic, err = feed.NewTopic(name, relatedTopicBytes) |
||||
if err != nil { |
||||
utils.Fatalf("Error parsing topic: %s", err) |
||||
} |
||||
return topic |
||||
} |
||||
|
||||
// swarm feed create [--name <name>] [--topic <0x hex topic>] [--user <user address>]
// swarm feed update [--manifest <Manifest Address or ENS domain>] [--name <name>] [--topic <0x hex topic>] <0x Hexdata>
// swarm feed info [--manifest <Manifest Address or ENS domain>] [--name <name>] [--topic <0x hex topic>] [--user <user address>]
|
||||
|
||||
func feedCreateManifest(ctx *cli.Context) { |
||||
var ( |
||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") |
||||
client = swarm.NewClient(bzzapi) |
||||
) |
||||
|
||||
newFeedUpdateRequest := feed.NewFirstRequest(getTopic(ctx)) |
||||
newFeedUpdateRequest.Feed.User = feedGetUser(ctx) |
||||
|
||||
manifestAddress, err := client.CreateFeedWithManifest(newFeedUpdateRequest) |
||||
if err != nil { |
||||
utils.Fatalf("Error creating feed manifest: %s", err.Error()) |
||||
return |
||||
} |
||||
fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up)
|
||||
|
||||
} |
||||
|
||||
func feedUpdate(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
|
||||
var ( |
||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") |
||||
client = swarm.NewClient(bzzapi) |
||||
manifestAddressOrDomain = ctx.String(SwarmFeedManifestFlag.Name) |
||||
) |
||||
|
||||
if len(args) < 1 { |
||||
fmt.Println("Incorrect number of arguments") |
||||
cli.ShowCommandHelpAndExit(ctx, "update", 1) |
||||
return |
||||
} |
||||
|
||||
signer := NewGenericSigner(ctx) |
||||
|
||||
data, err := hexutil.Decode(args[0]) |
||||
if err != nil { |
||||
utils.Fatalf("Error parsing data: %s", err.Error()) |
||||
return |
||||
} |
||||
|
||||
var updateRequest *feed.Request |
||||
var query *feed.Query |
||||
|
||||
if manifestAddressOrDomain == "" { |
||||
query = new(feed.Query) |
||||
query.User = signer.Address() |
||||
query.Topic = getTopic(ctx) |
||||
} |
||||
|
||||
// Retrieve a feed update request
|
||||
updateRequest, err = client.GetFeedRequest(query, manifestAddressOrDomain) |
||||
if err != nil { |
||||
utils.Fatalf("Error retrieving feed status: %s", err.Error()) |
||||
} |
||||
|
||||
// Check that the provided signer matches the request to sign
|
||||
if updateRequest.User != signer.Address() { |
||||
utils.Fatalf("Signer address does not match the update request") |
||||
} |
||||
|
||||
// set the new data
|
||||
updateRequest.SetData(data) |
||||
|
||||
// sign update
|
||||
if err = updateRequest.Sign(signer); err != nil { |
||||
utils.Fatalf("Error signing feed update: %s", err.Error()) |
||||
} |
||||
|
||||
// post update
|
||||
err = client.UpdateFeed(updateRequest) |
||||
if err != nil { |
||||
utils.Fatalf("Error updating feed: %s", err.Error()) |
||||
return |
||||
} |
||||
} |
||||
|
||||
func feedInfo(ctx *cli.Context) { |
||||
var ( |
||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") |
||||
client = swarm.NewClient(bzzapi) |
||||
manifestAddressOrDomain = ctx.String(SwarmFeedManifestFlag.Name) |
||||
) |
||||
|
||||
var query *feed.Query |
||||
if manifestAddressOrDomain == "" { |
||||
query = new(feed.Query) |
||||
query.Topic = getTopic(ctx) |
||||
query.User = feedGetUser(ctx) |
||||
} |
||||
|
||||
metadata, err := client.GetFeedRequest(query, manifestAddressOrDomain) |
||||
if err != nil { |
||||
utils.Fatalf("Error retrieving feed metadata: %s", err.Error()) |
||||
return |
||||
} |
||||
encodedMetadata, err := metadata.MarshalJSON() |
||||
if err != nil { |
||||
utils.Fatalf("Error encoding metadata to JSON for display: %s", err) |
||||
} |
||||
fmt.Println(string(encodedMetadata)) |
||||
} |
||||
|
||||
func feedGetUser(ctx *cli.Context) common.Address { |
||||
var user = ctx.String(SwarmFeedUserFlag.Name) |
||||
if user != "" { |
||||
return common.HexToAddress(user) |
||||
} |
||||
pk := getPrivKey(ctx) |
||||
if pk == nil { |
||||
utils.Fatalf("Cannot read private key. Must specify --user or --bzzaccount") |
||||
} |
||||
return crypto.PubkeyToAddress(pk.PublicKey) |
||||
|
||||
} |
@ -1,196 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"io/ioutil" |
||||
"os" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
swarm "github.com/ethereum/go-ethereum/swarm/api/client" |
||||
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
) |
||||
|
||||
func TestCLIFeedUpdate(t *testing.T) { |
||||
srv := swarmhttp.NewTestSwarmServer(t, func(api *api.API) swarmhttp.TestServer { |
||||
return swarmhttp.NewServer(api, "") |
||||
}, nil) |
||||
log.Info("starting a test swarm server") |
||||
defer srv.Close() |
||||
|
||||
// create a private key file for signing
|
||||
privkeyHex := "0000000000000000000000000000000000000000000000000000000000001979" |
||||
privKey, _ := crypto.HexToECDSA(privkeyHex) |
||||
address := crypto.PubkeyToAddress(privKey.PublicKey) |
||||
|
||||
pkFileName := testutil.TempFileWithContent(t, privkeyHex) |
||||
defer os.Remove(pkFileName) |
||||
|
||||
// compose a topic. We'll be doing quotes about Miguel de Cervantes
|
||||
var topic feed.Topic |
||||
subject := []byte("Miguel de Cervantes") |
||||
copy(topic[:], subject[:]) |
||||
name := "quotes" |
||||
|
||||
// prepare some data for the update
|
||||
data := []byte("En boca cerrada no entran moscas") |
||||
hexData := hexutil.Encode(data) |
||||
|
||||
flags := []string{ |
||||
"--bzzapi", srv.URL, |
||||
"--bzzaccount", pkFileName, |
||||
"feed", "update", |
||||
"--topic", topic.Hex(), |
||||
"--name", name, |
||||
hexData} |
||||
|
||||
// create an update and expect an exit without errors
|
||||
log.Info("updating a feed with 'swarm feed update'") |
||||
cmd := runSwarm(t, flags...) |
||||
cmd.ExpectExit() |
||||
|
||||
// now try to get the update using the client
|
||||
client := swarm.NewClient(srv.URL) |
||||
|
||||
// build the same topic as before, this time
|
||||
// we use NewTopic to create a topic automatically.
|
||||
topic, err := feed.NewTopic(name, subject) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// Feed configures whose updates we will be looking up.
|
||||
fd := feed.Feed{ |
||||
Topic: topic, |
||||
User: address, |
||||
} |
||||
|
||||
// Build a query to get the latest update
|
||||
query := feed.NewQueryLatest(&fd, lookup.NoClue) |
||||
|
||||
// retrieve content!
|
||||
reader, err := client.QueryFeed(query, "") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
retrieved, err := ioutil.ReadAll(reader) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// check we retrieved the sent information
|
||||
if !bytes.Equal(data, retrieved) { |
||||
t.Fatalf("Received %s, expected %s", retrieved, data) |
||||
} |
||||
|
||||
// Now retrieve info for the next update
|
||||
flags = []string{ |
||||
"--bzzapi", srv.URL, |
||||
"feed", "info", |
||||
"--topic", topic.Hex(), |
||||
"--user", address.Hex(), |
||||
} |
||||
|
||||
log.Info("getting feed info with 'swarm feed info'") |
||||
cmd = runSwarm(t, flags...) |
||||
_, matches := cmd.ExpectRegexp(`.*`) // regex hack to extract stdout
|
||||
cmd.ExpectExit() |
||||
|
||||
// verify we can deserialize the result as a valid JSON
|
||||
var request feed.Request |
||||
err = json.Unmarshal([]byte(matches[0]), &request) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// make sure the retrieved feed is the same
|
||||
if request.Feed != fd { |
||||
t.Fatalf("Expected feed to be: %s, got %s", fd, request.Feed) |
||||
} |
||||
|
||||
// test publishing a manifest
|
||||
flags = []string{ |
||||
"--bzzapi", srv.URL, |
||||
"--bzzaccount", pkFileName, |
||||
"feed", "create", |
||||
"--topic", topic.Hex(), |
||||
} |
||||
|
||||
log.Info("Publishing manifest with 'swarm feed create'") |
||||
cmd = runSwarm(t, flags...) |
||||
_, matches = cmd.ExpectRegexp(`[a-f\d]{64}`) |
||||
cmd.ExpectExit() |
||||
|
||||
manifestAddress := matches[0] // read the received feed manifest
|
||||
|
||||
// now attempt to lookup the latest update using a manifest instead
|
||||
reader, err = client.QueryFeed(nil, manifestAddress) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
retrieved, err = ioutil.ReadAll(reader) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if !bytes.Equal(data, retrieved) { |
||||
t.Fatalf("Received %s, expected %s", retrieved, data) |
||||
} |
||||
|
||||
// test publishing a manifest for a different user
|
||||
flags = []string{ |
||||
"--bzzapi", srv.URL, |
||||
"feed", "create", |
||||
"--topic", topic.Hex(), |
||||
"--user", "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // different user
|
||||
} |
||||
|
||||
log.Info("Publishing manifest with 'swarm feed create' for a different user") |
||||
cmd = runSwarm(t, flags...) |
||||
_, matches = cmd.ExpectRegexp(`[a-f\d]{64}`) |
||||
cmd.ExpectExit() |
||||
|
||||
manifestAddress = matches[0] // read the received feed manifest
|
||||
|
||||
// now let's try to update that user's manifest which we don't have the private key for
|
||||
flags = []string{ |
||||
"--bzzapi", srv.URL, |
||||
"--bzzaccount", pkFileName, |
||||
"feed", "update", |
||||
"--manifest", manifestAddress, |
||||
hexData} |
||||
|
||||
// create an update and expect an error given there is a user mismatch
|
||||
log.Info("updating a feed with 'swarm feed update'") |
||||
cmd = runSwarm(t, flags...) |
||||
cmd.ExpectRegexp("Fatal:.*") // best way so far to detect a failure.
|
||||
cmd.ExpectExit() |
||||
if cmd.ExitStatus() == 0 { |
||||
t.Fatal("Expected nonzero exit code when updating a manifest with the wrong user. Got 0.") |
||||
} |
||||
} |
@ -1,189 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Command feed allows the user to create and update signed Swarm feeds
|
||||
package main |
||||
|
||||
import cli "gopkg.in/urfave/cli.v1" |
||||
|
||||
var ( |
||||
ChequebookAddrFlag = cli.StringFlag{ |
||||
Name: "chequebook", |
||||
Usage: "chequebook contract address", |
||||
EnvVar: SwarmEnvChequebookAddr, |
||||
} |
||||
SwarmAccountFlag = cli.StringFlag{ |
||||
Name: "bzzaccount", |
||||
Usage: "Swarm account key file", |
||||
EnvVar: SwarmEnvAccount, |
||||
} |
||||
SwarmListenAddrFlag = cli.StringFlag{ |
||||
Name: "httpaddr", |
||||
Usage: "Swarm HTTP API listening interface", |
||||
EnvVar: SwarmEnvListenAddr, |
||||
} |
||||
SwarmPortFlag = cli.StringFlag{ |
||||
Name: "bzzport", |
||||
Usage: "Swarm local http api port", |
||||
EnvVar: SwarmEnvPort, |
||||
} |
||||
SwarmNetworkIdFlag = cli.IntFlag{ |
||||
Name: "bzznetworkid", |
||||
Usage: "Network identifier (integer, default 3=swarm testnet)", |
||||
EnvVar: SwarmEnvNetworkID, |
||||
} |
||||
SwarmSwapEnabledFlag = cli.BoolFlag{ |
||||
Name: "swap", |
||||
Usage: "Swarm SWAP enabled (default false)", |
||||
EnvVar: SwarmEnvSwapEnable, |
||||
} |
||||
SwarmSwapAPIFlag = cli.StringFlag{ |
||||
Name: "swap-api", |
||||
Usage: "URL of the Ethereum API provider to use to settle SWAP payments", |
||||
EnvVar: SwarmEnvSwapAPI, |
||||
} |
||||
SwarmSyncDisabledFlag = cli.BoolTFlag{ |
||||
Name: "nosync", |
||||
Usage: "Disable swarm syncing", |
||||
EnvVar: SwarmEnvSyncDisable, |
||||
} |
||||
SwarmSyncUpdateDelay = cli.DurationFlag{ |
||||
Name: "sync-update-delay", |
||||
Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)", |
||||
EnvVar: SwarmEnvSyncUpdateDelay, |
||||
} |
||||
SwarmMaxStreamPeerServersFlag = cli.IntFlag{ |
||||
Name: "max-stream-peer-servers", |
||||
Usage: "Limit of Stream peer servers, 0 denotes unlimited", |
||||
EnvVar: SwarmEnvMaxStreamPeerServers, |
||||
Value: 10000, // A very large default value is possible as stream servers have very small memory footprint
|
||||
} |
||||
SwarmLightNodeEnabled = cli.BoolFlag{ |
||||
Name: "lightnode", |
||||
Usage: "Enable Swarm LightNode (default false)", |
||||
EnvVar: SwarmEnvLightNodeEnable, |
||||
} |
||||
SwarmDeliverySkipCheckFlag = cli.BoolFlag{ |
||||
Name: "delivery-skip-check", |
||||
Usage: "Skip chunk delivery check (default false)", |
||||
EnvVar: SwarmEnvDeliverySkipCheck, |
||||
} |
||||
EnsAPIFlag = cli.StringSliceFlag{ |
||||
Name: "ens-api", |
||||
Usage: "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url", |
||||
EnvVar: SwarmEnvENSAPI, |
||||
} |
||||
SwarmApiFlag = cli.StringFlag{ |
||||
Name: "bzzapi", |
||||
Usage: "Specifies the Swarm HTTP endpoint to connect to", |
||||
Value: "http://127.0.0.1:8500", |
||||
} |
||||
SwarmRecursiveFlag = cli.BoolFlag{ |
||||
Name: "recursive", |
||||
Usage: "Upload directories recursively", |
||||
} |
||||
SwarmWantManifestFlag = cli.BoolTFlag{ |
||||
Name: "manifest", |
||||
Usage: "Automatic manifest upload (default true)", |
||||
} |
||||
SwarmUploadDefaultPath = cli.StringFlag{ |
||||
Name: "defaultpath", |
||||
Usage: "path to file served for empty url path (none)", |
||||
} |
||||
SwarmAccessGrantKeyFlag = cli.StringFlag{ |
||||
Name: "grant-key", |
||||
Usage: "grants a given public key access to an ACT", |
||||
} |
||||
SwarmAccessGrantKeysFlag = cli.StringFlag{ |
||||
Name: "grant-keys", |
||||
Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT", |
||||
} |
||||
SwarmUpFromStdinFlag = cli.BoolFlag{ |
||||
Name: "stdin", |
||||
Usage: "reads data to be uploaded from stdin", |
||||
} |
||||
SwarmUploadMimeType = cli.StringFlag{ |
||||
Name: "mime", |
||||
Usage: "Manually specify MIME type", |
||||
} |
||||
SwarmEncryptedFlag = cli.BoolFlag{ |
||||
Name: "encrypt", |
||||
Usage: "use encrypted upload", |
||||
} |
||||
SwarmAccessPasswordFlag = cli.StringFlag{ |
||||
Name: "password", |
||||
Usage: "Password", |
||||
EnvVar: SwarmAccessPassword, |
||||
} |
||||
SwarmDryRunFlag = cli.BoolFlag{ |
||||
Name: "dry-run", |
||||
Usage: "dry-run", |
||||
} |
||||
CorsStringFlag = cli.StringFlag{ |
||||
Name: "corsdomain", |
||||
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')", |
||||
EnvVar: SwarmEnvCORS, |
||||
} |
||||
SwarmStorePath = cli.StringFlag{ |
||||
Name: "store.path", |
||||
Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)", |
||||
EnvVar: SwarmEnvStorePath, |
||||
} |
||||
SwarmStoreCapacity = cli.Uint64Flag{ |
||||
Name: "store.size", |
||||
Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)", |
||||
EnvVar: SwarmEnvStoreCapacity, |
||||
} |
||||
SwarmStoreCacheCapacity = cli.UintFlag{ |
||||
Name: "store.cache.size", |
||||
Usage: "Number of recent chunks cached in memory", |
||||
EnvVar: SwarmEnvStoreCacheCapacity, |
||||
Value: 10000, |
||||
} |
||||
SwarmCompressedFlag = cli.BoolFlag{ |
||||
Name: "compressed", |
||||
Usage: "Prints encryption keys in compressed form", |
||||
} |
||||
SwarmBootnodeModeFlag = cli.BoolFlag{ |
||||
Name: "bootnode-mode", |
||||
Usage: "Run Swarm in Bootnode mode", |
||||
} |
||||
SwarmFeedNameFlag = cli.StringFlag{ |
||||
Name: "name", |
||||
Usage: "User-defined name for the new feed, limited to 32 characters. If combined with topic, it will refer to a subtopic with this name", |
||||
} |
||||
SwarmFeedTopicFlag = cli.StringFlag{ |
||||
Name: "topic", |
||||
Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters", |
||||
} |
||||
SwarmFeedManifestFlag = cli.StringFlag{ |
||||
Name: "manifest", |
||||
Usage: "Refers to the feed through a manifest", |
||||
} |
||||
SwarmFeedUserFlag = cli.StringFlag{ |
||||
Name: "user", |
||||
Usage: "Indicates the user who updates the feed", |
||||
} |
||||
SwarmGlobalStoreAPIFlag = cli.StringFlag{ |
||||
Name: "globalstore-api", |
||||
Usage: "URL of the Global Store API provider (only for testing)", |
||||
EnvVar: SwarmGlobalstoreAPI, |
||||
} |
||||
SwarmLegacyFlag = cli.BoolFlag{ |
||||
Name: "legacy", |
||||
Usage: "Use this flag when importing a db export from a legacy local store database dump (for schemas older than 'sanctuary')", |
||||
} |
||||
) |
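The --ens-api usage string above documents the [tld:][contract-addr@]url value format. As a minimal illustrative sketch only (a hypothetical helper, not the parser cmd/swarm actually uses; the values in main are placeholders), the format could be split like this in Go:

// parseEnsAPI is a hypothetical sketch of splitting an --ens-api value of the
// form [tld:][contract-addr@]url, mirroring the EnsAPIFlag usage text above.
package main

import (
	"fmt"
	"strings"
)

func parseEnsAPI(value string) (tld, contract, url string) {
	// A leading "tld:" prefix is only a TLD if the colon is not part of a URL scheme.
	if i := strings.Index(value, ":"); i > 0 && !strings.HasPrefix(value[i:], "://") {
		tld, value = value[:i], value[i+1:]
	}
	// An optional "contract-addr@" prefix precedes the endpoint URL.
	if i := strings.Index(value, "@"); i > 0 {
		contract, value = value[:i], value[i+1:]
	}
	return tld, contract, value
}

func main() {
	// placeholder example value
	fmt.Println(parseEnsAPI("eth:0x314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:8545"))
}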
@ -1,162 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"path/filepath" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm/fuse" |
||||
"gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var fsCommand = cli.Command{ |
||||
Name: "fs", |
||||
CustomHelpTemplate: helpTemplate, |
||||
Usage: "perform FUSE operations", |
||||
ArgsUsage: "fs COMMAND", |
||||
Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node", |
||||
Subcommands: []cli.Command{ |
||||
{ |
||||
Action: mount, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "mount", |
||||
Usage: "mount a swarm hash to a mount point", |
||||
ArgsUsage: "swarm fs mount <manifest hash> <mount point>", |
||||
Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", |
||||
}, |
||||
{ |
||||
Action: unmount, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "unmount", |
||||
Usage: "unmount a swarmfs mount", |
||||
ArgsUsage: "swarm fs unmount <mount point>", |
||||
Description: "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", |
||||
}, |
||||
{ |
||||
Action: listMounts, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "list", |
||||
Usage: "list swarmfs mounts", |
||||
ArgsUsage: "swarm fs list", |
||||
Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", |
||||
}, |
||||
}, |
||||
} |
||||
|
||||
func mount(cliContext *cli.Context) { |
||||
args := cliContext.Args() |
||||
if len(args) < 2 { |
||||
utils.Fatalf("Usage: swarm fs mount <manifestHash> <file name>") |
||||
} |
||||
|
||||
client, err := dialRPC(cliContext) |
||||
if err != nil { |
||||
utils.Fatalf("had an error dailing to RPC endpoint: %v", err) |
||||
} |
||||
defer client.Close() |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) |
||||
defer cancel() |
||||
|
||||
mf := &fuse.MountInfo{} |
||||
mountPoint, err := filepath.Abs(filepath.Clean(args[1])) |
||||
if err != nil { |
||||
utils.Fatalf("error expanding path for mount point: %v", err) |
||||
} |
||||
err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint) |
||||
if err != nil { |
||||
utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err) |
||||
} |
||||
} |
||||
|
||||
func unmount(cliContext *cli.Context) { |
||||
args := cliContext.Args() |
||||
|
||||
if len(args) < 1 { |
||||
utils.Fatalf("Usage: swarm fs unmount <mount path>") |
||||
} |
||||
client, err := dialRPC(cliContext) |
||||
if err != nil { |
||||
utils.Fatalf("had an error dailing to RPC endpoint: %v", err) |
||||
} |
||||
defer client.Close() |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) |
||||
defer cancel() |
||||
|
||||
mf := fuse.MountInfo{} |
||||
err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0]) |
||||
if err != nil { |
||||
utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err) |
||||
} |
||||
fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference
|
||||
} |
||||
|
||||
func listMounts(cliContext *cli.Context) { |
||||
client, err := dialRPC(cliContext) |
||||
if err != nil { |
||||
utils.Fatalf("had an error dailing to RPC endpoint: %v", err) |
||||
} |
||||
defer client.Close() |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) |
||||
defer cancel() |
||||
|
||||
var mf []fuse.MountInfo |
||||
err = client.CallContext(ctx, &mf, "swarmfs_listmounts") |
||||
if err != nil { |
||||
utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err) |
||||
} |
||||
if len(mf) == 0 { |
||||
fmt.Print("Could not found any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n") |
||||
} else { |
||||
fmt.Printf("Found %d swarmfs mount(s):\n", len(mf)) |
||||
for i, mountInfo := range mf { |
||||
fmt.Printf("%d:\n", i) |
||||
fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint) |
||||
fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest) |
||||
fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func dialRPC(ctx *cli.Context) (*rpc.Client, error) { |
||||
endpoint := getIPCEndpoint(ctx) |
||||
log.Info("IPC endpoint", "path", endpoint) |
||||
return rpc.Dial(endpoint) |
||||
} |
||||
|
||||
func getIPCEndpoint(ctx *cli.Context) string { |
||||
cfg := defaultNodeConfig |
||||
utils.SetNodeConfig(ctx, &cfg) |
||||
|
||||
endpoint := cfg.IPCEndpoint() |
||||
|
||||
if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") { |
||||
// Backwards compatibility with geth < 1.5 which required
|
||||
// these prefixes.
|
||||
endpoint = endpoint[4:] |
||||
} |
||||
return endpoint |
||||
} |
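The fs subcommands above all follow the same pattern: dial the node's IPC endpoint, then call a swarmfs_* RPC method under a 30-second timeout. A minimal standalone sketch of driving that same RPC surface directly (the bzzd.ipc path below is a placeholder, not a path this repository defines):

// Sketch: list swarmfs mounts over a node's IPC endpoint, using the same
// rpc.Dial / CallContext / fuse.MountInfo types as the fs command above.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/fuse"
)

func main() {
	client, err := rpc.Dial("/path/to/bzzd.ipc") // placeholder IPC path
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	var mounts []fuse.MountInfo
	if err := client.CallContext(ctx, &mounts, "swarmfs_listmounts"); err != nil {
		panic(err)
	}
	for _, m := range mounts {
		fmt.Println(m.MountPoint, m.LatestManifest)
	}
}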
@ -1,260 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build linux freebsd
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"os" |
||||
"path/filepath" |
||||
"strings" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
type testFile struct { |
||||
filePath string |
||||
content string |
||||
} |
||||
|
||||
// TestCLISwarmFsDefaultIPCPath tests if the most basic fs command, i.e., list
|
||||
// can find and correctly connect to a running Swarm node on the default
|
||||
// IPCPath.
|
||||
func TestCLISwarmFsDefaultIPCPath(t *testing.T) { |
||||
cluster := newTestCluster(t, 1) |
||||
defer cluster.Shutdown() |
||||
|
||||
handlingNode := cluster.Nodes[0] |
||||
list := runSwarm(t, []string{ |
||||
"--datadir", handlingNode.Dir, |
||||
"fs", |
||||
"list", |
||||
}...) |
||||
|
||||
list.WaitExit() |
||||
if list.Err != nil { |
||||
t.Fatal(list.Err) |
||||
} |
||||
} |
||||
|
||||
// TestCLISwarmFs is a high-level test of swarmfs
|
||||
//
|
||||
// This test fails on travis for macOS as this executable exits with code 1
|
||||
// and without any log messages in the log:
|
||||
// /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse.
|
||||
// This is the reason for this file not being built on darwin architecture.
|
||||
func TestCLISwarmFs(t *testing.T) { |
||||
cluster := newTestCluster(t, 3) |
||||
defer cluster.Shutdown() |
||||
|
||||
// create a tmp dir
|
||||
mountPoint, err := ioutil.TempDir("", "swarm-test") |
||||
log.Debug("swarmfs cli test", "1st mount", mountPoint) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(mountPoint) |
||||
|
||||
handlingNode := cluster.Nodes[0] |
||||
mhash := doUploadEmptyDir(t, handlingNode) |
||||
log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) |
||||
|
||||
mount := runSwarm(t, []string{ |
||||
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath), |
||||
"fs", |
||||
"mount", |
||||
mhash, |
||||
mountPoint, |
||||
}...) |
||||
mount.ExpectExit() |
||||
|
||||
filesToAssert := []*testFile{} |
||||
|
||||
dirPath, err := createDirInDir(mountPoint, "testSubDir") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
dummyContent := "somerandomtestcontentthatshouldbeasserted" |
||||
dirs := []string{ |
||||
mountPoint, |
||||
dirPath, |
||||
dirPath2, |
||||
} |
||||
files := []string{"f1.tmp", "f2.tmp"} |
||||
for _, d := range dirs { |
||||
for _, entry := range files { |
||||
tFile, err := createTestFileInPath(d, entry, dummyContent) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
filesToAssert = append(filesToAssert, tFile) |
||||
} |
||||
} |
||||
if len(filesToAssert) != len(dirs)*len(files) { |
||||
t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert)) |
||||
} |
||||
hashRegexp := `[a-f\d]{64}` |
||||
log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) |
||||
|
||||
unmount := runSwarm(t, []string{ |
||||
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath), |
||||
"fs", |
||||
"unmount", |
||||
mountPoint, |
||||
}...) |
||||
_, matches := unmount.ExpectRegexp(hashRegexp) |
||||
unmount.ExpectExit() |
||||
|
||||
hash := matches[0] |
||||
if hash == mhash { |
||||
t.Fatal("this should not be equal") |
||||
} |
||||
log.Debug("swarmfs cli test: asserting no files in mount point") |
||||
|
||||
//check that there's nothing in the mount folder
|
||||
filesInDir, err := ioutil.ReadDir(mountPoint) |
||||
if err != nil { |
||||
t.Fatalf("had an error reading the directory: %v", err) |
||||
} |
||||
|
||||
if len(filesInDir) != 0 { |
||||
t.Fatal("there shouldn't be anything here") |
||||
} |
||||
|
||||
secondMountPoint, err := ioutil.TempDir("", "swarm-test") |
||||
log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(secondMountPoint) |
||||
|
||||
log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) |
||||
|
||||
//remount, check files
|
||||
newMount := runSwarm(t, []string{ |
||||
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath), |
||||
"fs", |
||||
"mount", |
||||
hash, // the latest hash
|
||||
secondMountPoint, |
||||
}...) |
||||
|
||||
newMount.ExpectExit() |
||||
time.Sleep(1 * time.Second) |
||||
|
||||
filesInDir, err = ioutil.ReadDir(secondMountPoint) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if len(filesInDir) == 0 { |
||||
t.Fatal("there should be something here") |
||||
} |
||||
|
||||
log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount") |
||||
|
||||
for _, file := range filesToAssert { |
||||
file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1) |
||||
fileBytes, err := ioutil.ReadFile(file.filePath) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) { |
||||
t.Fatal("this should be equal") |
||||
} |
||||
} |
||||
|
||||
log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) |
||||
|
||||
unmountSec := runSwarm(t, []string{ |
||||
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath), |
||||
"fs", |
||||
"unmount", |
||||
secondMountPoint, |
||||
}...) |
||||
|
||||
_, matches = unmountSec.ExpectRegexp(hashRegexp) |
||||
unmountSec.ExpectExit() |
||||
|
||||
if matches[0] != hash { |
||||
t.Fatal("these should be equal - no changes made") |
||||
} |
||||
} |
||||
|
||||
func doUploadEmptyDir(t *testing.T, node *testNode) string { |
||||
// create a tmp dir
|
||||
tmpDir, err := ioutil.TempDir("", "swarm-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmpDir) |
||||
|
||||
hashRegexp := `[a-f\d]{64}` |
||||
|
||||
flags := []string{ |
||||
"--bzzapi", node.URL, |
||||
"--recursive", |
||||
"up", |
||||
tmpDir} |
||||
|
||||
log.Info("swarmfs cli test: uploading dir with 'swarm up'") |
||||
up := runSwarm(t, flags...) |
||||
_, matches := up.ExpectRegexp(hashRegexp) |
||||
up.ExpectExit() |
||||
hash := matches[0] |
||||
log.Info("swarmfs cli test: dir uploaded", "hash", hash) |
||||
return hash |
||||
} |
||||
|
||||
func createDirInDir(createInDir string, dirToCreate string) (string, error) { |
||||
fullpath := filepath.Join(createInDir, dirToCreate) |
||||
err := os.MkdirAll(fullpath, 0777) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
return fullpath, nil |
||||
} |
||||
|
||||
func createTestFileInPath(dir, filename, content string) (*testFile, error) { |
||||
tFile := &testFile{} |
||||
filePath := filepath.Join(dir, filename) |
||||
if file, err := os.Create(filePath); err == nil { |
||||
tFile.content = content |
||||
tFile.filePath = filePath |
||||
|
||||
_, err = io.WriteString(file, content) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
file.Close() |
||||
} |
||||
|
||||
return tFile, nil |
||||
} |
@ -1,66 +0,0 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"net" |
||||
"net/http" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock/explorer" |
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
// serveChunkExplorer starts an http server in background with chunk explorer handler
|
||||
// using the provided global store. Server is started if the returned shutdown function
|
||||
// is not nil.
|
||||
func serveChunkExplorer(ctx *cli.Context, globalStore mock.GlobalStorer) (shutdown func(), err error) { |
||||
if !ctx.IsSet("explorer-address") { |
||||
return nil, nil |
||||
} |
||||
|
||||
corsOrigins := ctx.StringSlice("explorer-cors-origin") |
||||
server := &http.Server{ |
||||
Handler: explorer.NewHandler(globalStore, corsOrigins), |
||||
IdleTimeout: 30 * time.Minute, |
||||
ReadTimeout: 2 * time.Minute, |
||||
WriteTimeout: 2 * time.Minute, |
||||
} |
||||
listener, err := net.Listen("tcp", ctx.String("explorer-address")) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("explorer: %v", err) |
||||
} |
||||
log.Info("chunk explorer http", "address", listener.Addr().String(), "origins", corsOrigins) |
||||
|
||||
go func() { |
||||
if err := server.Serve(listener); err != nil { |
||||
log.Error("chunk explorer", "err", err) |
||||
} |
||||
}() |
||||
|
||||
return func() { |
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) |
||||
defer cancel() |
||||
if err := server.Shutdown(ctx); err != nil { |
||||
log.Error("chunk explorer: shutdown", "err", err) |
||||
} |
||||
}, nil |
||||
} |
@ -1,254 +0,0 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"net/http" |
||||
"sort" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock/explorer" |
||||
mockRPC "github.com/ethereum/go-ethereum/swarm/storage/mock/rpc" |
||||
) |
||||
|
||||
// TestExplorer validates basic chunk explorer functionality by storing
|
||||
// a small set of chunks and making http requests on the exposed endpoint.
|
||||
// Full chunk explorer validation is done in mock/explorer package.
|
||||
func TestExplorer(t *testing.T) { |
||||
addr := findFreeTCPAddress(t) |
||||
explorerAddr := findFreeTCPAddress(t) |
||||
testCmd := runGlobalStore(t, "ws", "--addr", addr, "--explorer-address", explorerAddr) |
||||
defer testCmd.Kill() |
||||
|
||||
client := websocketClient(t, addr) |
||||
|
||||
store := mockRPC.NewGlobalStore(client) |
||||
defer store.Close() |
||||
|
||||
nodeKeys := map[string][]string{ |
||||
"a1": {"b1", "b2", "b3"}, |
||||
"a2": {"b3", "b4", "b5"}, |
||||
} |
||||
|
||||
keyNodes := make(map[string][]string) |
||||
|
||||
for addr, keys := range nodeKeys { |
||||
for _, key := range keys { |
||||
keyNodes[key] = append(keyNodes[key], addr) |
||||
} |
||||
} |
||||
|
||||
invalidAddr := "c1" |
||||
invalidKey := "d1" |
||||
|
||||
for addr, keys := range nodeKeys { |
||||
for _, key := range keys { |
||||
err := store.Put(common.HexToAddress(addr), common.Hex2Bytes(key), []byte("data")) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
} |
||||
|
||||
endpoint := "http://" + explorerAddr |
||||
|
||||
t.Run("has key", func(t *testing.T) { |
||||
for addr, keys := range nodeKeys { |
||||
for _, key := range keys { |
||||
testStatusResponse(t, endpoint+"/api/has-key/"+addr+"/"+key, http.StatusOK) |
||||
testStatusResponse(t, endpoint+"/api/has-key/"+invalidAddr+"/"+key, http.StatusNotFound) |
||||
} |
||||
testStatusResponse(t, endpoint+"/api/has-key/"+addr+"/"+invalidKey, http.StatusNotFound) |
||||
} |
||||
testStatusResponse(t, endpoint+"/api/has-key/"+invalidAddr+"/"+invalidKey, http.StatusNotFound) |
||||
}) |
||||
|
||||
t.Run("keys", func(t *testing.T) { |
||||
var keys []string |
||||
for key := range keyNodes { |
||||
keys = append(keys, key) |
||||
} |
||||
sort.Strings(keys) |
||||
testKeysResponse(t, endpoint+"/api/keys", explorer.KeysResponse{ |
||||
Keys: keys, |
||||
}) |
||||
}) |
||||
|
||||
t.Run("nodes", func(t *testing.T) { |
||||
var nodes []string |
||||
for addr := range nodeKeys { |
||||
nodes = append(nodes, common.HexToAddress(addr).Hex()) |
||||
} |
||||
sort.Strings(nodes) |
||||
testNodesResponse(t, endpoint+"/api/nodes", explorer.NodesResponse{ |
||||
Nodes: nodes, |
||||
}) |
||||
}) |
||||
|
||||
t.Run("node keys", func(t *testing.T) { |
||||
for addr, keys := range nodeKeys { |
||||
testKeysResponse(t, endpoint+"/api/keys?node="+addr, explorer.KeysResponse{ |
||||
Keys: keys, |
||||
}) |
||||
} |
||||
testKeysResponse(t, endpoint+"/api/keys?node="+invalidAddr, explorer.KeysResponse{}) |
||||
}) |
||||
|
||||
t.Run("key nodes", func(t *testing.T) { |
||||
for key, addrs := range keyNodes { |
||||
var nodes []string |
||||
for _, addr := range addrs { |
||||
nodes = append(nodes, common.HexToAddress(addr).Hex()) |
||||
} |
||||
sort.Strings(nodes) |
||||
testNodesResponse(t, endpoint+"/api/nodes?key="+key, explorer.NodesResponse{ |
||||
Nodes: nodes, |
||||
}) |
||||
} |
||||
testNodesResponse(t, endpoint+"/api/nodes?key="+invalidKey, explorer.NodesResponse{}) |
||||
}) |
||||
} |
||||
|
||||
// TestExplorer_CORSOrigin validates if chunk explorer returns
|
||||
// correct CORS origin header in GET and OPTIONS requests.
|
||||
func TestExplorer_CORSOrigin(t *testing.T) { |
||||
origin := "http://localhost/" |
||||
addr := findFreeTCPAddress(t) |
||||
explorerAddr := findFreeTCPAddress(t) |
||||
testCmd := runGlobalStore(t, "ws", |
||||
"--addr", addr, |
||||
"--explorer-address", explorerAddr, |
||||
"--explorer-cors-origin", origin, |
||||
) |
||||
defer testCmd.Kill() |
||||
|
||||
// wait until the server is started
|
||||
waitHTTPEndpoint(t, explorerAddr) |
||||
|
||||
url := "http://" + explorerAddr + "/api/keys" |
||||
|
||||
t.Run("get", func(t *testing.T) { |
||||
req, err := http.NewRequest(http.MethodGet, url, nil) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
req.Header.Set("Origin", origin) |
||||
|
||||
resp, err := http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
header := resp.Header.Get("Access-Control-Allow-Origin") |
||||
if header != origin { |
||||
t.Errorf("got Access-Control-Allow-Origin header %q, want %q", header, origin) |
||||
} |
||||
}) |
||||
|
||||
t.Run("preflight", func(t *testing.T) { |
||||
req, err := http.NewRequest(http.MethodOptions, url, nil) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
req.Header.Set("Origin", origin) |
||||
req.Header.Set("Access-Control-Request-Method", "GET") |
||||
|
||||
resp, err := http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
header := resp.Header.Get("Access-Control-Allow-Origin") |
||||
if header != origin { |
||||
t.Errorf("got Access-Control-Allow-Origin header %q, want %q", header, origin) |
||||
} |
||||
}) |
||||
} |
||||
|
||||
// testStatusResponse makes an http request to the provided url
|
||||
// and validates if response is explorer.StatusResponse for
|
||||
// the expected status code.
|
||||
func testStatusResponse(t *testing.T, url string, code int) { |
||||
t.Helper() |
||||
|
||||
resp, err := http.Get(url) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if resp.StatusCode != code { |
||||
t.Errorf("got status code %v, want %v", resp.StatusCode, code) |
||||
} |
||||
var r explorer.StatusResponse |
||||
if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if r.Code != code { |
||||
t.Errorf("got response code %v, want %v", r.Code, code) |
||||
} |
||||
if r.Message != http.StatusText(code) { |
||||
t.Errorf("got response message %q, want %q", r.Message, http.StatusText(code)) |
||||
} |
||||
} |
||||
|
||||
// testKeysResponse makes an http request to the provided url
|
||||
// and validates that the response matches the expected explorer.KeysResponse.
|
||||
func testKeysResponse(t *testing.T, url string, want explorer.KeysResponse) { |
||||
t.Helper() |
||||
|
||||
resp, err := http.Get(url) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if resp.StatusCode != http.StatusOK { |
||||
t.Errorf("got status code %v, want %v", resp.StatusCode, http.StatusOK) |
||||
} |
||||
var r explorer.KeysResponse |
||||
if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if fmt.Sprint(r.Keys) != fmt.Sprint(want.Keys) { |
||||
t.Errorf("got keys %v, want %v", r.Keys, want.Keys) |
||||
} |
||||
if r.Next != want.Next { |
||||
t.Errorf("got next %s, want %s", r.Next, want.Next) |
||||
} |
||||
} |
||||
|
||||
// testNodesResponse makes an http request to the provided url
|
||||
// and validates that the response matches the expected explorer.NodesResponse.
|
||||
func testNodesResponse(t *testing.T, url string, want explorer.NodesResponse) { |
||||
t.Helper() |
||||
|
||||
resp, err := http.Get(url) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if resp.StatusCode != http.StatusOK { |
||||
t.Errorf("got status code %v, want %v", resp.StatusCode, http.StatusOK) |
||||
} |
||||
var r explorer.NodesResponse |
||||
if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if fmt.Sprint(r.Nodes) != fmt.Sprint(want.Nodes) { |
||||
t.Errorf("got nodes %v, want %v", r.Nodes, want.Nodes) |
||||
} |
||||
if r.Next != want.Next { |
||||
t.Errorf("got next %s, want %s", r.Next, want.Next) |
||||
} |
||||
} |
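The tests above exercise the chunk explorer's HTTP endpoints: /api/keys, /api/nodes and /api/has-key/<node>/<key>. A minimal sketch of querying them outside the test harness; the listener address below is a placeholder standing in for whatever --explorer-address was set to:

// Sketch: fetch the key and node listings from a running chunk explorer.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	base := "http://127.0.0.1:8550" // placeholder --explorer-address
	for _, path := range []string{"/api/keys", "/api/nodes"} {
		resp, err := http.Get(base + path)
		if err != nil {
			panic(err)
		}
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("GET %s -> %d %s\n", path, resp.StatusCode, body)
	}
}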
@ -1,120 +0,0 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"io" |
||||
"net" |
||||
"net/http" |
||||
"os" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock/db" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock/mem" |
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
// startHTTP starts a global store with HTTP RPC server.
|
||||
// It is used for "http" cli command.
|
||||
func startHTTP(ctx *cli.Context) (err error) { |
||||
server, cleanup, err := newServer(ctx) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer cleanup() |
||||
|
||||
listener, err := net.Listen("tcp", ctx.String("addr")) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
log.Info("http", "address", listener.Addr().String()) |
||||
|
||||
return http.Serve(listener, server) |
||||
} |
||||
|
||||
// startWS starts a global store with WebSocket RPC server.
|
||||
// It is used for "websocket" cli command.
|
||||
func startWS(ctx *cli.Context) (err error) { |
||||
server, cleanup, err := newServer(ctx) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer cleanup() |
||||
|
||||
listener, err := net.Listen("tcp", ctx.String("addr")) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
origins := ctx.StringSlice("origins") |
||||
log.Info("websocket", "address", listener.Addr().String(), "origins", origins) |
||||
|
||||
return http.Serve(listener, server.WebsocketHandler(origins)) |
||||
} |
||||
|
||||
// newServer creates a global store and starts a chunk explorer server if configured.
|
||||
// Returned cleanup function should be called only if err is nil.
|
||||
func newServer(ctx *cli.Context) (server *rpc.Server, cleanup func(), err error) { |
||||
log.PrintOrigins(true) |
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(ctx.Int("verbosity")), log.StreamHandler(os.Stdout, log.TerminalFormat(false)))) |
||||
|
||||
cleanup = func() {} |
||||
var globalStore mock.GlobalStorer |
||||
dir := ctx.String("dir") |
||||
if dir != "" { |
||||
dbStore, err := db.NewGlobalStore(dir) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
cleanup = func() { |
||||
if err := dbStore.Close(); err != nil { |
||||
log.Error("global store: close", "err", err) |
||||
} |
||||
} |
||||
globalStore = dbStore |
||||
log.Info("database global store", "dir", dir) |
||||
} else { |
||||
globalStore = mem.NewGlobalStore() |
||||
log.Info("in-memory global store") |
||||
} |
||||
|
||||
server = rpc.NewServer() |
||||
if err := server.RegisterName("mockStore", globalStore); err != nil { |
||||
cleanup() |
||||
return nil, nil, err |
||||
} |
||||
|
||||
shutdown, err := serveChunkExplorer(ctx, globalStore) |
||||
if err != nil { |
||||
cleanup() |
||||
return nil, nil, err |
||||
} |
||||
if shutdown != nil { |
||||
cleanup = func() { |
||||
shutdown() |
||||
|
||||
if c, ok := globalStore.(io.Closer); ok { |
||||
if err := c.Close(); err != nil { |
||||
log.Error("global store: close", "err", err) |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
return server, cleanup, nil |
||||
} |
@ -1,207 +0,0 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"io/ioutil" |
||||
"net" |
||||
"net/http" |
||||
"os" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
mockRPC "github.com/ethereum/go-ethereum/swarm/storage/mock/rpc" |
||||
) |
||||
|
||||
// TestHTTP_InMemory tests in-memory global store that exposes
|
||||
// HTTP server.
|
||||
func TestHTTP_InMemory(t *testing.T) { |
||||
testHTTP(t, true) |
||||
} |
||||
|
||||
// TestHTTP_Database tests global store with persisted database
|
||||
// that exposes HTTP server.
|
||||
func TestHTTP_Database(t *testing.T) { |
||||
dir, err := ioutil.TempDir("", "swarm-global-store-") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(dir) |
||||
|
||||
// create a fresh global store
|
||||
testHTTP(t, true, "--dir", dir) |
||||
|
||||
// check if data saved by the previous global store instance
|
||||
testHTTP(t, false, "--dir", dir) |
||||
} |
||||
|
||||
// testHTTP starts the global store binary with an HTTP server
|
||||
// and validates that it can store and retrieve data.
|
||||
// If put is false, no data will be stored, only retrieved,
|
||||
// giving the possibility to check if data is present in the
|
||||
// storage directory.
|
||||
func testHTTP(t *testing.T, put bool, args ...string) { |
||||
addr := findFreeTCPAddress(t) |
||||
testCmd := runGlobalStore(t, append([]string{"http", "--addr", addr}, args...)...) |
||||
defer testCmd.Kill() |
||||
|
||||
client, err := rpc.DialHTTP("http://" + addr) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// wait until the global store process is started, since
|
||||
// rpc.DialHTTP does not actually connect
|
||||
waitHTTPEndpoint(t, addr) |
||||
|
||||
store := mockRPC.NewGlobalStore(client) |
||||
defer store.Close() |
||||
|
||||
node := store.NewNodeStore(common.HexToAddress("123abc")) |
||||
|
||||
wantKey := "key" |
||||
wantValue := "value" |
||||
|
||||
if put { |
||||
err = node.Put([]byte(wantKey), []byte(wantValue)) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
gotValue, err := node.Get([]byte(wantKey)) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if string(gotValue) != wantValue { |
||||
t.Errorf("got value %s for key %s, want %s", string(gotValue), wantKey, wantValue) |
||||
} |
||||
} |
||||
|
||||
// TestWebsocket_InMemory tests in-memory global store that exposes
|
||||
// WebSocket server.
|
||||
func TestWebsocket_InMemory(t *testing.T) { |
||||
testWebsocket(t, true) |
||||
} |
||||
|
||||
// TestWebsocket_Database tests global store with persisted database
|
||||
// that exposes a WebSocket server.
|
||||
func TestWebsocket_Database(t *testing.T) { |
||||
dir, err := ioutil.TempDir("", "swarm-global-store-") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(dir) |
||||
|
||||
// create a fresh global store
|
||||
testWebsocket(t, true, "--dir", dir) |
||||
|
||||
// check if data saved by the previous global store instance
|
||||
testWebsocket(t, false, "--dir", dir) |
||||
} |
||||
|
||||
// testWebsocket starts global store binary with WebSocket server
|
||||
// and validates that it can store and retrieve data.
|
||||
// If put is false, no data will be stored, only retrieved,
|
||||
// giving the possibility to check if data is present in the
|
||||
// storage directory.
|
||||
func testWebsocket(t *testing.T, put bool, args ...string) { |
||||
addr := findFreeTCPAddress(t) |
||||
testCmd := runGlobalStore(t, append([]string{"ws", "--addr", addr}, args...)...) |
||||
defer testCmd.Kill() |
||||
|
||||
client := websocketClient(t, addr) |
||||
|
||||
store := mockRPC.NewGlobalStore(client) |
||||
defer store.Close() |
||||
|
||||
node := store.NewNodeStore(common.HexToAddress("123abc")) |
||||
|
||||
wantKey := "key" |
||||
wantValue := "value" |
||||
|
||||
if put { |
||||
err := node.Put([]byte(wantKey), []byte(wantValue)) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
gotValue, err := node.Get([]byte(wantKey)) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if string(gotValue) != wantValue { |
||||
t.Errorf("got value %s for key %s, want %s", string(gotValue), wantKey, wantValue) |
||||
} |
||||
} |
||||
|
||||
// findFreeTCPAddress returns a local address (IP:Port) to which
|
||||
// global store can listen on.
|
||||
func findFreeTCPAddress(t *testing.T) (addr string) { |
||||
t.Helper() |
||||
|
||||
listener, err := net.Listen("tcp", "") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer listener.Close() |
||||
|
||||
return listener.Addr().String() |
||||
} |
||||
|
||||
// websocketClient waits until global store process is started
|
||||
// and returns rpc client.
|
||||
func websocketClient(t *testing.T, addr string) (client *rpc.Client) { |
||||
t.Helper() |
||||
|
||||
var err error |
||||
for i := 0; i < 1000; i++ { |
||||
client, err = rpc.DialWebsocket(context.Background(), "ws://"+addr, "") |
||||
if err == nil { |
||||
break |
||||
} |
||||
time.Sleep(10 * time.Millisecond) |
||||
} |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
return client |
||||
} |
||||
|
||||
// waitHTTPEndpoint retries http requests to a provided
|
||||
// address until the connection is established.
|
||||
func waitHTTPEndpoint(t *testing.T, addr string) { |
||||
t.Helper() |
||||
|
||||
var err error |
||||
for i := 0; i < 1000; i++ { |
||||
_, err = http.Get("http://" + addr) |
||||
if err == nil { |
||||
break |
||||
} |
||||
time.Sleep(10 * time.Millisecond) |
||||
} |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
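The helpers above cover the client side of the global store RPC surface. A minimal standalone sketch of the same flow against a store started with "global-store ws --addr 127.0.0.1:3033" (the address and the key/value pair are assumptions for illustration):

// Sketch: connect to the global store over WebSocket RPC and put/get one value,
// mirroring the calls made in testWebsocket above.
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
	mockRPC "github.com/ethereum/go-ethereum/swarm/storage/mock/rpc"
)

func main() {
	client, err := rpc.DialWebsocket(context.Background(), "ws://127.0.0.1:3033", "")
	if err != nil {
		panic(err)
	}
	store := mockRPC.NewGlobalStore(client)
	defer store.Close()

	node := store.NewNodeStore(common.HexToAddress("123abc"))
	if err := node.Put([]byte("key"), []byte("value")); err != nil {
		panic(err)
	}
	value, err := node.Get([]byte("key"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", value)
}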
@ -1,124 +0,0 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"os" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var ( |
||||
version = "0.1" |
||||
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
|
||||
gitDate string |
||||
) |
||||
|
||||
func main() { |
||||
err := newApp().Run(os.Args) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
os.Exit(1) |
||||
} |
||||
} |
||||
|
||||
// newApp constructs a new instance of Swarm Global Store.
|
||||
// Method Run is called on it in the main function and in tests.
|
||||
func newApp() (app *cli.App) { |
||||
app = cli.NewApp() |
||||
app.Name = "global-store" |
||||
app.Version = version |
||||
if len(gitCommit) >= 8 { |
||||
app.Version += "-" + gitCommit[:8] |
||||
} |
||||
if gitDate != "" { |
||||
app.Version += "-" + gitDate |
||||
} |
||||
app.Usage = "Swarm Global Store" |
||||
|
||||
// app flags (for all commands)
|
||||
app.Flags = []cli.Flag{ |
||||
cli.IntFlag{ |
||||
Name: "verbosity", |
||||
Value: 3, |
||||
Usage: "Verbosity level.", |
||||
}, |
||||
cli.StringFlag{ |
||||
Name: "explorer-address", |
||||
Value: "", |
||||
Usage: "Chunk explorer HTTP listener address.", |
||||
}, |
||||
cli.StringSliceFlag{ |
||||
Name: "explorer-cors-origin", |
||||
Value: nil, |
||||
Usage: "Chunk explorer CORS origin (can be specified multiple times).", |
||||
}, |
||||
} |
||||
|
||||
app.Commands = []cli.Command{ |
||||
{ |
||||
Name: "http", |
||||
Aliases: []string{"h"}, |
||||
Usage: "Start swarm global store with HTTP server.", |
||||
Action: startHTTP, |
||||
// Flags only for "start" command.
|
||||
// Allow app flags to be specified after the
|
||||
// command argument.
|
||||
Flags: append(app.Flags, |
||||
cli.StringFlag{ |
||||
Name: "dir", |
||||
Value: "", |
||||
Usage: "Data directory.", |
||||
}, |
||||
cli.StringFlag{ |
||||
Name: "addr", |
||||
Value: "0.0.0.0:3033", |
||||
Usage: "Address to listen for HTTP connections.", |
||||
}, |
||||
), |
||||
}, |
||||
{ |
||||
Name: "websocket", |
||||
Aliases: []string{"ws"}, |
||||
Usage: "Start swarm global store with WebSocket server.", |
||||
Action: startWS, |
||||
// Flags only for "start" command.
|
||||
// Allow app flags to be specified after the
|
||||
// command argument.
|
||||
Flags: append(app.Flags, |
||||
cli.StringFlag{ |
||||
Name: "dir", |
||||
Value: "", |
||||
Usage: "Data directory.", |
||||
}, |
||||
cli.StringFlag{ |
||||
Name: "addr", |
||||
Value: "0.0.0.0:3033", |
||||
Usage: "Address to listen for WebSocket connections.", |
||||
}, |
||||
cli.StringSliceFlag{ |
||||
Name: "origin", |
||||
Value: nil, |
||||
Usage: "WebSocket CORS origin (can be specified multiple times).", |
||||
}, |
||||
), |
||||
}, |
||||
} |
||||
|
||||
return app |
||||
} |
@ -1,49 +0,0 @@ |
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
"testing" |
||||
|
||||
"github.com/docker/docker/pkg/reexec" |
||||
"github.com/ethereum/go-ethereum/internal/cmdtest" |
||||
) |
||||
|
||||
func init() { |
||||
reexec.Register("swarm-global-store", func() { |
||||
if err := newApp().Run(os.Args); err != nil { |
||||
fmt.Fprintln(os.Stderr, err) |
||||
os.Exit(1) |
||||
} |
||||
os.Exit(0) |
||||
}) |
||||
} |
||||
|
||||
func runGlobalStore(t *testing.T, args ...string) *cmdtest.TestCmd { |
||||
tt := cmdtest.NewTestCmd(t, nil) |
||||
tt.Run("swarm-global-store", args...) |
||||
return tt |
||||
} |
||||
|
||||
func TestMain(m *testing.M) { |
||||
if reexec.Init() { |
||||
return |
||||
} |
||||
os.Exit(m.Run()) |
||||
} |
@ -1,116 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Command bzzhash computes a swarm tree hash.
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/hex" |
||||
"fmt" |
||||
"os" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/contracts/ens" |
||||
"github.com/ethereum/go-ethereum/swarm/chunk" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
"gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var hashCommand = cli.Command{ |
||||
Action: hash, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "hash", |
||||
Usage: "print the swarm hash of a file or directory", |
||||
ArgsUsage: "<file>", |
||||
Description: "Prints the swarm hash of file or directory", |
||||
Subcommands: []cli.Command{ |
||||
{ |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "ens", |
||||
Usage: "converts a swarm hash to an ens EIP1577 compatible CIDv1 hash", |
||||
ArgsUsage: "<ref>", |
||||
Description: "", |
||||
Subcommands: []cli.Command{ |
||||
{ |
||||
Action: encodeEipHash, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "contenthash", |
||||
Usage: "converts a swarm hash to an ens EIP1577 compatible CIDv1 hash", |
||||
ArgsUsage: "<ref>", |
||||
Description: "", |
||||
}, |
||||
{ |
||||
Action: ensNodeHash, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "node", |
||||
Usage: "converts an ens name to an ENS node hash", |
||||
ArgsUsage: "<ref>", |
||||
Description: "", |
||||
}, |
||||
}, |
||||
}, |
||||
}} |
||||
|
||||
func hash(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
if len(args) < 1 { |
||||
utils.Fatalf("Usage: swarm hash <file name>") |
||||
} |
||||
f, err := os.Open(args[0]) |
||||
if err != nil { |
||||
utils.Fatalf("Error opening file " + args[1]) |
||||
} |
||||
defer f.Close() |
||||
|
||||
stat, _ := f.Stat() |
||||
fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags()) |
||||
addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false) |
||||
if err != nil { |
||||
utils.Fatalf("%v\n", err) |
||||
} else { |
||||
fmt.Printf("%v\n", addr) |
||||
} |
||||
} |
||||
func ensNodeHash(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
if len(args) < 1 { |
||||
utils.Fatalf("Usage: swarm hash ens node <ens name>") |
||||
} |
||||
ensName := args[0] |
||||
|
||||
hash := ens.EnsNode(ensName) |
||||
|
||||
stringHex := hex.EncodeToString(hash[:]) |
||||
fmt.Println(stringHex) |
||||
} |
||||
func encodeEipHash(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
if len(args) < 1 { |
||||
utils.Fatalf("Usage: swarm hash ens <swarm hash>") |
||||
} |
||||
swarmHash := args[0] |
||||
|
||||
hash := common.HexToHash(swarmHash) |
||||
ensHash, err := ens.EncodeSwarmHash(hash) |
||||
if err != nil { |
||||
utils.Fatalf("error converting swarm hash", err) |
||||
} |
||||
|
||||
stringHex := hex.EncodeToString(ensHash) |
||||
fmt.Println(stringHex) |
||||
} |
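A minimal sketch of the conversion that "swarm hash ens contenthash" performs above, using the same ens.EncodeSwarmHash call; the input hash is a placeholder value:

// Sketch: wrap a 32-byte swarm hash into an EIP-1577 compatible contenthash.
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/contracts/ens"
)

func main() {
	// placeholder swarm hash
	swarmHash := common.HexToHash("c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")
	ensHash, err := ens.EncodeSwarmHash(swarmHash)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(ensHash))
}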
@ -1,70 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
"strings" |
||||
"text/tabwriter" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
swarm "github.com/ethereum/go-ethereum/swarm/api/client" |
||||
"gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var listCommand = cli.Command{ |
||||
Action: list, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "ls", |
||||
Usage: "list files and directories contained in a manifest", |
||||
ArgsUsage: "<manifest> [<prefix>]", |
||||
Description: "Lists files and directories contained in a manifest", |
||||
} |
||||
|
||||
func list(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
|
||||
if len(args) < 1 { |
||||
utils.Fatalf("Please supply a manifest reference as the first argument") |
||||
} else if len(args) > 2 { |
||||
utils.Fatalf("Too many arguments - usage 'swarm ls manifest [prefix]'") |
||||
} |
||||
manifest := args[0] |
||||
|
||||
var prefix string |
||||
if len(args) == 2 { |
||||
prefix = args[1] |
||||
} |
||||
|
||||
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") |
||||
client := swarm.NewClient(bzzapi) |
||||
list, err := client.List(manifest, prefix, "") |
||||
if err != nil { |
||||
utils.Fatalf("Failed to generate file and directory list: %s", err) |
||||
} |
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0) |
||||
defer w.Flush() |
||||
fmt.Fprintln(w, "HASH\tCONTENT TYPE\tPATH") |
||||
for _, prefix := range list.CommonPrefixes { |
||||
fmt.Fprintf(w, "%s\t%s\t%s\n", "", "DIR", prefix) |
||||
} |
||||
for _, entry := range list.Entries { |
||||
fmt.Fprintf(w, "%s\t%s\t%s\n", entry.Hash, entry.ContentType, entry.Path) |
||||
} |
||||
} |
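A minimal sketch of the same manifest listing done programmatically with the swarm HTTP client used above; the manifest hash is a placeholder and the empty second and third arguments mirror the call made in list():

// Sketch: list a manifest via the local Swarm HTTP gateway.
package main

import (
	"fmt"

	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

func main() {
	client := swarm.NewClient("http://127.0.0.1:8500")
	list, err := client.List("<manifest hash>", "", "") // placeholder manifest reference
	if err != nil {
		panic(err)
	}
	for _, prefix := range list.CommonPrefixes {
		fmt.Println("DIR ", prefix)
	}
	for _, entry := range list.Entries {
		fmt.Println(entry.Hash, entry.ContentType, entry.Path)
	}
}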
@ -1,475 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"crypto/ecdsa" |
||||
"encoding/hex" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"os" |
||||
"os/signal" |
||||
"runtime" |
||||
"sort" |
||||
"strconv" |
||||
"strings" |
||||
"syscall" |
||||
|
||||
"github.com/ethereum/go-ethereum/accounts" |
||||
"github.com/ethereum/go-ethereum/accounts/keystore" |
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/console" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/internal/debug" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p/enode" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm" |
||||
bzzapi "github.com/ethereum/go-ethereum/swarm/api" |
||||
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock" |
||||
mockrpc "github.com/ethereum/go-ethereum/swarm/storage/mock/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm/tracing" |
||||
sv "github.com/ethereum/go-ethereum/swarm/version" |
||||
|
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
const clientIdentifier = "swarm" |
||||
const helpTemplate = `NAME: |
||||
{{.HelpName}} - {{.Usage}} |
||||
|
||||
USAGE: |
||||
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} |
||||
|
||||
CATEGORY: |
||||
{{.Category}}{{end}}{{if .Description}} |
||||
|
||||
DESCRIPTION: |
||||
{{.Description}}{{end}}{{if .VisibleFlags}} |
||||
|
||||
OPTIONS: |
||||
{{range .VisibleFlags}}{{.}} |
||||
{{end}}{{end}} |
||||
` |
||||
|
||||
// Git SHA1 commit hash of the release (set via linker flags)
|
||||
// this variable will be assigned if the corresponding parameter is passed with install, but not with test
|
||||
// e.g.: go install -ldflags "-X main.gitCommit=ed1312d01b19e04ef578946226e5d8069d5dfd5a" ./cmd/swarm
|
||||
var gitCommit string |
||||
|
||||
//declare a few constant error messages, useful for later error check comparisons in test
|
||||
var ( |
||||
SwarmErrNoBZZAccount = "bzzaccount option is required but not set; check your config file, command line or environment variables" |
||||
SwarmErrSwapSetNoAPI = "SWAP is enabled but --swap-api is not set" |
||||
) |
||||
|
||||
// this help command gets added to any subcommand that does not define it explicitly
|
||||
var defaultSubcommandHelp = cli.Command{ |
||||
Action: func(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "", 1) }, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "help", |
||||
Usage: "shows this help", |
||||
Hidden: true, |
||||
} |
||||
|
||||
var defaultNodeConfig = node.DefaultConfig |
||||
|
||||
// This init function sets defaults so cmd/swarm can run alongside geth.
|
||||
func init() { |
||||
sv.GitCommit = gitCommit |
||||
defaultNodeConfig.Name = clientIdentifier |
||||
defaultNodeConfig.Version = sv.VersionWithCommit(gitCommit) |
||||
defaultNodeConfig.P2P.ListenAddr = ":30399" |
||||
defaultNodeConfig.IPCPath = "bzzd.ipc" |
||||
// Set flag defaults for --help display.
|
||||
utils.ListenPortFlag.Value = 30399 |
||||
} |
||||
|
||||
var app = utils.NewApp("", "", "Ethereum Swarm") |
||||
|
||||
// This init function creates the cli.App.
|
||||
func init() { |
||||
app.Action = bzzd |
||||
app.Version = sv.ArchiveVersion(gitCommit) |
||||
app.Copyright = "Copyright 2013-2016 The go-ethereum Authors" |
||||
app.Commands = []cli.Command{ |
||||
{ |
||||
Action: version, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "version", |
||||
Usage: "Print version numbers", |
||||
Description: "The output of this command is supposed to be machine-readable", |
||||
}, |
||||
{ |
||||
Action: keys, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "print-keys", |
||||
Flags: []cli.Flag{SwarmCompressedFlag}, |
||||
Usage: "Print public key information", |
||||
Description: "The output of this command is supposed to be machine-readable", |
||||
}, |
||||
// See upload.go
|
||||
upCommand, |
||||
// See access.go
|
||||
accessCommand, |
||||
// See feeds.go
|
||||
feedCommand, |
||||
// See list.go
|
||||
listCommand, |
||||
// See hash.go
|
||||
hashCommand, |
||||
// See download.go
|
||||
downloadCommand, |
||||
// See manifest.go
|
||||
manifestCommand, |
||||
// See fs.go
|
||||
fsCommand, |
||||
// See db.go
|
||||
dbCommand, |
||||
// See config.go
|
||||
DumpConfigCommand, |
||||
// hashesCommand
|
||||
hashesCommand, |
||||
} |
||||
|
||||
// append a hidden help subcommand to all commands that have subcommands
|
||||
// if a help command was already defined above, that one will take precedence.
|
||||
addDefaultHelpSubcommands(app.Commands) |
||||
|
||||
sort.Sort(cli.CommandsByName(app.Commands)) |
||||
|
||||
app.Flags = []cli.Flag{ |
||||
utils.IdentityFlag, |
||||
utils.DataDirFlag, |
||||
utils.BootnodesFlag, |
||||
utils.KeyStoreDirFlag, |
||||
utils.ListenPortFlag, |
||||
utils.DiscoveryV5Flag, |
||||
utils.NetrestrictFlag, |
||||
utils.NodeKeyFileFlag, |
||||
utils.NodeKeyHexFlag, |
||||
utils.MaxPeersFlag, |
||||
utils.NATFlag, |
||||
utils.IPCDisabledFlag, |
||||
utils.IPCPathFlag, |
||||
utils.PasswordFileFlag, |
||||
// bzzd-specific flags
|
||||
CorsStringFlag, |
||||
EnsAPIFlag, |
||||
SwarmTomlConfigPathFlag, |
||||
SwarmSwapEnabledFlag, |
||||
SwarmSwapAPIFlag, |
||||
SwarmSyncDisabledFlag, |
||||
SwarmSyncUpdateDelay, |
||||
SwarmMaxStreamPeerServersFlag, |
||||
SwarmLightNodeEnabled, |
||||
SwarmDeliverySkipCheckFlag, |
||||
SwarmListenAddrFlag, |
||||
SwarmPortFlag, |
||||
SwarmAccountFlag, |
||||
SwarmNetworkIdFlag, |
||||
ChequebookAddrFlag, |
||||
// upload flags
|
||||
SwarmApiFlag, |
||||
SwarmRecursiveFlag, |
||||
SwarmWantManifestFlag, |
||||
SwarmUploadDefaultPath, |
||||
SwarmUpFromStdinFlag, |
||||
SwarmUploadMimeType, |
||||
// bootnode mode
|
||||
SwarmBootnodeModeFlag, |
||||
// storage flags
|
||||
SwarmStorePath, |
||||
SwarmStoreCapacity, |
||||
SwarmStoreCacheCapacity, |
||||
SwarmGlobalStoreAPIFlag, |
||||
} |
||||
rpcFlags := []cli.Flag{ |
||||
utils.WSEnabledFlag, |
||||
utils.WSListenAddrFlag, |
||||
utils.WSPortFlag, |
||||
utils.WSApiFlag, |
||||
utils.WSAllowedOriginsFlag, |
||||
} |
||||
app.Flags = append(app.Flags, rpcFlags...) |
||||
app.Flags = append(app.Flags, debug.Flags...) |
||||
app.Flags = append(app.Flags, swarmmetrics.Flags...) |
||||
app.Flags = append(app.Flags, tracing.Flags...) |
||||
app.Before = func(ctx *cli.Context) error { |
||||
runtime.GOMAXPROCS(runtime.NumCPU()) |
||||
if err := debug.Setup(ctx, ""); err != nil { |
||||
return err |
||||
} |
||||
swarmmetrics.Setup(ctx) |
||||
tracing.Setup(ctx) |
||||
return nil |
||||
} |
||||
app.After = func(ctx *cli.Context) error { |
||||
debug.Exit() |
||||
return nil |
||||
} |
||||
} |
||||
|
||||
func main() { |
||||
if err := app.Run(os.Args); err != nil { |
||||
fmt.Fprintln(os.Stderr, err) |
||||
os.Exit(1) |
||||
} |
||||
} |
||||
|
||||
func keys(ctx *cli.Context) error { |
||||
privateKey := getPrivKey(ctx) |
||||
pubkey := crypto.FromECDSAPub(&privateKey.PublicKey) |
||||
pubkeyhex := hex.EncodeToString(pubkey) |
||||
pubCompressed := hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)) |
||||
bzzkey := crypto.Keccak256Hash(pubkey).Hex() |
||||
|
||||
if !ctx.Bool(SwarmCompressedFlag.Name) { |
||||
fmt.Println(fmt.Sprintf("bzzkey=%s", bzzkey[2:])) |
||||
fmt.Println(fmt.Sprintf("publicKey=%s", pubkeyhex)) |
||||
} |
||||
fmt.Println(fmt.Sprintf("publicKeyCompressed=%s", pubCompressed)) |
||||
|
||||
return nil |
||||
} |
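// Illustrative sketch (not part of the original file; the helper name is
// hypothetical): the bzz key printed by print-keys above is simply the
// Keccak256 hash of the uncompressed public key, with the "0x" prefix dropped.
func exampleBzzKey(priv *ecdsa.PrivateKey) string {
	pub := crypto.FromECDSAPub(&priv.PublicKey) // uncompressed 65-byte public key
	return crypto.Keccak256Hash(pub).Hex()[2:]  // strip the leading "0x"
}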
||||
|
||||
func version(ctx *cli.Context) error { |
||||
fmt.Println(strings.Title(clientIdentifier)) |
||||
fmt.Println("Version:", sv.VersionWithMeta) |
||||
if gitCommit != "" { |
||||
fmt.Println("Git Commit:", gitCommit) |
||||
} |
||||
fmt.Println("Go Version:", runtime.Version()) |
||||
fmt.Println("OS:", runtime.GOOS) |
||||
return nil |
||||
} |
||||
|
||||
func bzzd(ctx *cli.Context) error { |
||||
//build a valid bzzapi.Config from all available sources:
|
||||
//default config, file config, command line and env vars
|
||||
|
||||
bzzconfig, err := buildConfig(ctx) |
||||
if err != nil { |
||||
utils.Fatalf("unable to configure swarm: %v", err) |
||||
} |
||||
|
||||
cfg := defaultNodeConfig |
||||
|
||||
//pss operates on ws
|
||||
cfg.WSModules = append(cfg.WSModules, "pss") |
||||
|
||||
//geth only supports --datadir via command line
|
||||
//in order to be consistent within swarm, if we pass --datadir via environment variable
|
||||
//or via config file, we get the same directory for geth and swarm
|
||||
if _, err := os.Stat(bzzconfig.Path); err == nil { |
||||
cfg.DataDir = bzzconfig.Path |
||||
} |
||||
|
||||
//optionally set the bootnodes before configuring the node
|
||||
setSwarmBootstrapNodes(ctx, &cfg) |
||||
//setup the ethereum node
|
||||
utils.SetNodeConfig(ctx, &cfg) |
||||
|
||||
//disable dynamic dialing from p2p/discovery
|
||||
cfg.P2P.NoDial = true |
||||
|
||||
stack, err := node.New(&cfg) |
||||
if err != nil { |
||||
utils.Fatalf("can't create node: %v", err) |
||||
} |
||||
defer stack.Close() |
||||
|
||||
//a few steps need to be done after the config phase is completed,
|
||||
//due to overriding behavior
|
||||
err = initSwarmNode(bzzconfig, stack, ctx, &cfg) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
//register BZZ as node.Service in the ethereum node
|
||||
registerBzzService(bzzconfig, stack) |
||||
//start the node
|
||||
utils.StartNode(stack) |
||||
|
||||
go func() { |
||||
sigc := make(chan os.Signal, 1) |
||||
signal.Notify(sigc, syscall.SIGTERM) |
||||
defer signal.Stop(sigc) |
||||
<-sigc |
||||
log.Info("Got sigterm, shutting swarm down...") |
||||
stack.Stop() |
||||
}() |
||||
|
||||
// add swarm bootnodes, because swarm doesn't use the p2p package's discv5 discovery
|
||||
go func() { |
||||
s := stack.Server() |
||||
|
||||
for _, n := range cfg.P2P.BootstrapNodes { |
||||
s.AddPeer(n) |
||||
} |
||||
}() |
||||
|
||||
stack.Wait() |
||||
return nil |
||||
} |
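// Example daemon invocation (illustrative; the account value, data directory
// and port are placeholders):
//
//	swarm --bzzaccount <keystore-address-or-index> --datadir <dir> --bzzport 8500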
||||
|
||||
func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) { |
||||
//define the swarm service boot function
|
||||
boot := func(_ *node.ServiceContext) (node.Service, error) { |
||||
var nodeStore *mock.NodeStore |
||||
if bzzconfig.GlobalStoreAPI != "" { |
||||
// connect to global store
|
||||
client, err := rpc.Dial(bzzconfig.GlobalStoreAPI) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("global store: %v", err) |
||||
} |
||||
globalStore := mockrpc.NewGlobalStore(client) |
||||
// create a node store for this swarm key on global store
|
||||
nodeStore = globalStore.NewNodeStore(common.HexToAddress(bzzconfig.BzzKey)) |
||||
} |
||||
return swarm.NewSwarm(bzzconfig, nodeStore) |
||||
} |
||||
//register within the ethereum node
|
||||
if err := stack.Register(boot); err != nil { |
||||
utils.Fatalf("Failed to register the Swarm service: %v", err) |
||||
} |
||||
} |
||||
|
||||
func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.PrivateKey { |
||||
//an account is mandatory
|
||||
if bzzaccount == "" { |
||||
utils.Fatalf(SwarmErrNoBZZAccount) |
||||
} |
||||
// Try to load the arg as a hex key file.
|
||||
if key, err := crypto.LoadECDSA(bzzaccount); err == nil { |
||||
log.Info("Swarm account key loaded", "address", crypto.PubkeyToAddress(key.PublicKey)) |
||||
return key |
||||
} |
||||
// Otherwise try getting it from the keystore.
|
||||
am := stack.AccountManager() |
||||
ks := am.Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) |
||||
|
||||
return decryptStoreAccount(ks, bzzaccount, utils.MakePasswordList(ctx)) |
||||
} |
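// For context (explanatory note, not in the original file), the bzzaccount
// value may take one of three forms, all illustrated with placeholders:
//
//	/path/to/keyfile    a file holding a raw hex-encoded private key (loaded above)
//	0x<hex-address>     an address held in the keystore, resolved by decryptStoreAccount
//	<positive index>    an index into the keystore accounts, e.g. 3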
||||
|
||||
// getPrivKey returns the private key of the specified bzzaccount
|
||||
// Used only by client commands, such as `feed`
|
||||
func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey { |
||||
// booting up the swarm node just as we do in bzzd action
|
||||
bzzconfig, err := buildConfig(ctx) |
||||
if err != nil { |
||||
utils.Fatalf("unable to configure swarm: %v", err) |
||||
} |
||||
cfg := defaultNodeConfig |
||||
if _, err := os.Stat(bzzconfig.Path); err == nil { |
||||
cfg.DataDir = bzzconfig.Path |
||||
} |
||||
utils.SetNodeConfig(ctx, &cfg) |
||||
stack, err := node.New(&cfg) |
||||
if err != nil { |
||||
utils.Fatalf("can't create node: %v", err) |
||||
} |
||||
defer stack.Close() |
||||
|
||||
return getAccount(bzzconfig.BzzAccount, ctx, stack) |
||||
} |
||||
|
||||
func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []string) *ecdsa.PrivateKey { |
||||
var a accounts.Account |
||||
var err error |
||||
if common.IsHexAddress(account) { |
||||
a, err = ks.Find(accounts.Account{Address: common.HexToAddress(account)}) |
||||
} else if ix, ixerr := strconv.Atoi(account); ixerr == nil && ix > 0 { |
||||
if accounts := ks.Accounts(); len(accounts) > ix { |
||||
a = accounts[ix] |
||||
} else { |
||||
err = fmt.Errorf("index %d higher than number of accounts %d", ix, len(accounts)) |
||||
} |
||||
} else { |
||||
utils.Fatalf("Can't find swarm account key %s", account) |
||||
} |
||||
if err != nil { |
||||
utils.Fatalf("Can't find swarm account key: %v - Is the provided bzzaccount(%s) from the right datadir/Path?", err, account) |
||||
} |
||||
keyjson, err := ioutil.ReadFile(a.URL.Path) |
||||
if err != nil { |
||||
utils.Fatalf("Can't load swarm account key: %v", err) |
||||
} |
||||
for i := 0; i < 3; i++ { |
||||
password := getPassPhrase(fmt.Sprintf("Unlocking swarm account %s [%d/3]", a.Address.Hex(), i+1), i, passwords) |
||||
key, err := keystore.DecryptKey(keyjson, password) |
||||
if err == nil { |
||||
return key.PrivateKey |
||||
} |
||||
} |
||||
utils.Fatalf("Can't decrypt swarm account key") |
||||
return nil |
||||
} |
||||
|
||||
// getPassPhrase retrieves the password associated with the bzz account, either by fetching it
|
||||
// from a list of pre-loaded passwords, or by requesting it interactively from the user.
|
||||
func getPassPhrase(prompt string, i int, passwords []string) string { |
||||
// non-interactive
|
||||
if len(passwords) > 0 { |
||||
if i < len(passwords) { |
||||
return passwords[i] |
||||
} |
||||
return passwords[len(passwords)-1] |
||||
} |
||||
|
||||
// fallback to interactive mode
|
||||
if prompt != "" { |
||||
fmt.Println(prompt) |
||||
} |
||||
password, err := console.Stdin.PromptPassword("Passphrase: ") |
||||
if err != nil { |
||||
utils.Fatalf("Failed to read passphrase: %v", err) |
||||
} |
||||
return password |
||||
} |
||||
|
||||
// addDefaultHelpSubcommands scans through the defined CLI commands and adds
|
||||
// a basic help subcommand to each command that has subcommands.
|
||||
// if a help command is already defined, it will take precedence over the default.
|
||||
func addDefaultHelpSubcommands(commands []cli.Command) { |
||||
for i := range commands { |
||||
cmd := &commands[i] |
||||
if cmd.Subcommands != nil { |
||||
cmd.Subcommands = append(cmd.Subcommands, defaultSubcommandHelp) |
||||
addDefaultHelpSubcommands(cmd.Subcommands) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) { |
||||
if ctx.GlobalIsSet(utils.BootnodesFlag.Name) || ctx.GlobalIsSet(utils.BootnodesV4Flag.Name) { |
||||
return |
||||
} |
||||
|
||||
cfg.P2P.BootstrapNodes = []*enode.Node{} |
||||
|
||||
for _, url := range SwarmBootnodes { |
||||
node, err := enode.ParseV4(url) |
||||
if err != nil { |
||||
log.Error("Bootstrap URL invalid", "enode", url, "err", err) |
||||
} |
||||
cfg.P2P.BootstrapNodes = append(cfg.P2P.BootstrapNodes, node) |
||||
} |
||||
|
||||
} |
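// Illustrative sketch (hypothetical helper, not in the original file): parsing
// a single bootnode URL the way setSwarmBootstrapNodes does, but returning an
// error instead of appending a possibly nil node after a failed parse.
func exampleParseBootnode(url string) (*enode.Node, error) {
	n, err := enode.ParseV4(url)
	if err != nil {
		return nil, fmt.Errorf("invalid bootstrap URL %q: %v", url, err)
	}
	return n, nil
}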
@ -1,353 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Command MANIFEST update
|
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
"strings" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
swarm "github.com/ethereum/go-ethereum/swarm/api/client" |
||||
"gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var manifestCommand = cli.Command{ |
||||
Name: "manifest", |
||||
CustomHelpTemplate: helpTemplate, |
||||
Usage: "perform operations on swarm manifests", |
||||
ArgsUsage: "COMMAND", |
||||
Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove", |
||||
Subcommands: []cli.Command{ |
||||
{ |
||||
Action: manifestAdd, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "add", |
||||
Usage: "add a new path to the manifest", |
||||
ArgsUsage: "<MANIFEST> <path> <hash>", |
||||
Description: "Adds a new path to the manifest", |
||||
}, |
||||
{ |
||||
Action: manifestUpdate, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "update", |
||||
Usage: "update the hash for an already existing path in the manifest", |
||||
ArgsUsage: "<MANIFEST> <path> <newhash>", |
||||
Description: "Update the hash for an already existing path in the manifest", |
||||
}, |
||||
{ |
||||
Action: manifestRemove, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "remove", |
||||
Usage: "removes a path from the manifest", |
||||
ArgsUsage: "<MANIFEST> <path>", |
||||
Description: "Removes a path from the manifest", |
||||
}, |
||||
}, |
||||
} |
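// Example usage (illustrative; MANIFEST, path and hash values are placeholders
// matching the ArgsUsage strings above):
//
//	swarm manifest add    <MANIFEST> <path> <hash>
//	swarm manifest update <MANIFEST> <path> <newhash>
//	swarm manifest remove <MANIFEST> <path>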
||||
|
||||
// manifestAdd adds a new entry to the manifest at the given path.
|
||||
// The new entry hash, the last argument, must be the hash of a manifest
// with only one entry, whose metadata will be added to the original manifest.
// On success, this function prints the new (updated) manifest's hash.
|
||||
func manifestAdd(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
if len(args) != 3 { |
||||
utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>") |
||||
} |
||||
|
||||
var ( |
||||
mhash = args[0] |
||||
path = args[1] |
||||
hash = args[2] |
||||
) |
||||
|
||||
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") |
||||
client := swarm.NewClient(bzzapi) |
||||
|
||||
m, _, err := client.DownloadManifest(hash) |
||||
if err != nil { |
||||
utils.Fatalf("Error downloading manifest to add: %v", err) |
||||
} |
||||
l := len(m.Entries) |
||||
if l == 0 { |
||||
utils.Fatalf("No entries in manifest %s", hash) |
||||
} else if l > 1 { |
||||
utils.Fatalf("Too many entries in manifest %s", hash) |
||||
} |
||||
|
||||
newManifest := addEntryToManifest(client, mhash, path, m.Entries[0]) |
||||
fmt.Println(newManifest) |
||||
} |
||||
|
||||
// manifestUpdate replaces an existing entry of the manifest at the given path.
|
||||
// The new entry hash, the last argument, must be the hash of a manifest
// with only one entry, whose metadata will be added to the original manifest.
// On success, this function prints the hash of the updated manifest.
|
||||
func manifestUpdate(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
if len(args) != 3 { |
||||
utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>") |
||||
} |
||||
|
||||
var ( |
||||
mhash = args[0] |
||||
path = args[1] |
||||
hash = args[2] |
||||
) |
||||
|
||||
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") |
||||
client := swarm.NewClient(bzzapi) |
||||
|
||||
m, _, err := client.DownloadManifest(hash) |
||||
if err != nil { |
||||
utils.Fatalf("Error downloading manifest to update: %v", err) |
||||
} |
||||
l := len(m.Entries) |
||||
if l == 0 { |
||||
utils.Fatalf("No entries in manifest %s", hash) |
||||
} else if l > 1 { |
||||
utils.Fatalf("Too many entries in manifest %s", hash) |
||||
} |
||||
|
||||
newManifest, _, defaultEntryUpdated := updateEntryInManifest(client, mhash, path, m.Entries[0], true) |
||||
if defaultEntryUpdated { |
||||
// Print informational message to stderr
|
||||
// allowing the user to get the new manifest hash from stdout
|
||||
// without the need to parse the complete output.
|
||||
fmt.Fprintln(os.Stderr, "Manifest default entry is updated, too") |
||||
} |
||||
fmt.Println(newManifest) |
||||
} |
||||
|
||||
// manifestRemove removes an existing entry of the manifest at the given path.
|
||||
// On success, this function prints the hash of the manifest which no longer
// contains the path.
|
||||
func manifestRemove(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
if len(args) != 2 { |
||||
utils.Fatalf("Need exactly two arguments <MHASH> <path>") |
||||
} |
||||
|
||||
var ( |
||||
mhash = args[0] |
||||
path = args[1] |
||||
) |
||||
|
||||
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") |
||||
client := swarm.NewClient(bzzapi) |
||||
|
||||
newManifest := removeEntryFromManifest(client, mhash, path) |
||||
fmt.Println(newManifest) |
||||
} |
||||
|
||||
func addEntryToManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry) string { |
||||
var longestPathEntry = api.ManifestEntry{} |
||||
|
||||
mroot, isEncrypted, err := client.DownloadManifest(mhash) |
||||
if err != nil { |
||||
utils.Fatalf("Manifest download failed: %v", err) |
||||
} |
||||
|
||||
// See if the path is in this Manifest or whether we have to dig deeper
|
||||
for _, e := range mroot.Entries { |
||||
if path == e.Path { |
||||
utils.Fatalf("Path %s already present, not adding anything", path) |
||||
} else { |
||||
if e.ContentType == api.ManifestType { |
||||
prfxlen := strings.HasPrefix(path, e.Path) |
||||
if prfxlen && len(path) > len(longestPathEntry.Path) { |
||||
longestPathEntry = e |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
if longestPathEntry.Path != "" { |
||||
// Load the child Manifest and add the entry there
|
||||
newPath := path[len(longestPathEntry.Path):] |
||||
newHash := addEntryToManifest(client, longestPathEntry.Hash, newPath, entry) |
||||
|
||||
// Replace the hash for parent Manifests
|
||||
newMRoot := &api.Manifest{} |
||||
for _, e := range mroot.Entries { |
||||
if longestPathEntry.Path == e.Path { |
||||
e.Hash = newHash |
||||
} |
||||
newMRoot.Entries = append(newMRoot.Entries, e) |
||||
} |
||||
mroot = newMRoot |
||||
} else { |
||||
// Add the entry in the leaf Manifest
|
||||
entry.Path = path |
||||
mroot.Entries = append(mroot.Entries, entry) |
||||
} |
||||
|
||||
newManifestHash, err := client.UploadManifest(mroot, isEncrypted) |
||||
if err != nil { |
||||
utils.Fatalf("Manifest upload failed: %v", err) |
||||
} |
||||
return newManifestHash |
||||
} |
||||
|
||||
// updateEntryInManifest updates an existing entry at the given path with a new one in the manifest with the provided mhash,
|
||||
// finding the path recursively through all nested manifests. Argument isRoot is used for default
|
||||
// entry update detection. If the updated entry has the same hash as the default entry, then the
|
||||
// default entry in root manifest will be updated too.
|
||||
// Returned values are the new manifest hash, hash of the entry that was replaced by the new entry and
|
||||
// a bool that is true if the default entry was updated.
|
||||
func updateEntryInManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry, isRoot bool) (newManifestHash, oldHash string, defaultEntryUpdated bool) { |
||||
var ( |
||||
newEntry = api.ManifestEntry{} |
||||
longestPathEntry = api.ManifestEntry{} |
||||
) |
||||
|
||||
mroot, isEncrypted, err := client.DownloadManifest(mhash) |
||||
if err != nil { |
||||
utils.Fatalf("Manifest download failed: %v", err) |
||||
} |
||||
|
||||
// See if the path is in this Manifest or whether we have to dig deeper
|
||||
for _, e := range mroot.Entries { |
||||
if path == e.Path { |
||||
newEntry = e |
||||
// keep the reference of the hash of the entry that should be replaced
|
||||
// for default entry detection
|
||||
oldHash = e.Hash |
||||
} else { |
||||
if e.ContentType == api.ManifestType { |
||||
prfxlen := strings.HasPrefix(path, e.Path) |
||||
if prfxlen && len(path) > len(longestPathEntry.Path) { |
||||
longestPathEntry = e |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
if longestPathEntry.Path == "" && newEntry.Path == "" { |
||||
utils.Fatalf("Path %s not present in the Manifest, not setting anything", path) |
||||
} |
||||
|
||||
if longestPathEntry.Path != "" { |
||||
// Load the child Manifest and add the entry there
|
||||
newPath := path[len(longestPathEntry.Path):] |
||||
var newHash string |
||||
newHash, oldHash, _ = updateEntryInManifest(client, longestPathEntry.Hash, newPath, entry, false) |
||||
|
||||
// Replace the hash for parent Manifests
|
||||
newMRoot := &api.Manifest{} |
||||
for _, e := range mroot.Entries { |
||||
if longestPathEntry.Path == e.Path { |
||||
e.Hash = newHash |
||||
} |
||||
newMRoot.Entries = append(newMRoot.Entries, e) |
||||
|
||||
} |
||||
mroot = newMRoot |
||||
} |
||||
|
||||
// update the manifest if the new entry is found and
|
||||
// check if default entry should be updated
|
||||
if newEntry.Path != "" || isRoot { |
||||
// Replace the hash for leaf Manifest
|
||||
newMRoot := &api.Manifest{} |
||||
for _, e := range mroot.Entries { |
||||
if newEntry.Path == e.Path { |
||||
entry.Path = e.Path |
||||
newMRoot.Entries = append(newMRoot.Entries, entry) |
||||
} else if isRoot && e.Path == "" && e.Hash == oldHash { |
||||
entry.Path = e.Path |
||||
newMRoot.Entries = append(newMRoot.Entries, entry) |
||||
defaultEntryUpdated = true |
||||
} else { |
||||
newMRoot.Entries = append(newMRoot.Entries, e) |
||||
} |
||||
} |
||||
mroot = newMRoot |
||||
} |
||||
|
||||
newManifestHash, err = client.UploadManifest(mroot, isEncrypted) |
||||
if err != nil { |
||||
utils.Fatalf("Manifest upload failed: %v", err) |
||||
} |
||||
return newManifestHash, oldHash, defaultEntryUpdated |
||||
} |
||||
|
||||
func removeEntryFromManifest(client *swarm.Client, mhash, path string) string { |
||||
var ( |
||||
entryToRemove = api.ManifestEntry{} |
||||
longestPathEntry = api.ManifestEntry{} |
||||
) |
||||
|
||||
mroot, isEncrypted, err := client.DownloadManifest(mhash) |
||||
if err != nil { |
||||
utils.Fatalf("Manifest download failed: %v", err) |
||||
} |
||||
|
||||
// See if the path is in this Manifest or whether we have to dig deeper
|
||||
for _, entry := range mroot.Entries { |
||||
if path == entry.Path { |
||||
entryToRemove = entry |
||||
} else { |
||||
if entry.ContentType == api.ManifestType { |
||||
prfxlen := strings.HasPrefix(path, entry.Path) |
||||
if prfxlen && len(path) > len(longestPathEntry.Path) { |
||||
longestPathEntry = entry |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
if longestPathEntry.Path == "" && entryToRemove.Path == "" { |
||||
utils.Fatalf("Path %s not present in the Manifest, not removing anything", path) |
||||
} |
||||
|
||||
if longestPathEntry.Path != "" { |
||||
// Load the child Manifest and remove the entry there
|
||||
newPath := path[len(longestPathEntry.Path):] |
||||
newHash := removeEntryFromManifest(client, longestPathEntry.Hash, newPath) |
||||
|
||||
// Replace the hash for parent Manifests
|
||||
newMRoot := &api.Manifest{} |
||||
for _, entry := range mroot.Entries { |
||||
if longestPathEntry.Path == entry.Path { |
||||
entry.Hash = newHash |
||||
} |
||||
newMRoot.Entries = append(newMRoot.Entries, entry) |
||||
} |
||||
mroot = newMRoot |
||||
} |
||||
|
||||
if entryToRemove.Path != "" { |
||||
// remove the entry in this Manifest
|
||||
newMRoot := &api.Manifest{} |
||||
for _, entry := range mroot.Entries { |
||||
if entryToRemove.Path != entry.Path { |
||||
newMRoot.Entries = append(newMRoot.Entries, entry) |
||||
} |
||||
} |
||||
mroot = newMRoot |
||||
} |
||||
|
||||
newManifestHash, err := client.UploadManifest(mroot, isEncrypted) |
||||
if err != nil { |
||||
utils.Fatalf("Manifest upload failed: %v", err) |
||||
} |
||||
return newManifestHash |
||||
} |
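// Illustrative sketch (hypothetical helper, not in the original file): the
// longest-prefix selection that addEntryToManifest, updateEntryInManifest and
// removeEntryFromManifest each perform inline above, written out on its own.
// Note the loops above compare against len(path); this version compares the
// candidate prefix lengths directly.
func exampleLongestPrefixEntry(entries []api.ManifestEntry, path string) (longest api.ManifestEntry) {
	for _, e := range entries {
		if e.ContentType != api.ManifestType {
			continue // only nested manifests can be descended into
		}
		if strings.HasPrefix(path, e.Path) && len(e.Path) > len(longest.Path) {
			longest = e
		}
	}
	return longest
}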
@ -1,597 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"io/ioutil" |
||||
"os" |
||||
"path/filepath" |
||||
"runtime" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
swarm "github.com/ethereum/go-ethereum/swarm/api/client" |
||||
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" |
||||
) |
||||
|
||||
// TestManifestChange tests manifest add, update and remove
|
||||
// cli commands without encryption.
|
||||
func TestManifestChange(t *testing.T) { |
||||
if runtime.GOOS == "windows" { |
||||
t.Skip() |
||||
} |
||||
|
||||
testManifestChange(t, false) |
||||
} |
||||
|
||||
// TestManifestChangeEncrypted tests manifest add, update and remove
|
||||
// cli commands with encryption enabled.
|
||||
func TestManifestChangeEncrypted(t *testing.T) { |
||||
if runtime.GOOS == "windows" { |
||||
t.Skip() |
||||
} |
||||
|
||||
testManifestChange(t, true) |
||||
} |
||||
|
||||
// testManifestChange performs cli commands:
|
||||
// - manifest add
|
||||
// - manifest update
|
||||
// - manifest remove
|
||||
// on a manifest, testing the functionality of this
|
||||
// commands on paths that are in the root manifest or a nested one.
|
||||
// Argument encrypt controls whether to use encryption or not.
|
||||
func testManifestChange(t *testing.T, encrypt bool) { |
||||
t.Parallel() |
||||
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil) |
||||
defer srv.Close() |
||||
|
||||
tmp, err := ioutil.TempDir("", "swarm-manifest-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmp) |
||||
|
||||
origDir := filepath.Join(tmp, "orig") |
||||
if err := os.Mkdir(origDir, 0777); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
indexDataFilename := filepath.Join(origDir, "index.html") |
||||
err = ioutil.WriteFile(indexDataFilename, []byte("<h1>Test</h1>"), 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
// File paths robots.txt and robots.html share the same prefix "robots."
|
||||
// which will result in a manifest with a nested manifest under the path "robots.".
|
||||
// This will allow testing manifest changes on both root and nested manifest.
|
||||
err = ioutil.WriteFile(filepath.Join(origDir, "robots.txt"), []byte("Disallow: /"), 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
err = ioutil.WriteFile(filepath.Join(origDir, "robots.html"), []byte("<strong>No Robots Allowed</strong>"), 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
err = ioutil.WriteFile(filepath.Join(origDir, "mutants.txt"), []byte("Frank\nMarcus"), 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
args := []string{ |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"--recursive", |
||||
"--defaultpath", |
||||
indexDataFilename, |
||||
"up", |
||||
origDir, |
||||
} |
||||
if encrypt { |
||||
args = append(args, "--encrypt") |
||||
} |
||||
|
||||
origManifestHash := runSwarmExpectHash(t, args...) |
||||
|
||||
checkHashLength(t, origManifestHash, encrypt) |
||||
|
||||
client := swarm.NewClient(srv.URL) |
||||
|
||||
// upload a new file and use its manifest to add it to the original manifest.
|
||||
t.Run("add", func(t *testing.T) { |
||||
humansData := []byte("Ann\nBob") |
||||
humansDataFilename := filepath.Join(tmp, "humans.txt") |
||||
err = ioutil.WriteFile(humansDataFilename, humansData, 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
humansManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"up", |
||||
humansDataFilename, |
||||
) |
||||
|
||||
newManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"manifest", |
||||
"add", |
||||
origManifestHash, |
||||
"humans.txt", |
||||
humansManifestHash, |
||||
) |
||||
|
||||
checkHashLength(t, newManifestHash, encrypt) |
||||
|
||||
newManifest := downloadManifest(t, client, newManifestHash, encrypt) |
||||
|
||||
var found bool |
||||
for _, e := range newManifest.Entries { |
||||
if e.Path == "humans.txt" { |
||||
found = true |
||||
if e.Size != int64(len(humansData)) { |
||||
t.Errorf("expected humans.txt size %v, got %v", len(humansData), e.Size) |
||||
} |
||||
if e.ModTime.IsZero() { |
||||
t.Errorf("got zero mod time for humans.txt") |
||||
} |
||||
ct := "text/plain; charset=utf-8" |
||||
if e.ContentType != ct { |
||||
t.Errorf("expected content type %q, got %q", ct, e.ContentType) |
||||
} |
||||
break |
||||
} |
||||
} |
||||
if !found { |
||||
t.Fatal("no humans.txt in new manifest") |
||||
} |
||||
|
||||
checkFile(t, client, newManifestHash, "humans.txt", humansData) |
||||
}) |
||||
|
||||
// upload a new file and use its manifest to add it to the original manifest,
|
||||
// but ensure that the file will be in the nested manifest of the original one.
|
||||
t.Run("add nested", func(t *testing.T) { |
||||
robotsData := []byte(`{"disallow": "/"}`) |
||||
robotsDataFilename := filepath.Join(tmp, "robots.json") |
||||
err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
robotsManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"up", |
||||
robotsDataFilename, |
||||
) |
||||
|
||||
newManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"manifest", |
||||
"add", |
||||
origManifestHash, |
||||
"robots.json", |
||||
robotsManifestHash, |
||||
) |
||||
|
||||
checkHashLength(t, newManifestHash, encrypt) |
||||
|
||||
newManifest := downloadManifest(t, client, newManifestHash, encrypt) |
||||
|
||||
var found bool |
||||
loop: |
||||
for _, e := range newManifest.Entries { |
||||
if e.Path == "robots." { |
||||
nestedManifest := downloadManifest(t, client, e.Hash, encrypt) |
||||
for _, e := range nestedManifest.Entries { |
||||
if e.Path == "json" { |
||||
found = true |
||||
if e.Size != int64(len(robotsData)) { |
||||
t.Errorf("expected robots.json size %v, got %v", len(robotsData), e.Size) |
||||
} |
||||
if e.ModTime.IsZero() { |
||||
t.Errorf("got zero mod time for robots.json") |
||||
} |
||||
ct := "application/json" |
||||
if e.ContentType != ct { |
||||
t.Errorf("expected content type %q, got %q", ct, e.ContentType) |
||||
} |
||||
break loop |
||||
} |
||||
} |
||||
} |
||||
} |
||||
if !found { |
||||
t.Fatal("no robots.json in new manifest") |
||||
} |
||||
|
||||
checkFile(t, client, newManifestHash, "robots.json", robotsData) |
||||
}) |
||||
|
||||
// upload a new file and use its manifest to change the file in the original manifest.
|
||||
t.Run("update", func(t *testing.T) { |
||||
indexData := []byte("<h1>Ethereum Swarm</h1>") |
||||
indexDataFilename := filepath.Join(tmp, "index.html") |
||||
err = ioutil.WriteFile(indexDataFilename, indexData, 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
indexManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"up", |
||||
indexDataFilename, |
||||
) |
||||
|
||||
newManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"manifest", |
||||
"update", |
||||
origManifestHash, |
||||
"index.html", |
||||
indexManifestHash, |
||||
) |
||||
|
||||
checkHashLength(t, newManifestHash, encrypt) |
||||
|
||||
newManifest := downloadManifest(t, client, newManifestHash, encrypt) |
||||
|
||||
var found bool |
||||
for _, e := range newManifest.Entries { |
||||
if e.Path == "index.html" { |
||||
found = true |
||||
if e.Size != int64(len(indexData)) { |
||||
t.Errorf("expected index.html size %v, got %v", len(indexData), e.Size) |
||||
} |
||||
if e.ModTime.IsZero() { |
||||
t.Errorf("got zero mod time for index.html") |
||||
} |
||||
ct := "text/html; charset=utf-8" |
||||
if e.ContentType != ct { |
||||
t.Errorf("expected content type %q, got %q", ct, e.ContentType) |
||||
} |
||||
break |
||||
} |
||||
} |
||||
if !found { |
||||
t.Fatal("no index.html in new manifest") |
||||
} |
||||
|
||||
checkFile(t, client, newManifestHash, "index.html", indexData) |
||||
|
||||
// check default entry change
|
||||
checkFile(t, client, newManifestHash, "", indexData) |
||||
}) |
||||
|
||||
// upload a new file and use its manifest to change the file in the original manifest,
|
||||
// but ensure that the file is in the nested manifest of the original one.
|
||||
t.Run("update nested", func(t *testing.T) { |
||||
robotsData := []byte(`<strong>Only humans allowed!!!</strong>`) |
||||
robotsDataFilename := filepath.Join(tmp, "robots.html") |
||||
err = ioutil.WriteFile(robotsDataFilename, robotsData, 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
humansManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"up", |
||||
robotsDataFilename, |
||||
) |
||||
|
||||
newManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"manifest", |
||||
"update", |
||||
origManifestHash, |
||||
"robots.html", |
||||
humansManifestHash, |
||||
) |
||||
|
||||
checkHashLength(t, newManifestHash, encrypt) |
||||
|
||||
newManifest := downloadManifest(t, client, newManifestHash, encrypt) |
||||
|
||||
var found bool |
||||
loop: |
||||
for _, e := range newManifest.Entries { |
||||
if e.Path == "robots." { |
||||
nestedManifest := downloadManifest(t, client, e.Hash, encrypt) |
||||
for _, e := range nestedManifest.Entries { |
||||
if e.Path == "html" { |
||||
found = true |
||||
if e.Size != int64(len(robotsData)) { |
||||
t.Errorf("expected robots.html size %v, got %v", len(robotsData), e.Size) |
||||
} |
||||
if e.ModTime.IsZero() { |
||||
t.Errorf("got zero mod time for robots.html") |
||||
} |
||||
ct := "text/html; charset=utf-8" |
||||
if e.ContentType != ct { |
||||
t.Errorf("expected content type %q, got %q", ct, e.ContentType) |
||||
} |
||||
break loop |
||||
} |
||||
} |
||||
} |
||||
} |
||||
if !found { |
||||
t.Fatal("no robots.html in new manifest") |
||||
} |
||||
|
||||
checkFile(t, client, newManifestHash, "robots.html", robotsData) |
||||
}) |
||||
|
||||
// remove a file from the manifest.
|
||||
t.Run("remove", func(t *testing.T) { |
||||
newManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"manifest", |
||||
"remove", |
||||
origManifestHash, |
||||
"mutants.txt", |
||||
) |
||||
|
||||
checkHashLength(t, newManifestHash, encrypt) |
||||
|
||||
newManifest := downloadManifest(t, client, newManifestHash, encrypt) |
||||
|
||||
var found bool |
||||
for _, e := range newManifest.Entries { |
||||
if e.Path == "mutants.txt" { |
||||
found = true |
||||
break |
||||
} |
||||
} |
||||
if found { |
||||
t.Fatal("mutants.txt is not removed") |
||||
} |
||||
}) |
||||
|
||||
// remove a file from the manifest, but ensure that the file is in
|
||||
// the nested manifest of the original one.
|
||||
t.Run("remove nested", func(t *testing.T) { |
||||
newManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"manifest", |
||||
"remove", |
||||
origManifestHash, |
||||
"robots.html", |
||||
) |
||||
|
||||
checkHashLength(t, newManifestHash, encrypt) |
||||
|
||||
newManifest := downloadManifest(t, client, newManifestHash, encrypt) |
||||
|
||||
var found bool |
||||
loop: |
||||
for _, e := range newManifest.Entries { |
||||
if e.Path == "robots." { |
||||
nestedManifest := downloadManifest(t, client, e.Hash, encrypt) |
||||
for _, e := range nestedManifest.Entries { |
||||
if e.Path == "html" { |
||||
found = true |
||||
break loop |
||||
} |
||||
} |
||||
} |
||||
} |
||||
if found { |
||||
t.Fatal("robots.html is not removed") |
||||
} |
||||
}) |
||||
} |
||||
|
||||
// TestNestedDefaultEntryUpdate tests if the default entry is updated
|
||||
// if the file in nested manifest used for it is also updated.
|
||||
func TestNestedDefaultEntryUpdate(t *testing.T) { |
||||
if runtime.GOOS == "windows" { |
||||
t.Skip() |
||||
} |
||||
|
||||
testNestedDefaultEntryUpdate(t, false) |
||||
} |
||||
|
||||
// TestNestedDefaultEntryUpdateEncrypted tests if the default entry
|
||||
// of encrypted upload is updated if the file in nested manifest
|
||||
// used for it is also updated.
|
||||
func TestNestedDefaultEntryUpdateEncrypted(t *testing.T) { |
||||
if runtime.GOOS == "windows" { |
||||
t.Skip() |
||||
} |
||||
|
||||
testNestedDefaultEntryUpdate(t, true) |
||||
} |
||||
|
||||
func testNestedDefaultEntryUpdate(t *testing.T, encrypt bool) { |
||||
t.Parallel() |
||||
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil) |
||||
defer srv.Close() |
||||
|
||||
tmp, err := ioutil.TempDir("", "swarm-manifest-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmp) |
||||
|
||||
origDir := filepath.Join(tmp, "orig") |
||||
if err := os.Mkdir(origDir, 0777); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
indexData := []byte("<h1>Test</h1>") |
||||
indexDataFilename := filepath.Join(origDir, "index.html") |
||||
err = ioutil.WriteFile(indexDataFilename, indexData, 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
// Add another file with common prefix as the default entry to test updates of
|
||||
// default entry with nested manifests.
|
||||
err = ioutil.WriteFile(filepath.Join(origDir, "index.txt"), []byte("Test"), 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
args := []string{ |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"--recursive", |
||||
"--defaultpath", |
||||
indexDataFilename, |
||||
"up", |
||||
origDir, |
||||
} |
||||
if encrypt { |
||||
args = append(args, "--encrypt") |
||||
} |
||||
|
||||
origManifestHash := runSwarmExpectHash(t, args...) |
||||
|
||||
checkHashLength(t, origManifestHash, encrypt) |
||||
|
||||
client := swarm.NewClient(srv.URL) |
||||
|
||||
newIndexData := []byte("<h1>Ethereum Swarm</h1>") |
||||
newIndexDataFilename := filepath.Join(tmp, "index.html") |
||||
err = ioutil.WriteFile(newIndexDataFilename, newIndexData, 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
newIndexManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"up", |
||||
newIndexDataFilename, |
||||
) |
||||
|
||||
newManifestHash := runSwarmExpectHash(t, |
||||
"--bzzapi", |
||||
srv.URL, |
||||
"manifest", |
||||
"update", |
||||
origManifestHash, |
||||
"index.html", |
||||
newIndexManifestHash, |
||||
) |
||||
|
||||
checkHashLength(t, newManifestHash, encrypt) |
||||
|
||||
newManifest := downloadManifest(t, client, newManifestHash, encrypt) |
||||
|
||||
var found bool |
||||
for _, e := range newManifest.Entries { |
||||
if e.Path == "index." { |
||||
found = true |
||||
newManifest = downloadManifest(t, client, e.Hash, encrypt) |
||||
break |
||||
} |
||||
} |
||||
if !found { |
||||
t.Fatal("no index. path in new manifest") |
||||
} |
||||
|
||||
found = false |
||||
for _, e := range newManifest.Entries { |
||||
if e.Path == "html" { |
||||
found = true |
||||
if e.Size != int64(len(newIndexData)) { |
||||
t.Errorf("expected index.html size %v, got %v", len(newIndexData), e.Size) |
||||
} |
||||
if e.ModTime.IsZero() { |
||||
t.Errorf("got zero mod time for index.html") |
||||
} |
||||
ct := "text/html; charset=utf-8" |
||||
if e.ContentType != ct { |
||||
t.Errorf("expected content type %q, got %q", ct, e.ContentType) |
||||
} |
||||
break |
||||
} |
||||
} |
||||
if !found { |
||||
t.Fatal("no html in new manifest") |
||||
} |
||||
|
||||
checkFile(t, client, newManifestHash, "index.html", newIndexData) |
||||
|
||||
// check default entry change
|
||||
checkFile(t, client, newManifestHash, "", newIndexData) |
||||
} |
||||
|
||||
func runSwarmExpectHash(t *testing.T, args ...string) (hash string) { |
||||
t.Helper() |
||||
hashRegexp := `[a-f\d]{64,128}` |
||||
up := runSwarm(t, args...) |
||||
_, matches := up.ExpectRegexp(hashRegexp) |
||||
up.ExpectExit() |
||||
|
||||
if len(matches) < 1 { |
||||
t.Fatal("no matches found") |
||||
} |
||||
return matches[0] |
||||
} |
||||
|
||||
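// Explanatory note (not in the original file): an unencrypted Swarm reference
// is a 32-byte hash, i.e. 64 hex characters, while an encrypted reference also
// carries a 32-byte decryption key, i.e. 128 hex characters in total, which is
// what checkHashLength below asserts.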
func checkHashLength(t *testing.T, hash string, encrypted bool) { |
||||
t.Helper() |
||||
l := len(hash) |
||||
if encrypted && l != 128 { |
||||
t.Errorf("expected hash length 128, got %v", l) |
||||
} |
||||
if !encrypted && l != 64 { |
||||
t.Errorf("expected hash length 64, got %v", l) |
||||
} |
||||
} |
||||
|
||||
func downloadManifest(t *testing.T, client *swarm.Client, hash string, encrypted bool) (manifest *api.Manifest) { |
||||
t.Helper() |
||||
m, isEncrypted, err := client.DownloadManifest(hash) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if encrypted != isEncrypted { |
||||
t.Error("new manifest encryption flag is not correct") |
||||
} |
||||
return m |
||||
} |
||||
|
||||
func checkFile(t *testing.T, client *swarm.Client, hash, path string, expected []byte) { |
||||
t.Helper() |
||||
f, err := client.Download(hash, path) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
got, err := ioutil.ReadAll(f) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(got, expected) { |
||||
t.Errorf("expected file content %q, got %q", expected, got) |
||||
} |
||||
} |
@ -1,124 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
package main |
||||
|
||||
// The standard "mime" package relies on system settings, see mime.osInitMime.
// Swarm runs on many OS/platform/Docker combinations and must behave the same
// everywhere, so this command generates code that registers a common set of
// MIME types based on a mime.types file.
//
// The mime.types file is provided by mailcap, which follows
// https://www.iana.org/assignments/media-types/media-types.xhtml
//
// Get the latest version of the mime.types file with:
// docker run --rm -v $(pwd):/tmp alpine:edge /bin/sh -c "apk add -U mailcap; mv /etc/mime.types /tmp"
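//
// Example invocation of this generator (illustrative; the package and output
// names are placeholders):
//
//	go run ./cmd/swarm/mimegen --types mime.types --package http --out gen_mime.go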
|
||||
|
||||
import ( |
||||
"bufio" |
||||
"bytes" |
||||
"flag" |
||||
"html/template" |
||||
"io/ioutil" |
||||
"strings" |
||||
|
||||
"log" |
||||
) |
||||
|
||||
var ( |
||||
typesFlag = flag.String("types", "", "Input mime.types file") |
||||
packageFlag = flag.String("package", "", "Golang package in output file") |
||||
outFlag = flag.String("out", "", "Output file name for the generated mime types") |
||||
) |
||||
|
||||
type mime struct { |
||||
Name string |
||||
Exts []string |
||||
} |
||||
|
||||
type templateParams struct { |
||||
PackageName string |
||||
Mimes []mime |
||||
} |
||||
|
||||
func main() { |
||||
// Parse and ensure all needed inputs are specified
|
||||
flag.Parse() |
||||
if *typesFlag == "" { |
||||
log.Fatalf("--types is required") |
||||
} |
||||
if *packageFlag == "" { |
||||
log.Fatalf("--package is required") |
||||
} |
||||
if *outFlag == "" { |
||||
log.Fatalf("--out is required") |
||||
} |
||||
|
||||
params := templateParams{ |
||||
PackageName: *packageFlag, |
||||
} |
||||
|
||||
types, err := ioutil.ReadFile(*typesFlag) |
||||
if err != nil { |
||||
log.Fatal(err) |
||||
} |
||||
|
||||
scanner := bufio.NewScanner(bytes.NewReader(types)) |
||||
for scanner.Scan() { |
||||
txt := scanner.Text() |
||||
if strings.HasPrefix(txt, "#") || len(txt) == 0 { |
||||
continue |
||||
} |
||||
parts := strings.Fields(txt) |
||||
if len(parts) == 1 { |
||||
continue |
||||
} |
||||
params.Mimes = append(params.Mimes, mime{parts[0], parts[1:]}) |
||||
} |
||||
|
||||
if err = scanner.Err(); err != nil { |
||||
log.Fatal(err) |
||||
} |
||||
|
||||
result := bytes.NewBuffer([]byte{}) |
||||
|
||||
if err := template.Must(template.New("_").Parse(tpl)).Execute(result, params); err != nil { |
||||
log.Fatal(err) |
||||
} |
||||
|
||||
if err := ioutil.WriteFile(*outFlag, result.Bytes(), 0600); err != nil { |
||||
log.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
var tpl = `// Code generated by github.com/ethereum/go-ethereum/cmd/swarm/mimegen. DO NOT EDIT.
|
||||
|
||||
package {{ .PackageName }} |
||||
|
||||
import "mime" |
||||
func init() { |
||||
var mimeTypes = map[string]string{ |
||||
{{- range .Mimes -}} |
||||
{{ $name := .Name -}} |
||||
{{- range .Exts }} |
||||
".{{ . }}": "{{ $name | html }}", |
||||
{{- end }} |
||||
{{- end }} |
||||
} |
||||
for ext, name := range mimeTypes { |
||||
if err := mime.AddExtensionType(ext, name); err != nil { |
||||
panic(err) |
||||
} |
||||
} |
||||
} |
||||
` |
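// For reference (illustrative): given an input line such as "text/html html htm",
// the generated file contains entries like
//
//	".html": "text/html",
//	".htm":  "text/html",
//
// which its init function then registers via mime.AddExtensionType.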
File diff suppressed because it is too large
@ -1,502 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"crypto/ecdsa" |
||||
"flag" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"net" |
||||
"os" |
||||
"path" |
||||
"path/filepath" |
||||
"runtime" |
||||
"sync" |
||||
"syscall" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/docker/docker/pkg/reexec" |
||||
"github.com/ethereum/go-ethereum/accounts" |
||||
"github.com/ethereum/go-ethereum/accounts/keystore" |
||||
"github.com/ethereum/go-ethereum/internal/cmdtest" |
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm" |
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" |
||||
) |
||||
|
||||
var loglevel = flag.Int("loglevel", 3, "verbosity of logs") |
||||
|
||||
func init() { |
||||
// Run the app if we've been exec'd as "swarm-test" in runSwarm.
|
||||
reexec.Register("swarm-test", func() { |
||||
if err := app.Run(os.Args); err != nil { |
||||
fmt.Fprintln(os.Stderr, err) |
||||
os.Exit(1) |
||||
} |
||||
os.Exit(0) |
||||
}) |
||||
} |
||||
|
||||
const clusterSize = 3 |
||||
|
||||
func serverFunc(api *api.API) swarmhttp.TestServer { |
||||
return swarmhttp.NewServer(api, "") |
||||
} |
||||
func TestMain(m *testing.M) { |
||||
// check if we have been reexec'd
|
||||
if reexec.Init() { |
||||
return |
||||
} |
||||
os.Exit(m.Run()) |
||||
} |
||||
|
||||
func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd { |
||||
tt := cmdtest.NewTestCmd(t, nil) |
||||
|
||||
found := false |
||||
for _, v := range args { |
||||
if v == "--bootnodes" { |
||||
found = true |
||||
break |
||||
} |
||||
} |
||||
|
||||
if !found { |
||||
args = append([]string{"--bootnodes", ""}, args...) |
||||
} |
||||
|
||||
// Boot "swarm". This actually runs the test binary but the TestMain
|
||||
// function will prevent any tests from running.
|
||||
tt.Run("swarm-test", args...) |
||||
|
||||
return tt |
||||
} |
||||
|
||||
type testCluster struct { |
||||
Nodes []*testNode |
||||
TmpDir string |
||||
} |
||||
|
||||
// newTestCluster starts a test swarm cluster of the given size.
|
||||
//
|
||||
// A temporary directory is created and each node gets a data directory inside
|
||||
// it.
|
||||
//
|
||||
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
|
||||
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
|
||||
// as flags).
|
||||
//
|
||||
// When starting more than one node, they are connected together using the
|
||||
// admin SetPeer RPC method.
|
||||
|
||||
func newTestCluster(t *testing.T, size int) *testCluster { |
||||
cluster := &testCluster{} |
||||
defer func() { |
||||
if t.Failed() { |
||||
cluster.Shutdown() |
||||
} |
||||
}() |
||||
|
||||
tmpdir, err := ioutil.TempDir("", "swarm-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
cluster.TmpDir = tmpdir |
||||
|
||||
// start the nodes
|
||||
cluster.StartNewNodes(t, size) |
||||
|
||||
if size == 1 { |
||||
return cluster |
||||
} |
||||
|
||||
// connect the nodes together
|
||||
for _, node := range cluster.Nodes { |
||||
if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
// wait until all nodes have the correct number of peers
|
||||
outer: |
||||
for _, node := range cluster.Nodes { |
||||
var peers []*p2p.PeerInfo |
||||
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) { |
||||
if err := node.Client.Call(&peers, "admin_peers"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if len(peers) == len(cluster.Nodes)-1 { |
||||
continue outer |
||||
} |
||||
} |
||||
t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1) |
||||
} |
||||
|
||||
return cluster |
||||
} |
||||
|
||||
func (c *testCluster) Shutdown() { |
||||
c.Stop() |
||||
c.Cleanup() |
||||
} |
||||
|
||||
func (c *testCluster) Stop() { |
||||
for _, node := range c.Nodes { |
||||
node.Shutdown() |
||||
} |
||||
} |
||||
|
||||
func (c *testCluster) StartNewNodes(t *testing.T, size int) { |
||||
c.Nodes = make([]*testNode, 0, size) |
||||
|
||||
errors := make(chan error, size) |
||||
nodes := make(chan *testNode, size) |
||||
for i := 0; i < size; i++ { |
||||
go func(nodeIndex int) { |
||||
dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", nodeIndex)) |
||||
if err := os.Mkdir(dir, 0700); err != nil { |
||||
errors <- err |
||||
return |
||||
} |
||||
|
||||
node := newTestNode(t, dir) |
||||
node.Name = fmt.Sprintf("swarm%02d", nodeIndex) |
||||
nodes <- node |
||||
}(i) |
||||
} |
||||
|
||||
for i := 0; i < size; i++ { |
||||
select { |
||||
case node := <-nodes: |
||||
c.Nodes = append(c.Nodes, node) |
||||
case err := <-errors: |
||||
t.Error(err) |
||||
} |
||||
} |
||||
|
||||
if t.Failed() { |
||||
c.Shutdown() |
||||
t.FailNow() |
||||
} |
||||
} |
||||
|
||||
func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) { |
||||
c.Nodes = make([]*testNode, 0, size) |
||||
for i := 0; i < size; i++ { |
||||
dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i)) |
||||
node := existingTestNode(t, dir, bzzaccount) |
||||
node.Name = fmt.Sprintf("swarm%02d", i) |
||||
|
||||
c.Nodes = append(c.Nodes, node) |
||||
} |
||||
} |
||||
|
||||
func (c *testCluster) Cleanup() { |
||||
os.RemoveAll(c.TmpDir) |
||||
} |
||||
|
||||
type testNode struct { |
||||
Name string |
||||
Addr string |
||||
URL string |
||||
Enode string |
||||
Dir string |
||||
IpcPath string |
||||
PrivateKey *ecdsa.PrivateKey |
||||
Client *rpc.Client |
||||
Cmd *cmdtest.TestCmd |
||||
} |
||||
|
||||
const testPassphrase = "swarm-test-passphrase" |
||||
|
||||
func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) { |
||||
// create key
|
||||
conf = &node.Config{ |
||||
DataDir: dir, |
||||
IPCPath: "bzzd.ipc", |
||||
NoUSB: true, |
||||
} |
||||
n, err := node.New(conf) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// use a unique IPCPath when running tests on Windows
|
||||
if runtime.GOOS == "windows" { |
||||
conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String()) |
||||
} |
||||
|
||||
return conf, account |
||||
} |
||||
|
||||
func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { |
||||
conf, _ := getTestAccount(t, dir) |
||||
node := &testNode{Dir: dir} |
||||
|
||||
// use a unique IPCPath when running tests on Windows
|
||||
if runtime.GOOS == "windows" { |
||||
conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount) |
||||
} |
||||
|
||||
// assign ports
|
||||
ports, err := getAvailableTCPPorts(2) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
p2pPort := ports[0] |
||||
httpPort := ports[1] |
||||
|
||||
// start the node
|
||||
node.Cmd = runSwarm(t, |
||||
"--bootnodes", "", |
||||
"--port", p2pPort, |
||||
"--nat", "extip:127.0.0.1", |
||||
"--datadir", dir, |
||||
"--ipcpath", conf.IPCPath, |
||||
"--ens-api", "", |
||||
"--bzzaccount", bzzaccount, |
||||
"--bzznetworkid", "321", |
||||
"--bzzport", httpPort, |
||||
"--verbosity", fmt.Sprint(*loglevel), |
||||
) |
||||
node.Cmd.InputLine(testPassphrase) |
||||
defer func() { |
||||
if t.Failed() { |
||||
node.Shutdown() |
||||
} |
||||
}() |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) |
||||
defer cancel() |
||||
|
||||
// ensure that all ports have active listeners
|
||||
// so that the next node will not get the same ports
|
||||
// when calling getAvailableTCPPorts
|
||||
err = waitTCPPorts(ctx, ports...) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { |
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint()) |
||||
if err == nil { |
||||
break |
||||
} |
||||
} |
||||
if node.Client == nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// load info
|
||||
var info swarm.Info |
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
node.Addr = net.JoinHostPort("127.0.0.1", info.Port) |
||||
node.URL = "http://" + node.Addr |
||||
|
||||
var nodeInfo p2p.NodeInfo |
||||
if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
node.Enode = nodeInfo.Enode |
||||
node.IpcPath = conf.IPCPath |
||||
return node |
||||
} |
||||
|
||||
func newTestNode(t *testing.T, dir string) *testNode { |
||||
|
||||
conf, account := getTestAccount(t, dir) |
||||
ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1) |
||||
|
||||
pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase}) |
||||
|
||||
node := &testNode{Dir: dir, PrivateKey: pk} |
||||
|
||||
// assign ports
|
||||
ports, err := getAvailableTCPPorts(2) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
p2pPort := ports[0] |
||||
httpPort := ports[1] |
||||
|
||||
// start the node
|
||||
node.Cmd = runSwarm(t, |
||||
"--bootnodes", "", |
||||
"--port", p2pPort, |
||||
"--nat", "extip:127.0.0.1", |
||||
"--datadir", dir, |
||||
"--ipcpath", conf.IPCPath, |
||||
"--ens-api", "", |
||||
"--bzzaccount", account.Address.String(), |
||||
"--bzznetworkid", "321", |
||||
"--bzzport", httpPort, |
||||
"--verbosity", fmt.Sprint(*loglevel), |
||||
) |
||||
node.Cmd.InputLine(testPassphrase) |
||||
defer func() { |
||||
if t.Failed() { |
||||
node.Shutdown() |
||||
} |
||||
}() |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) |
||||
defer cancel() |
||||
|
||||
// ensure that all ports have active listeners
|
||||
// so that the next node will not get the same ports
|
||||
// when calling getAvailableTCPPorts
|
||||
err = waitTCPPorts(ctx, ports...) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { |
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint()) |
||||
if err == nil { |
||||
break |
||||
} |
||||
} |
||||
if node.Client == nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// load info
|
||||
var info swarm.Info |
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
node.Addr = net.JoinHostPort("127.0.0.1", info.Port) |
||||
node.URL = "http://" + node.Addr |
||||
|
||||
var nodeInfo p2p.NodeInfo |
||||
if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
node.Enode = nodeInfo.Enode |
||||
node.IpcPath = conf.IPCPath |
||||
return node |
||||
} |
||||
|
||||
func (n *testNode) Shutdown() { |
||||
if n.Cmd != nil { |
||||
n.Cmd.Kill() |
||||
} |
||||
} |
||||
|
||||
// getAvailableTCPPorts returns a set of ports that
|
||||
// nothing is listening on at the time.
|
||||
//
|
||||
// A helper like assignTCPPort cannot be called once per needed port and
// guarantee that distinct ports are returned across calls, because its
// listener is closed inside the call rather than after all listeners have
// been opened and unique available ports selected.
|
||||
func getAvailableTCPPorts(count int) (ports []string, err error) { |
||||
for i := 0; i < count; i++ { |
||||
l, err := net.Listen("tcp", "127.0.0.1:0") |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
// defer close in the loop to be sure the same port will not
|
||||
// be selected in the next iteration
|
||||
defer l.Close() |
||||
|
||||
_, port, err := net.SplitHostPort(l.Addr().String()) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
ports = append(ports, port) |
||||
} |
||||
return ports, nil |
||||
} |
||||
|
||||
// waitTCPPorts blocks until TCP connections can be
// established on all provided ports. It runs all
// port dialers in parallel and returns the first
// encountered error.
// See also waitTCPPort.
|
||||
func waitTCPPorts(ctx context.Context, ports ...string) error { |
||||
var err error |
||||
// mu guards the err variable, which is assigned
// from multiple goroutines.
var mu sync.Mutex |

// cancel stops all goroutines
// as soon as the first error is encountered,
// to prevent unnecessary waiting.
|
||||
ctx, cancel := context.WithCancel(ctx) |
||||
defer cancel() |
||||
|
||||
var wg sync.WaitGroup |
||||
for _, port := range ports { |
||||
wg.Add(1) |
||||
go func(port string) { |
||||
defer wg.Done() |
||||
|
||||
e := waitTCPPort(ctx, port) |
||||
|
||||
mu.Lock() |
||||
defer mu.Unlock() |
||||
if e != nil && err == nil { |
||||
err = e |
||||
cancel() |
||||
} |
||||
}(port) |
||||
} |
||||
wg.Wait() |
||||
|
||||
return err |
||||
} |
||||
|
||||
// waitTCPPort blocks until a TCP connection can be established
// on the provided port. It has a maximum timeout of 3 minutes
// to prevent long waiting, which can be shortened with
// the provided context instance. The dialer has a 10 second
// timeout in every iteration, and a connection refused error
// is retried in 100 millisecond intervals.
|
||||
func waitTCPPort(ctx context.Context, port string) error { |
||||
ctx, cancel := context.WithTimeout(ctx, 3*time.Minute) |
||||
defer cancel() |
||||
|
||||
for { |
||||
c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port) |
||||
if err != nil { |
||||
if operr, ok := err.(*net.OpError); ok { |
||||
if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED { |
||||
time.Sleep(100 * time.Millisecond) |
||||
continue |
||||
} |
||||
} |
||||
return err |
||||
} |
||||
return c.Close() |
||||
} |
||||
} |
@ -1,291 +0,0 @@ |
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"crypto/md5" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"os" |
||||
"os/exec" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
"github.com/pborman/uuid" |
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
const ( |
||||
feedRandomDataLength = 8 |
||||
) |
||||
|
||||
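// feedUploadAndSyncCmd runs feedUploadAndSync in a goroutine and reports a
// timeout metric and error if it does not finish within the configured timeout.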
func feedUploadAndSyncCmd(ctx *cli.Context) error { |
||||
errc := make(chan error) |
||||
|
||||
go func() { |
||||
errc <- feedUploadAndSync(ctx) |
||||
}() |
||||
|
||||
select { |
||||
case err := <-errc: |
||||
if err != nil { |
||||
metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1) |
||||
} |
||||
return err |
||||
case <-time.After(time.Duration(timeout) * time.Second): |
||||
metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1) |
||||
|
||||
return fmt.Errorf("timeout after %v sec", timeout) |
||||
} |
||||
} |
||||
|
||||
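// feedUploadAndSync creates feed manifests for a random topic, subtopic and
// merged topic, posts updates signed with a fixed private key, and verifies
// that every host can serve both the raw updates and an uploaded file
// referenced by the feeds.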
func feedUploadAndSync(c *cli.Context) error { |
||||
log.Info("generating and uploading feeds to " + httpEndpoint(hosts[0]) + " and syncing") |
||||
|
||||
// create a random private key to sign updates with and derive the address
|
||||
pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test") |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer pkFile.Close() |
||||
defer os.Remove(pkFile.Name()) |
||||
|
||||
privkeyHex := "0000000000000000000000000000000000000000000000000000000000001976" |
||||
privKey, err := crypto.HexToECDSA(privkeyHex) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
user := crypto.PubkeyToAddress(privKey.PublicKey) |
||||
userHex := hexutil.Encode(user.Bytes()) |
||||
|
||||
// save the private key to a file
|
||||
_, err = io.WriteString(pkFile, privkeyHex) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
// keep hex strings for topic and subtopic
|
||||
var topicHex string |
||||
var subTopicHex string |
||||
|
||||
// and create combination hex topics for bzz-feed retrieval
|
||||
// xor'ed with topic (zero-value topic if no topic)
|
||||
var subTopicOnlyHex string |
||||
var mergedSubTopicHex string |
||||
|
||||
// generate random topic and subtopic and put a hex on them
|
||||
topicBytes, err := generateRandomData(feed.TopicLength) |
if err != nil { |
return err |
} |
topicHex = hexutil.Encode(topicBytes) |
subTopicBytes, err := generateRandomData(8) |
if err != nil { |
return err |
} |
subTopicHex = hexutil.Encode(subTopicBytes) |
||||
mergedSubTopic, err := feed.NewTopic(subTopicHex, topicBytes) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
mergedSubTopicHex = hexutil.Encode(mergedSubTopic[:]) |
||||
subTopicOnlyBytes, err := feed.NewTopic(subTopicHex, nil) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
subTopicOnlyHex = hexutil.Encode(subTopicOnlyBytes[:]) |
||||
|
||||
// create feed manifest, topic only
|
||||
var out bytes.Buffer |
||||
cmd := exec.Command("swarm", "--bzzapi", httpEndpoint(hosts[0]), "feed", "create", "--topic", topicHex, "--user", userHex) |
||||
cmd.Stdout = &out |
||||
log.Debug("create feed manifest topic cmd", "cmd", cmd) |
||||
err = cmd.Run() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
manifestWithTopic := strings.TrimRight(out.String(), string([]byte{0x0a})) |
||||
if len(manifestWithTopic) != 64 { |
||||
return fmt.Errorf("unknown feed create manifest hash format (topic): (%d) %s", len(out.String()), manifestWithTopic) |
||||
} |
||||
log.Debug("create topic feed", "manifest", manifestWithTopic) |
||||
out.Reset() |
||||
|
||||
// create feed manifest, subtopic only
|
||||
cmd = exec.Command("swarm", "--bzzapi", httpEndpoint(hosts[0]), "feed", "create", "--name", subTopicHex, "--user", userHex) |
||||
cmd.Stdout = &out |
||||
log.Debug("create feed manifest subtopic cmd", "cmd", cmd) |
||||
err = cmd.Run() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
manifestWithSubTopic := strings.TrimRight(out.String(), string([]byte{0x0a})) |
||||
if len(manifestWithSubTopic) != 64 { |
||||
return fmt.Errorf("unknown feed create manifest hash format (subtopic): (%d) %s", len(out.String()), manifestWithSubTopic) |
||||
} |
||||
log.Debug("create subtopic feed", "manifest", manifestWithTopic) |
||||
out.Reset() |
||||
|
||||
// create feed manifest, merged topic
|
||||
cmd = exec.Command("swarm", "--bzzapi", httpEndpoint(hosts[0]), "feed", "create", "--topic", topicHex, "--name", subTopicHex, "--user", userHex) |
||||
cmd.Stdout = &out |
||||
log.Debug("create feed manifest mergetopic cmd", "cmd", cmd) |
||||
err = cmd.Run() |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
manifestWithMergedTopic := strings.TrimRight(out.String(), string([]byte{0x0a})) |
||||
if len(manifestWithMergedTopic) != 64 { |
||||
return fmt.Errorf("unknown feed create manifest hash format (mergedtopic): (%d) %s", len(out.String()), manifestWithMergedTopic) |
||||
} |
||||
log.Debug("create mergedtopic feed", "manifest", manifestWithMergedTopic) |
||||
out.Reset() |
||||
|
||||
// create test data
|
||||
data, err := generateRandomData(feedRandomDataLength) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
h := md5.New() |
||||
h.Write(data) |
||||
dataHash := h.Sum(nil) |
||||
dataHex := hexutil.Encode(data) |
||||
|
||||
// update with topic
|
||||
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--topic", topicHex, dataHex) |
||||
cmd.Stdout = &out |
||||
log.Debug("update feed manifest topic cmd", "cmd", cmd) |
||||
err = cmd.Run() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
log.Debug("feed update topic", "out", out) |
||||
out.Reset() |
||||
|
||||
// update with subtopic
|
||||
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--name", subTopicHex, dataHex) |
||||
cmd.Stdout = &out |
||||
log.Debug("update feed manifest subtopic cmd", "cmd", cmd) |
||||
err = cmd.Run() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
log.Debug("feed update subtopic", "out", out) |
||||
out.Reset() |
||||
|
||||
// update with merged topic
|
||||
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--topic", topicHex, "--name", subTopicHex, dataHex) |
||||
cmd.Stdout = &out |
||||
log.Debug("update feed manifest merged topic cmd", "cmd", cmd) |
||||
err = cmd.Run() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
log.Debug("feed update mergedtopic", "out", out) |
||||
out.Reset() |
||||
|
||||
time.Sleep(3 * time.Second) |
||||
|
||||
// retrieve the data
|
||||
wg := sync.WaitGroup{} |
||||
for _, host := range hosts { |
||||
// raw retrieve of the topic, subtopic-only and merged-topic feed updates
|
||||
for _, hex := range []string{topicHex, subTopicOnlyHex, mergedSubTopicHex} { |
||||
wg.Add(1) |
||||
ruid := uuid.New()[:8] |
||||
go func(hex string, endpoint string, ruid string) { |
||||
for { |
||||
err := fetchFeed(hex, userHex, httpEndpoint(host), dataHash, ruid) |
||||
if err != nil { |
||||
continue |
||||
} |
||||
|
||||
wg.Done() |
||||
return |
||||
} |
||||
}(hex, httpEndpoint(host), ruid) |
||||
} |
||||
} |
||||
wg.Wait() |
||||
log.Info("all endpoints synced random data successfully") |
||||
|
||||
// upload test file
|
||||
log.Info("feed uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed) |
||||
|
||||
randomBytes := testutil.RandomBytes(seed, filesize*1000) |
||||
|
||||
hash, err := upload(randomBytes, httpEndpoint(hosts[0])) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
hashBytes, err := hexutil.Decode("0x" + hash) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
multihashHex := hexutil.Encode(hashBytes) |
||||
fileHash := h.Sum(nil) |
||||
|
||||
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fileHash)) |
||||
|
||||
// update file with topic
|
||||
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--topic", topicHex, multihashHex) |
||||
cmd.Stdout = &out |
||||
err = cmd.Run() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
log.Debug("feed update topic", "out", out) |
||||
out.Reset() |
||||
|
||||
// update file with subtopic
|
||||
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--name", subTopicHex, multihashHex) |
||||
cmd.Stdout = &out |
||||
err = cmd.Run() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
log.Debug("feed update subtopic", "out", out) |
||||
out.Reset() |
||||
|
||||
// update file with merged topic
|
||||
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--topic", topicHex, "--name", subTopicHex, multihashHex) |
||||
cmd.Stdout = &out |
||||
err = cmd.Run() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
log.Debug("feed update mergedtopic", "out", out) |
||||
out.Reset() |
||||
|
||||
time.Sleep(3 * time.Second) |
||||
|
||||
for _, host := range hosts { |
||||
|
||||
// retrieve via the topic, subtopic and merged-topic feed manifests
|
||||
for _, url := range []string{manifestWithTopic, manifestWithSubTopic, manifestWithMergedTopic} { |
||||
wg.Add(1) |
||||
ruid := uuid.New()[:8] |
||||
go func(url string, endpoint string, ruid string) { |
||||
for { |
||||
err := fetch(url, endpoint, fileHash, ruid) |
||||
if err != nil { |
||||
continue |
||||
} |
||||
|
||||
wg.Done() |
||||
return |
||||
} |
||||
}(url, httpEndpoint(host), ruid) |
||||
} |
||||
|
||||
} |
||||
wg.Wait() |
||||
log.Info("all endpoints synced random file successfully") |
||||
|
||||
return nil |
||||
} |
@ -1,195 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
"sort" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
gethmetrics "github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/metrics/influxdb" |
||||
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/tracing" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
|
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var ( |
||||
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
|
||||
) |
||||
|
||||
var ( |
||||
allhosts string |
||||
hosts []string |
||||
filesize int |
||||
syncDelay bool |
||||
inputSeed int |
||||
httpPort int |
||||
wsPort int |
||||
verbosity int |
||||
timeout int |
||||
single bool |
||||
onlyUpload bool |
||||
) |
||||
|
||||
func main() { |
||||
|
||||
app := cli.NewApp() |
||||
app.Name = "smoke-test" |
||||
app.Usage = "" |
||||
|
||||
app.Flags = []cli.Flag{ |
||||
cli.StringFlag{ |
||||
Name: "hosts", |
||||
Value: "", |
||||
Usage: "comma-separated list of swarm hosts", |
||||
Destination: &allhosts, |
||||
}, |
||||
cli.IntFlag{ |
||||
Name: "http-port", |
||||
Value: 80, |
||||
Usage: "http port", |
||||
Destination: &httpPort, |
||||
}, |
||||
cli.IntFlag{ |
||||
Name: "ws-port", |
||||
Value: 8546, |
||||
Usage: "ws port", |
||||
Destination: &wsPort, |
||||
}, |
||||
cli.IntFlag{ |
||||
Name: "seed", |
||||
Value: 0, |
||||
Usage: "input seed in case we need deterministic upload", |
||||
Destination: &inputSeed, |
||||
}, |
||||
cli.IntFlag{ |
||||
Name: "filesize", |
||||
Value: 1024, |
||||
Usage: "file size for generated random file in KB", |
||||
Destination: &filesize, |
||||
}, |
||||
cli.BoolFlag{ |
||||
Name: "sync-delay", |
||||
Usage: "wait for content to be synced", |
||||
Destination: &syncDelay, |
||||
}, |
||||
cli.IntFlag{ |
||||
Name: "verbosity", |
||||
Value: 1, |
||||
Usage: "verbosity", |
||||
Destination: &verbosity, |
||||
}, |
||||
cli.IntFlag{ |
||||
Name: "timeout", |
||||
Value: 180, |
||||
Usage:       "timeout in seconds after which to kill the process", |
||||
Destination: &timeout, |
||||
}, |
||||
cli.BoolFlag{ |
||||
Name: "single", |
||||
Usage: "whether to fetch content from a single node or from all nodes", |
||||
Destination: &single, |
||||
}, |
||||
cli.BoolFlag{ |
||||
Name: "only-upload", |
||||
Usage: "whether to only upload content to a single node without fetching", |
||||
Destination: &onlyUpload, |
||||
}, |
||||
} |
||||
|
||||
app.Flags = append(app.Flags, []cli.Flag{ |
||||
utils.MetricsEnabledFlag, |
||||
swarmmetrics.MetricsInfluxDBEndpointFlag, |
||||
swarmmetrics.MetricsInfluxDBDatabaseFlag, |
||||
swarmmetrics.MetricsInfluxDBUsernameFlag, |
||||
swarmmetrics.MetricsInfluxDBPasswordFlag, |
||||
swarmmetrics.MetricsInfluxDBTagsFlag, |
||||
}...) |
||||
|
||||
app.Flags = append(app.Flags, tracing.Flags...) |
||||
|
||||
app.Commands = []cli.Command{ |
||||
{ |
||||
Name: "upload_and_sync", |
||||
Aliases: []string{"c"}, |
||||
Usage: "upload and sync", |
||||
Action: wrapCliCommand("upload-and-sync", uploadAndSyncCmd), |
||||
}, |
||||
{ |
||||
Name: "feed_sync", |
||||
Aliases: []string{"f"}, |
||||
Usage: "feed update generate, upload and sync", |
||||
Action: wrapCliCommand("feed-and-sync", feedUploadAndSyncCmd), |
||||
}, |
||||
{ |
||||
Name: "upload_speed", |
||||
Aliases: []string{"u"}, |
||||
Usage: "measure upload speed", |
||||
Action: wrapCliCommand("upload-speed", uploadSpeedCmd), |
||||
}, |
||||
{ |
||||
Name: "sliding_window", |
||||
Aliases: []string{"s"}, |
||||
Usage: "measure network aggregate capacity", |
||||
Action: wrapCliCommand("sliding-window", slidingWindowCmd), |
||||
}, |
||||
} |
||||
|
||||
sort.Sort(cli.FlagsByName(app.Flags)) |
||||
sort.Sort(cli.CommandsByName(app.Commands)) |
||||
|
||||
app.Before = func(ctx *cli.Context) error { |
||||
tracing.Setup(ctx) |
||||
return nil |
||||
} |
||||
|
||||
app.After = func(ctx *cli.Context) error { |
||||
return emitMetrics(ctx) |
||||
} |
||||
|
||||
err := app.Run(os.Args) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
|
||||
os.Exit(1) |
||||
} |
||||
} |
||||
|
||||
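// emitMetrics pushes the accumulated metrics to InfluxDB once, tagged with the
// git commit and file size, when metrics collection is enabled.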
func emitMetrics(ctx *cli.Context) error { |
||||
if gethmetrics.Enabled { |
||||
var ( |
||||
endpoint = ctx.GlobalString(swarmmetrics.MetricsInfluxDBEndpointFlag.Name) |
||||
database = ctx.GlobalString(swarmmetrics.MetricsInfluxDBDatabaseFlag.Name) |
||||
username = ctx.GlobalString(swarmmetrics.MetricsInfluxDBUsernameFlag.Name) |
||||
password = ctx.GlobalString(swarmmetrics.MetricsInfluxDBPasswordFlag.Name) |
||||
tags = ctx.GlobalString(swarmmetrics.MetricsInfluxDBTagsFlag.Name) |
||||
) |
||||
|
||||
tagsMap := utils.SplitTagsFlag(tags) |
||||
tagsMap["version"] = gitCommit |
||||
tagsMap["filesize"] = fmt.Sprintf("%v", filesize) |
||||
|
||||
return influxdb.InfluxDBWithTagsOnce(gethmetrics.DefaultRegistry, endpoint, database, username, password, "swarm-smoke.", tagsMap) |
||||
} |
||||
|
||||
return nil |
||||
} |
@ -1,149 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"math/rand" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
"github.com/pborman/uuid" |
||||
|
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
type uploadResult struct { |
||||
hash string |
||||
digest []byte |
||||
} |
||||
|
||||
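// slidingWindowCmd runs slidingWindow and records a failure metric if it
// returns an error.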
func slidingWindowCmd(ctx *cli.Context) error { |
||||
errc := make(chan error) |
||||
|
||||
go func() { |
||||
errc <- slidingWindow(ctx) |
||||
}() |
||||
|
||||
err := <-errc |
||||
if err != nil { |
||||
metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1) |
||||
} |
||||
return err |
||||
} |
||||
|
||||
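// slidingWindow repeatedly uploads random files to the first host and then
// re-fetches every previously uploaded hash from a random other host, until a
// fetch times out; the index of the last retrievable file is reported as the
// network depth.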
func slidingWindow(ctx *cli.Context) error { |
||||
var hashes []uploadResult // swarm hashes of the uploads
|
||||
nodes := len(hosts) |
||||
log.Info("sliding window test started", "nodes", nodes, "filesize(kb)", filesize, "timeout", timeout) |
||||
uploadedBytes := 0 |
||||
networkDepth := 0 |
||||
errored := false |
||||
|
||||
outer: |
||||
for { |
||||
seed = int(time.Now().UTC().UnixNano()) |
||||
log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed) |
||||
|
||||
t1 := time.Now() |
||||
|
||||
randomBytes := testutil.RandomBytes(seed, filesize*1000) |
||||
|
||||
hash, err := upload(randomBytes, httpEndpoint(hosts[0])) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
|
||||
metrics.GetOrRegisterResettingTimer("sliding-window.upload-time", nil).UpdateSince(t1) |
||||
metrics.GetOrRegisterGauge("sliding-window.upload-depth", nil).Update(int64(len(hashes))) |
||||
|
||||
fhash, err := digest(bytes.NewReader(randomBytes)) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
|
||||
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash), "wait for sync", syncDelay) |
||||
hashes = append(hashes, uploadResult{hash: hash, digest: fhash}) |
||||
|
||||
if syncDelay { |
||||
waitToSync() |
||||
} |
||||
|
||||
uploadedBytes += filesize * 1000 |
||||
q := make(chan struct{}, 1) |
||||
d := make(chan struct{}) |
||||
defer close(q) |
||||
defer close(d) |
||||
for i, v := range hashes { |
||||
timeoutC := time.After(time.Duration(timeout) * time.Second) |
||||
errored = false |
||||
|
||||
task: |
||||
for { |
||||
select { |
||||
case q <- struct{}{}: |
||||
go func() { |
||||
var start time.Time |
||||
done := false |
||||
for !done { |
||||
log.Info("trying to retrieve hash", "hash", v.hash) |
||||
idx := 1 + rand.Intn(len(hosts)-1) |
||||
ruid := uuid.New()[:8] |
||||
start = time.Now() |
||||
// fetch hangs when swarm dies out, so we have to jump through a few more hoops to actually
// catch the timeout, but still allow this retry logic
|
||||
err := fetch(v.hash, httpEndpoint(hosts[idx]), v.digest, ruid) |
||||
if err != nil { |
||||
log.Error("error fetching hash", "err", err) |
||||
continue |
||||
} |
||||
done = true |
||||
} |
||||
metrics.GetOrRegisterResettingTimer("sliding-window.single.fetch-time", nil).UpdateSince(start) |
||||
d <- struct{}{} |
||||
}() |
||||
case <-d: |
||||
<-q |
||||
break task |
||||
case <-timeoutC: |
||||
errored = true |
||||
log.Error("error retrieving hash. timeout", "hash idx", i) |
||||
metrics.GetOrRegisterCounter("sliding-window.single.error", nil).Inc(1) |
||||
break outer |
||||
default: |
||||
} |
||||
} |
||||
|
||||
networkDepth = i |
||||
metrics.GetOrRegisterGauge("sliding-window.network-depth", nil).Update(int64(networkDepth)) |
||||
log.Info("sliding window test successfully fetched file", "currentDepth", networkDepth) |
||||
// this test might take a long time to finish - but we'd like to see metrics while they accumulate and not just when
// the test finishes. Therefore emit the metrics on each iteration.
|
||||
emitMetrics(ctx) |
||||
} |
||||
} |
||||
|
||||
log.Info("sliding window test finished", "errored?", errored, "networkDepth", networkDepth, "networkDepth(kb)", networkDepth*filesize) |
||||
log.Info("stats", "uploadedFiles", len(hashes), "uploadedKb", uploadedBytes/1000, "filesizeKb", filesize) |
||||
|
||||
return nil |
||||
} |
@ -1,376 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"encoding/hex" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"math/rand" |
||||
"os" |
||||
"strings" |
||||
"sync" |
||||
"sync/atomic" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm/chunk" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
|
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
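// uploadAndSyncCmd generates random data (from a fixed seed if one is given),
// runs uploadAndSync with a timeout and afterwards tracks which chunks ended
// up on which hosts.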
func uploadAndSyncCmd(ctx *cli.Context) error { |
||||
// use input seed if it has been set
|
||||
if inputSeed != 0 { |
||||
seed = inputSeed |
||||
} |
||||
|
||||
randomBytes := testutil.RandomBytes(seed, filesize*1000) |
||||
|
||||
errc := make(chan error) |
||||
|
||||
go func() { |
||||
errc <- uploadAndSync(ctx, randomBytes) |
||||
}() |
||||
|
||||
var err error |
||||
select { |
||||
case err = <-errc: |
||||
if err != nil { |
||||
metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1) |
||||
} |
||||
case <-time.After(time.Duration(timeout) * time.Second): |
||||
metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1) |
||||
|
||||
err = fmt.Errorf("timeout after %v sec", timeout) |
||||
} |
||||
|
||||
// trigger debug functionality on randomBytes
|
||||
e := trackChunks(randomBytes[:], true) |
||||
if e != nil { |
||||
log.Error(e.Error()) |
||||
} |
||||
|
||||
return err |
||||
} |
||||
|
||||
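// trackChunks asks every host over websocket which of the chunks of testData
// it stores, logs the resulting bit vectors, checks them against the most
// proximate hosts and optionally submits aggregate chunk metrics.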
func trackChunks(testData []byte, submitMetrics bool) error { |
||||
addrs, err := getAllRefs(testData) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
for i, ref := range addrs { |
||||
log.Debug(fmt.Sprintf("ref %d", i), "ref", ref) |
||||
} |
||||
|
||||
var globalYes, globalNo int |
||||
var globalMu sync.Mutex |
||||
var hasErr bool |
||||
|
||||
var wg sync.WaitGroup |
||||
wg.Add(len(hosts)) |
||||
|
||||
var mu sync.Mutex // mutex protecting the allHostChunks and bzzAddrs maps
|
||||
allHostChunks := map[string]string{} // host->bitvector of presence for chunks
|
||||
bzzAddrs := map[string]string{} // host->bzzAddr
|
||||
|
||||
for _, host := range hosts { |
||||
host := host |
||||
go func() { |
||||
defer wg.Done() |
||||
httpHost := fmt.Sprintf("ws://%s:%d", host, 8546) |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) |
||||
defer cancel() |
||||
|
||||
rpcClient, err := rpc.DialContext(ctx, httpHost) |
||||
if rpcClient != nil { |
||||
defer rpcClient.Close() |
||||
} |
||||
if err != nil { |
||||
log.Error("error dialing host", "err", err, "host", httpHost) |
||||
hasErr = true |
||||
return |
||||
} |
||||
|
||||
hostChunks, err := getChunksBitVectorFromHost(rpcClient, addrs) |
||||
if err != nil { |
||||
log.Error("error getting chunks bit vector from host", "err", err, "host", httpHost) |
||||
hasErr = true |
||||
return |
||||
} |
||||
|
||||
bzzAddr, err := getBzzAddrFromHost(rpcClient) |
||||
if err != nil { |
||||
log.Error("error getting bzz addrs from host", "err", err, "host", httpHost) |
||||
hasErr = true |
||||
return |
||||
} |
||||
|
||||
mu.Lock() |
||||
allHostChunks[host] = hostChunks |
||||
bzzAddrs[host] = bzzAddr |
||||
mu.Unlock() |
||||
|
||||
yes, no := 0, 0 |
||||
for _, val := range hostChunks { |
||||
if val == '1' { |
||||
yes++ |
||||
} else { |
||||
no++ |
||||
} |
||||
} |
||||
|
||||
if no == 0 { |
||||
log.Info("host reported to have all chunks", "host", host) |
||||
} |
||||
|
||||
log.Debug("chunks", "chunks", hostChunks, "yes", yes, "no", no, "host", host) |
||||
|
||||
if submitMetrics { |
||||
globalMu.Lock() |
||||
globalYes += yes |
||||
globalNo += no |
||||
globalMu.Unlock() |
||||
} |
||||
}() |
||||
} |
||||
|
||||
wg.Wait() |
||||
|
||||
checkChunksVsMostProxHosts(addrs, allHostChunks, bzzAddrs) |
||||
|
||||
if !hasErr && submitMetrics { |
||||
// remove the chunks stored on the uploader node
|
||||
globalYes -= len(addrs) |
||||
|
||||
metrics.GetOrRegisterCounter("deployment.chunks.yes", nil).Inc(int64(globalYes)) |
||||
metrics.GetOrRegisterCounter("deployment.chunks.no", nil).Inc(int64(globalNo)) |
||||
metrics.GetOrRegisterCounter("deployment.chunks.refs", nil).Inc(int64(len(addrs))) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// getChunksBitVectorFromHost returns a bit vector of presence for a given slice of chunks from a given host
|
||||
func getChunksBitVectorFromHost(client *rpc.Client, addrs []storage.Address) (string, error) { |
||||
var hostChunks string |
||||
|
||||
err := client.Call(&hostChunks, "bzz_has", addrs) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
return hostChunks, nil |
||||
} |
||||
|
||||
// getBzzAddrFromHost returns the bzzAddr for a given host
|
||||
func getBzzAddrFromHost(client *rpc.Client) (string, error) { |
||||
var hive string |
||||
|
||||
err := client.Call(&hive, "bzz_hive") |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
// We make an ugly assumption about the output format of the hive.String() method.
// Ideally we should replace this with an API call that returns the bzz addr for a given host,
// but this also works for now (provided we don't change the hive.String() method, which we haven't in some time).
|
||||
ss := strings.Split(strings.Split(hive, "\n")[3], " ") |
||||
return ss[len(ss)-1], nil |
||||
} |
||||
|
||||
// checkChunksVsMostProxHosts checks:
// 1. whether a chunk has been found at fewer than 2 hosts. Considering our NN size, this should not happen.
// 2. whether a chunk is not found at its closest node. This should also not happen.
// Together with the --only-upload flag, we can run this smoke test and make sure that our syncing
// functionality is correct (without even trying to retrieve the content).
|
||||
//
|
||||
// addrs - a slice with all uploaded chunk refs
|
||||
// allHostChunks - host->bit vector, showing what chunks are present on what hosts
|
||||
// bzzAddrs - host->bzz address, used when determining the most proximate host for a given chunk
|
||||
func checkChunksVsMostProxHosts(addrs []storage.Address, allHostChunks map[string]string, bzzAddrs map[string]string) { |
||||
for k, v := range bzzAddrs { |
||||
log.Trace("bzzAddr", "bzz", v, "host", k) |
||||
} |
||||
|
||||
for i := range addrs { |
||||
var foundAt int |
||||
maxProx := -1 |
||||
var maxProxHost string |
||||
for host := range allHostChunks { |
||||
if allHostChunks[host][i] == '1' { |
||||
foundAt++ |
||||
} |
||||
|
||||
ba, err := hex.DecodeString(bzzAddrs[host]) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
// calculate the host closest to any chunk
|
||||
prox := chunk.Proximity(addrs[i], ba) |
||||
if prox > maxProx { |
||||
maxProx = prox |
||||
maxProxHost = host |
||||
} |
||||
} |
||||
|
||||
if allHostChunks[maxProxHost][i] == '0' { |
||||
log.Error("chunk not found at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost]) |
||||
} else { |
||||
log.Trace("chunk present at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost]) |
||||
} |
||||
|
||||
// if chunk found at less than 2 hosts
|
||||
if foundAt < 2 { |
||||
log.Error("chunk found at less than two hosts", "foundAt", foundAt, "ref", addrs[i]) |
||||
} |
||||
} |
||||
} |
||||
|
||||
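// getAllRefs chunks testData through a temporary local file store and returns
// the addresses of all resulting chunks.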
func getAllRefs(testData []byte) (storage.AddressCollection, error) { |
||||
datadir, err := ioutil.TempDir("", "chunk-debug") |
||||
if err != nil { |
||||
return nil, fmt.Errorf("unable to create temp dir: %v", err) |
||||
} |
||||
defer os.RemoveAll(datadir) |
||||
fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32), chunk.NewTags()) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
reader := bytes.NewReader(testData) |
||||
return fileStore.GetAllReferences(context.Background(), reader, false) |
||||
} |
||||
|
||||
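// uploadAndSync uploads randomBytes to the first host, optionally waits for
// syncing, and then fetches the content from a random other host until the
// fetch succeeds.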
func uploadAndSync(c *cli.Context, randomBytes []byte) error { |
||||
log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed) |
||||
|
||||
t1 := time.Now() |
||||
hash, err := upload(randomBytes, httpEndpoint(hosts[0])) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
t2 := time.Since(t1) |
||||
metrics.GetOrRegisterResettingTimer("upload-and-sync.upload-time", nil).Update(t2) |
||||
|
||||
fhash, err := digest(bytes.NewReader(randomBytes)) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
|
||||
log.Info("uploaded successfully", "hash", hash, "took", t2, "digest", fmt.Sprintf("%x", fhash)) |
||||
|
||||
// wait to sync and log chunks before fetch attempt, only if syncDelay is set to true
|
||||
if syncDelay { |
||||
waitToSync() |
||||
|
||||
log.Debug("chunks before fetch attempt", "hash", hash) |
||||
|
||||
err = trackChunks(randomBytes, false) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
} |
||||
} |
||||
|
||||
if onlyUpload { |
||||
log.Debug("only-upload is true, stoppping test", "hash", hash) |
||||
return nil |
||||
} |
||||
|
||||
randIndex := 1 + rand.Intn(len(hosts)-1) |
||||
|
||||
for { |
||||
start := time.Now() |
||||
err := fetch(hash, httpEndpoint(hosts[randIndex]), fhash, "") |
||||
if err != nil { |
||||
time.Sleep(2 * time.Second) |
||||
continue |
||||
} |
||||
ended := time.Since(start) |
||||
|
||||
metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended) |
||||
log.Info("fetch successful", "took", ended, "endpoint", httpEndpoint(hosts[randIndex])) |
||||
break |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
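// isSyncing reports whether the node behind the given websocket endpoint is
// still syncing, using the bzz_isSyncing RPC call.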
func isSyncing(wsHost string) (bool, error) { |
||||
rpcClient, err := rpc.Dial(wsHost) |
||||
if rpcClient != nil { |
||||
defer rpcClient.Close() |
||||
} |
||||
|
||||
if err != nil { |
||||
log.Error("error dialing host", "err", err) |
||||
return false, err |
||||
} |
||||
|
||||
var isSyncing bool |
||||
err = rpcClient.Call(&isSyncing, "bzz_isSyncing") |
||||
if err != nil { |
||||
log.Error("error calling host for isSyncing", "err", err) |
||||
return false, err |
||||
} |
||||
|
||||
log.Debug("isSyncing result", "host", wsHost, "isSyncing", isSyncing) |
||||
|
||||
return isSyncing, nil |
||||
} |
||||
|
||||
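// waitToSync polls all hosts every 3 seconds and blocks until none of them
// report (or fail to report) that they are still syncing.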
func waitToSync() { |
||||
t1 := time.Now() |
||||
|
||||
ns := uint64(1) |
||||
|
||||
for ns > 0 { |
||||
time.Sleep(3 * time.Second) |
||||
|
||||
notSynced := uint64(0) |
||||
var wg sync.WaitGroup |
||||
wg.Add(len(hosts)) |
||||
for i := 0; i < len(hosts); i++ { |
||||
i := i |
||||
go func(idx int) { |
||||
stillSyncing, err := isSyncing(wsEndpoint(hosts[idx])) |
||||
|
||||
if stillSyncing || err != nil { |
||||
atomic.AddUint64(¬Synced, 1) |
||||
} |
||||
wg.Done() |
||||
}(i) |
||||
} |
||||
wg.Wait() |
||||
|
||||
ns = atomic.LoadUint64(¬Synced) |
||||
} |
||||
|
||||
t2 := time.Since(t1) |
||||
metrics.GetOrRegisterResettingTimer("upload-and-sync.single.wait-for-sync.deployment", nil).Update(t2) |
||||
} |
@ -1,73 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
|
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
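// uploadSpeedCmd runs uploadSpeed in a goroutine and reports a timeout metric
// and error if the upload does not finish within the configured timeout.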
func uploadSpeedCmd(ctx *cli.Context) error { |
||||
log.Info("uploading to "+hosts[0], "seed", seed) |
||||
randomBytes := testutil.RandomBytes(seed, filesize*1000) |
||||
|
||||
errc := make(chan error) |
||||
|
||||
go func() { |
||||
errc <- uploadSpeed(ctx, randomBytes) |
||||
}() |
||||
|
||||
select { |
||||
case err := <-errc: |
||||
if err != nil { |
||||
metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1) |
||||
} |
||||
return err |
||||
case <-time.After(time.Duration(timeout) * time.Second): |
||||
metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1) |
||||
|
||||
|
||||
|
||||
return fmt.Errorf("timeout after %v sec", timeout) |
||||
} |
||||
} |
||||
|
||||
func uploadSpeed(c *cli.Context, data []byte) error { |
||||
t1 := time.Now() |
||||
hash, err := upload(data, hosts[0]) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
metrics.GetOrRegisterCounter("upload-speed.upload-time", nil).Inc(int64(time.Since(t1))) |
||||
|
||||
fhash, err := digest(bytes.NewReader(data)) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
|
||||
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash)) |
||||
return nil |
||||
} |
@ -1,231 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"crypto/md5" |
||||
crand "crypto/rand" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"math/rand" |
||||
"net/http" |
||||
"net/http/httptrace" |
||||
"os" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
"github.com/ethereum/go-ethereum/swarm/api/client" |
||||
"github.com/ethereum/go-ethereum/swarm/spancontext" |
||||
opentracing "github.com/opentracing/opentracing-go" |
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var ( |
||||
commandName = "" |
||||
seed = int(time.Now().UTC().UnixNano()) |
||||
) |
||||
|
||||
func init() { |
||||
rand.Seed(int64(seed)) |
||||
} |
||||
|
||||
func httpEndpoint(host string) string { |
||||
return fmt.Sprintf("http://%s:%d", host, httpPort) |
||||
} |
||||
|
||||
func wsEndpoint(host string) string { |
||||
return fmt.Sprintf("ws://%s:%d", host, wsPort) |
||||
} |
||||
|
||||
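// wrapCliCommand sets up logging, splits the configured host list and records
// invocation and total-time metrics around the wrapped command.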
func wrapCliCommand(name string, command func(*cli.Context) error) func(*cli.Context) error { |
||||
return func(ctx *cli.Context) error { |
||||
log.PrintOrigins(true) |
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(false)))) |
||||
|
||||
commandName = name |
||||
|
||||
hosts = strings.Split(allhosts, ",") |
||||
|
||||
defer func(now time.Time) { |
||||
totalTime := time.Since(now) |
||||
log.Info("total time", "time", totalTime, "kb", filesize) |
||||
metrics.GetOrRegisterResettingTimer(name+".total-time", nil).Update(totalTime) |
||||
}(time.Now()) |
||||
|
||||
log.Info("smoke test starting", "task", name, "timeout", timeout) |
||||
metrics.GetOrRegisterCounter(name, nil).Inc(1) |
||||
|
||||
return command(ctx) |
||||
} |
||||
} |
||||
|
||||
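// fetchFeed retrieves the feed update for the given topic and user from the
// endpoint and compares the MD5 digest of the response body with the expected
// original digest.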
func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error { |
||||
ctx, sp := spancontext.StartSpan(context.Background(), "feed-and-sync.fetch") |
||||
defer sp.Finish() |
||||
|
||||
log.Trace("sleeping", "ruid", ruid) |
||||
time.Sleep(3 * time.Second) |
||||
|
||||
log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user) |
||||
|
||||
var tn time.Time |
||||
reqUri := endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user |
||||
req, _ := http.NewRequest("GET", reqUri, nil) |
||||
|
||||
opentracing.GlobalTracer().Inject( |
||||
sp.Context(), |
||||
opentracing.HTTPHeaders, |
||||
opentracing.HTTPHeadersCarrier(req.Header)) |
||||
|
||||
trace := client.GetClientTrace("feed-and-sync - http get", "feed-and-sync", ruid, &tn) |
||||
|
||||
req = req.WithContext(httptrace.WithClientTrace(ctx, trace)) |
||||
transport := http.DefaultTransport |
||||
|
||||
//transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
|
||||
tn = time.Now() |
||||
res, err := transport.RoundTrip(req) |
||||
if err != nil { |
||||
log.Error(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
|
||||
log.Trace("http get response (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength) |
||||
|
||||
if res.StatusCode != 200 { |
||||
return fmt.Errorf("expected status code %d, got %v (ruid %v)", 200, res.StatusCode, ruid) |
||||
} |
||||
|
||||
defer res.Body.Close() |
||||
|
||||
rdigest, err := digest(res.Body) |
||||
if err != nil { |
||||
log.Warn(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
|
||||
if !bytes.Equal(rdigest, original) { |
||||
err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original) |
||||
log.Warn(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
|
||||
log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength) |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// fetch gets the requested `hash` from the `endpoint` and compares it with the `original` file
|
||||
func fetch(hash string, endpoint string, original []byte, ruid string) error { |
||||
ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch") |
||||
defer sp.Finish() |
||||
|
||||
log.Info("http get request", "ruid", ruid, "endpoint", endpoint, "hash", hash) |
||||
|
||||
var tn time.Time |
||||
reqUri := endpoint + "/bzz:/" + hash + "/" |
||||
req, _ := http.NewRequest("GET", reqUri, nil) |
||||
|
||||
opentracing.GlobalTracer().Inject( |
||||
sp.Context(), |
||||
opentracing.HTTPHeaders, |
||||
opentracing.HTTPHeadersCarrier(req.Header)) |
||||
|
||||
trace := client.GetClientTrace(commandName+" - http get", commandName, ruid, &tn) |
||||
|
||||
req = req.WithContext(httptrace.WithClientTrace(ctx, trace)) |
||||
transport := http.DefaultTransport |
||||
|
||||
//transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
|
||||
tn = time.Now() |
||||
res, err := transport.RoundTrip(req) |
||||
if err != nil { |
||||
log.Error(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
log.Info("http get response", "ruid", ruid, "endpoint", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength) |
||||
|
||||
if res.StatusCode != 200 { |
||||
err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode) |
||||
log.Warn(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
|
||||
defer res.Body.Close() |
||||
|
||||
rdigest, err := digest(res.Body) |
||||
if err != nil { |
||||
log.Warn(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
|
||||
if !bytes.Equal(rdigest, original) { |
||||
err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original) |
||||
log.Warn(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
|
||||
log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength) |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// upload uploads arbitrary bytes as a plaintext file to `endpoint` using the api client
|
||||
func upload(data []byte, endpoint string) (string, error) { |
||||
swarm := client.NewClient(endpoint) |
||||
f := &client.File{ |
||||
ReadCloser: ioutil.NopCloser(bytes.NewReader(data)), |
||||
ManifestEntry: api.ManifestEntry{ |
||||
ContentType: "text/plain", |
||||
Mode: 0660, |
||||
Size: int64(len(data)), |
||||
}, |
||||
} |
||||
|
||||
// upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
|
||||
return swarm.Upload(f, "", false) |
||||
} |
||||
|
||||
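// digest returns the MD5 digest of everything read from r.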
func digest(r io.Reader) ([]byte, error) { |
||||
h := md5.New() |
||||
_, err := io.Copy(h, r) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return h.Sum(nil), nil |
||||
} |
||||
|
||||
// generateRandomData generates random data in a heap buffer
|
||||
func generateRandomData(datasize int) ([]byte, error) { |
||||
b := make([]byte, datasize) |
||||
c, err := crand.Read(b) |
||||
if err != nil { |
||||
return nil, err |
||||
} else if c != datasize { |
||||
return nil, errors.New("short read") |
||||
} |
||||
return b, nil |
||||
} |
@ -1,160 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/json" |
||||
"errors" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"os" |
||||
"path" |
||||
"path/filepath" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" |
||||
"github.com/ethereum/go-ethereum/swarm/network" |
||||
"github.com/ethereum/go-ethereum/swarm/network/simulation" |
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
// create is used as the entry function for the "create" app command.
|
||||
func create(ctx *cli.Context) error { |
||||
log.PrintOrigins(true) |
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(ctx.Int("verbosity")), log.StreamHandler(os.Stdout, log.TerminalFormat(true)))) |
||||
|
||||
if len(ctx.Args()) < 1 { |
||||
return errors.New("argument should be the filename to verify or write-to") |
||||
} |
||||
filename, err := touchPath(ctx.Args()[0]) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
return createSnapshot(filename, ctx.Int("nodes"), strings.Split(ctx.String("services"), ",")) |
||||
} |
||||
|
||||
// createSnapshot creates a new snapshot on the filesystem with the provided
// filename, number of nodes and service names.
|
||||
func createSnapshot(filename string, nodes int, services []string) (err error) { |
||||
log.Debug("create snapshot", "filename", filename, "nodes", nodes, "services", services) |
||||
|
||||
sim := simulation.New(map[string]simulation.ServiceFunc{ |
||||
"bzz": func(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) { |
||||
addr := network.NewAddr(ctx.Config.Node()) |
||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams()) |
||||
hp := network.NewHiveParams() |
||||
hp.KeepAliveInterval = time.Duration(200) * time.Millisecond |
||||
hp.Discovery = true // discovery must be enabled when creating a snapshot
|
||||
|
||||
// store the kademlia in the bucket, needed later in the WaitTillHealthy function
|
||||
bucket.Store(simulation.BucketKeyKademlia, kad) |
||||
|
||||
config := &network.BzzConfig{ |
||||
OverlayAddr: addr.Over(), |
||||
UnderlayAddr: addr.Under(), |
||||
HiveParams: hp, |
||||
} |
||||
return network.NewBzz(config, kad, nil, nil, nil), nil, nil |
||||
}, |
||||
}) |
||||
defer sim.Close() |
||||
|
||||
ids, err := sim.AddNodes(nodes) |
||||
if err != nil { |
||||
return fmt.Errorf("add nodes: %v", err) |
||||
} |
||||
|
||||
err = sim.Net.ConnectNodesRing(ids) |
||||
if err != nil { |
||||
return fmt.Errorf("connect nodes: %v", err) |
||||
} |
||||
|
||||
ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute) |
||||
defer cancelSimRun() |
||||
if _, err := sim.WaitTillHealthy(ctx); err != nil { |
||||
return fmt.Errorf("wait for healthy kademlia: %v", err) |
||||
} |
||||
|
||||
var snap *simulations.Snapshot |
||||
if len(services) > 0 { |
||||
// If service names are provided, include them in the snapshot.
// But check whether the "bzz" service is among them, and if not,
// remove it from the snapshot, as it exists on snapshot creation.
|
||||
var removeServices []string |
||||
var wantBzz bool |
||||
for _, s := range services { |
||||
if s == "bzz" { |
||||
wantBzz = true |
||||
break |
||||
} |
||||
} |
||||
if !wantBzz { |
||||
removeServices = []string{"bzz"} |
||||
} |
||||
snap, err = sim.Net.SnapshotWithServices(services, removeServices) |
||||
} else { |
||||
snap, err = sim.Net.Snapshot() |
||||
} |
||||
if err != nil { |
||||
return fmt.Errorf("create snapshot: %v", err) |
||||
} |
||||
jsonsnapshot, err := json.Marshal(snap) |
||||
if err != nil { |
||||
return fmt.Errorf("json encode snapshot: %v", err) |
||||
} |
||||
return ioutil.WriteFile(filename, jsonsnapshot, 0666) |
||||
} |
||||
|
||||
// touchPath resolves the output path for the given filename
// and creates all parent directories that are missing.
|
||||
func touchPath(filename string) (string, error) { |
||||
if path.IsAbs(filename) { |
||||
if _, err := os.Stat(filename); err == nil { |
||||
// path exists, overwrite
|
||||
return filename, nil |
||||
} |
||||
} |
||||
|
||||
d, f := path.Split(filename) |
||||
dir, err := filepath.Abs(filepath.Dir(os.Args[0])) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
_, err = os.Stat(path.Join(dir, filename)) |
||||
if err == nil { |
||||
// path exists, overwrite
|
||||
return filename, nil |
||||
} |
||||
|
||||
dirPath := path.Join(dir, d) |
||||
filePath := path.Join(dirPath, f) |
||||
if d != "" { |
||||
err = os.MkdirAll(dirPath, os.ModeDir) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
} |
||||
|
||||
return filePath, nil |
||||
} |
@ -1,140 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"os" |
||||
"sort" |
||||
"strconv" |
||||
"strings" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations" |
||||
) |
||||
|
||||
// TestSnapshotCreate is a high level e2e test that tests for snapshot generation.
|
||||
// It runs a few "create" commands with different flag values and loads generated
|
||||
// snapshot files to validate their content.
|
||||
func TestSnapshotCreate(t *testing.T) { |
||||
t.Skip("test is flaky. disabling until underlying problem is addressed") |
||||
|
||||
for _, v := range []struct { |
||||
name string |
||||
nodes int |
||||
services string |
||||
}{ |
||||
{ |
||||
name: "defaults", |
||||
}, |
||||
{ |
||||
name: "more nodes", |
||||
nodes: defaultNodes + 4, |
||||
}, |
||||
{ |
||||
name: "services", |
||||
services: "stream,pss,zorglub", |
||||
}, |
||||
{ |
||||
name: "services with bzz", |
||||
services: "bzz,pss", |
||||
}, |
||||
} { |
||||
t.Run(v.name, func(t *testing.T) { |
||||
t.Parallel() |
||||
|
||||
file, err := ioutil.TempFile("", "swarm-snapshot") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.Remove(file.Name()) |
||||
|
||||
if err = file.Close(); err != nil { |
||||
t.Error(err) |
||||
} |
||||
|
||||
args := []string{"create"} |
||||
if v.nodes > 0 { |
||||
args = append(args, "--nodes", strconv.Itoa(v.nodes)) |
||||
} |
||||
if v.services != "" { |
||||
args = append(args, "--services", v.services) |
||||
} |
||||
testCmd := runSnapshot(t, append(args, file.Name())...) |
||||
|
||||
testCmd.WaitExit() |
||||
if code := testCmd.ExitStatus(); code != 0 { |
||||
t.Fatalf("command exit code %v, expected 0", code) |
||||
} |
||||
|
||||
f, err := os.Open(file.Name()) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer func() { |
||||
err := f.Close() |
||||
if err != nil { |
||||
t.Error("closing snapshot file", "err", err) |
||||
} |
||||
}() |
||||
|
||||
b, err := ioutil.ReadAll(f) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
var snap simulations.Snapshot |
||||
err = json.Unmarshal(b, &snap) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
wantNodes := v.nodes |
||||
if wantNodes == 0 { |
||||
wantNodes = defaultNodes |
||||
} |
||||
gotNodes := len(snap.Nodes) |
||||
if gotNodes != wantNodes { |
||||
t.Errorf("got %v nodes, want %v", gotNodes, wantNodes) |
||||
} |
||||
|
||||
if len(snap.Conns) == 0 { |
||||
t.Error("no connections in a snapshot") |
||||
} |
||||
|
||||
var wantServices []string |
||||
if v.services != "" { |
||||
wantServices = strings.Split(v.services, ",") |
||||
} else { |
||||
wantServices = []string{"bzz"} |
||||
} |
||||
// sort service names so they can be comparable
|
||||
// as strings to every node sorted services
|
||||
sort.Strings(wantServices) |
||||
|
||||
for i, n := range snap.Nodes { |
||||
gotServices := n.Node.Config.Services |
||||
sort.Strings(gotServices) |
||||
if fmt.Sprint(gotServices) != fmt.Sprint(wantServices) { |
||||
t.Errorf("got services %v for node %v, want %v", gotServices, i, wantServices) |
||||
} |
||||
} |
||||
|
||||
}) |
||||
} |
||||
} |
@ -1,83 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"os"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/log"
	cli "gopkg.in/urfave/cli.v1"
)

var gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
var gitDate string

// default value for "create" command --nodes flag
const defaultNodes = 8

func main() {
	err := newApp().Run(os.Args)
	if err != nil {
		log.Error(err.Error())
		os.Exit(1)
	}
}

// newApp constructs a new instance of the Swarm Snapshot Utility.
// Method Run is called on it in the main function and in tests.
func newApp() (app *cli.App) {
	app = utils.NewApp(gitCommit, gitDate, "Swarm Snapshot Utility")

	app.Name = "swarm-snapshot"
	app.Usage = ""

	// app flags (for all commands)
	app.Flags = []cli.Flag{
		cli.IntFlag{
			Name:  "verbosity",
			Value: 1,
			Usage: "verbosity level",
		},
	}

	app.Commands = []cli.Command{
		{
			Name:    "create",
			Aliases: []string{"c"},
			Usage:   "create a swarm snapshot",
			Action:  create,
			// Flags only for "create" command.
			// Allow app flags to be specified after the
			// command argument.
			Flags: append(app.Flags,
				cli.IntFlag{
					Name:  "nodes",
					Value: defaultNodes,
					Usage: "number of nodes",
				},
				cli.StringFlag{
					Name:  "services",
					Value: "bzz",
					Usage: "comma separated list of services to boot the nodes with",
				},
			),
		},
	}

	return app
}
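For reference, the utility defined above is normally driven through its CLI flags; a minimal in-process sketch, assuming the call sits in this same package next to newApp and create, and with a purely illustrative output path:

// Hypothetical helper: create a snapshot of 10 nodes booted with only the bzz
// service and write it to an illustrative path.
func exampleCreateSnapshot() error {
	args := []string{"swarm-snapshot", "--verbosity", "3", "create",
		"--nodes", "10", "--services", "bzz", "/tmp/snapshot.json"}
	return newApp().Run(args)
}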
@ -1,49 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"os"
	"testing"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
)

func init() {
	reexec.Register("swarm-snapshot", func() {
		if err := newApp().Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
}

func runSnapshot(t *testing.T, args ...string) *cmdtest.TestCmd {
	tt := cmdtest.NewTestCmd(t, nil)
	tt.Run("swarm-snapshot", args...)
	return tt
}

func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}
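The reexec registration above is what lets the tests in this package run the full binary in-process; a minimal sketch of such a test, mirroring the create tests earlier in this diff and using a hypothetical output file name:

// Hypothetical test: run `swarm-snapshot create` through the cmdtest harness
// and require a clean exit.
func TestCreateSketch(t *testing.T) {
	cmd := runSnapshot(t, "create", "--nodes", "4", "sketch-snapshot.json")
	cmd.WaitExit()
	if code := cmd.ExitStatus(); code != 0 {
		t.Fatalf("command exit code %v, expected 0", code)
	}
}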
File diff suppressed because it is too large
@ -1,188 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Command bzzup uploads files to the swarm HTTP API.
|
||||
package main |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"os" |
||||
"os/user" |
||||
"path" |
||||
"path/filepath" |
||||
"strconv" |
||||
"strings" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
swarm "github.com/ethereum/go-ethereum/swarm/api/client" |
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var upCommand = cli.Command{ |
||||
Action: upload, |
||||
CustomHelpTemplate: helpTemplate, |
||||
Name: "up", |
||||
Usage: "uploads a file or directory to swarm using the HTTP API", |
||||
ArgsUsage: "<file>", |
||||
Flags: []cli.Flag{SwarmEncryptedFlag}, |
||||
Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash", |
||||
} |
||||
|
||||
func upload(ctx *cli.Context) { |
||||
args := ctx.Args() |
||||
var ( |
||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") |
||||
recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name) |
||||
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) |
||||
defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name) |
||||
fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name) |
||||
mimeType = ctx.GlobalString(SwarmUploadMimeType.Name) |
||||
client = swarm.NewClient(bzzapi) |
||||
toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name) |
||||
autoDefaultPath = false |
||||
file string |
||||
) |
||||
if autoDefaultPathString := os.Getenv(SwarmAutoDefaultPath); autoDefaultPathString != "" { |
||||
b, err := strconv.ParseBool(autoDefaultPathString) |
||||
if err != nil { |
||||
utils.Fatalf("invalid environment variable %s: %v", SwarmAutoDefaultPath, err) |
||||
} |
||||
autoDefaultPath = b |
||||
} |
||||
if len(args) != 1 { |
||||
if fromStdin { |
||||
tmp, err := ioutil.TempFile("", "swarm-stdin") |
||||
if err != nil { |
||||
			utils.Fatalf("error creating tempfile: %s", err)
||||
} |
||||
defer os.Remove(tmp.Name()) |
||||
n, err := io.Copy(tmp, os.Stdin) |
||||
if err != nil { |
||||
utils.Fatalf("error copying stdin to tempfile: %s", err) |
||||
} else if n == 0 { |
||||
utils.Fatalf("error reading from stdin: zero length") |
||||
} |
||||
file = tmp.Name() |
||||
} else { |
||||
utils.Fatalf("Need filename as the first and only argument") |
||||
} |
||||
} else { |
||||
file = expandPath(args[0]) |
||||
} |
||||
|
||||
if !wantManifest { |
||||
f, err := swarm.Open(file) |
||||
if err != nil { |
||||
utils.Fatalf("Error opening file: %s", err) |
||||
} |
||||
defer f.Close() |
||||
hash, err := client.UploadRaw(f, f.Size, toEncrypt) |
||||
if err != nil { |
||||
utils.Fatalf("Upload failed: %s", err) |
||||
} |
||||
fmt.Println(hash) |
||||
return |
||||
} |
||||
|
||||
stat, err := os.Stat(file) |
||||
if err != nil { |
||||
utils.Fatalf("Error opening file: %s", err) |
||||
} |
||||
|
||||
// define a function which either uploads a directory or single file
|
||||
// based on the type of the file being uploaded
|
||||
var doUpload func() (hash string, err error) |
||||
if stat.IsDir() { |
||||
doUpload = func() (string, error) { |
||||
if !recursive { |
||||
return "", errors.New("Argument is a directory and recursive upload is disabled") |
||||
} |
||||
if autoDefaultPath && defaultPath == "" { |
||||
defaultEntryCandidate := path.Join(file, "index.html") |
||||
log.Debug("trying to find default path", "path", defaultEntryCandidate) |
||||
defaultEntryStat, err := os.Stat(defaultEntryCandidate) |
||||
if err == nil && !defaultEntryStat.IsDir() { |
||||
log.Debug("setting auto detected default path", "path", defaultEntryCandidate) |
||||
defaultPath = defaultEntryCandidate |
||||
} |
||||
} |
||||
if defaultPath != "" { |
||||
// construct absolute default path
|
||||
absDefaultPath, _ := filepath.Abs(defaultPath) |
||||
absFile, _ := filepath.Abs(file) |
||||
// make sure absolute directory ends with only one "/"
|
||||
// to trim it from absolute default path and get relative default path
|
||||
absFile = strings.TrimRight(absFile, "/") + "/" |
||||
if absDefaultPath != "" && absFile != "" && strings.HasPrefix(absDefaultPath, absFile) { |
||||
defaultPath = strings.TrimPrefix(absDefaultPath, absFile) |
||||
} |
||||
} |
||||
return client.UploadDirectory(file, defaultPath, "", toEncrypt) |
||||
} |
||||
} else { |
||||
doUpload = func() (string, error) { |
||||
f, err := swarm.Open(file) |
||||
if err != nil { |
||||
return "", fmt.Errorf("error opening file: %s", err) |
||||
} |
||||
defer f.Close() |
||||
if mimeType != "" { |
||||
f.ContentType = mimeType |
||||
} |
||||
return client.Upload(f, "", toEncrypt) |
||||
} |
||||
} |
||||
hash, err := doUpload() |
||||
if err != nil { |
||||
utils.Fatalf("Upload failed: %s", err) |
||||
} |
||||
fmt.Println(hash) |
||||
} |
||||
|
||||
// expandPath expands a file path:
//  1. replaces a leading tilde with the user's home dir
//  2. expands embedded environment variables
//  3. cleans the path, e.g. /a/b/../c -> /a/c
// Note, it has limitations, e.g. ~someuser/tmp will not be expanded
||||
func expandPath(p string) string { |
||||
if i := strings.Index(p, ":"); i > 0 { |
||||
return p |
||||
} |
||||
if i := strings.Index(p, "@"); i > 0 { |
||||
return p |
||||
} |
||||
if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") { |
||||
if home := homeDir(); home != "" { |
||||
p = home + p[1:] |
||||
} |
||||
} |
||||
return path.Clean(os.ExpandEnv(p)) |
||||
} |
||||
|
||||
func homeDir() string { |
||||
if home := os.Getenv("HOME"); home != "" { |
||||
return home |
||||
} |
||||
if usr, err := user.Current(); err == nil { |
||||
return usr.HomeDir |
||||
} |
||||
return "" |
||||
} |
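For completeness, the upload that `swarm up <file>` performs can also be done directly through the HTTP API client used above; a minimal sketch, assuming a local node exposes the API on the usual 8500 port and with a purely illustrative file path:

// Hypothetical helper mirroring the single-file branch of doUpload above:
// open the file, upload it unencrypted and return the root hash.
func exampleUpload() (string, error) {
	client := swarm.NewClient("http://127.0.0.1:8500")
	f, err := swarm.Open("/tmp/hello.txt")
	if err != nil {
		return "", err
	}
	defer f.Close()
	return client.Upload(f, "", false)
}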
@ -1,359 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"net/http" |
||||
"os" |
||||
"path" |
||||
"path/filepath" |
||||
"runtime" |
||||
"strings" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
swarmapi "github.com/ethereum/go-ethereum/swarm/api/client" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
"github.com/mattn/go-colorable" |
||||
) |
||||
|
||||
func init() { |
||||
log.PrintOrigins(true) |
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) |
||||
} |
||||
|
||||
func TestSwarmUp(t *testing.T) { |
||||
if runtime.GOOS == "windows" { |
||||
t.Skip() |
||||
} |
||||
|
||||
cluster := newTestCluster(t, clusterSize) |
||||
defer cluster.Shutdown() |
||||
|
||||
cases := []struct { |
||||
name string |
||||
f func(t *testing.T, cluster *testCluster) |
||||
}{ |
||||
{"NoEncryption", testNoEncryption}, |
||||
{"Encrypted", testEncrypted}, |
||||
{"RecursiveNoEncryption", testRecursiveNoEncryption}, |
||||
{"RecursiveEncrypted", testRecursiveEncrypted}, |
||||
{"DefaultPathAll", testDefaultPathAll}, |
||||
} |
||||
|
||||
for _, tc := range cases { |
||||
t.Run(tc.name, func(t *testing.T) { |
||||
tc.f(t, cluster) |
||||
}) |
||||
} |
||||
} |
||||
|
||||
// testNoEncryption tests that running 'swarm up' makes the resulting file
|
||||
// available from all nodes via the HTTP API
|
||||
func testNoEncryption(t *testing.T, cluster *testCluster) { |
||||
testDefault(t, cluster, false) |
||||
} |
||||
|
||||
// testEncrypted tests that running 'swarm up --encrypted' makes the resulting file
|
||||
// available from all nodes via the HTTP API
|
||||
func testEncrypted(t *testing.T, cluster *testCluster) { |
||||
testDefault(t, cluster, true) |
||||
} |
||||
|
||||
func testRecursiveNoEncryption(t *testing.T, cluster *testCluster) { |
||||
testRecursive(t, cluster, false) |
||||
} |
||||
|
||||
func testRecursiveEncrypted(t *testing.T, cluster *testCluster) { |
||||
testRecursive(t, cluster, true) |
||||
} |
||||
|
||||
func testDefault(t *testing.T, cluster *testCluster, toEncrypt bool) { |
||||
tmpFileName := testutil.TempFileWithContent(t, data) |
||||
defer os.Remove(tmpFileName) |
||||
|
||||
// write data to file
|
||||
hashRegexp := `[a-f\d]{64}` |
||||
flags := []string{ |
||||
"--bzzapi", cluster.Nodes[0].URL, |
||||
"up", |
||||
tmpFileName} |
||||
if toEncrypt { |
||||
hashRegexp = `[a-f\d]{128}` |
||||
flags = []string{ |
||||
"--bzzapi", cluster.Nodes[0].URL, |
||||
"up", |
||||
"--encrypt", |
||||
tmpFileName} |
||||
} |
||||
// upload the file with 'swarm up' and expect a hash
|
||||
log.Info(fmt.Sprintf("uploading file with 'swarm up'")) |
||||
up := runSwarm(t, flags...) |
||||
_, matches := up.ExpectRegexp(hashRegexp) |
||||
up.ExpectExit() |
||||
hash := matches[0] |
||||
log.Info("file uploaded", "hash", hash) |
||||
|
||||
// get the file from the HTTP API of each node
|
||||
for _, node := range cluster.Nodes { |
||||
log.Info("getting file from node", "node", node.Name) |
||||
|
||||
res, err := http.Get(node.URL + "/bzz:/" + hash) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer res.Body.Close() |
||||
|
||||
reply, err := ioutil.ReadAll(res.Body) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if res.StatusCode != 200 { |
||||
t.Fatalf("expected HTTP status 200, got %s", res.Status) |
||||
} |
||||
if string(reply) != data { |
||||
t.Fatalf("expected HTTP body %q, got %q", data, reply) |
||||
} |
||||
log.Debug("verifying uploaded file using `swarm down`") |
||||
//try to get the content with `swarm down`
|
||||
tmpDownload, err := ioutil.TempDir("", "swarm-test") |
||||
tmpDownload = path.Join(tmpDownload, "tmpfile.tmp") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmpDownload) |
||||
|
||||
bzzLocator := "bzz:/" + hash |
||||
flags = []string{ |
||||
"--bzzapi", cluster.Nodes[0].URL, |
||||
"down", |
||||
bzzLocator, |
||||
tmpDownload, |
||||
} |
||||
|
||||
down := runSwarm(t, flags...) |
||||
down.ExpectExit() |
||||
|
||||
fi, err := os.Stat(tmpDownload) |
||||
if err != nil { |
||||
t.Fatalf("could not stat path: %v", err) |
||||
} |
||||
|
||||
switch mode := fi.Mode(); { |
||||
case mode.IsRegular(): |
||||
downloadedBytes, err := ioutil.ReadFile(tmpDownload) |
||||
if err != nil { |
||||
t.Fatalf("had an error reading the downloaded file: %v", err) |
||||
} |
||||
if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) { |
||||
t.Fatalf("retrieved data and posted data not equal!") |
||||
} |
||||
|
||||
default: |
||||
t.Fatalf("expected to download regular file, got %s", fi.Mode()) |
||||
} |
||||
} |
||||
|
||||
timeout := time.Duration(2 * time.Second) |
||||
httpClient := http.Client{ |
||||
Timeout: timeout, |
||||
} |
||||
|
||||
	// try to squeeze a timeout by getting a non-existent hash from each node
||||
for _, node := range cluster.Nodes { |
||||
_, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340") |
||||
// we're speeding up the timeout here since netstore has a 60 seconds timeout on a request
|
||||
if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") { |
||||
t.Fatal(err) |
||||
} |
||||
// this is disabled since it takes 60s due to netstore timeout
|
||||
// if res.StatusCode != 404 {
|
||||
// t.Fatalf("expected HTTP status 404, got %s", res.Status)
|
||||
// }
|
||||
} |
||||
} |
||||
|
||||
func testRecursive(t *testing.T, cluster *testCluster, toEncrypt bool) { |
||||
tmpUploadDir, err := ioutil.TempDir("", "swarm-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmpUploadDir) |
||||
// create tmp files
|
||||
for _, path := range []string{"tmp1", "tmp2"} { |
||||
if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
hashRegexp := `[a-f\d]{64}` |
||||
flags := []string{ |
||||
"--bzzapi", cluster.Nodes[0].URL, |
||||
"--recursive", |
||||
"up", |
||||
tmpUploadDir} |
||||
if toEncrypt { |
||||
hashRegexp = `[a-f\d]{128}` |
||||
flags = []string{ |
||||
"--bzzapi", cluster.Nodes[0].URL, |
||||
"--recursive", |
||||
"up", |
||||
"--encrypt", |
||||
tmpUploadDir} |
||||
} |
||||
// upload the file with 'swarm up' and expect a hash
|
||||
log.Info(fmt.Sprintf("uploading file with 'swarm up'")) |
||||
up := runSwarm(t, flags...) |
||||
_, matches := up.ExpectRegexp(hashRegexp) |
||||
up.ExpectExit() |
||||
hash := matches[0] |
||||
log.Info("dir uploaded", "hash", hash) |
||||
|
||||
// get the file from the HTTP API of each node
|
||||
for _, node := range cluster.Nodes { |
||||
log.Info("getting file from node", "node", node.Name) |
||||
//try to get the content with `swarm down`
|
||||
tmpDownload, err := ioutil.TempDir("", "swarm-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmpDownload) |
||||
bzzLocator := "bzz:/" + hash |
||||
flagss := []string{ |
||||
"--bzzapi", cluster.Nodes[0].URL, |
||||
"down", |
||||
"--recursive", |
||||
bzzLocator, |
||||
tmpDownload, |
||||
} |
||||
|
||||
fmt.Println("downloading from swarm with recursive") |
||||
down := runSwarm(t, flagss...) |
||||
down.ExpectExit() |
||||
|
||||
files, err := ioutil.ReadDir(tmpDownload) |
||||
for _, v := range files { |
||||
fi, err := os.Stat(path.Join(tmpDownload, v.Name())) |
||||
if err != nil { |
||||
t.Fatalf("got an error: %v", err) |
||||
} |
||||
|
||||
switch mode := fi.Mode(); { |
||||
case mode.IsRegular(): |
||||
if file, err := swarmapi.Open(path.Join(tmpDownload, v.Name())); err != nil { |
||||
t.Fatalf("encountered an error opening the file returned from the CLI: %v", err) |
||||
} else { |
||||
ff := make([]byte, len(data)) |
||||
io.ReadFull(file, ff) |
||||
buf := bytes.NewBufferString(data) |
||||
|
||||
if !bytes.Equal(ff, buf.Bytes()) { |
||||
t.Fatalf("retrieved data and posted data not equal!") |
||||
} |
||||
} |
||||
default: |
||||
				t.Fatalf("this shouldn't happen")
||||
} |
||||
} |
||||
if err != nil { |
||||
t.Fatalf("could not list files at: %v", files) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// testDefaultPathAll tests swarm recursive upload with relative and absolute
|
||||
// default paths and with encryption.
|
||||
func testDefaultPathAll(t *testing.T, cluster *testCluster) { |
||||
testDefaultPath(t, cluster, false, false) |
||||
testDefaultPath(t, cluster, false, true) |
||||
testDefaultPath(t, cluster, true, false) |
||||
testDefaultPath(t, cluster, true, true) |
||||
} |
||||
|
||||
func testDefaultPath(t *testing.T, cluster *testCluster, toEncrypt bool, absDefaultPath bool) { |
||||
tmp, err := ioutil.TempDir("", "swarm-defaultpath-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmp) |
||||
|
||||
err = ioutil.WriteFile(filepath.Join(tmp, "index.html"), []byte("<h1>Test</h1>"), 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
err = ioutil.WriteFile(filepath.Join(tmp, "robots.txt"), []byte("Disallow: /"), 0666) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
defaultPath := "index.html" |
||||
if absDefaultPath { |
||||
defaultPath = filepath.Join(tmp, defaultPath) |
||||
} |
||||
|
||||
args := []string{ |
||||
"--bzzapi", |
||||
cluster.Nodes[0].URL, |
||||
"--recursive", |
||||
"--defaultpath", |
||||
defaultPath, |
||||
"up", |
||||
tmp, |
||||
} |
||||
if toEncrypt { |
||||
args = append(args, "--encrypt") |
||||
} |
||||
|
||||
up := runSwarm(t, args...) |
||||
hashRegexp := `[a-f\d]{64,128}` |
||||
_, matches := up.ExpectRegexp(hashRegexp) |
||||
up.ExpectExit() |
||||
hash := matches[0] |
||||
|
||||
client := swarmapi.NewClient(cluster.Nodes[0].URL) |
||||
|
||||
m, isEncrypted, err := client.DownloadManifest(hash) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if toEncrypt != isEncrypted { |
||||
t.Error("downloaded manifest is not encrypted") |
||||
} |
||||
|
||||
var found bool |
||||
var entriesCount int |
||||
for _, e := range m.Entries { |
||||
entriesCount++ |
||||
if e.Path == "" { |
||||
found = true |
||||
} |
||||
} |
||||
|
||||
if !found { |
||||
t.Error("manifest default entry was not found") |
||||
} |
||||
|
||||
if entriesCount != 3 { |
||||
t.Errorf("manifest contains %v entries, expected %v", entriesCount, 3) |
||||
} |
||||
} |
@ -1,68 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package chequebook

import (
	"errors"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

const Version = "1.0"

var errNoChequebook = errors.New("no chequebook")

type API struct {
	chequebookf func() *Chequebook
}

func NewAPI(ch func() *Chequebook) *API {
	return &API{ch}
}

func (a *API) Balance() (string, error) {
	ch := a.chequebookf()
	if ch == nil {
		return "", errNoChequebook
	}
	return ch.Balance().String(), nil
}

func (a *API) Issue(beneficiary common.Address, amount *big.Int) (cheque *Cheque, err error) {
	ch := a.chequebookf()
	if ch == nil {
		return nil, errNoChequebook
	}
	return ch.Issue(beneficiary, amount)
}

func (a *API) Cash(cheque *Cheque) (txhash string, err error) {
	ch := a.chequebookf()
	if ch == nil {
		return "", errNoChequebook
	}
	return ch.Cash(cheque)
}

func (a *API) Deposit(amount *big.Int) (txhash string, err error) {
	ch := a.chequebookf()
	if ch == nil {
		return "", errNoChequebook
	}
	return ch.Deposit(amount)
}
@ -1,642 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package chequebook wraps the 'chequebook' Ethereum smart contract.
//
// The functions in this package allow using the chequebook for
// issuing, receiving and verifying cheques in ether, (auto)cashing cheques in ether,
// as well as (auto)depositing ether to the chequebook contract.
||||
package chequebook |
||||
|
||||
//go:generate abigen --sol contract/chequebook.sol --exc contract/mortal.sol:mortal,contract/owned.sol:owned --pkg contract --out contract/chequebook.go
|
||||
//go:generate go run ./gencode.go
|
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"crypto/ecdsa" |
||||
"encoding/json" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"math/big" |
||||
"os" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/ethereum/go-ethereum/contracts/chequebook/contract" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/swarm/services/swap/swap" |
||||
) |
||||
|
||||
// TODO(zelig): watch peer solvency and notify of bouncing cheques
// TODO(zelig): enable paying with cheque by signing off

// Some functionality requires interacting with the blockchain:
// * setting current balance on peer's chequebook
// * sending the transaction to cash the cheque
// * depositing ether to the chequebook
// * watching incoming ether
||||
|
||||
var ( |
||||
gasToCash = uint64(2000000) // gas cost of a cash transaction using chequebook
|
||||
// gasToDeploy = uint64(3000000)
|
||||
) |
||||
|
||||
// Backend wraps all methods required for chequebook operation.
|
||||
type Backend interface { |
||||
bind.ContractBackend |
||||
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) |
||||
BalanceAt(ctx context.Context, address common.Address, blockNum *big.Int) (*big.Int, error) |
||||
} |
||||
|
||||
// Cheque represents a payment promise to a single beneficiary.
type Cheque struct {
	Contract    common.Address // address of chequebook, needed to avoid cross-contract submission
	Beneficiary common.Address
	Amount      *big.Int // cumulative amount of all funds sent
	Sig         []byte   // signature Sign(Keccak256(contract, beneficiary, amount), prvKey)
}
||||
|
||||
func (ch *Cheque) String() string { |
||||
return fmt.Sprintf("contract: %s, beneficiary: %s, amount: %v, signature: %x", ch.Contract.Hex(), ch.Beneficiary.Hex(), ch.Amount, ch.Sig) |
||||
} |
||||
|
||||
type Params struct { |
||||
ContractCode, ContractAbi string |
||||
} |
||||
|
||||
var ContractParams = &Params{contract.ChequebookBin, contract.ChequebookABI} |
||||
|
||||
// Chequebook can create and sign cheques from a single contract to multiple beneficiaries.
// It is the outgoing payment handler for peer to peer micropayments.
type Chequebook struct {
	path     string            // path to chequebook file
	prvKey   *ecdsa.PrivateKey // private key to sign cheque with
	lock     sync.Mutex        //
	backend  Backend           // blockchain API
	quit     chan bool         // when closed causes autodeposit to stop
	owner    common.Address    // owner address (derived from pubkey)
	contract *contract.Chequebook        // abigen binding
	session  *contract.ChequebookSession // abigen binding with Tx Opts

	// persisted fields
	balance      *big.Int                    // not synced with blockchain
	contractAddr common.Address              // contract address
	sent         map[common.Address]*big.Int // tallies for beneficiaries

	txhash    string   // tx hash of last deposit tx
	threshold *big.Int // threshold that triggers autodeposit if not nil
	buffer    *big.Int // buffer to keep on top of balance for fork protection

	log log.Logger // contextual logger with the contract address embedded
}
||||
|
||||
func (cb *Chequebook) String() string { |
||||
return fmt.Sprintf("contract: %s, owner: %s, balance: %v, signer: %x", cb.contractAddr.Hex(), cb.owner.Hex(), cb.balance, cb.prvKey.PublicKey) |
||||
} |
||||
|
||||
// NewChequebook creates a new Chequebook.
|
||||
func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.PrivateKey, backend Backend) (*Chequebook, error) { |
||||
balance := new(big.Int) |
||||
sent := make(map[common.Address]*big.Int) |
||||
|
||||
chbook, err := contract.NewChequebook(contractAddr, backend) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
transactOpts := bind.NewKeyedTransactor(prvKey) |
||||
session := &contract.ChequebookSession{ |
||||
Contract: chbook, |
||||
TransactOpts: *transactOpts, |
||||
} |
||||
|
||||
cb := &Chequebook{ |
||||
prvKey: prvKey, |
||||
balance: balance, |
||||
contractAddr: contractAddr, |
||||
sent: sent, |
||||
path: path, |
||||
backend: backend, |
||||
owner: transactOpts.From, |
||||
contract: chbook, |
||||
session: session, |
||||
log: log.New("contract", contractAddr), |
||||
} |
||||
if (contractAddr != common.Address{}) { |
||||
cb.setBalanceFromBlockChain() |
||||
cb.log.Trace("New chequebook initialised", "owner", cb.owner, "balance", cb.balance) |
||||
} |
||||
return cb, nil |
||||
} |
||||
|
||||
func (cb *Chequebook) setBalanceFromBlockChain() { |
||||
balance, err := cb.backend.BalanceAt(context.TODO(), cb.contractAddr, nil) |
||||
if err != nil { |
||||
log.Error("Failed to retrieve chequebook balance", "err", err) |
||||
} else { |
||||
cb.balance.Set(balance) |
||||
} |
||||
} |
||||
|
||||
// LoadChequebook loads a chequebook from disk (file path).
|
||||
func LoadChequebook(path string, prvKey *ecdsa.PrivateKey, backend Backend, checkBalance bool) (*Chequebook, error) { |
||||
data, err := ioutil.ReadFile(path) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
cb, _ := NewChequebook(path, common.Address{}, prvKey, backend) |
||||
|
||||
if err = json.Unmarshal(data, cb); err != nil { |
||||
return nil, err |
||||
} |
||||
if checkBalance { |
||||
cb.setBalanceFromBlockChain() |
||||
} |
||||
log.Trace("Loaded chequebook from disk", "path", path) |
||||
|
||||
return cb, nil |
||||
} |
||||
|
||||
// chequebookFile is the JSON representation of a chequebook.
|
||||
type chequebookFile struct { |
||||
Balance string |
||||
Contract string |
||||
Owner string |
||||
Sent map[string]string |
||||
} |
||||
|
||||
// UnmarshalJSON deserialises a chequebook.
|
||||
func (cb *Chequebook) UnmarshalJSON(data []byte) error { |
||||
var file chequebookFile |
||||
err := json.Unmarshal(data, &file) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
_, ok := cb.balance.SetString(file.Balance, 10) |
||||
if !ok { |
||||
return fmt.Errorf("cumulative amount sent: unable to convert string to big integer: %v", file.Balance) |
||||
} |
||||
cb.contractAddr = common.HexToAddress(file.Contract) |
||||
for addr, sent := range file.Sent { |
||||
cb.sent[common.HexToAddress(addr)], ok = new(big.Int).SetString(sent, 10) |
||||
if !ok { |
||||
return fmt.Errorf("beneficiary %v cumulative amount sent: unable to convert string to big integer: %v", addr, sent) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// MarshalJSON serialises a chequebook.
|
||||
func (cb *Chequebook) MarshalJSON() ([]byte, error) { |
||||
var file = &chequebookFile{ |
||||
Balance: cb.balance.String(), |
||||
Contract: cb.contractAddr.Hex(), |
||||
Owner: cb.owner.Hex(), |
||||
Sent: make(map[string]string), |
||||
} |
||||
for addr, sent := range cb.sent { |
||||
file.Sent[addr.Hex()] = sent.String() |
||||
} |
||||
return json.Marshal(file) |
||||
} |
||||
|
||||
// Save persists the chequebook on disk, remembering balance, contract address and
|
||||
// cumulative amount of funds sent for each beneficiary.
|
||||
func (cb *Chequebook) Save() error { |
||||
data, err := json.MarshalIndent(cb, "", " ") |
||||
if err != nil { |
||||
return err |
||||
} |
||||
cb.log.Trace("Saving chequebook to disk", cb.path) |
||||
|
||||
return ioutil.WriteFile(cb.path, data, os.ModePerm) |
||||
} |
||||
|
||||
// Stop quits the autodeposit goroutine.
||||
func (cb *Chequebook) Stop() { |
||||
defer cb.lock.Unlock() |
||||
cb.lock.Lock() |
||||
if cb.quit != nil { |
||||
close(cb.quit) |
||||
cb.quit = nil |
||||
} |
||||
} |
||||
|
||||
// Issue creates a cheque signed by the chequebook owner's private key. The
|
||||
// signer commits to a contract (one that they own), a beneficiary and amount.
|
||||
func (cb *Chequebook) Issue(beneficiary common.Address, amount *big.Int) (*Cheque, error) { |
||||
defer cb.lock.Unlock() |
||||
cb.lock.Lock() |
||||
|
||||
if amount.Sign() <= 0 { |
||||
return nil, fmt.Errorf("amount must be greater than zero (%v)", amount) |
||||
} |
||||
var ( |
||||
ch *Cheque |
||||
err error |
||||
) |
||||
if cb.balance.Cmp(amount) < 0 { |
||||
err = fmt.Errorf("insufficient funds to issue cheque for amount: %v. balance: %v", amount, cb.balance) |
||||
} else { |
||||
var sig []byte |
||||
sent, found := cb.sent[beneficiary] |
||||
if !found { |
||||
sent = new(big.Int) |
||||
cb.sent[beneficiary] = sent |
||||
} |
||||
sum := new(big.Int).Set(sent) |
||||
sum.Add(sum, amount) |
||||
|
||||
sig, err = crypto.Sign(sigHash(cb.contractAddr, beneficiary, sum), cb.prvKey) |
||||
if err == nil { |
||||
ch = &Cheque{ |
||||
Contract: cb.contractAddr, |
||||
Beneficiary: beneficiary, |
||||
Amount: sum, |
||||
Sig: sig, |
||||
} |
||||
sent.Set(sum) |
||||
cb.balance.Sub(cb.balance, amount) // subtract amount from balance
|
||||
} |
||||
} |
||||
	// auto deposit if threshold is set and balance is less than threshold;
	// note this is called even if issuing the cheque fails,
	// so we reattempt depositing
||||
if cb.threshold != nil { |
||||
if cb.balance.Cmp(cb.threshold) < 0 { |
||||
send := new(big.Int).Sub(cb.buffer, cb.balance) |
||||
cb.deposit(send) |
||||
} |
||||
} |
||||
return ch, err |
||||
} |
||||
|
||||
// Cash is a convenience method to cash any cheque.
|
||||
func (cb *Chequebook) Cash(ch *Cheque) (string, error) { |
||||
return ch.Cash(cb.session) |
||||
} |
||||
|
||||
// data to sign: contract address, beneficiary, cumulative amount of funds ever sent
|
||||
func sigHash(contract, beneficiary common.Address, sum *big.Int) []byte { |
||||
bigamount := sum.Bytes() |
||||
if len(bigamount) > 32 { |
||||
return nil |
||||
} |
||||
var amount32 [32]byte |
||||
copy(amount32[32-len(bigamount):32], bigamount) |
||||
input := append(contract.Bytes(), beneficiary.Bytes()...) |
||||
input = append(input, amount32[:]...) |
||||
return crypto.Keccak256(input) |
||||
} |
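// For illustration: the digest signed by the chequebook owner (and recomputed by the
// beneficiary in Verify below) is simply
//
//	Keccak256(contract[20 bytes] ++ beneficiary[20 bytes] ++ leftpad32(cumulative amount))
//
// i.e. the equivalent of
//
//	digest := crypto.Keccak256(append(append(contract.Bytes(), beneficiary.Bytes()...),
//		common.LeftPadBytes(sum.Bytes(), 32)...))
//
// assuming, as sigHash enforces, that the cumulative amount fits into 32 bytes.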
||||
|
||||
// Balance returns the current balance of the chequebook.
|
||||
func (cb *Chequebook) Balance() *big.Int { |
||||
defer cb.lock.Unlock() |
||||
cb.lock.Lock() |
||||
return new(big.Int).Set(cb.balance) |
||||
} |
||||
|
||||
// Owner returns the owner account of the chequebook.
|
||||
func (cb *Chequebook) Owner() common.Address { |
||||
return cb.owner |
||||
} |
||||
|
||||
// Address returns the on-chain contract address of the chequebook.
|
||||
func (cb *Chequebook) Address() common.Address { |
||||
return cb.contractAddr |
||||
} |
||||
|
||||
// Deposit deposits money to the chequebook account.
|
||||
func (cb *Chequebook) Deposit(amount *big.Int) (string, error) { |
||||
defer cb.lock.Unlock() |
||||
cb.lock.Lock() |
||||
return cb.deposit(amount) |
||||
} |
||||
|
||||
// deposit deposits amount to the chequebook account.
|
||||
// The caller must hold lock.
|
||||
func (cb *Chequebook) deposit(amount *big.Int) (string, error) { |
||||
// since the amount is variable here, we do not use sessions
|
||||
depositTransactor := bind.NewKeyedTransactor(cb.prvKey) |
||||
depositTransactor.Value = amount |
||||
chbookRaw := &contract.ChequebookRaw{Contract: cb.contract} |
||||
tx, err := chbookRaw.Transfer(depositTransactor) |
||||
if err != nil { |
||||
cb.log.Warn("Failed to fund chequebook", "amount", amount, "balance", cb.balance, "target", cb.buffer, "err", err) |
||||
return "", err |
||||
} |
||||
// assume that transaction is actually successful, we add the amount to balance right away
|
||||
cb.balance.Add(cb.balance, amount) |
||||
cb.log.Trace("Deposited funds to chequebook", "amount", amount, "balance", cb.balance, "target", cb.buffer) |
||||
return tx.Hash().Hex(), nil |
||||
} |
||||
|
||||
// AutoDeposit (re)sets the interval and amount which trigger sending funds to the
// chequebook contract. The contract backend needs to be set. If threshold is not less
// than buffer, a deposit is triggered on every new cheque issued.
||||
func (cb *Chequebook) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) { |
||||
defer cb.lock.Unlock() |
||||
cb.lock.Lock() |
||||
cb.threshold = threshold |
||||
cb.buffer = buffer |
||||
cb.autoDeposit(interval) |
||||
} |
||||
|
||||
// autoDeposit starts a goroutine that periodically sends funds to the chequebook
// contract. The caller must hold the lock. The goroutine terminates when
// Chequebook.quit is closed.
||||
func (cb *Chequebook) autoDeposit(interval time.Duration) { |
||||
if cb.quit != nil { |
||||
close(cb.quit) |
||||
cb.quit = nil |
||||
} |
||||
// if threshold >= balance autodeposit after every cheque issued
|
||||
if interval == time.Duration(0) || cb.threshold != nil && cb.buffer != nil && cb.threshold.Cmp(cb.buffer) >= 0 { |
||||
return |
||||
} |
||||
|
||||
ticker := time.NewTicker(interval) |
||||
cb.quit = make(chan bool) |
||||
quit := cb.quit |
||||
|
||||
go func() { |
||||
for { |
||||
select { |
||||
case <-quit: |
||||
return |
||||
case <-ticker.C: |
||||
cb.lock.Lock() |
||||
if cb.balance.Cmp(cb.buffer) < 0 { |
||||
amount := new(big.Int).Sub(cb.buffer, cb.balance) |
||||
txhash, err := cb.deposit(amount) |
||||
if err == nil { |
||||
cb.txhash = txhash |
||||
} |
||||
} |
||||
cb.lock.Unlock() |
||||
} |
||||
} |
||||
}() |
||||
} |
||||
|
||||
// Outbox can issue cheques from a single contract to a single beneficiary.
|
||||
type Outbox struct { |
||||
chequeBook *Chequebook |
||||
beneficiary common.Address |
||||
} |
||||
|
||||
// NewOutbox creates an outbox.
|
||||
func NewOutbox(cb *Chequebook, beneficiary common.Address) *Outbox { |
||||
return &Outbox{cb, beneficiary} |
||||
} |
||||
|
||||
// Issue creates cheque.
|
||||
func (o *Outbox) Issue(amount *big.Int) (swap.Promise, error) { |
||||
return o.chequeBook.Issue(o.beneficiary, amount) |
||||
} |
||||
|
||||
// AutoDeposit enables auto-deposits on the underlying chequebook.
|
||||
func (o *Outbox) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) { |
||||
o.chequeBook.AutoDeposit(interval, threshold, buffer) |
||||
} |
||||
|
||||
// Stop helps satisfy the swap.OutPayment interface.
|
||||
func (o *Outbox) Stop() {} |
||||
|
||||
// String implements fmt.Stringer.
|
||||
func (o *Outbox) String() string { |
||||
return fmt.Sprintf("chequebook: %v, beneficiary: %s, balance: %v", o.chequeBook.Address().Hex(), o.beneficiary.Hex(), o.chequeBook.Balance()) |
||||
} |
||||
|
||||
// Inbox can deposit, verify and cash cheques from a single contract to a single
// beneficiary. It is the incoming payment handler for peer to peer micropayments.
type Inbox struct {
	lock        sync.Mutex
	contract    common.Address              // peer's chequebook contract
	beneficiary common.Address              // local peer's receiving address
	sender      common.Address              // local peer's address to send cashing tx from
	signer      *ecdsa.PublicKey            // peer's public key
	txhash      string                      // tx hash of last cashing tx
	session     *contract.ChequebookSession // abi contract backend with tx opts
	quit        chan bool                   // when closed causes autocash to stop
	maxUncashed *big.Int                    // threshold that triggers autocashing
	cashed      *big.Int                    // cumulative amount cashed
	cheque      *Cheque                     // last cheque, nil if none yet received
	log         log.Logger                  // contextual logger with the contract address embedded
}
||||
|
||||
// NewInbox creates an Inbox. An Inbox is not persisted; the cumulative sum is updated
// from the blockchain when the first cheque is received.
||||
func NewInbox(prvKey *ecdsa.PrivateKey, contractAddr, beneficiary common.Address, signer *ecdsa.PublicKey, abigen bind.ContractBackend) (*Inbox, error) { |
||||
if signer == nil { |
||||
return nil, fmt.Errorf("signer is null") |
||||
} |
||||
chbook, err := contract.NewChequebook(contractAddr, abigen) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
transactOpts := bind.NewKeyedTransactor(prvKey) |
||||
transactOpts.GasLimit = gasToCash |
||||
session := &contract.ChequebookSession{ |
||||
Contract: chbook, |
||||
TransactOpts: *transactOpts, |
||||
} |
||||
sender := transactOpts.From |
||||
|
||||
inbox := &Inbox{ |
||||
contract: contractAddr, |
||||
beneficiary: beneficiary, |
||||
sender: sender, |
||||
signer: signer, |
||||
session: session, |
||||
cashed: new(big.Int).Set(common.Big0), |
||||
log: log.New("contract", contractAddr), |
||||
} |
||||
inbox.log.Trace("New chequebook inbox initialized", "beneficiary", inbox.beneficiary, "signer", hexutil.Bytes(crypto.FromECDSAPub(signer))) |
||||
return inbox, nil |
||||
} |
||||
|
||||
func (i *Inbox) String() string { |
||||
return fmt.Sprintf("chequebook: %v, beneficiary: %s, balance: %v", i.contract.Hex(), i.beneficiary.Hex(), i.cheque.Amount) |
||||
} |
||||
|
||||
// Stop quits the autocash goroutine.
|
||||
func (i *Inbox) Stop() { |
||||
defer i.lock.Unlock() |
||||
i.lock.Lock() |
||||
if i.quit != nil { |
||||
close(i.quit) |
||||
i.quit = nil |
||||
} |
||||
} |
||||
|
||||
// Cash attempts to cash the current cheque.
|
||||
func (i *Inbox) Cash() (string, error) { |
||||
if i.cheque == nil { |
||||
return "", nil |
||||
} |
||||
txhash, err := i.cheque.Cash(i.session) |
||||
i.log.Trace("Cashing in chequebook cheque", "amount", i.cheque.Amount, "beneficiary", i.beneficiary) |
||||
i.cashed = i.cheque.Amount |
||||
|
||||
return txhash, err |
||||
} |
||||
|
||||
// AutoCash (re)sets the maximum time and amount which trigger cashing of the last
// uncashed cheque. If maxUncashed is set to 0, cheques are cashed on receipt.
||||
func (i *Inbox) AutoCash(cashInterval time.Duration, maxUncashed *big.Int) { |
||||
defer i.lock.Unlock() |
||||
i.lock.Lock() |
||||
i.maxUncashed = maxUncashed |
||||
i.autoCash(cashInterval) |
||||
} |
||||
|
||||
// autoCash starts a loop that periodically clears the last cheque
|
||||
// if the peer is trusted. Clearing period could be 24h or a week.
|
||||
// The caller must hold lock.
|
||||
func (i *Inbox) autoCash(cashInterval time.Duration) { |
||||
if i.quit != nil { |
||||
close(i.quit) |
||||
i.quit = nil |
||||
} |
||||
// if maxUncashed is set to 0, then autocash on receipt
|
||||
if cashInterval == time.Duration(0) || i.maxUncashed != nil && i.maxUncashed.Sign() == 0 { |
||||
return |
||||
} |
||||
|
||||
ticker := time.NewTicker(cashInterval) |
||||
i.quit = make(chan bool) |
||||
quit := i.quit |
||||
|
||||
go func() { |
||||
for { |
||||
select { |
||||
case <-quit: |
||||
return |
||||
case <-ticker.C: |
||||
i.lock.Lock() |
||||
if i.cheque != nil && i.cheque.Amount.Cmp(i.cashed) != 0 { |
||||
txhash, err := i.Cash() |
||||
if err == nil { |
||||
i.txhash = txhash |
||||
} |
||||
} |
||||
i.lock.Unlock() |
||||
} |
||||
} |
||||
}() |
||||
} |
||||
|
||||
// Receive is called to deposit the latest cheque to the incoming Inbox.
|
||||
// The given promise must be a *Cheque.
|
||||
func (i *Inbox) Receive(promise swap.Promise) (*big.Int, error) { |
||||
ch := promise.(*Cheque) |
||||
|
||||
defer i.lock.Unlock() |
||||
i.lock.Lock() |
||||
|
||||
var sum *big.Int |
||||
if i.cheque == nil { |
||||
// the sum is checked against the blockchain once a cheque is received
|
||||
tally, err := i.session.Sent(i.beneficiary) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("inbox: error calling backend to set amount: %v", err) |
||||
} |
||||
sum = tally |
||||
} else { |
||||
sum = i.cheque.Amount |
||||
} |
||||
|
||||
amount, err := ch.Verify(i.signer, i.contract, i.beneficiary, sum) |
||||
var uncashed *big.Int |
||||
if err == nil { |
||||
i.cheque = ch |
||||
|
||||
if i.maxUncashed != nil { |
||||
uncashed = new(big.Int).Sub(ch.Amount, i.cashed) |
||||
if i.maxUncashed.Cmp(uncashed) < 0 { |
||||
i.Cash() |
||||
} |
||||
} |
||||
i.log.Trace("Received cheque in chequebook inbox", "amount", amount, "uncashed", uncashed) |
||||
} |
||||
|
||||
return amount, err |
||||
} |
||||
|
||||
// Verify verifies cheque for signer, contract, beneficiary, amount, valid signature.
|
||||
func (ch *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) { |
||||
log.Trace("Verifying chequebook cheque", "cheque", ch, "sum", sum) |
||||
if sum == nil { |
||||
return nil, fmt.Errorf("invalid amount") |
||||
} |
||||
|
||||
if ch.Beneficiary != beneficiary { |
||||
return nil, fmt.Errorf("beneficiary mismatch: %v != %v", ch.Beneficiary.Hex(), beneficiary.Hex()) |
||||
} |
||||
if ch.Contract != contract { |
||||
return nil, fmt.Errorf("contract mismatch: %v != %v", ch.Contract.Hex(), contract.Hex()) |
||||
} |
||||
|
||||
amount := new(big.Int).Set(ch.Amount) |
||||
if sum != nil { |
||||
amount.Sub(amount, sum) |
||||
if amount.Sign() <= 0 { |
||||
return nil, fmt.Errorf("incorrect amount: %v <= 0", amount) |
||||
} |
||||
} |
||||
|
||||
pubKey, err := crypto.SigToPub(sigHash(ch.Contract, beneficiary, ch.Amount), ch.Sig) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("invalid signature: %v", err) |
||||
} |
||||
if !bytes.Equal(crypto.FromECDSAPub(pubKey), crypto.FromECDSAPub(signerKey)) { |
||||
return nil, fmt.Errorf("signer mismatch: %x != %x", crypto.FromECDSAPub(pubKey), crypto.FromECDSAPub(signerKey)) |
||||
} |
||||
return amount, nil |
||||
} |
||||
|
||||
// v/r/s representation of signature
|
||||
func sig2vrs(sig []byte) (v byte, r, s [32]byte) { |
||||
v = sig[64] + 27 |
||||
copy(r[:], sig[:32]) |
||||
copy(s[:], sig[32:64]) |
||||
return |
||||
} |
||||
|
||||
// Cash cashes the cheque by sending an Ethereum transaction.
|
||||
func (ch *Cheque) Cash(session *contract.ChequebookSession) (string, error) { |
||||
v, r, s := sig2vrs(ch.Sig) |
||||
tx, err := session.Cash(ch.Beneficiary, ch.Amount, v, r, s) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
return tx.Hash().Hex(), nil |
||||
} |
||||
|
||||
// ValidateCode checks that the on-chain code at address matches the expected chequebook
|
||||
// contract code. This is used to detect suicided chequebooks.
|
||||
func ValidateCode(ctx context.Context, b Backend, address common.Address) (bool, error) { |
||||
code, err := b.CodeAt(ctx, address, nil) |
||||
if err != nil { |
||||
return false, err |
||||
} |
||||
return bytes.Equal(code, common.FromHex(contract.ContractDeployedCode)), nil |
||||
} |
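Taken together, the types above form a small payment channel: the owner issues cumulative cheques, the beneficiary verifies and cashes them. A minimal sketch of the round trip, assuming it lives in this package (so the imports above are in scope), that contractAddr points at an already deployed chequebook owned by ownerKey, and that every amount, interval and path is purely illustrative:

// Hypothetical end-to-end flow between an owner and a beneficiary.
func exampleFlow(contractAddr, beneficiary common.Address, ownerKey, beneficiaryKey *ecdsa.PrivateKey, backend Backend) error {
	// Owner side: load or create the outgoing chequebook and keep it topped up.
	cb, err := NewChequebook("/tmp/chequebook.json", contractAddr, ownerKey, backend)
	if err != nil {
		return err
	}
	buffer := big.NewInt(1000000)   // target on-chain balance, in wei
	threshold := big.NewInt(500000) // deposit again when the tracked balance drops below this
	cb.AutoDeposit(time.Minute, threshold, buffer)

	// Promise 1000 wei more to the beneficiary.
	ch, err := cb.Issue(beneficiary, big.NewInt(1000))
	if err != nil {
		return err
	}

	// Beneficiary side: verify incoming cheques against the owner's public key
	// and cash automatically once more than 500 wei is outstanding.
	inbox, err := NewInbox(beneficiaryKey, contractAddr, beneficiary, &ownerKey.PublicKey, backend)
	if err != nil {
		return err
	}
	inbox.AutoCash(10*time.Minute, big.NewInt(500))
	_, err = inbox.Receive(ch)
	return err
}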
@ -1,487 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package chequebook |
||||
|
||||
import ( |
||||
"crypto/ecdsa" |
||||
"math/big" |
||||
"os" |
||||
"path/filepath" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind" |
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/contracts/chequebook/contract" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
) |
||||
|
||||
var ( |
||||
key0, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") |
||||
key1, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") |
||||
key2, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") |
||||
addr0 = crypto.PubkeyToAddress(key0.PublicKey) |
||||
addr1 = crypto.PubkeyToAddress(key1.PublicKey) |
||||
addr2 = crypto.PubkeyToAddress(key2.PublicKey) |
||||
) |
||||
|
||||
func newTestBackend() *backends.SimulatedBackend { |
||||
return backends.NewSimulatedBackend(core.GenesisAlloc{ |
||||
addr0: {Balance: big.NewInt(1000000000)}, |
||||
addr1: {Balance: big.NewInt(1000000000)}, |
||||
addr2: {Balance: big.NewInt(1000000000)}, |
||||
}, 10000000) |
||||
} |
||||
|
||||
func deploy(prvKey *ecdsa.PrivateKey, amount *big.Int, backend *backends.SimulatedBackend) (common.Address, error) { |
||||
deployTransactor := bind.NewKeyedTransactor(prvKey) |
||||
deployTransactor.Value = amount |
||||
addr, _, _, err := contract.DeployChequebook(deployTransactor, backend) |
||||
if err != nil { |
||||
return common.Address{}, err |
||||
} |
||||
backend.Commit() |
||||
return addr, nil |
||||
} |
||||
|
||||
func TestIssueAndReceive(t *testing.T) { |
||||
path := filepath.Join(os.TempDir(), "chequebook-test.json") |
||||
backend := newTestBackend() |
||||
addr0, err := deploy(key0, big.NewInt(0), backend) |
||||
if err != nil { |
||||
t.Fatalf("deploy contract: expected no error, got %v", err) |
||||
} |
||||
chbook, err := NewChequebook(path, addr0, key0, backend) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
chbook.sent[addr1] = new(big.Int).SetUint64(42) |
||||
amount := common.Big1 |
||||
|
||||
if _, err = chbook.Issue(addr1, amount); err == nil { |
||||
t.Fatalf("expected insufficient funds error, got none") |
||||
} |
||||
|
||||
chbook.balance = new(big.Int).Set(common.Big1) |
||||
if chbook.Balance().Cmp(common.Big1) != 0 { |
||||
t.Fatalf("expected: %v, got %v", "0", chbook.Balance()) |
||||
} |
||||
|
||||
ch, err := chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
if chbook.Balance().Sign() != 0 { |
||||
t.Errorf("expected: %v, got %v", "0", chbook.Balance()) |
||||
} |
||||
|
||||
chbox, err := NewInbox(key1, addr0, addr1, &key0.PublicKey, backend) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
received, err := chbox.Receive(ch) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
if received.Cmp(big.NewInt(43)) != 0 { |
||||
t.Errorf("expected: %v, got %v", "43", received) |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestCheckbookFile(t *testing.T) { |
||||
path := filepath.Join(os.TempDir(), "chequebook-test.json") |
||||
backend := newTestBackend() |
||||
chbook, err := NewChequebook(path, addr0, key0, backend) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
chbook.sent[addr1] = new(big.Int).SetUint64(42) |
||||
chbook.balance = new(big.Int).Set(common.Big1) |
||||
|
||||
chbook.Save() |
||||
|
||||
chbook, err = LoadChequebook(path, key0, backend, false) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
if chbook.Balance().Cmp(common.Big1) != 0 { |
||||
t.Errorf("expected: %v, got %v", "0", chbook.Balance()) |
||||
} |
||||
|
||||
var ch *Cheque |
||||
if ch, err = chbook.Issue(addr1, common.Big1); err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
if ch.Amount.Cmp(new(big.Int).SetUint64(43)) != 0 { |
||||
t.Errorf("expected: %v, got %v", "0", ch.Amount) |
||||
} |
||||
|
||||
err = chbook.Save() |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
} |
||||
|
||||
func TestVerifyErrors(t *testing.T) { |
||||
path0 := filepath.Join(os.TempDir(), "chequebook-test-0.json") |
||||
backend := newTestBackend() |
||||
contr0, err := deploy(key0, common.Big2, backend) |
||||
if err != nil { |
||||
t.Errorf("expected no error, got %v", err) |
||||
} |
||||
chbook0, err := NewChequebook(path0, contr0, key0, backend) |
||||
if err != nil { |
||||
t.Errorf("expected no error, got %v", err) |
||||
} |
||||
|
||||
path1 := filepath.Join(os.TempDir(), "chequebook-test-1.json") |
||||
contr1, _ := deploy(key1, common.Big2, backend) |
||||
chbook1, err := NewChequebook(path1, contr1, key1, backend) |
||||
if err != nil { |
||||
t.Errorf("expected no error, got %v", err) |
||||
} |
||||
|
||||
chbook0.sent[addr1] = new(big.Int).SetUint64(42) |
||||
chbook0.balance = new(big.Int).Set(common.Big2) |
||||
chbook1.balance = new(big.Int).Set(common.Big1) |
||||
amount := common.Big1 |
||||
ch0, err := chbook0.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
chbox, err := NewInbox(key1, contr0, addr1, &key0.PublicKey, backend) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
received, err := chbox.Receive(ch0) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
if received.Cmp(big.NewInt(43)) != 0 { |
||||
t.Errorf("expected: %v, got %v", "43", received) |
||||
} |
||||
|
||||
ch1, err := chbook0.Issue(addr2, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
received, err = chbox.Receive(ch1) |
||||
t.Logf("correct error: %v", err) |
||||
if err == nil { |
||||
t.Fatalf("expected receiver error, got none and value %v", received) |
||||
} |
||||
|
||||
ch2, err := chbook1.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
received, err = chbox.Receive(ch2) |
||||
t.Logf("correct error: %v", err) |
||||
if err == nil { |
||||
t.Fatalf("expected sender error, got none and value %v", received) |
||||
} |
||||
|
||||
_, err = chbook1.Issue(addr1, new(big.Int).SetInt64(-1)) |
||||
t.Logf("correct error: %v", err) |
||||
if err == nil { |
||||
t.Fatalf("expected incorrect amount error, got none") |
||||
} |
||||
|
||||
received, err = chbox.Receive(ch0) |
||||
t.Logf("correct error: %v", err) |
||||
if err == nil { |
||||
t.Fatalf("expected incorrect amount error, got none and value %v", received) |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestDeposit(t *testing.T) { |
||||
path0 := filepath.Join(os.TempDir(), "chequebook-test-0.json") |
||||
backend := newTestBackend() |
||||
contr0, _ := deploy(key0, new(big.Int), backend) |
||||
|
||||
chbook, err := NewChequebook(path0, contr0, key0, backend) |
||||
if err != nil { |
||||
t.Errorf("expected no error, got %v", err) |
||||
} |
||||
|
||||
balance := new(big.Int).SetUint64(42) |
||||
chbook.Deposit(balance) |
||||
backend.Commit() |
||||
if chbook.Balance().Cmp(balance) != 0 { |
||||
t.Fatalf("expected balance %v, got %v", balance, chbook.Balance()) |
||||
} |
||||
|
||||
amount := common.Big1 |
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
exp := new(big.Int).SetUint64(41) |
||||
if chbook.Balance().Cmp(exp) != 0 { |
||||
t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) |
||||
} |
||||
|
||||
// autodeposit on each issue
|
||||
chbook.AutoDeposit(0, balance, balance) |
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
if chbook.Balance().Cmp(balance) != 0 { |
||||
t.Fatalf("expected balance %v, got %v", balance, chbook.Balance()) |
||||
} |
||||
|
||||
// autodeposit off
|
||||
chbook.AutoDeposit(0, common.Big0, balance) |
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
exp = new(big.Int).SetUint64(40) |
||||
if chbook.Balance().Cmp(exp) != 0 { |
||||
t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) |
||||
} |
||||
|
||||
// autodeposit every 200ms if new cheque issued
|
||||
interval := 200 * time.Millisecond |
||||
chbook.AutoDeposit(interval, common.Big1, balance) |
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
exp = new(big.Int).SetUint64(38) |
||||
if chbook.Balance().Cmp(exp) != 0 { |
||||
t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) |
||||
} |
||||
|
||||
time.Sleep(3 * interval) |
||||
backend.Commit() |
||||
if chbook.Balance().Cmp(balance) != 0 { |
||||
t.Fatalf("expected balance %v, got %v", balance, chbook.Balance()) |
||||
} |
||||
|
||||
exp = new(big.Int).SetUint64(40) |
||||
chbook.AutoDeposit(4*interval, exp, balance) |
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
time.Sleep(3 * interval) |
||||
backend.Commit() |
||||
if chbook.Balance().Cmp(exp) != 0 { |
||||
t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) |
||||
} |
||||
|
||||
_, err = chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
time.Sleep(1 * interval) |
||||
backend.Commit() |
||||
|
||||
if chbook.Balance().Cmp(balance) != 0 { |
||||
t.Fatalf("expected balance %v, got %v", balance, chbook.Balance()) |
||||
} |
||||
|
||||
chbook.AutoDeposit(1*interval, common.Big0, balance) |
||||
chbook.Stop() |
||||
|
||||
_, err = chbook.Issue(addr1, common.Big1) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
_, err = chbook.Issue(addr1, common.Big2) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
time.Sleep(1 * interval) |
||||
backend.Commit() |
||||
|
||||
exp = new(big.Int).SetUint64(39) |
||||
if chbook.Balance().Cmp(exp) != 0 { |
||||
t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestCash(t *testing.T) { |
||||
path := filepath.Join(os.TempDir(), "chequebook-test.json") |
||||
backend := newTestBackend() |
||||
contr0, _ := deploy(key0, common.Big2, backend) |
||||
|
||||
chbook, err := NewChequebook(path, contr0, key0, backend) |
||||
if err != nil { |
||||
t.Errorf("expected no error, got %v", err) |
||||
} |
||||
chbook.sent[addr1] = new(big.Int).SetUint64(42) |
||||
amount := common.Big1 |
||||
chbook.balance = new(big.Int).Set(common.Big1) |
||||
ch, err := chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
chbox, err := NewInbox(key1, contr0, addr1, &key0.PublicKey, backend) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
// cashing latest cheque
|
||||
if _, err = chbox.Receive(ch); err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
if _, err = ch.Cash(chbook.session); err != nil { |
||||
t.Fatal("Cash failed:", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
chbook.balance = new(big.Int).Set(common.Big3) |
||||
ch0, err := chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
ch1, err := chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
interval := 10 * time.Millisecond |
||||
// setting autocash with interval of 10ms
|
||||
chbox.AutoCash(interval, nil) |
||||
_, err = chbox.Receive(ch0) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
_, err = chbox.Receive(ch1) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
// after 3x interval time and 2 cheques received, exactly one cashing tx is sent
|
||||
time.Sleep(4 * interval) |
||||
backend.Commit() |
||||
|
||||
// after stopping autocash no more tx are sent
|
||||
ch2, err := chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
chbox.Stop() |
||||
_, err = chbox.Receive(ch2) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
time.Sleep(2 * interval) |
||||
backend.Commit() |
||||
|
||||
// autocash below 1
|
||||
chbook.balance = big.NewInt(2) |
||||
chbox.AutoCash(0, common.Big1) |
||||
|
||||
ch3, err := chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
ch4, err := chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
_, err = chbox.Receive(ch3) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
_, err = chbox.Receive(ch4) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
// autocash on receipt when maxUncashed is 0
|
||||
chbook.balance = new(big.Int).Set(common.Big2) |
||||
chbox.AutoCash(0, common.Big0) |
||||
|
||||
ch5, err := chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
ch6, err := chbook.Issue(addr1, amount) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
|
||||
_, err = chbox.Receive(ch5) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
_, err = chbox.Receive(ch6) |
||||
if err != nil { |
||||
t.Fatalf("expected no error, got %v", err) |
||||
} |
||||
backend.Commit() |
||||
|
||||
} |
@ -1,367 +0,0 @@ |
||||
// Code generated - DO NOT EDIT.
|
||||
// This file is a generated binding and any manual changes will be lost.
|
||||
|
||||
package contract |
||||
|
||||
import ( |
||||
"math/big" |
||||
"strings" |
||||
|
||||
ethereum "github.com/ethereum/go-ethereum" |
||||
"github.com/ethereum/go-ethereum/accounts/abi" |
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
) |
||||
|
||||
// ChequebookABI is the input ABI used to generate the binding from.
|
||||
const ChequebookABI = "[{\"constant\":false,\"inputs\":[],\"name\":\"kill\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"address\"}],\"name\":\"sent\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"beneficiary\",\"type\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\"},{\"name\":\"sig_v\",\"type\":\"uint8\"},{\"name\":\"sig_r\",\"type\":\"bytes32\"},{\"name\":\"sig_s\",\"type\":\"bytes32\"}],\"name\":\"cash\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"payable\":true,\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"deadbeat\",\"type\":\"address\"}],\"name\":\"Overdraft\",\"type\":\"event\"}]" |
||||
|
||||
// ChequebookBin is the compiled bytecode used for deploying new contracts.
|
||||
const ChequebookBin = `0x606060405260008054600160a060020a033316600160a060020a03199091161790556102ec806100306000396000f3006060604052600436106100565763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166341c0e1b581146100585780637bf786f81461006b578063fbf788d61461009c575b005b341561006357600080fd5b6100566100ca565b341561007657600080fd5b61008a600160a060020a03600435166100f1565b60405190815260200160405180910390f35b34156100a757600080fd5b610056600160a060020a036004351660243560ff60443516606435608435610103565b60005433600160a060020a03908116911614156100ef57600054600160a060020a0316ff5b565b60016020526000908152604090205481565b600160a060020a0385166000908152600160205260408120548190861161012957600080fd5b3087876040516c01000000000000000000000000600160a060020a03948516810282529290931690910260148301526028820152604801604051809103902091506001828686866040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f115156101cf57600080fd5b505060206040510351600054600160a060020a039081169116146101f257600080fd5b50600160a060020a03808716600090815260016020526040902054860390301631811161026257600160a060020a0387166000818152600160205260409081902088905582156108fc0290839051600060405180830381858888f19350505050151561025d57600080fd5b6102b7565b6000547f2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f97890600160a060020a0316604051600160a060020a03909116815260200160405180910390a186600160a060020a0316ff5b505050505050505600a165627a7a72305820533e856fc37e3d64d1706bcc7dfb6b1d490c8d566ea498d9d01ec08965a896ca0029` |
||||
|
||||
// DeployChequebook deploys a new Ethereum contract, binding an instance of Chequebook to it.
|
||||
func DeployChequebook(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Chequebook, error) { |
||||
parsed, err := abi.JSON(strings.NewReader(ChequebookABI)) |
||||
if err != nil { |
||||
return common.Address{}, nil, nil, err |
||||
} |
||||
address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ChequebookBin), backend) |
||||
if err != nil { |
||||
return common.Address{}, nil, nil, err |
||||
} |
||||
return address, tx, &Chequebook{ChequebookCaller: ChequebookCaller{contract: contract}, ChequebookTransactor: ChequebookTransactor{contract: contract}, ChequebookFilterer: ChequebookFilterer{contract: contract}}, nil |
||||
} |
||||
|
||||
// Chequebook is an auto generated Go binding around an Ethereum contract.
|
||||
type Chequebook struct { |
||||
ChequebookCaller // Read-only binding to the contract
|
||||
ChequebookTransactor // Write-only binding to the contract
|
||||
ChequebookFilterer // Log filterer for contract events
|
||||
} |
||||
|
||||
// ChequebookCaller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
type ChequebookCaller struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// ChequebookTransactor is an auto generated write-only Go binding around an Ethereum contract.
|
||||
type ChequebookTransactor struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// ChequebookFilterer is an auto generated log filtering Go binding around Ethereum contract events.
|
||||
type ChequebookFilterer struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// ChequebookSession is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type ChequebookSession struct { |
||||
Contract *Chequebook // Generic contract binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
} |
||||
|
||||
// ChequebookCallerSession is an auto generated read-only Go binding around an Ethereum contract,
|
||||
// with pre-set call options.
|
||||
type ChequebookCallerSession struct { |
||||
Contract *ChequebookCaller // Generic contract caller binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
} |
||||
|
||||
// ChequebookTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
|
||||
// with pre-set transact options.
|
||||
type ChequebookTransactorSession struct { |
||||
Contract *ChequebookTransactor // Generic contract transactor binding to set the session for
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
} |
||||
|
||||
// ChequebookRaw is an auto generated low-level Go binding around an Ethereum contract.
|
||||
type ChequebookRaw struct { |
||||
Contract *Chequebook // Generic contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// ChequebookCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
|
||||
type ChequebookCallerRaw struct { |
||||
Contract *ChequebookCaller // Generic read-only contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// ChequebookTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
|
||||
type ChequebookTransactorRaw struct { |
||||
Contract *ChequebookTransactor // Generic write-only contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// NewChequebook creates a new instance of Chequebook, bound to a specific deployed contract.
|
||||
func NewChequebook(address common.Address, backend bind.ContractBackend) (*Chequebook, error) { |
||||
contract, err := bindChequebook(address, backend, backend, backend) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &Chequebook{ChequebookCaller: ChequebookCaller{contract: contract}, ChequebookTransactor: ChequebookTransactor{contract: contract}, ChequebookFilterer: ChequebookFilterer{contract: contract}}, nil |
||||
} |
||||
|
||||
// NewChequebookCaller creates a new read-only instance of Chequebook, bound to a specific deployed contract.
|
||||
func NewChequebookCaller(address common.Address, caller bind.ContractCaller) (*ChequebookCaller, error) { |
||||
contract, err := bindChequebook(address, caller, nil, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ChequebookCaller{contract: contract}, nil |
||||
} |
||||
|
||||
// NewChequebookTransactor creates a new write-only instance of Chequebook, bound to a specific deployed contract.
|
||||
func NewChequebookTransactor(address common.Address, transactor bind.ContractTransactor) (*ChequebookTransactor, error) { |
||||
contract, err := bindChequebook(address, nil, transactor, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ChequebookTransactor{contract: contract}, nil |
||||
} |
||||
|
||||
// NewChequebookFilterer creates a new log filterer instance of Chequebook, bound to a specific deployed contract.
|
||||
func NewChequebookFilterer(address common.Address, filterer bind.ContractFilterer) (*ChequebookFilterer, error) { |
||||
contract, err := bindChequebook(address, nil, nil, filterer) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ChequebookFilterer{contract: contract}, nil |
||||
} |
||||
|
||||
// bindChequebook binds a generic wrapper to an already deployed contract.
|
||||
func bindChequebook(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { |
||||
parsed, err := abi.JSON(strings.NewReader(ChequebookABI)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil |
||||
} |
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_Chequebook *ChequebookRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { |
||||
return _Chequebook.Contract.ChequebookCaller.contract.Call(opts, result, method, params...) |
||||
} |
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_Chequebook *ChequebookRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { |
||||
return _Chequebook.Contract.ChequebookTransactor.contract.Transfer(opts) |
||||
} |
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_Chequebook *ChequebookRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { |
||||
return _Chequebook.Contract.ChequebookTransactor.contract.Transact(opts, method, params...) |
||||
} |
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_Chequebook *ChequebookCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { |
||||
return _Chequebook.Contract.contract.Call(opts, result, method, params...) |
||||
} |
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_Chequebook *ChequebookTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { |
||||
return _Chequebook.Contract.contract.Transfer(opts) |
||||
} |
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_Chequebook *ChequebookTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { |
||||
return _Chequebook.Contract.contract.Transact(opts, method, params...) |
||||
} |
||||
|
||||
// Sent is a free data retrieval call binding the contract method 0x7bf786f8.
|
||||
//
|
||||
// Solidity: function sent( address) constant returns(uint256)
|
||||
func (_Chequebook *ChequebookCaller) Sent(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) { |
||||
var ( |
||||
ret0 = new(*big.Int) |
||||
) |
||||
out := ret0 |
||||
err := _Chequebook.contract.Call(opts, out, "sent", arg0) |
||||
return *ret0, err |
||||
} |
||||
|
||||
// Sent is a free data retrieval call binding the contract method 0x7bf786f8.
|
||||
//
|
||||
// Solidity: function sent( address) constant returns(uint256)
|
||||
func (_Chequebook *ChequebookSession) Sent(arg0 common.Address) (*big.Int, error) { |
||||
return _Chequebook.Contract.Sent(&_Chequebook.CallOpts, arg0) |
||||
} |
||||
|
||||
// Sent is a free data retrieval call binding the contract method 0x7bf786f8.
|
||||
//
|
||||
// Solidity: function sent( address) constant returns(uint256)
|
||||
func (_Chequebook *ChequebookCallerSession) Sent(arg0 common.Address) (*big.Int, error) { |
||||
return _Chequebook.Contract.Sent(&_Chequebook.CallOpts, arg0) |
||||
} |
||||
|
||||
// Cash is a paid mutator transaction binding the contract method 0xfbf788d6.
|
||||
//
|
||||
// Solidity: function cash(beneficiary address, amount uint256, sig_v uint8, sig_r bytes32, sig_s bytes32) returns()
|
||||
func (_Chequebook *ChequebookTransactor) Cash(opts *bind.TransactOpts, beneficiary common.Address, amount *big.Int, sigV uint8, sigR [32]byte, sigS [32]byte) (*types.Transaction, error) { |
||||
return _Chequebook.contract.Transact(opts, "cash", beneficiary, amount, sigV, sigR, sigS) |
||||
} |
||||
|
||||
// Cash is a paid mutator transaction binding the contract method 0xfbf788d6.
|
||||
//
|
||||
// Solidity: function cash(beneficiary address, amount uint256, sig_v uint8, sig_r bytes32, sig_s bytes32) returns()
|
||||
func (_Chequebook *ChequebookSession) Cash(beneficiary common.Address, amount *big.Int, sigV uint8, sigR [32]byte, sigS [32]byte) (*types.Transaction, error) { |
||||
return _Chequebook.Contract.Cash(&_Chequebook.TransactOpts, beneficiary, amount, sigV, sigR, sigS) |
||||
} |
||||
|
||||
// Cash is a paid mutator transaction binding the contract method 0xfbf788d6.
|
||||
//
|
||||
// Solidity: function cash(beneficiary address, amount uint256, sig_v uint8, sig_r bytes32, sig_s bytes32) returns()
|
||||
func (_Chequebook *ChequebookTransactorSession) Cash(beneficiary common.Address, amount *big.Int, sigV uint8, sigR [32]byte, sigS [32]byte) (*types.Transaction, error) { |
||||
return _Chequebook.Contract.Cash(&_Chequebook.TransactOpts, beneficiary, amount, sigV, sigR, sigS) |
||||
} |
||||
|
||||
// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5.
|
||||
//
|
||||
// Solidity: function kill() returns()
|
||||
func (_Chequebook *ChequebookTransactor) Kill(opts *bind.TransactOpts) (*types.Transaction, error) { |
||||
return _Chequebook.contract.Transact(opts, "kill") |
||||
} |
||||
|
||||
// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5.
|
||||
//
|
||||
// Solidity: function kill() returns()
|
||||
func (_Chequebook *ChequebookSession) Kill() (*types.Transaction, error) { |
||||
return _Chequebook.Contract.Kill(&_Chequebook.TransactOpts) |
||||
} |
||||
|
||||
// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5.
|
||||
//
|
||||
// Solidity: function kill() returns()
|
||||
func (_Chequebook *ChequebookTransactorSession) Kill() (*types.Transaction, error) { |
||||
return _Chequebook.Contract.Kill(&_Chequebook.TransactOpts) |
||||
} |
||||
|
||||
// ChequebookOverdraftIterator is returned from FilterOverdraft and is used to iterate over the raw logs and unpacked data for Overdraft events raised by the Chequebook contract.
|
||||
type ChequebookOverdraftIterator struct { |
||||
Event *ChequebookOverdraft // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
} |
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ChequebookOverdraftIterator) Next() bool { |
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil { |
||||
return false |
||||
} |
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done { |
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ChequebookOverdraft) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
default: |
||||
return false |
||||
} |
||||
} |
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ChequebookOverdraft) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
case err := <-it.sub.Err(): |
||||
it.done = true |
||||
it.fail = err |
||||
return it.Next() |
||||
} |
||||
} |
||||
|
||||
// Error returns any retrieval or parsing error that occurred during filtering.
|
||||
func (it *ChequebookOverdraftIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ChequebookOverdraftIterator) Close() error { |
||||
it.sub.Unsubscribe() |
||||
return nil |
||||
} |
||||
|
||||
// ChequebookOverdraft represents a Overdraft event raised by the Chequebook contract.
|
||||
type ChequebookOverdraft struct { |
||||
Deadbeat common.Address |
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
} |
||||
|
||||
// FilterOverdraft is a free log retrieval operation binding the contract event 0x2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f978.
|
||||
//
|
||||
// Solidity: event Overdraft(deadbeat address)
|
||||
func (_Chequebook *ChequebookFilterer) FilterOverdraft(opts *bind.FilterOpts) (*ChequebookOverdraftIterator, error) { |
||||
|
||||
logs, sub, err := _Chequebook.contract.FilterLogs(opts, "Overdraft") |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ChequebookOverdraftIterator{contract: _Chequebook.contract, event: "Overdraft", logs: logs, sub: sub}, nil |
||||
} |
||||
|
||||
// WatchOverdraft is a free log subscription operation binding the contract event 0x2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f978.
|
||||
//
|
||||
// Solidity: event Overdraft(deadbeat address)
|
||||
func (_Chequebook *ChequebookFilterer) WatchOverdraft(opts *bind.WatchOpts, sink chan<- *ChequebookOverdraft) (event.Subscription, error) { |
||||
|
||||
logs, sub, err := _Chequebook.contract.WatchLogs(opts, "Overdraft") |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return event.NewSubscription(func(quit <-chan struct{}) error { |
||||
defer sub.Unsubscribe() |
||||
for { |
||||
select { |
||||
case log := <-logs: |
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ChequebookOverdraft) |
||||
if err := _Chequebook.contract.UnpackLog(event, "Overdraft", log); err != nil { |
||||
return err |
||||
} |
||||
event.Raw = log |
||||
|
||||
select { |
||||
case sink <- event: |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
} |
||||
}), nil |
||||
} |
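The generated binding above is normally consumed through `DeployChequebook` and the embedded caller/transactor types. Below is a minimal, hedged usage sketch against a simulated backend, mirroring the pattern used by `gencode.go` later in this change; the key, balance and gas limit are invented for illustration.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Fund a throwaway key on a simulated chain (values are illustrative only).
	key, _ := crypto.GenerateKey()
	auth := bind.NewKeyedTransactor(key)
	alloc := core.GenesisAlloc{auth.From: {Balance: big.NewInt(1000000000000000000)}}
	backend := backends.NewSimulatedBackend(alloc, 8000000)

	// Deploy the chequebook and mine the deployment transaction.
	addr, _, book, err := contract.DeployChequebook(auth, backend)
	if err != nil {
		panic(err)
	}
	backend.Commit()

	// Read the cumulative amount sent to an address (zero on a fresh contract).
	sent, err := book.Sent(&bind.CallOpts{}, auth.From)
	if err != nil {
		panic(err)
	}
	fmt.Println("chequebook at", addr.Hex(), "sent so far:", sent)
}
```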
@ -1,47 +0,0 @@ |
||||
pragma solidity ^0.4.18; |
||||
|
||||
import "./mortal.sol"; |
||||
|
||||
/// @title Chequebook for Ethereum micropayments |
||||
/// @author Daniel A. Nagy <daniel@ethereum.org> |
||||
contract chequebook is mortal { |
||||
// Cumulative paid amount in wei to each beneficiary |
||||
mapping (address => uint256) public sent; |
||||
|
||||
/// @notice Overdraft event |
||||
event Overdraft(address deadbeat); |
||||
|
||||
// Allow sending ether to the chequebook. |
||||
function() public payable { } |
||||
|
||||
/// @notice Cash cheque |
||||
/// |
||||
/// @param beneficiary beneficiary address |
||||
/// @param amount cumulative amount in wei |
||||
/// @param sig_v signature parameter v |
||||
/// @param sig_r signature parameter r |
||||
/// @param sig_s signature parameter s |
||||
/// The digital signature is calculated on the concatenated triplet of contract address, beneficiary address and cumulative amount |
||||
function cash(address beneficiary, uint256 amount, uint8 sig_v, bytes32 sig_r, bytes32 sig_s) public { |
||||
// Check if the cheque is old. |
||||
// Only cheques that are more recent than the last cashed one are considered. |
||||
require(amount > sent[beneficiary]); |
||||
// Check the digital signature of the cheque. |
||||
bytes32 hash = keccak256(address(this), beneficiary, amount); |
||||
require(owner == ecrecover(hash, sig_v, sig_r, sig_s)); |
||||
// Attempt sending the difference between the cumulative amount on the cheque |
||||
// and the cumulative amount on the last cashed cheque to beneficiary. |
||||
uint256 diff = amount - sent[beneficiary]; |
||||
if (diff <= this.balance) { |
||||
// update the cumulative amount before sending |
||||
sent[beneficiary] = amount; |
||||
beneficiary.transfer(diff); |
||||
} else { |
||||
// Upon failure, punish owner for writing a bounced cheque. |
||||
// owner.sendToDebtorsPrison(); |
||||
Overdraft(owner); |
||||
// Compensate beneficiary. |
||||
selfdestruct(beneficiary); |
||||
} |
||||
} |
||||
} |
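As the comment on `cash` notes, the signature covers the tightly packed triplet of contract address, beneficiary address and cumulative amount. The sketch below shows how such a signature could be produced off-chain; it is an illustration only (the addresses and amount are made up), and it accounts for `crypto.Sign` returning the recovery id as 0/1 while `ecrecover` expects 27/28.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// chequeDigest reproduces keccak256(address(this), beneficiary, amount):
// 20-byte contract address || 20-byte beneficiary || 32-byte big-endian amount.
func chequeDigest(contract, beneficiary common.Address, amount *big.Int) []byte {
	buf := append([]byte{}, contract.Bytes()...)
	buf = append(buf, beneficiary.Bytes()...)
	buf = append(buf, common.LeftPadBytes(amount.Bytes(), 32)...)
	return crypto.Keccak256(buf)
}

func main() {
	key, _ := crypto.GenerateKey() // stands in for the chequebook owner's key
	contractAddr := common.HexToAddress("0x1111111111111111111111111111111111111111")
	beneficiary := common.HexToAddress("0x2222222222222222222222222222222222222222")

	sig, err := crypto.Sign(chequeDigest(contractAddr, beneficiary, big.NewInt(42)), key)
	if err != nil {
		panic(err)
	}
	// Split into the (v, r, s) triple that cash() expects.
	fmt.Printf("v=%d r=%x s=%x\n", sig[64]+27, sig[:32], sig[32:64])
}
```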
@ -1,5 +0,0 @@ |
||||
package contract |
||||
|
||||
// ContractDeployedCode is used to detect suicides. This constant needs to be
|
||||
// updated when the contract code is changed.
|
||||
const ContractDeployedCode = "0x6060604052600436106100565763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166341c0e1b581146100585780637bf786f81461006b578063fbf788d61461009c575b005b341561006357600080fd5b6100566100ca565b341561007657600080fd5b61008a600160a060020a03600435166100f1565b60405190815260200160405180910390f35b34156100a757600080fd5b610056600160a060020a036004351660243560ff60443516606435608435610103565b60005433600160a060020a03908116911614156100ef57600054600160a060020a0316ff5b565b60016020526000908152604090205481565b600160a060020a0385166000908152600160205260408120548190861161012957600080fd5b3087876040516c01000000000000000000000000600160a060020a03948516810282529290931690910260148301526028820152604801604051809103902091506001828686866040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f115156101cf57600080fd5b505060206040510351600054600160a060020a039081169116146101f257600080fd5b50600160a060020a03808716600090815260016020526040902054860390301631811161026257600160a060020a0387166000818152600160205260409081902088905582156108fc0290839051600060405180830381858888f19350505050151561025d57600080fd5b6102b7565b6000547f2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f97890600160a060020a0316604051600160a060020a03909116815260200160405180910390a186600160a060020a0316ff5b505050505050505600a165627a7a72305820533e856fc37e3d64d1706bcc7dfb6b1d490c8d566ea498d9d01ec08965a896ca0029" |
@ -1,10 +0,0 @@ |
||||
pragma solidity ^0.4.0; |
||||
|
||||
import "./owned.sol"; |
||||
|
||||
contract mortal is owned { |
||||
function kill() public { |
||||
if (msg.sender == owner) |
||||
selfdestruct(owner); |
||||
} |
||||
} |
@ -1,15 +0,0 @@ |
||||
pragma solidity ^0.4.0; |
||||
|
||||
contract owned { |
||||
address owner; |
||||
|
||||
modifier onlyowner() { |
||||
if (msg.sender == owner) { |
||||
_; |
||||
} |
||||
} |
||||
|
||||
function owned() public { |
||||
owner = msg.sender; |
||||
} |
||||
} |
@ -1,70 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build none
|
||||
|
||||
// This program generates contract/code.go, which contains the chequebook code
|
||||
// after deployment.
|
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"io/ioutil" |
||||
"math/big" |
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind" |
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" |
||||
"github.com/ethereum/go-ethereum/contracts/chequebook/contract" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
) |
||||
|
||||
var ( |
||||
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") |
||||
testAlloc = core.GenesisAlloc{ |
||||
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(500000000000)}, |
||||
} |
||||
) |
||||
|
||||
func main() { |
||||
backend := backends.NewSimulatedBackend(testAlloc, uint64(100000000)) |
||||
auth := bind.NewKeyedTransactor(testKey) |
||||
|
||||
// Deploy the contract, get the code.
|
||||
addr, _, _, err := contract.DeployChequebook(auth, backend) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
backend.Commit() |
||||
code, err := backend.CodeAt(nil, addr, nil) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if len(code) == 0 { |
||||
panic("empty code") |
||||
} |
||||
|
||||
// Write the output file.
|
||||
content := fmt.Sprintf(`package contract |
||||
|
||||
// ContractDeployedCode is used to detect suicides. This constant needs to be
|
||||
// updated when the contract code is changed.
|
||||
const ContractDeployedCode = "%#x" |
||||
`, code) |
||||
if err := ioutil.WriteFile("contract/code.go", []byte(content), 0644); err != nil { |
||||
panic(err) |
||||
} |
||||
} |
@ -1,30 +0,0 @@ |
||||
# Swarm ENS interface |
||||
|
||||
## Usage |
||||
|
||||
Full documentation for the Ethereum Name Service [can be found as EIP 137](https://github.com/ethereum/EIPs/issues/137). |
||||
This package offers a simple binding that streamlines the registration of arbitrary UTF8 domain names to swarm content hashes. |
||||
|
||||
## Development |
||||
|
||||
The Solidity files in the contract subdirectory implement the ENS root registry, a simple |
||||
first-in, first-served registrar for the root namespace, and a simple resolver contract; |
||||
they're used in tests, and can be used to deploy these contracts for your own purposes. |
||||
|
||||
The solidity source code can be found at [github.com/arachnid/ens/](https://github.com/arachnid/ens/). |
||||
|
||||
The go bindings for ENS contracts are generated using `abigen` via the go generator: |
||||
|
||||
```shell |
||||
go generate ./contracts/ens |
||||
``` |
||||
|
||||
## Fallback contract support |
||||
|
||||
In order to better support content resolution on different service providers (such as Swarm and IPFS), [EIP-1577](https://eips.ethereum.org/EIPS/eip-1577) |
||||
was introduced, along with changes that allow applications to know _where_ content hashes are stored (i.e. whether the |
||||
requested hash resides on Swarm or IPFS). |
||||
|
||||
The code under `contracts/ens/contract` reflects the new Public Resolver changes and the code under `fallback_contract` allows |
||||
us to support the old contract resolution in cases where the ENS name owner did not update her Resolver contract, until the migration |
||||
period ends (date arbitrarily set to June 1st, 2019). |
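As a rough illustration of the dispatch EIP-1577 enables, the first varint of a contenthash names the storage system, so a client can route the lookup accordingly. The sketch below uses the IPFS example hash from the EIP itself; the namespace values (0xe3 for IPFS, 0xe4 for Swarm) come from the multicodec table.

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	// Example contenthash from the EIP-1577 spec (IPFS namespace, dag-pb, sha2-256).
	buf, _ := hex.DecodeString("e3010170122029f2d17be6139079dc48696d1f582a8530eb9805b561eda517e22a892c7e3f1f")
	ns, _ := binary.Uvarint(buf) // first varint is the storage namespace
	switch ns {
	case 0xe4:
		fmt.Println("swarm contenthash")
	case 0xe3:
		fmt.Println("ipfs contenthash")
	default:
		fmt.Println("unknown storage system")
	}
}
```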
@ -1,121 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ens |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"errors" |
||||
"fmt" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
const ( |
||||
cidv1 = 0x1 |
||||
|
||||
nsIpfs = 0xe3 |
||||
nsSwarm = 0xe4 |
||||
|
||||
swarmTypecode = 0xfa // swarm manifest, see https://github.com/multiformats/multicodec/blob/master/table.csv
|
||||
swarmHashtype = 0x1b // keccak256, see https://github.com/multiformats/multicodec/blob/master/table.csv
|
||||
|
||||
hashLength = 32 |
||||
) |
||||
|
||||
// decodeEIP1577ContentHash decodes a chain-stored content hash from an ENS record according to EIP-1577.
|
||||
// A successful decode yields the different parts of the content hash in accordance with the CID spec.
|
||||
// Note: only CIDv1 is supported
|
||||
func decodeEIP1577ContentHash(buf []byte) (storageNs, contentType, hashType, hashLength uint64, hash []byte, err error) { |
||||
if len(buf) < 10 { |
||||
return 0, 0, 0, 0, nil, errors.New("buffer too short") |
||||
} |
||||
|
||||
storageNs, n := binary.Uvarint(buf) |
||||
|
||||
buf = buf[n:] |
||||
vers, n := binary.Uvarint(buf) |
||||
|
||||
if vers != 1 { |
||||
return 0, 0, 0, 0, nil, fmt.Errorf("expected cid v1, got: %d", vers) |
||||
} |
||||
buf = buf[n:] |
||||
contentType, n = binary.Uvarint(buf) |
||||
|
||||
buf = buf[n:] |
||||
hashType, n = binary.Uvarint(buf) |
||||
|
||||
buf = buf[n:] |
||||
hashLength, n = binary.Uvarint(buf) |
||||
|
||||
hash = buf[n:] |
||||
|
||||
if len(hash) != int(hashLength) { |
||||
return 0, 0, 0, 0, nil, errors.New("hash length mismatch") |
||||
} |
||||
return storageNs, contentType, hashType, hashLength, hash, nil |
||||
} |
||||
|
||||
func extractContentHash(buf []byte) (common.Hash, error) { |
||||
storageNs, _ /*contentType*/, _ /* hashType*/, decodedHashLength, hashBytes, err := decodeEIP1577ContentHash(buf) |
||||
|
||||
if err != nil { |
||||
return common.Hash{}, err |
||||
} |
||||
|
||||
if storageNs != nsSwarm { |
||||
return common.Hash{}, errors.New("unknown storage system") |
||||
} |
||||
|
||||
// TODO: for the time being we implement loose enforcement of the EIP rules until the ENS manager is updated
|
||||
/*if contentType != swarmTypecode { |
||||
return common.Hash{}, errors.New("unknown content type") |
||||
} |
||||
|
||||
if hashType != swarmHashtype { |
||||
return common.Hash{}, errors.New("unknown multihash type") |
||||
}*/ |
||||
|
||||
if decodedHashLength != hashLength { |
||||
return common.Hash{}, errors.New("odd hash length, swarm expects 32 bytes") |
||||
} |
||||
|
||||
if len(hashBytes) != int(hashLength) { |
||||
return common.Hash{}, errors.New("hash length mismatch") |
||||
} |
||||
|
||||
return common.BytesToHash(buf), nil |
||||
} |
||||
|
||||
func EncodeSwarmHash(hash common.Hash) ([]byte, error) { |
||||
var cidBytes []byte |
||||
var headerBytes = []byte{ |
||||
nsSwarm, //swarm namespace
|
||||
cidv1, // CIDv1
|
||||
swarmTypecode, // swarm hash
|
||||
swarmHashtype, // keccak256 hash
|
||||
hashLength, //hash length. 32 bytes
|
||||
} |
||||
|
||||
varintbuf := make([]byte, binary.MaxVarintLen64) |
||||
for _, v := range headerBytes { |
||||
n := binary.PutUvarint(varintbuf, uint64(v)) |
||||
cidBytes = append(cidBytes, varintbuf[:n]...) |
||||
} |
||||
|
||||
cidBytes = append(cidBytes, hash[:]...) |
||||
return cidBytes, nil |
||||
} |
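For reference, here is a small self-contained sketch of the byte layout `EncodeSwarmHash` produces. Header values of 0x80 and above (the 0xe4 namespace and the 0xfa typecode) occupy two varint bytes each, so the encoded header is seven bytes long.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Same header values as EncodeSwarmHash: swarm namespace, CIDv1,
	// swarm manifest typecode, keccak256 hash type, 32-byte hash length.
	header := []uint64{0xe4, 0x01, 0xfa, 0x1b, 0x20}

	var out []byte
	buf := make([]byte, binary.MaxVarintLen64)
	for _, v := range header {
		n := binary.PutUvarint(buf, v)
		out = append(out, buf[:n]...)
	}
	fmt.Printf("%x\n", out) // e40101fa011b20, followed on chain by the 32 hash bytes
}
```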
@ -1,158 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ens |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"encoding/hex" |
||||
"fmt" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
// Tests the decoding of the example contenthash given in the EIP-1577 spec
|
||||
func TestEIPSpecCidDecode(t *testing.T) { |
||||
const ( |
||||
eipSpecHash = "e3010170122029f2d17be6139079dc48696d1f582a8530eb9805b561eda517e22a892c7e3f1f" |
||||
eipHash = "29f2d17be6139079dc48696d1f582a8530eb9805b561eda517e22a892c7e3f1f" |
||||
dagPb = 0x70 |
||||
sha2256 = 0x12 |
||||
) |
||||
b, err := hex.DecodeString(eipSpecHash) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
hashBytes, err := hex.DecodeString(eipHash) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
storageNs, contentType, hashType, hashLength, decodedHashBytes, err := decodeEIP1577ContentHash(b) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if storageNs != nsIpfs { |
||||
t.Fatal("wrong ns") |
||||
} |
||||
if contentType != dagPb { |
||||
t.Fatal("should be ipfs typecode") |
||||
} |
||||
if hashType != sha2256 { |
||||
t.Fatal("should be sha2-256") |
||||
} |
||||
if hashLength != 32 { |
||||
t.Fatal("should be 32") |
||||
} |
||||
if !bytes.Equal(hashBytes, decodedHashBytes) { |
||||
t.Fatal("should be equal") |
||||
} |
||||
|
||||
} |
||||
func TestManualCidDecode(t *testing.T) { |
||||
// build contenthash byte strings with various header values and check that extractContentHash accepts or rejects them as expected
|
||||
|
||||
for _, v := range []struct { |
||||
name string |
||||
headerBytes []byte |
||||
wantErr bool |
||||
}{ |
||||
{ |
||||
name: "values correct, should not fail", |
||||
headerBytes: []byte{0xe4, 0x01, 0xfa, 0x1b, 0x20}, |
||||
wantErr: false, |
||||
}, |
||||
{ |
||||
name: "cid version wrong, should fail", |
||||
headerBytes: []byte{0xe4, 0x00, 0xfa, 0x1b, 0x20}, |
||||
wantErr: true, |
||||
}, |
||||
{ |
||||
name: "hash length wrong, should fail", |
||||
headerBytes: []byte{0xe4, 0x01, 0xfa, 0x1b, 0x1f}, |
||||
wantErr: true, |
||||
}, |
||||
{ |
||||
name: "values correct for ipfs, should fail", |
||||
headerBytes: []byte{0xe3, 0x01, 0x70, 0x12, 0x20}, |
||||
wantErr: true, |
||||
}, |
||||
{ |
||||
name: "loose values for swarm, todo remove, should not fail", |
||||
headerBytes: []byte{0xe4, 0x01, 0x70, 0x12, 0x20}, |
||||
wantErr: false, |
||||
}, |
||||
{ |
||||
name: "loose values for swarm, todo remove, should not fail", |
||||
headerBytes: []byte{0xe4, 0x01, 0x99, 0x99, 0x20}, |
||||
wantErr: false, |
||||
}, |
||||
} { |
||||
t.Run(v.name, func(t *testing.T) { |
||||
const eipHash = "29f2d17be6139079dc48696d1f582a8530eb9805b561eda517e22a892c7e3f1f" |
||||
|
||||
var bb []byte |
||||
buf := make([]byte, binary.MaxVarintLen64) |
||||
for _, vv := range v.headerBytes { |
||||
n := binary.PutUvarint(buf, uint64(vv)) |
||||
bb = append(bb, buf[:n]...) |
||||
} |
||||
|
||||
h := common.HexToHash(eipHash) |
||||
bb = append(bb, h[:]...) |
||||
str := hex.EncodeToString(bb) |
||||
fmt.Println(str) |
||||
decodedHash, e := extractContentHash(bb) |
||||
switch v.wantErr { |
||||
case true: |
||||
if e == nil { |
||||
t.Fatal("the decode should fail") |
||||
} |
||||
case false: |
||||
if e != nil { |
||||
t.Fatalf("the deccode shouldnt fail: %v", e) |
||||
} |
||||
if !bytes.Equal(decodedHash[:], h[:]) { |
||||
t.Fatal("hashes not equal") |
||||
} |
||||
} |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestManuelCidEncode(t *testing.T) { |
||||
// call cid encode method with hash. expect byte slice returned, compare according to spec
|
||||
const eipHash = "29f2d17be6139079dc48696d1f582a8530eb9805b561eda517e22a892c7e3f1f" |
||||
cidBytes, err := EncodeSwarmHash(common.HexToHash(eipHash)) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// logic in extractContentHash is unit tested thoroughly
|
||||
// hence we just check that the returned hash is equal
|
||||
h, err := extractContentHash(cidBytes) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if !bytes.Equal(h[:], common.HexToHash(eipHash).Bytes()) { |
||||
t.Fatal("hashes should be equal") |
||||
} |
||||
} |
@ -1,26 +0,0 @@ |
||||
pragma solidity >=0.4.24; |
||||
|
||||
interface ENS { |
||||
|
||||
// Logged when the owner of a node assigns a new owner to a subnode. |
||||
event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner); |
||||
|
||||
// Logged when the owner of a node transfers ownership to a new account. |
||||
event Transfer(bytes32 indexed node, address owner); |
||||
|
||||
// Logged when the resolver for a node changes. |
||||
event NewResolver(bytes32 indexed node, address resolver); |
||||
|
||||
// Logged when the TTL of a node changes |
||||
event NewTTL(bytes32 indexed node, uint64 ttl); |
||||
|
||||
|
||||
function setSubnodeOwner(bytes32 node, bytes32 label, address owner) external; |
||||
function setResolver(bytes32 node, address resolver) external; |
||||
function setOwner(bytes32 node, address owner) external; |
||||
function setTTL(bytes32 node, uint64 ttl) external; |
||||
function owner(bytes32 node) external view returns (address); |
||||
function resolver(bytes32 node) external view returns (address); |
||||
function ttl(bytes32 node) external view returns (uint64); |
||||
|
||||
} |
@ -1,99 +0,0 @@ |
||||
pragma solidity ^0.5.0; |
||||
|
||||
import "./ENS.sol"; |
||||
|
||||
/** |
||||
* The ENS registry contract. |
||||
*/ |
||||
contract ENSRegistry is ENS { |
||||
struct Record { |
||||
address owner; |
||||
address resolver; |
||||
uint64 ttl; |
||||
} |
||||
|
||||
mapping (bytes32 => Record) records; |
||||
|
||||
// Permits modifications only by the owner of the specified node. |
||||
modifier only_owner(bytes32 node) { |
||||
require(records[node].owner == msg.sender); |
||||
_; |
||||
} |
||||
|
||||
/** |
||||
* @dev Constructs a new ENS registrar. |
||||
*/ |
||||
constructor() public { |
||||
records[0x0].owner = msg.sender; |
||||
} |
||||
|
||||
/** |
||||
* @dev Transfers ownership of a node to a new address. May only be called by the current owner of the node. |
||||
* @param node The node to transfer ownership of. |
||||
* @param owner The address of the new owner. |
||||
*/ |
||||
function setOwner(bytes32 node, address owner) external only_owner(node) { |
||||
emit Transfer(node, owner); |
||||
records[node].owner = owner; |
||||
} |
||||
|
||||
/** |
||||
* @dev Transfers ownership of a subnode keccak256(node, label) to a new address. May only be called by the owner of the parent node. |
||||
* @param node The parent node. |
||||
* @param label The hash of the label specifying the subnode. |
||||
* @param owner The address of the new owner. |
||||
*/ |
||||
function setSubnodeOwner(bytes32 node, bytes32 label, address owner) external only_owner(node) { |
||||
bytes32 subnode = keccak256(abi.encodePacked(node, label)); |
||||
emit NewOwner(node, label, owner); |
||||
records[subnode].owner = owner; |
||||
} |
||||
|
||||
/** |
||||
* @dev Sets the resolver address for the specified node. |
||||
* @param node The node to update. |
||||
* @param resolver The address of the resolver. |
||||
*/ |
||||
function setResolver(bytes32 node, address resolver) external only_owner(node) { |
||||
emit NewResolver(node, resolver); |
||||
records[node].resolver = resolver; |
||||
} |
||||
|
||||
/** |
||||
* @dev Sets the TTL for the specified node. |
||||
* @param node The node to update. |
||||
* @param ttl The TTL in seconds. |
||||
*/ |
||||
function setTTL(bytes32 node, uint64 ttl) external only_owner(node) { |
||||
emit NewTTL(node, ttl); |
||||
records[node].ttl = ttl; |
||||
} |
||||
|
||||
/** |
||||
* @dev Returns the address that owns the specified node. |
||||
* @param node The specified node. |
||||
* @return address of the owner. |
||||
*/ |
||||
function owner(bytes32 node) external view returns (address) { |
||||
return records[node].owner; |
||||
} |
||||
|
||||
/** |
||||
* @dev Returns the address of the resolver for the specified node. |
||||
* @param node The specified node. |
||||
* @return address of the resolver. |
||||
*/ |
||||
function resolver(bytes32 node) external view returns (address) { |
||||
return records[node].resolver; |
||||
} |
||||
|
||||
/** |
||||
* @dev Returns the TTL of a node, and any records associated with it. |
||||
* @param node The specified node. |
||||
* @return ttl of the node. |
||||
*/ |
||||
function ttl(bytes32 node) external view returns (uint64) { |
||||
return records[node].ttl; |
||||
} |
||||
|
||||
} |
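For context, the subnode id used by `setSubnodeOwner` can be derived off-chain with the same hashing; a minimal sketch (the "eth" label is only an example):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// subnode mirrors keccak256(abi.encodePacked(node, label)) from the registry above.
func subnode(node, label common.Hash) common.Hash {
	return crypto.Keccak256Hash(node.Bytes(), label.Bytes())
}

func main() {
	root := common.Hash{}                           // the 0x0 root node owned by the deployer
	ethLabel := crypto.Keccak256Hash([]byte("eth")) // hash of the label being registered
	fmt.Println(subnode(root, ethLabel).Hex())
}
```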
@ -1,36 +0,0 @@ |
||||
pragma solidity ^0.5.0; |
||||
|
||||
import "./ENS.sol"; |
||||
|
||||
/** |
||||
* A registrar that allocates subdomains to the first person to claim them. |
||||
*/ |
||||
contract FIFSRegistrar { |
||||
ENS ens; |
||||
bytes32 rootNode; |
||||
|
||||
modifier only_owner(bytes32 label) { |
||||
address currentOwner = ens.owner(keccak256(abi.encodePacked(rootNode, label))); |
||||
require(currentOwner == address(0x0) || currentOwner == msg.sender); |
||||
_; |
||||
} |
||||
|
||||
/** |
||||
* Constructor. |
||||
* @param ensAddr The address of the ENS registry. |
||||
* @param node The node that this registrar administers. |
||||
*/ |
||||
constructor(ENS ensAddr, bytes32 node) public { |
||||
ens = ensAddr; |
||||
rootNode = node; |
||||
} |
||||
|
||||
/** |
||||
* Register a name, or change the owner of an existing registration. |
||||
* @param label The hash of the label to register. |
||||
* @param owner The address of the new owner. |
||||
*/ |
||||
function register(bytes32 label, address owner) public only_owner(label) { |
||||
ens.setSubnodeOwner(rootNode, label, owner); |
||||
} |
||||
} |
@ -1,212 +0,0 @@ |
||||
pragma solidity >=0.4.25; |
||||
|
||||
import "./ENS.sol"; |
||||
|
||||
/** |
||||
* A simple resolver anyone can use; only allows the owner of a node to set its |
||||
* address. |
||||
*/ |
||||
contract PublicResolver { |
||||
|
||||
bytes4 constant INTERFACE_META_ID = 0x01ffc9a7; |
||||
bytes4 constant ADDR_INTERFACE_ID = 0x3b3b57de; |
||||
bytes4 constant NAME_INTERFACE_ID = 0x691f3431; |
||||
bytes4 constant ABI_INTERFACE_ID = 0x2203ab56; |
||||
bytes4 constant PUBKEY_INTERFACE_ID = 0xc8690233; |
||||
bytes4 constant TEXT_INTERFACE_ID = 0x59d1d43c; |
||||
bytes4 constant CONTENTHASH_INTERFACE_ID = 0xbc1c58d1; |
||||
|
||||
event AddrChanged(bytes32 indexed node, address a); |
||||
event NameChanged(bytes32 indexed node, string name); |
||||
event ABIChanged(bytes32 indexed node, uint256 indexed contentType); |
||||
event PubkeyChanged(bytes32 indexed node, bytes32 x, bytes32 y); |
||||
event TextChanged(bytes32 indexed node, string indexedKey, string key); |
||||
event ContenthashChanged(bytes32 indexed node, bytes hash); |
||||
|
||||
struct PublicKey { |
||||
bytes32 x; |
||||
bytes32 y; |
||||
} |
||||
|
||||
struct Record { |
||||
address addr; |
||||
string name; |
||||
PublicKey pubkey; |
||||
mapping(string=>string) text; |
||||
mapping(uint256=>bytes) abis; |
||||
bytes contenthash; |
||||
} |
||||
|
||||
ENS ens; |
||||
|
||||
mapping (bytes32 => Record) records; |
||||
|
||||
modifier onlyOwner(bytes32 node) { |
||||
require(ens.owner(node) == msg.sender); |
||||
_; |
||||
} |
||||
|
||||
/** |
||||
* Constructor. |
||||
* @param ensAddr The ENS registrar contract. |
||||
*/ |
||||
constructor(ENS ensAddr) public { |
||||
ens = ensAddr; |
||||
} |
||||
|
||||
/** |
||||
* Sets the address associated with an ENS node. |
||||
* May only be called by the owner of that node in the ENS registry. |
||||
* @param node The node to update. |
||||
* @param addr The address to set. |
||||
*/ |
||||
function setAddr(bytes32 node, address addr) external onlyOwner(node) { |
||||
records[node].addr = addr; |
||||
emit AddrChanged(node, addr); |
||||
} |
||||
|
||||
/** |
||||
* Sets the contenthash associated with an ENS node. |
||||
* May only be called by the owner of that node in the ENS registry. |
||||
* @param node The node to update. |
||||
* @param hash The contenthash to set |
||||
*/ |
||||
function setContenthash(bytes32 node, bytes calldata hash) external onlyOwner(node) { |
||||
records[node].contenthash = hash; |
||||
emit ContenthashChanged(node, hash); |
||||
} |
||||
|
||||
/** |
||||
* Sets the name associated with an ENS node, for reverse records. |
||||
* May only be called by the owner of that node in the ENS registry. |
||||
* @param node The node to update. |
||||
* @param name The name to set. |
||||
*/ |
||||
function setName(bytes32 node, string calldata name) external onlyOwner(node) { |
||||
records[node].name = name; |
||||
emit NameChanged(node, name); |
||||
} |
||||
|
||||
/** |
||||
* Sets the ABI associated with an ENS node. |
||||
* Nodes may have one ABI of each content type. To remove an ABI, set it to |
||||
* the empty string. |
||||
     * @param node The node to update.
     * @param contentType The content type of the ABI
     * @param data The ABI data.
     */
    function setABI(bytes32 node, uint256 contentType, bytes calldata data) external onlyOwner(node) {
        // Content types must be powers of 2
        require(((contentType - 1) & contentType) == 0);

        records[node].abis[contentType] = data;
        emit ABIChanged(node, contentType);
    }

    /**
     * Sets the SECP256k1 public key associated with an ENS node.
     * @param node The ENS node to query
     * @param x the X coordinate of the curve point for the public key.
     * @param y the Y coordinate of the curve point for the public key.
     */
    function setPubkey(bytes32 node, bytes32 x, bytes32 y) external onlyOwner(node) {
        records[node].pubkey = PublicKey(x, y);
        emit PubkeyChanged(node, x, y);
    }

    /**
     * Sets the text data associated with an ENS node and key.
     * May only be called by the owner of that node in the ENS registry.
     * @param node The node to update.
     * @param key The key to set.
     * @param value The text data value to set.
     */
    function setText(bytes32 node, string calldata key, string calldata value) external onlyOwner(node) {
        records[node].text[key] = value;
        emit TextChanged(node, key, key);
    }

    /**
     * Returns the text data associated with an ENS node and key.
     * @param node The ENS node to query.
     * @param key The text data key to query.
     * @return The associated text data.
     */
    function text(bytes32 node, string calldata key) external view returns (string memory) {
        return records[node].text[key];
    }

    /**
     * Returns the SECP256k1 public key associated with an ENS node.
     * Defined in EIP 619.
     * @param node The ENS node to query
     * @return x, y the X and Y coordinates of the curve point for the public key.
     */
    function pubkey(bytes32 node) external view returns (bytes32 x, bytes32 y) {
        return (records[node].pubkey.x, records[node].pubkey.y);
    }

    /**
     * Returns the ABI associated with an ENS node.
     * Defined in EIP205.
     * @param node The ENS node to query
     * @param contentTypes A bitwise OR of the ABI formats accepted by the caller.
     * @return contentType The content type of the return value
     * @return data The ABI data
     */
    function ABI(bytes32 node, uint256 contentTypes) external view returns (uint256, bytes memory) {
        Record storage record = records[node];

        for (uint256 contentType = 1; contentType <= contentTypes; contentType <<= 1) {
            if ((contentType & contentTypes) != 0 && record.abis[contentType].length > 0) {
                return (contentType, record.abis[contentType]);
            }
        }

        bytes memory empty;
        return (0, empty);
    }

    /**
     * Returns the name associated with an ENS node, for reverse records.
     * Defined in EIP181.
     * @param node The ENS node to query.
     * @return The associated name.
     */
    function name(bytes32 node) external view returns (string memory) {
        return records[node].name;
    }

    /**
     * Returns the address associated with an ENS node.
     * @param node The ENS node to query.
     * @return The associated address.
     */
    function addr(bytes32 node) external view returns (address) {
        return records[node].addr;
    }

    /**
     * Returns the contenthash associated with an ENS node.
     * @param node The ENS node to query.
     * @return The associated contenthash.
     */
    function contenthash(bytes32 node) external view returns (bytes memory) {
        return records[node].contenthash;
    }

    /**
     * Returns true if the resolver implements the interface specified by the provided hash.
     * @param interfaceID The ID of the interface to check for.
     * @return True if the contract implements the requested interface.
     */
    function supportsInterface(bytes4 interfaceID) external pure returns (bool) {
        return interfaceID == ADDR_INTERFACE_ID ||
               interfaceID == NAME_INTERFACE_ID ||
               interfaceID == ABI_INTERFACE_ID ||
               interfaceID == PUBKEY_INTERFACE_ID ||
               interfaceID == TEXT_INTERFACE_ID ||
               interfaceID == CONTENTHASH_INTERFACE_ID ||
               interfaceID == INTERFACE_META_ID;
    }
}
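The setABI/ABI pair above stores one payload per power-of-two content type and hands back the first encoding the caller accepts. As a rough illustration only (the function and values below are hypothetical and not part of this repository), the same selection logic in Go looks like this:

package main

import "fmt"

// abiLookup mirrors the resolver's ABI() loop: contentTypes is a bitmask of the
// encodings the caller accepts, abis maps each power-of-two content type to a payload.
// The != 0 guard stops the loop once the shift overflows uint64; the Solidity version
// uses uint256 and terminates the same way in practice.
func abiLookup(contentTypes uint64, abis map[uint64][]byte) (uint64, []byte) {
	for contentType := uint64(1); contentType != 0 && contentType <= contentTypes; contentType <<= 1 {
		if contentType&contentTypes != 0 && len(abis[contentType]) > 0 {
			return contentType, abis[contentType]
		}
	}
	return 0, nil // nothing stored in an accepted encoding
}

func main() {
	abis := map[uint64][]byte{2: []byte(`{"abi": "..."}`)} // only content type 2 is set
	ct, data := abiLookup(1|2, abis)                       // caller accepts types 1 and 2
	fmt.Println(ct, string(data))                          // prints: 2 {"abi": "..."}
}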
@ -1,892 +0,0 @@
||||
// Code generated - DO NOT EDIT.
|
||||
// This file is a generated binding and any manual changes will be lost.
|
||||
|
||||
package contract |
||||
|
||||
import ( |
||||
"math/big" |
||||
"strings" |
||||
|
||||
ethereum "github.com/ethereum/go-ethereum" |
||||
"github.com/ethereum/go-ethereum/accounts/abi" |
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
) |
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var ( |
||||
_ = big.NewInt |
||||
_ = strings.NewReader |
||||
_ = ethereum.NotFound |
||||
_ = abi.U256 |
||||
_ = bind.Bind |
||||
_ = common.Big1 |
||||
_ = types.BloomLookup |
||||
_ = event.NewSubscription |
||||
) |
||||
|
||||
// ENSABI is the input ABI used to generate the binding from.
|
||||
const ENSABI = "[{\"constant\":true,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"}],\"name\":\"resolver\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"}],\"name\":\"owner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"},{\"name\":\"label\",\"type\":\"bytes32\"},{\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"setSubnodeOwner\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"},{\"name\":\"ttl\",\"type\":\"uint64\"}],\"name\":\"setTTL\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"}],\"name\":\"ttl\",\"outputs\":[{\"name\":\"\",\"type\":\"uint64\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"},{\"name\":\"resolver\",\"type\":\"address\"}],\"name\":\"setResolver\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"},{\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"setOwner\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"node\",\"type\":\"bytes32\"},{\"indexed\":true,\"name\":\"label\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"NewOwner\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"node\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"node\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"resolver\",\"type\":\"address\"}],\"name\":\"NewResolver\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"node\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"ttl\",\"type\":\"uint64\"}],\"name\":\"NewTTL\",\"type\":\"event\"}]" |
||||
|
||||
// ENSBin is the compiled bytecode used for deploying new contracts.
|
||||
const ENSBin = `0x` |
||||
|
||||
// DeployENS deploys a new Ethereum contract, binding an instance of ENS to it.
|
||||
func DeployENS(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ENS, error) { |
||||
parsed, err := abi.JSON(strings.NewReader(ENSABI)) |
||||
if err != nil { |
||||
return common.Address{}, nil, nil, err |
||||
} |
||||
address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ENSBin), backend) |
||||
if err != nil { |
||||
return common.Address{}, nil, nil, err |
||||
} |
||||
return address, tx, &ENS{ENSCaller: ENSCaller{contract: contract}, ENSTransactor: ENSTransactor{contract: contract}, ENSFilterer: ENSFilterer{contract: contract}}, nil |
||||
} |
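
// exampleENSLookup is an illustrative sketch and not part of the generated binding:
// it shows how a caller might read the registry through this binding. The backend is
// typically an RPC client or a simulated backend; registry is the registry's address
// and node the namehash of the name being queried, both supplied by the caller.
func exampleENSLookup(backend bind.ContractBackend, registry common.Address, node [32]byte) (common.Address, common.Address, error) {
	ens, err := NewENS(registry, backend)
	if err != nil {
		return common.Address{}, common.Address{}, err
	}
	// Owner and Resolver are free, read-only calls; no transaction is sent.
	owner, err := ens.Owner(&bind.CallOpts{}, node)
	if err != nil {
		return common.Address{}, common.Address{}, err
	}
	resolver, err := ens.Resolver(&bind.CallOpts{}, node)
	return owner, resolver, err
}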
||||
|
||||
// ENS is an auto generated Go binding around an Ethereum contract.
|
||||
type ENS struct { |
||||
ENSCaller // Read-only binding to the contract
|
||||
ENSTransactor // Write-only binding to the contract
|
||||
ENSFilterer // Log filterer for contract events
|
||||
} |
||||
|
||||
// ENSCaller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
type ENSCaller struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// ENSTransactor is an auto generated write-only Go binding around an Ethereum contract.
|
||||
type ENSTransactor struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// ENSFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
|
||||
type ENSFilterer struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// ENSSession is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type ENSSession struct { |
||||
Contract *ENS // Generic contract binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
} |
||||
|
||||
// ENSCallerSession is an auto generated read-only Go binding around an Ethereum contract,
|
||||
// with pre-set call options.
|
||||
type ENSCallerSession struct { |
||||
Contract *ENSCaller // Generic contract caller binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
} |
||||
|
||||
// ENSTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
|
||||
// with pre-set transact options.
|
||||
type ENSTransactorSession struct { |
||||
Contract *ENSTransactor // Generic contract transactor binding to set the session for
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
} |
||||
|
||||
// ENSRaw is an auto generated low-level Go binding around an Ethereum contract.
|
||||
type ENSRaw struct { |
||||
Contract *ENS // Generic contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// ENSCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
|
||||
type ENSCallerRaw struct { |
||||
Contract *ENSCaller // Generic read-only contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// ENSTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
|
||||
type ENSTransactorRaw struct { |
||||
Contract *ENSTransactor // Generic write-only contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// NewENS creates a new instance of ENS, bound to a specific deployed contract.
|
||||
func NewENS(address common.Address, backend bind.ContractBackend) (*ENS, error) { |
||||
contract, err := bindENS(address, backend, backend, backend) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENS{ENSCaller: ENSCaller{contract: contract}, ENSTransactor: ENSTransactor{contract: contract}, ENSFilterer: ENSFilterer{contract: contract}}, nil |
||||
} |
||||
|
||||
// NewENSCaller creates a new read-only instance of ENS, bound to a specific deployed contract.
|
||||
func NewENSCaller(address common.Address, caller bind.ContractCaller) (*ENSCaller, error) { |
||||
contract, err := bindENS(address, caller, nil, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSCaller{contract: contract}, nil |
||||
} |
||||
|
||||
// NewENSTransactor creates a new write-only instance of ENS, bound to a specific deployed contract.
|
||||
func NewENSTransactor(address common.Address, transactor bind.ContractTransactor) (*ENSTransactor, error) { |
||||
contract, err := bindENS(address, nil, transactor, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSTransactor{contract: contract}, nil |
||||
} |
||||
|
||||
// NewENSFilterer creates a new log filterer instance of ENS, bound to a specific deployed contract.
|
||||
func NewENSFilterer(address common.Address, filterer bind.ContractFilterer) (*ENSFilterer, error) { |
||||
contract, err := bindENS(address, nil, nil, filterer) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSFilterer{contract: contract}, nil |
||||
} |
||||
|
||||
// bindENS binds a generic wrapper to an already deployed contract.
|
||||
func bindENS(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { |
||||
parsed, err := abi.JSON(strings.NewReader(ENSABI)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil |
||||
} |
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_ENS *ENSRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { |
||||
return _ENS.Contract.ENSCaller.contract.Call(opts, result, method, params...) |
||||
} |
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_ENS *ENSRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { |
||||
return _ENS.Contract.ENSTransactor.contract.Transfer(opts) |
||||
} |
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_ENS *ENSRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { |
||||
return _ENS.Contract.ENSTransactor.contract.Transact(opts, method, params...) |
||||
} |
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_ENS *ENSCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { |
||||
return _ENS.Contract.contract.Call(opts, result, method, params...) |
||||
} |
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_ENS *ENSTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { |
||||
return _ENS.Contract.contract.Transfer(opts) |
||||
} |
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_ENS *ENSTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { |
||||
return _ENS.Contract.contract.Transact(opts, method, params...) |
||||
} |
||||
|
||||
// Owner is a free data retrieval call binding the contract method 0x02571be3.
|
||||
//
|
||||
// Solidity: function owner(bytes32 node) constant returns(address)
|
||||
func (_ENS *ENSCaller) Owner(opts *bind.CallOpts, node [32]byte) (common.Address, error) { |
||||
var ( |
||||
ret0 = new(common.Address) |
||||
) |
||||
out := ret0 |
||||
err := _ENS.contract.Call(opts, out, "owner", node) |
||||
return *ret0, err |
||||
} |
||||
|
||||
// Owner is a free data retrieval call binding the contract method 0x02571be3.
|
||||
//
|
||||
// Solidity: function owner(bytes32 node) constant returns(address)
|
||||
func (_ENS *ENSSession) Owner(node [32]byte) (common.Address, error) { |
||||
return _ENS.Contract.Owner(&_ENS.CallOpts, node) |
||||
} |
||||
|
||||
// Owner is a free data retrieval call binding the contract method 0x02571be3.
|
||||
//
|
||||
// Solidity: function owner(bytes32 node) constant returns(address)
|
||||
func (_ENS *ENSCallerSession) Owner(node [32]byte) (common.Address, error) { |
||||
return _ENS.Contract.Owner(&_ENS.CallOpts, node) |
||||
} |
||||
|
||||
// Resolver is a free data retrieval call binding the contract method 0x0178b8bf.
|
||||
//
|
||||
// Solidity: function resolver(bytes32 node) constant returns(address)
|
||||
func (_ENS *ENSCaller) Resolver(opts *bind.CallOpts, node [32]byte) (common.Address, error) { |
||||
var ( |
||||
ret0 = new(common.Address) |
||||
) |
||||
out := ret0 |
||||
err := _ENS.contract.Call(opts, out, "resolver", node) |
||||
return *ret0, err |
||||
} |
||||
|
||||
// Resolver is a free data retrieval call binding the contract method 0x0178b8bf.
|
||||
//
|
||||
// Solidity: function resolver(bytes32 node) constant returns(address)
|
||||
func (_ENS *ENSSession) Resolver(node [32]byte) (common.Address, error) { |
||||
return _ENS.Contract.Resolver(&_ENS.CallOpts, node) |
||||
} |
||||
|
||||
// Resolver is a free data retrieval call binding the contract method 0x0178b8bf.
|
||||
//
|
||||
// Solidity: function resolver(bytes32 node) constant returns(address)
|
||||
func (_ENS *ENSCallerSession) Resolver(node [32]byte) (common.Address, error) { |
||||
return _ENS.Contract.Resolver(&_ENS.CallOpts, node) |
||||
} |
||||
|
||||
// Ttl is a free data retrieval call binding the contract method 0x16a25cbd.
|
||||
//
|
||||
// Solidity: function ttl(bytes32 node) constant returns(uint64)
|
||||
func (_ENS *ENSCaller) Ttl(opts *bind.CallOpts, node [32]byte) (uint64, error) { |
||||
var ( |
||||
ret0 = new(uint64) |
||||
) |
||||
out := ret0 |
||||
err := _ENS.contract.Call(opts, out, "ttl", node) |
||||
return *ret0, err |
||||
} |
||||
|
||||
// Ttl is a free data retrieval call binding the contract method 0x16a25cbd.
|
||||
//
|
||||
// Solidity: function ttl(bytes32 node) constant returns(uint64)
|
||||
func (_ENS *ENSSession) Ttl(node [32]byte) (uint64, error) { |
||||
return _ENS.Contract.Ttl(&_ENS.CallOpts, node) |
||||
} |
||||
|
||||
// Ttl is a free data retrieval call binding the contract method 0x16a25cbd.
|
||||
//
|
||||
// Solidity: function ttl(bytes32 node) constant returns(uint64)
|
||||
func (_ENS *ENSCallerSession) Ttl(node [32]byte) (uint64, error) { |
||||
return _ENS.Contract.Ttl(&_ENS.CallOpts, node) |
||||
} |
||||
|
||||
// SetOwner is a paid mutator transaction binding the contract method 0x5b0fc9c3.
|
||||
//
|
||||
// Solidity: function setOwner(bytes32 node, address owner) returns()
|
||||
func (_ENS *ENSTransactor) SetOwner(opts *bind.TransactOpts, node [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENS.contract.Transact(opts, "setOwner", node, owner) |
||||
} |
||||
|
||||
// SetOwner is a paid mutator transaction binding the contract method 0x5b0fc9c3.
|
||||
//
|
||||
// Solidity: function setOwner(bytes32 node, address owner) returns()
|
||||
func (_ENS *ENSSession) SetOwner(node [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENS.Contract.SetOwner(&_ENS.TransactOpts, node, owner) |
||||
} |
||||
|
||||
// SetOwner is a paid mutator transaction binding the contract method 0x5b0fc9c3.
|
||||
//
|
||||
// Solidity: function setOwner(bytes32 node, address owner) returns()
|
||||
func (_ENS *ENSTransactorSession) SetOwner(node [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENS.Contract.SetOwner(&_ENS.TransactOpts, node, owner) |
||||
} |
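
// exampleENSSession is an illustrative sketch and not part of the generated binding:
// an ENSSession pins a bound contract to fixed call/transact options so the per-call
// opts arguments can be omitted. auth, node and newOwner are supplied by the caller.
func exampleENSSession(ens *ENS, auth *bind.TransactOpts, node [32]byte, newOwner common.Address) (*types.Transaction, error) {
	session := &ENSSession{
		Contract:     ens,
		CallOpts:     bind.CallOpts{},
		TransactOpts: *auth,
	}
	if _, err := session.Owner(node); err != nil { // read with the preset CallOpts
		return nil, err
	}
	return session.SetOwner(node, newOwner) // transaction signed with the preset TransactOpts
}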
||||
|
||||
// SetResolver is a paid mutator transaction binding the contract method 0x1896f70a.
|
||||
//
|
||||
// Solidity: function setResolver(bytes32 node, address resolver) returns()
|
||||
func (_ENS *ENSTransactor) SetResolver(opts *bind.TransactOpts, node [32]byte, resolver common.Address) (*types.Transaction, error) { |
||||
return _ENS.contract.Transact(opts, "setResolver", node, resolver) |
||||
} |
||||
|
||||
// SetResolver is a paid mutator transaction binding the contract method 0x1896f70a.
|
||||
//
|
||||
// Solidity: function setResolver(bytes32 node, address resolver) returns()
|
||||
func (_ENS *ENSSession) SetResolver(node [32]byte, resolver common.Address) (*types.Transaction, error) { |
||||
return _ENS.Contract.SetResolver(&_ENS.TransactOpts, node, resolver) |
||||
} |
||||
|
||||
// SetResolver is a paid mutator transaction binding the contract method 0x1896f70a.
|
||||
//
|
||||
// Solidity: function setResolver(bytes32 node, address resolver) returns()
|
||||
func (_ENS *ENSTransactorSession) SetResolver(node [32]byte, resolver common.Address) (*types.Transaction, error) { |
||||
return _ENS.Contract.SetResolver(&_ENS.TransactOpts, node, resolver) |
||||
} |
||||
|
||||
// SetSubnodeOwner is a paid mutator transaction binding the contract method 0x06ab5923.
|
||||
//
|
||||
// Solidity: function setSubnodeOwner(bytes32 node, bytes32 label, address owner) returns()
|
||||
func (_ENS *ENSTransactor) SetSubnodeOwner(opts *bind.TransactOpts, node [32]byte, label [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENS.contract.Transact(opts, "setSubnodeOwner", node, label, owner) |
||||
} |
||||
|
||||
// SetSubnodeOwner is a paid mutator transaction binding the contract method 0x06ab5923.
|
||||
//
|
||||
// Solidity: function setSubnodeOwner(bytes32 node, bytes32 label, address owner) returns()
|
||||
func (_ENS *ENSSession) SetSubnodeOwner(node [32]byte, label [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENS.Contract.SetSubnodeOwner(&_ENS.TransactOpts, node, label, owner) |
||||
} |
||||
|
||||
// SetSubnodeOwner is a paid mutator transaction binding the contract method 0x06ab5923.
|
||||
//
|
||||
// Solidity: function setSubnodeOwner(bytes32 node, bytes32 label, address owner) returns()
|
||||
func (_ENS *ENSTransactorSession) SetSubnodeOwner(node [32]byte, label [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENS.Contract.SetSubnodeOwner(&_ENS.TransactOpts, node, label, owner) |
||||
} |
||||
|
||||
// SetTTL is a paid mutator transaction binding the contract method 0x14ab9038.
|
||||
//
|
||||
// Solidity: function setTTL(bytes32 node, uint64 ttl) returns()
|
||||
func (_ENS *ENSTransactor) SetTTL(opts *bind.TransactOpts, node [32]byte, ttl uint64) (*types.Transaction, error) { |
||||
return _ENS.contract.Transact(opts, "setTTL", node, ttl) |
||||
} |
||||
|
||||
// SetTTL is a paid mutator transaction binding the contract method 0x14ab9038.
|
||||
//
|
||||
// Solidity: function setTTL(bytes32 node, uint64 ttl) returns()
|
||||
func (_ENS *ENSSession) SetTTL(node [32]byte, ttl uint64) (*types.Transaction, error) { |
||||
return _ENS.Contract.SetTTL(&_ENS.TransactOpts, node, ttl) |
||||
} |
||||
|
||||
// SetTTL is a paid mutator transaction binding the contract method 0x14ab9038.
|
||||
//
|
||||
// Solidity: function setTTL(bytes32 node, uint64 ttl) returns()
|
||||
func (_ENS *ENSTransactorSession) SetTTL(node [32]byte, ttl uint64) (*types.Transaction, error) { |
||||
return _ENS.Contract.SetTTL(&_ENS.TransactOpts, node, ttl) |
||||
} |
||||
|
||||
// ENSNewOwnerIterator is returned from FilterNewOwner and is used to iterate over the raw logs and unpacked data for NewOwner events raised by the ENS contract.
|
||||
type ENSNewOwnerIterator struct { |
||||
Event *ENSNewOwner // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
} |
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ENSNewOwnerIterator) Next() bool { |
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil { |
||||
return false |
||||
} |
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done { |
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSNewOwner) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
default: |
||||
return false |
||||
} |
||||
} |
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSNewOwner) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
case err := <-it.sub.Err(): |
||||
it.done = true |
||||
it.fail = err |
||||
return it.Next() |
||||
} |
||||
} |
||||
|
||||
// Error returns any retrieval or parsing error occurred during filtering.
|
||||
func (it *ENSNewOwnerIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ENSNewOwnerIterator) Close() error { |
||||
it.sub.Unsubscribe() |
||||
return nil |
||||
} |
||||
|
||||
// ENSNewOwner represents a NewOwner event raised by the ENS contract.
|
||||
type ENSNewOwner struct { |
||||
Node [32]byte |
||||
Label [32]byte |
||||
Owner common.Address |
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
} |
||||
|
||||
// FilterNewOwner is a free log retrieval operation binding the contract event 0xce0457fe73731f824cc272376169235128c118b49d344817417c6d108d155e82.
|
||||
//
|
||||
// Solidity: event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner)
|
||||
func (_ENS *ENSFilterer) FilterNewOwner(opts *bind.FilterOpts, node [][32]byte, label [][32]byte) (*ENSNewOwnerIterator, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
var labelRule []interface{} |
||||
for _, labelItem := range label { |
||||
labelRule = append(labelRule, labelItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENS.contract.FilterLogs(opts, "NewOwner", nodeRule, labelRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSNewOwnerIterator{contract: _ENS.contract, event: "NewOwner", logs: logs, sub: sub}, nil |
||||
} |
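
// exampleFilterNewOwner is an illustrative sketch and not part of the generated binding:
// it drains the iterator returned by FilterNewOwner over historical logs. Passing nil
// for the node and label slices means "do not filter on that indexed argument".
func exampleFilterNewOwner(filterer *ENSFilterer, fromBlock uint64) error {
	it, err := filterer.FilterNewOwner(&bind.FilterOpts{Start: fromBlock}, nil, nil)
	if err != nil {
		return err
	}
	defer it.Close()
	for it.Next() {
		_ = it.Event // it.Event.Node, it.Event.Label and it.Event.Owner hold the decoded log
	}
	return it.Error() // non-nil if retrieval or decoding failed mid-iteration
}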
||||
|
||||
// WatchNewOwner is a free log subscription operation binding the contract event 0xce0457fe73731f824cc272376169235128c118b49d344817417c6d108d155e82.
|
||||
//
|
||||
// Solidity: event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner)
|
||||
func (_ENS *ENSFilterer) WatchNewOwner(opts *bind.WatchOpts, sink chan<- *ENSNewOwner, node [][32]byte, label [][32]byte) (event.Subscription, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
var labelRule []interface{} |
||||
for _, labelItem := range label { |
||||
labelRule = append(labelRule, labelItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENS.contract.WatchLogs(opts, "NewOwner", nodeRule, labelRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return event.NewSubscription(func(quit <-chan struct{}) error { |
||||
defer sub.Unsubscribe() |
||||
for { |
||||
select { |
||||
case log := <-logs: |
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ENSNewOwner) |
||||
if err := _ENS.contract.UnpackLog(event, "NewOwner", log); err != nil { |
||||
return err |
||||
} |
||||
event.Raw = log |
||||
|
||||
select { |
||||
case sink <- event: |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
} |
||||
}), nil |
||||
} |
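
// exampleWatchNewOwner is an illustrative sketch and not part of the generated binding:
// it consumes a live NewOwner subscription. Decoded events arrive on the sink channel
// until Unsubscribe is called or the subscription fails.
func exampleWatchNewOwner(filterer *ENSFilterer) error {
	sink := make(chan *ENSNewOwner, 16)
	sub, err := filterer.WatchNewOwner(&bind.WatchOpts{}, sink, nil, nil)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-sink:
			_ = ev // handle ev.Node, ev.Label, ev.Owner as they arrive
		case err := <-sub.Err():
			return err // subscription terminated, e.g. the backend disconnected
		}
	}
}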
||||
|
||||
// ENSNewResolverIterator is returned from FilterNewResolver and is used to iterate over the raw logs and unpacked data for NewResolver events raised by the ENS contract.
|
||||
type ENSNewResolverIterator struct { |
||||
Event *ENSNewResolver // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
} |
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ENSNewResolverIterator) Next() bool { |
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil { |
||||
return false |
||||
} |
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done { |
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSNewResolver) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
default: |
||||
return false |
||||
} |
||||
} |
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSNewResolver) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
case err := <-it.sub.Err(): |
||||
it.done = true |
||||
it.fail = err |
||||
return it.Next() |
||||
} |
||||
} |
||||
|
||||
// Error returns any retrieval or parsing error occurred during filtering.
|
||||
func (it *ENSNewResolverIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ENSNewResolverIterator) Close() error { |
||||
it.sub.Unsubscribe() |
||||
return nil |
||||
} |
||||
|
||||
// ENSNewResolver represents a NewResolver event raised by the ENS contract.
|
||||
type ENSNewResolver struct { |
||||
Node [32]byte |
||||
Resolver common.Address |
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
} |
||||
|
||||
// FilterNewResolver is a free log retrieval operation binding the contract event 0x335721b01866dc23fbee8b6b2c7b1e14d6f05c28cd35a2c934239f94095602a0.
|
||||
//
|
||||
// Solidity: event NewResolver(bytes32 indexed node, address resolver)
|
||||
func (_ENS *ENSFilterer) FilterNewResolver(opts *bind.FilterOpts, node [][32]byte) (*ENSNewResolverIterator, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENS.contract.FilterLogs(opts, "NewResolver", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSNewResolverIterator{contract: _ENS.contract, event: "NewResolver", logs: logs, sub: sub}, nil |
||||
} |
||||
|
||||
// WatchNewResolver is a free log subscription operation binding the contract event 0x335721b01866dc23fbee8b6b2c7b1e14d6f05c28cd35a2c934239f94095602a0.
|
||||
//
|
||||
// Solidity: event NewResolver(bytes32 indexed node, address resolver)
|
||||
func (_ENS *ENSFilterer) WatchNewResolver(opts *bind.WatchOpts, sink chan<- *ENSNewResolver, node [][32]byte) (event.Subscription, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENS.contract.WatchLogs(opts, "NewResolver", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return event.NewSubscription(func(quit <-chan struct{}) error { |
||||
defer sub.Unsubscribe() |
||||
for { |
||||
select { |
||||
case log := <-logs: |
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ENSNewResolver) |
||||
if err := _ENS.contract.UnpackLog(event, "NewResolver", log); err != nil { |
||||
return err |
||||
} |
||||
event.Raw = log |
||||
|
||||
select { |
||||
case sink <- event: |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
} |
||||
}), nil |
||||
} |
||||
|
||||
// ENSNewTTLIterator is returned from FilterNewTTL and is used to iterate over the raw logs and unpacked data for NewTTL events raised by the ENS contract.
|
||||
type ENSNewTTLIterator struct { |
||||
Event *ENSNewTTL // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
} |
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ENSNewTTLIterator) Next() bool { |
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil { |
||||
return false |
||||
} |
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done { |
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSNewTTL) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
default: |
||||
return false |
||||
} |
||||
} |
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSNewTTL) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
case err := <-it.sub.Err(): |
||||
it.done = true |
||||
it.fail = err |
||||
return it.Next() |
||||
} |
||||
} |
||||
|
||||
// Error returns any retrieval or parsing error occurred during filtering.
|
||||
func (it *ENSNewTTLIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ENSNewTTLIterator) Close() error { |
||||
it.sub.Unsubscribe() |
||||
return nil |
||||
} |
||||
|
||||
// ENSNewTTL represents a NewTTL event raised by the ENS contract.
|
||||
type ENSNewTTL struct { |
||||
Node [32]byte |
||||
Ttl uint64 |
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
} |
||||
|
||||
// FilterNewTTL is a free log retrieval operation binding the contract event 0x1d4f9bbfc9cab89d66e1a1562f2233ccbf1308cb4f63de2ead5787adddb8fa68.
|
||||
//
|
||||
// Solidity: event NewTTL(bytes32 indexed node, uint64 ttl)
|
||||
func (_ENS *ENSFilterer) FilterNewTTL(opts *bind.FilterOpts, node [][32]byte) (*ENSNewTTLIterator, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENS.contract.FilterLogs(opts, "NewTTL", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSNewTTLIterator{contract: _ENS.contract, event: "NewTTL", logs: logs, sub: sub}, nil |
||||
} |
||||
|
||||
// WatchNewTTL is a free log subscription operation binding the contract event 0x1d4f9bbfc9cab89d66e1a1562f2233ccbf1308cb4f63de2ead5787adddb8fa68.
|
||||
//
|
||||
// Solidity: event NewTTL(bytes32 indexed node, uint64 ttl)
|
||||
func (_ENS *ENSFilterer) WatchNewTTL(opts *bind.WatchOpts, sink chan<- *ENSNewTTL, node [][32]byte) (event.Subscription, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENS.contract.WatchLogs(opts, "NewTTL", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return event.NewSubscription(func(quit <-chan struct{}) error { |
||||
defer sub.Unsubscribe() |
||||
for { |
||||
select { |
||||
case log := <-logs: |
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ENSNewTTL) |
||||
if err := _ENS.contract.UnpackLog(event, "NewTTL", log); err != nil { |
||||
return err |
||||
} |
||||
event.Raw = log |
||||
|
||||
select { |
||||
case sink <- event: |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
} |
||||
}), nil |
||||
} |
||||
|
||||
// ENSTransferIterator is returned from FilterTransfer and is used to iterate over the raw logs and unpacked data for Transfer events raised by the ENS contract.
|
||||
type ENSTransferIterator struct { |
||||
Event *ENSTransfer // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
} |
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ENSTransferIterator) Next() bool { |
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil { |
||||
return false |
||||
} |
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done { |
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSTransfer) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
default: |
||||
return false |
||||
} |
||||
} |
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSTransfer) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
case err := <-it.sub.Err(): |
||||
it.done = true |
||||
it.fail = err |
||||
return it.Next() |
||||
} |
||||
} |
||||
|
||||
// Error returns any retrieval or parsing error occurred during filtering.
|
||||
func (it *ENSTransferIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ENSTransferIterator) Close() error { |
||||
it.sub.Unsubscribe() |
||||
return nil |
||||
} |
||||
|
||||
// ENSTransfer represents a Transfer event raised by the ENS contract.
|
||||
type ENSTransfer struct { |
||||
Node [32]byte |
||||
Owner common.Address |
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
} |
||||
|
||||
// FilterTransfer is a free log retrieval operation binding the contract event 0xd4735d920b0f87494915f556dd9b54c8f309026070caea5c737245152564d266.
|
||||
//
|
||||
// Solidity: event Transfer(bytes32 indexed node, address owner)
|
||||
func (_ENS *ENSFilterer) FilterTransfer(opts *bind.FilterOpts, node [][32]byte) (*ENSTransferIterator, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENS.contract.FilterLogs(opts, "Transfer", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSTransferIterator{contract: _ENS.contract, event: "Transfer", logs: logs, sub: sub}, nil |
||||
} |
||||
|
||||
// WatchTransfer is a free log subscription operation binding the contract event 0xd4735d920b0f87494915f556dd9b54c8f309026070caea5c737245152564d266.
|
||||
//
|
||||
// Solidity: event Transfer(bytes32 indexed node, address owner)
|
||||
func (_ENS *ENSFilterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *ENSTransfer, node [][32]byte) (event.Subscription, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENS.contract.WatchLogs(opts, "Transfer", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return event.NewSubscription(func(quit <-chan struct{}) error { |
||||
defer sub.Unsubscribe() |
||||
for { |
||||
select { |
||||
case log := <-logs: |
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ENSTransfer) |
||||
if err := _ENS.contract.UnpackLog(event, "Transfer", log); err != nil { |
||||
return err |
||||
} |
||||
event.Raw = log |
||||
|
||||
select { |
||||
case sink <- event: |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
} |
||||
}), nil |
||||
} |
@ -1,892 +0,0 @@
||||
// Code generated - DO NOT EDIT.
|
||||
// This file is a generated binding and any manual changes will be lost.
|
||||
|
||||
package contract |
||||
|
||||
import ( |
||||
"math/big" |
||||
"strings" |
||||
|
||||
ethereum "github.com/ethereum/go-ethereum" |
||||
"github.com/ethereum/go-ethereum/accounts/abi" |
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
) |
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var ( |
||||
_ = big.NewInt |
||||
_ = strings.NewReader |
||||
_ = ethereum.NotFound |
||||
_ = abi.U256 |
||||
_ = bind.Bind |
||||
_ = common.Big1 |
||||
_ = types.BloomLookup |
||||
_ = event.NewSubscription |
||||
) |
||||
|
||||
// ENSRegistryABI is the input ABI used to generate the binding from.
|
||||
const ENSRegistryABI = "[{\"constant\":true,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"}],\"name\":\"resolver\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"}],\"name\":\"owner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"},{\"name\":\"label\",\"type\":\"bytes32\"},{\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"setSubnodeOwner\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"},{\"name\":\"ttl\",\"type\":\"uint64\"}],\"name\":\"setTTL\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"}],\"name\":\"ttl\",\"outputs\":[{\"name\":\"\",\"type\":\"uint64\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"},{\"name\":\"resolver\",\"type\":\"address\"}],\"name\":\"setResolver\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"node\",\"type\":\"bytes32\"},{\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"setOwner\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"node\",\"type\":\"bytes32\"},{\"indexed\":true,\"name\":\"label\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"NewOwner\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"node\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"node\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"resolver\",\"type\":\"address\"}],\"name\":\"NewResolver\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"node\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"ttl\",\"type\":\"uint64\"}],\"name\":\"NewTTL\",\"type\":\"event\"}]" |
||||
|
||||
// ENSRegistryBin is the compiled bytecode used for deploying new contracts.
|
||||
const ENSRegistryBin = `0x608060405234801561001057600080fd5b5060008080526020527fad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb58054600160a060020a0319163317905561059d806100596000396000f3fe6080604052600436106100825763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416630178b8bf811461008757806302571be3146100cd57806306ab5923146100f757806314ab90381461013857806316a25cbd146101725780631896f70a146101b95780635b0fc9c3146101f2575b600080fd5b34801561009357600080fd5b506100b1600480360360208110156100aa57600080fd5b503561022b565b60408051600160a060020a039092168252519081900360200190f35b3480156100d957600080fd5b506100b1600480360360208110156100f057600080fd5b5035610249565b34801561010357600080fd5b506101366004803603606081101561011a57600080fd5b5080359060208101359060400135600160a060020a0316610264565b005b34801561014457600080fd5b506101366004803603604081101561015b57600080fd5b508035906020013567ffffffffffffffff1661032e565b34801561017e57600080fd5b5061019c6004803603602081101561019557600080fd5b50356103f7565b6040805167ffffffffffffffff9092168252519081900360200190f35b3480156101c557600080fd5b50610136600480360360408110156101dc57600080fd5b5080359060200135600160a060020a031661042e565b3480156101fe57600080fd5b506101366004803603604081101561021557600080fd5b5080359060200135600160a060020a03166104d1565b600090815260208190526040902060010154600160a060020a031690565b600090815260208190526040902054600160a060020a031690565b6000838152602081905260409020548390600160a060020a0316331461028957600080fd5b6040805160208082018790528183018690528251808303840181526060830180855281519190920120600160a060020a0386169091529151859187917fce0457fe73731f824cc272376169235128c118b49d344817417c6d108d155e829181900360800190a36000908152602081905260409020805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a039390931692909217909155505050565b6000828152602081905260409020548290600160a060020a0316331461035357600080fd5b6040805167ffffffffffffffff84168152905184917f1d4f9bbfc9cab89d66e1a1562f2233ccbf1308cb4f63de2ead5787adddb8fa68919081900360200190a250600091825260208290526040909120600101805467ffffffffffffffff90921674010000000000000000000000000000000000000000027fffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff909216919091179055565b60009081526020819052604090206001015474010000000000000000000000000000000000000000900467ffffffffffffffff1690565b6000828152602081905260409020548290600160a060020a0316331461045357600080fd5b60408051600160a060020a0384168152905184917f335721b01866dc23fbee8b6b2c7b1e14d6f05c28cd35a2c934239f94095602a0919081900360200190a250600091825260208290526040909120600101805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a03909216919091179055565b6000828152602081905260409020548290600160a060020a031633146104f657600080fd5b60408051600160a060020a0384168152905184917fd4735d920b0f87494915f556dd9b54c8f309026070caea5c737245152564d266919081900360200190a250600091825260208290526040909120805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0390921691909117905556fea165627a7a723058208be97eda88107945616fbd44aa4f2f1ce188b1a930a4bc5f8e1fb7924395d1650029` |
||||
|
||||
// DeployENSRegistry deploys a new Ethereum contract, binding an instance of ENSRegistry to it.
|
||||
func DeployENSRegistry(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ENSRegistry, error) { |
||||
parsed, err := abi.JSON(strings.NewReader(ENSRegistryABI)) |
||||
if err != nil { |
||||
return common.Address{}, nil, nil, err |
||||
} |
||||
address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ENSRegistryBin), backend) |
||||
if err != nil { |
||||
return common.Address{}, nil, nil, err |
||||
} |
||||
return address, tx, &ENSRegistry{ENSRegistryCaller: ENSRegistryCaller{contract: contract}, ENSRegistryTransactor: ENSRegistryTransactor{contract: contract}, ENSRegistryFilterer: ENSRegistryFilterer{contract: contract}}, nil |
||||
} |
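
// exampleDeployENSRegistry is an illustrative sketch and not part of the generated binding:
// auth (a funded transactor) and backend are supplied by the caller, for example from
// bind.NewKeyedTransactor and an RPC or simulated backend. The returned contract is only
// usable once the deployment transaction has been mined.
func exampleDeployENSRegistry(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *ENSRegistry, error) {
	addr, tx, registry, err := DeployENSRegistry(auth, backend)
	if err != nil {
		return common.Address{}, nil, err
	}
	_ = tx // the deployment transaction; callers usually wait for its receipt
	return addr, registry, nil
}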
||||
|
||||
// ENSRegistry is an auto generated Go binding around an Ethereum contract.
|
||||
type ENSRegistry struct { |
||||
ENSRegistryCaller // Read-only binding to the contract
|
||||
ENSRegistryTransactor // Write-only binding to the contract
|
||||
ENSRegistryFilterer // Log filterer for contract events
|
||||
} |
||||
|
||||
// ENSRegistryCaller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
type ENSRegistryCaller struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// ENSRegistryTransactor is an auto generated write-only Go binding around an Ethereum contract.
|
||||
type ENSRegistryTransactor struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// ENSRegistryFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
|
||||
type ENSRegistryFilterer struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// ENSRegistrySession is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type ENSRegistrySession struct { |
||||
Contract *ENSRegistry // Generic contract binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
} |
||||
|
||||
// ENSRegistryCallerSession is an auto generated read-only Go binding around an Ethereum contract,
|
||||
// with pre-set call options.
|
||||
type ENSRegistryCallerSession struct { |
||||
Contract *ENSRegistryCaller // Generic contract caller binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
} |
||||
|
||||
// ENSRegistryTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
|
||||
// with pre-set transact options.
|
||||
type ENSRegistryTransactorSession struct { |
||||
Contract *ENSRegistryTransactor // Generic contract transactor binding to set the session for
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
} |
||||
|
||||
// ENSRegistryRaw is an auto generated low-level Go binding around an Ethereum contract.
|
||||
type ENSRegistryRaw struct { |
||||
Contract *ENSRegistry // Generic contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// ENSRegistryCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
|
||||
type ENSRegistryCallerRaw struct { |
||||
Contract *ENSRegistryCaller // Generic read-only contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// ENSRegistryTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
|
||||
type ENSRegistryTransactorRaw struct { |
||||
Contract *ENSRegistryTransactor // Generic write-only contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// NewENSRegistry creates a new instance of ENSRegistry, bound to a specific deployed contract.
|
||||
func NewENSRegistry(address common.Address, backend bind.ContractBackend) (*ENSRegistry, error) { |
||||
contract, err := bindENSRegistry(address, backend, backend, backend) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSRegistry{ENSRegistryCaller: ENSRegistryCaller{contract: contract}, ENSRegistryTransactor: ENSRegistryTransactor{contract: contract}, ENSRegistryFilterer: ENSRegistryFilterer{contract: contract}}, nil |
||||
} |
||||
|
||||
// NewENSRegistryCaller creates a new read-only instance of ENSRegistry, bound to a specific deployed contract.
|
||||
func NewENSRegistryCaller(address common.Address, caller bind.ContractCaller) (*ENSRegistryCaller, error) { |
||||
contract, err := bindENSRegistry(address, caller, nil, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSRegistryCaller{contract: contract}, nil |
||||
} |
||||
|
||||
// NewENSRegistryTransactor creates a new write-only instance of ENSRegistry, bound to a specific deployed contract.
|
||||
func NewENSRegistryTransactor(address common.Address, transactor bind.ContractTransactor) (*ENSRegistryTransactor, error) { |
||||
contract, err := bindENSRegistry(address, nil, transactor, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSRegistryTransactor{contract: contract}, nil |
||||
} |
||||
|
||||
// NewENSRegistryFilterer creates a new log filterer instance of ENSRegistry, bound to a specific deployed contract.
|
||||
func NewENSRegistryFilterer(address common.Address, filterer bind.ContractFilterer) (*ENSRegistryFilterer, error) { |
||||
contract, err := bindENSRegistry(address, nil, nil, filterer) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSRegistryFilterer{contract: contract}, nil |
||||
} |
||||
|
||||
// bindENSRegistry binds a generic wrapper to an already deployed contract.
|
||||
func bindENSRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { |
||||
parsed, err := abi.JSON(strings.NewReader(ENSRegistryABI)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil |
||||
} |
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_ENSRegistry *ENSRegistryRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { |
||||
return _ENSRegistry.Contract.ENSRegistryCaller.contract.Call(opts, result, method, params...) |
||||
} |
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_ENSRegistry *ENSRegistryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.ENSRegistryTransactor.contract.Transfer(opts) |
||||
} |
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_ENSRegistry *ENSRegistryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.ENSRegistryTransactor.contract.Transact(opts, method, params...) |
||||
} |
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_ENSRegistry *ENSRegistryCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { |
||||
return _ENSRegistry.Contract.contract.Call(opts, result, method, params...) |
||||
} |
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_ENSRegistry *ENSRegistryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.contract.Transfer(opts) |
||||
} |
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_ENSRegistry *ENSRegistryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.contract.Transact(opts, method, params...) |
||||
} |
||||
|
||||
// Owner is a free data retrieval call binding the contract method 0x02571be3.
|
||||
//
|
||||
// Solidity: function owner(bytes32 node) constant returns(address)
|
||||
func (_ENSRegistry *ENSRegistryCaller) Owner(opts *bind.CallOpts, node [32]byte) (common.Address, error) { |
||||
var ( |
||||
ret0 = new(common.Address) |
||||
) |
||||
out := ret0 |
||||
err := _ENSRegistry.contract.Call(opts, out, "owner", node) |
||||
return *ret0, err |
||||
} |
||||
|
||||
// Owner is a free data retrieval call binding the contract method 0x02571be3.
|
||||
//
|
||||
// Solidity: function owner(bytes32 node) constant returns(address)
|
||||
func (_ENSRegistry *ENSRegistrySession) Owner(node [32]byte) (common.Address, error) { |
||||
return _ENSRegistry.Contract.Owner(&_ENSRegistry.CallOpts, node) |
||||
} |
||||
|
||||
// Owner is a free data retrieval call binding the contract method 0x02571be3.
|
||||
//
|
||||
// Solidity: function owner(bytes32 node) constant returns(address)
|
||||
func (_ENSRegistry *ENSRegistryCallerSession) Owner(node [32]byte) (common.Address, error) { |
||||
return _ENSRegistry.Contract.Owner(&_ENSRegistry.CallOpts, node) |
||||
} |
||||
|
||||
// Resolver is a free data retrieval call binding the contract method 0x0178b8bf.
|
||||
//
|
||||
// Solidity: function resolver(bytes32 node) constant returns(address)
|
||||
func (_ENSRegistry *ENSRegistryCaller) Resolver(opts *bind.CallOpts, node [32]byte) (common.Address, error) { |
||||
var ( |
||||
ret0 = new(common.Address) |
||||
) |
||||
out := ret0 |
||||
err := _ENSRegistry.contract.Call(opts, out, "resolver", node) |
||||
return *ret0, err |
||||
} |
||||
|
||||
// Resolver is a free data retrieval call binding the contract method 0x0178b8bf.
|
||||
//
|
||||
// Solidity: function resolver(bytes32 node) constant returns(address)
|
||||
func (_ENSRegistry *ENSRegistrySession) Resolver(node [32]byte) (common.Address, error) { |
||||
return _ENSRegistry.Contract.Resolver(&_ENSRegistry.CallOpts, node) |
||||
} |
||||
|
||||
// Resolver is a free data retrieval call binding the contract method 0x0178b8bf.
|
||||
//
|
||||
// Solidity: function resolver(bytes32 node) constant returns(address)
|
||||
func (_ENSRegistry *ENSRegistryCallerSession) Resolver(node [32]byte) (common.Address, error) { |
||||
return _ENSRegistry.Contract.Resolver(&_ENSRegistry.CallOpts, node) |
||||
} |
||||
|
||||
// Ttl is a free data retrieval call binding the contract method 0x16a25cbd.
|
||||
//
|
||||
// Solidity: function ttl(bytes32 node) constant returns(uint64)
|
||||
func (_ENSRegistry *ENSRegistryCaller) Ttl(opts *bind.CallOpts, node [32]byte) (uint64, error) { |
||||
var ( |
||||
ret0 = new(uint64) |
||||
) |
||||
out := ret0 |
||||
err := _ENSRegistry.contract.Call(opts, out, "ttl", node) |
||||
return *ret0, err |
||||
} |
||||
|
||||
// Ttl is a free data retrieval call binding the contract method 0x16a25cbd.
|
||||
//
|
||||
// Solidity: function ttl(bytes32 node) constant returns(uint64)
|
||||
func (_ENSRegistry *ENSRegistrySession) Ttl(node [32]byte) (uint64, error) { |
||||
return _ENSRegistry.Contract.Ttl(&_ENSRegistry.CallOpts, node) |
||||
} |
||||
|
||||
// Ttl is a free data retrieval call binding the contract method 0x16a25cbd.
|
||||
//
|
||||
// Solidity: function ttl(bytes32 node) constant returns(uint64)
|
||||
func (_ENSRegistry *ENSRegistryCallerSession) Ttl(node [32]byte) (uint64, error) { |
||||
return _ENSRegistry.Contract.Ttl(&_ENSRegistry.CallOpts, node) |
||||
} |
||||
|
||||
// SetOwner is a paid mutator transaction binding the contract method 0x5b0fc9c3.
|
||||
//
|
||||
// Solidity: function setOwner(bytes32 node, address owner) returns()
|
||||
func (_ENSRegistry *ENSRegistryTransactor) SetOwner(opts *bind.TransactOpts, node [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENSRegistry.contract.Transact(opts, "setOwner", node, owner) |
||||
} |
||||
|
||||
// SetOwner is a paid mutator transaction binding the contract method 0x5b0fc9c3.
|
||||
//
|
||||
// Solidity: function setOwner(bytes32 node, address owner) returns()
|
||||
func (_ENSRegistry *ENSRegistrySession) SetOwner(node [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.SetOwner(&_ENSRegistry.TransactOpts, node, owner) |
||||
} |
||||
|
||||
// SetOwner is a paid mutator transaction binding the contract method 0x5b0fc9c3.
|
||||
//
|
||||
// Solidity: function setOwner(bytes32 node, address owner) returns()
|
||||
func (_ENSRegistry *ENSRegistryTransactorSession) SetOwner(node [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.SetOwner(&_ENSRegistry.TransactOpts, node, owner) |
||||
} |
||||
|
||||
// SetResolver is a paid mutator transaction binding the contract method 0x1896f70a.
|
||||
//
|
||||
// Solidity: function setResolver(bytes32 node, address resolver) returns()
|
||||
func (_ENSRegistry *ENSRegistryTransactor) SetResolver(opts *bind.TransactOpts, node [32]byte, resolver common.Address) (*types.Transaction, error) { |
||||
return _ENSRegistry.contract.Transact(opts, "setResolver", node, resolver) |
||||
} |
||||
|
||||
// SetResolver is a paid mutator transaction binding the contract method 0x1896f70a.
|
||||
//
|
||||
// Solidity: function setResolver(bytes32 node, address resolver) returns()
|
||||
func (_ENSRegistry *ENSRegistrySession) SetResolver(node [32]byte, resolver common.Address) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.SetResolver(&_ENSRegistry.TransactOpts, node, resolver) |
||||
} |
||||
|
||||
// SetResolver is a paid mutator transaction binding the contract method 0x1896f70a.
|
||||
//
|
||||
// Solidity: function setResolver(bytes32 node, address resolver) returns()
|
||||
func (_ENSRegistry *ENSRegistryTransactorSession) SetResolver(node [32]byte, resolver common.Address) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.SetResolver(&_ENSRegistry.TransactOpts, node, resolver) |
||||
} |
||||
|
||||
// SetSubnodeOwner is a paid mutator transaction binding the contract method 0x06ab5923.
|
||||
//
|
||||
// Solidity: function setSubnodeOwner(bytes32 node, bytes32 label, address owner) returns()
|
||||
func (_ENSRegistry *ENSRegistryTransactor) SetSubnodeOwner(opts *bind.TransactOpts, node [32]byte, label [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENSRegistry.contract.Transact(opts, "setSubnodeOwner", node, label, owner) |
||||
} |
||||
|
||||
// SetSubnodeOwner is a paid mutator transaction binding the contract method 0x06ab5923.
|
||||
//
|
||||
// Solidity: function setSubnodeOwner(bytes32 node, bytes32 label, address owner) returns()
|
||||
func (_ENSRegistry *ENSRegistrySession) SetSubnodeOwner(node [32]byte, label [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.SetSubnodeOwner(&_ENSRegistry.TransactOpts, node, label, owner) |
||||
} |
||||
|
||||
// SetSubnodeOwner is a paid mutator transaction binding the contract method 0x06ab5923.
|
||||
//
|
||||
// Solidity: function setSubnodeOwner(bytes32 node, bytes32 label, address owner) returns()
|
||||
func (_ENSRegistry *ENSRegistryTransactorSession) SetSubnodeOwner(node [32]byte, label [32]byte, owner common.Address) (*types.Transaction, error) { |
||||
return _ENSRegistry.Contract.SetSubnodeOwner(&_ENSRegistry.TransactOpts, node, label, owner) |
||||
} |
||||
|
||||
// SetTTL is a paid mutator transaction binding the contract method 0x14ab9038.
//
// Solidity: function setTTL(bytes32 node, uint64 ttl) returns()
func (_ENSRegistry *ENSRegistryTransactor) SetTTL(opts *bind.TransactOpts, node [32]byte, ttl uint64) (*types.Transaction, error) {
	return _ENSRegistry.contract.Transact(opts, "setTTL", node, ttl)
}

// SetTTL is a paid mutator transaction binding the contract method 0x14ab9038.
//
// Solidity: function setTTL(bytes32 node, uint64 ttl) returns()
func (_ENSRegistry *ENSRegistrySession) SetTTL(node [32]byte, ttl uint64) (*types.Transaction, error) {
	return _ENSRegistry.Contract.SetTTL(&_ENSRegistry.TransactOpts, node, ttl)
}

// SetTTL is a paid mutator transaction binding the contract method 0x14ab9038.
//
// Solidity: function setTTL(bytes32 node, uint64 ttl) returns()
func (_ENSRegistry *ENSRegistryTransactorSession) SetTTL(node [32]byte, ttl uint64) (*types.Transaction, error) {
	return _ENSRegistry.Contract.SetTTL(&_ENSRegistry.TransactOpts, node, ttl)
}
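
// Illustrative sketch, not part of the generated binding: the Session variants
// above carry pre-set TransactOpts, so a mutator call only needs the contract
// arguments. `registry` is a placeholder *ENSRegistrySession built elsewhere.
func exampleSetTTLViaSession(registry *ENSRegistrySession, node [32]byte) (*types.Transaction, error) {
	// The signer and gas settings come from the session's TransactOpts.
	return registry.SetTTL(node, 3600)
}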
||||
|
||||
// ENSRegistryNewOwnerIterator is returned from FilterNewOwner and is used to iterate over the raw logs and unpacked data for NewOwner events raised by the ENSRegistry contract.
|
||||
type ENSRegistryNewOwnerIterator struct { |
||||
Event *ENSRegistryNewOwner // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
} |
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ENSRegistryNewOwnerIterator) Next() bool { |
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil { |
||||
return false |
||||
} |
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done { |
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSRegistryNewOwner) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
default: |
||||
return false |
||||
} |
||||
} |
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSRegistryNewOwner) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
case err := <-it.sub.Err(): |
||||
it.done = true |
||||
it.fail = err |
||||
return it.Next() |
||||
} |
||||
} |
||||
|
||||
// Error returns any retrieval or parsing error occurred during filtering.
|
||||
func (it *ENSRegistryNewOwnerIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ENSRegistryNewOwnerIterator) Close() error { |
||||
it.sub.Unsubscribe() |
||||
return nil |
||||
} |
||||
|
||||
// ENSRegistryNewOwner represents a NewOwner event raised by the ENSRegistry contract.
type ENSRegistryNewOwner struct {
	Node  [32]byte
	Label [32]byte
	Owner common.Address
	Raw   types.Log // Blockchain specific contextual infos
}

// FilterNewOwner is a free log retrieval operation binding the contract event 0xce0457fe73731f824cc272376169235128c118b49d344817417c6d108d155e82.
//
// Solidity: event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner)
func (_ENSRegistry *ENSRegistryFilterer) FilterNewOwner(opts *bind.FilterOpts, node [][32]byte, label [][32]byte) (*ENSRegistryNewOwnerIterator, error) {

	var nodeRule []interface{}
	for _, nodeItem := range node {
		nodeRule = append(nodeRule, nodeItem)
	}
	var labelRule []interface{}
	for _, labelItem := range label {
		labelRule = append(labelRule, labelItem)
	}

	logs, sub, err := _ENSRegistry.contract.FilterLogs(opts, "NewOwner", nodeRule, labelRule)
	if err != nil {
		return nil, err
	}
	return &ENSRegistryNewOwnerIterator{contract: _ENSRegistry.contract, event: "NewOwner", logs: logs, sub: sub}, nil
}
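
// Illustrative sketch, not part of the generated binding: draining the
// iterator returned by FilterNewOwner. `filterer` is a placeholder for an
// *ENSRegistryFilterer bound to a deployed registry; nil filter options and
// nil topic rules match every block range, node and label.
func exampleListNewOwnerEvents(filterer *ENSRegistryFilterer) ([]*ENSRegistryNewOwner, error) {
	it, err := filterer.FilterNewOwner(nil, nil, nil)
	if err != nil {
		return nil, err
	}
	defer it.Close()

	var events []*ENSRegistryNewOwner
	for it.Next() {
		events = append(events, it.Event)
	}
	// Next returns false both when the logs are exhausted and on failure, so
	// the error has to be checked separately.
	return events, it.Error()
}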
||||
|
||||
// WatchNewOwner is a free log subscription operation binding the contract event 0xce0457fe73731f824cc272376169235128c118b49d344817417c6d108d155e82.
|
||||
//
|
||||
// Solidity: event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner)
|
||||
func (_ENSRegistry *ENSRegistryFilterer) WatchNewOwner(opts *bind.WatchOpts, sink chan<- *ENSRegistryNewOwner, node [][32]byte, label [][32]byte) (event.Subscription, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
var labelRule []interface{} |
||||
for _, labelItem := range label { |
||||
labelRule = append(labelRule, labelItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENSRegistry.contract.WatchLogs(opts, "NewOwner", nodeRule, labelRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return event.NewSubscription(func(quit <-chan struct{}) error { |
||||
defer sub.Unsubscribe() |
||||
for { |
||||
select { |
||||
case log := <-logs: |
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ENSRegistryNewOwner) |
||||
if err := _ENSRegistry.contract.UnpackLog(event, "NewOwner", log); err != nil { |
||||
return err |
||||
} |
||||
event.Raw = log |
||||
|
||||
select { |
||||
case sink <- event: |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
} |
||||
}), nil |
||||
} |
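
// Illustrative sketch, not part of the generated binding: consuming the
// subscription set up by WatchNewOwner. `filterer` is a placeholder for a
// bound *ENSRegistryFilterer and `handle` for whatever the caller does with
// each decoded event.
func exampleWatchNewOwner(filterer *ENSRegistryFilterer, handle func(*ENSRegistryNewOwner)) error {
	sink := make(chan *ENSRegistryNewOwner, 16)
	sub, err := filterer.WatchNewOwner(nil, sink, nil, nil)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-sink:
			handle(ev) // events arrive already unpacked, with Raw attached
		case err := <-sub.Err():
			// Network or decoding failures surface on the subscription.
			return err
		}
	}
}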
||||
|
||||
// ENSRegistryNewResolverIterator is returned from FilterNewResolver and is used to iterate over the raw logs and unpacked data for NewResolver events raised by the ENSRegistry contract.
|
||||
type ENSRegistryNewResolverIterator struct { |
||||
Event *ENSRegistryNewResolver // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
} |
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ENSRegistryNewResolverIterator) Next() bool { |
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil { |
||||
return false |
||||
} |
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done { |
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSRegistryNewResolver) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
default: |
||||
return false |
||||
} |
||||
} |
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSRegistryNewResolver) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
case err := <-it.sub.Err(): |
||||
it.done = true |
||||
it.fail = err |
||||
return it.Next() |
||||
} |
||||
} |
||||
|
||||
// Error returns any retrieval or parsing error occurred during filtering.
|
||||
func (it *ENSRegistryNewResolverIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ENSRegistryNewResolverIterator) Close() error { |
||||
it.sub.Unsubscribe() |
||||
return nil |
||||
} |
||||
|
||||
// ENSRegistryNewResolver represents a NewResolver event raised by the ENSRegistry contract.
|
||||
type ENSRegistryNewResolver struct { |
||||
Node [32]byte |
||||
Resolver common.Address |
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
} |
||||
|
||||
// FilterNewResolver is a free log retrieval operation binding the contract event 0x335721b01866dc23fbee8b6b2c7b1e14d6f05c28cd35a2c934239f94095602a0.
|
||||
//
|
||||
// Solidity: event NewResolver(bytes32 indexed node, address resolver)
|
||||
func (_ENSRegistry *ENSRegistryFilterer) FilterNewResolver(opts *bind.FilterOpts, node [][32]byte) (*ENSRegistryNewResolverIterator, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENSRegistry.contract.FilterLogs(opts, "NewResolver", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSRegistryNewResolverIterator{contract: _ENSRegistry.contract, event: "NewResolver", logs: logs, sub: sub}, nil |
||||
} |
||||
|
||||
// WatchNewResolver is a free log subscription operation binding the contract event 0x335721b01866dc23fbee8b6b2c7b1e14d6f05c28cd35a2c934239f94095602a0.
|
||||
//
|
||||
// Solidity: event NewResolver(bytes32 indexed node, address resolver)
|
||||
func (_ENSRegistry *ENSRegistryFilterer) WatchNewResolver(opts *bind.WatchOpts, sink chan<- *ENSRegistryNewResolver, node [][32]byte) (event.Subscription, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENSRegistry.contract.WatchLogs(opts, "NewResolver", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return event.NewSubscription(func(quit <-chan struct{}) error { |
||||
defer sub.Unsubscribe() |
||||
for { |
||||
select { |
||||
case log := <-logs: |
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ENSRegistryNewResolver) |
||||
if err := _ENSRegistry.contract.UnpackLog(event, "NewResolver", log); err != nil { |
||||
return err |
||||
} |
||||
event.Raw = log |
||||
|
||||
select { |
||||
case sink <- event: |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
} |
||||
}), nil |
||||
} |
||||
|
||||
// ENSRegistryNewTTLIterator is returned from FilterNewTTL and is used to iterate over the raw logs and unpacked data for NewTTL events raised by the ENSRegistry contract.
|
||||
type ENSRegistryNewTTLIterator struct { |
||||
Event *ENSRegistryNewTTL // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
} |
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ENSRegistryNewTTLIterator) Next() bool { |
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil { |
||||
return false |
||||
} |
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done { |
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSRegistryNewTTL) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
default: |
||||
return false |
||||
} |
||||
} |
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSRegistryNewTTL) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
case err := <-it.sub.Err(): |
||||
it.done = true |
||||
it.fail = err |
||||
return it.Next() |
||||
} |
||||
} |
||||
|
||||
// Error returns any retrieval or parsing error occurred during filtering.
|
||||
func (it *ENSRegistryNewTTLIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ENSRegistryNewTTLIterator) Close() error { |
||||
it.sub.Unsubscribe() |
||||
return nil |
||||
} |
||||
|
||||
// ENSRegistryNewTTL represents a NewTTL event raised by the ENSRegistry contract.
|
||||
type ENSRegistryNewTTL struct { |
||||
Node [32]byte |
||||
Ttl uint64 |
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
} |
||||
|
||||
// FilterNewTTL is a free log retrieval operation binding the contract event 0x1d4f9bbfc9cab89d66e1a1562f2233ccbf1308cb4f63de2ead5787adddb8fa68.
|
||||
//
|
||||
// Solidity: event NewTTL(bytes32 indexed node, uint64 ttl)
|
||||
func (_ENSRegistry *ENSRegistryFilterer) FilterNewTTL(opts *bind.FilterOpts, node [][32]byte) (*ENSRegistryNewTTLIterator, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENSRegistry.contract.FilterLogs(opts, "NewTTL", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSRegistryNewTTLIterator{contract: _ENSRegistry.contract, event: "NewTTL", logs: logs, sub: sub}, nil |
||||
} |
||||
|
||||
// WatchNewTTL is a free log subscription operation binding the contract event 0x1d4f9bbfc9cab89d66e1a1562f2233ccbf1308cb4f63de2ead5787adddb8fa68.
|
||||
//
|
||||
// Solidity: event NewTTL(bytes32 indexed node, uint64 ttl)
|
||||
func (_ENSRegistry *ENSRegistryFilterer) WatchNewTTL(opts *bind.WatchOpts, sink chan<- *ENSRegistryNewTTL, node [][32]byte) (event.Subscription, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENSRegistry.contract.WatchLogs(opts, "NewTTL", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return event.NewSubscription(func(quit <-chan struct{}) error { |
||||
defer sub.Unsubscribe() |
||||
for { |
||||
select { |
||||
case log := <-logs: |
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ENSRegistryNewTTL) |
||||
if err := _ENSRegistry.contract.UnpackLog(event, "NewTTL", log); err != nil { |
||||
return err |
||||
} |
||||
event.Raw = log |
||||
|
||||
select { |
||||
case sink <- event: |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
} |
||||
}), nil |
||||
} |
||||
|
||||
// ENSRegistryTransferIterator is returned from FilterTransfer and is used to iterate over the raw logs and unpacked data for Transfer events raised by the ENSRegistry contract.
|
||||
type ENSRegistryTransferIterator struct { |
||||
Event *ENSRegistryTransfer // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
} |
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ENSRegistryTransferIterator) Next() bool { |
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil { |
||||
return false |
||||
} |
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done { |
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSRegistryTransfer) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
default: |
||||
return false |
||||
} |
||||
} |
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select { |
||||
case log := <-it.logs: |
||||
it.Event = new(ENSRegistryTransfer) |
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { |
||||
it.fail = err |
||||
return false |
||||
} |
||||
it.Event.Raw = log |
||||
return true |
||||
|
||||
case err := <-it.sub.Err(): |
||||
it.done = true |
||||
it.fail = err |
||||
return it.Next() |
||||
} |
||||
} |
||||
|
||||
// Error returns any retrieval or parsing error occurred during filtering.
|
||||
func (it *ENSRegistryTransferIterator) Error() error { |
||||
return it.fail |
||||
} |
||||
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ENSRegistryTransferIterator) Close() error { |
||||
it.sub.Unsubscribe() |
||||
return nil |
||||
} |
||||
|
||||
// ENSRegistryTransfer represents a Transfer event raised by the ENSRegistry contract.
|
||||
type ENSRegistryTransfer struct { |
||||
Node [32]byte |
||||
Owner common.Address |
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
} |
||||
|
||||
// FilterTransfer is a free log retrieval operation binding the contract event 0xd4735d920b0f87494915f556dd9b54c8f309026070caea5c737245152564d266.
|
||||
//
|
||||
// Solidity: event Transfer(bytes32 indexed node, address owner)
|
||||
func (_ENSRegistry *ENSRegistryFilterer) FilterTransfer(opts *bind.FilterOpts, node [][32]byte) (*ENSRegistryTransferIterator, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENSRegistry.contract.FilterLogs(opts, "Transfer", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &ENSRegistryTransferIterator{contract: _ENSRegistry.contract, event: "Transfer", logs: logs, sub: sub}, nil |
||||
} |
||||
|
||||
// WatchTransfer is a free log subscription operation binding the contract event 0xd4735d920b0f87494915f556dd9b54c8f309026070caea5c737245152564d266.
|
||||
//
|
||||
// Solidity: event Transfer(bytes32 indexed node, address owner)
|
||||
func (_ENSRegistry *ENSRegistryFilterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *ENSRegistryTransfer, node [][32]byte) (event.Subscription, error) { |
||||
|
||||
var nodeRule []interface{} |
||||
for _, nodeItem := range node { |
||||
nodeRule = append(nodeRule, nodeItem) |
||||
} |
||||
|
||||
logs, sub, err := _ENSRegistry.contract.WatchLogs(opts, "Transfer", nodeRule) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return event.NewSubscription(func(quit <-chan struct{}) error { |
||||
defer sub.Unsubscribe() |
||||
for { |
||||
select { |
||||
case log := <-logs: |
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ENSRegistryTransfer) |
||||
if err := _ENSRegistry.contract.UnpackLog(event, "Transfer", log); err != nil { |
||||
return err |
||||
} |
||||
event.Raw = log |
||||
|
||||
select { |
||||
case sink <- event: |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
case err := <-sub.Err(): |
||||
return err |
||||
case <-quit: |
||||
return nil |
||||
} |
||||
} |
||||
}), nil |
||||
} |
@ -1,210 +0,0 @@ |
||||
// Code generated - DO NOT EDIT.
|
||||
// This file is a generated binding and any manual changes will be lost.
|
||||
|
||||
package contract |
||||
|
||||
import ( |
||||
"math/big" |
||||
"strings" |
||||
|
||||
ethereum "github.com/ethereum/go-ethereum" |
||||
"github.com/ethereum/go-ethereum/accounts/abi" |
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
) |
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var ( |
||||
_ = big.NewInt |
||||
_ = strings.NewReader |
||||
_ = ethereum.NotFound |
||||
_ = abi.U256 |
||||
_ = bind.Bind |
||||
_ = common.Big1 |
||||
_ = types.BloomLookup |
||||
_ = event.NewSubscription |
||||
) |
||||
|
||||
// FIFSRegistrarABI is the input ABI used to generate the binding from.
|
||||
const FIFSRegistrarABI = "[{\"constant\":false,\"inputs\":[{\"name\":\"label\",\"type\":\"bytes32\"},{\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"register\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"name\":\"ensAddr\",\"type\":\"address\"},{\"name\":\"node\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"}]" |
||||
|
||||
// FIFSRegistrarBin is the compiled bytecode used for deploying new contracts.
|
||||
const FIFSRegistrarBin = `0x608060405234801561001057600080fd5b506040516040806102cc8339810180604052604081101561003057600080fd5b50805160209091015160008054600160a060020a031916600160a060020a0390931692909217825560015561026190819061006b90396000f3fe6080604052600436106100405763ffffffff7c0100000000000000000000000000000000000000000000000000000000600035041663d22057a98114610045575b600080fd5b34801561005157600080fd5b5061008b6004803603604081101561006857600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff1661008d565b005b6000805460015460408051602080820193909352808201879052815180820383018152606082018084528151918501919091207f02571be3000000000000000000000000000000000000000000000000000000009091526064820152905186949373ffffffffffffffffffffffffffffffffffffffff16926302571be39260848082019391829003018186803b15801561012657600080fd5b505afa15801561013a573d6000803e3d6000fd5b505050506040513d602081101561015057600080fd5b5051905073ffffffffffffffffffffffffffffffffffffffff8116158061018c575073ffffffffffffffffffffffffffffffffffffffff811633145b151561019757600080fd5b60008054600154604080517f06ab592300000000000000000000000000000000000000000000000000000000815260048101929092526024820188905273ffffffffffffffffffffffffffffffffffffffff878116604484015290519216926306ab59239260648084019382900301818387803b15801561021757600080fd5b505af115801561022b573d6000803e3d6000fd5b505050505050505056fea165627a7a723058200f21424d48c6fc6f2bc79f5b36b3a0e3067a97d4ce084ab0e0f9106303a3ee520029` |
||||
|
||||
// DeployFIFSRegistrar deploys a new Ethereum contract, binding an instance of FIFSRegistrar to it.
|
||||
func DeployFIFSRegistrar(auth *bind.TransactOpts, backend bind.ContractBackend, ensAddr common.Address, node [32]byte) (common.Address, *types.Transaction, *FIFSRegistrar, error) { |
||||
parsed, err := abi.JSON(strings.NewReader(FIFSRegistrarABI)) |
||||
if err != nil { |
||||
return common.Address{}, nil, nil, err |
||||
} |
||||
address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(FIFSRegistrarBin), backend, ensAddr, node) |
||||
if err != nil { |
||||
return common.Address{}, nil, nil, err |
||||
} |
||||
return address, tx, &FIFSRegistrar{FIFSRegistrarCaller: FIFSRegistrarCaller{contract: contract}, FIFSRegistrarTransactor: FIFSRegistrarTransactor{contract: contract}, FIFSRegistrarFilterer: FIFSRegistrarFilterer{contract: contract}}, nil |
||||
} |
||||
|
||||
// FIFSRegistrar is an auto generated Go binding around an Ethereum contract.
|
||||
type FIFSRegistrar struct { |
||||
FIFSRegistrarCaller // Read-only binding to the contract
|
||||
FIFSRegistrarTransactor // Write-only binding to the contract
|
||||
FIFSRegistrarFilterer // Log filterer for contract events
|
||||
} |
||||
|
||||
// FIFSRegistrarCaller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarCaller struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// FIFSRegistrarTransactor is an auto generated write-only Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarTransactor struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// FIFSRegistrarFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
|
||||
type FIFSRegistrarFilterer struct { |
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
} |
||||
|
||||
// FIFSRegistrarSession is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type FIFSRegistrarSession struct { |
||||
Contract *FIFSRegistrar // Generic contract binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
} |
||||
|
||||
// FIFSRegistrarCallerSession is an auto generated read-only Go binding around an Ethereum contract,
|
||||
// with pre-set call options.
|
||||
type FIFSRegistrarCallerSession struct { |
||||
Contract *FIFSRegistrarCaller // Generic contract caller binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
} |
||||
|
||||
// FIFSRegistrarTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
|
||||
// with pre-set transact options.
|
||||
type FIFSRegistrarTransactorSession struct { |
||||
Contract *FIFSRegistrarTransactor // Generic contract transactor binding to set the session for
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
} |
||||
|
||||
// FIFSRegistrarRaw is an auto generated low-level Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarRaw struct { |
||||
Contract *FIFSRegistrar // Generic contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// FIFSRegistrarCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarCallerRaw struct { |
||||
Contract *FIFSRegistrarCaller // Generic read-only contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// FIFSRegistrarTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarTransactorRaw struct { |
||||
Contract *FIFSRegistrarTransactor // Generic write-only contract binding to access the raw methods on
|
||||
} |
||||
|
||||
// NewFIFSRegistrar creates a new instance of FIFSRegistrar, bound to a specific deployed contract.
|
||||
func NewFIFSRegistrar(address common.Address, backend bind.ContractBackend) (*FIFSRegistrar, error) { |
||||
contract, err := bindFIFSRegistrar(address, backend, backend, backend) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &FIFSRegistrar{FIFSRegistrarCaller: FIFSRegistrarCaller{contract: contract}, FIFSRegistrarTransactor: FIFSRegistrarTransactor{contract: contract}, FIFSRegistrarFilterer: FIFSRegistrarFilterer{contract: contract}}, nil |
||||
} |
||||
|
||||
// NewFIFSRegistrarCaller creates a new read-only instance of FIFSRegistrar, bound to a specific deployed contract.
|
||||
func NewFIFSRegistrarCaller(address common.Address, caller bind.ContractCaller) (*FIFSRegistrarCaller, error) { |
||||
contract, err := bindFIFSRegistrar(address, caller, nil, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &FIFSRegistrarCaller{contract: contract}, nil |
||||
} |
||||
|
||||
// NewFIFSRegistrarTransactor creates a new write-only instance of FIFSRegistrar, bound to a specific deployed contract.
|
||||
func NewFIFSRegistrarTransactor(address common.Address, transactor bind.ContractTransactor) (*FIFSRegistrarTransactor, error) { |
||||
contract, err := bindFIFSRegistrar(address, nil, transactor, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &FIFSRegistrarTransactor{contract: contract}, nil |
||||
} |
||||
|
||||
// NewFIFSRegistrarFilterer creates a new log filterer instance of FIFSRegistrar, bound to a specific deployed contract.
|
||||
func NewFIFSRegistrarFilterer(address common.Address, filterer bind.ContractFilterer) (*FIFSRegistrarFilterer, error) { |
||||
contract, err := bindFIFSRegistrar(address, nil, nil, filterer) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &FIFSRegistrarFilterer{contract: contract}, nil |
||||
} |
||||
|
||||
// bindFIFSRegistrar binds a generic wrapper to an already deployed contract.
|
||||
func bindFIFSRegistrar(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { |
||||
parsed, err := abi.JSON(strings.NewReader(FIFSRegistrarABI)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil |
||||
} |
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_FIFSRegistrar *FIFSRegistrarRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { |
||||
return _FIFSRegistrar.Contract.FIFSRegistrarCaller.contract.Call(opts, result, method, params...) |
||||
} |
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_FIFSRegistrar *FIFSRegistrarRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { |
||||
return _FIFSRegistrar.Contract.FIFSRegistrarTransactor.contract.Transfer(opts) |
||||
} |
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_FIFSRegistrar *FIFSRegistrarRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { |
||||
return _FIFSRegistrar.Contract.FIFSRegistrarTransactor.contract.Transact(opts, method, params...) |
||||
} |
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_FIFSRegistrar *FIFSRegistrarCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { |
||||
return _FIFSRegistrar.Contract.contract.Call(opts, result, method, params...) |
||||
} |
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_FIFSRegistrar *FIFSRegistrarTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { |
||||
return _FIFSRegistrar.Contract.contract.Transfer(opts) |
||||
} |
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_FIFSRegistrar *FIFSRegistrarTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { |
||||
return _FIFSRegistrar.Contract.contract.Transact(opts, method, params...) |
||||
} |
||||
|
||||
// Register is a paid mutator transaction binding the contract method 0xd22057a9.
//
// Solidity: function register(bytes32 label, address owner) returns()
func (_FIFSRegistrar *FIFSRegistrarTransactor) Register(opts *bind.TransactOpts, label [32]byte, owner common.Address) (*types.Transaction, error) {
	return _FIFSRegistrar.contract.Transact(opts, "register", label, owner)
}

// Register is a paid mutator transaction binding the contract method 0xd22057a9.
//
// Solidity: function register(bytes32 label, address owner) returns()
func (_FIFSRegistrar *FIFSRegistrarSession) Register(label [32]byte, owner common.Address) (*types.Transaction, error) {
	return _FIFSRegistrar.Contract.Register(&_FIFSRegistrar.TransactOpts, label, owner)
}

// Register is a paid mutator transaction binding the contract method 0xd22057a9.
//
// Solidity: function register(bytes32 label, address owner) returns()
func (_FIFSRegistrar *FIFSRegistrarTransactorSession) Register(label [32]byte, owner common.Address) (*types.Transaction, error) {
	return _FIFSRegistrar.Contract.Register(&_FIFSRegistrar.TransactOpts, label, owner)
}
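
// Illustrative sketch, not part of the generated binding: claiming a subdomain
// through the first-in-first-served registrar. `registrarAddr`, `backend` and
// `auth` are placeholders for a deployed registrar address, a backend and a
// funded transactor; `label` must already be the keccak256 hash of the label
// text (hashing is left to the caller to avoid extra imports here).
func exampleRegisterLabel(registrarAddr common.Address, backend bind.ContractBackend, auth *bind.TransactOpts, label [32]byte, owner common.Address) (*types.Transaction, error) {
	registrar, err := NewFIFSRegistrar(registrarAddr, backend)
	if err != nil {
		return nil, err
	}
	return registrar.Register(auth, label, owner)
}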
File diff suppressed because one or more lines are too long
@ -1,263 +0,0 @@ |
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ens

//go:generate abigen --sol contract/ENS.sol --pkg contract --out contract/ens.go
//go:generate abigen --sol contract/ENSRegistry.sol --exc contract/ENS.sol:ENS --pkg contract --out contract/ensregistry.go
//go:generate abigen --sol contract/FIFSRegistrar.sol --exc contract/ENS.sol:ENS --pkg contract --out contract/fifsregistrar.go
//go:generate abigen --sol contract/PublicResolver.sol --exc contract/ENS.sol:ENS --pkg contract --out contract/publicresolver.go

import (
	"encoding/binary"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/contracts/ens/contract"
	"github.com/ethereum/go-ethereum/contracts/ens/fallback_contract"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

var (
	MainNetAddress           = common.HexToAddress("0x314159265dD8dbb310642f98f50C066173C1259b")
	TestNetAddress           = common.HexToAddress("0x112234455c3a32fd11230c42e7bccd4a84e02010")
	contentHash_Interface_Id [4]byte
)

const contentHash_Interface_Id_Spec = 0xbc1c58d1

func init() {
	binary.BigEndian.PutUint32(contentHash_Interface_Id[:], contentHash_Interface_Id_Spec)
}

// ENS is the swarm domain name registry and resolver
type ENS struct {
	*contract.ENSSession
	contractBackend bind.ContractBackend
}

// NewENS creates a struct exposing convenient high-level operations for interacting with
// the Ethereum Name Service.
func NewENS(transactOpts *bind.TransactOpts, contractAddr common.Address, contractBackend bind.ContractBackend) (*ENS, error) {
	ens, err := contract.NewENS(contractAddr, contractBackend)
	if err != nil {
		return nil, err
	}
	return &ENS{
		&contract.ENSSession{
			Contract:     ens,
			TransactOpts: *transactOpts,
		},
		contractBackend,
	}, nil
}

// DeployENS deploys an instance of the ENS nameservice, with a 'first-in, first-served' root registrar.
func DeployENS(transactOpts *bind.TransactOpts, contractBackend bind.ContractBackend) (common.Address, *ENS, error) {
	// Deploy the ENS registry
	ensAddr, _, _, err := contract.DeployENSRegistry(transactOpts, contractBackend)
	if err != nil {
		return ensAddr, nil, err
	}
	ens, err := NewENS(transactOpts, ensAddr, contractBackend)
	if err != nil {
		return ensAddr, nil, err
	}
	// Deploy the registrar
	regAddr, _, _, err := contract.DeployFIFSRegistrar(transactOpts, contractBackend, ensAddr, [32]byte{})
	if err != nil {
		return ensAddr, nil, err
	}
	// Set the registrar as owner of the ENS root
	if _, err = ens.SetOwner([32]byte{}, regAddr); err != nil {
		return ensAddr, nil, err
	}
	return ensAddr, ens, nil
}
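
// Illustrative sketch, not part of the package API: deploying a fresh registry
// and immediately claiming a name with it. `transactOpts` and `backend` are
// placeholders supplied by the caller; with the simulated backend used in
// ens_test.go, a Commit() is needed between the two steps so the deployment is
// mined before Register reads the root owner.
func exampleDeployAndRegister(transactOpts *bind.TransactOpts, backend bind.ContractBackend) (*ENS, error) {
	_, ens, err := DeployENS(transactOpts, backend)
	if err != nil {
		return nil, err
	}
	// "example" is claimed under the root via the FIFS registrar that
	// DeployENS installed as the root owner.
	if _, err := ens.Register("example"); err != nil {
		return nil, err
	}
	return ens, nil
}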

func ensParentNode(name string) (common.Hash, common.Hash) {
	parts := strings.SplitN(name, ".", 2)
	label := crypto.Keccak256Hash([]byte(parts[0]))
	if len(parts) == 1 {
		return [32]byte{}, label
	}
	parentNode, parentLabel := ensParentNode(parts[1])
	return crypto.Keccak256Hash(parentNode[:], parentLabel[:]), label
}

// EnsNode returns the EIP-137 namehash of the given name.
func EnsNode(name string) common.Hash {
	parentNode, parentLabel := ensParentNode(name)
	return crypto.Keccak256Hash(parentNode[:], parentLabel[:])
}
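
// Illustrative sketch, not used by the package: EnsNode computes the EIP-137
// namehash, folding keccak256 over the labels from right to left and starting
// from the zero hash for the root. For "swarm.eth" the recursion above unrolls
// to the explicit form below.
func exampleNamehashSwarmEth() common.Hash {
	root := common.Hash{} // namehash of the empty name is the zero hash
	ethNode := crypto.Keccak256Hash(root[:], crypto.Keccak256([]byte("eth")))
	return crypto.Keccak256Hash(ethNode[:], crypto.Keccak256([]byte("swarm")))
}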

func (ens *ENS) getResolver(node [32]byte) (*contract.PublicResolverSession, error) {
	resolverAddr, err := ens.Resolver(node)
	if err != nil {
		return nil, err
	}
	resolver, err := contract.NewPublicResolver(resolverAddr, ens.contractBackend)
	if err != nil {
		return nil, err
	}
	return &contract.PublicResolverSession{
		Contract:     resolver,
		TransactOpts: ens.TransactOpts,
	}, nil
}

func (ens *ENS) getFallbackResolver(node [32]byte) (*fallback_contract.PublicResolverSession, error) {
	resolverAddr, err := ens.Resolver(node)
	if err != nil {
		return nil, err
	}
	resolver, err := fallback_contract.NewPublicResolver(resolverAddr, ens.contractBackend)
	if err != nil {
		return nil, err
	}
	return &fallback_contract.PublicResolverSession{
		Contract:     resolver,
		TransactOpts: ens.TransactOpts,
	}, nil
}

func (ens *ENS) getRegistrar(node [32]byte) (*contract.FIFSRegistrarSession, error) {
	registrarAddr, err := ens.Owner(node)
	if err != nil {
		return nil, err
	}
	registrar, err := contract.NewFIFSRegistrar(registrarAddr, ens.contractBackend)
	if err != nil {
		return nil, err
	}
	return &contract.FIFSRegistrarSession{
		Contract:     registrar,
		TransactOpts: ens.TransactOpts,
	}, nil
}

// Resolve is a non-transactional call that returns the content hash associated with a name.
func (ens *ENS) Resolve(name string) (common.Hash, error) {
	node := EnsNode(name)

	resolver, err := ens.getResolver(node)
	if err != nil {
		return common.Hash{}, err
	}

	// IMPORTANT: The old contract is deprecated. This code should be removed latest on June 1st 2019
	supported, err := resolver.SupportsInterface(contentHash_Interface_Id)
	if err != nil {
		return common.Hash{}, err
	}

	if !supported {
		resolver, err := ens.getFallbackResolver(node)
		if err != nil {
			return common.Hash{}, err
		}
		ret, err := resolver.Content(node)
		if err != nil {
			return common.Hash{}, err
		}
		return common.BytesToHash(ret[:]), nil
	}

	// END DEPRECATED CODE

	contentHash, err := resolver.Contenthash(node)
	if err != nil {
		return common.Hash{}, err
	}

	return extractContentHash(contentHash)
}
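
// Illustrative sketch, not part of the package API: looking up the content
// hash behind a name. `ens` would come from NewENS with a live backend and
// "theswarm.eth" is just an example name.
func exampleResolve(ens *ENS) (common.Hash, error) {
	// Resolve walks name -> resolver -> content hash, falling back to the
	// deprecated `content` record when the resolver does not support the
	// contenthash interface (0xbc1c58d1).
	return ens.Resolve("theswarm.eth")
}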

// Addr is a non-transactional call that returns the address associated with a name.
func (ens *ENS) Addr(name string) (common.Address, error) {
	node := EnsNode(name)

	resolver, err := ens.getResolver(node)
	if err != nil {
		return common.Address{}, err
	}
	ret, err := resolver.Addr(node)
	if err != nil {
		return common.Address{}, err
	}
	return common.BytesToAddress(ret[:]), nil
}

// SetAddr sets the address associated with a name. Only works if the caller
// owns the name, and the associated resolver implements a `setAddr` function.
func (ens *ENS) SetAddr(name string, addr common.Address) (*types.Transaction, error) {
	node := EnsNode(name)

	resolver, err := ens.getResolver(node)
	if err != nil {
		return nil, err
	}
	opts := ens.TransactOpts
	opts.GasLimit = 200000
	return resolver.Contract.SetAddr(&opts, node, addr)
}

// Register registers a new domain name for the caller, making them the owner of the new name.
// Only works if the registrar for the parent domain implements the FIFS registrar protocol.
func (ens *ENS) Register(name string) (*types.Transaction, error) {
	parentNode, label := ensParentNode(name)
	registrar, err := ens.getRegistrar(parentNode)
	if err != nil {
		return nil, err
	}
	return registrar.Contract.Register(&ens.TransactOpts, label, ens.TransactOpts.From)
}

// SetContentHash sets the content hash associated with a name. Only works if the caller
// owns the name, and the associated resolver implements a `setContenthash` function.
func (ens *ENS) SetContentHash(name string, hash []byte) (*types.Transaction, error) {
	node := EnsNode(name)

	resolver, err := ens.getResolver(node)
	if err != nil {
		return nil, err
	}

	opts := ens.TransactOpts
	opts.GasLimit = 200000

	// IMPORTANT: The old contract is deprecated. This code should be removed latest on June 1st 2019
	supported, err := resolver.SupportsInterface(contentHash_Interface_Id)
	if err != nil {
		return nil, err
	}

	if !supported {
		resolver, err := ens.getFallbackResolver(node)
		if err != nil {
			return nil, err
		}
		opts := ens.TransactOpts
		opts.GasLimit = 200000
		var b [32]byte
		copy(b[:], hash)
		return resolver.Contract.SetContent(&opts, node, b)
	}

	// END DEPRECATED CODE
	return resolver.Contract.SetContenthash(&opts, node, hash)
}
@ -1,126 +0,0 @@ |
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ens

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/contracts/ens/contract"
	"github.com/ethereum/go-ethereum/contracts/ens/fallback_contract"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

var (
	key, _       = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	name         = "my name on ENS"
	hash         = crypto.Keccak256Hash([]byte("my content"))
	fallbackHash = crypto.Keccak256Hash([]byte("my content hash"))
	addr         = crypto.PubkeyToAddress(key.PublicKey)
	testAddr     = common.HexToAddress("0x1234123412341234123412341234123412341234")
)

func TestENS(t *testing.T) {
	contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000)
	transactOpts := bind.NewKeyedTransactor(key)

	ensAddr, ens, err := DeployENS(transactOpts, contractBackend)
	if err != nil {
		t.Fatalf("can't deploy root registry: %v", err)
	}
	contractBackend.Commit()

	// Set ourself as the owner of the name.
	if _, err := ens.Register(name); err != nil {
		t.Fatalf("can't register: %v", err)
	}
	contractBackend.Commit()

	// Deploy a resolver and make it responsible for the name.
	resolverAddr, _, _, err := contract.DeployPublicResolver(transactOpts, contractBackend, ensAddr)
	if err != nil {
		t.Fatalf("can't deploy resolver: %v", err)
	}

	if _, err := ens.SetResolver(EnsNode(name), resolverAddr); err != nil {
		t.Fatalf("can't set resolver: %v", err)
	}
	contractBackend.Commit()

	// Set the content hash for the name.
	cid, err := EncodeSwarmHash(hash)
	if err != nil {
		t.Fatal(err)
	}
	if _, err = ens.SetContentHash(name, cid); err != nil {
		t.Fatalf("can't set content hash: %v", err)
	}
	contractBackend.Commit()

	// Try to resolve the name.
	resolvedHash, err := ens.Resolve(name)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	if resolvedHash.Hex() != hash.Hex() {
		t.Fatalf("resolve error, expected %v, got %v", hash.Hex(), resolvedHash.Hex())
	}

	// Set the address for the name.
	if _, err = ens.SetAddr(name, testAddr); err != nil {
		t.Fatalf("can't set address: %v", err)
	}
	contractBackend.Commit()

	// Try to resolve the name to an address.
	recoveredAddr, err := ens.Addr(name)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	if testAddr.Hex() != recoveredAddr.Hex() {
		t.Fatalf("resolve error, expected %v, got %v", testAddr.Hex(), recoveredAddr.Hex())
	}

	// Deploy the fallback contract and see that the fallback mechanism works.
	fallbackResolverAddr, _, _, err := fallback_contract.DeployPublicResolver(transactOpts, contractBackend, ensAddr)
	if err != nil {
		t.Fatalf("can't deploy resolver: %v", err)
	}
	if _, err := ens.SetResolver(EnsNode(name), fallbackResolverAddr); err != nil {
		t.Fatalf("can't set resolver: %v", err)
	}
	contractBackend.Commit()

	// Set the content hash for the name.
	if _, err = ens.SetContentHash(name, fallbackHash.Bytes()); err != nil {
		t.Fatalf("can't set content hash: %v", err)
	}
	contractBackend.Commit()

	// Try to resolve the name.
	fallbackResolvedHash, err := ens.Resolve(name)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	if fallbackResolvedHash.Hex() != fallbackHash.Hex() {
		t.Fatalf("resolve error, expected %v, got %v", fallbackHash.Hex(), fallbackResolvedHash.Hex())
	}
}
@ -1,212 +0,0 @@ |
||||
pragma solidity ^0.4.0; |
||||
|
||||
import './AbstractENS.sol'; |
||||
|
||||
/** |
||||
* A simple resolver anyone can use; only allows the owner of a node to set its |
||||
* address. |
||||
*/ |
||||
contract PublicResolver { |
||||
bytes4 constant INTERFACE_META_ID = 0x01ffc9a7; |
||||
bytes4 constant ADDR_INTERFACE_ID = 0x3b3b57de; |
||||
bytes4 constant CONTENT_INTERFACE_ID = 0xd8389dc5; |
||||
bytes4 constant NAME_INTERFACE_ID = 0x691f3431; |
||||
bytes4 constant ABI_INTERFACE_ID = 0x2203ab56; |
||||
bytes4 constant PUBKEY_INTERFACE_ID = 0xc8690233; |
||||
bytes4 constant TEXT_INTERFACE_ID = 0x59d1d43c; |
||||
|
||||
event AddrChanged(bytes32 indexed node, address a); |
||||
event ContentChanged(bytes32 indexed node, bytes32 hash); |
||||
event NameChanged(bytes32 indexed node, string name); |
||||
event ABIChanged(bytes32 indexed node, uint256 indexed contentType); |
||||
event PubkeyChanged(bytes32 indexed node, bytes32 x, bytes32 y); |
||||
event TextChanged(bytes32 indexed node, string indexed indexedKey, string key); |
||||
|
||||
struct PublicKey { |
||||
bytes32 x; |
||||
bytes32 y; |
||||
} |
||||
|
||||
struct Record { |
||||
address addr; |
||||
bytes32 content; |
||||
string name; |
||||
PublicKey pubkey; |
||||
mapping(string=>string) text; |
||||
mapping(uint256=>bytes) abis; |
||||
} |
||||
|
||||
AbstractENS ens; |
||||
mapping(bytes32=>Record) records; |
||||
|
||||
modifier only_owner(bytes32 node) { |
||||
if (ens.owner(node) != msg.sender) throw; |
||||
_; |
||||
} |
||||
|
||||
/** |
||||
* Constructor. |
||||
* @param ensAddr The ENS registrar contract. |
||||
*/ |
||||
function PublicResolver(AbstractENS ensAddr) { |
||||
ens = ensAddr; |
||||
} |
||||
|
||||
/** |
||||
* Returns true if the resolver implements the interface specified by the provided hash. |
||||
* @param interfaceID The ID of the interface to check for. |
||||
* @return True if the contract implements the requested interface. |
||||
*/ |
||||
function supportsInterface(bytes4 interfaceID) constant returns (bool) { |
||||
return interfaceID == ADDR_INTERFACE_ID || |
||||
interfaceID == CONTENT_INTERFACE_ID || |
||||
interfaceID == NAME_INTERFACE_ID || |
||||
interfaceID == ABI_INTERFACE_ID || |
||||
interfaceID == PUBKEY_INTERFACE_ID || |
||||
interfaceID == TEXT_INTERFACE_ID || |
||||
interfaceID == INTERFACE_META_ID; |
||||
} |
||||
|
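The interface ID constants above follow ERC-165: for a single-function interface the ID is the 4-byte selector of that function, i.e. the first four bytes of the keccak256 hash of its signature. A minimal standalone Go sketch using go-ethereum's crypto package (illustrative only):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// prints 3b3b57de, matching ADDR_INTERFACE_ID in the contract above
	fmt.Printf("%x\n", crypto.Keccak256([]byte("addr(bytes32)"))[:4])
}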
||||
/** |
||||
* Returns the address associated with an ENS node. |
||||
* @param node The ENS node to query. |
||||
* @return The associated address. |
||||
*/ |
||||
function addr(bytes32 node) constant returns (address ret) { |
||||
ret = records[node].addr; |
||||
} |
||||
|
||||
/** |
||||
* Sets the address associated with an ENS node. |
||||
* May only be called by the owner of that node in the ENS registry. |
||||
* @param node The node to update. |
||||
* @param addr The address to set. |
||||
*/ |
||||
function setAddr(bytes32 node, address addr) only_owner(node) { |
||||
records[node].addr = addr; |
||||
AddrChanged(node, addr); |
||||
} |
||||
|
||||
/** |
||||
* Returns the content hash associated with an ENS node. |
||||
* Note that this resource type is not standardized, and will likely change |
||||
* in future to a resource type based on multihash. |
||||
* @param node The ENS node to query. |
||||
* @return The associated content hash. |
||||
*/ |
||||
function content(bytes32 node) constant returns (bytes32 ret) { |
||||
ret = records[node].content; |
||||
} |
||||
|
||||
/** |
||||
* Sets the content hash associated with an ENS node. |
||||
* May only be called by the owner of that node in the ENS registry. |
||||
* Note that this resource type is not standardized, and will likely change |
||||
* in future to a resource type based on multihash. |
||||
* @param node The node to update. |
||||
* @param hash The content hash to set |
||||
*/ |
||||
function setContent(bytes32 node, bytes32 hash) only_owner(node) { |
||||
records[node].content = hash; |
||||
ContentChanged(node, hash); |
||||
} |
||||
|
||||
/** |
||||
* Returns the name associated with an ENS node, for reverse records. |
||||
* Defined in EIP181. |
||||
* @param node The ENS node to query. |
||||
* @return The associated name. |
||||
*/ |
||||
function name(bytes32 node) constant returns (string ret) { |
||||
ret = records[node].name; |
||||
} |
||||
|
||||
/** |
||||
* Sets the name associated with an ENS node, for reverse records. |
||||
* May only be called by the owner of that node in the ENS registry. |
||||
* @param node The node to update. |
||||
* @param name The name to set. |
||||
*/ |
||||
function setName(bytes32 node, string name) only_owner(node) { |
||||
records[node].name = name; |
||||
NameChanged(node, name); |
||||
} |
||||
|
||||
/** |
||||
* Returns the ABI associated with an ENS node. |
||||
* Defined in EIP205. |
||||
* @param node The ENS node to query |
||||
* @param contentTypes A bitwise OR of the ABI formats accepted by the caller. |
||||
* @return contentType The content type of the return value |
||||
* @return data The ABI data |
||||
*/ |
||||
function ABI(bytes32 node, uint256 contentTypes) constant returns (uint256 contentType, bytes data) { |
||||
var record = records[node]; |
||||
for(contentType = 1; contentType <= contentTypes; contentType <<= 1) { |
||||
if ((contentType & contentTypes) != 0 && record.abis[contentType].length > 0) { |
||||
data = record.abis[contentType]; |
||||
return; |
||||
} |
||||
} |
||||
contentType = 0; |
||||
} |
||||
|
||||
/** |
||||
* Sets the ABI associated with an ENS node. |
||||
* Nodes may have one ABI of each content type. To remove an ABI, set it to |
||||
* the empty string. |
||||
* @param node The node to update. |
||||
* @param contentType The content type of the ABI |
||||
* @param data The ABI data. |
||||
*/ |
||||
function setABI(bytes32 node, uint256 contentType, bytes data) only_owner(node) { |
||||
// Content types must be powers of 2 |
||||
if (((contentType - 1) & contentType) != 0) throw; |
||||
|
||||
records[node].abis[contentType] = data; |
||||
ABIChanged(node, contentType); |
||||
} |
||||
|
||||
/** |
||||
* Returns the SECP256k1 public key associated with an ENS node. |
||||
* Defined in EIP 619. |
||||
* @param node The ENS node to query |
||||
* @return x, y the X and Y coordinates of the curve point for the public key. |
||||
*/ |
||||
function pubkey(bytes32 node) constant returns (bytes32 x, bytes32 y) { |
||||
return (records[node].pubkey.x, records[node].pubkey.y); |
||||
} |
||||
|
||||
/** |
||||
* Sets the SECP256k1 public key associated with an ENS node. |
||||
* @param node The ENS node to query |
||||
* @param x the X coordinate of the curve point for the public key. |
||||
* @param y the Y coordinate of the curve point for the public key. |
||||
*/ |
||||
function setPubkey(bytes32 node, bytes32 x, bytes32 y) only_owner(node) { |
||||
records[node].pubkey = PublicKey(x, y); |
||||
PubkeyChanged(node, x, y); |
||||
} |
||||
|
||||
/** |
||||
* Returns the text data associated with an ENS node and key. |
||||
* @param node The ENS node to query. |
||||
* @param key The text data key to query. |
||||
* @return The associated text data. |
||||
*/ |
||||
function text(bytes32 node, string key) constant returns (string ret) { |
||||
ret = records[node].text[key]; |
||||
} |
||||
|
||||
/** |
||||
* Sets the text data associated with an ENS node and key. |
||||
* May only be called by the owner of that node in the ENS registry. |
||||
* @param node The node to update. |
||||
* @param key The key to set. |
||||
* @param value The text data value to set. |
||||
*/ |
||||
function setText(bytes32 node, string key, string value) only_owner(node) { |
||||
records[node].text[key] = value; |
||||
TextChanged(node, key, key); |
||||
} |
||||
} |
@ -1,182 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package protocols |
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
) |
||||
|
||||
// define some metrics
|
||||
var ( |
||||
// All metrics are cumulative
|
||||
|
||||
// total amount of units credited
|
||||
mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", metrics.AccountingRegistry) |
||||
// total amount of units debited
|
||||
mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", metrics.AccountingRegistry) |
||||
// total amount of bytes credited
|
||||
mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", metrics.AccountingRegistry) |
||||
// total amount of bytes debited
|
||||
mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", metrics.AccountingRegistry) |
||||
// total amount of credited messages
|
||||
mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", metrics.AccountingRegistry) |
||||
// total amount of debited messages
|
||||
mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", metrics.AccountingRegistry) |
||||
// how many times local node had to drop remote peers
|
||||
mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", metrics.AccountingRegistry) |
||||
// how many times the local node overdrew its balance and was dropped
|
||||
mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", metrics.AccountingRegistry) |
||||
) |
||||
|
||||
// Prices defines how prices are being passed on to the accounting instance
|
||||
type Prices interface { |
||||
// Return the Price for a message
|
||||
Price(interface{}) *Price |
||||
} |
||||
|
||||
type Payer bool |
||||
|
||||
const ( |
||||
Sender = Payer(true) |
||||
Receiver = Payer(false) |
||||
) |
||||
|
||||
// Price represents the costs of a message
|
||||
type Price struct { |
||||
Value uint64 |
||||
PerByte bool // True if the price is per byte, false if it is per whole message (unit)
|
||||
Payer Payer |
||||
} |
||||
|
||||
// For gives back the price for a message
|
||||
// A protocol provides the message price in absolute value
|
||||
// This method then returns the correct signed amount,
|
||||
// depending on who pays, which is identified by the `payer` argument:
|
||||
// `Send` will pass a `Sender` payer, `Receive` will pass the `Receiver` argument.
|
||||
// Thus: if sending and the sender pays, the amount is negative (the local node is debited), otherwise positive
|
||||
// if receiving and the receiver pays, the amount is negative (the local node is debited), otherwise positive
|
||||
func (p *Price) For(payer Payer, size uint32) int64 { |
||||
price := p.Value |
||||
if p.PerByte { |
||||
price *= uint64(size) |
||||
} |
||||
if p.Payer == payer { |
||||
return 0 - int64(price) |
||||
} |
||||
return int64(price) |
||||
} |
||||
|
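A short illustration of the sign convention implemented by For, assuming the Price type above and the Balance.Add semantics below (positive = credit to the local node); not part of the original file:

p := &Price{Value: 100, PerByte: true, Payer: Sender} // 100 units per byte, the sender pays

p.For(Sender, 10)   // -1000: the sending (paying) node is debited
p.For(Receiver, 10) // +1000: the receiving node is credited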
||||
// Balance is the actual accounting instance
|
||||
// Balance defines the operations needed for accounting
|
||||
// Implementations internally maintain the balance for every peer
|
||||
type Balance interface { |
||||
// Adds amount to the local balance with remote node `peer`;
|
||||
// positive amount = credit local node
|
||||
// negative amount = debit local node
|
||||
Add(amount int64, peer *Peer) error |
||||
} |
||||
|
||||
// Accounting implements the Hook interface
|
||||
// It interfaces to the balances through the Balance interface,
|
||||
// while interfacing with protocols and their prices through the Prices interface
|
||||
type Accounting struct { |
||||
Balance // interface to accounting logic
|
||||
Prices // interface to prices logic
|
||||
} |
||||
|
||||
func NewAccounting(balance Balance, po Prices) *Accounting { |
||||
ah := &Accounting{ |
||||
Prices: po, |
||||
Balance: balance, |
||||
} |
||||
return ah |
||||
} |
||||
|
||||
// SetupAccountingMetrics uses a separate registry for p2p accounting metrics;
|
||||
// this registry should be independent of any other metrics as it persists at different endpoints.
|
||||
// It also starts the persisting goroutine, which
|
||||
// at the passed interval writes the metrics to a LevelDB
|
||||
func SetupAccountingMetrics(reportInterval time.Duration, path string) *AccountingMetrics { |
||||
// create the DB and start persisting
|
||||
return NewAccountingMetrics(metrics.AccountingRegistry, reportInterval, path) |
||||
} |
||||
|
||||
// Send takes a peer, a size and a msg and
|
||||
// - calculates the cost for the local node sending a msg of size to peer using the Prices interface
|
||||
// - credits/debits local node using balance interface
|
||||
func (ah *Accounting) Send(peer *Peer, size uint32, msg interface{}) error { |
||||
// get the price for a message (through the protocol spec)
|
||||
price := ah.Price(msg) |
||||
// this message doesn't need accounting
|
||||
if price == nil { |
||||
return nil |
||||
} |
||||
// evaluate the price for sending messages
|
||||
costToLocalNode := price.For(Sender, size) |
||||
// do the accounting
|
||||
err := ah.Add(costToLocalNode, peer) |
||||
// record metrics: just increase counters for user-facing metrics
|
||||
ah.doMetrics(costToLocalNode, size, err) |
||||
return err |
||||
} |
||||
|
||||
// Receive takes a peer, a size and a msg and
|
||||
// - calculates the cost for the local node receiving a msg of size from peer using the Prices interface
|
||||
// - credits/debits local node using balance interface
|
||||
func (ah *Accounting) Receive(peer *Peer, size uint32, msg interface{}) error { |
||||
// get the price for a message (through the protocol spec)
|
||||
price := ah.Price(msg) |
||||
// this message doesn't need accounting
|
||||
if price == nil { |
||||
return nil |
||||
} |
||||
// evaluate the price for receiving messages
|
||||
costToLocalNode := price.For(Receiver, size) |
||||
// do the accounting
|
||||
err := ah.Add(costToLocalNode, peer) |
||||
// record metrics: just increase counters for user-facing metrics
|
||||
ah.doMetrics(costToLocalNode, size, err) |
||||
return err |
||||
} |
||||
|
||||
// record some metrics
|
||||
// this is not error handling; `err` is returned by both `Send` and `Receive`
|
||||
// `err` will only be non-nil if a limit has been violated (overdraft), in which case the peer has been dropped.
|
||||
// if the limit has been violated and `err` is thus not nil:
|
||||
// * if the price is positive, local node has been credited; thus `err` implicitly signals the REMOTE has been dropped
|
||||
// * if the price is negative, local node has been debited, thus `err` implicitly signals LOCAL node "overdraft"
|
||||
func (ah *Accounting) doMetrics(price int64, size uint32, err error) { |
||||
if price > 0 { |
||||
mBalanceCredit.Inc(price) |
||||
mBytesCredit.Inc(int64(size)) |
||||
mMsgCredit.Inc(1) |
||||
if err != nil { |
||||
// increase the number of times a remote node has been dropped due to "overdraft"
|
||||
mPeerDrops.Inc(1) |
||||
} |
||||
} else { |
||||
mBalanceDebit.Inc(price) |
||||
mBytesDebit.Inc(int64(size)) |
||||
mMsgDebit.Inc(1) |
||||
if err != nil { |
||||
// increase the number of times the local node has done an "overdraft" in respect to other nodes
|
||||
mSelfDrops.Inc(1) |
||||
} |
||||
} |
||||
} |
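To see how the pieces above fit together, here is a hypothetical wiring sketch: a Prices and a Balance implementation are combined via NewAccounting and attached as the Hook of a Spec, so every Send and Receive on peers of that spec is priced and booked. All names (pingMsg, demoPrices, demoBalance) are placeholders invented for this sketch, and the import path assumes the protocols package still lives under p2p/protocols:

package main

import (
	"github.com/ethereum/go-ethereum/p2p/protocols"
)

// pingMsg is a toy message type, priced per unit with the sender paying.
type pingMsg struct{}

// demoPrices implements the Prices interface for pingMsg only.
type demoPrices struct{}

func (demoPrices) Price(msg interface{}) *protocols.Price {
	if _, ok := msg.(*pingMsg); ok {
		return &protocols.Price{Value: 10, PerByte: false, Payer: protocols.Sender}
	}
	return nil // all other messages are not accounted
}

// demoBalance implements the Balance interface; a real implementation
// would persist the per-peer balance and enforce limits here.
type demoBalance struct{}

func (demoBalance) Add(amount int64, peer *protocols.Peer) error { return nil }

func demoSpec() *protocols.Spec {
	return &protocols.Spec{
		Name:       "demo",
		Version:    1,
		MaxMsgSize: 10 * 1024,
		Messages:   []interface{}{&pingMsg{}},
		Hook:       protocols.NewAccounting(demoBalance{}, demoPrices{}),
	}
}

func main() { _ = demoSpec() }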
@ -1,94 +0,0 @@ |
||||
package protocols |
||||
|
||||
import ( |
||||
"errors" |
||||
) |
||||
|
||||
// Textual version number of accounting API
|
||||
const AccountingVersion = "1.0" |
||||
|
||||
var errNoAccountingMetrics = errors.New("accounting metrics not enabled") |
||||
|
||||
// AccountingApi provides an API to access account related information
|
||||
type AccountingApi struct { |
||||
metrics *AccountingMetrics |
||||
} |
||||
|
||||
// NewAccountingApi creates a new AccountingApi
|
||||
// m will be used to check if accounting metrics are enabled
|
||||
func NewAccountingApi(m *AccountingMetrics) *AccountingApi { |
||||
return &AccountingApi{m} |
||||
} |
||||
|
||||
// Balance returns local node balance (units credited - units debited)
|
||||
func (self *AccountingApi) Balance() (int64, error) { |
||||
if self.metrics == nil { |
||||
return 0, errNoAccountingMetrics |
||||
} |
||||
balance := mBalanceCredit.Count() - mBalanceDebit.Count() |
||||
return balance, nil |
||||
} |
||||
|
||||
// BalanceCredit returns total amount of units credited by local node
|
||||
func (self *AccountingApi) BalanceCredit() (int64, error) { |
||||
if self.metrics == nil { |
||||
return 0, errNoAccountingMetrics |
||||
} |
||||
return mBalanceCredit.Count(), nil |
||||
} |
||||
|
||||
// BalanceDebit returns the total amount of units debited by the local node
|
||||
func (self *AccountingApi) BalanceDebit() (int64, error) { |
||||
if self.metrics == nil { |
||||
return 0, errNoAccountingMetrics |
||||
} |
||||
return mBalanceDebit.Count(), nil |
||||
} |
||||
|
||||
// BytesCredit returns total amount of bytes credited by local node
|
||||
func (self *AccountingApi) BytesCredit() (int64, error) { |
||||
if self.metrics == nil { |
||||
return 0, errNoAccountingMetrics |
||||
} |
||||
return mBytesCredit.Count(), nil |
||||
} |
||||
|
||||
// BytesDebit returns the total amount of bytes debited by the local node
|
||||
func (self *AccountingApi) BytesDebit() (int64, error) { |
||||
if self.metrics == nil { |
||||
return 0, errNoAccountingMetrics |
||||
} |
||||
return mBytesDebit.Count(), nil |
||||
} |
||||
|
||||
// MsgCredit returns total amount of messages credited by local node
|
||||
func (self *AccountingApi) MsgCredit() (int64, error) { |
||||
if self.metrics == nil { |
||||
return 0, errNoAccountingMetrics |
||||
} |
||||
return mMsgCredit.Count(), nil |
||||
} |
||||
|
||||
// MsgDebit returns total amount of messages debited by local node
|
||||
func (self *AccountingApi) MsgDebit() (int64, error) { |
||||
if self.metrics == nil { |
||||
return 0, errNoAccountingMetrics |
||||
} |
||||
return mMsgDebit.Count(), nil |
||||
} |
||||
|
||||
// PeerDrops returns the number of times the local node had to drop remote peers
|
||||
func (self *AccountingApi) PeerDrops() (int64, error) { |
||||
if self.metrics == nil { |
||||
return 0, errNoAccountingMetrics |
||||
} |
||||
return mPeerDrops.Count(), nil |
||||
} |
||||
|
||||
// SelfDrops returns the number of times the local node overdrew its balance and was dropped
|
||||
func (self *AccountingApi) SelfDrops() (int64, error) { |
||||
if self.metrics == nil { |
||||
return 0, errNoAccountingMetrics |
||||
} |
||||
return mSelfDrops.Count(), nil |
||||
} |
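A hypothetical sketch of how a node.Service could expose this API over RPC; the "accounting" namespace and the helper name are assumptions made for this illustration:

package main

import (
	"github.com/ethereum/go-ethereum/p2p/protocols"
	"github.com/ethereum/go-ethereum/rpc"
)

// accountingAPIs returns the RPC descriptor a node.Service's APIs() method
// could hand back; the metrics instance would come from SetupAccountingMetrics.
func accountingAPIs(m *protocols.AccountingMetrics) []rpc.API {
	return []rpc.API{{
		Namespace: "accounting",                 // assumed namespace
		Version:   protocols.AccountingVersion,  // "1.0", defined above
		Service:   protocols.NewAccountingApi(m),
		Public:    false,
	}}
}

func main() { _ = accountingAPIs(nil) }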
@ -1,323 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package protocols |
||||
|
||||
import ( |
||||
"context" |
||||
"flag" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"math/rand" |
||||
"os" |
||||
"path/filepath" |
||||
"reflect" |
||||
"sync" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/mattn/go-colorable" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
|
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
"github.com/ethereum/go-ethereum/p2p/enode" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" |
||||
) |
||||
|
||||
const ( |
||||
content = "123456789" |
||||
) |
||||
|
||||
var ( |
||||
nodes = flag.Int("nodes", 30, "number of nodes to create (default 30)") |
||||
msgs = flag.Int("msgs", 100, "number of messages sent by node (default 100)") |
||||
loglevel = flag.Int("loglevel", 0, "verbosity of logs") |
||||
rawlog = flag.Bool("rawlog", false, "remove terminal formatting from logs") |
||||
) |
||||
|
||||
func init() { |
||||
flag.Parse() |
||||
log.PrintOrigins(true) |
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(!*rawlog)))) |
||||
} |
||||
|
||||
//TestAccountingSimulation runs a p2p/simulations simulation
|
||||
//It creates *nodes nodes, connects each one to every other node,
|
||||
//then has every node send *msgs randomly selected messages
|
||||
//from the test protocol spec.
|
||||
//The spec has some accounted messages defined through the Prices interface.
|
||||
//The test does accounting for all the messages exchanged, and then checks
|
||||
//that every node has the same balance with a peer, but with opposite signs.
|
||||
//Balance(AwithB) = -Balance(BwithA), i.e. |Balance(AwithB)| == |Balance(BwithA)|
|
||||
func TestAccountingSimulation(t *testing.T) { |
||||
//setup the balances objects for every node
|
||||
bal := newBalances(*nodes) |
||||
//setup the metrics system or tests will fail trying to write metrics
|
||||
dir, err := ioutil.TempDir("", "account-sim") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(dir) |
||||
SetupAccountingMetrics(1*time.Second, filepath.Join(dir, "metrics.db")) |
||||
//define the node.Service for this test
|
||||
services := adapters.Services{ |
||||
"accounting": func(ctx *adapters.ServiceContext) (node.Service, error) { |
||||
return bal.newNode(), nil |
||||
}, |
||||
} |
||||
//setup the simulation
|
||||
adapter := adapters.NewSimAdapter(services) |
||||
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{DefaultService: "accounting"}) |
||||
defer net.Shutdown() |
||||
|
||||
// we send msgs messages per node, wait for all messages to arrive
|
||||
bal.wg.Add(*nodes * *msgs) |
||||
trigger := make(chan enode.ID) |
||||
go func() { |
||||
// wait for all of them to arrive
|
||||
bal.wg.Wait() |
||||
// then trigger a check
|
||||
// the selected node for the trigger is irrelevant,
|
||||
// we just want to trigger the end of the simulation
|
||||
trigger <- net.Nodes[0].ID() |
||||
}() |
||||
|
||||
// create nodes and start them
|
||||
for i := 0; i < *nodes; i++ { |
||||
conf := adapters.RandomNodeConfig() |
||||
bal.id2n[conf.ID] = i |
||||
if _, err := net.NewNodeWithConfig(conf); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if err := net.Start(conf.ID); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
// fully connect nodes
|
||||
for i, n := range net.Nodes { |
||||
for _, m := range net.Nodes[i+1:] { |
||||
if err := net.Connect(n.ID(), m.ID()); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// empty action
|
||||
action := func(ctx context.Context) error { |
||||
return nil |
||||
} |
||||
// check always checks out
|
||||
check := func(ctx context.Context, id enode.ID) (bool, error) { |
||||
return true, nil |
||||
} |
||||
|
||||
// run simulation
|
||||
timeout := 30 * time.Second |
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout) |
||||
defer cancel() |
||||
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{ |
||||
Action: action, |
||||
Trigger: trigger, |
||||
Expect: &simulations.Expectation{ |
||||
Nodes: []enode.ID{net.Nodes[0].ID()}, |
||||
Check: check, |
||||
}, |
||||
}) |
||||
|
||||
if result.Error != nil { |
||||
t.Fatal(result.Error) |
||||
} |
||||
|
||||
// check if balance matrix is symmetric
|
||||
if err := bal.symmetric(); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
// matrix is a matrix of nodes and their balances
|
||||
// matrix is in fact a linear array of size n*n,
|
||||
// so the balance for any node A with B is at index
|
||||
// A*n + B, while the balance of node B with A is at
|
||||
// B*n + A
|
||||
// (n entries in the array will not be filled -
|
||||
// the balance of a node with itself)
|
||||
type matrix struct { |
||||
n int //number of nodes
|
||||
m []int64 //array of balances
|
||||
lock sync.Mutex |
||||
} |
||||
|
||||
// create a new matrix
|
||||
func newMatrix(n int) *matrix { |
||||
return &matrix{ |
||||
n: n, |
||||
m: make([]int64, n*n), |
||||
} |
||||
} |
||||
|
||||
// called from the testNode's Add accounting function: registers a balance change
|
||||
func (m *matrix) add(i, j int, v int64) error { |
||||
// index for the balance of local node i with remote node j is
|
||||
// i * number of nodes + remote node
|
||||
mi := i*m.n + j |
||||
// register that balance
|
||||
m.lock.Lock() |
||||
m.m[mi] += v |
||||
m.lock.Unlock() |
||||
return nil |
||||
} |
||||
|
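A tiny illustration of the indexing scheme described above, assuming the matrix helpers in this file (illustrative fragment only):

m := newMatrix(3) // 3 nodes -> linear array of length 9
m.add(1, 2, 100)  // balance of node 1 with node 2, stored at index 1*3+2 = 5
m.add(2, 1, -100) // mirror balance of node 2 with node 1, stored at index 2*3+1 = 7
// the symmetric() check below then passes, since m.m[5] == -m.m[7]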
||||
// check that the balances are symmetric:
|
||||
// balance of node i with node j is the same as j with i but with inverted signs
|
||||
func (m *matrix) symmetric() error { |
||||
//iterate all nodes
|
||||
for i := 0; i < m.n; i++ { |
||||
//iterate starting +1
|
||||
for j := i + 1; j < m.n; j++ { |
||||
log.Debug("bal", "1", i, "2", j, "i,j", m.m[i*m.n+j], "j,i", m.m[j*m.n+i]) |
||||
if m.m[i*m.n+j] != -m.m[j*m.n+i] { |
||||
return fmt.Errorf("value mismatch. m[%v, %v] = %v; m[%v, %v] = %v", i, j, m.m[i*m.n+j], j, i, m.m[j*m.n+i]) |
||||
} |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// all the balances
|
||||
type balances struct { |
||||
i int |
||||
*matrix |
||||
id2n map[enode.ID]int |
||||
wg *sync.WaitGroup |
||||
} |
||||
|
||||
func newBalances(n int) *balances { |
||||
return &balances{ |
||||
matrix: newMatrix(n), |
||||
id2n: make(map[enode.ID]int), |
||||
wg: &sync.WaitGroup{}, |
||||
} |
||||
} |
||||
|
||||
// create a new testNode for every node created as part of the service
|
||||
func (b *balances) newNode() *testNode { |
||||
defer func() { b.i++ }() |
||||
return &testNode{ |
||||
bal: b, |
||||
i: b.i, |
||||
peers: make([]*testPeer, b.n), //a node will be connected to n-1 peers
|
||||
} |
||||
} |
||||
|
||||
type testNode struct { |
||||
bal *balances |
||||
i int |
||||
lock sync.Mutex |
||||
peers []*testPeer |
||||
peerCount int |
||||
} |
||||
|
||||
// do the accounting for the peer's test protocol
|
||||
// testNode implements protocols.Balance
|
||||
func (t *testNode) Add(a int64, p *Peer) error { |
||||
//get the index for the remote peer
|
||||
remote := t.bal.id2n[p.ID()] |
||||
log.Debug("add", "local", t.i, "remote", remote, "amount", a) |
||||
return t.bal.add(t.i, remote, a) |
||||
} |
||||
|
||||
//run the p2p protocol
|
||||
//for every node, represented by testNode, create a remote testPeer
|
||||
func (t *testNode) run(p *p2p.Peer, rw p2p.MsgReadWriter) error { |
||||
spec := createTestSpec() |
||||
//create accounting hook
|
||||
spec.Hook = NewAccounting(t, &dummyPrices{}) |
||||
|
||||
//create a peer for this node
|
||||
tp := &testPeer{NewPeer(p, rw, spec), t.i, t.bal.id2n[p.ID()], t.bal.wg} |
||||
t.lock.Lock() |
||||
t.peers[t.bal.id2n[p.ID()]] = tp |
||||
t.peerCount++ |
||||
if t.peerCount == t.bal.n-1 { |
||||
//when all peer connections are established, start sending messages from this peer
|
||||
go t.send() |
||||
} |
||||
t.lock.Unlock() |
||||
return tp.Run(tp.handle) |
||||
} |
||||
|
||||
// p2p message receive handler function
|
||||
func (tp *testPeer) handle(ctx context.Context, msg interface{}) error { |
||||
tp.wg.Done() |
||||
log.Debug("receive", "from", tp.remote, "to", tp.local, "type", reflect.TypeOf(msg), "msg", msg) |
||||
return nil |
||||
} |
||||
|
||||
type testPeer struct { |
||||
*Peer |
||||
local, remote int |
||||
wg *sync.WaitGroup |
||||
} |
||||
|
||||
func (t *testNode) send() { |
||||
log.Debug("start sending") |
||||
for i := 0; i < *msgs; i++ { |
||||
//determine randomly to which peer to send
|
||||
whom := rand.Intn(t.bal.n - 1) |
||||
if whom >= t.i { |
||||
whom++ |
||||
} |
||||
t.lock.Lock() |
||||
p := t.peers[whom] |
||||
t.lock.Unlock() |
||||
|
||||
//determine a random message from the spec's messages to be sent
|
||||
which := rand.Intn(len(p.spec.Messages)) |
||||
msg := p.spec.Messages[which] |
||||
switch msg.(type) { |
||||
case *perBytesMsgReceiverPays: |
||||
msg = &perBytesMsgReceiverPays{Content: content[:rand.Intn(len(content))]} |
||||
case *perBytesMsgSenderPays: |
||||
msg = &perBytesMsgSenderPays{Content: content[:rand.Intn(len(content))]} |
||||
} |
||||
log.Debug("send", "from", t.i, "to", whom, "type", reflect.TypeOf(msg), "msg", msg) |
||||
p.Send(context.TODO(), msg) |
||||
} |
||||
} |
||||
|
||||
// define the protocol
|
||||
func (t *testNode) Protocols() []p2p.Protocol { |
||||
return []p2p.Protocol{{ |
||||
Length: 100, |
||||
Run: t.run, |
||||
}} |
||||
} |
||||
|
||||
func (t *testNode) APIs() []rpc.API { |
||||
return nil |
||||
} |
||||
|
||||
func (t *testNode) Start(server *p2p.Server) error { |
||||
return nil |
||||
} |
||||
|
||||
func (t *testNode) Stop() error { |
||||
return nil |
||||
} |
@ -1,223 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package protocols |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
) |
||||
|
||||
//dummy Balance implementation
|
||||
type dummyBalance struct { |
||||
amount int64 |
||||
peer *Peer |
||||
} |
||||
|
||||
//dummy Prices implementation
|
||||
type dummyPrices struct{} |
||||
|
||||
//a dummy message which needs size based accounting
|
||||
//sender pays
|
||||
type perBytesMsgSenderPays struct { |
||||
Content string |
||||
} |
||||
|
||||
//a dummy message which needs size based accounting
|
||||
//receiver pays
|
||||
type perBytesMsgReceiverPays struct { |
||||
Content string |
||||
} |
||||
|
||||
//a dummy message which is paid for per unit
|
||||
//sender pays
|
||||
type perUnitMsgSenderPays struct{} |
||||
|
||||
//receiver pays
|
||||
type perUnitMsgReceiverPays struct{} |
||||
|
||||
//a dummy message which has zero as its price
|
||||
type zeroPriceMsg struct{} |
||||
|
||||
//a dummy message which has no accounting
|
||||
type nilPriceMsg struct{} |
||||
|
||||
//return the price for the defined messages
|
||||
func (d *dummyPrices) Price(msg interface{}) *Price { |
||||
switch msg.(type) { |
||||
//size based message cost, receiver pays
|
||||
case *perBytesMsgReceiverPays: |
||||
return &Price{ |
||||
PerByte: true, |
||||
Value: uint64(100), |
||||
Payer: Receiver, |
||||
} |
||||
//size based message cost, sender pays
|
||||
case *perBytesMsgSenderPays: |
||||
return &Price{ |
||||
PerByte: true, |
||||
Value: uint64(100), |
||||
Payer: Sender, |
||||
} |
||||
//unitary cost, receiver pays
|
||||
case *perUnitMsgReceiverPays: |
||||
return &Price{ |
||||
PerByte: false, |
||||
Value: uint64(99), |
||||
Payer: Receiver, |
||||
} |
||||
//unitary cost, sender pays
|
||||
case *perUnitMsgSenderPays: |
||||
return &Price{ |
||||
PerByte: false, |
||||
Value: uint64(99), |
||||
Payer: Sender, |
||||
} |
||||
case *zeroPriceMsg: |
||||
return &Price{ |
||||
PerByte: false, |
||||
Value: uint64(0), |
||||
Payer: Sender, |
||||
} |
||||
case *nilPriceMsg: |
||||
return nil |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
//dummy accounting implementation, only stores values for later check
|
||||
func (d *dummyBalance) Add(amount int64, peer *Peer) error { |
||||
d.amount = amount |
||||
d.peer = peer |
||||
return nil |
||||
} |
||||
|
||||
type testCase struct { |
||||
msg interface{} |
||||
size uint32 |
||||
sendResult int64 |
||||
recvResult int64 |
||||
} |
||||
|
||||
//lowest level unit test
|
||||
func TestBalance(t *testing.T) { |
||||
//create instances
|
||||
balance := &dummyBalance{} |
||||
prices := &dummyPrices{} |
||||
//create the spec
|
||||
spec := createTestSpec() |
||||
//create the accounting hook for the spec
|
||||
acc := NewAccounting(balance, prices) |
||||
//create a peer
|
||||
id := adapters.RandomNodeConfig().ID |
||||
p := p2p.NewPeer(id, "testPeer", nil) |
||||
peer := NewPeer(p, &dummyRW{}, spec) |
||||
//price depends on size, receiver pays
|
||||
msg := &perBytesMsgReceiverPays{Content: "testBalance"} |
||||
size, _ := rlp.EncodeToBytes(msg) |
||||
|
||||
testCases := []testCase{ |
||||
{ |
||||
msg, |
||||
uint32(len(size)), |
||||
int64(len(size) * 100), |
||||
int64(len(size) * -100), |
||||
}, |
||||
{ |
||||
&perBytesMsgSenderPays{Content: "testBalance"}, |
||||
uint32(len(size)), |
||||
int64(len(size) * -100), |
||||
int64(len(size) * 100), |
||||
}, |
||||
{ |
||||
&perUnitMsgSenderPays{}, |
||||
0, |
||||
int64(-99), |
||||
int64(99), |
||||
}, |
||||
{ |
||||
&perUnitMsgReceiverPays{}, |
||||
0, |
||||
int64(99), |
||||
int64(-99), |
||||
}, |
||||
{ |
||||
&zeroPriceMsg{}, |
||||
0, |
||||
int64(0), |
||||
int64(0), |
||||
}, |
||||
{ |
||||
&nilPriceMsg{}, |
||||
0, |
||||
int64(0), |
||||
int64(0), |
||||
}, |
||||
} |
||||
checkAccountingTestCases(t, testCases, acc, peer, balance, true) |
||||
checkAccountingTestCases(t, testCases, acc, peer, balance, false) |
||||
} |
||||
|
||||
func checkAccountingTestCases(t *testing.T, cases []testCase, acc *Accounting, peer *Peer, balance *dummyBalance, send bool) { |
||||
for _, c := range cases { |
||||
var err error |
||||
var expectedResult int64 |
||||
//reset balance before every check
|
||||
balance.amount = 0 |
||||
if send { |
||||
err = acc.Send(peer, c.size, c.msg) |
||||
expectedResult = c.sendResult |
||||
} else { |
||||
err = acc.Receive(peer, c.size, c.msg) |
||||
expectedResult = c.recvResult |
||||
} |
||||
|
||||
checkResults(t, err, balance, peer, expectedResult) |
||||
} |
||||
} |
||||
|
||||
func checkResults(t *testing.T, err error, balance *dummyBalance, peer *Peer, result int64) { |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if balance.peer != peer { |
||||
t.Fatalf("expected Add to be called with peer %v, got %v", peer, balance.peer) |
||||
} |
||||
if balance.amount != result { |
||||
t.Fatalf("Expected balance to be %d but is %d", result, balance.amount) |
||||
} |
||||
} |
||||
|
||||
//create a test spec
|
||||
func createTestSpec() *Spec { |
||||
spec := &Spec{ |
||||
Name: "test", |
||||
Version: 42, |
||||
MaxMsgSize: 10 * 1024, |
||||
Messages: []interface{}{ |
||||
&perBytesMsgReceiverPays{}, |
||||
&perBytesMsgSenderPays{}, |
||||
&perUnitMsgReceiverPays{}, |
||||
&perUnitMsgSenderPays{}, |
||||
&zeroPriceMsg{}, |
||||
&nilPriceMsg{}, |
||||
}, |
||||
} |
||||
return spec |
||||
} |
@ -1,443 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/* |
||||
Package protocols is an extension to p2p. It offers a user-friendly, simple way to define |
||||
devp2p subprotocols by abstracting away code that is typically shared by protocols. |
||||
|
||||
* automate assignment of code indices to messages |
||||
* automate RLP decoding/encoding based on reflection |
||||
* provide the forever loop to read incoming messages |
||||
* standardise error handling related to communication |
||||
* standardise handshake negotiation |
||||
* TODO: automatic generation of wire protocol specification for peers |
||||
|
||||
*/ |
||||
package protocols |
||||
|
||||
import ( |
||||
"bufio" |
||||
"bytes" |
||||
"context" |
||||
"fmt" |
||||
"io" |
||||
"reflect" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/swarm/spancontext" |
||||
"github.com/ethereum/go-ethereum/swarm/tracing" |
||||
opentracing "github.com/opentracing/opentracing-go" |
||||
) |
||||
|
||||
// error codes used by this protocol scheme
|
||||
const ( |
||||
ErrMsgTooLong = iota |
||||
ErrDecode |
||||
ErrWrite |
||||
ErrInvalidMsgCode |
||||
ErrInvalidMsgType |
||||
ErrHandshake |
||||
ErrNoHandler |
||||
ErrHandler |
||||
) |
||||
|
||||
// error description strings associated with the codes
|
||||
var errorToString = map[int]string{ |
||||
ErrMsgTooLong: "Message too long", |
||||
ErrDecode: "Invalid message (RLP error)", |
||||
ErrWrite: "Error sending message", |
||||
ErrInvalidMsgCode: "Invalid message code", |
||||
ErrInvalidMsgType: "Invalid message type", |
||||
ErrHandshake: "Handshake error", |
||||
ErrNoHandler: "No handler registered error", |
||||
ErrHandler: "Message handler error", |
||||
} |
||||
|
||||
/* |
||||
Error implements the standard go error interface. |
||||
Use: |
||||
|
||||
errorf(code, format, params ...interface{}) |
||||
|
||||
Prints as: |
||||
|
||||
<description>: <details> |
||||
|
||||
where description is given by code in errorToString |
||||
and details is fmt.Sprintf(format, params...) |
||||
|
||||
exported field Code can be checked |
||||
*/ |
||||
type Error struct { |
||||
Code int |
||||
message string |
||||
format string |
||||
params []interface{} |
||||
} |
||||
|
||||
func (e Error) Error() (message string) { |
||||
if len(e.message) == 0 { |
||||
name, ok := errorToString[e.Code] |
||||
if !ok { |
||||
panic("invalid message code") |
||||
} |
||||
e.message = name |
||||
if e.format != "" { |
||||
e.message += ": " + fmt.Sprintf(e.format, e.params...) |
||||
} |
||||
} |
||||
return e.message |
||||
} |
||||
|
||||
func errorf(code int, format string, params ...interface{}) *Error { |
||||
return &Error{ |
||||
Code: code, |
||||
format: format, |
||||
params: params, |
||||
} |
||||
} |
||||
|
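For illustration, a typical use of errorf with the codes above (the message text is made up for this example):

err := errorf(ErrDecode, "payload of %d bytes is malformed", 4096)
// err.Error() == "Invalid message (RLP error): payload of 4096 bytes is malformed"
// err.Code == ErrDecode, so callers can still switch on the code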
||||
// WrappedMsg is used to propagate marshalled context alongside message payloads
|
||||
type WrappedMsg struct { |
||||
Context []byte |
||||
Size uint32 |
||||
Payload []byte |
||||
} |
||||
|
||||
//For accounting, the design is to allow the Spec to describe which of its messages are priced and how
|
||||
//To access this functionality, we provide a Hook interface which will call accounting methods
|
||||
//NOTE: there could be more such (horizontal) hooks in the future
|
||||
type Hook interface { |
||||
//A hook for sending messages
|
||||
Send(peer *Peer, size uint32, msg interface{}) error |
||||
//A hook for receiving messages
|
||||
Receive(peer *Peer, size uint32, msg interface{}) error |
||||
} |
||||
|
||||
// Spec is a protocol specification including its name and version as well as
|
||||
// the types of messages which are exchanged
|
||||
type Spec struct { |
||||
// Name is the name of the protocol, often a three-letter word
|
||||
Name string |
||||
|
||||
// Version is the version number of the protocol
|
||||
Version uint |
||||
|
||||
// MaxMsgSize is the maximum accepted length of the message payload
|
||||
MaxMsgSize uint32 |
||||
|
||||
// Messages is a list of message data types which this protocol uses, with
|
||||
// each message type being sent with its array index as the code (so
|
||||
// [&foo{}, &bar{}, &baz{}] would send foo, bar and baz with codes
|
||||
// 0, 1 and 2 respectively)
|
||||
// each message must have a single unique data type
|
||||
Messages []interface{} |
||||
|
||||
//hook for accounting (could be extended to multiple hooks in the future)
|
||||
Hook Hook |
||||
|
||||
initOnce sync.Once |
||||
codes map[reflect.Type]uint64 |
||||
types map[uint64]reflect.Type |
||||
} |
||||
|
||||
func (s *Spec) init() { |
||||
s.initOnce.Do(func() { |
||||
s.codes = make(map[reflect.Type]uint64, len(s.Messages)) |
||||
s.types = make(map[uint64]reflect.Type, len(s.Messages)) |
||||
for i, msg := range s.Messages { |
||||
code := uint64(i) |
||||
typ := reflect.TypeOf(msg) |
||||
if typ.Kind() == reflect.Ptr { |
||||
typ = typ.Elem() |
||||
} |
||||
s.codes[typ] = code |
||||
s.types[code] = typ |
||||
} |
||||
}) |
||||
} |
||||
|
||||
// Length returns the number of message types in the protocol
|
||||
func (s *Spec) Length() uint64 { |
||||
return uint64(len(s.Messages)) |
||||
} |
||||
|
||||
// GetCode returns the message code of a type; the boolean second argument is
|
||||
// false if the message type is not found
|
||||
func (s *Spec) GetCode(msg interface{}) (uint64, bool) { |
||||
s.init() |
||||
typ := reflect.TypeOf(msg) |
||||
if typ.Kind() == reflect.Ptr { |
||||
typ = typ.Elem() |
||||
} |
||||
code, ok := s.codes[typ] |
||||
return code, ok |
||||
} |
||||
|
||||
// NewMsg constructs a new message type given the code
|
||||
func (s *Spec) NewMsg(code uint64) (interface{}, bool) { |
||||
s.init() |
||||
typ, ok := s.types[code] |
||||
if !ok { |
||||
return nil, false |
||||
} |
||||
return reflect.New(typ).Interface(), true |
||||
} |
||||
|
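A short sketch of the code assignment described above, with hypothetical foo and bar message types assumed to live inside this package: codes are simply the indices of the Messages slice.

type foo struct{ N uint }
type bar struct{ S string }

func specCodeExample() {
	spec := &Spec{Name: "demo", Version: 1, Messages: []interface{}{&foo{}, &bar{}}}

	code, ok := spec.GetCode(&bar{}) // code == 1, ok == true
	msg, ok2 := spec.NewMsg(0)       // msg is a freshly allocated *foo, ok2 == true
	_, _, _, _ = code, ok, msg, ok2
}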
||||
// Peer represents a remote peer or protocol instance that is running on a peer connection with
|
||||
// a remote peer
|
||||
type Peer struct { |
||||
*p2p.Peer // the p2p.Peer object representing the remote
|
||||
rw p2p.MsgReadWriter // p2p.MsgReadWriter to send messages to and read messages from
|
||||
spec *Spec |
||||
} |
||||
|
||||
// NewPeer constructs a new peer
|
||||
// this constructor is called by the p2p.Protocol#Run function
|
||||
// the first two arguments are the arguments passed to p2p.Protocol.Run function
|
||||
// the third argument is the Spec describing the protocol
|
||||
func NewPeer(p *p2p.Peer, rw p2p.MsgReadWriter, spec *Spec) *Peer { |
||||
return &Peer{ |
||||
Peer: p, |
||||
rw: rw, |
||||
spec: spec, |
||||
} |
||||
} |
||||
|
||||
// Run starts the forever loop that handles incoming messages
|
||||
// called within the p2p.Protocol#Run function
|
||||
// the handler argument is a function which is called for each message received
|
||||
// from the remote peer, a returned error causes the loop to exit
|
||||
// resulting in disconnection
|
||||
func (p *Peer) Run(handler func(ctx context.Context, msg interface{}) error) error { |
||||
for { |
||||
if err := p.handleIncoming(handler); err != nil { |
||||
if err != io.EOF { |
||||
metrics.GetOrRegisterCounter("peer.handleincoming.error", nil).Inc(1) |
||||
log.Error("peer.handleIncoming", "err", err) |
||||
} |
||||
|
||||
return err |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Drop disconnects a peer.
|
||||
// TODO: may need to implement protocol drop only? don't want to kick off the peer
|
||||
// if they are useful for other protocols
|
||||
func (p *Peer) Drop() { |
||||
p.Disconnect(p2p.DiscSubprotocolError) |
||||
} |
||||
|
||||
// Send takes a message, encodes it in RLP, finds the right message code and sends the
|
||||
// message off to the peer
|
||||
// this low level call will be wrapped by libraries providing routed or broadcast sends
|
||||
// but is often just used to forward and push messages to directly connected peers
|
||||
func (p *Peer) Send(ctx context.Context, msg interface{}) error { |
||||
defer metrics.GetOrRegisterResettingTimer("peer.send_t", nil).UpdateSince(time.Now()) |
||||
metrics.GetOrRegisterCounter("peer.send", nil).Inc(1) |
||||
metrics.GetOrRegisterCounter(fmt.Sprintf("peer.send.%T", msg), nil).Inc(1) |
||||
|
||||
var b bytes.Buffer |
||||
if tracing.Enabled { |
||||
writer := bufio.NewWriter(&b) |
||||
|
||||
tracer := opentracing.GlobalTracer() |
||||
|
||||
sctx := spancontext.FromContext(ctx) |
||||
|
||||
if sctx != nil { |
||||
err := tracer.Inject( |
||||
sctx, |
||||
opentracing.Binary, |
||||
writer) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
writer.Flush() |
||||
} |
||||
|
||||
r, err := rlp.EncodeToBytes(msg) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
wmsg := WrappedMsg{ |
||||
Context: b.Bytes(), |
||||
Size: uint32(len(r)), |
||||
Payload: r, |
||||
} |
||||
|
||||
//if the accounting hook is set, call it
|
||||
if p.spec.Hook != nil { |
||||
err := p.spec.Hook.Send(p, wmsg.Size, msg) |
||||
if err != nil { |
||||
p.Drop() |
||||
return err |
||||
} |
||||
} |
||||
|
||||
code, found := p.spec.GetCode(msg) |
||||
if !found { |
||||
return errorf(ErrInvalidMsgType, "%v", code) |
||||
} |
||||
return p2p.Send(p.rw, code, wmsg) |
||||
} |
||||
|
||||
// handleIncoming(code)
|
||||
// is called each cycle of the main forever loop that dispatches incoming messages
|
||||
// if this returns an error the loop returns and the peer is disconnected with the error
|
||||
// this generic handler
|
||||
// * checks message size,
|
||||
// * checks for out-of-range message codes,
|
||||
// * handles decoding with reflection,
|
||||
// * call handlers as callbacks
|
||||
func (p *Peer) handleIncoming(handle func(ctx context.Context, msg interface{}) error) error { |
||||
msg, err := p.rw.ReadMsg() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
// make sure that the payload has been fully consumed
|
||||
defer msg.Discard() |
||||
|
||||
if msg.Size > p.spec.MaxMsgSize { |
||||
return errorf(ErrMsgTooLong, "%v > %v", msg.Size, p.spec.MaxMsgSize) |
||||
} |
||||
|
||||
// unmarshal wrapped msg, which might contain context
|
||||
var wmsg WrappedMsg |
||||
err = msg.Decode(&wmsg) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
|
||||
ctx := context.Background() |
||||
|
||||
// if tracing is enabled and the context coming within the request is
|
||||
// not empty, try to unmarshal it
|
||||
if tracing.Enabled && len(wmsg.Context) > 0 { |
||||
var sctx opentracing.SpanContext |
||||
|
||||
tracer := opentracing.GlobalTracer() |
||||
sctx, err = tracer.Extract( |
||||
opentracing.Binary, |
||||
bytes.NewReader(wmsg.Context)) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
|
||||
ctx = spancontext.WithContext(ctx, sctx) |
||||
} |
||||
|
||||
val, ok := p.spec.NewMsg(msg.Code) |
||||
if !ok { |
||||
return errorf(ErrInvalidMsgCode, "%v", msg.Code) |
||||
} |
||||
if err := rlp.DecodeBytes(wmsg.Payload, val); err != nil { |
||||
return errorf(ErrDecode, "<= %v: %v", msg, err) |
||||
} |
||||
|
||||
//if the accounting hook is set, call it
|
||||
if p.spec.Hook != nil { |
||||
err := p.spec.Hook.Receive(p, wmsg.Size, val) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
// call the registered handler callbacks
|
||||
// a registered callback takes the decoded message as an argument, as an interface
|
||||
// which the handler is supposed to cast to the appropriate type
|
||||
// it is entirely safe not to check the cast in the handler since the handler is
|
||||
// chosen based on the proper type in the first place
|
||||
if err := handle(ctx, val); err != nil { |
||||
return errorf(ErrHandler, "(msg code %v): %v", msg.Code, err) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// Handshake negotiates a handshake on the peer connection
|
||||
// * arguments
|
||||
// * context
|
||||
// * the local handshake to be sent to the remote peer
|
||||
// * function to be called on the remote handshake (can be nil)
|
||||
// * expects a remote handshake back of the same type
|
||||
// * the dialing peer needs to send the handshake first and then waits for remote
|
||||
// * the listening peer waits for the remote handshake and then sends its own
|
||||
// returns the remote handshake and an error
|
||||
func (p *Peer) Handshake(ctx context.Context, hs interface{}, verify func(interface{}) error) (interface{}, error) { |
||||
if _, ok := p.spec.GetCode(hs); !ok { |
||||
return nil, errorf(ErrHandshake, "unknown handshake message type: %T", hs) |
||||
} |
||||
|
||||
var rhs interface{} |
||||
errc := make(chan error, 2) |
||||
handle := func(ctx context.Context, msg interface{}) error { |
||||
rhs = msg |
||||
if verify != nil { |
||||
return verify(rhs) |
||||
} |
||||
return nil |
||||
} |
||||
send := func() { errc <- p.Send(ctx, hs) } |
||||
receive := func() { errc <- p.handleIncoming(handle) } |
||||
|
||||
go func() { |
||||
if p.Inbound() { |
||||
receive() |
||||
send() |
||||
} else { |
||||
send() |
||||
receive() |
||||
} |
||||
}() |
||||
|
||||
for i := 0; i < 2; i++ { |
||||
var err error |
||||
select { |
||||
case err = <-errc: |
||||
case <-ctx.Done(): |
||||
err = ctx.Err() |
||||
} |
||||
if err != nil { |
||||
return nil, errorf(ErrHandshake, err.Error()) |
||||
} |
||||
} |
||||
return rhs, nil |
||||
} |
||||
|
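A condensed usage sketch mirroring the call pattern in the tests further below; `peer` is assumed to be a *Peer already constructed with NewPeer, and protoHandshake is the handshake message type defined in those tests (not part of the original file):

func doHandshake(ctx context.Context, peer *Peer) (*protoHandshake, error) {
	verify := func(rhs interface{}) error {
		// reject a remote handshake with a different version
		if rhs.(*protoHandshake).Version != 42 {
			return fmt.Errorf("version mismatch")
		}
		return nil
	}
	rhs, err := peer.Handshake(ctx, &protoHandshake{42, "420"}, verify)
	if err != nil {
		return nil, err
	}
	return rhs.(*protoHandshake), nil
}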
||||
// HasCap returns true if Peer has a capability
|
||||
// with provided name.
|
||||
func (p *Peer) HasCap(capName string) (yes bool) { |
||||
if p == nil || p.Peer == nil { |
||||
return false |
||||
} |
||||
for _, c := range p.Caps() { |
||||
if c.Name == capName { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
@ -1,624 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package protocols |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"errors" |
||||
"fmt" |
||||
"sync" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
"github.com/ethereum/go-ethereum/p2p/enode" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" |
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing" |
||||
) |
||||
|
||||
// handshake message type
|
||||
type hs0 struct { |
||||
C uint |
||||
} |
||||
|
||||
// message to kill/drop the peer with nodeID
|
||||
type kill struct { |
||||
C enode.ID |
||||
} |
||||
|
||||
// message to drop connection
|
||||
type drop struct { |
||||
} |
||||
|
||||
// protoHandshake represents module-independent aspects of the protocol and is
|
||||
// the first message peers send and receive as part of the initial exchange
|
||||
type protoHandshake struct { |
||||
Version uint // local and remote peer should have identical version
|
||||
NetworkID string // local and remote peer should have identical network id
|
||||
} |
||||
|
||||
// checkProtoHandshake verifies local and remote protoHandshakes match
|
||||
func checkProtoHandshake(testVersion uint, testNetworkID string) func(interface{}) error { |
||||
return func(rhs interface{}) error { |
||||
remote := rhs.(*protoHandshake) |
||||
if remote.NetworkID != testNetworkID { |
||||
return fmt.Errorf("%s (!= %s)", remote.NetworkID, testNetworkID) |
||||
} |
||||
|
||||
if remote.Version != testVersion { |
||||
return fmt.Errorf("%d (!= %d)", remote.Version, testVersion) |
||||
} |
||||
return nil |
||||
} |
||||
} |
||||
|
||||
// newProtocol sets up a protocol
|
||||
// the run function here demonstrates a typical protocol using peerPool, handshake
|
||||
// and messages registered to handlers
|
||||
func newProtocol(pp *p2ptest.TestPeerPool) func(*p2p.Peer, p2p.MsgReadWriter) error { |
||||
spec := &Spec{ |
||||
Name: "test", |
||||
Version: 42, |
||||
MaxMsgSize: 10 * 1024, |
||||
Messages: []interface{}{ |
||||
protoHandshake{}, |
||||
hs0{}, |
||||
kill{}, |
||||
drop{}, |
||||
}, |
||||
} |
||||
return func(p *p2p.Peer, rw p2p.MsgReadWriter) error { |
||||
peer := NewPeer(p, rw, spec) |
||||
|
||||
// initiate one-off protohandshake and check validity
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second) |
||||
defer cancel() |
||||
phs := &protoHandshake{42, "420"} |
||||
hsCheck := checkProtoHandshake(phs.Version, phs.NetworkID) |
||||
_, err := peer.Handshake(ctx, phs, hsCheck) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
lhs := &hs0{42} |
||||
// module handshake demonstrating a simple repeatable exchange of same-type message
|
||||
hs, err := peer.Handshake(ctx, lhs, nil) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if rmhs := hs.(*hs0); rmhs.C > lhs.C { |
||||
return fmt.Errorf("handshake mismatch remote %v > local %v", rmhs.C, lhs.C) |
||||
} |
||||
|
||||
handle := func(ctx context.Context, msg interface{}) error { |
||||
switch msg := msg.(type) { |
||||
|
||||
case *protoHandshake: |
||||
return errors.New("duplicate handshake") |
||||
|
||||
case *hs0: |
||||
rhs := msg |
||||
if rhs.C > lhs.C { |
||||
return fmt.Errorf("handshake mismatch remote %v > local %v", rhs.C, lhs.C) |
||||
} |
||||
lhs.C += rhs.C |
||||
return peer.Send(ctx, lhs) |
||||
|
||||
case *kill: |
||||
// demonstrates use of peerPool, killing another peer connection as a response to a message
|
||||
id := msg.C |
||||
pp.Get(id).Drop() |
||||
return nil |
||||
|
||||
case *drop: |
||||
// for testing we can trigger a self-induced disconnect upon receiving a drop message
|
||||
return errors.New("dropped") |
||||
|
||||
default: |
||||
return fmt.Errorf("unknown message type: %T", msg) |
||||
} |
||||
} |
||||
|
||||
pp.Add(peer) |
||||
defer pp.Remove(peer) |
||||
return peer.Run(handle) |
||||
} |
||||
} |
||||
|
||||
func protocolTester(pp *p2ptest.TestPeerPool) *p2ptest.ProtocolTester { |
||||
prvkey, err := crypto.GenerateKey() |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
return p2ptest.NewProtocolTester(prvkey, 2, newProtocol(pp)) |
||||
} |
||||
|
||||
func protoHandshakeExchange(id enode.ID, proto *protoHandshake) []p2ptest.Exchange { |
||||
|
||||
return []p2ptest.Exchange{ |
||||
{ |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 0, |
||||
Msg: &protoHandshake{42, "420"}, |
||||
Peer: id, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 0, |
||||
Msg: proto, |
||||
Peer: id, |
||||
}, |
||||
}, |
||||
}, |
||||
} |
||||
} |
||||
|
||||
func runProtoHandshake(t *testing.T, proto *protoHandshake, errs ...error) { |
||||
t.Helper() |
||||
pp := p2ptest.NewTestPeerPool() |
||||
s := protocolTester(pp) |
||||
defer s.Stop() |
||||
|
||||
// TODO: make this more than one handshake
|
||||
node := s.Nodes[0] |
||||
if err := s.TestExchanges(protoHandshakeExchange(node.ID(), proto)...); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
var disconnects []*p2ptest.Disconnect |
||||
for i, err := range errs { |
||||
disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.Nodes[i].ID(), Error: err}) |
||||
} |
||||
if err := s.TestDisconnected(disconnects...); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
type dummyHook struct { |
||||
peer *Peer |
||||
size uint32 |
||||
msg interface{} |
||||
send bool |
||||
err error |
||||
waitC chan struct{} |
||||
mu sync.Mutex |
||||
} |
||||
|
||||
type dummyMsg struct { |
||||
Content string |
||||
} |
||||
|
||||
func (d *dummyHook) Send(peer *Peer, size uint32, msg interface{}) error { |
||||
d.mu.Lock() |
||||
defer d.mu.Unlock() |
||||
|
||||
d.peer = peer |
||||
d.size = size |
||||
d.msg = msg |
||||
d.send = true |
||||
return d.err |
||||
} |
||||
|
||||
func (d *dummyHook) Receive(peer *Peer, size uint32, msg interface{}) error { |
||||
d.mu.Lock() |
||||
defer d.mu.Unlock() |
||||
|
||||
d.peer = peer |
||||
d.size = size |
||||
d.msg = msg |
||||
d.send = false |
||||
d.waitC <- struct{}{} |
||||
return d.err |
||||
} |
||||
|
||||
func TestProtocolHook(t *testing.T) { |
||||
testHook := &dummyHook{ |
||||
waitC: make(chan struct{}, 1), |
||||
} |
||||
spec := &Spec{ |
||||
Name: "test", |
||||
Version: 42, |
||||
MaxMsgSize: 10 * 1024, |
||||
Messages: []interface{}{ |
||||
dummyMsg{}, |
||||
}, |
||||
Hook: testHook, |
||||
} |
||||
|
||||
runFunc := func(p *p2p.Peer, rw p2p.MsgReadWriter) error { |
||||
peer := NewPeer(p, rw, spec) |
||||
ctx := context.TODO() |
||||
err := peer.Send(ctx, &dummyMsg{ |
||||
Content: "handshake"}) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
handle := func(ctx context.Context, msg interface{}) error { |
||||
return nil |
||||
} |
||||
|
||||
return peer.Run(handle) |
||||
} |
||||
|
||||
prvkey, err := crypto.GenerateKey() |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
tester := p2ptest.NewProtocolTester(prvkey, 2, runFunc) |
||||
defer tester.Stop() |
||||
err = tester.TestExchanges(p2ptest.Exchange{ |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 0, |
||||
Msg: &dummyMsg{Content: "handshake"}, |
||||
Peer: tester.Nodes[0].ID(), |
||||
}, |
||||
}, |
||||
}) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
testHook.mu.Lock() |
||||
if testHook.msg == nil || testHook.msg.(*dummyMsg).Content != "handshake" { |
||||
t.Fatal("Expected msg to be set, but it is not") |
||||
} |
||||
if !testHook.send { |
||||
t.Fatal("Expected a send message, but it is not") |
||||
} |
||||
if testHook.peer == nil { |
||||
t.Fatal("Expected peer to be set, is nil") |
||||
} |
||||
if peerId := testHook.peer.ID(); peerId != tester.Nodes[0].ID() && peerId != tester.Nodes[1].ID() { |
||||
t.Fatalf("Expected peer ID to be set correctly, but it is not (got %v, exp %v or %v", peerId, tester.Nodes[0].ID(), tester.Nodes[1].ID()) |
||||
} |
||||
if testHook.size != 11 { //11 is the length of the encoded message
|
||||
t.Fatalf("Expected size to be %d, but it is %d ", 1, testHook.size) |
||||
} |
||||
testHook.mu.Unlock() |
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{ |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 0, |
||||
Msg: &dummyMsg{Content: "response"}, |
||||
Peer: tester.Nodes[1].ID(), |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
<-testHook.waitC |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
testHook.mu.Lock() |
||||
if testHook.msg == nil || testHook.msg.(*dummyMsg).Content != "response" { |
||||
t.Fatal("Expected msg to be set, but it is not") |
||||
} |
||||
if testHook.send { |
||||
t.Fatal("Expected a send message, but it is not") |
||||
} |
||||
if testHook.peer == nil || testHook.peer.ID() != tester.Nodes[1].ID() { |
||||
t.Fatal("Expected peer ID to be set correctly, but it is not") |
||||
} |
||||
if testHook.size != 10 { // 10 is the length of the encoded message
|
||||
t.Fatalf("Expected size to be %d, but it is %d ", 1, testHook.size) |
||||
} |
||||
testHook.mu.Unlock() |
||||
|
||||
testHook.err = fmt.Errorf("dummy error") |
||||
err = tester.TestExchanges(p2ptest.Exchange{ |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 0, |
||||
Msg: &dummyMsg{Content: "response"}, |
||||
Peer: tester.Nodes[1].ID(), |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
<-testHook.waitC |
||||
|
||||
time.Sleep(100 * time.Millisecond) |
||||
err = tester.TestDisconnected(&p2ptest.Disconnect{Peer: tester.Nodes[1].ID(), Error: testHook.err}) |
||||
if err != nil { |
||||
t.Fatalf("Expected a specific disconnect error, but got different one: %v", err) |
||||
} |
||||
} |
||||
|
||||
//We need to test that if the hook is not defined, then message infrastructure
|
||||
//(send,receive) still works
|
||||
func TestNoHook(t *testing.T) { |
||||
//create a test spec
|
||||
spec := createTestSpec() |
||||
//a random node
|
||||
id := adapters.RandomNodeConfig().ID |
||||
//a peer
|
||||
p := p2p.NewPeer(id, "testPeer", nil) |
||||
rw := &dummyRW{} |
||||
peer := NewPeer(p, rw, spec) |
||||
ctx := context.TODO() |
||||
msg := &perBytesMsgSenderPays{Content: "testBalance"} |
||||
//send a message
|
||||
|
||||
if err := peer.Send(ctx, msg); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
//simulate receiving a message
|
||||
rw.msg = msg |
||||
handler := func(ctx context.Context, msg interface{}) error { |
||||
return nil |
||||
} |
||||
|
||||
if err := peer.handleIncoming(handler); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
func TestProtoHandshakeVersionMismatch(t *testing.T) { |
||||
runProtoHandshake(t, &protoHandshake{41, "420"}, errorf(ErrHandshake, errorf(ErrHandler, "(msg code 0): 41 (!= 42)").Error())) |
||||
} |
||||
|
||||
func TestProtoHandshakeNetworkIDMismatch(t *testing.T) { |
||||
runProtoHandshake(t, &protoHandshake{42, "421"}, errorf(ErrHandshake, errorf(ErrHandler, "(msg code 0): 421 (!= 420)").Error())) |
||||
} |
||||
|
||||
func TestProtoHandshakeSuccess(t *testing.T) { |
||||
runProtoHandshake(t, &protoHandshake{42, "420"}) |
||||
} |
||||
|
||||
func moduleHandshakeExchange(id enode.ID, resp uint) []p2ptest.Exchange { |
||||
|
||||
return []p2ptest.Exchange{ |
||||
{ |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 1, |
||||
Msg: &hs0{42}, |
||||
Peer: id, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 1, |
||||
Msg: &hs0{resp}, |
||||
Peer: id, |
||||
}, |
||||
}, |
||||
}, |
||||
} |
||||
} |
||||
|
||||
func runModuleHandshake(t *testing.T, resp uint, errs ...error) { |
||||
t.Helper() |
||||
pp := p2ptest.NewTestPeerPool() |
||||
s := protocolTester(pp) |
||||
defer s.Stop() |
||||
|
||||
node := s.Nodes[0] |
||||
if err := s.TestExchanges(protoHandshakeExchange(node.ID(), &protoHandshake{42, "420"})...); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if err := s.TestExchanges(moduleHandshakeExchange(node.ID(), resp)...); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
var disconnects []*p2ptest.Disconnect |
||||
for i, err := range errs { |
||||
disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.Nodes[i].ID(), Error: err}) |
||||
} |
||||
if err := s.TestDisconnected(disconnects...); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
func TestModuleHandshakeError(t *testing.T) { |
||||
runModuleHandshake(t, 43, fmt.Errorf("handshake mismatch remote 43 > local 42")) |
||||
} |
||||
|
||||
func TestModuleHandshakeSuccess(t *testing.T) { |
||||
runModuleHandshake(t, 42) |
||||
} |
||||
|
||||
// testing complex interactions over multiple peers, relaying, dropping
|
||||
func testMultiPeerSetup(a, b enode.ID) []p2ptest.Exchange { |
||||
|
||||
return []p2ptest.Exchange{ |
||||
{ |
||||
Label: "primary handshake", |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 0, |
||||
Msg: &protoHandshake{42, "420"}, |
||||
Peer: a, |
||||
}, |
||||
{ |
||||
Code: 0, |
||||
Msg: &protoHandshake{42, "420"}, |
||||
Peer: b, |
||||
}, |
||||
}, |
||||
}, |
||||
{ |
||||
Label: "module handshake", |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 0, |
||||
Msg: &protoHandshake{42, "420"}, |
||||
Peer: a, |
||||
}, |
||||
{ |
||||
Code: 0, |
||||
Msg: &protoHandshake{42, "420"}, |
||||
Peer: b, |
||||
}, |
||||
}, |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 1, |
||||
Msg: &hs0{42}, |
||||
Peer: a, |
||||
}, |
||||
{ |
||||
Code: 1, |
||||
Msg: &hs0{42}, |
||||
Peer: b, |
||||
}, |
||||
}, |
||||
}, |
||||
|
||||
{Label: "alternative module handshake", Triggers: []p2ptest.Trigger{{Code: 1, Msg: &hs0{41}, Peer: a}, |
||||
{Code: 1, Msg: &hs0{41}, Peer: b}}}, |
||||
{Label: "repeated module handshake", Triggers: []p2ptest.Trigger{{Code: 1, Msg: &hs0{1}, Peer: a}}}, |
||||
{Label: "receiving repeated module handshake", Expects: []p2ptest.Expect{{Code: 1, Msg: &hs0{43}, Peer: a}}}} |
||||
} |
||||
|
||||
func runMultiplePeers(t *testing.T, peer int, errs ...error) { |
||||
t.Helper() |
||||
pp := p2ptest.NewTestPeerPool() |
||||
s := protocolTester(pp) |
||||
defer s.Stop() |
||||
|
||||
if err := s.TestExchanges(testMultiPeerSetup(s.Nodes[0].ID(), s.Nodes[1].ID())...); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
// after some exchanges of messages, we can test state changes
|
||||
// here this is simply demonstrated by the peerPool
|
||||
// after the handshake negotiations peers must be added to the pool
|
||||
// time.Sleep(1)
|
||||
tick := time.NewTicker(10 * time.Millisecond) |
||||
timeout := time.NewTimer(1 * time.Second) |
||||
WAIT: |
||||
for { |
||||
select { |
||||
case <-tick.C: |
||||
if pp.Has(s.Nodes[0].ID()) { |
||||
break WAIT |
||||
} |
||||
case <-timeout.C: |
||||
t.Fatal("timeout") |
||||
} |
||||
} |
||||
if !pp.Has(s.Nodes[1].ID()) { |
||||
t.Fatalf("missing peer test-1: %v (%v)", pp, s.Nodes) |
||||
} |
||||
|
||||
// peer 0 sends kill request for peer with index <peer>
|
||||
err := s.TestExchanges(p2ptest.Exchange{ |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 2, |
||||
Msg: &kill{s.Nodes[peer].ID()}, |
||||
Peer: s.Nodes[0].ID(), |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// the peer not killed sends a drop request
|
||||
err = s.TestExchanges(p2ptest.Exchange{ |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 3, |
||||
Msg: &drop{}, |
||||
Peer: s.Nodes[(peer+1)%2].ID(), |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// check the actual disconnect errors on the individual peers
|
||||
var disconnects []*p2ptest.Disconnect |
||||
for i, err := range errs { |
||||
disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.Nodes[i].ID(), Error: err}) |
||||
} |
||||
if err := s.TestDisconnected(disconnects...); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
// test if disconnected peers have been removed from peerPool
|
||||
if pp.Has(s.Nodes[peer].ID()) { |
||||
t.Fatalf("peer test-%v not dropped: %v (%v)", peer, pp, s.Nodes) |
||||
} |
||||
|
||||
} |
||||
func TestMultiplePeersDropSelf(t *testing.T) { |
||||
runMultiplePeers(t, 0, |
||||
fmt.Errorf("subprotocol error"), |
||||
fmt.Errorf("Message handler error: (msg code 3): dropped"), |
||||
) |
||||
} |
||||
|
||||
func TestMultiplePeersDropOther(t *testing.T) { |
||||
runMultiplePeers(t, 1, |
||||
fmt.Errorf("Message handler error: (msg code 3): dropped"), |
||||
fmt.Errorf("subprotocol error"), |
||||
) |
||||
} |
||||
|
||||
//dummy implementation of a MsgReadWriter
|
||||
//this allows for quick and easy unit tests without
|
||||
//having to build up the complete protocol
|
||||
type dummyRW struct { |
||||
msg interface{} |
||||
size uint32 |
||||
code uint64 |
||||
} |
||||
|
||||
func (d *dummyRW) WriteMsg(msg p2p.Msg) error { |
||||
return nil |
||||
} |
||||
|
||||
func (d *dummyRW) ReadMsg() (p2p.Msg, error) { |
||||
enc := bytes.NewReader(d.getDummyMsg()) |
||||
return p2p.Msg{ |
||||
Code: d.code, |
||||
Size: d.size, |
||||
Payload: enc, |
||||
ReceivedAt: time.Now(), |
||||
}, nil |
||||
} |
||||
|
||||
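// getDummyMsg mimics what the protocol writes on the wire: the RLP-encoded
// message is wrapped in a WrappedMsg together with its size, and the whole
// envelope is RLP-encoded again so ReadMsg can serve it as a p2p.Msg payload.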
func (d *dummyRW) getDummyMsg() []byte { |
||||
r, _ := rlp.EncodeToBytes(d.msg) |
||||
var b bytes.Buffer |
||||
wmsg := WrappedMsg{ |
||||
Context: b.Bytes(), |
||||
Size: uint32(len(r)), |
||||
Payload: r, |
||||
} |
||||
rr, _ := rlp.EncodeToBytes(wmsg) |
||||
d.size = uint32(len(rr)) |
||||
return rr |
||||
} |
@ -1,162 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package protocols |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
|
||||
"github.com/syndtr/goleveldb/leveldb" |
||||
) |
||||
|
||||
//AccountingMetrics abstracts away the metrics DB and
|
||||
//the reporter to persist metrics
|
||||
type AccountingMetrics struct { |
||||
reporter *reporter |
||||
} |
||||
|
||||
//Close will be called when the node is being shutdown
|
||||
//for a graceful cleanup
|
||||
func (am *AccountingMetrics) Close() { |
||||
close(am.reporter.quit) |
||||
// wait for reporter loop to finish saving metrics
|
||||
// before reporter database is closed
|
||||
select { |
||||
case <-time.After(10 * time.Second): |
||||
log.Error("accounting metrics reporter timeout") |
||||
case <-am.reporter.done: |
||||
} |
||||
am.reporter.db.Close() |
||||
} |
||||
|
||||
//reporter is an internal structure used to write p2p accounting related
|
||||
//metrics to a LevelDB. It will periodically write the accrued metrics to the DB.
|
||||
type reporter struct { |
||||
reg metrics.Registry //the registry for these metrics (independent of other metrics)
|
||||
interval time.Duration //duration at which the reporter will persist metrics
|
||||
db *leveldb.DB //the actual DB
|
||||
quit chan struct{} //quit the reporter loop
|
||||
done chan struct{} //signal that reporter loop is done
|
||||
} |
||||
|
||||
//NewAccountingMetrics creates a new LevelDB instance used to persist metrics defined
|
||||
//inside p2p/protocols/accounting.go
|
||||
func NewAccountingMetrics(r metrics.Registry, d time.Duration, path string) *AccountingMetrics { |
||||
var val = make([]byte, 8) |
||||
var err error |
||||
|
||||
//Create the LevelDB
|
||||
db, err := leveldb.OpenFile(path, nil) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return nil |
||||
} |
||||
|
||||
//Check for all defined metrics that there is a value in the DB
|
||||
//If there is, assign it to the metric. This means that the node
|
||||
//has been running before and that metrics have been persisted.
|
||||
metricsMap := map[string]metrics.Counter{ |
||||
"account.balance.credit": mBalanceCredit, |
||||
"account.balance.debit": mBalanceDebit, |
||||
"account.bytes.credit": mBytesCredit, |
||||
"account.bytes.debit": mBytesDebit, |
||||
"account.msg.credit": mMsgCredit, |
||||
"account.msg.debit": mMsgDebit, |
||||
"account.peerdrops": mPeerDrops, |
||||
"account.selfdrops": mSelfDrops, |
||||
} |
||||
//iterate the map and get the values
|
||||
for key, metric := range metricsMap { |
||||
val, err = db.Get([]byte(key), nil) |
||||
//until the first time a value is being written,
|
||||
//this will return an error.
|
||||
//it could be beneficial though to log errors later,
|
||||
//but that would require a different logic
|
||||
if err == nil { |
||||
metric.Inc(int64(binary.BigEndian.Uint64(val))) |
||||
} |
||||
} |
||||
|
||||
//create the reporter
|
||||
rep := &reporter{ |
||||
reg: r, |
||||
interval: d, |
||||
db: db, |
||||
quit: make(chan struct{}), |
||||
done: make(chan struct{}), |
||||
} |
||||
|
||||
//run the go routine
|
||||
go rep.run() |
||||
|
||||
m := &AccountingMetrics{ |
||||
reporter: rep, |
||||
} |
||||
|
||||
return m |
||||
} |
||||
|
||||
//run is the goroutine which periodically sends the metrics to the configured LevelDB
|
||||
func (r *reporter) run() { |
||||
// signal that the reporter loop is done
|
||||
defer close(r.done) |
||||
|
||||
intervalTicker := time.NewTicker(r.interval) |
||||
|
||||
for { |
||||
select { |
||||
case <-intervalTicker.C: |
||||
//at each tick send the metrics
|
||||
if err := r.save(); err != nil { |
||||
log.Error("unable to send metrics to LevelDB", "err", err) |
||||
//If there is an error in writing, exit the routine; we assume here that the error is
|
||||
//severe and don't attempt to write again.
|
||||
//Also, this should prevent leaking when the node is stopped
|
||||
return |
||||
} |
||||
case <-r.quit: |
||||
//graceful shutdown
|
||||
if err := r.save(); err != nil { |
||||
log.Error("unable to send metrics to LevelDB", "err", err) |
||||
} |
||||
return |
||||
} |
||||
} |
||||
} |
||||
|
||||
//send the metrics to the DB
|
||||
func (r *reporter) save() error { |
||||
//create a LevelDB Batch
|
||||
batch := leveldb.Batch{} |
||||
//for each metric in the registry (which is independent)...
|
||||
r.reg.Each(func(name string, i interface{}) { |
||||
metric, ok := i.(metrics.Counter) |
||||
if ok { |
||||
//assuming every metric here to be a Counter (separate registry)
|
||||
//...create a snapshot...
|
||||
ms := metric.Snapshot() |
||||
byteVal := make([]byte, 8) |
||||
binary.BigEndian.PutUint64(byteVal, uint64(ms.Count())) |
||||
//...and save the value to the DB
|
||||
batch.Put([]byte(name), byteVal) |
||||
} |
||||
}) |
||||
return r.db.Write(&batch, nil) |
||||
} |
@ -1,83 +0,0 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package protocols |
||||
|
||||
import ( |
||||
"io/ioutil" |
||||
"os" |
||||
"path/filepath" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
//TestReporter tests that the metrics being collected for p2p accounting
|
||||
//are being persisted and available after restart of a node.
|
||||
//It simulates restarting by just recreating the DB as if the node had restarted.
|
||||
func TestReporter(t *testing.T) { |
||||
//create a test directory
|
||||
dir, err := ioutil.TempDir("", "reporter-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(dir) |
||||
|
||||
//setup the metrics
|
||||
log.Debug("Setting up metrics first time") |
||||
reportInterval := 2 * time.Millisecond |
||||
metrics := SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db")) |
||||
log.Debug("Done.") |
||||
|
||||
//change metrics
|
||||
mBalanceCredit.Inc(12) |
||||
mBytesCredit.Inc(34) |
||||
mMsgDebit.Inc(9) |
||||
|
||||
//store expected metrics
|
||||
expectedBalanceCredit := mBalanceCredit.Count() |
||||
expectedBytesCredit := mBytesCredit.Count() |
||||
expectedMsgDebit := mMsgDebit.Count() |
||||
|
||||
//give the reporter time to write the metrics to DB
|
||||
time.Sleep(20 * time.Millisecond) |
||||
|
||||
//close the DB also, or we can't create a new one
|
||||
metrics.Close() |
||||
|
||||
//clear the metrics - this effectively simulates the node having shut down...
|
||||
mBalanceCredit.Clear() |
||||
mBytesCredit.Clear() |
||||
mMsgDebit.Clear() |
||||
|
||||
//setup the metrics again
|
||||
log.Debug("Setting up metrics second time") |
||||
metrics = SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db")) |
||||
defer metrics.Close() |
||||
log.Debug("Done.") |
||||
|
||||
//now check the metrics, they should have the same value as before "shutdown"
|
||||
if mBalanceCredit.Count() != expectedBalanceCredit { |
||||
t.Fatalf("Expected counter to be %d, but is %d", expectedBalanceCredit, mBalanceCredit.Count()) |
||||
} |
||||
if mBytesCredit.Count() != expectedBytesCredit { |
||||
t.Fatalf("Expected counter to be %d, but is %d", expectedBytesCredit, mBytesCredit.Count()) |
||||
} |
||||
if mMsgDebit.Count() != expectedMsgDebit { |
||||
t.Fatalf("Expected counter to be %d, but is %d", expectedMsgDebit, mMsgDebit.Count()) |
||||
} |
||||
} |
@ -1,35 +0,0 @@ |
||||
# Core team members |
||||
|
||||
Viktor Trón - @zelig |
||||
Louis Holbrook - @nolash |
||||
Lewis Marshall - @lmars |
||||
Anton Evangelatov - @nonsense |
||||
Janoš Guljaš - @janos |
||||
Balint Gabor - @gbalint |
||||
Elad Nachmias - @justelad |
||||
Daniel A. Nagy - @nagydani |
||||
Aron Fischer - @homotopycolimit |
||||
Fabio Barone - @holisticode |
||||
Zahoor Mohamed - @jmozah |
||||
Zsolt Felföldi - @zsfelfoldi |
||||
|
||||
# External contributors |
||||
|
||||
Kiel Barry |
||||
Gary Rong |
||||
Jared Wasinger |
||||
Leon Stanko |
||||
Javier Peletier [epiclabs.io] |
||||
Bartek Borkowski [tungsten-labs.com] |
||||
Shane Howley [mainframe.com] |
||||
Doug Leonard [mainframe.com] |
||||
Ivan Daniluk [status.im] |
||||
Felix Lange [EF] |
||||
Martin Holst Swende [EF] |
||||
Guillaume Ballet [EF] |
||||
ligi [EF] |
||||
Christopher Dro [blick-labs.com] |
||||
Sergii Bomko [ledgerleopard.com] |
||||
Domino Valdano |
||||
Rafael Matias |
||||
Coogan Brennan |
@ -1,25 +0,0 @@ |
||||
# Ownership by go packages |
||||
|
||||
swarm |
||||
├── api ─────────────────── ethersphere |
||||
├── bmt ─────────────────── @zelig |
||||
├── dev ─────────────────── @lmars |
||||
├── fuse ────────────────── @jmozah, @holisticode |
||||
├── grafana_dashboards ──── @nonsense |
||||
├── metrics ─────────────── @nonsense, @holisticode |
||||
├── network ─────────────── ethersphere |
||||
│ ├── bitvector ───────── @zelig, @janos, @gbalint |
||||
│ ├── priorityqueue ───── @zelig, @janos, @gbalint |
||||
│ ├── simulations ─────── @zelig |
||||
│ └── stream ──────────── @janos, @zelig, @gbalint, @holisticode, @justelad |
||||
│ ├── intervals ───── @janos |
||||
│ └── testing ─────── @zelig |
||||
├── pot ─────────────────── @zelig |
||||
├── pss ─────────────────── @nolash, @zelig, @nonsense |
||||
├── services ────────────── @zelig |
||||
├── state ───────────────── @justelad |
||||
├── storage ─────────────── ethersphere |
||||
│ ├── encryption ──────── @gbalint, @zelig, @nagydani |
||||
│ ├── mock ────────────── @janos |
||||
│ └── feed ────────────── @nolash, @jpeletier |
||||
└── testutil ────────────── @lmars |
@ -1,244 +1,7 @@ |
||||
## Swarm |
||||
# Swarm |
||||
|
||||
[https://swarm.ethereum.org](https://swarm.ethereum.org) |
||||
https://swarm.ethereum.org |
||||
|
||||
Swarm is a distributed storage platform and content distribution service, a native base layer service of the ethereum web3 stack. The primary objective of Swarm is to provide a decentralized and redundant store for dapp code and data as well as block chain and state data. Swarm is also set out to provide various base layer services for web3, including node-to-node messaging, media streaming, decentralised database services and scalable state-channel infrastructure for decentralised service economies. |
||||
|
||||
[![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) |
||||
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethersphere/orange-lounge?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) |
||||
|
||||
## Table of Contents |
||||
|
||||
* [Building the source](#building-the-source) |
||||
* [Running Swarm](#running-swarm) |
||||
* [Documentation](#documentation) |
||||
* [Developers Guide](#developers-guide) |
||||
* [Go Environment](#go-environment) |
||||
* [Vendored Dependencies](#vendored-dependencies) |
||||
* [Testing](#testing) |
||||
* [Profiling Swarm](#profiling-swarm) |
||||
* [Metrics and Instrumentation in Swarm](#metrics-and-instrumentation-in-swarm) |
||||
* [Public Gateways](#public-gateways) |
||||
* [Swarm Dapps](#swarm-dapps) |
||||
* [Contributing](#contributing) |
||||
* [License](#license) |
||||
|
||||
## Building the source |
||||
|
||||
Building Swarm requires Go (version 1.10 or later). |
||||
|
||||
go get -d github.com/ethereum/go-ethereum |
||||
|
||||
go install github.com/ethereum/go-ethereum/cmd/swarm |
||||
|
||||
## Running Swarm |
||||
|
||||
Going through all the possible command line flags is out of scope here, but we've enumerated a few common parameter combos to get you up to speed quickly on how you can run your own Swarm node. |
||||
|
||||
To run Swarm you need an Ethereum account. You can create a new account by running the following command: |
||||
|
||||
geth account new |
||||
|
||||
You will be prompted for a password: |
||||
|
||||
Your new account is locked with a password. Please give a password. Do not forget this password. |
||||
Passphrase: |
||||
Repeat passphrase: |
||||
|
||||
Once you have specified the password, the output will be the Ethereum address representing that account. For example: |
||||
|
||||
Address: {2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1} |
||||
|
||||
Using this account, connect to Swarm with |
||||
|
||||
swarm --bzzaccount <your-account-here> |
||||
|
||||
# in our example |
||||
|
||||
swarm --bzzaccount 2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1 |
||||
|
||||
|
||||
### Verifying that your local Swarm node is running |
||||
|
||||
When running, Swarm is accessible through an HTTP API on port 8500. |
||||
|
||||
Confirm that it is up and running by pointing your browser to http://localhost:8500 |
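As an illustration, the same check can be scripted. A minimal Go sketch (it only assumes a local node listening on the default port 8500; the program itself is not part of Swarm):

```
package main

import (
	"fmt"
	"log"
	"net/http"
)

// ping the local Swarm HTTP gateway and print the response status
func main() {
	resp, err := http.Get("http://localhost:8500/")
	if err != nil {
		log.Fatalf("swarm HTTP API not reachable: %v", err)
	}
	defer resp.Body.Close()
	fmt.Println("swarm HTTP API status:", resp.Status)
}
```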
||||
|
||||
### Ethereum Name Service resolution |
||||
|
||||
The Ethereum Name Service is the Ethereum equivalent of DNS in the classic web. In order to use ENS to resolve names to Swarm content hashes (e.g. `bzz://theswarm.eth`), `swarm` has to connect to a `geth` instance, which is synced with the Ethereum mainnet. This is done using the `--ens-api` flag. |
||||
|
||||
swarm --bzzaccount <your-account-here> \ |
||||
--ens-api '$HOME/.ethereum/geth.ipc' |
||||
|
||||
# in our example |
||||
|
||||
swarm --bzzaccount 2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1 \ |
||||
--ens-api '$HOME/.ethereum/geth.ipc' |
||||
|
||||
For more information on usage, features or command line flags, please consult the Documentation. |
||||
|
||||
|
||||
## Documentation |
||||
|
||||
Swarm documentation can be found at [https://swarm-guide.readthedocs.io](https://swarm-guide.readthedocs.io). |
||||
|
||||
|
||||
## Developers Guide |
||||
|
||||
### Go Environment |
||||
|
||||
We assume that you have Go v1.10 installed, and `GOPATH` is set. |
||||
|
||||
You must have your working copy under `$GOPATH/src/github.com/ethereum/go-ethereum`. |
||||
|
||||
Most likely you will be working from your fork of `go-ethereum`, let's say from `github.com/nirname/go-ethereum`. Clone or move your fork into the right place: |
||||
|
||||
``` |
||||
git clone git@github.com:nirname/go-ethereum.git $GOPATH/src/github.com/ethereum/go-ethereum |
||||
``` |
||||
|
||||
|
||||
### Vendored Dependencies |
||||
|
||||
All dependencies are tracked in the `vendor` directory. We use `govendor` to manage them. |
||||
|
||||
If you want to add a new dependency, run `govendor fetch <import-path>`, then commit the result. |
||||
|
||||
If you want to update all dependencies to their latest upstream version, run `govendor fetch +v`. |
||||
|
||||
|
||||
### Testing |
||||
|
||||
This section explains how to run unit, integration, and end-to-end tests in your development sandbox. |
||||
|
||||
Testing one library: |
||||
|
||||
``` |
||||
go test -v -cpu 4 ./swarm/api |
||||
``` |
||||
|
||||
Note: using the options -cpu (number of cores allowed) and -v (verbose logging even when there is no error) is recommended. |
||||
|
||||
Testing only some methods: |
||||
|
||||
``` |
||||
go test -v -cpu 4 ./eth -run TestMethod |
||||
``` |
||||
|
||||
Note: all tests whose names start with TestMethod will be run, so if both TestMethod and TestMethod1 exist, both will be run. |
||||
|
||||
Running benchmarks: |
||||
|
||||
``` |
||||
go test -v -cpu 4 -bench . -run BenchmarkJoin |
||||
``` |
||||
|
||||
|
||||
### Profiling Swarm |
||||
|
||||
This section explains how to use the Go `pprof` profiler with Swarm. |
||||
|
||||
If `swarm` is started with the `--pprof` option, a debugging HTTP server is made available on port 6060. |
||||
|
||||
You can open http://localhost:6060/debug/pprof in a browser to inspect the heap, running goroutines and other profiles. |
||||
|
||||
Following the full goroutine stack dump link (http://localhost:6060/debug/pprof/goroutine?debug=2) produces a trace that is useful for debugging. |
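The profile data can also be fetched programmatically and analysed later with `go tool pprof`. A minimal sketch (assuming a node started with `--pprof` on the default port; the output file name is arbitrary):

```
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

// download the current heap profile from the pprof debug server
func main() {
	resp, err := http.Get("http://localhost:6060/debug/pprof/heap")
	if err != nil {
		log.Fatalf("pprof server not reachable: %v", err)
	}
	defer resp.Body.Close()

	out, err := os.Create("swarm-heap.pprof")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		log.Fatal(err)
	}
	log.Println("heap profile written to swarm-heap.pprof")
}
```

The saved file can then be inspected with `go tool pprof swarm-heap.pprof`.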
||||
|
||||
|
||||
### Metrics and Instrumentation in Swarm |
||||
|
||||
This section explains how to visualize and use existing Swarm metrics and how to instrument Swarm with a new metric. |
||||
|
||||
The Swarm metrics system is based on the `go-metrics` library. |
||||
|
||||
The most common types of measurements we use in Swarm are `counters` and `resetting timers`. Consult the `go-metrics` documentation for a full reference of the available types. |
||||
|
||||
``` |
||||
// incrementing a counter |
||||
metrics.GetOrRegisterCounter("network.stream.received_chunks", nil).Inc(1) |
||||
|
||||
// measuring latency with a resetting timer |
||||
start := time.Now() |
||||
t := metrics.GetOrRegisterResettingTimer("http.request.GET.time", nil) |
||||
... |
||||
t.UpdateSince(start) |
||||
``` |
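Put together as a self-contained sketch (the metric names here are made up for illustration; the calls are the ones from the go-ethereum `metrics` package shown above):

```
package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// count an event
	metrics.GetOrRegisterCounter("example.received_chunks", nil).Inc(1)

	// measure the latency of some work with a resetting timer
	start := time.Now()
	t := metrics.GetOrRegisterResettingTimer("example.request.time", nil)
	time.Sleep(10 * time.Millisecond) // the work being measured
	t.UpdateSince(start)
}
```

The collected values only show up in the exporters when the node runs with metrics enabled (see the `--metrics` flags below).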
||||
|
||||
#### Visualizing metrics |
||||
|
||||
Swarm supports an InfluxDB exporter. Consult the help section to learn about the command line arguments used to configure it: |
||||
|
||||
``` |
||||
swarm --help | grep metrics |
||||
``` |
||||
|
||||
We use Grafana and InfluxDB to visualise metrics reported by Swarm. We keep our Grafana dashboards under version control at `./swarm/grafana_dashboards`. You could use them or design your own. |
||||
|
||||
We have built a tool to help with automatic start of Grafana and InfluxDB and provisioning of dashboards at https://github.com/nonsense/stateth , which requires that you have Docker installed. |
||||
|
||||
Once you have `stateth` installed, and you have Docker running locally, you have to: |
||||
|
||||
1. Run `stateth` and keep it running in the background |
||||
``` |
||||
stateth --rm --grafana-dashboards-folder $GOPATH/src/github.com/ethereum/go-ethereum/swarm/grafana_dashboards --influxdb-database metrics |
||||
``` |
||||
|
||||
2. Run `swarm` with at least the following params: |
||||
``` |
||||
--metrics \ |
||||
--metrics.influxdb.export \ |
||||
--metrics.influxdb.endpoint "http://localhost:8086" \ |
||||
--metrics.influxdb.username "admin" \ |
||||
--metrics.influxdb.password "admin" \ |
||||
--metrics.influxdb.database "metrics" |
||||
``` |
||||
|
||||
3. Open Grafana at http://localhost:3000 and view the dashboards to gain insight into Swarm. |
||||
|
||||
|
||||
## Public Gateways |
||||
|
||||
Swarm offers a local HTTP proxy API that Dapps can use to interact with Swarm. The Ethereum Foundation is hosting a public gateway, which allows free access so that people can try Swarm without running their own node. |
||||
|
||||
The Swarm public gateways are temporary and users should not rely on their existence for production services. |
||||
|
||||
The Swarm public gateway can be found at https://swarm-gateways.net and is always running the latest `stable` Swarm release. |
||||
|
||||
## Swarm Dapps |
||||
|
||||
You can find a few reference Swarm decentralised applications at: https://swarm-gateways.net/bzz:/swarmapps.eth |
||||
|
||||
Their source code can be found at: https://github.com/ethersphere/swarm-dapps |
||||
|
||||
## Contributing |
||||
|
||||
Thank you for considering helping out with the source code! We welcome contributions from |
||||
anyone on the internet, and are grateful for even the smallest of fixes! |
||||
|
||||
If you'd like to contribute to Swarm, please fork, fix, commit and send a pull request |
||||
for the maintainers to review and merge into the main code base. If you wish to submit more |
||||
complex changes though, please check up with the core devs first on [our Swarm gitter channel](https://gitter.im/ethersphere/orange-lounge) |
||||
to ensure those changes are in line with the general philosophy of the project and/or get some |
||||
early feedback which can make both your efforts much lighter as well as our review and merge |
||||
procedures quick and simple. |
||||
|
||||
Please make sure your contributions adhere to our coding guidelines: |
||||
|
||||
* Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). |
||||
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines. |
||||
* Pull requests need to be based on and opened against the `master` branch. |
||||
* [Code review guidelines](https://github.com/ethereum/go-ethereum/wiki/Code-Review-Guidelines). |
||||
* Commit messages should be prefixed with the package(s) they modify. |
||||
* E.g. "swarm/fuse: ignore default manifest entry" |
||||
|
||||
|
||||
## License |
||||
|
||||
The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the |
||||
[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), also |
||||
included in our repository in the `COPYING.LESSER` file. |
||||
|
||||
The go-ethereum binaries (i.e. all code inside of the `cmd` directory) are licensed under the |
||||
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also included |
||||
in our repository in the `COPYING` file. |
||||
**Note**: The codebase has been moved to [ethersphere/swarm](https://github.com/ethersphere/swarm) |
||||
|
@ -1,538 +0,0 @@ |
||||
package api |
||||
|
||||
import ( |
||||
"context" |
||||
"crypto/ecdsa" |
||||
"crypto/rand" |
||||
"encoding/hex" |
||||
"encoding/json" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/crypto/ecies" |
||||
"github.com/ethereum/go-ethereum/swarm/log" |
||||
"github.com/ethereum/go-ethereum/swarm/sctx" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
"golang.org/x/crypto/scrypt" |
||||
"golang.org/x/crypto/sha3" |
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var ( |
||||
ErrDecrypt = errors.New("cant decrypt - forbidden") |
||||
ErrUnknownAccessType = errors.New("unknown access type (or not implemented)") |
||||
ErrDecryptDomainForbidden = errors.New("decryption request domain forbidden - can only decrypt on localhost") |
||||
AllowedDecryptDomains = []string{ |
||||
"localhost", |
||||
"127.0.0.1", |
||||
} |
||||
) |
||||
|
||||
const EmptyCredentials = "" |
||||
|
||||
type AccessEntry struct { |
||||
Type AccessType |
||||
Publisher string |
||||
Salt []byte |
||||
Act string |
||||
KdfParams *KdfParams |
||||
} |
||||
|
||||
type DecryptFunc func(*ManifestEntry) error |
||||
|
||||
func (a *AccessEntry) MarshalJSON() (out []byte, err error) { |
||||
|
||||
return json.Marshal(struct { |
||||
Type AccessType `json:"type,omitempty"` |
||||
Publisher string `json:"publisher,omitempty"` |
||||
Salt string `json:"salt,omitempty"` |
||||
Act string `json:"act,omitempty"` |
||||
KdfParams *KdfParams `json:"kdf_params,omitempty"` |
||||
}{ |
||||
Type: a.Type, |
||||
Publisher: a.Publisher, |
||||
Salt: hex.EncodeToString(a.Salt), |
||||
Act: a.Act, |
||||
KdfParams: a.KdfParams, |
||||
}) |
||||
|
||||
} |
||||
|
||||
func (a *AccessEntry) UnmarshalJSON(value []byte) error { |
||||
v := struct { |
||||
Type AccessType `json:"type,omitempty"` |
||||
Publisher string `json:"publisher,omitempty"` |
||||
Salt string `json:"salt,omitempty"` |
||||
Act string `json:"act,omitempty"` |
||||
KdfParams *KdfParams `json:"kdf_params,omitempty"` |
||||
}{} |
||||
|
||||
err := json.Unmarshal(value, &v) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
a.Act = v.Act |
||||
a.KdfParams = v.KdfParams |
||||
a.Publisher = v.Publisher |
||||
a.Salt, err = hex.DecodeString(v.Salt) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if len(a.Salt) != 32 { |
||||
return errors.New("salt should be 32 bytes long") |
||||
} |
||||
a.Type = v.Type |
||||
return nil |
||||
} |
||||
|
||||
type KdfParams struct { |
||||
N int `json:"n"` |
||||
P int `json:"p"` |
||||
R int `json:"r"` |
||||
} |
||||
|
||||
type AccessType string |
||||
|
||||
const AccessTypePass = AccessType("pass") |
||||
const AccessTypePK = AccessType("pk") |
||||
const AccessTypeACT = AccessType("act") |
||||
|
||||
// NewAccessEntryPassword creates a manifest AccessEntry in order to create an ACT protected by a password
|
||||
func NewAccessEntryPassword(salt []byte, kdfParams *KdfParams) (*AccessEntry, error) { |
||||
if len(salt) != 32 { |
||||
return nil, fmt.Errorf("salt should be 32 bytes long") |
||||
} |
||||
return &AccessEntry{ |
||||
Type: AccessTypePass, |
||||
Salt: salt, |
||||
KdfParams: kdfParams, |
||||
}, nil |
||||
} |
||||
|
||||
// NewAccessEntryPK creates a manifest AccessEntry in order to create an ACT protected by a pair of Elliptic Curve keys
|
||||
func NewAccessEntryPK(publisher string, salt []byte) (*AccessEntry, error) { |
||||
if len(publisher) != 66 { |
||||
return nil, fmt.Errorf("publisher should be 66 characters long, got %d", len(publisher)) |
||||
} |
||||
if len(salt) != 32 { |
||||
return nil, fmt.Errorf("salt should be 32 bytes long") |
||||
} |
||||
return &AccessEntry{ |
||||
Type: AccessTypePK, |
||||
Publisher: publisher, |
||||
Salt: salt, |
||||
}, nil |
||||
} |
||||
|
||||
// NewAccessEntryACT creates a manifest AccessEntry in order to create an ACT protected by a combination of EC keys and passwords
|
||||
func NewAccessEntryACT(publisher string, salt []byte, act string) (*AccessEntry, error) { |
||||
if len(salt) != 32 { |
||||
return nil, fmt.Errorf("salt should be 32 bytes long") |
||||
} |
||||
if len(publisher) != 66 { |
||||
return nil, fmt.Errorf("publisher should be 66 characters long") |
||||
} |
||||
|
||||
return &AccessEntry{ |
||||
Type: AccessTypeACT, |
||||
Publisher: publisher, |
||||
Salt: salt, |
||||
Act: act, |
||||
KdfParams: DefaultKdfParams, |
||||
}, nil |
||||
} |
||||
|
||||
// NOOPDecrypt is a generic decrypt function that is passed into the API in places where real ACT decryption capabilities are
|
||||
// either unwanted, or alternatively, cannot be implemented in the immediate scope
|
||||
func NOOPDecrypt(*ManifestEntry) error { |
||||
return nil |
||||
} |
||||
|
||||
var DefaultKdfParams = NewKdfParams(262144, 1, 8) |
||||
|
||||
// NewKdfParams returns a KdfParams struct with the given scrypt params
|
||||
func NewKdfParams(n, p, r int) *KdfParams { |
||||
|
||||
return &KdfParams{ |
||||
N: n, |
||||
P: p, |
||||
R: r, |
||||
} |
||||
} |
||||
|
||||
// NewSessionKeyPassword creates a session key based on a shared secret (password) and the given salt
|
||||
// and kdf parameters in the access entry
|
||||
func NewSessionKeyPassword(password string, accessEntry *AccessEntry) ([]byte, error) { |
||||
if accessEntry.Type != AccessTypePass && accessEntry.Type != AccessTypeACT { |
||||
return nil, errors.New("incorrect access entry type") |
||||
|
||||
} |
||||
return sessionKeyPassword(password, accessEntry.Salt, accessEntry.KdfParams) |
||||
} |
||||
|
||||
func sessionKeyPassword(password string, salt []byte, kdfParams *KdfParams) ([]byte, error) { |
||||
return scrypt.Key( |
||||
[]byte(password), |
||||
salt, |
||||
kdfParams.N, |
||||
kdfParams.R, |
||||
kdfParams.P, |
||||
32, |
||||
) |
||||
} |
||||
|
||||
// NewSessionKeyPK creates a new ACT Session Key using an ECDH shared secret for the given key pair and the given salt value
|
||||
func NewSessionKeyPK(private *ecdsa.PrivateKey, public *ecdsa.PublicKey, salt []byte) ([]byte, error) { |
||||
granteePubEcies := ecies.ImportECDSAPublic(public) |
||||
privateKey := ecies.ImportECDSA(private) |
||||
|
||||
bytes, err := privateKey.GenerateShared(granteePubEcies, 16, 16) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
bytes = append(salt, bytes...) |
||||
sessionKey := crypto.Keccak256(bytes) |
||||
return sessionKey, nil |
||||
} |
||||
|
||||
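// doDecrypt returns a DecryptFunc that resolves access-controlled manifest
// entries: depending on the access type it derives a session key from a
// password ("pass"), an ECDH shared secret ("pk") or an ACT lookup ("act"),
// and it only decrypts for requests originating from localhost.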
func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.PrivateKey) DecryptFunc { |
||||
return func(m *ManifestEntry) error { |
||||
if m.Access == nil { |
||||
return nil |
||||
} |
||||
|
||||
allowed := false |
||||
requestDomain := sctx.GetHost(ctx) |
||||
for _, v := range AllowedDecryptDomains { |
||||
if strings.Contains(requestDomain, v) { |
||||
allowed = true |
||||
} |
||||
} |
||||
|
||||
if !allowed { |
||||
return ErrDecryptDomainForbidden |
||||
} |
||||
|
||||
switch m.Access.Type { |
||||
case "pass": |
||||
if credentials != "" { |
||||
key, err := NewSessionKeyPassword(credentials, m.Access) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
ref, err := hex.DecodeString(m.Hash) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
enc := NewRefEncryption(len(ref) - 8) |
||||
decodedRef, err := enc.Decrypt(ref, key) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
|
||||
m.Hash = hex.EncodeToString(decodedRef) |
||||
m.Access = nil |
||||
return nil |
||||
} |
||||
return ErrDecrypt |
||||
case "pk": |
||||
publisherBytes, err := hex.DecodeString(m.Access.Publisher) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
publisher, err := crypto.DecompressPubkey(publisherBytes) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
key, err := NewSessionKeyPK(pk, publisher, m.Access.Salt) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
ref, err := hex.DecodeString(m.Hash) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
enc := NewRefEncryption(len(ref) - 8) |
||||
decodedRef, err := enc.Decrypt(ref, key) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
|
||||
m.Hash = hex.EncodeToString(decodedRef) |
||||
m.Access = nil |
||||
return nil |
||||
case "act": |
||||
var ( |
||||
sessionKey []byte |
||||
err error |
||||
) |
||||
|
||||
publisherBytes, err := hex.DecodeString(m.Access.Publisher) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
publisher, err := crypto.DecompressPubkey(publisherBytes) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
|
||||
sessionKey, err = NewSessionKeyPK(pk, publisher, m.Access.Salt) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
|
||||
found, ciphertext, decryptionKey, err := a.getACTDecryptionKey(ctx, storage.Address(common.Hex2Bytes(m.Access.Act)), sessionKey) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if !found { |
||||
// try to fall back to password
|
||||
if credentials != "" { |
||||
sessionKey, err = NewSessionKeyPassword(credentials, m.Access) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
found, ciphertext, decryptionKey, err = a.getACTDecryptionKey(ctx, storage.Address(common.Hex2Bytes(m.Access.Act)), sessionKey) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if !found { |
||||
return ErrDecrypt |
||||
} |
||||
} else { |
||||
return ErrDecrypt |
||||
} |
||||
} |
||||
enc := NewRefEncryption(len(ciphertext) - 8) |
||||
decodedRef, err := enc.Decrypt(ciphertext, decryptionKey) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
|
||||
ref, err := hex.DecodeString(m.Hash) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
enc = NewRefEncryption(len(ref) - 8) |
||||
decodedMainRef, err := enc.Decrypt(ref, decodedRef) |
||||
if err != nil { |
||||
return ErrDecrypt |
||||
} |
||||
m.Hash = hex.EncodeToString(decodedMainRef) |
||||
m.Access = nil |
||||
return nil |
||||
} |
||||
return ErrUnknownAccessType |
||||
} |
||||
} |
||||
|
||||
func (a *API) getACTDecryptionKey(ctx context.Context, actManifestAddress storage.Address, sessionKey []byte) (found bool, ciphertext, decryptionKey []byte, err error) { |
||||
hasher := sha3.NewLegacyKeccak256() |
||||
hasher.Write(append(sessionKey, 0)) |
||||
lookupKey := hasher.Sum(nil) |
||||
hasher.Reset() |
||||
|
||||
hasher.Write(append(sessionKey, 1)) |
||||
accessKeyDecryptionKey := hasher.Sum(nil) |
||||
hasher.Reset() |
||||
|
||||
lk := hex.EncodeToString(lookupKey) |
||||
list, err := a.GetManifestList(ctx, NOOPDecrypt, actManifestAddress, lk) |
||||
if err != nil { |
||||
return false, nil, nil, err |
||||
} |
||||
for _, v := range list.Entries { |
||||
if v.Path == lk { |
||||
cipherTextBytes, err := hex.DecodeString(v.Hash) |
||||
if err != nil { |
||||
return false, nil, nil, err |
||||
} |
||||
return true, cipherTextBytes, accessKeyDecryptionKey, nil |
||||
} |
||||
} |
||||
return false, nil, nil, nil |
||||
} |
||||
|
||||
func GenerateAccessControlManifest(ctx *cli.Context, ref string, accessKey []byte, ae *AccessEntry) (*Manifest, error) { |
||||
refBytes, err := hex.DecodeString(ref) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
// encrypt ref with accessKey
|
||||
enc := NewRefEncryption(len(refBytes)) |
||||
encrypted, err := enc.Encrypt(refBytes, accessKey) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
m := &Manifest{ |
||||
Entries: []ManifestEntry{ |
||||
{ |
||||
Hash: hex.EncodeToString(encrypted), |
||||
ContentType: ManifestType, |
||||
ModTime: time.Now(), |
||||
Access: ae, |
||||
}, |
||||
}, |
||||
} |
||||
|
||||
return m, nil |
||||
} |
||||
|
||||
// DoPK is a helper function to the CLI API that handles the entire business logic for
|
||||
// creating a session key and access entry given the cli context, ec keys and salt
|
||||
func DoPK(ctx *cli.Context, privateKey *ecdsa.PrivateKey, granteePublicKey string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) { |
||||
if granteePublicKey == "" { |
||||
return nil, nil, errors.New("need a grantee Public Key") |
||||
} |
||||
b, err := hex.DecodeString(granteePublicKey) |
||||
if err != nil { |
||||
log.Error("error decoding grantee public key", "err", err) |
||||
return nil, nil, err |
||||
} |
||||
|
||||
granteePub, err := crypto.DecompressPubkey(b) |
||||
if err != nil { |
||||
log.Error("error decompressing grantee public key", "err", err) |
||||
return nil, nil, err |
||||
} |
||||
|
||||
sessionKey, err = NewSessionKeyPK(privateKey, granteePub, salt) |
||||
if err != nil { |
||||
log.Error("error getting session key", "err", err) |
||||
return nil, nil, err |
||||
} |
||||
|
||||
ae, err = NewAccessEntryPK(hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)), salt) |
||||
if err != nil { |
||||
log.Error("error generating access entry", "err", err) |
||||
return nil, nil, err |
||||
} |
||||
|
||||
return sessionKey, ae, nil |
||||
} |
||||
|
||||
// DoACT is a helper function to the CLI API that handles the entire business logic for
|
||||
// creating an access key, access entry and ACT manifest (including uploading it) given the cli context, ec keys, password grantees and salt
|
||||
func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees []string, encryptPasswords []string) (accessKey []byte, ae *AccessEntry, actManifest *Manifest, err error) { |
||||
if len(grantees) == 0 && len(encryptPasswords) == 0 { |
||||
return nil, nil, nil, errors.New("did not get any grantee public keys or any encryption passwords") |
||||
} |
||||
|
||||
publisherPub := hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)) |
||||
grantees = append(grantees, publisherPub) |
||||
|
||||
accessKey = make([]byte, 32) |
||||
if _, err := io.ReadFull(rand.Reader, salt); err != nil { |
||||
panic("reading from crypto/rand failed: " + err.Error()) |
||||
} |
||||
if _, err := io.ReadFull(rand.Reader, accessKey); err != nil { |
||||
panic("reading from crypto/rand failed: " + err.Error()) |
||||
} |
||||
|
||||
lookupPathEncryptedAccessKeyMap := make(map[string]string) |
||||
i := 0 |
||||
for _, v := range grantees { |
||||
i++ |
||||
if v == "" { |
||||
return nil, nil, nil, errors.New("need a grantee Public Key") |
||||
} |
||||
b, err := hex.DecodeString(v) |
||||
if err != nil { |
||||
log.Error("error decoding grantee public key", "err", err) |
||||
return nil, nil, nil, err |
||||
} |
||||
|
||||
granteePub, err := crypto.DecompressPubkey(b) |
||||
if err != nil { |
||||
log.Error("error decompressing grantee public key", "err", err) |
||||
return nil, nil, nil, err |
||||
} |
||||
sessionKey, err := NewSessionKeyPK(privateKey, granteePub, salt) |
||||
if err != nil { |
||||
return nil, nil, nil, err |
||||
} |
||||
|
||||
hasher := sha3.NewLegacyKeccak256() |
||||
hasher.Write(append(sessionKey, 0)) |
||||
lookupKey := hasher.Sum(nil) |
||||
|
||||
hasher.Reset() |
||||
hasher.Write(append(sessionKey, 1)) |
||||
|
||||
accessKeyEncryptionKey := hasher.Sum(nil) |
||||
|
||||
enc := NewRefEncryption(len(accessKey)) |
||||
encryptedAccessKey, err := enc.Encrypt(accessKey, accessKeyEncryptionKey) |
||||
if err != nil { |
||||
return nil, nil, nil, err |
||||
} |
||||
lookupPathEncryptedAccessKeyMap[hex.EncodeToString(lookupKey)] = hex.EncodeToString(encryptedAccessKey) |
||||
} |
||||
|
||||
for _, pass := range encryptPasswords { |
||||
sessionKey, err := sessionKeyPassword(pass, salt, DefaultKdfParams) |
||||
if err != nil { |
||||
return nil, nil, nil, err |
||||
} |
||||
hasher := sha3.NewLegacyKeccak256() |
||||
hasher.Write(append(sessionKey, 0)) |
||||
lookupKey := hasher.Sum(nil) |
||||
|
||||
hasher.Reset() |
||||
hasher.Write(append(sessionKey, 1)) |
||||
|
||||
accessKeyEncryptionKey := hasher.Sum(nil) |
||||
|
||||
enc := NewRefEncryption(len(accessKey)) |
||||
encryptedAccessKey, err := enc.Encrypt(accessKey, accessKeyEncryptionKey) |
||||
if err != nil { |
||||
return nil, nil, nil, err |
||||
} |
||||
lookupPathEncryptedAccessKeyMap[hex.EncodeToString(lookupKey)] = hex.EncodeToString(encryptedAccessKey) |
||||
} |
||||
|
||||
m := &Manifest{ |
||||
Entries: []ManifestEntry{}, |
||||
} |
||||
|
||||
for k, v := range lookupPathEncryptedAccessKeyMap { |
||||
m.Entries = append(m.Entries, ManifestEntry{ |
||||
Path: k, |
||||
Hash: v, |
||||
ContentType: "text/plain", |
||||
}) |
||||
} |
||||
|
||||
ae, err = NewAccessEntryACT(hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)), salt, "") |
||||
if err != nil { |
||||
return nil, nil, nil, err |
||||
} |
||||
|
||||
return accessKey, ae, m, nil |
||||
} |
||||
|
||||
// DoPassword is a helper function to the CLI API that handles the entire business logic for
|
||||
// creating a session key and an access entry given the cli context, password and salt.
|
||||
// By default - DefaultKdfParams are used as the scrypt params
|
||||
func DoPassword(ctx *cli.Context, password string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) { |
||||
ae, err = NewAccessEntryPassword(salt, DefaultKdfParams) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
|
||||
sessionKey, err = NewSessionKeyPassword(password, ae) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
return sessionKey, ae, nil |
||||
} |
@ -1,993 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package api |
||||
|
||||
//go:generate mimegen --types=./../../cmd/swarm/mimegen/mime.types --package=api --out=gen_mime.go
|
||||
//go:generate gofmt -s -w gen_mime.go
|
||||
|
||||
import ( |
||||
"archive/tar" |
||||
"context" |
||||
"crypto/ecdsa" |
||||
"encoding/hex" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"math/big" |
||||
"net/http" |
||||
"path" |
||||
"strings" |
||||
|
||||
"bytes" |
||||
"mime" |
||||
"path/filepath" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/contracts/ens" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/chunk" |
||||
"github.com/ethereum/go-ethereum/swarm/log" |
||||
"github.com/ethereum/go-ethereum/swarm/spancontext" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" |
||||
|
||||
opentracing "github.com/opentracing/opentracing-go" |
||||
) |
||||
|
||||
var ( |
||||
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil) |
||||
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil) |
||||
apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil) |
||||
apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil) |
||||
apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil) |
||||
apiManifestUpdateCount = metrics.NewRegisteredCounter("api.manifestupdate.count", nil) |
||||
apiManifestUpdateFail = metrics.NewRegisteredCounter("api.manifestupdate.fail", nil) |
||||
apiManifestListCount = metrics.NewRegisteredCounter("api.manifestlist.count", nil) |
||||
apiManifestListFail = metrics.NewRegisteredCounter("api.manifestlist.fail", nil) |
||||
apiDeleteCount = metrics.NewRegisteredCounter("api.delete.count", nil) |
||||
apiDeleteFail = metrics.NewRegisteredCounter("api.delete.fail", nil) |
||||
apiGetTarCount = metrics.NewRegisteredCounter("api.gettar.count", nil) |
||||
apiGetTarFail = metrics.NewRegisteredCounter("api.gettar.fail", nil) |
||||
apiUploadTarCount = metrics.NewRegisteredCounter("api.uploadtar.count", nil) |
||||
apiUploadTarFail = metrics.NewRegisteredCounter("api.uploadtar.fail", nil) |
||||
apiModifyCount = metrics.NewRegisteredCounter("api.modify.count", nil) |
||||
apiModifyFail = metrics.NewRegisteredCounter("api.modify.fail", nil) |
||||
apiAddFileCount = metrics.NewRegisteredCounter("api.addfile.count", nil) |
||||
apiAddFileFail = metrics.NewRegisteredCounter("api.addfile.fail", nil) |
||||
apiRmFileCount = metrics.NewRegisteredCounter("api.removefile.count", nil) |
||||
apiRmFileFail = metrics.NewRegisteredCounter("api.removefile.fail", nil) |
||||
apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil) |
||||
apiAppendFileFail = metrics.NewRegisteredCounter("api.appendfile.fail", nil) |
||||
apiGetInvalid = metrics.NewRegisteredCounter("api.get.invalid", nil) |
||||
) |
||||
|
||||
// Resolver is the interface for resolving a domain name to a hash using ENS
|
||||
type Resolver interface { |
||||
Resolve(string) (common.Hash, error) |
||||
} |
||||
|
||||
// ResolveValidator is used to validate the contained Resolver
|
||||
type ResolveValidator interface { |
||||
Resolver |
||||
Owner(node [32]byte) (common.Address, error) |
||||
HeaderByNumber(context.Context, *big.Int) (*types.Header, error) |
||||
} |
||||
|
||||
// NoResolverError is returned by MultiResolver.Resolve if no resolver
|
||||
// can be found for the address.
|
||||
type NoResolverError struct { |
||||
TLD string |
||||
} |
||||
|
||||
// NewNoResolverError creates a NoResolverError for the given top level domain
|
||||
func NewNoResolverError(tld string) *NoResolverError { |
||||
return &NoResolverError{TLD: tld} |
||||
} |
||||
|
||||
// Error implements the error interface for NoResolverError
|
||||
func (e *NoResolverError) Error() string { |
||||
if e.TLD == "" { |
||||
return "no ENS resolver" |
||||
} |
||||
return fmt.Sprintf("no ENS endpoint configured to resolve .%s TLD names", e.TLD) |
||||
} |
||||
|
||||
// MultiResolver is used to resolve URL addresses based on their TLDs.
|
||||
// Each TLD can have multiple resolvers, and the resolution from the
|
||||
// first one in the sequence will be returned.
|
||||
type MultiResolver struct { |
||||
resolvers map[string][]ResolveValidator |
||||
nameHash func(string) common.Hash |
||||
} |
||||
|
||||
// MultiResolverOption sets options for MultiResolver and is used as
|
||||
// arguments for its constructor.
|
||||
type MultiResolverOption func(*MultiResolver) |
||||
|
||||
// MultiResolverOptionWithResolver adds a Resolver to a list of resolvers
|
||||
// for a specific TLD. If TLD is an empty string, the resolver will be added
|
||||
// to the list of default resolvers, the ones that will be used for resolution
|
||||
// of addresses which do not have their TLD resolver specified.
|
||||
func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolverOption { |
||||
return func(m *MultiResolver) { |
||||
m.resolvers[tld] = append(m.resolvers[tld], r) |
||||
} |
||||
} |
||||
|
||||
// NewMultiResolver creates a new instance of MultiResolver.
|
||||
func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) { |
||||
m = &MultiResolver{ |
||||
resolvers: make(map[string][]ResolveValidator), |
||||
nameHash: ens.EnsNode, |
||||
} |
||||
for _, o := range opts { |
||||
o(m) |
||||
} |
||||
return m |
||||
} |
||||
|
||||
// Resolve resolves an address by choosing a Resolver based on its TLD.
|
||||
// If multiple Resolvers are registered, either as defaults or for a specific TLD,
|
||||
// the Hash from the first one that does not return an error
|
||||
// will be returned.
|
||||
func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) { |
||||
rs, err := m.getResolveValidator(addr) |
||||
if err != nil { |
||||
return h, err |
||||
} |
||||
for _, r := range rs { |
||||
h, err = r.Resolve(addr) |
||||
if err == nil { |
||||
return |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
|
||||
func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) { |
||||
rs := m.resolvers[""] |
||||
tld := path.Ext(name) |
||||
if tld != "" { |
||||
tld = tld[1:] |
||||
rstld, ok := m.resolvers[tld] |
||||
if ok { |
||||
return rstld, nil |
||||
} |
||||
} |
||||
if len(rs) == 0 { |
||||
return rs, NewNoResolverError(tld) |
||||
} |
||||
return rs, nil |
||||
} |
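// Illustrative sketch (not part of the original source): wiring up a
// MultiResolver. The ensClient value is a placeholder for any type that
// implements ResolveValidator.
//
//	mr := NewMultiResolver(
//		MultiResolverOptionWithResolver(ensClient, ""),    // default resolvers, used when no TLD-specific one matches
//		MultiResolverOptionWithResolver(ensClient, "eth"), // used only for *.eth names
//	)
//	hash, err := mr.Resolve("theswarm.eth")
//	if err != nil {
//		// either no resolver is registered for the TLD or all of them failed
//	}
//	_ = hash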
||||
|
||||
/* |
||||
API implements webserver/file system related content storage and retrieval |
||||
on top of the FileStore |
||||
it is the public interface of the FileStore which is included in the ethereum stack |
||||
*/ |
||||
type API struct { |
||||
feed *feed.Handler |
||||
fileStore *storage.FileStore |
||||
dns Resolver |
||||
Tags *chunk.Tags |
||||
Decryptor func(context.Context, string) DecryptFunc |
||||
} |
||||
|
||||
// NewAPI is the API constructor; it initialises a new API instance.
|
||||
func NewAPI(fileStore *storage.FileStore, dns Resolver, feedHandler *feed.Handler, pk *ecdsa.PrivateKey, tags *chunk.Tags) (self *API) { |
||||
self = &API{ |
||||
fileStore: fileStore, |
||||
dns: dns, |
||||
feed: feedHandler, |
||||
Tags: tags, |
||||
Decryptor: func(ctx context.Context, credentials string) DecryptFunc { |
||||
return self.doDecrypt(ctx, credentials, pk) |
||||
}, |
||||
} |
||||
return |
||||
} |
||||
|
||||
// Retrieve FileStore reader API
|
||||
func (a *API) Retrieve(ctx context.Context, addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) { |
||||
return a.fileStore.Retrieve(ctx, addr) |
||||
} |
||||
|
||||
// Store wraps the Store API call of the embedded FileStore
|
||||
func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(ctx context.Context) error, err error) { |
||||
log.Debug("api.store", "size", size) |
||||
return a.fileStore.Store(ctx, data, size, toEncrypt) |
||||
} |
||||
|
||||
// Resolve resolves a name into a content-addressed hash,
|
||||
// where the address can be either an ENS name or a content-addressed hash
|
||||
func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) { |
||||
// if DNS is not configured, return an error
|
||||
if a.dns == nil { |
||||
if hashMatcher.MatchString(address) { |
||||
return common.Hex2Bytes(address), nil |
||||
} |
||||
apiResolveFail.Inc(1) |
||||
return nil, fmt.Errorf("no DNS to resolve name: %q", address) |
||||
} |
||||
// try and resolve the address
|
||||
resolved, err := a.dns.Resolve(address) |
||||
if err != nil { |
||||
if hashMatcher.MatchString(address) { |
||||
return common.Hex2Bytes(address), nil |
||||
} |
||||
return nil, err |
||||
} |
||||
return resolved[:], nil |
||||
} |
||||
|
||||
// ResolveURI resolves a URI to an Address using the MultiResolver.
|
||||
func (a *API) ResolveURI(ctx context.Context, uri *URI, credentials string) (storage.Address, error) { |
||||
apiResolveCount.Inc(1) |
||||
log.Trace("resolving", "uri", uri.Addr) |
||||
|
||||
var sp opentracing.Span |
||||
ctx, sp = spancontext.StartSpan( |
||||
ctx, |
||||
"api.resolve") |
||||
defer sp.Finish() |
||||
|
||||
// if the URI is immutable, check if the address looks like a hash
|
||||
if uri.Immutable() { |
||||
key := uri.Address() |
||||
if key == nil { |
||||
return nil, fmt.Errorf("immutable address not a content hash: %q", uri.Addr) |
||||
} |
||||
return key, nil |
||||
} |
||||
|
||||
addr, err := a.Resolve(ctx, uri.Addr) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if uri.Path == "" { |
||||
return addr, nil |
||||
} |
||||
walker, err := a.NewManifestWalker(ctx, addr, a.Decryptor(ctx, credentials), nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
var entry *ManifestEntry |
||||
walker.Walk(func(e *ManifestEntry) error { |
||||
// if the entry matches the path, set entry and stop
|
||||
// the walk
|
||||
if e.Path == uri.Path { |
||||
entry = e |
||||
// return an error to cancel the walk
|
||||
return errors.New("found") |
||||
} |
||||
// ignore non-manifest files
|
||||
if e.ContentType != ManifestType { |
||||
return nil |
||||
} |
||||
// if the manifest's path is a prefix of the
|
||||
// requested path, recurse into it by returning
|
||||
// nil and continuing the walk
|
||||
if strings.HasPrefix(uri.Path, e.Path) { |
||||
return nil |
||||
} |
||||
return ErrSkipManifest |
||||
}) |
||||
if entry == nil { |
||||
return nil, errors.New("not found") |
||||
} |
||||
addr = storage.Address(common.Hex2Bytes(entry.Hash)) |
||||
return addr, nil |
||||
} |
||||
|
||||
// Get uses iterative manifest retrieval and prefix matching
|
||||
// to resolve basePath to content using FileStore retrieve
|
||||
// It returns a section reader, the MIME type, an HTTP status code, the key of the actual content and an error.
|
||||
func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) { |
||||
log.Debug("api.get", "key", manifestAddr, "path", path) |
||||
apiGetCount.Inc(1) |
||||
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, decrypt) |
||||
if err != nil { |
||||
apiGetNotFound.Inc(1) |
||||
status = http.StatusNotFound |
||||
return nil, "", http.StatusNotFound, nil, err |
||||
} |
||||
|
||||
log.Debug("trie getting entry", "key", manifestAddr, "path", path) |
||||
entry, _ := trie.getEntry(path) |
||||
|
||||
if entry != nil { |
||||
log.Debug("trie got entry", "key", manifestAddr, "path", path, "entry.Hash", entry.Hash) |
||||
|
||||
if entry.ContentType == ManifestType { |
||||
log.Debug("entry is manifest", "key", manifestAddr, "new key", entry.Hash) |
||||
adr, err := hex.DecodeString(entry.Hash) |
||||
if err != nil { |
||||
return nil, "", 0, nil, err |
||||
} |
||||
return a.Get(ctx, decrypt, adr, entry.Path) |
||||
} |
||||
|
||||
// we need to do some extra work if this is a Swarm feed manifest
|
||||
if entry.ContentType == FeedContentType { |
||||
if entry.Feed == nil { |
||||
return reader, mimeType, status, nil, fmt.Errorf("Cannot decode Feed in manifest") |
||||
} |
||||
_, err := a.feed.Lookup(ctx, feed.NewQueryLatest(entry.Feed, lookup.NoClue)) |
||||
if err != nil { |
||||
apiGetNotFound.Inc(1) |
||||
status = http.StatusNotFound |
||||
log.Debug(fmt.Sprintf("get feed update content error: %v", err)) |
||||
return reader, mimeType, status, nil, err |
||||
} |
||||
// get the data of the update
|
||||
_, contentAddr, err := a.feed.GetContent(entry.Feed) |
||||
if err != nil { |
||||
apiGetNotFound.Inc(1) |
||||
status = http.StatusNotFound |
||||
log.Warn(fmt.Sprintf("get feed update content error: %v", err)) |
||||
return reader, mimeType, status, nil, err |
||||
} |
||||
|
||||
// extract content hash
|
||||
if len(contentAddr) != storage.AddressLength { |
||||
apiGetInvalid.Inc(1) |
||||
status = http.StatusUnprocessableEntity |
||||
errorMessage := fmt.Sprintf("invalid swarm hash in feed update. Expected %d bytes. Got %d", storage.AddressLength, len(contentAddr)) |
||||
log.Warn(errorMessage) |
||||
return reader, mimeType, status, nil, errors.New(errorMessage) |
||||
} |
||||
manifestAddr = storage.Address(contentAddr) |
||||
log.Trace("feed update contains swarm hash", "key", manifestAddr) |
||||
|
||||
// get the manifest the swarm hash points to
|
||||
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt) |
||||
if err != nil { |
||||
apiGetNotFound.Inc(1) |
||||
status = http.StatusNotFound |
||||
log.Warn(fmt.Sprintf("loadManifestTrie (feed update) error: %v", err)) |
||||
return reader, mimeType, status, nil, err |
||||
} |
||||
|
||||
// finally, get the manifest entry
|
||||
// it will always be the entry on path ""
|
||||
entry, _ = trie.getEntry(path) |
||||
if entry == nil { |
||||
status = http.StatusNotFound |
||||
apiGetNotFound.Inc(1) |
||||
err = fmt.Errorf("manifest (feed update) entry for '%s' not found", path) |
||||
log.Trace("manifest (feed update) entry not found", "key", manifestAddr, "path", path) |
||||
return reader, mimeType, status, nil, err |
||||
} |
||||
} |
||||
|
||||
// regardless of feed update manifests or normal manifests we will converge at this point
|
||||
// get the key the manifest entry points to and serve it if it's unambiguous
|
||||
contentAddr = common.Hex2Bytes(entry.Hash) |
||||
status = entry.Status |
||||
if status == http.StatusMultipleChoices { |
||||
apiGetHTTP300.Inc(1) |
||||
return nil, entry.ContentType, status, contentAddr, err |
||||
} |
||||
mimeType = entry.ContentType |
||||
log.Debug("content lookup key", "key", contentAddr, "mimetype", mimeType) |
||||
reader, _ = a.fileStore.Retrieve(ctx, contentAddr) |
||||
} else { |
||||
// no entry found
|
||||
status = http.StatusNotFound |
||||
apiGetNotFound.Inc(1) |
||||
err = fmt.Errorf("Not found: could not find resource '%s'", path) |
||||
log.Trace("manifest entry not found", "key", contentAddr, "path", path) |
||||
} |
||||
return |
||||
} |
||||
|
||||
func (a *API) Delete(ctx context.Context, addr string, path string) (storage.Address, error) { |
||||
apiDeleteCount.Inc(1) |
||||
uri, err := Parse("bzz:/" + addr) |
||||
if err != nil { |
||||
apiDeleteFail.Inc(1) |
||||
return nil, err |
||||
} |
||||
key, err := a.ResolveURI(ctx, uri, EmptyCredentials) |
||||
|
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
newKey, err := a.UpdateManifest(ctx, key, func(mw *ManifestWriter) error { |
||||
log.Debug(fmt.Sprintf("removing %s from manifest %s", path, key.Log())) |
||||
return mw.RemoveEntry(path) |
||||
}) |
||||
if err != nil { |
||||
apiDeleteFail.Inc(1) |
||||
return nil, err |
||||
} |
||||
|
||||
return newKey, nil |
||||
} |
||||
|
||||
// GetDirectoryTar fetches a requested directory as a tarstream
|
||||
// It returns an io.ReadCloser and an error. Do not forget to Close() the returned ReadCloser
|
||||
func (a *API) GetDirectoryTar(ctx context.Context, decrypt DecryptFunc, uri *URI) (io.ReadCloser, error) { |
||||
apiGetTarCount.Inc(1) |
||||
addr, err := a.Resolve(ctx, uri.Addr) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
walker, err := a.NewManifestWalker(ctx, addr, decrypt, nil) |
||||
if err != nil { |
||||
apiGetTarFail.Inc(1) |
||||
return nil, err |
||||
} |
||||
|
||||
piper, pipew := io.Pipe() |
||||
|
||||
tw := tar.NewWriter(pipew) |
||||
|
||||
go func() { |
||||
err := walker.Walk(func(entry *ManifestEntry) error { |
||||
// ignore manifests (walk will recurse into them)
|
||||
if entry.ContentType == ManifestType { |
||||
return nil |
||||
} |
||||
|
||||
// retrieve the entry's key and size
|
||||
reader, _ := a.Retrieve(ctx, storage.Address(common.Hex2Bytes(entry.Hash))) |
||||
size, err := reader.Size(ctx, nil) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
// write a tar header for the entry
|
||||
hdr := &tar.Header{ |
||||
Name: entry.Path, |
||||
Mode: entry.Mode, |
||||
Size: size, |
||||
ModTime: entry.ModTime, |
||||
Xattrs: map[string]string{ |
||||
"user.swarm.content-type": entry.ContentType, |
||||
}, |
||||
} |
||||
|
||||
if err := tw.WriteHeader(hdr); err != nil { |
||||
return err |
||||
} |
||||
|
||||
// copy the file into the tar stream
|
||||
n, err := io.Copy(tw, io.LimitReader(reader, hdr.Size)) |
||||
if err != nil { |
||||
return err |
||||
} else if n != size { |
||||
return fmt.Errorf("error writing %s: expected %d bytes but sent %d", entry.Path, size, n) |
||||
} |
||||
|
||||
return nil |
||||
}) |
||||
// close tar writer before closing pipew
|
||||
// to flush remaining data to pipew
|
||||
// regardless of error value
|
||||
tw.Close() |
||||
if err != nil { |
||||
apiGetTarFail.Inc(1) |
||||
pipew.CloseWithError(err) |
||||
} else { |
||||
pipew.Close() |
||||
} |
||||
}() |
||||
|
||||
return piper, nil |
||||
} |
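// Illustrative sketch (not part of the original source): consuming the tar
// stream returned by GetDirectoryTar. The uri value is assumed to come from
// Parse; NOOPDecrypt skips access control.
//
//	rc, err := a.GetDirectoryTar(ctx, NOOPDecrypt, uri)
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	tr := tar.NewReader(rc)
//	for {
//		hdr, err := tr.Next()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		// hdr.Name holds the manifest path; the entry body is read from tr
//	}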
||||
|
||||
// GetManifestList lists the manifest entries for the specified address and prefix
|
||||
// and returns them as a ManifestList
|
||||
func (a *API) GetManifestList(ctx context.Context, decryptor DecryptFunc, addr storage.Address, prefix string) (list ManifestList, err error) { |
||||
apiManifestListCount.Inc(1) |
||||
walker, err := a.NewManifestWalker(ctx, addr, decryptor, nil) |
||||
if err != nil { |
||||
apiManifestListFail.Inc(1) |
||||
return ManifestList{}, err |
||||
} |
||||
|
||||
err = walker.Walk(func(entry *ManifestEntry) error { |
||||
// handle non-manifest files
|
||||
if entry.ContentType != ManifestType { |
||||
// ignore the file if it doesn't have the specified prefix
|
||||
if !strings.HasPrefix(entry.Path, prefix) { |
||||
return nil |
||||
} |
||||
|
||||
// if the path after the prefix contains a slash, add a
|
||||
// common prefix to the list, otherwise add the entry
|
||||
suffix := strings.TrimPrefix(entry.Path, prefix) |
||||
if index := strings.Index(suffix, "/"); index > -1 { |
||||
list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1]) |
||||
return nil |
||||
} |
||||
if entry.Path == "" { |
||||
entry.Path = "/" |
||||
} |
||||
list.Entries = append(list.Entries, entry) |
||||
return nil |
||||
} |
||||
|
||||
// if the manifest's path is a prefix of the specified prefix
|
||||
// then just recurse into the manifest by returning nil and
|
||||
// continuing the walk
|
||||
if strings.HasPrefix(prefix, entry.Path) { |
||||
return nil |
||||
} |
||||
|
||||
// if the manifest's path has the specified prefix, then if the
|
||||
// path after the prefix contains a slash, add a common prefix
|
||||
// to the list and skip the manifest, otherwise recurse into
|
||||
// the manifest by returning nil and continuing the walk
|
||||
if strings.HasPrefix(entry.Path, prefix) { |
||||
suffix := strings.TrimPrefix(entry.Path, prefix) |
||||
if index := strings.Index(suffix, "/"); index > -1 { |
||||
list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1]) |
||||
return ErrSkipManifest |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// the manifest neither has the prefix nor needs recursing into,
|
||||
// so just skip it
|
||||
return ErrSkipManifest |
||||
}) |
||||
|
||||
if err != nil { |
||||
apiManifestListFail.Inc(1) |
||||
return ManifestList{}, err |
||||
} |
||||
|
||||
return list, nil |
||||
} |
||||
|
||||
func (a *API) UpdateManifest(ctx context.Context, addr storage.Address, update func(mw *ManifestWriter) error) (storage.Address, error) { |
||||
apiManifestUpdateCount.Inc(1) |
||||
mw, err := a.NewManifestWriter(ctx, addr, nil) |
||||
if err != nil { |
||||
apiManifestUpdateFail.Inc(1) |
||||
return nil, err |
||||
} |
||||
|
||||
if err := update(mw); err != nil { |
||||
apiManifestUpdateFail.Inc(1) |
||||
return nil, err |
||||
} |
||||
|
||||
addr, err = mw.Store() |
||||
if err != nil { |
||||
apiManifestUpdateFail.Inc(1) |
||||
return nil, err |
||||
} |
||||
log.Debug(fmt.Sprintf("generated manifest %s", addr)) |
||||
return addr, nil |
||||
} |
||||
|
||||
// Modify loads manifest and checks the content hash before recalculating and storing the manifest.
|
||||
func (a *API) Modify(ctx context.Context, addr storage.Address, path, contentHash, contentType string) (storage.Address, error) { |
||||
apiModifyCount.Inc(1) |
||||
quitC := make(chan bool) |
||||
trie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt) |
||||
if err != nil { |
||||
apiModifyFail.Inc(1) |
||||
return nil, err |
||||
} |
||||
if contentHash != "" { |
||||
entry := newManifestTrieEntry(&ManifestEntry{ |
||||
Path: path, |
||||
ContentType: contentType, |
||||
}, nil) |
||||
entry.Hash = contentHash |
||||
trie.addEntry(entry, quitC) |
||||
} else { |
||||
trie.deleteEntry(path, quitC) |
||||
} |
||||
|
||||
if err := trie.recalcAndStore(); err != nil { |
||||
apiModifyFail.Inc(1) |
||||
return nil, err |
||||
} |
||||
return trie.ref, nil |
||||
} |
||||
|
||||
// AddFile creates a new manifest entry, adds it to swarm, then adds a file to swarm.
|
||||
func (a *API) AddFile(ctx context.Context, mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) { |
||||
apiAddFileCount.Inc(1) |
||||
|
||||
uri, err := Parse("bzz:/" + mhash) |
||||
if err != nil { |
||||
apiAddFileFail.Inc(1) |
||||
return nil, "", err |
||||
} |
||||
mkey, err := a.ResolveURI(ctx, uri, EmptyCredentials) |
||||
if err != nil { |
||||
apiAddFileFail.Inc(1) |
||||
return nil, "", err |
||||
} |
||||
|
||||
// trim the root dir we added
|
||||
if path[:1] == "/" { |
||||
path = path[1:] |
||||
} |
||||
|
||||
entry := &ManifestEntry{ |
||||
Path: filepath.Join(path, fname), |
||||
ContentType: mime.TypeByExtension(filepath.Ext(fname)), |
||||
Mode: 0700, |
||||
Size: int64(len(content)), |
||||
ModTime: time.Now(), |
||||
} |
||||
|
||||
mw, err := a.NewManifestWriter(ctx, mkey, nil) |
||||
if err != nil { |
||||
apiAddFileFail.Inc(1) |
||||
return nil, "", err |
||||
} |
||||
|
||||
fkey, err := mw.AddEntry(ctx, bytes.NewReader(content), entry) |
||||
if err != nil { |
||||
apiAddFileFail.Inc(1) |
||||
return nil, "", err |
||||
} |
||||
|
||||
newMkey, err := mw.Store() |
||||
if err != nil { |
||||
apiAddFileFail.Inc(1) |
||||
return nil, "", err |
||||
|
||||
} |
||||
|
||||
return fkey, newMkey.String(), nil |
||||
} |
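// Illustrative sketch (not part of the original source): adding a small file
// under an existing manifest. The "theswarm.eth" name and "docs" path are
// placeholders; the call returns the key of the stored file and the hash of
// the updated manifest.
//
//	fileKey, newManifestHash, err := a.AddFile(ctx, "theswarm.eth", "docs", "hello.txt",
//		[]byte("hello from swarm"), true)
//	if err != nil {
//		return err
//	}
//	_, _ = fileKey, newManifestHash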
||||
|
||||
func (a *API) UploadTar(ctx context.Context, bodyReader io.ReadCloser, manifestPath, defaultPath string, mw *ManifestWriter) (storage.Address, error) { |
||||
apiUploadTarCount.Inc(1) |
||||
var contentKey storage.Address |
||||
tr := tar.NewReader(bodyReader) |
||||
defer bodyReader.Close() |
||||
var defaultPathFound bool |
||||
for { |
||||
hdr, err := tr.Next() |
||||
if err == io.EOF { |
||||
break |
||||
} else if err != nil { |
||||
apiUploadTarFail.Inc(1) |
||||
return nil, fmt.Errorf("error reading tar stream: %s", err) |
||||
} |
||||
|
||||
// only store regular files
|
||||
if !hdr.FileInfo().Mode().IsRegular() { |
||||
continue |
||||
} |
||||
|
||||
// add the entry under the path from the request
|
||||
manifestPath := path.Join(manifestPath, hdr.Name) |
||||
contentType := hdr.Xattrs["user.swarm.content-type"] |
||||
if contentType == "" { |
||||
contentType = mime.TypeByExtension(filepath.Ext(hdr.Name)) |
||||
} |
||||
//DetectContentType("")
|
||||
entry := &ManifestEntry{ |
||||
Path: manifestPath, |
||||
ContentType: contentType, |
||||
Mode: hdr.Mode, |
||||
Size: hdr.Size, |
||||
ModTime: hdr.ModTime, |
||||
} |
||||
contentKey, err = mw.AddEntry(ctx, tr, entry) |
||||
if err != nil { |
||||
apiUploadTarFail.Inc(1) |
||||
return nil, fmt.Errorf("error adding manifest entry from tar stream: %s", err) |
||||
} |
||||
if hdr.Name == defaultPath { |
||||
contentType := hdr.Xattrs["user.swarm.content-type"] |
||||
if contentType == "" { |
||||
contentType = mime.TypeByExtension(filepath.Ext(hdr.Name)) |
||||
} |
||||
|
||||
entry := &ManifestEntry{ |
||||
Hash: contentKey.Hex(), |
||||
Path: "", // default entry
|
||||
ContentType: contentType, |
||||
Mode: hdr.Mode, |
||||
Size: hdr.Size, |
||||
ModTime: hdr.ModTime, |
||||
} |
||||
contentKey, err = mw.AddEntry(ctx, nil, entry) |
||||
if err != nil { |
||||
apiUploadTarFail.Inc(1) |
||||
return nil, fmt.Errorf("error adding default manifest entry from tar stream: %s", err) |
||||
} |
||||
defaultPathFound = true |
||||
} |
||||
} |
||||
if defaultPath != "" && !defaultPathFound { |
||||
return contentKey, fmt.Errorf("default path %q not found", defaultPath) |
||||
} |
||||
return contentKey, nil |
||||
} |
||||
|
||||
// RemoveFile removes a file entry in a manifest.
|
||||
func (a *API) RemoveFile(ctx context.Context, mhash string, path string, fname string, nameresolver bool) (string, error) { |
||||
apiRmFileCount.Inc(1) |
||||
|
||||
uri, err := Parse("bzz:/" + mhash) |
||||
if err != nil { |
||||
apiRmFileFail.Inc(1) |
||||
return "", err |
||||
} |
||||
mkey, err := a.ResolveURI(ctx, uri, EmptyCredentials) |
||||
if err != nil { |
||||
apiRmFileFail.Inc(1) |
||||
return "", err |
||||
} |
||||
|
||||
// trim the root dir we added
|
||||
if path[:1] == "/" { |
||||
path = path[1:] |
||||
} |
||||
|
||||
mw, err := a.NewManifestWriter(ctx, mkey, nil) |
||||
if err != nil { |
||||
apiRmFileFail.Inc(1) |
||||
return "", err |
||||
} |
||||
|
||||
err = mw.RemoveEntry(filepath.Join(path, fname)) |
||||
if err != nil { |
||||
apiRmFileFail.Inc(1) |
||||
return "", err |
||||
} |
||||
|
||||
newMkey, err := mw.Store() |
||||
if err != nil { |
||||
apiRmFileFail.Inc(1) |
||||
return "", err |
||||
|
||||
} |
||||
|
||||
return newMkey.String(), nil |
||||
} |
||||
|
||||
// AppendFile removes the old file entry from the manifest, appends the combined content as a new entry and stores the updated manifest in Swarm.
|
||||
func (a *API) AppendFile(ctx context.Context, mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) { |
||||
apiAppendFileCount.Inc(1) |
||||
|
||||
buffSize := offset + addSize |
||||
if buffSize < existingSize { |
||||
buffSize = existingSize |
||||
} |
||||
|
||||
buf := make([]byte, buffSize) |
||||
|
||||
oldReader, _ := a.Retrieve(ctx, oldAddr) |
||||
io.ReadAtLeast(oldReader, buf, int(offset)) |
||||
|
||||
newReader := bytes.NewReader(content) |
||||
io.ReadAtLeast(newReader, buf[offset:], int(addSize)) |
||||
|
||||
if buffSize < existingSize { |
||||
io.ReadAtLeast(oldReader, buf[addSize:], int(buffSize)) |
||||
} |
||||
|
||||
combinedReader := bytes.NewReader(buf) |
||||
totalSize := int64(len(buf)) |
||||
|
||||
// TODO(jmozah): to append using pyramid chunker when it is ready
|
||||
//oldReader := a.Retrieve(oldKey)
|
||||
//newReader := bytes.NewReader(content)
|
||||
//combinedReader := io.MultiReader(oldReader, newReader)
|
||||
|
||||
uri, err := Parse("bzz:/" + mhash) |
||||
if err != nil { |
||||
apiAppendFileFail.Inc(1) |
||||
return nil, "", err |
||||
} |
||||
mkey, err := a.ResolveURI(ctx, uri, EmptyCredentials) |
||||
if err != nil { |
||||
apiAppendFileFail.Inc(1) |
||||
return nil, "", err |
||||
} |
||||
|
||||
// trim the root dir we added
|
||||
if path[:1] == "/" { |
||||
path = path[1:] |
||||
} |
||||
|
||||
mw, err := a.NewManifestWriter(ctx, mkey, nil) |
||||
if err != nil { |
||||
apiAppendFileFail.Inc(1) |
||||
return nil, "", err |
||||
} |
||||
|
||||
err = mw.RemoveEntry(filepath.Join(path, fname)) |
||||
if err != nil { |
||||
apiAppendFileFail.Inc(1) |
||||
return nil, "", err |
||||
} |
||||
|
||||
entry := &ManifestEntry{ |
||||
Path: filepath.Join(path, fname), |
||||
ContentType: mime.TypeByExtension(filepath.Ext(fname)), |
||||
Mode: 0700, |
||||
Size: totalSize, |
||||
ModTime: time.Now(), |
||||
} |
||||
|
||||
fkey, err := mw.AddEntry(ctx, io.Reader(combinedReader), entry) |
||||
if err != nil { |
||||
apiAppendFileFail.Inc(1) |
||||
return nil, "", err |
||||
} |
||||
|
||||
newMkey, err := mw.Store() |
||||
if err != nil { |
||||
apiAppendFileFail.Inc(1) |
||||
return nil, "", err |
||||
|
||||
} |
||||
|
||||
return fkey, newMkey.String(), nil |
||||
} |
||||
|
||||
// BuildDirectoryTree resolves a manifest and returns its entries keyed by path; used by swarmfs_unix
|
||||
func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) { |
||||
|
||||
uri, err := Parse("bzz:/" + mhash) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
addr, err = a.Resolve(ctx, uri.Addr) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
|
||||
quitC := make(chan bool) |
||||
rootTrie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt) |
||||
if err != nil { |
||||
return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err) |
||||
} |
||||
|
||||
manifestEntryMap = map[string]*manifestTrieEntry{} |
||||
err = rootTrie.listWithPrefix(uri.Path, quitC, func(entry *manifestTrieEntry, suffix string) { |
||||
manifestEntryMap[suffix] = entry |
||||
}) |
||||
|
||||
if err != nil { |
||||
return nil, nil, fmt.Errorf("list with prefix failed %v: %v", addr.String(), err) |
||||
} |
||||
return addr, manifestEntryMap, nil |
||||
} |
||||
|
||||
// FeedsLookup finds Swarm feed updates at specific points in time, or the latest update
|
||||
func (a *API) FeedsLookup(ctx context.Context, query *feed.Query) ([]byte, error) { |
||||
_, err := a.feed.Lookup(ctx, query) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
var data []byte |
||||
_, data, err = a.feed.GetContent(&query.Feed) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return data, nil |
||||
} |
||||
|
||||
// FeedsNewRequest creates a Request object to update a specific feed
|
||||
func (a *API) FeedsNewRequest(ctx context.Context, feed *feed.Feed) (*feed.Request, error) { |
||||
return a.feed.NewRequest(ctx, feed) |
||||
} |
||||
|
||||
// FeedsUpdate publishes a new update on the given feed
|
||||
func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.Address, error) { |
||||
return a.feed.Update(ctx, request) |
||||
} |
||||
|
||||
// ErrCannotLoadFeedManifest is returned when looking up a feed manifest fails
|
||||
var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest") |
||||
|
||||
// ErrNotAFeedManifest is returned when the provided address points to something other than a valid feed manifest
|
||||
var ErrNotAFeedManifest = errors.New("Not a feed manifest") |
||||
|
||||
// ResolveFeedManifest retrieves the Swarm feed manifest for the given address, and returns the referenced Feed.
|
||||
func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*feed.Feed, error) { |
||||
trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) |
||||
if err != nil { |
||||
return nil, ErrCannotLoadFeedManifest |
||||
} |
||||
|
||||
entry, _ := trie.getEntry("") |
||||
if entry.ContentType != FeedContentType { |
||||
return nil, ErrNotAFeedManifest |
||||
} |
||||
|
||||
return entry.Feed, nil |
||||
} |
||||
|
||||
// ErrCannotResolveFeedURI is returned when the ENS resolver is not able to translate a name to a Swarm feed
|
||||
var ErrCannotResolveFeedURI = errors.New("Cannot resolve Feed URI") |
||||
|
||||
// ErrCannotResolveFeed is returned when the values provided are insufficient or invalid to recreate a
|
||||
// feed out of them.
|
||||
var ErrCannotResolveFeed = errors.New("Cannot resolve Feed") |
||||
|
||||
// ResolveFeed attempts to extract feed information out of the manifest, if provided
|
||||
// If not, it attempts to extract the feed out of a set of key-value pairs
|
||||
func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feed.Values) (*feed.Feed, error) { |
||||
var fd *feed.Feed |
||||
var err error |
||||
if uri.Addr != "" { |
||||
// resolve the content key.
|
||||
manifestAddr := uri.Address() |
||||
if manifestAddr == nil { |
||||
manifestAddr, err = a.Resolve(ctx, uri.Addr) |
||||
if err != nil { |
||||
return nil, ErrCannotResolveFeedURI |
||||
} |
||||
} |
||||
|
||||
// get the Swarm feed from the manifest
|
||||
fd, err = a.ResolveFeedManifest(ctx, manifestAddr) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
log.Debug("handle.get.feed: resolved", "manifestkey", manifestAddr, "feed", fd.Hex()) |
||||
} else { |
||||
var f feed.Feed |
||||
if err := f.FromValues(values); err != nil { |
||||
return nil, ErrCannotResolveFeed |
||||
|
||||
} |
||||
fd = &f |
||||
} |
||||
return fd, nil |
||||
} |
||||
|
||||
// MimeOctetStream is the default value of the HTTP Content-Type header
|
||||
const MimeOctetStream = "application/octet-stream" |
||||
|
||||
// DetectContentType detects the content type by file extension, falling back to content sniffing
|
||||
func DetectContentType(fileName string, f io.ReadSeeker) (string, error) { |
||||
ctype := mime.TypeByExtension(filepath.Ext(fileName)) |
||||
if ctype != "" { |
||||
return ctype, nil |
||||
} |
||||
|
||||
// save the current position and roll back later, so the content probe reads from the beginning of the file
|
||||
currentPosition, err := f.Seek(0, io.SeekCurrent) |
||||
if err != nil { |
||||
return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err) |
||||
} |
||||
|
||||
// read a chunk to decide between utf-8 text and binary
|
||||
var buf [512]byte |
||||
n, _ := f.Read(buf[:]) |
||||
ctype = http.DetectContentType(buf[:n]) |
||||
|
||||
_, err = f.Seek(currentPosition, io.SeekStart) // rewind to output whole file
|
||||
if err != nil { |
||||
return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err) |
||||
} |
||||
|
||||
return ctype, nil |
||||
} |
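// Illustrative sketch (not part of the original source): detecting the content
// type of a local file before uploading it. *os.File satisfies io.ReadSeeker
// and DetectContentType restores the read position afterwards; the file name
// is a placeholder.
//
//	f, err := os.Open("index.html")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	ctype, err := DetectContentType(f.Name(), f)
//	// ctype is typically "text/html; charset=utf-8" for HTML input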
@ -1,576 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package api |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
crand "crypto/rand" |
||||
"errors" |
||||
"flag" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"math/big" |
||||
"os" |
||||
"strings" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/swarm/chunk" |
||||
"github.com/ethereum/go-ethereum/swarm/sctx" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
) |
||||
|
||||
func init() { |
||||
loglevel := flag.Int("loglevel", 2, "loglevel") |
||||
flag.Parse() |
||||
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) |
||||
} |
||||
|
||||
func testAPI(t *testing.T, f func(*API, *chunk.Tags, bool)) { |
||||
for _, v := range []bool{true, false} { |
||||
datadir, err := ioutil.TempDir("", "bzz-test") |
||||
if err != nil { |
||||
t.Fatalf("unable to create temp dir: %v", err) |
||||
} |
||||
defer os.RemoveAll(datadir) |
||||
tags := chunk.NewTags() |
||||
fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32), tags) |
||||
if err != nil { |
||||
return |
||||
} |
||||
api := NewAPI(fileStore, nil, nil, nil, tags) |
||||
f(api, tags, v) |
||||
} |
||||
} |
||||
|
||||
type testResponse struct { |
||||
reader storage.LazySectionReader |
||||
*Response |
||||
} |
||||
|
||||
type Response struct { |
||||
MimeType string |
||||
Status int |
||||
Size int64 |
||||
Content string |
||||
} |
||||
|
||||
func checkResponse(t *testing.T, resp *testResponse, exp *Response) { |
||||
|
||||
if resp.MimeType != exp.MimeType { |
||||
t.Errorf("incorrect mimeType. expected '%s', got '%s'", exp.MimeType, resp.MimeType) |
||||
} |
||||
if resp.Status != exp.Status { |
||||
t.Errorf("incorrect status. expected '%d', got '%d'", exp.Status, resp.Status) |
||||
} |
||||
if resp.Size != exp.Size { |
||||
t.Errorf("incorrect size. expected '%d', got '%d'", exp.Size, resp.Size) |
||||
} |
||||
if resp.reader != nil { |
||||
content := make([]byte, resp.Size) |
||||
read, _ := resp.reader.Read(content) |
||||
if int64(read) != exp.Size { |
||||
t.Errorf("incorrect content length. expected '%d...', got '%d...'", read, exp.Size) |
||||
} |
||||
resp.Content = string(content) |
||||
} |
||||
if resp.Content != exp.Content { |
||||
// if !bytes.Equal(resp.Content, exp.Content)
|
||||
t.Errorf("incorrect content. expected '%s...', got '%s...'", string(exp.Content), string(resp.Content)) |
||||
} |
||||
} |
||||
|
||||
// func expResponse(content []byte, mimeType string, status int) *Response {
|
||||
func expResponse(content string, mimeType string, status int) *Response { |
||||
log.Trace(fmt.Sprintf("expected content (%v): %v ", len(content), content)) |
||||
return &Response{mimeType, status, int64(len(content)), content} |
||||
} |
||||
|
||||
func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse { |
||||
addr := storage.Address(common.Hex2Bytes(bzzhash)) |
||||
reader, mimeType, status, _, err := api.Get(context.TODO(), NOOPDecrypt, addr, path) |
||||
if err != nil { |
||||
t.Fatalf("unexpected error: %v", err) |
||||
} |
||||
quitC := make(chan bool) |
||||
size, err := reader.Size(context.TODO(), quitC) |
||||
if err != nil { |
||||
t.Fatalf("unexpected error: %v", err) |
||||
} |
||||
log.Trace(fmt.Sprintf("reader size: %v ", size)) |
||||
s := make([]byte, size) |
||||
_, err = reader.Read(s) |
||||
if err != io.EOF { |
||||
t.Fatalf("unexpected error: %v", err) |
||||
} |
||||
reader.Seek(0, 0) |
||||
return &testResponse{reader, &Response{mimeType, status, size, string(s)}} |
||||
} |
||||
|
||||
func TestApiPut(t *testing.T) { |
||||
testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) { |
||||
content := "hello" |
||||
exp := expResponse(content, "text/plain", 0) |
||||
ctx := context.TODO() |
||||
addr, wait, err := putString(ctx, api, content, exp.MimeType, toEncrypt) |
||||
if err != nil { |
||||
t.Fatalf("unexpected error: %v", err) |
||||
} |
||||
err = wait(ctx) |
||||
if err != nil { |
||||
t.Fatalf("unexpected error: %v", err) |
||||
} |
||||
resp := testGet(t, api, addr.Hex(), "") |
||||
checkResponse(t, resp, exp) |
||||
tag := tags.All()[0] |
||||
testutil.CheckTag(t, tag, 2, 2, 0, 2) // 1 chunk data, 1 chunk manifest
|
||||
}) |
||||
} |
||||
|
||||
// TestApiTagLarge tests that the number of chunks counted is larger for a larger input
|
||||
func TestApiTagLarge(t *testing.T) { |
||||
const contentLength = 4096 * 4095 |
||||
testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) { |
||||
randomContentReader := io.LimitReader(crand.Reader, int64(contentLength)) |
||||
tag, err := api.Tags.New("unnamed-tag", 0) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
ctx := sctx.SetTag(context.Background(), tag.Uid) |
||||
key, waitContent, err := api.Store(ctx, randomContentReader, int64(contentLength), toEncrypt) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
err = waitContent(ctx) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
tag.DoneSplit(key) |
||||
|
||||
if toEncrypt { |
||||
tag := tags.All()[0] |
||||
expect := int64(4095 + 64 + 1) |
||||
testutil.CheckTag(t, tag, expect, expect, 0, expect) |
||||
} else { |
||||
tag := tags.All()[0] |
||||
expect := int64(4095 + 32 + 1) |
||||
testutil.CheckTag(t, tag, expect, expect, 0, expect) |
||||
} |
||||
}) |
||||
} |
||||
|
||||
// testResolveValidator implements the Resolver interface and either returns the given
|
||||
// hash if it is set, or returns a "name not found" error
|
||||
type testResolveValidator struct { |
||||
hash *common.Hash |
||||
} |
||||
|
||||
func newTestResolveValidator(addr string) *testResolveValidator { |
||||
r := &testResolveValidator{} |
||||
if addr != "" { |
||||
hash := common.HexToHash(addr) |
||||
r.hash = &hash |
||||
} |
||||
return r |
||||
} |
||||
|
||||
func (t *testResolveValidator) Resolve(addr string) (common.Hash, error) { |
||||
if t.hash == nil { |
||||
return common.Hash{}, fmt.Errorf("DNS name not found: %q", addr) |
||||
} |
||||
return *t.hash, nil |
||||
} |
||||
|
||||
func (t *testResolveValidator) Owner(node [32]byte) (addr common.Address, err error) { |
||||
return |
||||
} |
||||
func (t *testResolveValidator) HeaderByNumber(context.Context, *big.Int) (header *types.Header, err error) { |
||||
return |
||||
} |
||||
|
||||
// TestAPIResolve tests resolving URIs which can either contain content hashes
|
||||
// or ENS names
|
||||
func TestAPIResolve(t *testing.T) { |
||||
ensAddr := "swarm.eth" |
||||
hashAddr := "1111111111111111111111111111111111111111111111111111111111111111" |
||||
resolvedAddr := "2222222222222222222222222222222222222222222222222222222222222222" |
||||
doesResolve := newTestResolveValidator(resolvedAddr) |
||||
doesntResolve := newTestResolveValidator("") |
||||
|
||||
type test struct { |
||||
desc string |
||||
dns Resolver |
||||
addr string |
||||
immutable bool |
||||
result string |
||||
expectErr error |
||||
} |
||||
|
||||
tests := []*test{ |
||||
{ |
||||
desc: "DNS not configured, hash address, returns hash address", |
||||
dns: nil, |
||||
addr: hashAddr, |
||||
result: hashAddr, |
||||
}, |
||||
{ |
||||
desc: "DNS not configured, ENS address, returns error", |
||||
dns: nil, |
||||
addr: ensAddr, |
||||
expectErr: errors.New(`no DNS to resolve name: "swarm.eth"`), |
||||
}, |
||||
{ |
||||
desc: "DNS configured, hash address, hash resolves, returns resolved address", |
||||
dns: doesResolve, |
||||
addr: hashAddr, |
||||
result: resolvedAddr, |
||||
}, |
||||
{ |
||||
desc: "DNS configured, immutable hash address, hash resolves, returns hash address", |
||||
dns: doesResolve, |
||||
addr: hashAddr, |
||||
immutable: true, |
||||
result: hashAddr, |
||||
}, |
||||
{ |
||||
desc: "DNS configured, hash address, hash doesn't resolve, returns hash address", |
||||
dns: doesntResolve, |
||||
addr: hashAddr, |
||||
result: hashAddr, |
||||
}, |
||||
{ |
||||
desc: "DNS configured, ENS address, name resolves, returns resolved address", |
||||
dns: doesResolve, |
||||
addr: ensAddr, |
||||
result: resolvedAddr, |
||||
}, |
||||
{ |
||||
desc: "DNS configured, immutable ENS address, name resolves, returns error", |
||||
dns: doesResolve, |
||||
addr: ensAddr, |
||||
immutable: true, |
||||
expectErr: errors.New(`immutable address not a content hash: "swarm.eth"`), |
||||
}, |
||||
{ |
||||
desc: "DNS configured, ENS address, name doesn't resolve, returns error", |
||||
dns: doesntResolve, |
||||
addr: ensAddr, |
||||
expectErr: errors.New(`DNS name not found: "swarm.eth"`), |
||||
}, |
||||
} |
||||
for _, x := range tests { |
||||
t.Run(x.desc, func(t *testing.T) { |
||||
api := &API{dns: x.dns} |
||||
uri := &URI{Addr: x.addr, Scheme: "bzz"} |
||||
if x.immutable { |
||||
uri.Scheme = "bzz-immutable" |
||||
} |
||||
res, err := api.ResolveURI(context.TODO(), uri, "") |
||||
if err == nil { |
||||
if x.expectErr != nil { |
||||
t.Fatalf("expected error %q, got result %q", x.expectErr, res) |
||||
} |
||||
if res.String() != x.result { |
||||
t.Fatalf("expected result %q, got %q", x.result, res) |
||||
} |
||||
} else { |
||||
if x.expectErr == nil { |
||||
t.Fatalf("expected no error, got %q", err) |
||||
} |
||||
if err.Error() != x.expectErr.Error() { |
||||
t.Fatalf("expected error %q, got %q", x.expectErr, err) |
||||
} |
||||
} |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestMultiResolver(t *testing.T) { |
||||
doesntResolve := newTestResolveValidator("") |
||||
|
||||
ethAddr := "swarm.eth" |
||||
ethHash := "0x2222222222222222222222222222222222222222222222222222222222222222" |
||||
ethResolve := newTestResolveValidator(ethHash) |
||||
|
||||
testAddr := "swarm.test" |
||||
testHash := "0x1111111111111111111111111111111111111111111111111111111111111111" |
||||
testResolve := newTestResolveValidator(testHash) |
||||
|
||||
tests := []struct { |
||||
desc string |
||||
r Resolver |
||||
addr string |
||||
result string |
||||
err error |
||||
}{ |
||||
{ |
||||
desc: "No resolvers, returns error", |
||||
r: NewMultiResolver(), |
||||
err: NewNoResolverError(""), |
||||
}, |
||||
{ |
||||
desc: "One default resolver, returns resolved address", |
||||
r: NewMultiResolver(MultiResolverOptionWithResolver(ethResolve, "")), |
||||
addr: ethAddr, |
||||
result: ethHash, |
||||
}, |
||||
{ |
||||
desc: "Two default resolvers, returns resolved address", |
||||
r: NewMultiResolver( |
||||
MultiResolverOptionWithResolver(ethResolve, ""), |
||||
MultiResolverOptionWithResolver(ethResolve, ""), |
||||
), |
||||
addr: ethAddr, |
||||
result: ethHash, |
||||
}, |
||||
{ |
||||
desc: "Two default resolvers, first doesn't resolve, returns resolved address", |
||||
r: NewMultiResolver( |
||||
MultiResolverOptionWithResolver(doesntResolve, ""), |
||||
MultiResolverOptionWithResolver(ethResolve, ""), |
||||
), |
||||
addr: ethAddr, |
||||
result: ethHash, |
||||
}, |
||||
{ |
||||
desc: "Default resolver doesn't resolve, tld resolver resolve, returns resolved address", |
||||
r: NewMultiResolver( |
||||
MultiResolverOptionWithResolver(doesntResolve, ""), |
||||
MultiResolverOptionWithResolver(ethResolve, "eth"), |
||||
), |
||||
addr: ethAddr, |
||||
result: ethHash, |
||||
}, |
||||
{ |
||||
desc: "Three TLD resolvers, third resolves, returns resolved address", |
||||
r: NewMultiResolver( |
||||
MultiResolverOptionWithResolver(doesntResolve, "eth"), |
||||
MultiResolverOptionWithResolver(doesntResolve, "eth"), |
||||
MultiResolverOptionWithResolver(ethResolve, "eth"), |
||||
), |
||||
addr: ethAddr, |
||||
result: ethHash, |
||||
}, |
||||
{ |
||||
desc: "One TLD resolver doesn't resolve, returns error", |
||||
r: NewMultiResolver( |
||||
MultiResolverOptionWithResolver(doesntResolve, ""), |
||||
MultiResolverOptionWithResolver(ethResolve, "eth"), |
||||
), |
||||
addr: ethAddr, |
||||
result: ethHash, |
||||
}, |
||||
{ |
||||
desc: "One defautl and one TLD resolver, all doesn't resolve, returns error", |
||||
r: NewMultiResolver( |
||||
MultiResolverOptionWithResolver(doesntResolve, ""), |
||||
MultiResolverOptionWithResolver(doesntResolve, "eth"), |
||||
), |
||||
addr: ethAddr, |
||||
result: ethHash, |
||||
err: errors.New(`DNS name not found: "swarm.eth"`), |
||||
}, |
||||
{ |
||||
desc: "Two TLD resolvers, both resolve, returns resolved address", |
||||
r: NewMultiResolver( |
||||
MultiResolverOptionWithResolver(ethResolve, "eth"), |
||||
MultiResolverOptionWithResolver(testResolve, "test"), |
||||
), |
||||
addr: testAddr, |
||||
result: testHash, |
||||
}, |
||||
{ |
||||
desc: "One TLD resolver, no default resolver, returns error for different TLD", |
||||
r: NewMultiResolver( |
||||
MultiResolverOptionWithResolver(ethResolve, "eth"), |
||||
), |
||||
addr: testAddr, |
||||
err: NewNoResolverError("test"), |
||||
}, |
||||
} |
||||
for _, x := range tests { |
||||
t.Run(x.desc, func(t *testing.T) { |
||||
res, err := x.r.Resolve(x.addr) |
||||
if err == nil { |
||||
if x.err != nil { |
||||
t.Fatalf("expected error %q, got result %q", x.err, res.Hex()) |
||||
} |
||||
if res.Hex() != x.result { |
||||
t.Fatalf("expected result %q, got %q", x.result, res.Hex()) |
||||
} |
||||
} else { |
||||
if x.err == nil { |
||||
t.Fatalf("expected no error, got %q", err) |
||||
} |
||||
if err.Error() != x.err.Error() { |
||||
t.Fatalf("expected error %q, got %q", x.err, err) |
||||
} |
||||
} |
||||
}) |
||||
} |
||||
} |
||||
|
||||
func TestDecryptOriginForbidden(t *testing.T) { |
||||
ctx := context.TODO() |
||||
ctx = sctx.SetHost(ctx, "swarm-gateways.net") |
||||
|
||||
me := &ManifestEntry{ |
||||
Access: &AccessEntry{Type: AccessTypePass}, |
||||
} |
||||
|
||||
api := NewAPI(nil, nil, nil, nil, chunk.NewTags()) |
||||
|
||||
f := api.Decryptor(ctx, "") |
||||
err := f(me) |
||||
if err != ErrDecryptDomainForbidden { |
||||
t.Fatalf("should fail with ErrDecryptDomainForbidden, got %v", err) |
||||
} |
||||
} |
||||
|
||||
func TestDecryptOrigin(t *testing.T) { |
||||
for _, v := range []struct { |
||||
host string |
||||
expectError error |
||||
}{ |
||||
{ |
||||
host: "localhost", |
||||
expectError: ErrDecrypt, |
||||
}, |
||||
{ |
||||
host: "127.0.0.1", |
||||
expectError: ErrDecrypt, |
||||
}, |
||||
{ |
||||
host: "swarm-gateways.net", |
||||
expectError: ErrDecryptDomainForbidden, |
||||
}, |
||||
} { |
||||
ctx := context.TODO() |
||||
ctx = sctx.SetHost(ctx, v.host) |
||||
|
||||
me := &ManifestEntry{ |
||||
Access: &AccessEntry{Type: AccessTypePass}, |
||||
} |
||||
|
||||
api := NewAPI(nil, nil, nil, nil, chunk.NewTags()) |
||||
|
||||
f := api.Decryptor(ctx, "") |
||||
err := f(me) |
||||
if err != v.expectError { |
||||
t.Fatalf("should fail with %v, got %v", v.expectError, err) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestDetectContentType(t *testing.T) { |
||||
for _, tc := range []struct { |
||||
file string |
||||
content string |
||||
expectedContentType string |
||||
}{ |
||||
{ |
||||
file: "file-with-correct-css.css", |
||||
content: "body {background-color: orange}", |
||||
expectedContentType: "text/css; charset=utf-8", |
||||
}, |
||||
{ |
||||
file: "empty-file.css", |
||||
content: "", |
||||
expectedContentType: "text/css; charset=utf-8", |
||||
}, |
||||
{ |
||||
file: "empty-file.pdf", |
||||
content: "", |
||||
expectedContentType: "application/pdf", |
||||
}, |
||||
{ |
||||
file: "empty-file.md", |
||||
content: "", |
||||
expectedContentType: "text/markdown; charset=utf-8", |
||||
}, |
||||
{ |
||||
file: "empty-file-with-unknown-content.strangeext", |
||||
content: "", |
||||
expectedContentType: "text/plain; charset=utf-8", |
||||
}, |
||||
{ |
||||
file: "file-with-unknown-extension-and-content.strangeext", |
||||
content: "Lorem Ipsum", |
||||
expectedContentType: "text/plain; charset=utf-8", |
||||
}, |
||||
{ |
||||
file: "file-no-extension", |
||||
content: "Lorem Ipsum", |
||||
expectedContentType: "text/plain; charset=utf-8", |
||||
}, |
||||
{ |
||||
file: "file-no-extension-no-content", |
||||
content: "", |
||||
expectedContentType: "text/plain; charset=utf-8", |
||||
}, |
||||
{ |
||||
file: "css-file-with-html-inside.css", |
||||
content: "<!doctype html><html><head></head><body></body></html>", |
||||
expectedContentType: "text/css; charset=utf-8", |
||||
}, |
||||
} { |
||||
t.Run(tc.file, func(t *testing.T) { |
||||
detected, err := DetectContentType(tc.file, bytes.NewReader([]byte(tc.content))) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if detected != tc.expectedContentType { |
||||
t.Fatalf("File: %s, Expected mime type %s, got %s", tc.file, tc.expectedContentType, detected) |
||||
} |
||||
|
||||
}) |
||||
} |
||||
} |
||||
|
||||
// putString provides singleton manifest creation on top of api.API
|
||||
func putString(ctx context.Context, a *API, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) { |
||||
r := strings.NewReader(content) |
||||
tag, err := a.Tags.New("unnamed-tag", 0) |
||||
|
||||
log.Trace("created new tag", "uid", tag.Uid) |
||||
|
||||
cCtx := sctx.SetTag(ctx, tag.Uid) |
||||
key, waitContent, err := a.Store(cCtx, r, int64(len(content)), toEncrypt) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType) |
||||
r = strings.NewReader(manifest) |
||||
key, waitManifest, err := a.Store(cCtx, r, int64(len(manifest)), toEncrypt) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
tag.DoneSplit(key) |
||||
return key, func(ctx context.Context) error { |
||||
err := waitContent(ctx) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
return waitManifest(ctx) |
||||
}, nil |
||||
} |
@ -1,829 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package client |
||||
|
||||
import ( |
||||
"archive/tar" |
||||
"bytes" |
||||
"context" |
||||
"encoding/json" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"mime/multipart" |
||||
"net/http" |
||||
"net/http/httptrace" |
||||
"net/textproto" |
||||
"net/url" |
||||
"os" |
||||
"path/filepath" |
||||
"regexp" |
||||
"strconv" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" |
||||
"github.com/ethereum/go-ethereum/swarm/spancontext" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed" |
||||
"github.com/pborman/uuid" |
||||
) |
||||
|
||||
var ( |
||||
ErrUnauthorized = errors.New("unauthorized") |
||||
) |
||||
|
||||
func NewClient(gateway string) *Client { |
||||
return &Client{ |
||||
Gateway: gateway, |
||||
} |
||||
} |
||||
|
||||
// Client wraps interaction with a swarm HTTP gateway.
|
||||
type Client struct { |
||||
Gateway string |
||||
} |
||||
|
||||
// UploadRaw uploads raw data to swarm and returns the resulting hash. If toEncrypt is true it
|
||||
// uploads encrypted data
|
||||
func (c *Client) UploadRaw(r io.Reader, size int64, toEncrypt bool) (string, error) { |
||||
if size <= 0 { |
||||
return "", errors.New("data size must be greater than zero") |
||||
} |
||||
addr := "" |
||||
if toEncrypt { |
||||
addr = "encrypt" |
||||
} |
||||
req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/"+addr, r) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
req.ContentLength = size |
||||
req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("raw_upload_%d", time.Now().Unix())) |
||||
|
||||
res, err := http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
defer res.Body.Close() |
||||
if res.StatusCode != http.StatusOK { |
||||
return "", fmt.Errorf("unexpected HTTP status: %s", res.Status) |
||||
} |
||||
data, err := ioutil.ReadAll(res.Body) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
return string(data), nil |
||||
} |
||||
|
||||
// DownloadRaw downloads raw data from swarm and returns a ReadCloser and a bool indicating whether the
|
||||
// content was encrypted
|
||||
func (c *Client) DownloadRaw(hash string) (io.ReadCloser, bool, error) { |
||||
uri := c.Gateway + "/bzz-raw:/" + hash |
||||
res, err := http.DefaultClient.Get(uri) |
||||
if err != nil { |
||||
return nil, false, err |
||||
} |
||||
if res.StatusCode != http.StatusOK { |
||||
res.Body.Close() |
||||
return nil, false, fmt.Errorf("unexpected HTTP status: %s", res.Status) |
||||
} |
||||
isEncrypted := (res.Header.Get("X-Decrypted") == "true") |
||||
return res.Body, isEncrypted, nil |
||||
} |
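// Illustrative sketch (not part of the original source): a raw upload followed
// by a download through a local gateway. The gateway URL is an assumption.
//
//	c := NewClient("http://localhost:8500")
//	data := []byte("hello swarm")
//	hash, err := c.UploadRaw(bytes.NewReader(data), int64(len(data)), false)
//	if err != nil {
//		return err
//	}
//	body, isEncrypted, err := c.DownloadRaw(hash)
//	if err != nil {
//		return err
//	}
//	defer body.Close()
//	_ = isEncrypted // false for unencrypted uploads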
||||
|
||||
// File represents a file in a swarm manifest and is used for uploading and
|
||||
// downloading content to and from swarm
|
||||
type File struct { |
||||
io.ReadCloser |
||||
api.ManifestEntry |
||||
Tag string |
||||
} |
||||
|
||||
// Open opens a local file which can then be passed to client.Upload to upload
|
||||
// it to swarm
|
||||
func Open(path string) (*File, error) { |
||||
f, err := os.Open(path) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
stat, err := f.Stat() |
||||
if err != nil { |
||||
f.Close() |
||||
return nil, err |
||||
} |
||||
|
||||
contentType, err := api.DetectContentType(f.Name(), f) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return &File{ |
||||
ReadCloser: f, |
||||
ManifestEntry: api.ManifestEntry{ |
||||
ContentType: contentType, |
||||
Mode: int64(stat.Mode()), |
||||
Size: stat.Size(), |
||||
ModTime: stat.ModTime(), |
||||
}, |
||||
Tag: filepath.Base(path), |
||||
}, nil |
||||
} |
||||
|
||||
// Upload uploads a file to swarm and either adds it to an existing manifest
|
||||
// (if the manifest argument is non-empty) or creates a new manifest containing
|
||||
// the file, returning the resulting manifest hash (the file will then be
|
||||
// available at bzz:/<hash>/<path>)
|
||||
func (c *Client) Upload(file *File, manifest string, toEncrypt bool) (string, error) { |
||||
if file.Size <= 0 { |
||||
return "", errors.New("file size must be greater than zero") |
||||
} |
||||
return c.TarUpload(manifest, &FileUploader{file}, "", toEncrypt) |
||||
} |
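// Illustrative sketch (not part of the original source): uploading a local
// file into a fresh manifest. An empty manifest argument creates a new
// manifest; the returned string is its hash. The file path is a placeholder.
//
//	file, err := Open("/tmp/report.pdf")
//	if err != nil {
//		return err
//	}
//	defer file.Close()
//	manifestHash, err := c.Upload(file, "", false)
//	// the content is then served under bzz:/<manifestHash>/ as described above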
||||
|
||||
// Download downloads a file with the given path from the swarm manifest with
|
||||
// the given hash (i.e. it gets bzz:/<hash>/<path>)
|
||||
func (c *Client) Download(hash, path string) (*File, error) { |
||||
uri := c.Gateway + "/bzz:/" + hash + "/" + path |
||||
res, err := http.DefaultClient.Get(uri) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if res.StatusCode != http.StatusOK { |
||||
res.Body.Close() |
||||
return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status) |
||||
} |
||||
return &File{ |
||||
ReadCloser: res.Body, |
||||
ManifestEntry: api.ManifestEntry{ |
||||
ContentType: res.Header.Get("Content-Type"), |
||||
Size: res.ContentLength, |
||||
}, |
||||
}, nil |
||||
} |
||||
|
||||
// UploadDirectory uploads a directory tree to swarm and either adds the files
|
||||
// to an existing manifest (if the manifest argument is non-empty) or creates a
|
||||
// new manifest, returning the resulting manifest hash (files from the
|
||||
// directory will then be available at bzz:/<hash>/path/to/file), with
|
||||
// the file specified in defaultPath being uploaded to the root of the manifest
|
||||
// (i.e. bzz:/<hash>/)
|
||||
func (c *Client) UploadDirectory(dir, defaultPath, manifest string, toEncrypt bool) (string, error) { |
||||
stat, err := os.Stat(dir) |
||||
if err != nil { |
||||
return "", err |
||||
} else if !stat.IsDir() { |
||||
return "", fmt.Errorf("not a directory: %s", dir) |
||||
} |
||||
if defaultPath != "" { |
||||
if _, err := os.Stat(filepath.Join(dir, defaultPath)); err != nil { |
||||
if os.IsNotExist(err) { |
||||
return "", fmt.Errorf("the default path %q was not found in the upload directory %q", defaultPath, dir) |
||||
} |
||||
return "", fmt.Errorf("default path: %v", err) |
||||
} |
||||
} |
||||
return c.TarUpload(manifest, &DirectoryUploader{dir}, defaultPath, toEncrypt) |
||||
} |
||||
|
||||
// DownloadDirectory downloads the files contained in a swarm manifest under
|
||||
// the given path into a local directory (existing files will be overwritten)
|
||||
func (c *Client) DownloadDirectory(hash, path, destDir, credentials string) error { |
||||
stat, err := os.Stat(destDir) |
||||
if err != nil { |
||||
return err |
||||
} else if !stat.IsDir() { |
||||
return fmt.Errorf("not a directory: %s", destDir) |
||||
} |
||||
|
||||
uri := c.Gateway + "/bzz:/" + hash + "/" + path |
||||
req, err := http.NewRequest("GET", uri, nil) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if credentials != "" { |
||||
req.SetBasicAuth("", credentials) |
||||
} |
||||
req.Header.Set("Accept", "application/x-tar") |
||||
res, err := http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer res.Body.Close() |
||||
switch res.StatusCode { |
||||
case http.StatusOK: |
||||
case http.StatusUnauthorized: |
||||
return ErrUnauthorized |
||||
default: |
||||
return fmt.Errorf("unexpected HTTP status: %s", res.Status) |
||||
} |
||||
tr := tar.NewReader(res.Body) |
||||
for { |
||||
hdr, err := tr.Next() |
||||
if err == io.EOF { |
||||
return nil |
||||
} else if err != nil { |
||||
return err |
||||
} |
||||
// ignore the default path file
|
||||
if hdr.Name == "" { |
||||
continue |
||||
} |
||||
|
||||
dstPath := filepath.Join(destDir, filepath.Clean(strings.TrimPrefix(hdr.Name, path))) |
||||
if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil { |
||||
return err |
||||
} |
||||
var mode os.FileMode = 0644 |
||||
if hdr.Mode > 0 { |
||||
mode = os.FileMode(hdr.Mode) |
||||
} |
||||
dst, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
n, err := io.Copy(dst, tr) |
||||
dst.Close() |
||||
if err != nil { |
||||
return err |
||||
} else if n != hdr.Size { |
||||
return fmt.Errorf("expected %s to be %d bytes but got %d", hdr.Name, hdr.Size, n) |
||||
} |
||||
} |
||||
} |
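// An illustrative round trip for whole directory trees (not part of the
// original file); srcDir, dstDir and the "index.html" default path are
// assumptions:
func exampleDirectoryRoundTrip(gateway, srcDir, dstDir string) error {
	client := NewClient(gateway)
	// "index.html" must exist inside srcDir; pass "" if no default path is wanted
	hash, err := client.UploadDirectory(srcDir, "index.html", "", false)
	if err != nil {
		return err
	}
	// mirror the manifest back into dstDir (no credentials for public content)
	return client.DownloadDirectory(hash, "", dstDir, "")
}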
||||
|
||||
// DownloadFile downloads a single file into the destination directory
|
||||
// if the manifest entry does not specify a file name, it will fall back
|
||||
// to the hash of the file as a filename
|
||||
func (c *Client) DownloadFile(hash, path, dest, credentials string) error { |
||||
hasDestinationFilename := false |
||||
if stat, err := os.Stat(dest); err == nil { |
||||
hasDestinationFilename = !stat.IsDir() |
||||
} else { |
||||
if os.IsNotExist(err) { |
||||
// does not exist - should be created
|
||||
hasDestinationFilename = true |
||||
} else { |
||||
return fmt.Errorf("could not stat path: %v", err) |
||||
} |
||||
} |
||||
|
||||
manifestList, err := c.List(hash, path, credentials) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
switch len(manifestList.Entries) { |
||||
case 0: |
||||
return fmt.Errorf("could not find path requested at manifest address. make sure the path you've specified is correct") |
||||
case 1: |
||||
//continue
|
||||
default: |
||||
return fmt.Errorf("got too many matches for this path") |
||||
} |
||||
|
||||
uri := c.Gateway + "/bzz:/" + hash + "/" + path |
||||
req, err := http.NewRequest("GET", uri, nil) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if credentials != "" { |
||||
req.SetBasicAuth("", credentials) |
||||
} |
||||
res, err := http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer res.Body.Close() |
||||
switch res.StatusCode { |
||||
case http.StatusOK: |
||||
case http.StatusUnauthorized: |
||||
return ErrUnauthorized |
||||
default: |
||||
return fmt.Errorf("unexpected HTTP status: expected 200 OK, got %d", res.StatusCode) |
||||
} |
||||
filename := "" |
||||
if hasDestinationFilename { |
||||
filename = dest |
||||
} else { |
||||
// try to derive a filename from the request path
|
||||
re := regexp.MustCompile("[^/]+$") //everything after last slash
|
||||
|
||||
if results := re.FindAllString(path, -1); len(results) > 0 { |
||||
filename = results[len(results)-1] |
||||
} else { |
||||
if entry := manifestList.Entries[0]; entry.Path != "" && entry.Path != "/" { |
||||
filename = entry.Path |
||||
} else { |
||||
// assume hash as name if there's nothing from the command line
|
||||
filename = hash |
||||
} |
||||
} |
||||
filename = filepath.Join(dest, filename) |
||||
} |
||||
filePath, err := filepath.Abs(filename) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if err := os.MkdirAll(filepath.Dir(filePath), 0777); err != nil { |
||||
return err |
||||
} |
||||
|
||||
dst, err := os.Create(filename) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer dst.Close() |
||||
|
||||
_, err = io.Copy(dst, res.Body) |
||||
return err |
||||
} |
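// A minimal sketch of fetching one entry (not part of the original file); the
// destination directory is an assumption and the empty string means no
// credentials:
func exampleDownloadFile(gateway, hash, path, destDir string) error {
	return NewClient(gateway).DownloadFile(hash, path, destDir, "")
}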
||||
|
||||
// UploadManifest uploads the given manifest to swarm
|
||||
func (c *Client) UploadManifest(m *api.Manifest, toEncrypt bool) (string, error) { |
||||
data, err := json.Marshal(m) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
return c.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt) |
||||
} |
||||
|
||||
// DownloadManifest downloads a swarm manifest
|
||||
func (c *Client) DownloadManifest(hash string) (*api.Manifest, bool, error) { |
||||
res, isEncrypted, err := c.DownloadRaw(hash) |
||||
if err != nil { |
||||
return nil, isEncrypted, err |
||||
} |
||||
defer res.Close() |
||||
var manifest api.Manifest |
||||
if err := json.NewDecoder(res).Decode(&manifest); err != nil { |
||||
return nil, isEncrypted, err |
||||
} |
||||
return &manifest, isEncrypted, nil |
||||
} |
||||
|
||||
// List list files in a swarm manifest which have the given prefix, grouping
|
||||
// common prefixes using "/" as a delimiter.
|
||||
//
|
||||
// For example, if the manifest represents the following directory structure:
|
||||
//
|
||||
// file1.txt
|
||||
// file2.txt
|
||||
// dir1/file3.txt
|
||||
// dir1/dir2/file4.txt
|
||||
//
|
||||
// Then:
|
||||
//
|
||||
// - a prefix of "" would return [dir1/, file1.txt, file2.txt]
|
||||
// - a prefix of "file" would return [file1.txt, file2.txt]
|
||||
// - a prefix of "dir1/" would return [dir1/dir2/, dir1/file3.txt]
|
||||
//
|
||||
// where entries ending with "/" are common prefixes.
|
||||
func (c *Client) List(hash, prefix, credentials string) (*api.ManifestList, error) { |
||||
req, err := http.NewRequest(http.MethodGet, c.Gateway+"/bzz-list:/"+hash+"/"+prefix, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if credentials != "" { |
||||
req.SetBasicAuth("", credentials) |
||||
} |
||||
res, err := http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer res.Body.Close() |
||||
switch res.StatusCode { |
||||
case http.StatusOK: |
||||
case http.StatusUnauthorized: |
||||
return nil, ErrUnauthorized |
||||
default: |
||||
return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status) |
||||
} |
||||
var list api.ManifestList |
||||
if err := json.NewDecoder(res.Body).Decode(&list); err != nil { |
||||
return nil, err |
||||
} |
||||
return &list, nil |
||||
} |
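// An illustrative sketch of walking a manifest level by level (not part of the
// original file), mirroring the prefix semantics documented above:
func exampleList(gateway, hash, prefix string) error {
	list, err := NewClient(gateway).List(hash, prefix, "")
	if err != nil {
		return err
	}
	for _, p := range list.CommonPrefixes {
		fmt.Println("common prefix:", p) // entries ending with "/"
	}
	for _, entry := range list.Entries {
		fmt.Println("entry:", entry.Path)
	}
	return nil
}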
||||
|
||||
// Uploader uploads files to swarm using a provided UploadFn
|
||||
type Uploader interface { |
||||
Upload(UploadFn) error |
||||
Tag() string |
||||
} |
||||
|
||||
type UploaderFunc func(UploadFn) error |
||||
|
||||
func (u UploaderFunc) Upload(upload UploadFn) error { |
||||
return u(upload) |
||||
} |
||||
|
||||
func (u UploaderFunc) Tag() string { |
||||
return fmt.Sprintf("multipart_upload_%d", time.Now().Unix()) |
||||
} |
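// UploaderFunc lets an ordinary closure act as an Uploader. An illustrative
// sketch (not part of the original file) that uploads a single in-memory file
// through MultipartUpload; the path and content type are assumptions:
func exampleUploaderFunc(gateway string, data []byte) (string, error) {
	uploader := UploaderFunc(func(upload UploadFn) error {
		return upload(&File{
			ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
			ManifestEntry: api.ManifestEntry{
				Path:        "hello.txt",
				ContentType: "text/plain",
				Size:        int64(len(data)),
			},
		})
	})
	return NewClient(gateway).MultipartUpload("", uploader)
}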
||||
|
||||
// DirectoryUploader implements Uploader
|
||||
var _ Uploader = &DirectoryUploader{} |
||||
|
||||
// DirectoryUploader uploads all files in a directory, optionally uploading
|
||||
// a file to the default path
|
||||
type DirectoryUploader struct { |
||||
Dir string |
||||
} |
||||
|
||||
func (d *DirectoryUploader) Tag() string { |
||||
return filepath.Base(d.Dir) |
||||
} |
||||
|
||||
// Upload performs the upload of the directory and default path
|
||||
func (d *DirectoryUploader) Upload(upload UploadFn) error { |
||||
return filepath.Walk(d.Dir, func(path string, f os.FileInfo, err error) error { |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if f.IsDir() { |
||||
return nil |
||||
} |
||||
file, err := Open(path) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
relPath, err := filepath.Rel(d.Dir, path) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
file.Path = filepath.ToSlash(relPath) |
||||
return upload(file) |
||||
}) |
||||
} |
||||
|
||||
var _ Uploader = &FileUploader{} |
||||
|
||||
// FileUploader uploads a single file
|
||||
type FileUploader struct { |
||||
File *File |
||||
} |
||||
|
||||
func (f *FileUploader) Tag() string { |
||||
return f.File.Tag |
||||
} |
||||
|
||||
// Upload performs the upload of the file
|
||||
func (f *FileUploader) Upload(upload UploadFn) error { |
||||
return upload(f.File) |
||||
} |
||||
|
||||
// UploadFn is the type of function passed to an Uploader to perform the upload
|
||||
// of a single file (for example, a directory uploader would call a provided
|
||||
// UploadFn for each file in the directory tree)
|
||||
type UploadFn func(file *File) error |
||||
|
||||
// TarUpload uses the given Uploader to upload files to swarm as a tar stream,
|
||||
// returning the resulting manifest hash
|
||||
func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, toEncrypt bool) (string, error) { |
||||
ctx, sp := spancontext.StartSpan(context.Background(), "api.client.tarupload") |
||||
defer sp.Finish() |
||||
|
||||
var tn time.Time |
||||
|
||||
reqR, reqW := io.Pipe() |
||||
defer reqR.Close() |
||||
addr := hash |
||||
|
||||
// If there is a hash already (a manifest), then that manifest will determine if the upload has
|
||||
// to be encrypted or not. If there is no manifest then the toEncrypt parameter decides if
|
||||
// there is encryption or not.
|
||||
if hash == "" && toEncrypt { |
||||
// This is the built-in address for the encrypted upload endpoint
|
||||
addr = "encrypt" |
||||
} |
||||
req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+addr, reqR) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
trace := GetClientTrace("swarm api client - upload tar", "api.client.uploadtar", uuid.New()[:8], &tn) |
||||
|
||||
req = req.WithContext(httptrace.WithClientTrace(ctx, trace)) |
||||
transport := http.DefaultTransport |
||||
|
||||
req.Header.Set("Content-Type", "application/x-tar") |
||||
if defaultPath != "" { |
||||
q := req.URL.Query() |
||||
q.Set("defaultpath", defaultPath) |
||||
req.URL.RawQuery = q.Encode() |
||||
} |
||||
|
||||
tag := uploader.Tag() |
||||
if tag == "" { |
||||
tag = "unnamed_tag_" + fmt.Sprintf("%d", time.Now().Unix()) |
||||
} |
||||
log.Trace("setting upload tag", "tag", tag) |
||||
|
||||
req.Header.Set(swarmhttp.SwarmTagHeaderName, tag) |
||||
|
||||
// use 'Expect: 100-continue' so we don't send the request body if
|
||||
// the server refuses the request
|
||||
req.Header.Set("Expect", "100-continue") |
||||
|
||||
tw := tar.NewWriter(reqW) |
||||
|
||||
// define an UploadFn which adds files to the tar stream
|
||||
uploadFn := func(file *File) error { |
||||
hdr := &tar.Header{ |
||||
Name: file.Path, |
||||
Mode: file.Mode, |
||||
Size: file.Size, |
||||
ModTime: file.ModTime, |
||||
Xattrs: map[string]string{ |
||||
"user.swarm.content-type": file.ContentType, |
||||
}, |
||||
} |
||||
if err := tw.WriteHeader(hdr); err != nil { |
||||
return err |
||||
} |
||||
_, err = io.Copy(tw, file) |
||||
return err |
||||
} |
||||
|
||||
// run the upload in a goroutine so we can send the request headers and
|
||||
// wait for a '100 Continue' response before sending the tar stream
|
||||
go func() { |
||||
err := uploader.Upload(uploadFn) |
||||
if err == nil { |
||||
err = tw.Close() |
||||
} |
||||
reqW.CloseWithError(err) |
||||
}() |
||||
tn = time.Now() |
||||
res, err := transport.RoundTrip(req) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
defer res.Body.Close() |
||||
if res.StatusCode != http.StatusOK { |
||||
return "", fmt.Errorf("unexpected HTTP status: %s", res.Status) |
||||
} |
||||
data, err := ioutil.ReadAll(res.Body) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
return string(data), nil |
||||
} |
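// UploadDirectory and Upload are thin wrappers around TarUpload; calling it
// directly is also possible. An illustrative sketch (not part of the original
// file), streaming a local directory into an existing manifest:
func exampleTarUpload(gateway, dir, manifest string) (string, error) {
	// an empty manifest string would create a new manifest instead
	return NewClient(gateway).TarUpload(manifest, &DirectoryUploader{dir}, "", false)
}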
||||
|
||||
// MultipartUpload uses the given Uploader to upload files to swarm as a
|
||||
// multipart form, returning the resulting manifest hash
|
||||
func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error) { |
||||
reqR, reqW := io.Pipe() |
||||
defer reqR.Close() |
||||
req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+hash, reqR) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
// use 'Expect: 100-continue' so we don't send the request body if
|
||||
// the server refuses the request
|
||||
req.Header.Set("Expect", "100-continue") |
||||
|
||||
mw := multipart.NewWriter(reqW) |
||||
req.Header.Set("Content-Type", fmt.Sprintf("multipart/form-data; boundary=%q", mw.Boundary())) |
||||
req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("multipart_upload_%d", time.Now().Unix())) |
||||
|
||||
// define an UploadFn which adds files to the multipart form
|
||||
uploadFn := func(file *File) error { |
||||
hdr := make(textproto.MIMEHeader) |
||||
hdr.Set("Content-Disposition", fmt.Sprintf("form-data; name=%q", file.Path)) |
||||
hdr.Set("Content-Type", file.ContentType) |
||||
hdr.Set("Content-Length", strconv.FormatInt(file.Size, 10)) |
||||
w, err := mw.CreatePart(hdr) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
_, err = io.Copy(w, file) |
||||
return err |
||||
} |
||||
|
||||
// run the upload in a goroutine so we can send the request headers and
|
||||
// wait for a '100 Continue' response before sending the multipart form
|
||||
go func() { |
||||
err := uploader.Upload(uploadFn) |
||||
if err == nil { |
||||
err = mw.Close() |
||||
} |
||||
reqW.CloseWithError(err) |
||||
}() |
||||
|
||||
res, err := http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
defer res.Body.Close() |
||||
if res.StatusCode != http.StatusOK { |
||||
return "", fmt.Errorf("unexpected HTTP status: %s", res.Status) |
||||
} |
||||
data, err := ioutil.ReadAll(res.Body) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
return string(data), nil |
||||
} |
||||
|
||||
// ErrNoFeedUpdatesFound is returned when Swarm cannot find updates of the given feed
|
||||
var ErrNoFeedUpdatesFound = errors.New("no updates found for this feed") |
||||
|
||||
// CreateFeedWithManifest creates a feed manifest, initializing it with the provided
|
||||
// data
|
||||
// It returns the resulting feed manifest address, which you can use in an ENS Resolver (setContent)
|
||||
// or reference future updates (Client.UpdateFeed)
|
||||
func (c *Client) CreateFeedWithManifest(request *feed.Request) (string, error) { |
||||
responseStream, err := c.updateFeed(request, true) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
defer responseStream.Close() |
||||
|
||||
body, err := ioutil.ReadAll(responseStream) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
var manifestAddress string |
||||
if err = json.Unmarshal(body, &manifestAddress); err != nil { |
||||
return "", err |
||||
} |
||||
return manifestAddress, nil |
||||
} |
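// An illustrative sketch of publishing the first update of a feed (not part of
// the original file); the topic name is an assumption and the signer would
// normally wrap the node's account key:
func exampleCreateFeed(gateway string, signer *feed.GenericSigner, data []byte) (string, error) {
	topic, err := feed.NewTopic("example topic", nil)
	if err != nil {
		return "", err
	}
	request := feed.NewFirstRequest(topic) // template for the feed's first update
	request.SetData(data)
	if err := request.Sign(signer); err != nil {
		return "", err
	}
	// returns the feed manifest address usable in bzz:// or an ENS resolver
	return NewClient(gateway).CreateFeedWithManifest(request)
}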
||||
|
||||
// UpdateFeed allows you to set a new version of your content
|
||||
func (c *Client) UpdateFeed(request *feed.Request) error { |
||||
_, err := c.updateFeed(request, false) |
||||
return err |
||||
} |
||||
|
||||
func (c *Client) updateFeed(request *feed.Request, createManifest bool) (io.ReadCloser, error) { |
||||
URL, err := url.Parse(c.Gateway) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
URL.Path = "/bzz-feed:/" |
||||
values := URL.Query() |
||||
body := request.AppendValues(values) |
||||
if createManifest { |
||||
values.Set("manifest", "1") |
||||
} |
||||
URL.RawQuery = values.Encode() |
||||
|
||||
req, err := http.NewRequest("POST", URL.String(), bytes.NewBuffer(body)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
res, err := http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return res.Body, nil |
||||
} |
||||
|
||||
// QueryFeed returns a byte stream with the raw content of the feed update
|
||||
// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver
|
||||
// points to that address
|
||||
func (c *Client) QueryFeed(query *feed.Query, manifestAddressOrDomain string) (io.ReadCloser, error) { |
||||
return c.queryFeed(query, manifestAddressOrDomain, false) |
||||
} |
||||
|
||||
// queryFeed returns a byte stream with the raw content of the feed update
|
||||
// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver
|
||||
// points to that address
|
||||
// setting meta to true instructs the node to return feed metadata instead of the update content
|
||||
func (c *Client) queryFeed(query *feed.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) { |
||||
URL, err := url.Parse(c.Gateway) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
URL.Path = "/bzz-feed:/" + manifestAddressOrDomain |
||||
values := URL.Query() |
||||
if query != nil { |
||||
query.AppendValues(values) //adds query parameters
|
||||
} |
||||
if meta { |
||||
values.Set("meta", "1") |
||||
} |
||||
URL.RawQuery = values.Encode() |
||||
res, err := http.Get(URL.String()) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if res.StatusCode != http.StatusOK { |
||||
if res.StatusCode == http.StatusNotFound { |
||||
return nil, ErrNoFeedUpdatesFound |
||||
} |
||||
errorMessageBytes, err := ioutil.ReadAll(res.Body) |
||||
var errorMessage string |
||||
if err != nil { |
||||
errorMessage = "cannot retrieve error message: " + err.Error() |
||||
} else { |
||||
errorMessage = string(errorMessageBytes) |
||||
} |
||||
return nil, fmt.Errorf("Error retrieving feed updates: %s", errorMessage) |
||||
} |
||||
|
||||
return res.Body, nil |
||||
} |
||||
|
||||
// GetFeedRequest returns a structure that describes the referenced feed status
|
||||
// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver
|
||||
// points to that address
|
||||
func (c *Client) GetFeedRequest(query *feed.Query, manifestAddressOrDomain string) (*feed.Request, error) { |
||||
|
||||
responseStream, err := c.queryFeed(query, manifestAddressOrDomain, true) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer responseStream.Close() |
||||
|
||||
body, err := ioutil.ReadAll(responseStream) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
var metadata feed.Request |
||||
if err := metadata.UnmarshalJSON(body); err != nil { |
||||
return nil, err |
||||
} |
||||
return &metadata, nil |
||||
} |
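// An illustrative update cycle (not part of the original file): fetch the
// current request template for the feed, swap in new data, re-sign and publish.
func exampleUpdateFeed(gateway, feedManifestHash string, signer *feed.GenericSigner, data []byte) error {
	client := NewClient(gateway)
	request, err := client.GetFeedRequest(nil, feedManifestHash)
	if err != nil {
		return err
	}
	request.SetData(data)
	if err := request.Sign(signer); err != nil {
		return err
	}
	return client.UpdateFeed(request)
}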
||||
|
||||
func GetClientTrace(traceMsg, metricPrefix, ruid string, tn *time.Time) *httptrace.ClientTrace { |
||||
trace := &httptrace.ClientTrace{ |
||||
GetConn: func(_ string) { |
||||
log.Trace(traceMsg+" - http get", "event", "GetConn", "ruid", ruid) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".getconn", nil).Update(time.Since(*tn)) |
||||
}, |
||||
GotConn: func(_ httptrace.GotConnInfo) { |
||||
log.Trace(traceMsg+" - http get", "event", "GotConn", "ruid", ruid) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".gotconn", nil).Update(time.Since(*tn)) |
||||
}, |
||||
PutIdleConn: func(err error) { |
||||
log.Trace(traceMsg+" - http get", "event", "PutIdleConn", "ruid", ruid, "err", err) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".putidle", nil).Update(time.Since(*tn)) |
||||
}, |
||||
GotFirstResponseByte: func() { |
||||
log.Trace(traceMsg+" - http get", "event", "GotFirstResponseByte", "ruid", ruid) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".firstbyte", nil).Update(time.Since(*tn)) |
||||
}, |
||||
Got100Continue: func() { |
||||
log.Trace(traceMsg, "event", "Got100Continue", "ruid", ruid) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".got100continue", nil).Update(time.Since(*tn)) |
||||
}, |
||||
DNSStart: func(_ httptrace.DNSStartInfo) { |
||||
log.Trace(traceMsg, "event", "DNSStart", "ruid", ruid) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsstart", nil).Update(time.Since(*tn)) |
||||
}, |
||||
DNSDone: func(_ httptrace.DNSDoneInfo) { |
||||
log.Trace(traceMsg, "event", "DNSDone", "ruid", ruid) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsdone", nil).Update(time.Since(*tn)) |
||||
}, |
||||
ConnectStart: func(network, addr string) { |
||||
log.Trace(traceMsg, "event", "ConnectStart", "ruid", ruid, "network", network, "addr", addr) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".connectstart", nil).Update(time.Since(*tn)) |
||||
}, |
||||
ConnectDone: func(network, addr string, err error) { |
||||
log.Trace(traceMsg, "event", "ConnectDone", "ruid", ruid, "network", network, "addr", addr, "err", err) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".connectdone", nil).Update(time.Since(*tn)) |
||||
}, |
||||
WroteHeaders: func() { |
||||
log.Trace(traceMsg, "event", "WroteHeaders(request)", "ruid", ruid) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".wroteheaders", nil).Update(time.Since(*tn)) |
||||
}, |
||||
Wait100Continue: func() { |
||||
log.Trace(traceMsg, "event", "Wait100Continue", "ruid", ruid) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".wait100continue", nil).Update(time.Since(*tn)) |
||||
}, |
||||
WroteRequest: func(_ httptrace.WroteRequestInfo) { |
||||
log.Trace(traceMsg, "event", "WroteRequest", "ruid", ruid) |
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".wroterequest", nil).Update(time.Since(*tn)) |
||||
}, |
||||
} |
||||
return trace |
||||
} |
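// An illustrative sketch of wiring the trace into a request (not part of the
// original file), following the same pattern TarUpload uses above; the trace
// message and metric prefix are assumptions:
func exampleTracedGet(url string) (*http.Response, error) {
	var tn time.Time
	trace := GetClientTrace("swarm api client - example get", "api.client.exampleget", uuid.New()[:8], &tn)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	tn = time.Now() // the hooks report durations relative to this instant
	return http.DefaultTransport.RoundTrip(req)
}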
@ -1,608 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package client |
||||
|
||||
import ( |
||||
"bytes" |
||||
"io/ioutil" |
||||
"os" |
||||
"path/filepath" |
||||
"reflect" |
||||
"sort" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/swarm/api" |
||||
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" |
||||
"github.com/ethereum/go-ethereum/swarm/testutil" |
||||
) |
||||
|
||||
func serverFunc(api *api.API) swarmhttp.TestServer { |
||||
return swarmhttp.NewServer(api, "") |
||||
} |
||||
|
||||
// TestClientUploadDownloadRaw test uploading and downloading raw data to swarm
|
||||
func TestClientUploadDownloadRaw(t *testing.T) { |
||||
testClientUploadDownloadRaw(false, t) |
||||
} |
||||
|
||||
func TestClientUploadDownloadRawEncrypted(t *testing.T) { |
||||
if testutil.RaceEnabled { |
||||
t.Skip("flaky with -race on Travis") |
||||
// See: https://github.com/ethersphere/go-ethereum/issues/1254
|
||||
} |
||||
|
||||
testClientUploadDownloadRaw(true, t) |
||||
} |
||||
|
||||
func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) { |
||||
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil) |
||||
defer srv.Close() |
||||
|
||||
client := NewClient(srv.URL) |
||||
|
||||
// upload some raw data
|
||||
data := []byte("foo123") |
||||
hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// check the tag was created successfully
|
||||
tag := srv.Tags.All()[0] |
||||
testutil.CheckTag(t, tag, 1, 1, 0, 1) |
||||
|
||||
// check we can download the same data
|
||||
res, isEncrypted, err := client.DownloadRaw(hash) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if isEncrypted != toEncrypt { |
||||
t.Fatalf("Expected encyption status %v got %v", toEncrypt, isEncrypted) |
||||
} |
||||
defer res.Close() |
||||
gotData, err := ioutil.ReadAll(res) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(gotData, data) { |
||||
t.Fatalf("expected downloaded data to be %q, got %q", data, gotData) |
||||
} |
||||
} |
||||
|
||||
// TestClientUploadDownloadFiles test uploading and downloading files to swarm
|
||||
// manifests
|
||||
func TestClientUploadDownloadFiles(t *testing.T) { |
||||
testClientUploadDownloadFiles(false, t) |
||||
} |
||||
|
||||
func TestClientUploadDownloadFilesEncrypted(t *testing.T) { |
||||
testClientUploadDownloadFiles(true, t) |
||||
} |
||||
|
||||
func testClientUploadDownloadFiles(toEncrypt bool, t *testing.T) { |
||||
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil) |
||||
defer srv.Close() |
||||
|
||||
client := NewClient(srv.URL) |
||||
upload := func(manifest, path string, data []byte) string { |
||||
file := &File{ |
||||
ReadCloser: ioutil.NopCloser(bytes.NewReader(data)), |
||||
ManifestEntry: api.ManifestEntry{ |
||||
Path: path, |
||||
ContentType: "text/plain", |
||||
Size: int64(len(data)), |
||||
}, |
||||
} |
||||
hash, err := client.Upload(file, manifest, toEncrypt) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
return hash |
||||
} |
||||
checkDownload := func(manifest, path string, expected []byte) { |
||||
file, err := client.Download(manifest, path) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer file.Close() |
||||
if file.Size != int64(len(expected)) { |
||||
t.Fatalf("expected downloaded file to be %d bytes, got %d", len(expected), file.Size) |
||||
} |
||||
if file.ContentType != "text/plain" { |
||||
t.Fatalf("expected downloaded file to have type %q, got %q", "text/plain", file.ContentType) |
||||
} |
||||
data, err := ioutil.ReadAll(file) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(data, expected) { |
||||
t.Fatalf("expected downloaded data to be %q, got %q", expected, data) |
||||
} |
||||
} |
||||
|
||||
// upload a file to the root of a manifest
|
||||
rootData := []byte("some-data") |
||||
rootHash := upload("", "", rootData) |
||||
|
||||
// check we can download the root file
|
||||
checkDownload(rootHash, "", rootData) |
||||
|
||||
// upload another file to the same manifest
|
||||
otherData := []byte("some-other-data") |
||||
newHash := upload(rootHash, "some/other/path", otherData) |
||||
|
||||
// check we can download both files from the new manifest
|
||||
checkDownload(newHash, "", rootData) |
||||
checkDownload(newHash, "some/other/path", otherData) |
||||
|
||||
// replace the root file with different data
|
||||
newHash = upload(newHash, "", otherData) |
||||
|
||||
// check both files have the other data
|
||||
checkDownload(newHash, "", otherData) |
||||
checkDownload(newHash, "some/other/path", otherData) |
||||
} |
||||
|
||||
var testDirFiles = []string{ |
||||
"file1.txt", |
||||
"file2.txt", |
||||
"dir1/file3.txt", |
||||
"dir1/file4.txt", |
||||
"dir2/file5.txt", |
||||
"dir2/dir3/file6.txt", |
||||
"dir2/dir4/file7.txt", |
||||
"dir2/dir4/file8.txt", |
||||
} |
||||
|
||||
func newTestDirectory(t *testing.T) string { |
||||
dir, err := ioutil.TempDir("", "swarm-client-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
for _, file := range testDirFiles { |
||||
path := filepath.Join(dir, file) |
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { |
||||
os.RemoveAll(dir) |
||||
t.Fatalf("error creating dir for %s: %s", path, err) |
||||
} |
||||
if err := ioutil.WriteFile(path, []byte(file), 0644); err != nil { |
||||
os.RemoveAll(dir) |
||||
t.Fatalf("error writing file %s: %s", path, err) |
||||
} |
||||
} |
||||
|
||||
return dir |
||||
} |
||||
|
||||
// TestClientUploadDownloadDirectory tests uploading and downloading a
|
||||
// directory of files to a swarm manifest
|
||||
func TestClientUploadDownloadDirectory(t *testing.T) { |
||||
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil) |
||||
defer srv.Close() |
||||
|
||||
dir := newTestDirectory(t) |
||||
defer os.RemoveAll(dir) |
||||
|
||||
// upload the directory
|
||||
client := NewClient(srv.URL) |
||||
defaultPath := testDirFiles[0] |
||||
hash, err := client.UploadDirectory(dir, defaultPath, "", false) |
||||
if err != nil { |
||||
t.Fatalf("error uploading directory: %s", err) |
||||
} |
||||
|
||||
// check the tag was created successfully
|
||||
tag := srv.Tags.All()[0] |
||||
testutil.CheckTag(t, tag, 9, 9, 0, 9) |
||||
|
||||
// check we can download the individual files
|
||||
checkDownloadFile := func(path string, expected []byte) { |
||||
file, err := client.Download(hash, path) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer file.Close() |
||||
data, err := ioutil.ReadAll(file) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(data, expected) { |
||||
t.Fatalf("expected data to be %q, got %q", expected, data) |
||||
} |
||||
} |
||||
for _, file := range testDirFiles { |
||||
checkDownloadFile(file, []byte(file)) |
||||
} |
||||
|
||||
// check we can download the default path
|
||||
checkDownloadFile("", []byte(testDirFiles[0])) |
||||
|
||||
// check we can download the directory
|
||||
tmp, err := ioutil.TempDir("", "swarm-client-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmp) |
||||
if err := client.DownloadDirectory(hash, "", tmp, ""); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
for _, file := range testDirFiles { |
||||
data, err := ioutil.ReadFile(filepath.Join(tmp, file)) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(data, []byte(file)) { |
||||
t.Fatalf("expected data to be %q, got %q", file, data) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// TestClientFileList tests listing files in a swarm manifest
|
||||
func TestClientFileList(t *testing.T) { |
||||
testClientFileList(false, t) |
||||
} |
||||
|
||||
func TestClientFileListEncrypted(t *testing.T) { |
||||
testClientFileList(true, t) |
||||
} |
||||
|
||||
func testClientFileList(toEncrypt bool, t *testing.T) { |
||||
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil) |
||||
defer srv.Close() |
||||
|
||||
dir := newTestDirectory(t) |
||||
defer os.RemoveAll(dir) |
||||
|
||||
client := NewClient(srv.URL) |
||||
hash, err := client.UploadDirectory(dir, "", "", toEncrypt) |
||||
if err != nil { |
||||
t.Fatalf("error uploading directory: %s", err) |
||||
} |
||||
|
||||
ls := func(prefix string) []string { |
||||
list, err := client.List(hash, prefix, "") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
paths := make([]string, 0, len(list.CommonPrefixes)+len(list.Entries)) |
||||
paths = append(paths, list.CommonPrefixes...) |
||||
for _, entry := range list.Entries { |
||||
paths = append(paths, entry.Path) |
||||
} |
||||
sort.Strings(paths) |
||||
return paths |
||||
} |
||||
|
||||
tests := map[string][]string{ |
||||
"": {"dir1/", "dir2/", "file1.txt", "file2.txt"}, |
||||
"file": {"file1.txt", "file2.txt"}, |
||||
"file1": {"file1.txt"}, |
||||
"file2.txt": {"file2.txt"}, |
||||
"file12": {}, |
||||
"dir": {"dir1/", "dir2/"}, |
||||
"dir1": {"dir1/"}, |
||||
"dir1/": {"dir1/file3.txt", "dir1/file4.txt"}, |
||||
"dir1/file": {"dir1/file3.txt", "dir1/file4.txt"}, |
||||
"dir1/file3.txt": {"dir1/file3.txt"}, |
||||
"dir1/file34": {}, |
||||
"dir2/": {"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"}, |
||||
"dir2/file": {"dir2/file5.txt"}, |
||||
"dir2/dir": {"dir2/dir3/", "dir2/dir4/"}, |
||||
"dir2/dir3/": {"dir2/dir3/file6.txt"}, |
||||
"dir2/dir4/": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"}, |
||||
"dir2/dir4/file": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"}, |
||||
"dir2/dir4/file7.txt": {"dir2/dir4/file7.txt"}, |
||||
"dir2/dir4/file78": {}, |
||||
} |
||||
for prefix, expected := range tests { |
||||
actual := ls(prefix) |
||||
if !reflect.DeepEqual(actual, expected) { |
||||
t.Fatalf("expected prefix %q to return %v, got %v", prefix, expected, actual) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// TestClientMultipartUpload tests uploading files to swarm using a multipart
|
||||
// upload
|
||||
func TestClientMultipartUpload(t *testing.T) { |
||||
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil) |
||||
defer srv.Close() |
||||
|
||||
// define an uploader which uploads testDirFiles with some data
|
||||
// note: this test should result in SEEN chunks. assert accordingly
|
||||
data := []byte("some-data") |
||||
uploader := UploaderFunc(func(upload UploadFn) error { |
||||
for _, name := range testDirFiles { |
||||
file := &File{ |
||||
ReadCloser: ioutil.NopCloser(bytes.NewReader(data)), |
||||
ManifestEntry: api.ManifestEntry{ |
||||
Path: name, |
||||
ContentType: "text/plain", |
||||
Size: int64(len(data)), |
||||
}, |
||||
} |
||||
if err := upload(file); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
return nil |
||||
}) |
||||
|
||||
// upload the files as a multipart upload
|
||||
client := NewClient(srv.URL) |
||||
hash, err := client.MultipartUpload("", uploader) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// check the tag was created successfully
|
||||
tag := srv.Tags.All()[0] |
||||
testutil.CheckTag(t, tag, 9, 9, 7, 9) |
||||
|
||||
// check we can download the individual files
|
||||
checkDownloadFile := func(path string) { |
||||
file, err := client.Download(hash, path) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer file.Close() |
||||
gotData, err := ioutil.ReadAll(file) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(gotData, data) { |
||||
t.Fatalf("expected data to be %q, got %q", data, gotData) |
||||
} |
||||
} |
||||
for _, file := range testDirFiles { |
||||
checkDownloadFile(file) |
||||
} |
||||
} |
||||
|
||||
func newTestSigner() (*feed.GenericSigner, error) { |
||||
privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return feed.NewGenericSigner(privKey), nil |
||||
} |
||||
|
||||
// Test the transparent resolving of feed updates with bzz:// scheme
|
||||
//
|
||||
// First upload data to bzz:, and store the Swarm hash of the resulting manifest in a feed update.
|
||||
// This effectively uses a feed to store a pointer to content rather than the content itself
|
||||
// Retrieving the update with the Swarm hash should return the manifest pointing directly to the data
|
||||
// and raw retrieve of that hash should return the data
|
||||
func TestClientBzzWithFeed(t *testing.T) { |
||||
|
||||
signer, _ := newTestSigner() |
||||
|
||||
// Initialize a Swarm test server
|
||||
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil) |
||||
swarmClient := NewClient(srv.URL) |
||||
defer srv.Close() |
||||
|
||||
// put together some data for our test:
|
||||
dataBytes := []byte(` |
||||
//
|
||||
// Create some data our manifest will point to. Data that could be very big and wouldn't fit in a feed update.
|
||||
// So what we are going to do is upload it to Swarm bzz:// and obtain a **manifest hash** pointing to it:
|
||||
//
|
||||
// MANIFEST HASH --> DATA
|
||||
//
|
||||
// Then, we store that **manifest hash** into a Swarm Feed update. Once we have done this,
|
||||
// we can use the **feed manifest hash** in bzz:// instead, this way: bzz://feed-manifest-hash.
|
||||
//
|
||||
// FEED MANIFEST HASH --> MANIFEST HASH --> DATA
|
||||
//
|
||||
// Given that we can update the feed at any time with a new **manifest hash** but the **feed manifest hash**
|
||||
// stays constant, we have effectively created a fixed address to changing content. (Applause)
|
||||
//
|
||||
// FEED MANIFEST HASH (the same) --> MANIFEST HASH(2) --> DATA(2)
|
||||
//
|
||||
`) |
||||
|
||||
// Create a virtual File out of memory containing the above data
|
||||
f := &File{ |
||||
ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)), |
||||
ManifestEntry: api.ManifestEntry{ |
||||
ContentType: "text/plain", |
||||
Mode: 0660, |
||||
Size: int64(len(dataBytes)), |
||||
}, |
||||
} |
||||
|
||||
// upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
|
||||
manifestAddressHex, err := swarmClient.Upload(f, "", false) |
||||
if err != nil { |
||||
t.Fatalf("Error creating manifest: %s", err) |
||||
} |
||||
|
||||
// convert the hex-encoded manifest hash to a 32-byte slice
|
||||
manifestAddress := common.FromHex(manifestAddressHex) |
||||
|
||||
if len(manifestAddress) != storage.AddressLength { |
||||
t.Fatalf("Something went wrong. Got a hash of an unexpected length. Expected %d bytes. Got %d", storage.AddressLength, len(manifestAddress)) |
||||
} |
||||
|
||||
// Now create a **feed manifest**. For that, we need a topic:
|
||||
topic, _ := feed.NewTopic("interesting topic indeed", nil) |
||||
|
||||
// Build a feed request to update data
|
||||
request := feed.NewFirstRequest(topic) |
||||
|
||||
// Put the 32-byte address of the manifest into the feed update
|
||||
request.SetData(manifestAddress) |
||||
|
||||
// Sign the update
|
||||
if err := request.Sign(signer); err != nil { |
||||
t.Fatalf("Error signing update: %s", err) |
||||
} |
||||
|
||||
// Publish the update and at the same time request a **feed manifest** to be created
|
||||
feedManifestAddressHex, err := swarmClient.CreateFeedWithManifest(request) |
||||
if err != nil { |
||||
t.Fatalf("Error creating feed manifest: %s", err) |
||||
} |
||||
|
||||
// Check we have received the exact **feed manifest** to be expected
|
||||
// given the topic and user signing the updates:
|
||||
correctFeedManifestAddrHex := "747c402e5b9dc715a25a4393147512167bab018a007fad7cdcd9adc7fce1ced2" |
||||
if feedManifestAddressHex != correctFeedManifestAddrHex { |
||||
t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctFeedManifestAddrHex, feedManifestAddressHex) |
||||
} |
||||
|
||||
// Check we get a not found error when trying to get feed updates with a made-up manifest
|
||||
_, err = swarmClient.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") |
||||
if err != ErrNoFeedUpdatesFound { |
||||
t.Fatalf("Expected to receive ErrNoFeedUpdatesFound error. Got: %s", err) |
||||
} |
||||
|
||||
// If we query the feed directly we should get **manifest hash** back:
|
||||
reader, err := swarmClient.QueryFeed(nil, correctFeedManifestAddrHex) |
||||
if err != nil { |
||||
t.Fatalf("Error retrieving feed updates: %s", err) |
||||
} |
||||
defer reader.Close() |
||||
gotData, err := ioutil.ReadAll(reader) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
//Check that indeed the **manifest hash** is retrieved
|
||||
if !bytes.Equal(manifestAddress, gotData) { |
||||
t.Fatalf("Expected: %v, got %v", manifestAddress, gotData) |
||||
} |
||||
|
||||
// Now the final test we were looking for: Use bzz://<feed-manifest> and that should resolve all manifests
|
||||
// and return the original data directly:
|
||||
f, err = swarmClient.Download(feedManifestAddressHex, "") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
gotData, err = ioutil.ReadAll(f) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// Check that we get back the original data:
|
||||
if !bytes.Equal(dataBytes, gotData) { |
||||
t.Fatalf("Expected: %v, got %v", manifestAddress, gotData) |
||||
} |
||||
} |
||||
|
||||
// TestClientCreateUpdateFeed will check that feeds can be created and updated via the HTTP client.
|
||||
func TestClientCreateUpdateFeed(t *testing.T) { |
||||
|
||||
signer, _ := newTestSigner() |
||||
|
||||
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil) |
||||
client := NewClient(srv.URL) |
||||
defer srv.Close() |
||||
|
||||
// set raw data for the feed update
|
||||
databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...") |
||||
|
||||
// our feed topic name
|
||||
topic, _ := feed.NewTopic("El Quijote", nil) |
||||
createRequest := feed.NewFirstRequest(topic) |
||||
|
||||
createRequest.SetData(databytes) |
||||
if err := createRequest.Sign(signer); err != nil { |
||||
t.Fatalf("Error signing update: %s", err) |
||||
} |
||||
|
||||
feedManifestHash, err := client.CreateFeedWithManifest(createRequest) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
correctManifestAddrHex := "0e9b645ebc3da167b1d56399adc3276f7a08229301b72a03336be0e7d4b71882" |
||||
if feedManifestHash != correctManifestAddrHex { |
||||
t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestHash) |
||||
} |
||||
|
||||
reader, err := client.QueryFeed(nil, correctManifestAddrHex) |
||||
if err != nil { |
||||
t.Fatalf("Error retrieving feed updates: %s", err) |
||||
} |
||||
defer reader.Close() |
||||
gotData, err := ioutil.ReadAll(reader) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(databytes, gotData) { |
||||
t.Fatalf("Expected: %v, got %v", databytes, gotData) |
||||
} |
||||
|
||||
// define different data
|
||||
databytes = []byte("... no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...") |
||||
|
||||
updateRequest, err := client.GetFeedRequest(nil, correctManifestAddrHex) |
||||
if err != nil { |
||||
t.Fatalf("Error retrieving update request template: %s", err) |
||||
} |
||||
|
||||
updateRequest.SetData(databytes) |
||||
if err := updateRequest.Sign(signer); err != nil { |
||||
t.Fatalf("Error signing update: %s", err) |
||||
} |
||||
|
||||
if err = client.UpdateFeed(updateRequest); err != nil { |
||||
t.Fatalf("Error updating feed: %s", err) |
||||
} |
||||
|
||||
reader, err = client.QueryFeed(nil, correctManifestAddrHex) |
||||
if err != nil { |
||||
t.Fatalf("Error retrieving feed updates: %s", err) |
||||
} |
||||
defer reader.Close() |
||||
gotData, err = ioutil.ReadAll(reader) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(databytes, gotData) { |
||||
t.Fatalf("Expected: %v, got %v", databytes, gotData) |
||||
} |
||||
|
||||
// now try retrieving feed updates without a manifest
|
||||
|
||||
fd := &feed.Feed{ |
||||
Topic: topic, |
||||
User: signer.Address(), |
||||
} |
||||
|
||||
lookupParams := feed.NewQueryLatest(fd, lookup.NoClue) |
||||
reader, err = client.QueryFeed(lookupParams, "") |
||||
if err != nil { |
||||
t.Fatalf("Error retrieving feed updates: %s", err) |
||||
} |
||||
defer reader.Close() |
||||
gotData, err = ioutil.ReadAll(reader) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(databytes, gotData) { |
||||
t.Fatalf("Expected: %v, got %v", databytes, gotData) |
||||
} |
||||
} |
@ -1,174 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package api |
||||
|
||||
import ( |
||||
"crypto/ecdsa" |
||||
"fmt" |
||||
"os" |
||||
"path/filepath" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/ethereum/go-ethereum/contracts/ens" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p/enode" |
||||
"github.com/ethereum/go-ethereum/swarm/network" |
||||
"github.com/ethereum/go-ethereum/swarm/pss" |
||||
"github.com/ethereum/go-ethereum/swarm/services/swap" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
) |
||||
|
||||
const ( |
||||
DefaultHTTPListenAddr = "127.0.0.1" |
||||
DefaultHTTPPort = "8500" |
||||
) |
||||
|
||||
// separate bzz directories
|
||||
// allow several bzz nodes to run in parallel
|
||||
type Config struct { |
||||
// serialised/persisted fields
|
||||
*storage.FileStoreParams |
||||
|
||||
// LocalStore
|
||||
ChunkDbPath string |
||||
DbCapacity uint64 |
||||
CacheCapacity uint |
||||
BaseKey []byte |
||||
|
||||
*network.HiveParams |
||||
Swap *swap.LocalProfile |
||||
Pss *pss.PssParams |
||||
Contract common.Address |
||||
EnsRoot common.Address |
||||
EnsAPIs []string |
||||
Path string |
||||
ListenAddr string |
||||
Port string |
||||
PublicKey string |
||||
BzzKey string |
||||
Enode *enode.Node `toml:"-"` |
||||
NetworkID uint64 |
||||
SwapEnabled bool |
||||
SyncEnabled bool |
||||
SyncingSkipCheck bool |
||||
DeliverySkipCheck bool |
||||
MaxStreamPeerServers int |
||||
LightNodeEnabled bool |
||||
BootnodeMode bool |
||||
SyncUpdateDelay time.Duration |
||||
SwapAPI string |
||||
Cors string |
||||
BzzAccount string |
||||
GlobalStoreAPI string |
||||
privateKey *ecdsa.PrivateKey |
||||
} |
||||
|
||||
// NewConfig creates a default config with all parameters set to their defaults
|
||||
func NewConfig() (c *Config) { |
||||
|
||||
c = &Config{ |
||||
FileStoreParams: storage.NewFileStoreParams(), |
||||
HiveParams: network.NewHiveParams(), |
||||
Swap: swap.NewDefaultSwapParams(), |
||||
Pss: pss.NewPssParams(), |
||||
ListenAddr: DefaultHTTPListenAddr, |
||||
Port: DefaultHTTPPort, |
||||
Path: node.DefaultDataDir(), |
||||
EnsAPIs: nil, |
||||
EnsRoot: ens.TestNetAddress, |
||||
NetworkID: network.DefaultNetworkID, |
||||
SwapEnabled: false, |
||||
SyncEnabled: true, |
||||
SyncingSkipCheck: false, |
||||
MaxStreamPeerServers: 10000, |
||||
DeliverySkipCheck: true, |
||||
SyncUpdateDelay: 15 * time.Second, |
||||
SwapAPI: "", |
||||
} |
||||
|
||||
return |
||||
} |
||||
|
||||
//some config params need to be initialized after the
|
||||
//config building phase is completed (e.g. due to overriding flags)
|
||||
func (c *Config) Init(prvKey *ecdsa.PrivateKey, nodeKey *ecdsa.PrivateKey) error { |
||||
|
||||
// create swarm dir and record key
|
||||
err := c.createAndSetPath(c.Path, prvKey) |
||||
if err != nil { |
||||
return fmt.Errorf("Error creating root swarm data directory: %v", err) |
||||
} |
||||
c.setKey(prvKey) |
||||
|
||||
// create the new enode record
|
||||
// signed with the ephemeral node key
|
||||
enodeParams := &network.EnodeParams{ |
||||
PrivateKey: prvKey, |
||||
EnodeKey: nodeKey, |
||||
Lightnode: c.LightNodeEnabled, |
||||
Bootnode: c.BootnodeMode, |
||||
} |
||||
c.Enode, err = network.NewEnode(enodeParams) |
||||
if err != nil { |
||||
return fmt.Errorf("Error creating enode: %v", err) |
||||
} |
||||
|
||||
// initialize components that depend on the swarm instance's private key
|
||||
if c.SwapEnabled { |
||||
c.Swap.Init(c.Contract, prvKey) |
||||
} |
||||
|
||||
c.privateKey = prvKey |
||||
c.ChunkDbPath = filepath.Join(c.Path, "chunks") |
||||
c.BaseKey = common.FromHex(c.BzzKey) |
||||
|
||||
c.Pss = c.Pss.WithPrivateKey(c.privateKey) |
||||
return nil |
||||
} |
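// An illustrative sketch of the intended call order (not part of the original
// file): build the defaults, apply the overrides that flags would normally
// apply, then Init with the node's keys (generated ad hoc here for brevity):
func exampleConfig() (*Config, error) {
	prvKey, err := crypto.GenerateKey()
	if err != nil {
		return nil, err
	}
	nodeKey, err := crypto.GenerateKey()
	if err != nil {
		return nil, err
	}
	c := NewConfig()
	c.Port = "8501" // assumed override, e.g. from a CLI flag
	if err := c.Init(prvKey, nodeKey); err != nil {
		return nil, err
	}
	return c, nil
}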
||||
|
||||
func (c *Config) ShiftPrivateKey() (privKey *ecdsa.PrivateKey) { |
||||
if c.privateKey != nil { |
||||
privKey = c.privateKey |
||||
c.privateKey = nil |
||||
} |
||||
return privKey |
||||
} |
||||
|
||||
func (c *Config) setKey(prvKey *ecdsa.PrivateKey) { |
||||
bzzkeybytes := network.PrivateKeyToBzzKey(prvKey) |
||||
pubkey := crypto.FromECDSAPub(&prvKey.PublicKey) |
||||
pubkeyhex := hexutil.Encode(pubkey) |
||||
keyhex := hexutil.Encode(bzzkeybytes) |
||||
|
||||
c.privateKey = prvKey |
||||
c.PublicKey = pubkeyhex |
||||
c.BzzKey = keyhex |
||||
} |
||||
|
||||
func (c *Config) createAndSetPath(datadirPath string, prvKey *ecdsa.PrivateKey) error { |
||||
address := crypto.PubkeyToAddress(prvKey.PublicKey) |
||||
bzzdirPath := filepath.Join(datadirPath, "bzz-"+common.Bytes2Hex(address.Bytes())) |
||||
err := os.MkdirAll(bzzdirPath, os.ModePerm) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
c.Path = bzzdirPath |
||||
return nil |
||||
} |
@ -1,66 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package api |
||||
|
||||
import ( |
||||
"reflect" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
) |
||||
|
||||
func TestConfig(t *testing.T) { |
||||
|
||||
var hexprvkey = "65138b2aa745041b372153550584587da326ab440576b2a1191dd95cee30039c" |
||||
var hexnodekey = "75138b2aa745041b372153550584587da326ab440576b2a1191dd95cee30039c" |
||||
|
||||
prvkey, err := crypto.HexToECDSA(hexprvkey) |
||||
if err != nil { |
||||
t.Fatalf("failed to load private key: %v", err) |
||||
} |
||||
nodekey, err := crypto.HexToECDSA(hexnodekey) |
||||
if err != nil { |
||||
t.Fatalf("failed to load private key: %v", err) |
||||
} |
||||
|
||||
one := NewConfig() |
||||
two := NewConfig() |
||||
|
||||
if equal := reflect.DeepEqual(one, two); !equal { |
||||
t.Fatal("Two default configs are not equal") |
||||
} |
||||
|
||||
err = one.Init(prvkey, nodekey) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
//the init function should set the following fields
|
||||
if one.BzzKey == "" { |
||||
t.Fatal("Expected BzzKey to be set") |
||||
} |
||||
if one.PublicKey == "" { |
||||
t.Fatal("Expected PublicKey to be set") |
||||
} |
||||
if one.Swap.PayProfile.Beneficiary == (common.Address{}) && one.SwapEnabled { |
||||
t.Fatal("Failed to correctly initialize SwapParams") |
||||
} |
||||
if one.ChunkDbPath == one.Path { |
||||
t.Fatal("Failed to correctly initialize StoreParams") |
||||
} |
||||
} |
@ -1,78 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package api |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"errors" |
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/storage/encryption" |
||||
"golang.org/x/crypto/sha3" |
||||
) |
||||
|
||||
type RefEncryption struct { |
||||
refSize int |
||||
span []byte |
||||
} |
||||
|
||||
func NewRefEncryption(refSize int) *RefEncryption { |
||||
span := make([]byte, 8) |
||||
binary.LittleEndian.PutUint64(span, uint64(refSize)) |
||||
return &RefEncryption{ |
||||
refSize: refSize, |
||||
span: span, |
||||
} |
||||
} |
||||
|
||||
func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) { |
||||
spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256) |
||||
encryptedSpan, err := spanEncryption.Encrypt(re.span) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256) |
||||
encryptedData, err := dataEncryption.Encrypt(ref) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
encryptedRef := make([]byte, len(ref)+8) |
||||
copy(encryptedRef[:8], encryptedSpan) |
||||
copy(encryptedRef[8:], encryptedData) |
||||
|
||||
return encryptedRef, nil |
||||
} |
||||
|
||||
func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) { |
||||
spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256) |
||||
decryptedSpan, err := spanEncryption.Decrypt(ref[:8]) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
size := binary.LittleEndian.Uint64(decryptedSpan) |
||||
if size != uint64(len(ref)-8) { |
||||
return nil, errors.New("invalid span in encrypted reference") |
||||
} |
||||
|
||||
dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256) |
||||
decryptedRef, err := dataEncryption.Decrypt(ref[8:]) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return decryptedRef, nil |
||||
} |
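// An illustrative round trip (not part of the original file); the key length
// must match what the encryption package expects for its Keccak-256 based
// scheme (assumed to be 32 bytes here):
func exampleRefEncryptionRoundTrip(ref, key []byte) ([]byte, error) {
	re := NewRefEncryption(len(ref))
	encrypted, err := re.Encrypt(ref, key)
	if err != nil {
		return nil, err
	}
	return re.Decrypt(encrypted, key) // returns the original reference bytes
}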
@ -1,292 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package api |
||||
|
||||
import ( |
||||
"bufio" |
||||
"context" |
||||
"fmt" |
||||
"io" |
||||
"os" |
||||
"path" |
||||
"path/filepath" |
||||
"sync" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/swarm/log" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
) |
||||
|
||||
const maxParallelFiles = 5 |
||||
|
||||
type FileSystem struct { |
||||
api *API |
||||
} |
||||
|
||||
func NewFileSystem(api *API) *FileSystem { |
||||
return &FileSystem{api} |
||||
} |
||||
|
||||
// Upload replicates a local directory as a manifest file and uploads it
|
||||
// using the FileStore
|
||||
// This function waits for the chunks to be stored.
|
||||
// TODO: localpath should point to a manifest
|
||||
//
|
||||
// DEPRECATED: Use the HTTP API instead
|
||||
func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error) { |
||||
var list []*manifestTrieEntry |
||||
localpath, err := filepath.Abs(filepath.Clean(lpath)) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
f, err := os.Open(localpath) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
stat, err := f.Stat() |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
var start int |
||||
if stat.IsDir() { |
||||
start = len(localpath) |
||||
log.Debug(fmt.Sprintf("uploading '%s'", localpath)) |
||||
err = filepath.Walk(localpath, func(path string, info os.FileInfo, err error) error { |
||||
if (err == nil) && !info.IsDir() { |
||||
if len(path) <= start { |
||||
return fmt.Errorf("Path is too short") |
||||
} |
||||
if path[:start] != localpath { |
||||
return fmt.Errorf("Path prefix of '%s' does not match localpath '%s'", path, localpath) |
||||
} |
||||
entry := newManifestTrieEntry(&ManifestEntry{Path: filepath.ToSlash(path)}, nil) |
||||
list = append(list, entry) |
||||
} |
||||
return err |
||||
}) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
} else { |
||||
dir := filepath.Dir(localpath) |
||||
start = len(dir) |
||||
if len(localpath) <= start { |
||||
return "", fmt.Errorf("Path is too short") |
||||
} |
||||
if localpath[:start] != dir { |
||||
return "", fmt.Errorf("Path prefix of '%s' does not match dir '%s'", localpath, dir) |
||||
} |
||||
entry := newManifestTrieEntry(&ManifestEntry{Path: filepath.ToSlash(localpath)}, nil) |
||||
list = append(list, entry) |
||||
} |
||||
|
||||
errors := make([]error, len(list)) |
||||
sem := make(chan bool, maxParallelFiles) |
||||
defer close(sem) |
||||
|
||||
for i, entry := range list { |
||||
sem <- true |
||||
go func(i int, entry *manifestTrieEntry) { |
||||
defer func() { <-sem }() |
||||
|
||||
f, err := os.Open(entry.Path) |
||||
if err != nil { |
||||
errors[i] = err |
||||
return |
||||
} |
||||
defer f.Close() |
||||
|
||||
stat, err := f.Stat() |
||||
if err != nil { |
||||
errors[i] = err |
||||
return |
||||
} |
||||
|
||||
var hash storage.Address |
||||
var wait func(context.Context) error |
||||
ctx := context.TODO() |
||||
hash, wait, err = fs.api.fileStore.Store(ctx, f, stat.Size(), toEncrypt) |
||||
if err != nil { |
||||
errors[i] = err |
||||
return |
||||
} |
||||
if hash != nil { |
||||
list[i].Hash = hash.Hex() |
||||
} |
||||
if err := wait(ctx); err != nil { |
||||
errors[i] = err |
||||
return |
||||
} |
||||
|
||||
list[i].ContentType, err = DetectContentType(f.Name(), f) |
||||
if err != nil { |
||||
errors[i] = err |
||||
return |
||||
} |
||||
|
||||
}(i, entry) |
||||
} |
||||
for i := 0; i < cap(sem); i++ { |
||||
sem <- true |
||||
} |
||||
|
||||
trie := &manifestTrie{ |
||||
fileStore: fs.api.fileStore, |
||||
} |
||||
quitC := make(chan bool) |
||||
for i, entry := range list { |
||||
if errors[i] != nil { |
||||
return "", errors[i] |
||||
} |
||||
entry.Path = RegularSlashes(entry.Path[start:]) |
||||
if entry.Path == index { |
||||
ientry := newManifestTrieEntry(&ManifestEntry{ |
||||
ContentType: entry.ContentType, |
||||
}, nil) |
||||
ientry.Hash = entry.Hash |
||||
trie.addEntry(ientry, quitC) |
||||
} |
||||
trie.addEntry(entry, quitC) |
||||
} |
||||
|
||||
err2 := trie.recalcAndStore() |
||||
var hs string |
||||
if err2 == nil { |
||||
hs = trie.ref.Hex() |
||||
} |
||||
return hs, err2 |
||||
} |
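
A sketch of how this deprecated helper is driven; swarmAPI stands for an initialized *API (an assumption here, see the tests further down for how one is constructed in practice):

    fs := NewFileSystem(swarmAPI) // swarmAPI: an initialized *API (assumed)
    bzzhash, err := fs.Upload("/tmp/site", "index.html", false) // plaintext upload
    if err != nil {
        log.Error("upload failed", "err", err)
    }
    // bzzhash is the hex reference of the stored manifest (servable as bzz:/<bzzhash>/)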

// Download replicates the manifest path structure on the local filesystem
// under localpath.
//
// DEPRECATED: Use the HTTP API instead
func (fs *FileSystem) Download(bzzpath, localpath string) error {
    lpath, err := filepath.Abs(filepath.Clean(localpath))
    if err != nil {
        return err
    }
    err = os.MkdirAll(lpath, os.ModePerm)
    if err != nil {
        return err
    }

    // resolving host and port
    uri, err := Parse(path.Join("bzz:/", bzzpath))
    if err != nil {
        return err
    }
    addr, err := fs.api.Resolve(context.TODO(), uri.Addr)
    if err != nil {
        return err
    }
    path := uri.Path

    if len(path) > 0 {
        path += "/"
    }

    quitC := make(chan bool)
    trie, err := loadManifest(context.TODO(), fs.api.fileStore, addr, quitC, NOOPDecrypt)
    if err != nil {
        log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err))
        return err
    }

    type downloadListEntry struct {
        addr storage.Address
        path string
    }

    var list []*downloadListEntry
    var mde error

    prevPath := lpath
    err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) {
        log.Trace(fmt.Sprintf("fs.Download: %#v", entry))

        addr = common.Hex2Bytes(entry.Hash)
        path := lpath + "/" + suffix
        dir := filepath.Dir(path)
        if dir != prevPath {
            mde = os.MkdirAll(dir, os.ModePerm)
            prevPath = dir
        }
        if (mde == nil) && (path != dir+"/") {
            list = append(list, &downloadListEntry{addr: addr, path: path})
        }
    })
    if err != nil {
        return err
    }

    wg := sync.WaitGroup{}
    errC := make(chan error)
    done := make(chan bool, maxParallelFiles)
    for i, entry := range list {
        select {
        case done <- true:
            wg.Add(1)
        case <-quitC:
            return fmt.Errorf("aborted")
        }
        go func(i int, entry *downloadListEntry) {
            defer wg.Done()
            err := retrieveToFile(quitC, fs.api.fileStore, entry.addr, entry.path)
            if err != nil {
                select {
                case errC <- err:
                case <-quitC:
                }
                return
            }
            <-done
        }(i, entry)
    }
    go func() {
        wg.Wait()
        close(errC)
    }()
    select {
    case err = <-errC:
        return err
    case <-quitC:
        return fmt.Errorf("aborted")
    }
}
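
And the matching download direction, under the same assumption:

    // Replicate the uploaded manifest tree into a local directory.
    if err := fs.Download(bzzhash, "/tmp/site-copy"); err != nil {
        log.Error("download failed", "err", err)
    }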

func retrieveToFile(quitC chan bool, fileStore *storage.FileStore, addr storage.Address, path string) error {
    f, err := os.Create(path) // TODO: path separators
    if err != nil {
        return err
    }
    reader, _ := fileStore.Retrieve(context.TODO(), addr)
    writer := bufio.NewWriter(f)
    size, err := reader.Size(context.TODO(), quitC)
    if err != nil {
        return err
    }
    if _, err = io.CopyN(writer, reader, size); err != nil {
        return err
    }
    if err := writer.Flush(); err != nil {
        return err
    }
    return f.Close()
}
@ -1,200 +0,0 @@ |
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
    "bytes"
    "context"
    "io/ioutil"
    "os"
    "path/filepath"
    "testing"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/storage"
)

var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test")

func testFileSystem(t *testing.T, f func(*FileSystem, bool)) {
    testAPI(t, func(api *API, _ *chunk.Tags, toEncrypt bool) {
        f(NewFileSystem(api), toEncrypt)
    })
}

func readPath(t *testing.T, parts ...string) string {
    file := filepath.Join(parts...)
    content, err := ioutil.ReadFile(file)

    if err != nil {
        t.Fatalf("unexpected error reading '%v': %v", file, err)
    }
    return string(content)
}

func TestApiDirUpload0(t *testing.T) {
    testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
        api := fs.api
        bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt)
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
        content := readPath(t, "testdata", "test0", "index.html")
        resp := testGet(t, api, bzzhash, "index.html")
        exp := expResponse(content, "text/html; charset=utf-8", 0)
        checkResponse(t, resp, exp)

        content = readPath(t, "testdata", "test0", "index.css")
        resp = testGet(t, api, bzzhash, "index.css")
        exp = expResponse(content, "text/css; charset=utf-8", 0)
        checkResponse(t, resp, exp)

        addr := storage.Address(common.Hex2Bytes(bzzhash))
        _, _, _, _, err = api.Get(context.TODO(), NOOPDecrypt, addr, "")
        if err == nil {
            t.Fatalf("expected error: %v", err)
        }

        downloadDir := filepath.Join(testDownloadDir, "test0")
        defer os.RemoveAll(downloadDir)
        err = fs.Download(bzzhash, downloadDir)
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
        newbzzhash, err := fs.Upload(downloadDir, "", toEncrypt)
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
        // TODO: currently the hash is not deterministic in the encrypted case
        if !toEncrypt && bzzhash != newbzzhash {
            t.Fatalf("download %v reuploaded has incorrect hash, expected %v, got %v", downloadDir, bzzhash, newbzzhash)
        }
    })
}

func TestApiDirUploadModify(t *testing.T) {
    testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
        api := fs.api
        bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }

        addr := storage.Address(common.Hex2Bytes(bzzhash))
        addr, err = api.Modify(context.TODO(), addr, "index.html", "", "")
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }
        index, err := ioutil.ReadFile(filepath.Join("testdata", "test0", "index.html"))
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }
        ctx := context.TODO()
        hash, wait, err := api.Store(ctx, bytes.NewReader(index), int64(len(index)), toEncrypt)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }
        err = wait(ctx)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }
        addr, err = api.Modify(context.TODO(), addr, "index2.html", hash.Hex(), "text/html; charset=utf-8")
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }
        addr, err = api.Modify(context.TODO(), addr, "img/logo.png", hash.Hex(), "text/html; charset=utf-8")
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }
        bzzhash = addr.Hex()

        content := readPath(t, "testdata", "test0", "index.html")
        resp := testGet(t, api, bzzhash, "index2.html")
        exp := expResponse(content, "text/html; charset=utf-8", 0)
        checkResponse(t, resp, exp)

        resp = testGet(t, api, bzzhash, "img/logo.png")
        exp = expResponse(content, "text/html; charset=utf-8", 0)
        checkResponse(t, resp, exp)

        content = readPath(t, "testdata", "test0", "index.css")
        resp = testGet(t, api, bzzhash, "index.css")
        exp = expResponse(content, "text/css; charset=utf-8", 0)
        checkResponse(t, resp, exp)

        _, _, _, _, err = api.Get(context.TODO(), nil, addr, "")
        if err == nil {
            t.Errorf("expected error: %v", err)
        }
    })
}

func TestApiDirUploadWithRootFile(t *testing.T) {
    testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
        api := fs.api
        bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html", toEncrypt)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }

        content := readPath(t, "testdata", "test0", "index.html")
        resp := testGet(t, api, bzzhash, "")
        exp := expResponse(content, "text/html; charset=utf-8", 0)
        checkResponse(t, resp, exp)
    })
}

func TestApiFileUpload(t *testing.T) {
    testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
        api := fs.api
        bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "", toEncrypt)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }

        content := readPath(t, "testdata", "test0", "index.html")
        resp := testGet(t, api, bzzhash, "index.html")
        exp := expResponse(content, "text/html; charset=utf-8", 0)
        checkResponse(t, resp, exp)
    })
}

func TestApiFileUploadWithRootFile(t *testing.T) {
    testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
        api := fs.api
        bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html", toEncrypt)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return
        }

        content := readPath(t, "testdata", "test0", "index.html")
        resp := testGet(t, api, bzzhash, "")
        exp := expResponse(content, "text/html; charset=utf-8", 0)
        checkResponse(t, resp, exp)
    })
}
File diff suppressed because it is too large
@ -1,162 +0,0 @@ |
package http

import (
    "fmt"
    "net/http"
    "runtime/debug"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/api"
    "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/log"
    "github.com/ethereum/go-ethereum/swarm/sctx"
    "github.com/ethereum/go-ethereum/swarm/spancontext"
    "github.com/pborman/uuid"
)

// Adapt chains h (the main request handler) to the adapters (middleware handlers).
// Note that the order of execution for `adapters` is FIFO (adapters[0] is executed first).
func Adapt(h http.Handler, adapters ...Adapter) http.Handler {
    for i := range adapters {
        adapter := adapters[len(adapters)-1-i]
        h = adapter(h)
    }
    return h
}

type Adapter func(http.Handler) http.Handler
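
A sketch of wiring such a chain together; mux, the port and the exact adapter order are illustrative here, not the server's actual configuration:

    // Adapters run in FIFO order: RecoverPanic wraps everything below it.
    handler := Adapt(
        mux,                       // innermost handler, e.g. the route multiplexer (assumed)
        RecoverPanic,              // recover and log panics from the handlers below
        SetRequestID,              // attach a ruid to the request context
        SetRequestHost,            // record the Host header in the context
        InitLoggingResponseWriter, // time the request and capture the status code
        ParseURI,                  // parse /bzz:/... into an api.URI
        InstrumentOpenTracing,     // open a tracing span per method/scheme
    )
    http.ListenAndServe(":8500", handler)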

func SetRequestID(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        r = r.WithContext(SetRUID(r.Context(), uuid.New()[:8]))
        metrics.GetOrRegisterCounter(fmt.Sprintf("http.request.%s", r.Method), nil).Inc(1)
        log.Info("created ruid for request", "ruid", GetRUID(r.Context()), "method", r.Method, "url", r.RequestURI)

        h.ServeHTTP(w, r)
    })
}

func SetRequestHost(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        r = r.WithContext(sctx.SetHost(r.Context(), r.Host))
        log.Info("setting request host", "ruid", GetRUID(r.Context()), "host", sctx.GetHost(r.Context()))

        h.ServeHTTP(w, r)
    })
}

func ParseURI(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        uri, err := api.Parse(strings.TrimLeft(r.URL.Path, "/"))
        if err != nil {
            w.WriteHeader(http.StatusBadRequest)
            respondError(w, r, fmt.Sprintf("invalid URI %q", r.URL.Path), http.StatusBadRequest)
            return
        }
        if uri.Addr != "" && strings.HasPrefix(uri.Addr, "0x") {
            uri.Addr = strings.TrimPrefix(uri.Addr, "0x")

            msg := fmt.Sprintf(`The requested hash seems to be prefixed with '0x'. You will be redirected to the correct URL within 5 seconds.<br/>
            Please click <a href='%[1]s'>here</a> if your browser does not redirect you within 5 seconds.<script>setTimeout("location.href='%[1]s';",5000);</script>`, "/"+uri.String())
            w.WriteHeader(http.StatusNotFound)
            w.Write([]byte(msg))
            return
        }

        ctx := r.Context()
        r = r.WithContext(SetURI(ctx, uri))
        log.Debug("parsed request path", "ruid", GetRUID(r.Context()), "method", r.Method, "uri.Addr", uri.Addr, "uri.Path", uri.Path, "uri.Scheme", uri.Scheme)

        h.ServeHTTP(w, r)
    })
}

func InitLoggingResponseWriter(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        tn := time.Now()

        writer := newLoggingResponseWriter(w)
        h.ServeHTTP(writer, r)

        ts := time.Since(tn)
        log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode, "time", ts)
        metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).Update(ts)
        metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.%d.time", r.Method, writer.statusCode), nil).Update(ts)
    })
}
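
newLoggingResponseWriter and its statusCode field are defined elsewhere in this package and do not appear in this diff; the sketch below shows the usual shape of such a wrapper and is an assumption, not the package's actual definition:

    // Hypothetical sketch; the real type in this package may differ.
    type loggingResponseWriter struct {
        http.ResponseWriter
        statusCode int
    }

    func newLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
        // Default to 200, since handlers that never call WriteHeader still succeed.
        return &loggingResponseWriter{w, http.StatusOK}
    }

    func (lrw *loggingResponseWriter) WriteHeader(code int) {
        lrw.statusCode = code
        lrw.ResponseWriter.WriteHeader(code)
    }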

// InitUploadTag creates a new tag for an upload to the local HTTP proxy.
// If the tag is not named via the SwarmTagHeaderName header, a fallback name is used.
// When the Content-Length header is set, an ETA on chunking is available, since the
// number of chunks to be split is known in advance (not including enclosing manifest chunks).
// The tag can later be accessed using the appropriate identifier in the request context.
func InitUploadTag(h http.Handler, tags *chunk.Tags) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        var (
            tagName        string
            err            error
            estimatedTotal int64 = 0
            contentType          = r.Header.Get("Content-Type")
            headerTag            = r.Header.Get(SwarmTagHeaderName)
        )
        if headerTag != "" {
            tagName = headerTag
            log.Trace("got tag name from http header", "tagName", tagName)
        } else {
            tagName = fmt.Sprintf("unnamed_tag_%d", time.Now().Unix())
        }

        if !strings.Contains(contentType, "multipart") && r.ContentLength > 0 {
            log.Trace("calculating tag size", "contentType", contentType, "contentLength", r.ContentLength)
            uri := GetURI(r.Context())
            if uri != nil {
                log.Debug("got uri from context")
                if uri.Addr == "encrypt" {
                    estimatedTotal = calculateNumberOfChunks(r.ContentLength, true)
                } else {
                    estimatedTotal = calculateNumberOfChunks(r.ContentLength, false)
                }
            }
        }

        log.Trace("creating tag", "tagName", tagName, "estimatedTotal", estimatedTotal)

        t, err := tags.New(tagName, estimatedTotal)
        if err != nil {
            log.Error("error creating tag", "err", err, "tagName", tagName)
        }

        log.Trace("setting tag id to context", "uid", t.Uid)
        ctx := sctx.SetTag(r.Context(), t.Uid)

        h.ServeHTTP(w, r.WithContext(ctx))
    })
}
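
From the client side, naming the tag is just a request header. A sketch, with the gateway address, tag name and payload invented for illustration (SwarmTagHeaderName is defined elsewhere in this package):

    body := strings.NewReader("hello swarm")
    req, err := http.NewRequest("POST", "http://localhost:8500/bzz:/", body)
    if err == nil {
        req.Header.Set(SwarmTagHeaderName, "holiday-pictures") // read by InitUploadTag above
        req.Header.Set("Content-Type", "text/plain")           // non-multipart, so the chunk count is estimated
        if resp, err := http.DefaultClient.Do(req); err == nil {
            resp.Body.Close() // the response body carries the new content reference
        }
    }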

func InstrumentOpenTracing(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        uri := GetURI(r.Context())
        if uri == nil || r.Method == "" || (uri != nil && uri.Scheme == "") {
            h.ServeHTTP(w, r) // soft fail
            return
        }
        spanName := fmt.Sprintf("http.%s.%s", r.Method, uri.Scheme)
        ctx, sp := spancontext.StartSpan(r.Context(), spanName)

        defer sp.Finish()
        h.ServeHTTP(w, r.WithContext(ctx))
    })
}

func RecoverPanic(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        defer func() {
            if err := recover(); err != nil {
                log.Error("panic recovery!", "stack trace", string(debug.Stack()), "url", r.URL.String(), "headers", r.Header)
            }
        }()
        h.ServeHTTP(w, r)
    })
}
@ -1,132 +0,0 @@ |
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package http

import (
    "encoding/json"
    "fmt"
    "html/template"
    "net/http"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/api"
)

var (
    htmlCounter      = metrics.NewRegisteredCounter("api.http.errorpage.html.count", nil)
    jsonCounter      = metrics.NewRegisteredCounter("api.http.errorpage.json.count", nil)
    plaintextCounter = metrics.NewRegisteredCounter("api.http.errorpage.plaintext.count", nil)
)

type ResponseParams struct {
    Msg       template.HTML
    Code      int
    Timestamp string
    template  *template.Template
    Details   template.HTML
}
|
||||
// ShowMultipleChoices is used when a user requests a resource in a manifest which results
|
||||
// in ambiguous results. It returns a HTML page with clickable links of each of the entry
|
||||
// in the manifest which fits the request URI ambiguity.
|
||||
// For example, if the user requests bzz:/<hash>/read and that manifest contains entries
|
||||
// "readme.md" and "readinglist.txt", a HTML page is returned with this two links.
|
||||
// This only applies if the manifest has no default entry
|
||||
func ShowMultipleChoices(w http.ResponseWriter, r *http.Request, list api.ManifestList) { |
||||
log.Debug("ShowMultipleChoices", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context())) |
||||
msg := "" |
||||
if list.Entries == nil { |
||||
respondError(w, r, "Could not resolve", http.StatusInternalServerError) |
||||
return |
||||
} |
||||
requestUri := strings.TrimPrefix(r.RequestURI, "/") |
||||
|
||||
uri, err := api.Parse(requestUri) |
||||
if err != nil { |
||||
respondError(w, r, "Bad Request", http.StatusBadRequest) |
||||
} |
||||
|
||||
uri.Scheme = "bzz-list" |
||||
msg += fmt.Sprintf("Disambiguation:<br/>Your request may refer to multiple choices.<br/>Click <a class=\"orange\" href='"+"/"+uri.String()+"'>here</a> if your browser does not redirect you within 5 seconds.<script>setTimeout(\"location.href='%s';\",5000);</script><br/>", "/"+uri.String()) |
||||
respondTemplate(w, r, "error", msg, http.StatusMultipleChoices) |
||||
} |

func respondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg string, code int) {
    log.Debug("respondTemplate", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()))
    respond(w, r, &ResponseParams{
        Code:      code,
        Msg:       template.HTML(msg),
        Timestamp: time.Now().Format(time.RFC1123),
        template:  TemplatesMap[templateName],
    })
}

func respondError(w http.ResponseWriter, r *http.Request, msg string, code int) {
    log.Info("respondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code, "msg", msg)
    respondTemplate(w, r, "error", msg, code)
}

func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
    w.WriteHeader(params.Code)

    if params.Code >= 400 {
        w.Header().Del("Cache-Control")
        w.Header().Del("ETag")
    }

    acceptHeader := r.Header.Get("Accept")
    // this cannot be in a switch since an Accept header can have multiple values: "Accept: */*, text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8"
    if strings.Contains(acceptHeader, "application/json") {
        if err := respondJSON(w, r, params); err != nil {
            respondError(w, r, "Internal server error", http.StatusInternalServerError)
        }
    } else if strings.Contains(acceptHeader, "text/html") {
        respondHTML(w, r, params)
    } else {
        respondPlaintext(w, r, params) // returns nice errors for curl
    }
}

func respondHTML(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
    htmlCounter.Inc(1)
    log.Info("respondHTML", "ruid", GetRUID(r.Context()), "code", params.Code)
    err := params.template.Execute(w, params)
    if err != nil {
        log.Error(err.Error())
    }
}

func respondJSON(w http.ResponseWriter, r *http.Request, params *ResponseParams) error {
    jsonCounter.Inc(1)
    log.Info("respondJSON", "ruid", GetRUID(r.Context()), "code", params.Code)
    w.Header().Set("Content-Type", "application/json")
    return json.NewEncoder(w).Encode(params)
}

func respondPlaintext(w http.ResponseWriter, r *http.Request, params *ResponseParams) error {
    plaintextCounter.Inc(1)
    log.Info("respondPlaintext", "ruid", GetRUID(r.Context()), "code", params.Code)
    w.Header().Set("Content-Type", "text/plain")
    strToWrite := "Code: " + fmt.Sprintf("%d", params.Code) + "\n"
    strToWrite += "Message: " + string(params.Msg) + "\n"
    strToWrite += "Timestamp: " + params.Timestamp + "\n"
    _, err := w.Write([]byte(strToWrite))
    return err
}
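
For illustration, a hypothetical handler fragment showing how these helpers are meant to be called; the Accept header of the incoming request then decides whether the error is rendered as JSON, as the HTML error template, or as plain text:

    // Invented handler name and message; respondError routes through
    // respondTemplate and respond, which perform the content negotiation above.
    func handleGetExample(w http.ResponseWriter, r *http.Request) {
        respondError(w, r, "resource not found", http.StatusNotFound)
    }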
Some files were not shown because too many files have changed in this diff