mirror of https://github.com/ethereum/go-ethereum
go.mod: switch to Go modules (#20311)
* go.mod, vendor: switch to Go modules
* travis: explicitly enable go modules in Go 1.11 and 1.12
* accounts/abi/bind: switch binding test to go modules
* travis, build: aggregate and upload go mod dependencies for PPA
* go.mod: tidy up the modules to avoid xgo writes to go.sum
* build, internal/build: drop own file/folder copier
* travis: fake build ppa only for go module dependencies
* mobile: fix CopyFile, switch to package cp
* build, travis: use ephemeral debsrc GOPATH to get mod deps

pull/20355/head
commit 89ab8a74c0
@ -0,0 +1,68 @@
module github.com/ethereum/go-ethereum

go 1.13

require (
	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
	github.com/Azure/azure-storage-blob-go v0.7.0
	github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect
	github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
	github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156
	github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847
	github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6
	github.com/cespare/cp v0.1.0
	github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9
	github.com/davecgh/go-spew v1.1.1
	github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea
	github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf
	github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c
	github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa
	github.com/fatih/color v1.3.0
	github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc
	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
	github.com/go-ole/go-ole v1.2.1 // indirect
	github.com/go-stack/stack v1.8.0
	github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c
	github.com/golang/snappy v0.0.1
	github.com/google/go-cmp v0.3.1 // indirect
	github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989
	github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277
	github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad
	github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3
	github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883
	github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
	github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21
	github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356
	github.com/kr/pretty v0.1.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/mattn/go-colorable v0.1.0
	github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035
	github.com/naoina/go-stringutil v0.1.0 // indirect
	github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
	github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c
	github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222
	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
	github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150
	github.com/rjeczalik/notify v0.9.1
	github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d
	github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00
	github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 // indirect
	github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4
	github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570
	github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect
	github.com/stretchr/testify v1.4.0
	github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d
	github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
	github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208
	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
	golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect
	golang.org/x/sync v0.0.0-20181108010431-42b317875d0f
	golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7
	golang.org/x/text v0.3.2
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
	gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772
	gopkg.in/sourcemap.v1 v1.0.5 // indirect
	gopkg.in/urfave/cli.v1 v1.20.0
	gotest.tools v2.2.0+incompatible // indirect
)
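The entries marked // indirect above are modules no go-ethereum package imports directly; they are pulled in only by other modules in the dependency graph. A minimal sketch of what makes a requirement direct (an illustrative program, not part of this commit): importing a module's package anywhere in the repository, as go-ethereum does with gopkg.in/urfave/cli.v1, is what lists it in the require block without the // indirect marker.

	package main

	import (
		"log"
		"os"

		"gopkg.in/urfave/cli.v1"
	)

	func main() {
		// Importing the package above makes gopkg.in/urfave/cli.v1 a direct
		// requirement; "go mod tidy" records it without the "// indirect" marker.
		app := cli.NewApp()
		app.Name = "example"
		if err := app.Run(os.Args); err != nil {
			log.Fatal(err)
		}
	}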
@ -0,0 +1,214 @@
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck=
github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 h1:rtI0fD4oG/8eVokGVPYJEW1F88p1ZNgXiEIs9thEE4A=
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 h1:J82+/8rub3qSy0HxEnoYD8cs+HDlHWYrqYXe2Vqxluk=
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c h1:JHHhtb9XWJrGNMcrVP6vyzO4dusgi/HnceHTgxSejUM=
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa h1:XKAhUk/dtp+CV0VO6mhG2V7jA9vbcGcnYF/Ay9NjZrY=
github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
github.com/fatih/color v1.3.0 h1:YehCCcyeQ6Km0D6+IapqPinWBK6y+0eB5umvZXK9WPs=
github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c h1:zqAKixg3cTcIasAMJV+EcfVbWwLpOZ7LeoWJvcuD/5Q=
github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 h1:giknQ4mEuDFmmHSrGcbargOuLHQGtywqo4mheITex54=
github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 h1:E0whKxgp2ojts0FDgUA8dl62bmH0LxKanMoBr6MDTDM=
github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad h1:eMxs9EL0PvIGS9TTtxg4R+JxuPGav82J8rA+GFnY7po=
github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3 h1:DqD8eigqlUm0+znmx7zhL0xvTW3+e1jCekJMfBUADWI=
github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883 h1:FSeK4fZCo8u40n2JMnyAsd6x7+SbvoOMHvQOU/n10P4=
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21 h1:F/iKcka0K2LgnKy/fgSBf235AETtm1n1TvBzqu40LE0=
github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw=
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 h1:USWjF42jDCSEeikX/G1g40ZWnsPXN5WkZ4jMHZWyBK4=
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk=
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 h1:goeTyGkArOZIVOMA0dQbyuPWGNQJZGPwPu/QS9GlpnA=
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 h1:ZeU+auZj1iNzN8iVhff6M38Mfu73FQiJve/GEXYJBjE=
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d h1:ouzpe+YhpIfnjR40gSkJHWsvXmB6TiPKqMtMpfyU9DE=
github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY=
github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 h1:8DPul/X0IT/1TNMIxoKLwdemEOBBHDC/K4EB16Cw5WE=
github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20=
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk=
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 h1:LepdCS8Gf/MVejFIt8lsiexZATdoGVyp5bcyS+rYoUI=
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772 h1:hhsSf/5z74Ck/DJYc+R8zpq8KGm7uJvpdLRQED/IedA=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI=
gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
@ -1,21 +0,0 @@
MIT License

Copyright (c) Microsoft Corporation. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
@ -1,284 +0,0 @@
package pipeline

import (
	"context"
	"github.com/mattn/go-ieproxy"
	"net"
	"net/http"
	"os"
	"time"
)

// The Factory interface represents an object that can create its Policy object. Each HTTP request sent
// requires that this Factory create a new instance of its Policy object.
type Factory interface {
	New(next Policy, po *PolicyOptions) Policy
}

// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface.
type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc

// New calls f(next, po).
func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy {
	return f(next, po)
}

// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process
// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned
// Response goes backward through the linked-list for additional processing.
// NOTE: Request is passed by value so changes do not change the caller's version of
// the request. However, Request has some fields that reference mutable objects (not strings).
// These references are copied; a deep copy is not performed. Specifically, this means that
// you should avoid modifying the objects referred to by these fields: URL, Header, Body,
// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response.
type Policy interface {
	Do(ctx context.Context, request Request) (Response, error)
}

// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface.
type PolicyFunc func(ctx context.Context, request Request) (Response, error)

// Do calls f(ctx, request).
func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) {
	return f(ctx, request)
}
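// Illustrative sketch (not part of the vendored file): the two function
// adapters above let an ordinary closure serve as both Factory and Policy
// without declaring new struct types. For example, a request-logging factory
// (assuming Request wraps Go's *http.Request, as the package doc describes):
//
//	func loggingFactory() Factory {
//		return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
//			return func(ctx context.Context, request Request) (Response, error) {
//				po.Log(LogInfo, "sending "+request.Method+" "+request.URL.String())
//				return next.Do(ctx, request) // forward to the next Policy in the linked-list
//			}
//		})
//	}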
// Options configures a Pipeline's behavior.
type Options struct {
	HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests.
	Log        LogOptions
}

// LogLevel tells a logger the minimum level to log. When code reports a log entry,
// the LogLevel indicates the level of the log entry. The logger only records entries
// whose level is at least the level it was told to log. See the Log* constants.
// For example, if a logger is configured with LogError, then LogError, LogPanic,
// and LogFatal entries will be logged; lower level entries are ignored.
type LogLevel uint32

const (
	// LogNone tells a logger not to log any entries passed to it.
	LogNone LogLevel = iota

	// LogFatal tells a logger to log all LogFatal entries passed to it.
	LogFatal

	// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
	LogPanic

	// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
	LogError

	// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogWarning

	// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogInfo

	// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogDebug
)

// LogOptions configures the pipeline's logging mechanism & level filtering.
type LogOptions struct {
	Log func(level LogLevel, message string)

	// ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
	// An application can return different values over its lifetime; this allows the application to dynamically
	// alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
	// you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
	// Usually, the function will be implemented simply like this: return level <= LogWarning
	ShouldLog func(level LogLevel) bool
}
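// Illustrative sketch (not part of the vendored file): LogOptions wired to the
// standard library logger, keeping entries of LogWarning severity or worse,
// following the "return level <= LogWarning" suggestion above (assumes the
// standard "log" package is imported):
//
//	opts := Options{
//		Log: LogOptions{
//			Log:       func(level LogLevel, message string) { log.Print(message) },
//			ShouldLog: func(level LogLevel) bool { return level <= LogWarning },
//		},
//	}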
type pipeline struct {
	factories []Factory
	options   Options
}

// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
//
// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
type Pipeline interface {
	Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
}

// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
func NewPipeline(factories []Factory, o Options) Pipeline {
	if o.HTTPSender == nil {
		o.HTTPSender = newDefaultHTTPClientFactory()
	}
	if o.Log.Log == nil {
		o.Log.Log = func(LogLevel, string) {} // No-op logger
	}
	return &pipeline{factories: factories, options: o}
}
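// Illustrative sketch (not part of the vendored file): assembling a reusable
// Pipeline whose factory slice reserves one slot for a per-method policy via
// MethodFactoryMarker; loggingFactory is the hypothetical factory sketched
// after the PolicyFunc adapter above:
//
//	p := NewPipeline(
//		[]Factory{
//			loggingFactory(),      // runs for every request
//			MethodFactoryMarker(), // replaced by the methodFactory argument of Do
//		},
//		Options{}, // nil HTTPSender: the package's default http.Client is used
//	)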
// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
// ultimately sends the transformed HTTP request over the network.
func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
	response, err := p.newPolicies(methodFactory).Do(ctx, request)
	request.close()
	return response, err
}

func (p *pipeline) newPolicies(methodFactory Factory) Policy {
	// The last Policy is the one that actually sends the request over the wire and gets the response.
	// It is overridable via the Options' HTTPSender field.
	po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
	next := p.options.HTTPSender.New(nil, po)

	// Walk over the slice of Factory objects in reverse (from wire to API)
	markers := 0
	for i := len(p.factories) - 1; i >= 0; i-- {
		factory := p.factories[i]
		if _, ok := factory.(methodFactoryMarker); ok {
			markers++
			if markers > 1 {
				panic("MethodFactoryMarker can only appear once in the pipeline")
			}
			if methodFactory != nil {
				// Replace MethodFactoryMarker with passed-in methodFactory
				next = methodFactory.New(next, po)
			}
		} else {
			// Use the slice's Factory to construct its Policy
			next = factory.New(next, po)
		}
	}

	// Each Factory has created its Policy
	if markers == 0 && methodFactory != nil {
		panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
	}
	return next // Return head of the Policy object linked-list
}
// A PolicyOptions represents optional information that can be used by a node in the
// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
// which passes it (if desired) to the Policy object it creates. Today, the Policy object
// uses the options to perform logging. But, in the future, this could be used for more.
type PolicyOptions struct {
	pipeline *pipeline
}

// ShouldLog returns true if the specified log level should be logged.
func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
	if po.pipeline.options.Log.ShouldLog != nil {
		return po.pipeline.options.Log.ShouldLog(level)
	}
	return false
}

// Log logs a string to the Pipeline's Logger.
func (po *PolicyOptions) Log(level LogLevel, msg string) {
	if !po.ShouldLog(level) {
		return // Short circuit message formatting if we're not logging it
	}

	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	po.pipeline.options.Log.Log(level, msg)

	// If logger doesn't handle fatal/panic, we'll do it here.
	if level == LogFatal {
		os.Exit(1)
	} else if level == LogPanic {
		panic(msg)
	}
}
var pipelineHTTPClient = newDefaultHTTPClient()

func newDefaultHTTPClient() *http.Client {
	// We want the Transport to have a large connection pool
	return &http.Client{
		Transport: &http.Transport{
			Proxy: ieproxy.GetProxyFunc(),
			// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
			Dial /*Context*/ : (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
				DualStack: true,
			}).Dial, /*Context*/
			MaxIdleConns:           0, // No limit
			MaxIdleConnsPerHost:    100,
			IdleConnTimeout:        90 * time.Second,
			TLSHandshakeTimeout:    10 * time.Second,
			ExpectContinueTimeout:  1 * time.Second,
			DisableKeepAlives:      false,
			DisableCompression:     false,
			MaxResponseHeaderBytes: 0,
			//ResponseHeaderTimeout:  time.Duration{},
			//ExpectContinueTimeout:  time.Duration{},
		},
	}
}

// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests using Go's default http.Client.
func newDefaultHTTPClientFactory() Factory {
	return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
		return func(ctx context.Context, request Request) (Response, error) {
			r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
			if err != nil {
				err = NewError(err, "HTTP request failed")
			}
			return NewHTTPResponse(r), err
		}
	})
}
var mfm = methodFactoryMarker{} // Singleton

// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
// MethodMarkerFactory object is replaced with the specified methodFactory object. If nil is passed for Do's
// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
func MethodFactoryMarker() Factory {
	return mfm
}

type methodFactoryMarker struct {
}

func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
	panic("methodFactoryMarker policy should have been replaced with a method policy")
}

// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog.
// By default no implementation is provided here, because pipeline may be used in many different
// contexts, so the correct implementation is context-dependent.
type LogSanitizer interface {
	SanitizeLogMessage(raw string) string
}

var sanitizer LogSanitizer
var enableForceLog = true

// SetLogSanitizer can be called to supply a custom LogSanitizer.
// There is no threadsafety or locking on the underlying variable,
// so call this function just once at startup of your application
// (don't later try to change the sanitizer on the fly).
func SetLogSanitizer(s LogSanitizer) {
	sanitizer = s
}

// SetForceLogEnabled can be used to disable ForceLog.
// There is no threadsafety or locking on the underlying variable,
// so call this function just once at startup of your application
// (don't later try to change the setting on the fly).
func SetForceLogEnabled(enable bool) {
	enableForceLog = enable
}
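// Illustrative sketch (not part of the vendored file): a hypothetical
// sanitizer that redacts a known secret before ForceLog writes the message
// out, installed once at startup as the SetLogSanitizer comment advises
// (assumes the standard "strings" package is imported):
//
//	type tokenSanitizer struct{ secret string }
//
//	func (t tokenSanitizer) SanitizeLogMessage(raw string) string {
//		return strings.ReplaceAll(raw, t.secret, "[REDACTED]")
//	}
//
//	func init() { SetLogSanitizer(tokenSanitizer{secret: os.Getenv("API_TOKEN")}) }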
@ -1,14 +0,0 @@
package pipeline

// ForceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux).
func ForceLog(level LogLevel, msg string) {
	if !enableForceLog {
		return
	}
	if sanitizer != nil {
		msg = sanitizer.SanitizeLogMessage(msg)
	}
	forceLog(level, msg)
}
@ -1,33 +0,0 @@
// +build !windows,!nacl,!plan9

package pipeline

import (
	"log"
	"log/syslog"
)

// forceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux).
func forceLog(level LogLevel, msg string) {
	if defaultLogger == nil {
		return // Return fast if we failed to create the logger.
	}
	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	switch level {
	case LogFatal:
		defaultLogger.Fatal(msg)
	case LogPanic:
		defaultLogger.Panic(msg)
	case LogError, LogWarning, LogInfo:
		defaultLogger.Print(msg)
	}
}

var defaultLogger = func() *log.Logger {
	l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
	return l
}()
@ -1,61 +0,0 @@
package pipeline

import (
	"os"
	"syscall"
	"unsafe"
)

// forceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux).
func forceLog(level LogLevel, msg string) {
	var el eventType
	switch level {
	case LogError, LogFatal, LogPanic:
		el = elError
	case LogWarning:
		el = elWarning
	case LogInfo:
		el = elInfo
	}
	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	reportEvent(el, 0, msg)
}

type eventType int16

const (
	elSuccess eventType = 0
	elError   eventType = 1
	elWarning eventType = 2
	elInfo    eventType = 4
)

var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
	advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
	registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")

	sourceName, _ := os.Executable()
	sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
	handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
	if lastErr == nil { // On error, logging is a no-op
		return func(eventType eventType, eventID int32, msg string) {}
	}
	reportEvent := advAPI32.MustFindProc("ReportEventW")
	return func(eventType eventType, eventID int32, msg string) {
		s, _ := syscall.UTF16PtrFromString(msg)
		_, _, _ = reportEvent.Call(
			uintptr(handle),             // HANDLE  hEventLog
			uintptr(eventType),          // WORD    wType
			uintptr(0),                  // WORD    wCategory
			uintptr(eventID),            // DWORD   dwEventID
			uintptr(0),                  // PSID    lpUserSid
			uintptr(1),                  // WORD    wNumStrings
			uintptr(0),                  // DWORD   dwDataSize
			uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
			uintptr(0))                  // LPVOID  lpRawData
	}
}()
@ -1,161 +0,0 @@
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.

/*
Package pipeline implements an HTTP request/response middleware pipeline whose
policy objects mutate an HTTP request's URL, query parameters, and/or headers before
the request is sent over the wire.

Not all policy objects mutate an HTTP request; some policy objects simply impact the
flow of requests/responses by performing operations such as logging, retry policies,
timeouts, failure injection, and deserialization of response payloads.

Implementing the Policy Interface

To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
parameters, inject a failure, etc. Your Do method must then forward the HTTP request to the next Policy object
in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
object sends the HTTP request over the network (by calling the HTTPSender's Do method).

When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
(in reverse order). The Policy object can log the response, retry the operation if it failed due to a transient
failure or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response
to the code that initiated the original HTTP request.

Here is a template for how to define a pipeline.Policy object:

	type myPolicy struct {
		node PolicyNode
		// TODO: Add configuration/setting fields here (if desired)...
	}

	func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
		// TODO: Mutate/process the HTTP request here...
		response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
		// TODO: Mutate/process the HTTP response here...
		return response, err // Return response/error to previous Policy
	}

Implementing the Factory Interface

Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and
a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object
passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object.

Here is a template for how to define a pipeline.Factory object:

	// NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
	// this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
	type myPolicyFactory struct {
		// TODO: Add any configuration/setting fields if desired...
	}

	func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy {
		return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
	}

Using your Factory and Policy objects via a Pipeline

To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
this slice to the pipeline.NewPipeline function.

	func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline

This function also requires an object implementing the HTTPSender interface. For simple scenarios,
passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually
send the HTTP request over the network. For more advanced scenarios, you can pass your own HTTPSender
object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
or other objects that can simulate the network requests for testing purposes.

Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a
context.Context for cancelling the HTTP request (if desired).

	type Pipeline interface {
		Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
	}

Do iterates over the slice of Factory objects and tells each one to create its corresponding
Policy object. After the linked-list of Policy objects has been created, Do calls the first
Policy object passing it the Context & HTTP request parameters. These parameters now flow through
all the Policy objects giving each object a chance to look at and/or mutate the HTTP request.
The last Policy object sends the message over the network.

When the network operation completes, the HTTP response and error return values pass
back through the same Policy objects in reverse order. Most Policy objects ignore the
response/error but some log the result, retry the operation (depending on the exact
reason the operation failed), or deserialize the response's body. Your own Policy
objects can do whatever they like when processing outgoing requests or incoming responses.

Note that after an I/O request runs to completion, the Policy objects for that request
are garbage collected. However, Pipeline objects (like Factory objects) are goroutine-safe allowing
them to be created once and reused over many I/O operations. This allows for efficient use of
memory and also makes them safely usable by multiple goroutines concurrently.

Inserting a Method-Specific Factory into the Linked-List of Policy Objects

While Pipeline and Factory objects can be reused over many different operations, it is
common to have special behavior for a specific operation/method. For example, a method
may need to deserialize the response's body to an instance of a specific data type.
To accommodate this, the Pipeline's Do method takes an additional method-specific
Factory object. The Do method tells this Factory to create a Policy object and
injects this method-specific Policy object into the linked-list of Policy objects.

When creating a Pipeline object, the slice of Factory objects passed must have 1
(and only 1) entry marking where the method-specific Factory should be injected.
The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:

	func MethodFactoryMarker() pipeline.Factory

Creating an HTTP Request Object

The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:

	func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error)

To this function, you must pass a pipeline.RequestOptions that looks like this:

	type RequestOptions struct {
		// The readable and seekable stream to be sent to the server as the request's body.
		Body io.ReadSeeker

		// The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
		Progress ProgressReceiver
	}

The method and struct ensure that the request's body stream is a read/seekable stream.
A seekable stream is required so that upon retry, the final Policy object can seek
the stream back to the beginning before retrying the network request and re-uploading the
body. In addition, you can associate a ProgressReceiver callback function which will be
invoked periodically to report progress while bytes are being read from the body stream
and sent over the network.

Processing the HTTP Response

When an HTTP response comes in from the network, a reference to Go's http.Response struct is
embedded in a struct that implements the pipeline.Response interface:

	type Response interface {
		Response() *http.Response
	}

This interface is returned through all the Policy objects. Each Policy object can call the Response
interface's Response method to examine (or mutate) the embedded http.Response object.

A Policy object can internally define another struct (implementing the pipeline.Response interface)
that embeds an http.Response and adds additional fields and return this structure to other Policy
objects. This allows a Policy object to deserialize the body to some other struct and return the
original http.Response and the additional struct back through the Policy chain. Other Policy objects
can see the Response but cannot see the additional struct with the deserialized body. After all the
Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
The caller of this method can perform a type assertion attempting to get back to the struct type
really returned by the Policy object. If the type assertion is successful, the caller now has
access to both the http.Response and the deserialized struct object.
*/
package pipeline
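The type-assertion pattern described at the end of the package doc looks roughly like this in caller code (an illustrative sketch; payloadResponse is a hypothetical policy-defined type, not part of the package, and the import path is taken from go.sum above):

	package main

	import (
		"context"

		"github.com/Azure/azure-pipeline-go/pipeline"
	)

	// payloadResponse embeds the pipeline.Response interface and adds a
	// deserialized body, as a final deserializing policy might return it.
	type payloadResponse struct {
		pipeline.Response
		payload []byte
	}

	// extractPayload recovers the richer struct actually returned by the
	// last deserializing policy in the chain, if there was one.
	func extractPayload(ctx context.Context, p pipeline.Pipeline, req pipeline.Request) ([]byte, error) {
		resp, err := p.Do(ctx, nil, req) // nil methodFactory: any MethodFactoryMarker is skipped
		if err != nil {
			return nil, err
		}
		if pr, ok := resp.(*payloadResponse); ok {
			return pr.payload, nil
		}
		return nil, nil // no deserializing policy was in the chain
	}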
@ -1,181 +0,0 @@ |
||||
package pipeline |
||||
|
||||
import ( |
||||
"fmt" |
||||
"runtime" |
||||
) |
||||
|
||||
type causer interface { |
||||
Cause() error |
||||
} |
||||
|
||||
func errorWithPC(msg string, pc uintptr) string { |
||||
s := "" |
||||
if fn := runtime.FuncForPC(pc); fn != nil { |
||||
file, line := fn.FileLine(pc) |
||||
s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line) |
||||
} |
||||
s += msg + "\n\n" |
||||
return s |
||||
} |
||||
|
||||
func getPC(callersToSkip int) uintptr { |
||||
// Get the PC of Initialize method's caller.
|
||||
pc := [1]uintptr{} |
||||
_ = runtime.Callers(callersToSkip, pc[:]) |
||||
return pc[0] |
||||
} |
||||
|
||||
// ErrorNode can be an embedded field in a private error object. This field
|
||||
// adds Program Counter support and a 'cause' (reference to a preceding error).
|
||||
// When initializing an error type with this embedded field, initialize the
// ErrorNode field by calling ErrorNode{}.Initialize(cause, callersToSkip).
|
||||
type ErrorNode struct { |
||||
pc uintptr // Represents a Program Counter that you can get symbols for.
|
||||
cause error // Refers to the preceding error (or nil)
|
||||
} |
||||
|
||||
// Error returns a string with the PC's symbols or "" if the PC is invalid.
|
||||
// When defining a new error type, have its Error method call this one passing
|
||||
// it the string representation of the error.
|
||||
func (e *ErrorNode) Error(msg string) string { |
||||
s := errorWithPC(msg, e.pc) |
||||
if e.cause != nil { |
||||
s += e.cause.Error() + "\n" |
||||
} |
||||
return s |
||||
} |
||||
|
||||
// Cause returns the error that preceded this error.
|
||||
func (e *ErrorNode) Cause() error { return e.cause } |
||||
|
||||
// Temporary returns true if the error occurred due to a temporary condition.
|
||||
func (e ErrorNode) Temporary() bool { |
||||
type temporary interface { |
||||
Temporary() bool |
||||
} |
||||
|
||||
for err := e.cause; err != nil; { |
||||
if t, ok := err.(temporary); ok { |
||||
return t.Temporary() |
||||
} |
||||
|
||||
if cause, ok := err.(causer); ok { |
||||
err = cause.Cause() |
||||
} else { |
||||
err = nil |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// Timeout returns true if the error occurred due to time expiring.
|
||||
func (e ErrorNode) Timeout() bool { |
||||
type timeout interface { |
||||
Timeout() bool |
||||
} |
||||
|
||||
for err := e.cause; err != nil; { |
||||
if t, ok := err.(timeout); ok { |
||||
return t.Timeout() |
||||
} |
||||
|
||||
if cause, ok := err.(causer); ok { |
||||
err = cause.Cause() |
||||
} else { |
||||
err = nil |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// Initialize is used to initialize an embedded ErrorNode field.
|
||||
// It captures the caller's program counter and saves the cause (preceding error).
|
||||
// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
|
||||
// value of 3 is very common but, depending on your code nesting, you may need
|
||||
// a different value.
|
||||
func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode { |
||||
pc := getPC(callersToSkip) |
||||
return ErrorNode{pc: pc, cause: cause} |
||||
} |
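
// Illustrative sketch (not part of the original file): a custom error type that
// embeds ErrorNode and initializes it with the conventional callersToSkip of 3,
// mirroring the pcError pattern defined below:
//
//	type parseError struct {
//		ErrorNode
//		msg string
//	}
//
//	func (e *parseError) Error() string { return e.ErrorNode.Error(e.msg) }
//
//	func newParseError(cause error) error {
//		return &parseError{ErrorNode: ErrorNode{}.Initialize(cause, 3), msg: "parse failed"}
//	}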
||||
|
||||
// Cause walks all the preceding errors and returns the originating error.
|
||||
func Cause(err error) error { |
||||
for err != nil { |
||||
cause, ok := err.(causer) |
||||
if !ok { |
||||
break |
||||
} |
||||
err = cause.Cause() |
||||
} |
||||
return err |
||||
} |
||||
|
||||
// ErrorNodeNoCause can be an embedded field in a private error object. This field
|
||||
// adds Program Counter support.
|
||||
// When initializing an error type with this embedded field, initialize the
|
||||
// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
|
||||
type ErrorNodeNoCause struct { |
||||
pc uintptr // Represents a Program Counter that you can get symbols for.
|
||||
} |
||||
|
||||
// Error returns a string with the PC's symbols or "" if the PC is invalid.
|
||||
// When defining a new error type, have its Error method call this one passing
|
||||
// it the string representation of the error.
|
||||
func (e *ErrorNodeNoCause) Error(msg string) string { |
||||
return errorWithPC(msg, e.pc) |
||||
} |
||||
|
||||
// Temporary returns true if the error occurred due to a temporary condition.
|
||||
func (e ErrorNodeNoCause) Temporary() bool { |
||||
return false |
||||
} |
||||
|
||||
// Timeout returns true if the error occurred due to time expiring.
|
||||
func (e ErrorNodeNoCause) Timeout() bool { |
||||
return false |
||||
} |
||||
|
||||
// Initialize is used to initialize an embedded ErrorNodeNoCause field.
|
||||
// It captures the caller's program counter.
|
||||
// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
|
||||
// value of 3 is very common but, depending on your code nesting, you may need
|
||||
// a different value.
|
||||
func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause { |
||||
pc := getPC(callersToSkip) |
||||
return ErrorNodeNoCause{pc: pc} |
||||
} |
||||
|
||||
// NewError creates a simple string error (like errors.New), but this
// error also captures the caller's Program Counter and the preceding error (if provided).
|
||||
func NewError(cause error, msg string) error { |
||||
if cause != nil { |
||||
return &pcError{ |
||||
ErrorNode: ErrorNode{}.Initialize(cause, 3), |
||||
msg: msg, |
||||
} |
||||
} |
||||
return &pcErrorNoCause{ |
||||
ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3), |
||||
msg: msg, |
||||
} |
||||
} |
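
// Illustrative sketch (not part of the original file): wrapping a lower-level
// failure so the resulting error carries the caller's PC and the cause chain
// (the file name and caller code are hypothetical):
//
//	f, err := os.Open("settings.json")
//	if err != nil {
//		return NewError(err, "failed to open settings file")
//	}
//	defer f.Close()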
||||
|
||||
// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
|
||||
type pcError struct { |
||||
ErrorNode |
||||
msg string |
||||
} |
||||
|
||||
// Error satisfies the error interface. It shows the error with Program Counter
|
||||
// symbols and calls Error on the preceding error so you can see the full error chain.
|
||||
func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) } |
||||
|
||||
// pcErrorNoCause is a simple string error (like errors.New) with an ErrorNodeNoCause (PC).
|
||||
type pcErrorNoCause struct { |
||||
ErrorNodeNoCause |
||||
msg string |
||||
} |
||||
|
||||
// Error satisfies the error interface. It shows the error with Program Counter symbols.
|
||||
func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) } |
@ -1,82 +0,0 @@ |
||||
package pipeline |
||||
|
||||
import "io" |
||||
|
||||
// ********** The following is common between the request body AND the response body.
|
||||
|
||||
// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
|
||||
type ProgressReceiver func(bytesTransferred int64) |
||||
|
||||
// ********** The following are specific to the request body (a ReadSeekCloser)
|
||||
|
||||
// This struct is used when sending a body to the network
|
||||
type requestBodyProgress struct { |
||||
requestBody io.ReadSeeker // Seeking is required to support retries
|
||||
pr ProgressReceiver |
||||
} |
||||
|
||||
// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
|
||||
func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker { |
||||
if pr == nil { |
||||
panic("pr must not be nil") |
||||
} |
||||
return &requestBodyProgress{requestBody: requestBody, pr: pr} |
||||
} |
||||
|
||||
// Read reads a block of data from an inner stream and reports progress
|
||||
func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) { |
||||
n, err = rbp.requestBody.Read(p) |
||||
if err != nil { |
||||
return |
||||
} |
||||
// Invokes the user's callback method to report progress
|
||||
position, err := rbp.requestBody.Seek(0, io.SeekCurrent) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
rbp.pr(position) |
||||
return |
||||
} |
||||
|
||||
func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) { |
||||
return rbp.requestBody.Seek(offset, whence) |
||||
} |
||||
|
||||
// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
|
||||
func (rbp *requestBodyProgress) Close() error { |
||||
if c, ok := rbp.requestBody.(io.Closer); ok { |
||||
return c.Close() |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// ********** The following are specific to the response body (a ReadCloser)
|
||||
|
||||
// This struct is used when receiving a body from the network
|
||||
type responseBodyProgress struct { |
||||
responseBody io.ReadCloser |
||||
pr ProgressReceiver |
||||
offset int64 |
||||
} |
||||
|
||||
// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
|
||||
func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser { |
||||
if pr == nil { |
||||
panic("pr must not be nil") |
||||
} |
||||
return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0} |
||||
} |
||||
|
||||
// Read reads a block of data from an inner stream and reports progress
|
||||
func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) { |
||||
n, err = rbp.responseBody.Read(p) |
||||
rbp.offset += int64(n) |
||||
|
||||
// Invokes the user's callback method to report progress
|
||||
rbp.pr(rbp.offset) |
||||
return |
||||
} |
||||
|
||||
func (rbp *responseBodyProgress) Close() error { |
||||
return rbp.responseBody.Close() |
||||
} |
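
// Illustrative sketch (not part of the original file): observing download progress
// on an http.Response body (the URL is hypothetical):
//
//	resp, err := http.Get("https://example.com/large-file")
//	if err != nil {
//		return err
//	}
//	body := NewResponseBodyProgress(resp.Body, func(bytesTransferred int64) {
//		fmt.Printf("downloaded %d bytes\n", bytesTransferred)
//	})
//	defer body.Close()
//	_, err = io.Copy(ioutil.Discard, body)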
@ -1,147 +0,0 @@ |
||||
package pipeline |
||||
|
||||
import ( |
||||
"io" |
||||
"net/http" |
||||
"net/url" |
||||
"strconv" |
||||
) |
||||
|
||||
// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
|
||||
type Request struct { |
||||
*http.Request |
||||
} |
||||
|
||||
// NewRequest initializes a new HTTP request object around the given method, URL, and optional body.
|
||||
func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) { |
||||
// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
|
||||
|
||||
// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
|
||||
request.Request = &http.Request{ |
||||
Method: method, |
||||
URL: &url, |
||||
Proto: "HTTP/1.1", |
||||
ProtoMajor: 1, |
||||
ProtoMinor: 1, |
||||
Header: make(http.Header), |
||||
Host: url.Host, |
||||
} |
||||
|
||||
if body != nil { |
||||
err = request.SetBody(body) |
||||
} |
||||
return |
||||
} |
||||
|
||||
// SetBody sets the body and content length; it assumes body is not nil.
|
||||
func (r Request) SetBody(body io.ReadSeeker) error { |
||||
size, err := body.Seek(0, io.SeekEnd) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if _, err := body.Seek(0, io.SeekStart); err != nil {
return err
}
||||
r.ContentLength = size |
||||
r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)} |
||||
|
||||
if size != 0 { |
||||
r.Body = &retryableRequestBody{body: body} |
||||
r.GetBody = func() (io.ReadCloser, error) { |
||||
_, err := body.Seek(0, io.SeekStart) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return r.Body, nil |
||||
} |
||||
} else { |
||||
// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
|
||||
r.Body = http.NoBody |
||||
r.GetBody = func() (io.ReadCloser, error) { |
||||
return http.NoBody, nil |
||||
} |
||||
|
||||
// close the user-provided empty body
|
||||
if c, ok := body.(io.Closer); ok { |
||||
c.Close() |
||||
} |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
|
||||
// of its Method, URL, Host, Proto(Major/Minor), Header, ContentLength, Close,
|
||||
// RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
|
||||
// Cancel, Response, and ctx fields. Copy panics if any of these fields are
|
||||
// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
|
||||
func (r Request) Copy() Request { |
||||
if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil { |
||||
panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" + |
||||
"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.") |
||||
} |
||||
clone := *r.Request // Copy the request (renamed from "copy" to avoid shadowing the builtin)
urlCopy := *(r.Request.URL) // Copy the URL
clone.URL = &urlCopy
clone.Header = http.Header{} // Copy the header
for k, vs := range r.Header {
for _, value := range vs {
clone.Header.Add(k, value)
}
}
return Request{Request: &clone} // Return the copy
}
||||
|
||||
func (r Request) close() error { |
||||
if r.Body != nil && r.Body != http.NoBody { |
||||
c, ok := r.Body.(*retryableRequestBody) |
||||
if !ok { |
||||
panic("unexpected request body type (should be *retryableReadSeekerCloser)") |
||||
} |
||||
return c.realClose() |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
|
||||
func (r Request) RewindBody() error { |
||||
if r.Body != nil && r.Body != http.NoBody { |
||||
s, ok := r.Body.(io.Seeker) |
||||
if !ok { |
||||
panic("unexpected request body type (should be io.Seeker)") |
||||
} |
||||
|
||||
// Reset the stream back to the beginning
|
||||
_, err := s.Seek(0, io.SeekStart) |
||||
return err |
||||
} |
||||
return nil |
||||
} |
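
// Illustrative sketch (not part of the original file): a retry policy would rewind
// the body before every attempt so the full payload is re-sent (maxAttempts, next,
// ctx, and response are hypothetical names from the surrounding policy):
//
//	for attempt := 1; attempt <= maxAttempts; attempt++ {
//		if err := request.RewindBody(); err != nil {
//			return nil, err
//		}
//		response, err = next.Do(ctx, request) // next is the next Policy in the chain
//		if err == nil {
//			break
//		}
//	}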
||||
|
||||
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
|
||||
|
||||
// This struct is used when sending a body to the network
|
||||
type retryableRequestBody struct { |
||||
body io.ReadSeeker // Seeking is required to support retries
|
||||
} |
||||
|
||||
// Read reads a block of data from the inner stream.
|
||||
func (b *retryableRequestBody) Read(p []byte) (n int, err error) { |
||||
return b.body.Read(p) |
||||
} |
||||
|
||||
func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { |
||||
return b.body.Seek(offset, whence) |
||||
} |
||||
|
||||
func (b *retryableRequestBody) Close() error { |
||||
// We don't want the underlying transport to close the request body on transient failures so this is a nop.
|
||||
// The pipeline closes the request body upon success.
|
||||
return nil |
||||
} |
||||
|
||||
func (b *retryableRequestBody) realClose() error { |
||||
if c, ok := b.body.(io.Closer); ok { |
||||
return c.Close() |
||||
} |
||||
return nil |
||||
} |
@ -1,74 +0,0 @@ |
||||
package pipeline |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"net/http" |
||||
"sort" |
||||
"strings" |
||||
) |
||||
|
||||
// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
|
||||
// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
|
||||
// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
|
||||
// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
|
||||
// to the expected struct and returns the struct to its caller.
|
||||
type Response interface { |
||||
Response() *http.Response |
||||
} |
||||
|
||||
// This is the default struct that has the http.Response.
|
||||
// A method can replace this struct with its own struct containing an http.Response
|
||||
// field and any other additional fields.
|
||||
type httpResponse struct { |
||||
response *http.Response |
||||
} |
||||
|
||||
// NewHTTPResponse is typically called by a Policy object to return a Response object.
|
||||
func NewHTTPResponse(response *http.Response) Response { |
||||
return &httpResponse{response: response} |
||||
} |
||||
|
||||
// This method satisfies the public Response interface's Response method
|
||||
func (r httpResponse) Response() *http.Response { |
||||
return r.response |
||||
} |
||||
|
||||
// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If response and/or err are
// not nil, then these are also written into the Buffer.
|
||||
func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) { |
||||
// Write the request into the buffer.
|
||||
fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n") |
||||
writeHeader(b, request.Header) |
||||
if response != nil { |
||||
fmt.Fprintln(b, " --------------------------------------------------------------------------------") |
||||
fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n") |
||||
writeHeader(b, response.Header) |
||||
} |
||||
if err != nil { |
||||
fmt.Fprintln(b, " --------------------------------------------------------------------------------") |
||||
fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") |
||||
} |
||||
} |
||||
|
||||
// writeHeader appends an HTTP request's or response's header into a Buffer.
|
||||
func writeHeader(b *bytes.Buffer, header map[string][]string) { |
||||
if len(header) == 0 { |
||||
b.WriteString(" (no headers)\n") |
||||
return |
||||
} |
||||
keys := make([]string, 0, len(header)) |
||||
// Alphabetize the headers
|
||||
for k := range header { |
||||
keys = append(keys, k) |
||||
} |
||||
sort.Strings(keys) |
||||
for _, k := range keys { |
||||
// Redact the value of any Authorization header to prevent security information from persisting in logs
|
||||
value := interface{}("REDACTED") |
||||
if !strings.EqualFold(k, "Authorization") { |
||||
value = header[k] |
||||
} |
||||
fmt.Fprintf(b, " %s: %+v\n", k, value) |
||||
} |
||||
} |
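
// Illustrative sketch (not part of the original file): a logging policy could use
// WriteRequestWithResponse to build one log entry per round trip (the "log" usage
// and the request/response variables are assumptions):
//
//	var b bytes.Buffer
//	WriteRequestWithResponse(&b, request.Request, response.Response(), err)
//	log.Print(b.String())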
@ -1,9 +0,0 @@ |
||||
package pipeline |
||||
|
||||
const ( |
||||
// UserAgent is the string to be used in the user agent string when making requests.
|
||||
UserAgent = "azure-pipeline-go/" + Version |
||||
|
||||
// Version is the semantic version (see http://semver.org) of the pipeline package.
|
||||
Version = "0.2.1" |
||||
) |
@ -1,280 +0,0 @@ |
||||
# CHANGELOG |
||||
|
||||
----- |
||||
|
||||
## `v6.0.0-beta` |
||||
|
||||
| api | version | note | |
||||
|:-------------------------------|:-------------------|:-----------------------------------| |
||||
| arm/authorization | no change | code refactoring | |
||||
| arm/batch | no change | code refactoring | |
||||
| arm/compute | no change | code refactoring | |
||||
| arm/containerservice | 2016-03-30 | returned |
||||
| arm/datalake-analytics/account | 2015-10-01-preview | new | |
||||
| arm/datalake-store/filesystem | no change | moved to datalake-store/filesystem | |
||||
| arm/eventhub | no change | code refactoring | |
||||
| arm/intune | no change | code refactoring | |
||||
| arm/iothub | no change | code refactoring | |
||||
| arm/keyvault | no change | code refactoring | |
||||
| arm/mediaservices | no change | code refactoring | |
||||
| arm/network | no change | code refactoring | |
||||
| arm/notificationhubs | no change | code refactoring | |
||||
| arm/redis | no change | code refactoring | |
||||
| arm/resources/resources | no change | code refactoring | |
||||
| arm/resources/links | 2016-09-01 | new | |
||||
| arm/resources/locks | 2016-09-01 | updated | |
||||
| arm/resources/policy | no change | code refactoring | |
||||
| arm/resources/resources | 2016-09-01 | updated | |
||||
| arm/servermanagement | 2016-07-01-preview | updated | |
||||
| arm/web | no change | code refactoring | |
||||
|
||||
- storage: Added blob lease functionality and tests |
||||
|
||||
## `v5.0.0-beta` |
||||
|
||||
| api | version | note | |
||||
|:------------------------------|:--------------------|:-----------------| |
||||
| arm/network | 2016-09-01 | updated | |
||||
| arm/servermanagement | 2015-07-01-preview | new | |
||||
| arm/eventhub | 2015-08-01 | new | |
||||
| arm/containerservice | -- | removed | |
||||
| arm/resources/subscriptions | no change | code refactoring | |
||||
| arm/resources/features | no change | code refactoring | |
||||
| arm/resources/resources | no change | code refactoring | |
||||
| arm/datalake-store/accounts | no change | code refactoring | |
||||
| arm/datalake-store/filesystem | no change | code refactoring | |
||||
| arm/notificationhubs | no change | code refactoring | |
||||
| arm/redis | no change | code refactoring | |
||||
|
||||
- storage: Add more file storage share operations. |
||||
- azure-rest-api-specs/commit/b8cdc2c50a0872fc0039f20c2b6b33aa0c2af4bf |
||||
- Uses go-autorest v7.2.1 |
||||
|
||||
## `v4.0.0-beta` |
||||
|
||||
- arm/logic: breaking change in package logic. |
||||
- arm: parameter validation code added in all arm packages. |
||||
- Uses go-autorest v7.2.0. |
||||
|
||||
|
||||
## `v3.2.0-beta` |
||||
|
||||
| api | version | note | |
||||
|:----------------------------|:--------------------|:----------| |
||||
| arm/mediaservices | 2015-10-01 | new | |
||||
| arm/keyvault | 2015-06-01 | new | |
||||
| arm/iothub | 2016-02-03 | new | |
||||
| arm/datalake-store | 2015-12-01 | new | |
||||
| arm/network | 2016-06-01 | updated | |
||||
| arm/resources/resources | 2016-07-01 | updated | |
||||
| arm/resources/policy | 2016-04-01 | updated | |
||||
| arm/servicebus | 2015-08-01 | updated | |
||||
|
||||
- arm: uses go-autorest version v7.1.0. |
||||
- storage: fix for operating on blobs names containing special characters. |
||||
- storage: add SetBlobProperties(), update BlobProperties response fields. |
||||
- storage: make storage client work correctly with read-only secondary account. |
||||
- storage: add Azure Storage Emulator support. |
||||
|
||||
|
||||
## `v3.1.0-beta` |
||||
|
||||
- Added a new arm/compute/containerservice (2016-03-30) package |
||||
- Reintroduced New...ClientWithBaseURI() method.
||||
- Uses go-autorest version - v7.0.7. |
||||
|
||||
|
||||
## `v3.0.0-beta` |
||||
|
||||
This release brings the Go SDK ARM packages up-to-date with Azure ARM Swagger files for most |
||||
services. Since the underlying [Swagger files](https://github.com/Azure/azure-rest-api-specs) |
||||
continue to change substantially, the ARM packages are still in *beta* status. |
||||
|
||||
The ARM packages now align with the following API versions (*highlighted* packages are new or |
||||
updated in this release): |
||||
|
||||
| api | version | note | |
||||
|:----------------------------|:--------------------|:----------| |
||||
| arm/authorization | 2015-07-01 | no change | |
||||
| arm/intune | 2015-01-14-preview | no change | |
||||
| arm/notificationhubs | 2014-09-01 | no change | |
||||
| arm/resources/features | 2015-12-01 | no change | |
||||
| arm/resources/subscriptions | 2015-11-01 | no change | |
||||
| arm/web | 2015-08-01 | no change | |
||||
| arm/cdn | 2016-04-02 | updated | |
||||
| arm/compute | 2016-03-30 | updated | |
||||
| arm/dns | 2016-04-01 | updated | |
||||
| arm/logic | 2015-08-01-preview | updated | |
||||
| arm/network | 2016-03-30 | updated | |
||||
| arm/redis | 2016-04-01 | updated | |
||||
| arm/resources/resources | 2016-02-01 | updated | |
||||
| arm/resources/policy | 2015-10-01-preview | updated | |
||||
| arm/resources/locks | 2015-01-01 | updated (resources/authorization earlier)| |
||||
| arm/scheduler | 2016-03-01 | updated | |
||||
| arm/storage | 2016-01-01 | updated | |
||||
| arm/search | 2015-02-28 | updated | |
||||
| arm/batch | 2015-12-01 | new | |
||||
| arm/cognitiveservices | 2016-02-01-preview | new | |
||||
| arm/devtestlabs | 2016-05-15 | new | |
||||
| arm/machinelearning | 2016-05-01-preview | new | |
||||
| arm/powerbiembedded | 2016-01-29 | new | |
||||
| arm/mobileengagement | 2014-12-01 | new | |
||||
| arm/servicebus | 2014-09-01 | new | |
||||
| arm/sql | 2015-05-01 | new | |
||||
| arm/trafficmanager | 2015-11-01 | new | |
||||
|
||||
|
||||
Below are some design changes. |
||||
- Removed API version from method arguments.
||||
- Removed New...ClientWithBaseURI() method in all clients. BaseURI value is set in client.go. |
||||
- Uses go-autorest version v7.0.6. |
||||
|
||||
|
||||
## `v2.2.0-beta` |
||||
|
||||
- Uses go-autorest version v7.0.5. |
||||
- Updated version of packages "jwt-go" and "crypto" in glide.lock.
||||
|
||||
|
||||
## `v2.1.1-beta` |
||||
|
||||
- arm: Better error messages for long running operation failures (Uses go-autorest version v7.0.4). |
||||
|
||||
|
||||
## `v2.1.0-beta` |
||||
|
||||
- arm: Uses go-autorest v7.0.3 (polling related updates). |
||||
- arm: Cancel channel argument added in long-running calls. |
||||
- storage: Allow caller to provide headers for DeleteBlob methods. |
||||
- storage: Enables connection sharing with http keepalive. |
||||
- storage: Add BlobPrefixes and Delimiter to BlobListResponse |
||||
|
||||
|
||||
## `v2.0.0-beta` |
||||
|
||||
- Uses go-autorest v6.0.0 (Polling and Asynchronous requests related changes). |
||||
|
||||
|
||||
## `v0.5.0-beta` |
||||
|
||||
Updated the following packages to new API versions:
||||
- arm/resources/features 2015-12-01 |
||||
- arm/resources/resources 2015-11-01 |
||||
- arm/resources/subscriptions 2015-11-01 |
||||
|
||||
|
||||
### Changes |
||||
|
||||
- SDK now uses go-autorest v3.0.0. |
||||
|
||||
|
||||
|
||||
## `v0.4.0-beta` |
||||
|
||||
This release brings the Go SDK ARM packages up-to-date with Azure ARM Swagger files for most |
||||
services. Since the underlying [Swagger files](https://github.com/Azure/azure-rest-api-specs) |
||||
continue to change substantially, the ARM packages are still in *beta* status. |
||||
|
||||
The ARM packages now align with the following API versions (*highlighted* packages are new or |
||||
updated in this release): |
||||
|
||||
- *arm/authorization 2015-07-01* |
||||
- *arm/cdn 2015-06-01* |
||||
- arm/compute 2015-06-15 |
||||
- arm/dns 2015-05-04-preview |
||||
- *arm/intune 2015-01-14-preview* |
||||
- arm/logic 2015-02-01-preview |
||||
- *arm/network 2015-06-15* |
||||
- *arm/notificationhubs 2014-09-01* |
||||
- arm/redis 2015-08-01 |
||||
- *arm/resources/authorization 2015-01-01* |
||||
- *arm/resources/features 2014-08-01-preview* |
||||
- *arm/resources/resources 2014-04-01-preview* |
||||
- *arm/resources/subscriptions 2014-04-01-preview* |
||||
- *arm/scheduler 2016-01-01* |
||||
- arm/storage 2015-06-15 |
||||
- arm/web 2015-08-01 |
||||
|
||||
### Changes |
||||
|
||||
- Moved the arm/authorization, arm/features, arm/resources, and arm/subscriptions packages under a new, resources, package (to reflect the corresponding Swagger structure) |
||||
- Added a new arm/authorization (2015-07-01) package
||||
- Added a new arm/cdn (2015-06-01) package |
||||
- Added a new arm/intune (2015-01-14-preview) package |
||||
- Updated arm/network (2015-06-01)
||||
- Added a new arm/notificationhubs (2014-09-01) package |
||||
- Updated arm/scheduler (2016-01-01) package |
||||
|
||||
|
||||
----- |
||||
|
||||
## `v0.3.0-beta` |
||||
|
||||
- Corrected unintentional struct field renaming and client renaming in v0.2.0-beta |
||||
|
||||
----- |
||||
|
||||
## `v0.2.0-beta` |
||||
|
||||
- Added support for DNS, Redis, and Web site services |
||||
- Updated Storage service to API version 2015-06-15 |
||||
- Updated Network to include routing table support |
||||
- Address https://github.com/Azure/azure-sdk-for-go/issues/232 |
||||
- Address https://github.com/Azure/azure-sdk-for-go/issues/231 |
||||
- Address https://github.com/Azure/azure-sdk-for-go/issues/230 |
||||
- Address https://github.com/Azure/azure-sdk-for-go/issues/224 |
||||
- Address https://github.com/Azure/azure-sdk-for-go/issues/184 |
||||
- Address https://github.com/Azure/azure-sdk-for-go/issues/183 |
||||
|
||||
------ |
||||
|
||||
## `v0.1.1-beta` |
||||
|
||||
- Improves the UserAgent string to disambiguate arm packages from others in the SDK |
||||
- Improves setting the http.Response into generated results (reduces likelihood of a nil reference) |
||||
- Adds gofmt, golint, and govet to Travis CI for the arm packages |
||||
|
||||
##### Fixed Issues |
||||
|
||||
- https://github.com/Azure/azure-sdk-for-go/issues/196 |
||||
- https://github.com/Azure/azure-sdk-for-go/issues/213 |
||||
|
||||
------ |
||||
|
||||
## v0.1.0-beta |
||||
|
||||
This release addresses the issues raised against the alpha release and adds more features. Most
notably, it addresses the challenges of encoding JSON
(see the [comments](https://github.com/Azure/go-autorest#handling-empty-values) in the
[go-autorest](https://github.com/Azure/go-autorest) package) by using pointers for *all* structure
fields (with the exception of enumerations). The
[go-autorest/autorest/to](https://github.com/Azure/go-autorest/tree/master/autorest/to) package
provides helpers to convert to / from pointers. The examples demonstrate their usage.
||||
|
||||
Additionally, the packages now align with Go coding standards and pass both `golint` and `govet`. |
||||
Accomplishing this required renaming various fields and parameters (such as changing Url to URL). |
||||
|
||||
##### Changes |
||||
|
||||
- Changed request / response structures to use pointer fields. |
||||
- Changed methods to return `error` instead of `autorest.Error`. |
||||
- Re-divided methods to ease asynchronous requests. |
||||
- Added paged results support. |
||||
- Added a UserAgent string. |
||||
- Added changes necessary to pass golint and govet. |
||||
- Updated README.md with details on asynchronous requests and paging. |
||||
- Saved package dependencies through Godep (for the entire SDK). |
||||
|
||||
##### Fixed Issues: |
||||
|
||||
- https://github.com/Azure/azure-sdk-for-go/issues/205 |
||||
- https://github.com/Azure/azure-sdk-for-go/issues/206 |
||||
- https://github.com/Azure/azure-sdk-for-go/issues/211 |
||||
- https://github.com/Azure/azure-sdk-for-go/issues/212 |
||||
|
||||
----- |
||||
|
||||
## v0.1.0-alpha |
||||
|
||||
This release introduces the Azure Resource Manager packages generated from the corresponding |
||||
[Swagger API](http://swagger.io) [definitions](https://github.com/Azure/azure-rest-api-specs). |
@ -1,202 +0,0 @@ |
||||
|
||||
Apache License |
||||
Version 2.0, January 2004 |
||||
http://www.apache.org/licenses/ |
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
||||
1. Definitions. |
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, |
||||
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by |
||||
the copyright owner that is granting the License. |
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all |
||||
other entities that control, are controlled by, or are under common |
||||
control with that entity. For the purposes of this definition, |
||||
"control" means (i) the power, direct or indirect, to cause the |
||||
direction or management of such entity, whether by contract or |
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity |
||||
exercising permissions granted by this License. |
||||
|
||||
"Source" form shall mean the preferred form for making modifications, |
||||
including but not limited to software source code, documentation |
||||
source, and configuration files. |
||||
|
||||
"Object" form shall mean any form resulting from mechanical |
||||
transformation or translation of a Source form, including but |
||||
not limited to compiled object code, generated documentation, |
||||
and conversions to other media types. |
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or |
||||
Object form, made available under the License, as indicated by a |
||||
copyright notice that is included in or attached to the work |
||||
(an example is provided in the Appendix below). |
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object |
||||
form, that is based on (or derived from) the Work and for which the |
||||
editorial revisions, annotations, elaborations, or other modifications |
||||
represent, as a whole, an original work of authorship. For the purposes |
||||
of this License, Derivative Works shall not include works that remain |
||||
separable from, or merely link (or bind by name) to the interfaces of, |
||||
the Work and Derivative Works thereof. |
||||
|
||||
"Contribution" shall mean any work of authorship, including |
||||
the original version of the Work and any modifications or additions |
||||
to that Work or Derivative Works thereof, that is intentionally |
||||
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
or by an individual or Legal Entity authorized to submit on behalf of |
||||
the copyright owner. For the purposes of this definition, "submitted" |
||||
means any form of electronic, verbal, or written communication sent |
||||
to the Licensor or its representatives, including but not limited to |
||||
communication on electronic mailing lists, source code control systems, |
||||
and issue tracking systems that are managed by, or on behalf of, the |
||||
Licensor for the purpose of discussing and improving the Work, but |
||||
excluding communication that is conspicuously marked or otherwise |
||||
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
on behalf of whom a Contribution has been received by Licensor and |
||||
subsequently incorporated within the Work. |
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
copyright license to reproduce, prepare Derivative Works of, |
||||
publicly display, publicly perform, sublicense, and distribute the |
||||
Work and such Derivative Works in Source or Object form. |
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
(except as stated in this section) patent license to make, have made, |
||||
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
where such license applies only to those patent claims licensable |
||||
by such Contributor that are necessarily infringed by their |
||||
Contribution(s) alone or by combination of their Contribution(s) |
||||
with the Work to which such Contribution(s) was submitted. If You |
||||
institute patent litigation against any entity (including a |
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
or a Contribution incorporated within the Work constitutes direct |
||||
or contributory patent infringement, then any patent licenses |
||||
granted to You under this License for that Work shall terminate |
||||
as of the date such litigation is filed. |
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the |
||||
Work or Derivative Works thereof in any medium, with or without |
||||
modifications, and in Source or Object form, provided that You |
||||
meet the following conditions: |
||||
|
||||
(a) You must give any other recipients of the Work or |
||||
Derivative Works a copy of this License; and |
||||
|
||||
(b) You must cause any modified files to carry prominent notices |
||||
stating that You changed the files; and |
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works |
||||
that You distribute, all copyright, patent, trademark, and |
||||
attribution notices from the Source form of the Work, |
||||
excluding those notices that do not pertain to any part of |
||||
the Derivative Works; and |
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its |
||||
distribution, then any Derivative Works that You distribute must |
||||
include a readable copy of the attribution notices contained |
||||
within such NOTICE file, excluding those notices that do not |
||||
pertain to any part of the Derivative Works, in at least one |
||||
of the following places: within a NOTICE text file distributed |
||||
as part of the Derivative Works; within the Source form or |
||||
documentation, if provided along with the Derivative Works; or, |
||||
within a display generated by the Derivative Works, if and |
||||
wherever such third-party notices normally appear. The contents |
||||
of the NOTICE file are for informational purposes only and |
||||
do not modify the License. You may add Your own attribution |
||||
notices within Derivative Works that You distribute, alongside |
||||
or as an addendum to the NOTICE text from the Work, provided |
||||
that such additional attribution notices cannot be construed |
||||
as modifying the License. |
||||
|
||||
You may add Your own copyright statement to Your modifications and |
||||
may provide additional or different license terms and conditions |
||||
for use, reproduction, or distribution of Your modifications, or |
||||
for any such Derivative Works as a whole, provided Your use, |
||||
reproduction, and distribution of the Work otherwise complies with |
||||
the conditions stated in this License. |
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
any Contribution intentionally submitted for inclusion in the Work |
||||
by You to the Licensor shall be under the terms and conditions of |
||||
this License, without any additional terms or conditions. |
||||
Notwithstanding the above, nothing herein shall supersede or modify |
||||
the terms of any separate license agreement you may have executed |
||||
with Licensor regarding such Contributions. |
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade |
||||
names, trademarks, service marks, or product names of the Licensor, |
||||
except as required for reasonable and customary use in describing the |
||||
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
agreed to in writing, Licensor provides the Work (and each |
||||
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
implied, including, without limitation, any warranties or conditions |
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
appropriateness of using or redistributing the Work and assume any |
||||
risks associated with Your exercise of permissions under this License. |
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, |
||||
whether in tort (including negligence), contract, or otherwise, |
||||
unless required by applicable law (such as deliberate and grossly |
||||
negligent acts) or agreed to in writing, shall any Contributor be |
||||
liable to You for damages, including any direct, indirect, special, |
||||
incidental, or consequential damages of any character arising as a |
||||
result of this License or out of the use or inability to use the |
||||
Work (including but not limited to damages for loss of goodwill, |
||||
work stoppage, computer failure or malfunction, or any and all |
||||
other commercial damages or losses), even if such Contributor |
||||
has been advised of the possibility of such damages. |
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing |
||||
the Work or Derivative Works thereof, You may choose to offer, |
||||
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
or other liability obligations and/or rights consistent with this |
||||
License. However, in accepting such obligations, You may act only |
||||
on Your own behalf and on Your sole responsibility, not on behalf |
||||
of any other Contributor, and only if You agree to indemnify, |
||||
defend, and hold each Contributor harmless for any liability |
||||
incurred by, or claims asserted against, such Contributor by reason |
||||
of your accepting any such warranty or additional liability. |
||||
|
||||
END OF TERMS AND CONDITIONS |
||||
|
||||
APPENDIX: How to apply the Apache License to your work. |
||||
|
||||
To apply the Apache License to your work, attach the following |
||||
boilerplate notice, with the fields enclosed by brackets "[]" |
||||
replaced with your own identifying information. (Don't include |
||||
the brackets!) The text should be enclosed in the appropriate |
||||
comment syntax for the file format. We also recommend that a |
||||
file or class name and description of purpose be included on the |
||||
same "printed page" as the copyright notice for easier |
||||
identification within third-party archives. |
||||
|
||||
Copyright 2016 Microsoft Corporation |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
@ -1,102 +0,0 @@ |
||||
# Microsoft Azure SDK for Go |
||||
|
||||
This project provides various Go packages to perform operations |
||||
on Microsoft Azure REST APIs. |
||||
|
||||
[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) [![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go) |
||||
|
||||
> **NOTE:** This repository is under heavy ongoing development and |
||||
is likely to break over time. We currently do not have any releases |
||||
yet. If you are planning to use the repository, please consider vendoring |
||||
the packages in your project and update them when a stable tag is out. |
||||
|
||||
# Packages |
||||
|
||||
## Azure Resource Manager (ARM) |
||||
|
||||
[About ARM](/arm/README.md) |
||||
|
||||
- [authorization](/arm/authorization) |
||||
- [batch](/arm/batch) |
||||
- [cdn](/arm/cdn) |
||||
- [cognitiveservices](/arm/cognitiveservices) |
||||
- [compute](/arm/compute) |
||||
- [containerservice](/arm/containerservice) |
||||
- [datalake-store](/arm/datalake-store) |
||||
- [devtestlabs](/arm/devtestlabs) |
||||
- [dns](/arm/dns) |
||||
- [intune](/arm/intune) |
||||
- [iothub](/arm/iothub) |
||||
- [keyvault](/arm/keyvault) |
||||
- [logic](/arm/logic) |
||||
- [machinelearning](/arm/machinelearning) |
||||
- [mediaservices](/arm/mediaservices) |
||||
- [mobileengagement](/arm/mobileengagement) |
||||
- [network](/arm/network) |
||||
- [notificationhubs](/arm/notificationhubs) |
||||
- [powerbiembedded](/arm/powerbiembedded) |
||||
- [redis](/arm/redis) |
||||
- [resources](/arm/resources) |
||||
- [scheduler](/arm/scheduler) |
||||
- [search](/arm/search) |
||||
- [servicebus](/arm/servicebus) |
||||
- [sql](/arm/sql) |
||||
- [storage](/arm/storage) |
||||
- [trafficmanager](/arm/trafficmanager) |
||||
- [web](/arm/web) |
||||
|
||||
## Azure Service Management (ASM), aka classic deployment |
||||
|
||||
[About ASM](/management/README.md) |
||||
|
||||
- [affinitygroup](/management/affinitygroup) |
||||
- [hostedservice](/management/hostedservice) |
||||
- [location](/management/location) |
||||
- [networksecuritygroup](/management/networksecuritygroup) |
||||
- [osimage](/management/osimage) |
||||
- [sql](/management/sql) |
||||
- [storageservice](/management/storageservice) |
||||
- [virtualmachine](/management/virtualmachine) |
||||
- [virtualmachinedisk](/management/virtualmachinedisk) |
||||
- [virtualmachineimage](/management/virtualmachineimage) |
||||
- [virtualnetwork](/management/virtualnetwork) |
||||
- [vmutils](/management/vmutils) |
||||
|
||||
## Azure Storage SDK for Go |
||||
|
||||
[About Storage](/storage/README.md) |
||||
|
||||
- [storage](/storage) |
||||
|
||||
# Installation |
||||
|
||||
- [Install Go 1.7](https://golang.org/dl/). |
||||
|
||||
- Go get the SDK: |
||||
|
||||
``` |
||||
$ go get -d github.com/Azure/azure-sdk-for-go |
||||
``` |
||||
|
||||
> **IMPORTANT:** We highly suggest vendoring Azure SDK for Go as a dependency. For vendoring dependencies, Azure SDK for Go uses [glide](https://github.com/Masterminds/glide). If you haven't already, install glide. Navigate to your project directory and install the dependencies. |
||||
|
||||
``` |
||||
$ cd your/project |
||||
$ glide create |
||||
$ glide install |
||||
``` |
||||
|
||||
# Documentation |
||||
|
||||
Read the Godoc of the repository at [Godoc.org](http://godoc.org/github.com/Azure/azure-sdk-for-go/). |
||||
|
||||
# Contribute |
||||
|
||||
If you would like to become an active contributor to this project please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). |
||||
|
||||
# License |
||||
|
||||
This project is published under [Apache 2.0 License](LICENSE). |
||||
|
||||
----- |
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. |
@ -1,53 +0,0 @@ |
||||
hash: 7407050cee9bb9ce89e23ef26bce4051cce63d558338a4937f027a18b789e3a1 |
||||
updated: 2016-10-25T11:34:48.4987356-07:00 |
||||
imports: |
||||
- name: github.com/Azure/go-autorest |
||||
version: 0781901f19f1e7db3034d97ec57af753db0bf808 |
||||
subpackages: |
||||
- autorest |
||||
- autorest/azure |
||||
- autorest/date |
||||
- autorest/to |
||||
- autorest/validation |
||||
- name: github.com/dgrijalva/jwt-go |
||||
version: 24c63f56522a87ec5339cc3567883f1039378fdb |
||||
- name: github.com/howeyc/gopass |
||||
version: f5387c492211eb133053880d23dfae62aa14123d |
||||
- name: github.com/mattn/go-colorable |
||||
version: 6c903ff4aa50920ca86087a280590b36b3152b9c |
||||
- name: github.com/mattn/go-isatty |
||||
version: 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8 |
||||
- name: github.com/mgutz/ansi |
||||
version: c286dcecd19ff979eeb73ea444e479b903f2cfcb |
||||
- name: github.com/mgutz/minimist |
||||
version: 39eb8cf573ca29344bd7d7e6ba4d7febdebd37a9 |
||||
- name: github.com/mgutz/str |
||||
version: 968bf66e3da857419e4f6e71b2d5c9ae95682dc4 |
||||
- name: github.com/mgutz/to |
||||
version: 2a0bcba0661696e339461f5efb2273f4459dd1b9 |
||||
- name: github.com/MichaelTJones/walk |
||||
version: 3af09438b0ab0e8f296410bfa646a7e635ea1fc0 |
||||
- name: github.com/nozzle/throttler |
||||
version: d9b45f19996c645d38c9266d1f5cf1990e930119 |
||||
- name: github.com/satori/uuid |
||||
version: b061729afc07e77a8aa4fad0a2fd840958f1942a |
||||
- name: golang.org/x/crypto |
||||
version: 84e98f45760e87786b7f24603b8166a6fa09811d |
||||
subpackages: |
||||
- pkcs12 |
||||
- pkcs12/internal/rc2 |
||||
- ssh/terminal |
||||
- name: golang.org/x/sys |
||||
version: c200b10b5d5e122be351b67af224adc6128af5bf |
||||
subpackages: |
||||
- unix |
||||
- name: gopkg.in/check.v1 |
||||
version: 4f90aeace3a26ad7021961c297b22c42160c7b25 |
||||
- name: gopkg.in/godo.v2 |
||||
version: b5fd2f0bef1ebe832e628cfad18ab1cc707f65a1 |
||||
subpackages: |
||||
- glob |
||||
- util |
||||
- watcher |
||||
- watcher/fswatch |
||||
testImports: [] |
@ -1,12 +0,0 @@ |
||||
package: github.com/Azure/azure-sdk-for-go |
||||
import: |
||||
- package: github.com/Azure/go-autorest |
||||
subpackages: |
||||
- /autorest |
||||
- autorest/azure |
||||
- autorest/date |
||||
- autorest/to |
||||
- package: golang.org/x/crypto |
||||
subpackages: |
||||
- /pkcs12 |
||||
- package: gopkg.in/check.v1 |
@ -1,21 +0,0 @@ |
||||
MIT License |
||||
|
||||
Copyright (c) Microsoft Corporation. All rights reserved. |
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
of this software and associated documentation files (the "Software"), to deal |
||||
in the Software without restriction, including without limitation the rights |
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
copies of the Software, and to permit persons to whom the Software is |
||||
furnished to do so, subject to the following conditions: |
||||
|
||||
The above copyright notice and this permission notice shall be included in all |
||||
copies or substantial portions of the Software. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||||
SOFTWARE.
@ -1,65 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"time" |
||||
) |
||||
|
||||
// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
|
||||
type ModifiedAccessConditions struct { |
||||
IfModifiedSince time.Time |
||||
IfUnmodifiedSince time.Time |
||||
IfMatch ETag |
||||
IfNoneMatch ETag |
||||
} |
||||
|
||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
||||
func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) { |
||||
if !ac.IfModifiedSince.IsZero() { |
||||
ims = &ac.IfModifiedSince |
||||
} |
||||
if !ac.IfUnmodifiedSince.IsZero() { |
||||
ius = &ac.IfUnmodifiedSince |
||||
} |
||||
if ac.IfMatch != ETagNone { |
||||
ime = &ac.IfMatch |
||||
} |
||||
if ac.IfNoneMatch != ETagNone { |
||||
inme = &ac.IfNoneMatch |
||||
} |
||||
return |
||||
} |
||||
|
||||
// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
|
||||
type ContainerAccessConditions struct { |
||||
ModifiedAccessConditions |
||||
LeaseAccessConditions |
||||
} |
||||
|
||||
// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
|
||||
type BlobAccessConditions struct { |
||||
ModifiedAccessConditions |
||||
LeaseAccessConditions |
||||
} |
||||
|
||||
// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
|
||||
type LeaseAccessConditions struct { |
||||
LeaseID string |
||||
} |
||||
|
||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
||||
func (ac LeaseAccessConditions) pointers() (leaseID *string) { |
||||
if ac.LeaseID != "" { |
||||
leaseID = &ac.LeaseID |
||||
} |
||||
return |
||||
} |
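
// Illustrative sketch (not part of the original file): callers typically compose
// these conditions as struct literals (etag and leaseID are hypothetical values):
//
//	ac := BlobAccessConditions{
//		ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag},
//		LeaseAccessConditions:    LeaseAccessConditions{LeaseID: leaseID},
//	}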
||||
|
||||
/* |
||||
// getInt32 is for internal infrastructure. It is used with access condition values where
// 0 (the default setting) is meaningful. The library interprets 0 as "do not send the header"
// and the privately-stored field in the access condition object is stored as +1 higher than desired.
// This method returns true if the value is > 0 (explicitly set), along with the stored value - 1 (the desired value).
|
||||
func getInt32(value int32) (bool, int32) { |
||||
return value > 0, value - 1 |
||||
} |
||||
*/ |
@ -1,69 +0,0 @@ |
||||
package azblob |
||||
|
||||
import "sync/atomic" |
||||
|
||||
// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{}) |
||||
|
||||
const targetAndMorpherMustNotBeNil = "target and morpher must not be nil" |
||||
|
||||
// atomicMorphInt32 atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
|
||||
func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} { |
||||
for { |
||||
currentVal := atomic.LoadInt32(target) |
||||
desiredVal, morphResult := morpher(currentVal) |
||||
if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) { |
||||
return morphResult |
||||
} |
||||
} |
||||
} |
||||
|
||||
// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorphUint32 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{}) |
||||
|
||||
// atomicMorphUint32 atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
|
||||
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} { |
||||
for { |
||||
currentVal := atomic.LoadUint32(target) |
||||
desiredVal, morphResult := morpher(currentVal) |
||||
if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) { |
||||
return morphResult |
||||
} |
||||
} |
||||
} |
||||
|
||||
// AtomicMorpherInt64 identifies a method passed to and invoked by the AtomicMorphInt64 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{}) |
||||
|
||||
// atomicMorphInt64 atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
|
||||
func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} { |
||||
for { |
||||
currentVal := atomic.LoadInt64(target) |
||||
desiredVal, morphResult := morpher(currentVal) |
||||
if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) { |
||||
return morphResult |
||||
} |
||||
} |
||||
} |
||||
|
||||
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{}) |
||||
|
||||
// atomicMorphUint64 atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
|
||||
func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} { |
||||
for { |
||||
currentVal := atomic.LoadUint64(target) |
||||
desiredVal, morphResult := morpher(currentVal) |
||||
if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) { |
||||
return morphResult |
||||
} |
||||
} |
||||
} |
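
// Illustrative sketch (not part of the original file): atomically increment a counter
// and learn whether this goroutine is the one that reached a hypothetical threshold:
//
//	crossed := atomicMorphInt32(&counter, func(v int32) (int32, interface{}) {
//		return v + 1, v+1 == threshold
//	}).(bool)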
@ -1,538 +0,0 @@ |
||||
package azblob

import (
	"bytes"
	"context"
	"encoding/base64"
	"errors"
	"io"
	"net/http"
	"os"
	"sync"
	"time"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// CommonResponse returns the headers common to all blob REST API responses.
type CommonResponse interface {
	// ETag returns the value for header ETag.
	ETag() ETag

	// LastModified returns the value for header Last-Modified.
	LastModified() time.Time

	// RequestID returns the value for header x-ms-request-id.
	RequestID() string

	// Date returns the value for header Date.
	Date() time.Time

	// Version returns the value for header x-ms-version.
	Version() string

	// Response returns the raw HTTP response object.
	Response() *http.Response
}

// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions.
type UploadToBlockBlobOptions struct {
	// BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
	// Note that the progress reporting is not always increasing; it can go down when retrying a request.
	Progress pipeline.ProgressReceiver

	// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
	BlobHTTPHeaders BlobHTTPHeaders

	// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
	Metadata Metadata

	// AccessConditions indicates the access conditions for the block blob.
	AccessConditions BlobAccessConditions

	// Parallelism indicates the maximum number of blocks to upload in parallel (0=default).
	Parallelism uint16
}

// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
func UploadBufferToBlockBlob(ctx context.Context, b []byte,
	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
	bufferSize := int64(len(b))
	if o.BlockSize == 0 {
		// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
		if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
			return nil, errors.New("buffer is too large to upload to a block blob")
		}
		// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
		if bufferSize <= BlockBlobMaxUploadBlobBytes {
			o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
		} else {
			o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
			if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
				o.BlockSize = BlobDefaultDownloadBlockSize
			}
			// StageBlock will be called with blockSize blocks and a parallelism of (BufferSize / BlockSize).
		}
	}

	if bufferSize <= BlockBlobMaxUploadBlobBytes {
		// If the size can fit in 1 Upload call, do it this way
		var body io.ReadSeeker = bytes.NewReader(b)
		if o.Progress != nil {
			body = pipeline.NewRequestBodyProgress(body, o.Progress)
		}
		return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
	}

	var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)

	blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
	progress := int64(0)
	progressLock := &sync.Mutex{}

	err := doBatchTransfer(ctx, batchTransferOptions{
		operationName: "UploadBufferToBlockBlob",
		transferSize:  bufferSize,
		chunkSize:     o.BlockSize,
		parallelism:   o.Parallelism,
		operation: func(offset int64, count int64) error {
			// This function is called once per block.
			// It is passed this block's offset within the buffer and its count of bytes.
			// Prepare to read the proper block/section of the buffer.
			var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count])
			blockNum := offset / o.BlockSize
			if o.Progress != nil {
				blockProgress := int64(0)
				body = pipeline.NewRequestBodyProgress(body,
					func(bytesTransferred int64) {
						diff := bytesTransferred - blockProgress
						blockProgress = bytesTransferred
						progressLock.Lock() // 1 goroutine at a time gets a progress report
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}

			// Block IDs are unique values to avoid issues if 2+ clients are uploading blocks
			// at the same time, causing PutBlockList to get a mix of blocks from all the clients.
			blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
			_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil)
			return err
		},
	})
	if err != nil {
		return nil, err
	}
	// All put blocks were successful, call Put Block List to finalize the blob
	return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
}
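
// For illustration (not part of the vendored file): a hedged usage sketch; the
// account, key, container, and blob names are placeholders.
//
//	credential, _ := NewSharedKeyCredential("myaccount", "<base64-key>")
//	p := NewPipeline(credential, PipelineOptions{})
//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob")
//	blockBlobURL := NewBlockBlobURL(*u, p)
//	_, err := UploadBufferToBlockBlob(context.Background(), []byte("payload"), blockBlobURL,
//		UploadToBlockBlobOptions{BlockSize: 4 * 1024 * 1024, Parallelism: 4})
//	// err is non-nil if any block failed to stage or the final commit failed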

// UploadFileToBlockBlob uploads a file in blocks to a block blob.
func UploadFileToBlockBlob(ctx context.Context, file *os.File,
	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {

	stat, err := file.Stat()
	if err != nil {
		return nil, err
	}
	m := mmf{} // Default to an empty slice; used for 0-size file
	if stat.Size() != 0 {
		m, err = newMMF(file, false, 0, int(stat.Size()))
		if err != nil {
			return nil, err
		}
		defer m.unmap()
	}
	return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o)
}

///////////////////////////////////////////////////////////////////////////////

const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB

// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
type DownloadFromBlobOptions struct {
	// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are received.
	Progress pipeline.ProgressReceiver

	// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
	AccessConditions BlobAccessConditions

	// Parallelism indicates the maximum number of blocks to download in parallel (0=default).
	Parallelism uint16

	// RetryReaderOptionsPerBlock is used when downloading each block.
	RetryReaderOptionsPerBlock RetryReaderOptions
}

// downloadBlobToBuffer downloads an Azure blob to a buffer using parallel block downloads.
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
	b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
	if o.BlockSize == 0 {
		o.BlockSize = BlobDefaultDownloadBlockSize
	}

	if count == CountToEnd { // If size not specified, calculate it
		if initialDownloadResponse != nil {
			count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
		} else {
			// If we don't have the length at all, get it
			dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false)
			if err != nil {
				return err
			}
			count = dr.ContentLength() - offset
		}
	}

	// Prepare and do parallel download.
	progress := int64(0)
	progressLock := &sync.Mutex{}

	err := doBatchTransfer(ctx, batchTransferOptions{
		operationName: "downloadBlobToBuffer",
		transferSize:  count,
		chunkSize:     o.BlockSize,
		parallelism:   o.Parallelism,
		operation: func(chunkStart int64, count int64) error {
			dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false)
			if err != nil {
				return err
			}
			body := dr.Body(o.RetryReaderOptionsPerBlock)
			if o.Progress != nil {
				rangeProgress := int64(0)
				body = pipeline.NewResponseBodyProgress(
					body,
					func(bytesTransferred int64) {
						diff := bytesTransferred - rangeProgress
						rangeProgress = bytesTransferred
						progressLock.Lock()
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}
			_, err = io.ReadFull(body, b[chunkStart:chunkStart+count])
			body.Close()
			return err
		},
	})
	if err != nil {
		return err
	}
	return nil
}

// DownloadBlobToBuffer downloads an Azure blob to a buffer using parallel block downloads.
// Offset and count are optional; pass 0 for both to download the entire blob.
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
	b []byte, o DownloadFromBlobOptions) error {
	return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil)
}

// DownloadBlobToFile downloads an Azure blob to a local file.
// The file is truncated if its size doesn't match the downloaded size.
// Offset and count are optional; pass 0 for both to download the entire blob.
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
	file *os.File, o DownloadFromBlobOptions) error {
	// 1. Calculate the size of the destination file
	var size int64

	if count == CountToEnd {
		// Try to get the Azure blob's size
		props, err := blobURL.GetProperties(ctx, o.AccessConditions)
		if err != nil {
			return err
		}
		size = props.ContentLength() - offset
	} else {
		size = count
	}

	// 2. Compare and try to resize the local file if its size doesn't match the Azure blob's size.
	stat, err := file.Stat()
	if err != nil {
		return err
	}
	if stat.Size() != size {
		if err = file.Truncate(size); err != nil {
			return err
		}
	}

	if size > 0 {
		// 3. Set up the mmap and call downloadBlobToBuffer.
		m, err := newMMF(file, true, 0, int(size))
		if err != nil {
			return err
		}
		defer m.unmap()
		return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil)
	} else { // if the blob's size is 0, there is no need to download it
		return nil
	}
}
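
// For illustration (not part of the vendored file): a hedged sketch of the
// download path; blobURL is assumed to be built as in the upload sketch above.
//
//	f, _ := os.Create("/tmp/blob-copy") // placeholder path
//	defer f.Close()
//	err := DownloadBlobToFile(context.Background(), blobURL, 0, CountToEnd, f,
//		DownloadFromBlobOptions{Parallelism: 4})
//	// the file is truncated to the blob's size, mmap'ed, and filled by parallel range GETs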

///////////////////////////////////////////////////////////////////////////////

// batchTransferOptions identifies options used by doBatchTransfer.
type batchTransferOptions struct {
	transferSize  int64
	chunkSize     int64
	parallelism   uint16
	operation     func(offset int64, chunkSize int64) error
	operationName string
}

// doBatchTransfer helps to execute operations in a batch manner.
func doBatchTransfer(ctx context.Context, o batchTransferOptions) error {
	// Prepare and do parallel operations.
	numChunks := uint16(((o.transferSize - 1) / o.chunkSize) + 1)
	if o.parallelism == 0 {
		o.parallelism = 5 // default parallelism; set before sizing the channel below
	}
	operationChannel := make(chan func() error, o.parallelism) // Create the channel that releases 'parallelism' goroutines concurrently
	operationResponseChannel := make(chan error, numChunks)    // Holds each response
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Create the goroutines that process each operation (in parallel).
	for g := uint16(0); g < o.parallelism; g++ {
		go func() {
			for f := range operationChannel {
				err := f()
				operationResponseChannel <- err
			}
		}()
	}

	// Add each chunk's operation to the channel.
	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
		curChunkSize := o.chunkSize

		if chunkNum == numChunks-1 { // Last chunk
			curChunkSize = o.transferSize - (int64(chunkNum) * o.chunkSize) // Remove size of all transferred chunks from total
		}
		offset := int64(chunkNum) * o.chunkSize

		operationChannel <- func() error {
			return o.operation(offset, curChunkSize)
		}
	}
	close(operationChannel)

	// Wait for the operations to complete.
	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
		responseError := <-operationResponseChannel
		if responseError != nil {
			cancel()             // As soon as any operation fails, cancel all remaining operation calls
			return responseError // No need to process any more responses
		}
	}
	return nil
}
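
// For illustration (not part of the vendored file): with a 10 MB transferSize and
// a 4 MB chunkSize, numChunks = ((10MB-1)/4MB)+1 = 3 and the last chunk is trimmed
// to 2 MB. A hedged no-op sketch of driving doBatchTransfer directly:
//
//	err := doBatchTransfer(context.Background(), batchTransferOptions{
//		operationName: "example", // hypothetical name
//		transferSize:  10 * 1024 * 1024,
//		chunkSize:     4 * 1024 * 1024,
//		parallelism:   2,
//		operation: func(offset int64, chunkSize int64) error {
//			// receives (0, 4MB), (4MB, 4MB), (8MB, 2MB), in any order
//			return nil
//		},
//	})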

////////////////////////////////////////////////////////////////////////////////////////////////

type UploadStreamToBlockBlobOptions struct {
	BufferSize       int
	MaxBuffers       int
	BlobHTTPHeaders  BlobHTTPHeaders
	Metadata         Metadata
	AccessConditions BlobAccessConditions
}

func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL,
	o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
	result, err := uploadStream(ctx, reader,
		UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
		&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
	if err != nil {
		return nil, err
	}
	return result.(CommonResponse), nil
}

type uploadStreamToBlockBlobOptions struct {
	b             BlockBlobURL
	o             UploadStreamToBlockBlobOptions
	blockIDPrefix uuid   // UUID used with all blockIDs
	maxBlockNum   uint32 // defaults to 0
	firstBlock    []byte // Used only if maxBlockNum is 0
}

func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) {
	return nil, nil
}

func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error {
	if num == 0 {
		t.firstBlock = buffer

		// If the whole payload fits in 1 block, don't stage it; end will upload it with 1 I/O operation.
		// If the payload is exactly the same size as the buffer, there may be more content coming in.
		if len(buffer) < t.o.BufferSize {
			return nil
		}
	}
	// Else, upload a staged block...
	atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
		// Atomically remember (in t.maxBlockNum) the maximum block num we've ever seen
		if startVal < num {
			return num, nil
		}
		return startVal, nil
	})
	blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
	_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil)
	return err
}

func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) {
	// If the first block had the exact same size as the buffer,
	// we would have staged it as a block thinking that there might be more data coming.
	if t.maxBlockNum == 0 && len(t.firstBlock) != t.o.BufferSize {
		// If the whole payload fits in 1 block (block #0), upload it with 1 I/O operation
		return t.b.Upload(ctx, bytes.NewReader(t.firstBlock),
			t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
	}
	// Multiple blocks staged, commit them all now
	blockID := newUuidBlockID(t.blockIDPrefix)
	blockIDs := make([]string, t.maxBlockNum+1)
	for bn := uint32(0); bn <= t.maxBlockNum; bn++ {
		blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
	}
	return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
}

////////////////////////////////////////////////////////////////////////////////////////////////////

type iTransfer interface {
	start(ctx context.Context) (interface{}, error)
	chunk(ctx context.Context, num uint32, buffer []byte) error
	end(ctx context.Context) (interface{}, error)
}

type UploadStreamOptions struct {
	MaxBuffers int
	BufferSize int
}

type firstErr struct {
	lock       sync.Mutex
	finalError error
}

func (fe *firstErr) set(err error) {
	fe.lock.Lock()
	if fe.finalError == nil {
		fe.finalError = err
	}
	fe.lock.Unlock()
}

func (fe *firstErr) get() (err error) {
	fe.lock.Lock()
	err = fe.finalError
	fe.lock.Unlock()
	return
}

func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
	firstErr := firstErr{}
	ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
	defer cancel()
	wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
	type OutgoingMsg struct {
		chunkNum uint32
		buffer   []byte
	}

	// Create a channel to hold the buffers usable for incoming data
	incoming := make(chan []byte, o.MaxBuffers)
	outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers
	if result, err := t.start(ctx); err != nil {
		return result, err
	}

	numBuffers := 0 // The number of buffers & outgoing goroutines created so far
	injectBuffer := func() {
		// For each buffer, create it and a goroutine to upload it
		incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so this goroutine can read from the reader into it
		numBuffers++
		go func() {
			for outgoingMsg := range outgoing {
				// Upload the outgoing buffer
				err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
				wg.Done() // Indicate this buffer was sent
				if nil != err {
					// NOTE: finalErr could be assigned to multiple times here, which is OK;
					// some error will be returned.
					firstErr.set(err)
					cancel()
				}
				incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
			}
		}()
	}
	injectBuffer() // Create our 1st buffer & outgoing goroutine

	// This goroutine grabs a buffer, reads from the stream into the buffer,
	// and inserts the buffer into the outgoing channel to be uploaded
	for c := uint32(0); true; c++ { // Iterate once per chunk
		var buffer []byte
		if numBuffers < o.MaxBuffers {
			select {
			// We're not at max buffers, see if a previously-created buffer is available
			case buffer = <-incoming:
				break
			default:
				// No buffer available; inject a new buffer & goroutine to process it
				injectBuffer()
				buffer = <-incoming // Grab the just-injected buffer
			}
		} else {
			// We are at max buffers, block until we get to reuse one
			buffer = <-incoming
		}
		n, err := io.ReadFull(reader, buffer)
		if err != nil { // Less than len(buffer) bytes were read
			buffer = buffer[:n] // Make the slice match the # of read bytes
		}
		if len(buffer) > 0 {
			// Buffer not empty, upload it
			wg.Add(1) // We're posting a buffer to be sent
			outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
		}
		if err != nil { // The reader is done, no more outgoing buffers
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF
			} else {
				firstErr.set(err)
			}
			break
		}
	}
	// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
	close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
	wg.Wait()       // Wait for all pending outgoing messages to complete
	err := firstErr.get()
	if err == nil {
		// If no error, after all blocks are uploaded, commit them to the blob & return the result
		return t.end(ctx)
	}
	return nil, err
}
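
// For illustration (not part of the vendored file): a hedged sketch of the
// streaming entry point; blockBlobURL is assumed to be built as in the earlier sketches.
//
//	_, err := UploadStreamToBlockBlob(context.Background(), os.Stdin, blockBlobURL,
//		UploadStreamToBlockBlobOptions{BufferSize: 1024 * 1024, MaxBuffers: 4})
//	// each filled 1 MB buffer becomes a staged block; end() commits the block list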
@ -1,153 +0,0 @@
package azblob

import (
	"net"
	"net/url"
	"strings"
)

const (
	snapshot           = "snapshot"
	SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
)

// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type BlobURLParts struct {
	Scheme              string // Ex: "https://"
	Host                string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
	IPEndpointStyleInfo IPEndpointStyleInfo
	ContainerName       string // "" if no container
	BlobName            string // "" if no blob
	Snapshot            string // "" if not a snapshot
	SAS                 SASQueryParameters
	UnparsedParams      string
}

// IPEndpointStyleInfo is used for an IP endpoint style URL when working with the Azure storage emulator.
// Ex: "https://10.132.141.33/accountname/containername"
type IPEndpointStyleInfo struct {
	AccountName string // "" if not using IP endpoint style
}

// isIPEndpointStyle checks if the URL's host is an IP; in this case the storage account endpoint is composed as:
// http(s)://IP(:port)/storageaccount/container/...
// Like a URL's Host property, host can be either a host or a host:port pair.
func isIPEndpointStyle(host string) bool {
	if host == "" {
		return false
	}
	if h, _, err := net.SplitHostPort(host); err == nil {
		host = h
	}
	// For IPv6, SplitHostPort can fail because it cannot find a port.
	// In this case, eliminate the '[' and ']' in the URL.
	// For details about IPv6 URLs, please refer to https://tools.ietf.org/html/rfc2732
	if host[0] == '[' && host[len(host)-1] == ']' {
		host = host[1 : len(host)-1]
	}
	return net.ParseIP(host) != nil
}

// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
func NewBlobURLParts(u url.URL) BlobURLParts {
	up := BlobURLParts{
		Scheme: u.Scheme,
		Host:   u.Host,
	}

	// Find the container & blob names (if any)
	if u.Path != "" {
		path := u.Path
		if path[0] == '/' {
			path = path[1:] // If path starts with a slash, remove it
		}
		if isIPEndpointStyle(up.Host) {
			if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
				up.IPEndpointStyleInfo.AccountName = path
			} else {
				up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
				path = path[accountEndIndex+1:]                             // path refers to the portion after the account name now (container & blob names)
			}
		}

		containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
		if containerEndIndex == -1 {                  // Slash not found; path has container name & no blob name
			up.ContainerName = path
		} else {
			up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
			up.BlobName = path[containerEndIndex+1:]    // The blob name is after the container slash
		}
	}

	// Convert the query parameters to a case-sensitive map & trim whitespace
	paramsMap := u.Query()

	up.Snapshot = "" // Assume no snapshot
	if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
		up.Snapshot = snapshotStr[0]
		// If we recognized the query parameter, remove it from the map
		delete(paramsMap, snapshot)
	}
	up.SAS = newSASQueryParameters(paramsMap, true)
	up.UnparsedParams = paramsMap.Encode()
	return up
}

type caseInsensitiveValues url.Values // map[string][]string

func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
	key = strings.ToLower(key)
	for k, v := range values {
		if strings.ToLower(k) == key {
			return v, true
		}
	}
	return []string{}, false
}

// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery
// field contains the SAS, snapshot, and unparsed query parameters.
func (up BlobURLParts) URL() url.URL {
	path := ""
	if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
		path += "/" + up.IPEndpointStyleInfo.AccountName
	}
	// Concatenate container & blob names (if they exist)
	if up.ContainerName != "" {
		path += "/" + up.ContainerName
		if up.BlobName != "" {
			path += "/" + up.BlobName
		}
	}

	rawQuery := up.UnparsedParams

	// If no snapshot is initially provided, fill it in from the SAS query properties to help the user
	if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() {
		up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat)
	}

	// Concatenate the blob snapshot query parameter (if it exists)
	if up.Snapshot != "" {
		if len(rawQuery) > 0 {
			rawQuery += "&"
		}
		rawQuery += snapshot + "=" + up.Snapshot
	}
	sas := up.SAS.Encode()
	if sas != "" {
		if len(rawQuery) > 0 {
			rawQuery += "&"
		}
		rawQuery += sas
	}
	u := url.URL{
		Scheme:   up.Scheme,
		Host:     up.Host,
		Path:     path,
		RawQuery: rawQuery,
	}
	return u
}
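
// For illustration (not part of the vendored file): a hedged parse/modify/rebuild
// round trip; the URL and snapshot timestamp are placeholders.
//
//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob")
//	parts := NewBlobURLParts(*u)
//	parts.Snapshot = "2019-11-01T00:00:00.0000000Z" // illustrative snapshot value
//	snapshotURL := parts.URL() // same container/blob, with a snapshot query parameter added
//	_ = snapshotURL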
@ -1,256 +0,0 @@
package azblob

import (
	"bytes"
	"fmt"
	"strings"
	"time"
)

// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
type BlobSASSignatureValues struct {
	Version            string      `param:"sv"`  // If not specified, this defaults to SASVersion
	Protocol           SASProtocol `param:"spr"` // See the SASProtocol* constants
	StartTime          time.Time   `param:"st"`  // Not specified if IsZero
	ExpiryTime         time.Time   `param:"se"`  // Not specified if IsZero
	SnapshotTime       time.Time
	Permissions        string  `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
	IPRange            IPRange `param:"sip"`
	Identifier         string  `param:"si"`
	ContainerName      string
	BlobName           string // Use "" to create a Container SAS
	CacheControl       string // rscc
	ContentDisposition string // rscd
	ContentEncoding    string // rsce
	ContentLanguage    string // rscl
	ContentType        string // rsct
}

// NewSASQueryParameters uses an account's StorageAccountCredential to sign these signature values to produce
// the proper SAS query parameters.
// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential.
func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountCredential) (SASQueryParameters, error) {
	resource := "c"
	if credential == nil {
		return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without StorageAccountCredential")
	}

	if !v.SnapshotTime.IsZero() {
		resource = "bs"
		// Make sure the permission characters are in the correct order
		perms := &BlobSASPermissions{}
		if err := perms.Parse(v.Permissions); err != nil {
			return SASQueryParameters{}, err
		}
		v.Permissions = perms.String()
	} else if v.BlobName == "" {
		// Make sure the permission characters are in the correct order
		perms := &ContainerSASPermissions{}
		if err := perms.Parse(v.Permissions); err != nil {
			return SASQueryParameters{}, err
		}
		v.Permissions = perms.String()
	} else {
		resource = "b"
		// Make sure the permission characters are in the correct order
		perms := &BlobSASPermissions{}
		if err := perms.Parse(v.Permissions); err != nil {
			return SASQueryParameters{}, err
		}
		v.Permissions = perms.String()
	}
	if v.Version == "" {
		v.Version = SASVersion
	}
	startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)

	signedIdentifier := v.Identifier

	udk := credential.getUDKParams()

	if udk != nil {
		udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
		// This is not an elegant way to combine the two cases, but because signedIdentifier
		// and the user delegation key strings share a place in the string to sign, it is acceptable.
		signedIdentifier = strings.Join([]string{
			udk.SignedOid,
			udk.SignedTid,
			udkStart,
			udkExpiry,
			udk.SignedService,
			udk.SignedVersion,
		}, "\n")
	}

	// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	stringToSign := strings.Join([]string{
		v.Permissions,
		startTime,
		expiryTime,
		getCanonicalName(credential.AccountName(), v.ContainerName, v.BlobName),
		signedIdentifier,
		v.IPRange.String(),
		string(v.Protocol),
		v.Version,
		resource,
		snapshotTime,         // signed timestamp
		v.CacheControl,       // rscc
		v.ContentDisposition, // rscd
		v.ContentEncoding,    // rsce
		v.ContentLanguage,    // rscl
		v.ContentType},       // rsct
		"\n")

	signature := credential.ComputeHMACSHA256(stringToSign)

	p := SASQueryParameters{
		// Common SAS parameters
		version:     v.Version,
		protocol:    v.Protocol,
		startTime:   v.StartTime,
		expiryTime:  v.ExpiryTime,
		permissions: v.Permissions,
		ipRange:     v.IPRange,

		// Container/Blob-specific SAS parameters
		resource:           resource,
		identifier:         v.Identifier,
		cacheControl:       v.CacheControl,
		contentDisposition: v.ContentDisposition,
		contentEncoding:    v.ContentEncoding,
		contentLanguage:    v.ContentLanguage,
		contentType:        v.ContentType,
		snapshotTime:       v.SnapshotTime,

		// Calculated SAS signature
		signature: signature,
	}

	// User delegation SAS specific parameters
	if udk != nil {
		p.signedOid = udk.SignedOid
		p.signedTid = udk.SignedTid
		p.signedStart = udk.SignedStart
		p.signedExpiry = udk.SignedExpiry
		p.signedService = udk.SignedService
		p.signedVersion = udk.SignedVersion
	}

	return p, nil
}
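
// For illustration (not part of the vendored file): a hedged sketch of signing a
// read-only, time-boxed blob SAS; the account name and key are placeholders.
//
//	credential, _ := NewSharedKeyCredential("myaccount", "<base64-key>")
//	sasParams, err := BlobSASSignatureValues{
//		Protocol:      SASProtocolHTTPS,
//		ExpiryTime:    time.Now().UTC().Add(4 * time.Hour),
//		Permissions:   BlobSASPermissions{Read: true}.String(),
//		ContainerName: "mycontainer",
//		BlobName:      "myblob",
//	}.NewSASQueryParameters(credential)
//	if err == nil {
//		sasURL := "https://myaccount.blob.core.windows.net/mycontainer/myblob?" + sasParams.Encode()
//		_ = sasURL // shareable read-only URL until the expiry time
//	}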

// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
func getCanonicalName(account string, containerName string, blobName string) string {
	// Container: "/blob/account/containername"
	// Blob:      "/blob/account/containername/blobname"
	elements := []string{"/blob/", account, "/", containerName}
	if blobName != "" {
		elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1))
	}
	return strings.Join(elements, "")
}

// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues' Permissions field.
type ContainerSASPermissions struct {
	Read, Add, Create, Write, Delete, List bool
}

// String produces the SAS permissions string for an Azure Storage container.
// Call this method to set BlobSASSignatureValues' Permissions field.
func (p ContainerSASPermissions) String() string {
	var b bytes.Buffer
	if p.Read {
		b.WriteRune('r')
	}
	if p.Add {
		b.WriteRune('a')
	}
	if p.Create {
		b.WriteRune('c')
	}
	if p.Write {
		b.WriteRune('w')
	}
	if p.Delete {
		b.WriteRune('d')
	}
	if p.List {
		b.WriteRune('l')
	}
	return b.String()
}

// Parse initializes the ContainerSASPermissions' fields from a string.
func (p *ContainerSASPermissions) Parse(s string) error {
	*p = ContainerSASPermissions{} // Clear the flags
	for _, r := range s {
		switch r {
		case 'r':
			p.Read = true
		case 'a':
			p.Add = true
		case 'c':
			p.Create = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		case 'l':
			p.List = true
		default:
			return fmt.Errorf("invalid permission: '%v'", r)
		}
	}
	return nil
}

// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues' Permissions field.
type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool }

// String produces the SAS permissions string for an Azure Storage blob.
// Call this method to set BlobSASSignatureValues' Permissions field.
func (p BlobSASPermissions) String() string {
	var b bytes.Buffer
	if p.Read {
		b.WriteRune('r')
	}
	if p.Add {
		b.WriteRune('a')
	}
	if p.Create {
		b.WriteRune('c')
	}
	if p.Write {
		b.WriteRune('w')
	}
	if p.Delete {
		b.WriteRune('d')
	}
	return b.String()
}

// Parse initializes the BlobSASPermissions' fields from a string.
func (p *BlobSASPermissions) Parse(s string) error {
	*p = BlobSASPermissions{} // Clear the flags
	for _, r := range s {
		switch r {
		case 'r':
			p.Read = true
		case 'a':
			p.Add = true
		case 'c':
			p.Create = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		default:
			return fmt.Errorf("invalid permission: '%v'", r)
		}
	}
	return nil
}
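
// For illustration (not part of the vendored file): String and Parse round-trip
// through a canonical order, which is why NewSASQueryParameters re-parses the
// caller's permission string before signing.
//
//	perms := &BlobSASPermissions{}
//	if err := perms.Parse("wr"); err == nil {
//		canonical := perms.String() // "rw": characters are emitted in canonical order
//		_ = canonical
//	}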
@ -1,195 +0,0 @@
package azblob

// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes

// ServiceCode values indicate a service failure.
const (
	// ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met.
	ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet"

	// ServiceCodeBlobAlreadyExists means the specified blob already exists.
	ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists"

	// ServiceCodeBlobNotFound means the specified blob does not exist.
	ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound"

	// ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken.
	ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten"

	// ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length.
	ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength"

	// ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks
	// or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks.
	ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit"

	// ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks.
	ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong"

	// ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set.
	ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier"

	// ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time.
	// Examine the HTTP status code and message for more information about the failure.
	ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource"

	// ServiceCodeContainerAlreadyExists means the specified container already exists.
	ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists"

	// ServiceCodeContainerBeingDeleted means the specified container is being deleted.
	ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted"

	// ServiceCodeContainerDisabled means the specified container has been disabled by the administrator.
	ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled"

	// ServiceCodeContainerNotFound means the specified container does not exist.
	ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound"

	// ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit.
	ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit"

	// ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same.
	ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported"

	// ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation.
	ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch"

	// ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or
	// that the operation for AppendBlob requires at least version 2015-02-21.
	ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch"

	// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
	ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch"

	// ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
	// (The misspelled "Eralier" matches the service's actual error code string, so the identifier is preserved.)
	ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"

	// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for an incremental copy request must be a snapshot.
	ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot"

	// ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease.
	ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired"

	// ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid.
	ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock"

	// ServiceCodeInvalidBlobType means the blob type is invalid for this operation.
	ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType"

	// ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded.
	ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId"

	// ServiceCodeInvalidBlockList means the specified block list is invalid.
	ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList"

	// ServiceCodeInvalidOperation means the operation is invalid on a blob snapshot.
	ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation"

	// ServiceCodeInvalidPageRange means the page range specified is invalid.
	ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange"

	// ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation.
	ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType"

	// ServiceCodeInvalidSourceBlobURL means the source URL for an incremental copy request must be a valid Azure Storage blob URL.
	ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl"

	// ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19.
	ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation"

	// ServiceCodeLeaseAlreadyPresent means there is already a lease present.
	ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent"

	// ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again.
	ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken"

	// ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob.
	ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation"

	// ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container.
	ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation"

	// ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container.
	ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation"

	// ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request.
	ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing"

	// ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken.
	ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired"

	// ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed.
	ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged"

	// ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed.
	ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed"

	// ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired.
	ServiceCodeLeaseLost ServiceCodeType = "LeaseLost"

	// ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob.
	ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation"

	// ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container.
	ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation"

	// ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container.
	ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation"

	// ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met.
	ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet"

	// ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation.
	ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation"

	// ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob.
	ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob"

	// ServiceCodePendingCopyOperation means there is currently a pending copy operation.
	ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation"

	// ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than the snapshot query parameter value.
	ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer"

	// ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found.
	ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound"

	// ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot.
	ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported"

	// ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met.
	ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet"

	// ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number.
	ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge"

	// ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded.
	ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded"

	// ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded.
	ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded"

	// ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots.
	ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent"

	// ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met.
	ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet"

	// ServiceCodeSystemInUse means this blob is in use by the system.
	ServiceCodeSystemInUse ServiceCodeType = "SystemInUse"

	// ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met.
	ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet"

	// ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites.
	ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite"

	// ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated.
	ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated"

	// ServiceCodeBlobArchived means this operation is not permitted on an archived blob.
	ServiceCodeBlobArchived ServiceCodeType = "BlobArchived"

	// ServiceCodeBlobNotArchived means this blob is currently not in the archived state.
	ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived"
)
vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go (generated, vendored)
@ -1,8 +0,0 @@
package azblob

// StorageAccountCredential is a wrapper interface for SharedKeyCredential and UserDelegationCredential
type StorageAccountCredential interface {
	AccountName() string
	ComputeHMACSHA256(message string) (base64String string)
	getUDKParams() *UserDelegationKey
}
@ -1,128 +0,0 @@
package azblob

import (
	"context"
	"io"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

const (
	// AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock.
	AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB

	// AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob.
	AppendBlobMaxBlocks = 50000
)

// AppendBlobURL defines a set of operations applicable to append blobs.
type AppendBlobURL struct {
	BlobURL
	abClient appendBlobClient
}

// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline.
func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL {
	blobClient := newBlobClient(url, p)
	abClient := newAppendBlobClient(url, p)
	return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient}
}

// WithPipeline creates a new AppendBlobURL object identical to the source but with the specified request policy pipeline.
func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL {
	return NewAppendBlobURL(ab.blobClient.URL(), p)
}

// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot, returning a URL to the base blob.
func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
	p := NewBlobURLParts(ab.URL())
	p.Snapshot = snapshot
	return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
}

// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
	return ab.abClient.Create(ctx, 0, nil,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
}

// AppendBlock writes a stream as a new block of data at the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the HTTP client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	return ab.abClient.AppendBlock(ctx, body, count, nil,
		transactionalMD5, ac.LeaseAccessConditions.pointers(),
		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// AppendBlockFromURL copies a new block of data from a source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockFromURLResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers()
	return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
		transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(),
		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}
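
// For illustration (not part of the vendored file): a hedged append-blob sketch;
// the URL is a placeholder and p is a pipeline built as in the earlier sketches.
//
//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/logs/app.log")
//	abURL := NewAppendBlobURL(*u, p)
//	_, _ = abURL.Create(context.Background(), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
//	_, err := abURL.AppendBlock(context.Background(), bytes.NewReader([]byte("log line\n")),
//		AppendBlobAccessConditions{}, nil)
//	// err is non-nil if the append failed or an access condition was not met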

type AppendBlobAccessConditions struct {
	ModifiedAccessConditions
	LeaseAccessConditions
	AppendPositionAccessConditions
}

// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
type AppendPositionAccessConditions struct {
	// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
	// only if the append position is equal to a value.
	// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
	// IfAppendPositionEqual>0 means the 'IfAppendPositionEqual' header is specified with its value.
	// IfAppendPositionEqual==-1 means the 'IfAppendPositionEqual' header is specified with a value of 0.
	IfAppendPositionEqual int64

	// IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
	// only if the append blob's size is less than or equal to a value.
	// IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
	// IfMaxSizeLessThanOrEqual>0 means the 'IfMaxSizeLessThanOrEqual' header is specified with its value.
	// IfMaxSizeLessThanOrEqual==-1 means the 'IfMaxSizeLessThanOrEqual' header is specified with a value of 0.
	IfMaxSizeLessThanOrEqual int64
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
	var zero int64 // defaults to 0
	switch ac.IfAppendPositionEqual {
	case -1:
		iape = &zero
	case 0:
		iape = nil
	default:
		iape = &ac.IfAppendPositionEqual
	}

	switch ac.IfMaxSizeLessThanOrEqual {
	case -1:
		imsltoe = &zero
	case 0:
		imsltoe = nil
	default:
		imsltoe = &ac.IfMaxSizeLessThanOrEqual
	}
	return
}
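
// For illustration (not part of the vendored file): the -1 sentinel exists because
// the zero value of each field already means "header not set". A hedged sketch:
//
//	ac := AppendPositionAccessConditions{IfAppendPositionEqual: -1} // -1 encodes "header present with value 0"
//	iape, imsltoe := ac.pointers()
//	// iape points at 0 (append only succeeds on an empty blob); imsltoe is nil (no max-size condition)
//	_, _ = iape, imsltoe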
@ -1,216 +0,0 @@
||||
package azblob |
||||
|
||||
import ( |
||||
"context" |
||||
"net/url" |
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
|
||||
type BlobURL struct { |
||||
blobClient blobClient |
||||
} |
||||
|
||||
// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
|
||||
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL { |
||||
blobClient := newBlobClient(url, p) |
||||
return BlobURL{blobClient: blobClient} |
||||
} |
||||
|
||||
// URL returns the URL endpoint used by the BlobURL object.
|
||||
func (b BlobURL) URL() url.URL { |
||||
return b.blobClient.URL() |
||||
} |
||||
|
||||
// String returns the URL as a string.
|
||||
func (b BlobURL) String() string { |
||||
u := b.URL() |
||||
return u.String() |
||||
} |
||||
|
||||
// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
|
||||
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL { |
||||
return NewBlobURL(b.blobClient.URL(), p) |
||||
} |
||||
|
||||
// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
|
||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
||||
func (b BlobURL) WithSnapshot(snapshot string) BlobURL { |
||||
p := NewBlobURLParts(b.URL()) |
||||
p.Snapshot = snapshot |
||||
return NewBlobURL(p.URL(), b.blobClient.Pipeline()) |
||||
} |
||||
|
||||
// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
|
||||
func (b BlobURL) ToAppendBlobURL() AppendBlobURL { |
||||
return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline()) |
||||
} |
||||
|
||||
// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline.
|
||||
func (b BlobURL) ToBlockBlobURL() BlockBlobURL { |
||||
return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline()) |
||||
} |
||||
|
||||
// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline.
|
||||
func (b BlobURL) ToPageBlobURL() PageBlobURL { |
||||
return NewPageBlobURL(b.URL(), b.blobClient.Pipeline()) |
||||
} |
||||
|
||||
// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
|
||||
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
|
||||
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) { |
||||
var xRangeGetContentMD5 *bool |
||||
if rangeGetContentMD5 { |
||||
xRangeGetContentMD5 = &rangeGetContentMD5 |
||||
} |
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() |
||||
dr, err := b.blobClient.Download(ctx, nil, nil, |
||||
httpRange{offset: offset, count: count}.pointers(), |
||||
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, |
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &DownloadResponse{ |
||||
b: b, |
||||
r: dr, |
||||
ctx: ctx, |
||||
getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()}, |
||||
}, err |
||||
} |
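
// Editor's note: a minimal sketch, not part of the original source, showing a
// whole-blob read with Download. CountToEnd (0) requests everything from the
// offset onward, and the response's Body method wraps the stream with retry
// logic. ctx and blobURL are assumed to exist; "io/ioutil" would need to be
// imported.
//
//	resp, err := blobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
//	if err != nil {
//		return err
//	}
//	body := resp.Body(RetryReaderOptions{MaxRetryRequests: 3})
//	defer body.Close()
//	data, err := ioutil.ReadAll(body)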

// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
	return b.blobClient.Undelete(ctx, nil, nil)
}

// SetTier sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering, see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
	return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers())
}

// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.SetHTTPHeaders(ctx, nil,
		&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		&h.ContentDisposition, nil)
}

// SetMetadata changes a blob's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) {
	// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot
	// query parameter, because checking this would be a performance hit for a VERY unusual path and the common
	// case should not suffer that cost.
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
}

// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be
// between 15 and 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.RenewLease(ctx, leaseID, nil,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.ReleaseLease(ctx, leaseID, nil,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakNaturally (-1)
// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
		nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
const LeaseBreakNaturally = -1

// leasePeriodPointer returns nil for LeaseBreakNaturally; otherwise it returns a pointer to the period.
func leasePeriodPointer(period int32) (p *int32) {
	if period != LeaseBreakNaturally {
		p = &period
	}
	return p
}
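
// Editor's note: a sketch, not part of the original source, of a typical lease
// lifecycle built from the methods above: acquire a 15-second lease under a
// caller-chosen ID, do guarded work, then release it. ctx, blobURL, and
// leaseID (a GUID string) are assumed to exist.
//
//	_, err := blobURL.AcquireLease(ctx, leaseID, 15, ModifiedAccessConditions{})
//	if err != nil {
//		return err
//	}
//	// ... writes guarded by the lease go here ...
//	_, err = blobURL.ReleaseLease(ctx, leaseID, ModifiedAccessConditions{})
//
// To force an early end from another client, BreakLease with LeaseBreakNaturally
// breaks the lease using service semantics, as documented above.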

// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
	dstLeaseID := dstac.LeaseAccessConditions.pointers()

	return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
		srcIfModifiedSince, srcIfUnmodifiedSince,
		srcIfMatchETag, srcIfNoneMatchETag,
		dstIfModifiedSince, dstIfUnmodifiedSince,
		dstIfMatchETag, dstIfNoneMatchETag,
		dstLeaseID, nil)
}

// AbortCopyFromURL stops a pending copy that was previously started and leaves the destination blob with zero length and full metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) {
	return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil)
}
@ -1,162 +0,0 @@
package azblob

import (
	"context"
	"io"
	"net/url"

	"encoding/base64"
	"encoding/binary"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

const (
	// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
	BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB

	// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
	BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB

	// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
	BlockBlobMaxBlocks = 50000
)

// BlockBlobURL defines a set of operations applicable to block blobs.
type BlockBlobURL struct {
	BlobURL
	bbClient blockBlobClient
}

// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
	blobClient := newBlobClient(url, p)
	bbClient := newBlockBlobClient(url, p)
	return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
}

// WithPipeline creates a new BlockBlobURL object identical to the source but with the specified request policy pipeline.
func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL {
	return NewBlockBlobURL(bb.blobClient.URL(), p)
}

// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot, returning a URL to the base blob.
func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
	p := NewBlobURLParts(bb.URL())
	p.Snapshot = snapshot
	return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
}

// Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with Upload; the content of the existing blob is overwritten with the new content. To
// perform a partial update of a block blob, use StageBlock and CommitBlockList.
// This method panics if the stream is not at position 0.
// Note that the HTTP client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	return bb.bbClient.Upload(ctx, body, count, nil,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
		&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil)
}

// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the HTTP client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, ac.pointers(), nil)
}

// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from the specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
	return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}

// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
	metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
		&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
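
// Editor's note: a sketch, not part of the original source, of a staged upload
// using StageBlock and CommitBlockList as documented above. Block IDs must be
// base64-encoded and all the same length before encoding; a fixed-width counter
// satisfies that. ctx, bbURL, and chunks (a [][]byte) are assumed to exist, and
// "bytes", "encoding/base64", and "fmt" would need to be imported.
//
//	ids := make([]string, 0, len(chunks))
//	for i, chunk := range chunks {
//		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%08d", i)))
//		if _, err := bbURL.StageBlock(ctx, id, bytes.NewReader(chunk), LeaseAccessConditions{}, nil); err != nil {
//			return err
//		}
//		ids = append(ids, id)
//	}
//	_, err := bbURL.CommitBlockList(ctx, ids, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})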

// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
	return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////

// BlockID is a 64-byte value that identifies a block within a blob.
type BlockID [64]byte

// ToBase64 returns the base64 encoding of the full 64-byte block ID.
func (blockID BlockID) ToBase64() string {
	return base64.StdEncoding.EncodeToString(blockID[:])
}

// FromBase64 decodes a base64 string into the block ID, zeroing it first.
func (blockID *BlockID) FromBase64(s string) error {
	*blockID = BlockID{} // Zero out the block ID
	_, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s))
	return err
}
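
// Editor's note: a short sketch, not part of the original source, of the
// BlockID helpers above, which always encode the full fixed-length array and
// therefore produce uniformly sized base64 block IDs.
//
//	var id BlockID
//	copy(id[:], "block-0001")
//	s := id.ToBase64() // encodes all 64 bytes
//	var decoded BlockID
//	err := decoded.FromBase64(s)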

//////////////////////////////////////////////////////////////////////////////////////////////////////////////

// uuidBlockID packs a UUID followed by a big-endian block number into a BlockID.
type uuidBlockID BlockID

func (ubi uuidBlockID) UUID() uuid {
	u := uuid{}
	copy(u[:], ubi[:len(u)])
	return u
}

func (ubi uuidBlockID) Number() uint32 {
	return binary.BigEndian.Uint32(ubi[len(uuid{}):])
}

func newUuidBlockID(u uuid) uuidBlockID {
	ubi := uuidBlockID{}     // Create a new uuidBlockID
	copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it
	// Block number defaults to 0
	return ubi
}

func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID {
	copy(ubi[:len(u)], u[:])
	return ubi
}

func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
	binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID
	return ubi                                                 // Return the passed-in copy
}

func (ubi uuidBlockID) ToBase64() string {
	return BlockID(ubi).ToBase64()
}

func (ubi *uuidBlockID) FromBase64(s string) error {
	return (*BlockID)(ubi).FromBase64(s)
}
@ -1,295 +0,0 @@
package azblob

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type ContainerURL struct {
	client containerClient
}

// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
	client := newContainerClient(url, p)
	return ContainerURL{client: client}
}

// URL returns the URL endpoint used by the ContainerURL object.
func (c ContainerURL) URL() url.URL {
	return c.client.URL()
}

// String returns the URL as a string.
func (c ContainerURL) String() string {
	u := c.URL()
	return u.String()
}

// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline.
func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL {
	return NewContainerURL(c.URL(), p)
}

// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's
// NewBlobURL method.
func (c ContainerURL) NewBlobURL(blobName string) BlobURL {
	blobURL := appendToURLPath(c.URL(), blobName)
	return NewBlobURL(blobURL, c.client.Pipeline())
}

// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's
// NewAppendBlobURL method.
func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL {
	blobURL := appendToURLPath(c.URL(), blobName)
	return NewAppendBlobURL(blobURL, c.client.Pipeline())
}

// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's
// NewBlockBlobURL method.
func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL {
	blobURL := appendToURLPath(c.URL(), blobName)
	return NewBlockBlobURL(blobURL, c.client.Pipeline())
}

// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's
// NewPageBlobURL method.
func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL {
	blobURL := appendToURLPath(c.URL(), blobName)
	return NewPageBlobURL(blobURL, c.client.Pipeline())
}

// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
	return c.client.Create(ctx, nil, metadata, publicAccessType, nil)
}

// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
	}

	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
	return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, nil)
}

// GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) {
	// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
	// This allows us to not expose a GetMetadata method at all, simplifying the API.
	return c.client.GetProperties(ctx, nil, ac.pointers(), nil)
}

// SetMetadata sets the container's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
	if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
		return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch access conditions must have their default values because they are ignored by the blob service")
	}
	ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
	return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
}

// GetAccessPolicy returns the container's access policy. The access policy indicates whether the container's blobs may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
	return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil)
}

// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission struct {
	Read, Add, Create, Write, Delete, List bool
}

// String produces the access policy permission string for an Azure Storage container.
// Call this method to set AccessPolicy's Permission field.
func (p AccessPolicyPermission) String() string {
	var b bytes.Buffer
	if p.Read {
		b.WriteRune('r')
	}
	if p.Add {
		b.WriteRune('a')
	}
	if p.Create {
		b.WriteRune('c')
	}
	if p.Write {
		b.WriteRune('w')
	}
	if p.Delete {
		b.WriteRune('d')
	}
	if p.List {
		b.WriteRune('l')
	}
	return b.String()
}

// Parse initializes the AccessPolicyPermission's fields from a string.
func (p *AccessPolicyPermission) Parse(s string) error {
	*p = AccessPolicyPermission{} // Clear the flags
	for _, r := range s {
		switch r {
		case 'r':
			p.Read = true
		case 'a':
			p.Add = true
		case 'c':
			p.Create = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		case 'l':
			p.List = true
		default:
			return fmt.Errorf("invalid permission: '%v'", r)
		}
	}
	return nil
}
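
// Editor's note: a short sketch, not part of the original source, of the
// String/Parse round-trip above. String emits the flags in the fixed order
// "racwdl", which is the order the service expects.
//
//	perm := AccessPolicyPermission{Read: true, Write: true, List: true}
//	s := perm.String() // "rwl"
//	var parsed AccessPolicyPermission
//	if err := parsed.Parse(s); err != nil {
//		return err
//	}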

// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
	ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
	}
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
	return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
		accessType, ifModifiedSince, ifUnmodifiedSince, nil)
}

// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 and 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
		ifModifiedSince, ifUnmodifiedSince, nil)
}

// RenewLease renews the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

// ReleaseLease releases the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

// BreakLease breaks the container's previously-acquired lease (if it exists).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
}

// ChangeLease changes the container's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
// previously-returned Marker) to get the next segment.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
	prefix, include, maxResults := o.pointers()
	return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
}
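
// Editor's note: a sketch, not part of the original source, of the marker
// pagination loop described above. Marker's zero value starts enumeration from
// the beginning, and its NotDone method reports whether another segment
// remains. ctx and containerURL are assumed to exist, handleBlob is a
// hypothetical helper, and the response field names are taken from this
// version of the SDK.
//
//	for marker := (Marker{}); marker.NotDone(); {
//		segment, err := containerURL.ListBlobsFlatSegment(ctx, marker, ListBlobsSegmentOptions{})
//		if err != nil {
//			return err
//		}
//		for _, blob := range segment.Segment.BlobItems {
//			handleBlob(blob.Name)
//		}
//		marker = segment.NextMarker
//	}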

// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the
// previously-returned Marker) to get the next segment.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
	if o.Details.Snapshots {
		return nil, errors.New("snapshots are not supported in this listing operation")
	}
	prefix, include, maxResults := o.pointers()
	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
}

// ListBlobsSegmentOptions defines options available when calling ListBlobs.
type ListBlobsSegmentOptions struct {
	Details BlobListingDetails // No IncludeType header is produced if ""
	Prefix  string             // No Prefix header is produced if ""

	// MaxResults sets the maximum desired results you want the service to return. Note that the
	// service may return fewer results than requested.
	// MaxResults=0 means no 'MaxResults' header specified.
	MaxResults int32
}

func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
	if o.Prefix != "" {
		prefix = &o.Prefix
	}
	include = o.Details.slice()
	if o.MaxResults != 0 {
		maxResults = &o.MaxResults
	}
	return
}

// BlobListingDetails indicates what additional information the service should return with each blob.
type BlobListingDetails struct {
	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool
}

// slice produces the Include query parameter's values.
func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
	items := []ListBlobsIncludeItemType{}
	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
	if d.Copy {
		items = append(items, ListBlobsIncludeItemCopy)
	}
	if d.Deleted {
		items = append(items, ListBlobsIncludeItemDeleted)
	}
	if d.Metadata {
		items = append(items, ListBlobsIncludeItemMetadata)
	}
	if d.Snapshots {
		items = append(items, ListBlobsIncludeItemSnapshots)
	}
	if d.UncommittedBlobs {
		items = append(items, ListBlobsIncludeItemUncommittedblobs)
	}
	return items
}
@ -1,223 +0,0 @@
package azblob

import (
	"context"
	"fmt"
	"io"
	"net/url"
	"strconv"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

const (
	// PageBlobPageBytes indicates the number of bytes in a page (512).
	PageBlobPageBytes = 512

	// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to UploadPages.
	PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
)

// PageBlobURL defines a set of operations applicable to page blobs.
type PageBlobURL struct {
	BlobURL
	pbClient pageBlobClient
}

// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
	blobClient := newBlobClient(url, p)
	pbClient := newPageBlobClient(url, p)
	return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
}

// WithPipeline creates a new PageBlobURL object identical to the source but with the specified request policy pipeline.
func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL {
	return NewPageBlobURL(pb.blobClient.URL(), p)
}

// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot, returning a URL to the base blob.
func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
	p := NewBlobURLParts(pb.URL())
	p.Snapshot = snapshot
	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
}

// Create creates a page blob of the specified length. Call UploadPages to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.Create(ctx, 0, size, nil,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
		metadata, ac.LeaseAccessConditions.pointers(),
		&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
}

// UploadPages writes one or more pages to the page blob. The start offset and the stream size must be multiples of 512 bytes.
// This method panics if the stream is not at position 0.
// Note that the HTTP client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) {
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
	return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil,
		PageRange{Start: offset, End: offset + count - 1}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
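
// Editor's note: a sketch, not part of the original source, of writing one
// page-aligned region with UploadPages. Both the offset and the payload length
// must be multiples of PageBlobPageBytes (512), as documented above. ctx and
// pbURL are assumed to exist, and "bytes" would need to be imported.
//
//	page := make([]byte, 2*PageBlobPageBytes) // 1024 bytes, aligned
//	_, err := pbURL.UploadPages(ctx, 0, bytes.NewReader(page), PageBlobAccessConditions{}, nil)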

// UploadPagesFromURL copies one or more pages from a source URL to the page blob.
// The sourceOffset specifies the start offset of the source data to copy from.
// The destOffset specifies the start offset in the destination page blob that the data will be written to.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*PageBlobUploadPagesFromURLResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers()
	return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
		*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(),
		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}

// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
	return pb.pbClient.ClearPages(ctx, 0, nil,
		PageRange{Start: offset, End: offset + count - 1}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
		ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.GetPageRanges(ctx, nil, nil,
		httpRange{offset: offset, count: count}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
		httpRange{offset: offset, count: count}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil)
}

// Resize resizes the page blob to the specified size (which must be a multiple of 512).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// UpdateSequenceNumber updates the page blob's sequence number.
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
	ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
	sn := &sequenceNumber
	if action == SequenceNumberActionIncrement {
		sn = nil
	}
	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
		sn, nil)
}

// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
// The snapshot is copied such that only the differential changes since the previously copied snapshot are transferred to the destination.
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	qp := source.Query()
	qp.Set("snapshot", snapshot)
	source.RawQuery = qp.Encode()
	return pb.pbClient.CopyIncremental(ctx, source.String(), nil,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// pointers returns the PageRange formatted as the value for an HTTP 'Range'-style header.
func (pr PageRange) pointers() *string {
	endOffset := strconv.FormatInt(int64(pr.End), 10)
	asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
	return &asString
}

// PageBlobAccessConditions identifies page blob-specific access conditions which you may optionally set.
type PageBlobAccessConditions struct {
	ModifiedAccessConditions
	LeaseAccessConditions
	SequenceNumberAccessConditions
}

// SequenceNumberAccessConditions identifies page blob-specific access conditions which you may optionally set.
type SequenceNumberAccessConditions struct {
	// IfSequenceNumberLessThan ensures that the page blob operation succeeds
	// only if the blob's sequence number is less than a value.
	// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
	// IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value.
	// IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0.
	IfSequenceNumberLessThan int64

	// IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds
	// only if the blob's sequence number is less than or equal to a value.
	// IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified.
	// IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value.
	// IfSequenceNumberLessThanOrEqual==-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0.
	IfSequenceNumberLessThanOrEqual int64

	// IfSequenceNumberEqual ensures that the page blob operation succeeds
	// only if the blob's sequence number is equal to a value.
	// IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified.
	// IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value.
	// IfSequenceNumberEqual==-1 means 'IfSequenceNumberEqual' header specified with a value of 0.
	IfSequenceNumberEqual int64
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
	var zero int64 // Defaults to 0
	switch ac.IfSequenceNumberLessThan {
	case -1:
		snlt = &zero
	case 0:
		snlt = nil
	default:
		snlt = &ac.IfSequenceNumberLessThan
	}

	switch ac.IfSequenceNumberLessThanOrEqual {
	case -1:
		snltoe = &zero
	case 0:
		snltoe = nil
	default:
		snltoe = &ac.IfSequenceNumberLessThanOrEqual
	}
	switch ac.IfSequenceNumberEqual {
	case -1:
		sne = &zero
	case 0:
		sne = nil
	default:
		sne = &ac.IfSequenceNumberEqual
	}
	return
}
@ -1,145 +0,0 @@
package azblob

import (
	"context"
	"net/url"
	"strings"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

const (
	// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
	ContainerNameRoot = "$root"

	// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
	ContainerNameLogs = "$logs"
)

// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers.
type ServiceURL struct {
	client serviceClient
}

// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL {
	client := newServiceClient(primaryURL, p)
	return ServiceURL{client: client}
}

// GetUserDelegationCredential obtains a UserDelegationCredential using the base ServiceURL object.
// OAuth is required for this call, as well as a role that can delegate access to the storage account.
func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInfo, timeout *int32, requestID *string) (UserDelegationCredential, error) {
	sc := newServiceClient(s.client.url, s.client.p)
	udk, err := sc.GetUserDelegationKey(ctx, info, timeout, requestID)
	if err != nil {
		return UserDelegationCredential{}, err
	}
	return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil
}

// URL returns the URL endpoint used by the ServiceURL object.
func (s ServiceURL) URL() url.URL {
	return s.client.URL()
}

// String returns the URL as a string.
func (s ServiceURL) String() string {
	u := s.URL()
	return u.String()
}

// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline.
func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL {
	return NewServiceURL(s.URL(), p)
}

// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of
// ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL.
// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's
// NewContainerURL method.
func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
	containerURL := appendToURLPath(s.URL(), containerName)
	return NewContainerURL(containerURL, s.client.Pipeline())
}

// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required).
func appendToURLPath(u url.URL, name string) url.URL {
	// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
	// When you call url.Parse() this is what you'll get:
	//     Scheme: "https"
	//     Opaque: ""
	//       User: nil
	//       Host: "ms.com"
	//       Path: "/a/b/" (this should start with a '/' and it might or might not have a trailing slash)
	//    RawPath: ""
	// ForceQuery: false
	//   RawQuery: "k1=v1&k2=v2"
	//   Fragment: "f"
	if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
		u.Path += "/" // Append "/" to end before appending name
	}
	u.Path += name
	return u
}

// ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
// After getting a segment, process it, and then call ListContainersSegment again (passing the
// previously-returned Marker) to get the next segment. For more information, see
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
	prefix, include, maxResults := o.pointers()
	return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
}

// ListContainersSegmentOptions defines options available when calling ListContainersSegment.
type ListContainersSegmentOptions struct {
	Detail     ListContainersDetail // No IncludeType header is produced if ""
	Prefix     string               // No Prefix header is produced if ""
	MaxResults int32                // 0 means unspecified
	// TODO: update swagger to generate this type?
}

func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) {
	if o.Prefix != "" {
		prefix = &o.Prefix
	}
	if o.MaxResults != 0 {
		maxResults = &o.MaxResults
	}
	include = ListContainersIncludeType(o.Detail.string())
	return
}

// ListContainersDetail indicates what additional information the service should return with each container.
type ListContainersDetail struct {
	// Tells the service whether to return metadata for each container.
	Metadata bool
}

// string produces the Include query parameter's value.
func (d *ListContainersDetail) string() string {
	items := make([]string, 0, 1)
	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
	if d.Metadata {
		items = append(items, string(ListContainersIncludeMetadata))
	}
	if len(items) > 0 {
		return strings.Join(items, ",")
	}
	return string(ListContainersIncludeNone)
}

// GetProperties returns the properties of the Blob service.
func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
	return bsu.client.GetProperties(ctx, nil, nil)
}

// SetProperties sets the properties of the Blob service.
func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
	return bsu.client.SetProperties(ctx, properties, nil, nil)
}

// GetStatistics retrieves statistics related to replication for the Blob service.
func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
	return bsu.client.GetStatistics(ctx, nil, nil)
}
38 vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go generated vendored
@ -1,38 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"crypto/hmac" |
||||
"crypto/sha256" |
||||
"encoding/base64" |
||||
) |
||||
|
||||
// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's name and a user delegation key from it
|
||||
func NewUserDelegationCredential(accountName string, key UserDelegationKey) UserDelegationCredential { |
||||
return UserDelegationCredential{ |
||||
accountName: accountName, |
||||
accountKey: key, |
||||
} |
||||
} |
||||
|
||||
type UserDelegationCredential struct { |
||||
accountName string |
||||
accountKey UserDelegationKey |
||||
} |
||||
|
||||
// AccountName returns the Storage account's name
|
||||
func (f UserDelegationCredential) AccountName() string { |
||||
return f.accountName |
||||
} |
||||
|
||||
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS using the user delegation key.
|
||||
func (f UserDelegationCredential) ComputeHMACSHA256(message string) (base64String string) { |
||||
bytes, _ := base64.StdEncoding.DecodeString(f.accountKey.Value) |
||||
h := hmac.New(sha256.New, bytes) |
||||
h.Write([]byte(message)) |
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil)) |
||||
} |
||||
|
||||
// Private method to return important parameters for NewSASQueryParameters
|
||||
func (f UserDelegationCredential) getUDKParams() *UserDelegationKey { |
||||
return &f.accountKey |
||||
} |
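The signing primitive above reduces to decode-key / HMAC-SHA256 / encode-digest. A standalone sketch (the key and message literals are illustrative); note that, unlike this sketch, the vendored code discards the base64 decode error:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// sign mirrors ComputeHMACSHA256: base64-decode the key, HMAC the message, base64-encode the digest.
func sign(base64Key, message string) (string, error) {
	key, err := base64.StdEncoding.DecodeString(base64Key)
	if err != nil {
		return "", err
	}
	h := hmac.New(sha256.New, key)
	h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sig, err := sign(base64.StdEncoding.EncodeToString([]byte("secret")), "payload")
	if err != nil {
		panic(err)
	}
	fmt.Println(sig)
}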
@ -1,3 +0,0 @@ |
||||
package azblob |
||||
|
||||
const serviceLibVersion = "0.7" |
55
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
generated
vendored
@ -1,55 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"context" |
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
// Credential represent any credential type; it is used to create a credential policy Factory.
|
||||
type Credential interface { |
||||
pipeline.Factory |
||||
credentialMarker() |
||||
} |
||||
|
||||
type credentialFunc pipeline.FactoryFunc |
||||
|
||||
func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { |
||||
return f(next, po) |
||||
} |
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (credentialFunc) credentialMarker() {} |
||||
|
||||
//////////////////////////////
|
||||
|
||||
// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resources
|
||||
// or for use with Shared Access Signatures (SAS).
|
||||
func NewAnonymousCredential() Credential { |
||||
return anonymousCredentialFactory |
||||
} |
||||
|
||||
var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton
|
||||
|
||||
// anonymousCredentialPolicyFactory is the credential's policy factory.
|
||||
type anonymousCredentialPolicyFactory struct { |
||||
} |
||||
|
||||
// New creates a credential policy object.
|
||||
func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { |
||||
return &anonymousCredentialPolicy{next: next} |
||||
} |
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (*anonymousCredentialPolicyFactory) credentialMarker() {} |
||||
|
||||
// anonymousCredentialPolicy is the credential's policy object.
|
||||
type anonymousCredentialPolicy struct { |
||||
next pipeline.Policy |
||||
} |
||||
|
||||
// Do implements the credential's policy interface.
|
||||
func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { |
||||
// For anonymous credentials, this is effectively a no-op
|
||||
return p.next.Do(ctx, request) |
||||
} |
205
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
generated
vendored
@ -1,205 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"crypto/hmac" |
||||
"crypto/sha256" |
||||
"encoding/base64" |
||||
"errors" |
||||
"net/http" |
||||
"net/url" |
||||
"sort" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
|
||||
// storage account's name and either its primary or secondary key.
|
||||
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { |
||||
bytes, err := base64.StdEncoding.DecodeString(accountKey) |
||||
if err != nil { |
||||
return &SharedKeyCredential{}, err |
||||
} |
||||
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil |
||||
} |
||||
|
||||
// SharedKeyCredential contains an account's name and its primary or secondary key.
|
||||
// It is immutable, making it shareable and goroutine-safe.
|
||||
type SharedKeyCredential struct { |
||||
// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
|
||||
accountName string |
||||
accountKey []byte |
||||
} |
||||
|
||||
// AccountName returns the Storage account's name.
|
||||
func (f SharedKeyCredential) AccountName() string { |
||||
return f.accountName |
||||
} |
||||
|
||||
func (f SharedKeyCredential) getAccountKey() []byte { |
||||
return f.accountKey |
||||
} |
||||
|
||||
// getUDKParams is a no-op that satisfies the StorageAccountCredential interface.
|
||||
func (f SharedKeyCredential) getUDKParams() *UserDelegationKey { |
||||
return nil |
||||
} |
||||
|
||||
// New creates a credential policy object.
|
||||
func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { |
||||
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { |
||||
// Add an x-ms-date header if it doesn't already exist
|
||||
if d := request.Header.Get(headerXmsDate); d == "" { |
||||
request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)} |
||||
} |
||||
stringToSign, err := f.buildStringToSign(request) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
signature := f.ComputeHMACSHA256(stringToSign) |
||||
authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "") |
||||
request.Header[headerAuthorization] = []string{authHeader} |
||||
|
||||
response, err := next.Do(ctx, request) |
||||
if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden { |
||||
// Service failed to authenticate request, log it
|
||||
po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") |
||||
} |
||||
return response, err |
||||
}) |
||||
} |
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (*SharedKeyCredential) credentialMarker() {} |
||||
|
||||
// Constants ensuring that header names are correctly spelled and consistently cased.
|
||||
const ( |
||||
headerAuthorization = "Authorization" |
||||
headerCacheControl = "Cache-Control" |
||||
headerContentEncoding = "Content-Encoding" |
||||
headerContentDisposition = "Content-Disposition" |
||||
headerContentLanguage = "Content-Language" |
||||
headerContentLength = "Content-Length" |
||||
headerContentMD5 = "Content-MD5" |
||||
headerContentType = "Content-Type" |
||||
headerDate = "Date" |
||||
headerIfMatch = "If-Match" |
||||
headerIfModifiedSince = "If-Modified-Since" |
||||
headerIfNoneMatch = "If-None-Match" |
||||
headerIfUnmodifiedSince = "If-Unmodified-Since" |
||||
headerRange = "Range" |
||||
headerUserAgent = "User-Agent" |
||||
headerXmsDate = "x-ms-date" |
||||
headerXmsVersion = "x-ms-version" |
||||
) |
||||
|
||||
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
|
||||
func (f SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) { |
||||
h := hmac.New(sha256.New, f.accountKey) |
||||
h.Write([]byte(message)) |
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil)) |
||||
} |
||||
|
||||
func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) { |
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
||||
headers := request.Header |
||||
contentLength := headers.Get(headerContentLength) |
||||
if contentLength == "0" { |
||||
contentLength = "" |
||||
} |
||||
|
||||
canonicalizedResource, err := f.buildCanonicalizedResource(request.URL) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
stringToSign := strings.Join([]string{ |
||||
request.Method, |
||||
headers.Get(headerContentEncoding), |
||||
headers.Get(headerContentLanguage), |
||||
contentLength, |
||||
headers.Get(headerContentMD5), |
||||
headers.Get(headerContentType), |
||||
"", // Empty date because x-ms-date is expected (as per web page above)
|
||||
headers.Get(headerIfModifiedSince), |
||||
headers.Get(headerIfMatch), |
||||
headers.Get(headerIfNoneMatch), |
||||
headers.Get(headerIfUnmodifiedSince), |
||||
headers.Get(headerRange), |
||||
buildCanonicalizedHeader(headers), |
||||
canonicalizedResource, |
||||
}, "\n") |
||||
return stringToSign, nil |
||||
} |
||||
|
||||
func buildCanonicalizedHeader(headers http.Header) string { |
||||
cm := map[string][]string{} |
||||
for k, v := range headers { |
||||
headerName := strings.TrimSpace(strings.ToLower(k)) |
||||
if strings.HasPrefix(headerName, "x-ms-") { |
||||
cm[headerName] = v // NOTE: the value must not have any whitespace around it.
|
||||
} |
||||
} |
||||
if len(cm) == 0 { |
||||
return "" |
||||
} |
||||
|
||||
keys := make([]string, 0, len(cm)) |
||||
for key := range cm { |
||||
keys = append(keys, key) |
||||
} |
||||
sort.Strings(keys) |
||||
ch := bytes.NewBufferString("") |
||||
for i, key := range keys { |
||||
if i > 0 { |
||||
ch.WriteRune('\n') |
||||
} |
||||
ch.WriteString(key) |
||||
ch.WriteRune(':') |
||||
ch.WriteString(strings.Join(cm[key], ",")) |
||||
} |
||||
return string(ch.Bytes()) |
||||
} |
||||
|
||||
func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { |
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
||||
cr := bytes.NewBufferString("/") |
||||
cr.WriteString(f.accountName) |
||||
|
||||
if len(u.Path) > 0 { |
||||
// Any portion of the CanonicalizedResource string that is derived from
|
||||
// the resource's URI should be encoded exactly as it is in the URI.
|
||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
||||
cr.WriteString(u.EscapedPath()) |
||||
} else { |
||||
// a slash is required to indicate the root path
|
||||
cr.WriteString("/") |
||||
} |
||||
|
||||
// params is a map[string][]string; the param name is the key and its values are the []string
|
||||
params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
|
||||
if err != nil { |
||||
return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code") |
||||
} |
||||
|
||||
if len(params) > 0 { // There is at least 1 query parameter
|
||||
paramNames := []string{} // We use this to sort the parameter key names
|
||||
for paramName := range params { |
||||
paramNames = append(paramNames, paramName) // paramNames must be lowercase
|
||||
} |
||||
sort.Strings(paramNames) |
||||
|
||||
for _, paramName := range paramNames { |
||||
paramValues := params[paramName] |
||||
sort.Strings(paramValues) |
||||
|
||||
// Join the sorted key values separated by ','
|
||||
// Then prepend "keyName:"; then add this string to the buffer
|
||||
cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) |
||||
} |
||||
} |
||||
return string(cr.Bytes()), nil |
||||
} |
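Tying the shared-key pieces together; a hedged sketch where the account name, key, and endpoint are placeholders:

// Every request sent through this pipeline is signed by the policy defined above.
cred, err := azblob.NewSharedKeyCredential("myaccount", base64.StdEncoding.EncodeToString([]byte("mykey")))
if err != nil {
	log.Fatal(err)
}
p := azblob.NewPipeline(cred, azblob.PipelineOptions{})
endpoint, _ := url.Parse("https://myaccount.blob.core.windows.net")
service := azblob.NewServiceURL(*endpoint, p)
_, err = service.GetProperties(context.Background()) // signed via buildStringToSign/ComputeHMACSHA256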
@ -1,137 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"sync/atomic" |
||||
|
||||
"runtime" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
// TokenRefresher represents a callback method that you write; this method is called periodically
|
||||
// so you can refresh the token credential's value.
|
||||
type TokenRefresher func(credential TokenCredential) time.Duration |
||||
|
||||
// TokenCredential represents a token credential (which is also a pipeline.Factory).
|
||||
type TokenCredential interface { |
||||
Credential |
||||
Token() string |
||||
SetToken(newToken string) |
||||
} |
||||
|
||||
// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
|
||||
// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
|
||||
// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
|
||||
// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
|
||||
// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
|
||||
// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
|
||||
// TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a
|
||||
// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
|
||||
func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential { |
||||
tc := &tokenCredential{} |
||||
tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
|
||||
if tokenRefresher == nil { |
||||
return tc // If no callback specified, return the simple tokenCredential
|
||||
} |
||||
|
||||
tcwr := &tokenCredentialWithRefresh{token: tc} |
||||
tcwr.token.startRefresh(tokenRefresher) |
||||
runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) { |
||||
deadTC.token.stopRefresh() |
||||
deadTC.token = nil // Sanity (not really required)
|
||||
}) |
||||
return tcwr |
||||
} |
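A sketch of a refresher callback as described above; initialToken and fetchToken are placeholders for whatever flow produces bearer tokens:

tc := azblob.NewTokenCredential(initialToken, func(tc azblob.TokenCredential) time.Duration {
	newToken, err := fetchToken() // placeholder: your OAuth/MSI token source
	if err != nil {
		return 0 // per the contract above, 0 stops all future refresh calls
	}
	tc.SetToken(newToken)
	return 55 * time.Minute // ask to be called again shortly before the token expires
})
p := azblob.NewPipeline(tc, azblob.PipelineOptions{})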
||||
|
||||
// tokenCredentialWithRefresh is a wrapper over a token credential.
|
||||
// When this wrapper object gets GC'd, it stops the tokenCredential's timer
|
||||
// which allows the tokenCredential object to also be GC'd.
|
||||
type tokenCredentialWithRefresh struct { |
||||
token *tokenCredential |
||||
} |
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (*tokenCredentialWithRefresh) credentialMarker() {} |
||||
|
||||
// Token returns the current token value
|
||||
func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() } |
||||
|
||||
// SetToken changes the current token value
|
||||
func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) } |
||||
|
||||
// New satisfies pipeline.Factory's New method creating a pipeline policy object.
|
||||
func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { |
||||
return f.token.New(next, po) |
||||
} |
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// tokenCredential is a pipeline.Factory; it is the credential's policy factory.
|
||||
type tokenCredential struct { |
||||
token atomic.Value |
||||
|
||||
// The members below are only used if the user specified a tokenRefresher callback function.
|
||||
timer *time.Timer |
||||
tokenRefresher TokenRefresher |
||||
lock sync.Mutex |
||||
stopped bool |
||||
} |
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (*tokenCredential) credentialMarker() {} |
||||
|
||||
// Token returns the current token value
|
||||
func (f *tokenCredential) Token() string { return f.token.Load().(string) } |
||||
|
||||
// SetToken changes the current token value
|
||||
func (f *tokenCredential) SetToken(token string) { f.token.Store(token) } |
||||
|
||||
// startRefresh calls refresh which immediately calls tokenRefresher
|
||||
// and then starts a timer to call tokenRefresher in the future.
|
||||
func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) { |
||||
f.tokenRefresher = tokenRefresher |
||||
f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
|
||||
f.refresh() |
||||
} |
||||
|
||||
// refresh calls the user's tokenRefresher so they can refresh the token (by
|
||||
// calling SetToken) and then starts another timer (based on the returned duration)
|
||||
// in order to refresh the token again in the future.
|
||||
func (f *tokenCredential) refresh() { |
||||
d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
|
||||
if d > 0 { // If duration is 0 or negative, refresher wants to not be called again
|
||||
f.lock.Lock() |
||||
if !f.stopped { |
||||
f.timer = time.AfterFunc(d, f.refresh) |
||||
} |
||||
f.lock.Unlock() |
||||
} |
||||
} |
||||
|
||||
// stopRefresh stops any pending timer and sets stopped field to true to prevent
|
||||
// any new timer from starting.
|
||||
// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object.
|
||||
func (f *tokenCredential) stopRefresh() { |
||||
f.lock.Lock() |
||||
f.stopped = true |
||||
if f.timer != nil { |
||||
f.timer.Stop() |
||||
} |
||||
f.lock.Unlock() |
||||
} |
||||
|
||||
// New satisfies pipeline.Factory's New method creating a pipeline policy object.
|
||||
func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { |
||||
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { |
||||
if request.URL.Scheme != "https" { |
||||
// HTTPS must be used, otherwise the tokens are at risk of being exposed
|
||||
return nil, errors.New("token credentials require a URL using the https protocol scheme") |
||||
} |
||||
request.Header[headerAuthorization] = []string{"Bearer " + f.Token()} |
||||
return next.Do(ctx, request) |
||||
}) |
||||
} |
@ -1,27 +0,0 @@ |
||||
// +build linux darwin freebsd openbsd netbsd dragonfly
|
||||
|
||||
package azblob |
||||
|
||||
import ( |
||||
"os" |
||||
"syscall" |
||||
) |
||||
|
||||
type mmf []byte |
||||
|
||||
func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { |
||||
prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only
|
||||
if writable { |
||||
prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED |
||||
} |
||||
addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags) |
||||
return mmf(addr), err |
||||
} |
||||
|
||||
func (m *mmf) unmap() { |
||||
err := syscall.Munmap(*m) |
||||
*m = nil |
||||
if err != nil { |
||||
panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption") |
||||
} |
||||
} |
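An in-package sketch of how these unexported helpers pair up; the path and the final consumer are placeholders:

// mapFile maps a file read-only for its whole length, then releases the mapping.
func mapFile(path string) error {
	f, err := os.Open(path) // placeholder path
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	m, err := newMMF(f, false /*writable*/, 0, int(fi.Size()))
	if err != nil {
		return err
	}
	defer m.unmap()
	_ = []byte(m) // mmf is just a []byte; hand it to whatever consumer needs it
	return nil
}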
@ -1,38 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"os" |
||||
"reflect" |
||||
"syscall" |
||||
"unsafe" |
||||
) |
||||
|
||||
type mmf []byte |
||||
|
||||
func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { |
||||
prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only
|
||||
if writable { |
||||
prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) |
||||
} |
||||
hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil) |
||||
if hMMF == 0 { |
||||
return nil, os.NewSyscallError("CreateFileMapping", errno) |
||||
} |
||||
defer syscall.CloseHandle(hMMF) |
||||
addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) |
||||
m := mmf{} |
||||
h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) |
||||
h.Data = addr |
||||
h.Len = length |
||||
h.Cap = h.Len |
||||
return m, nil |
||||
} |
||||
|
||||
func (m *mmf) unmap() { |
||||
addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) |
||||
*m = mmf{} |
||||
err := syscall.UnmapViewOfFile(addr) |
||||
if err != nil { |
||||
panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption") |
||||
} |
||||
} |
@ -1,46 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
|
||||
type PipelineOptions struct { |
||||
// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
|
||||
Log pipeline.LogOptions |
||||
|
||||
// Retry configures the built-in retry policy behavior.
|
||||
Retry RetryOptions |
||||
|
||||
// RequestLog configures the built-in request logging policy.
|
||||
RequestLog RequestLogOptions |
||||
|
||||
// Telemetry configures the built-in telemetry policy behavior.
|
||||
Telemetry TelemetryOptions |
||||
|
||||
// HTTPSender configures the sender of HTTP requests
|
||||
HTTPSender pipeline.Factory |
||||
} |
||||
|
||||
// NewPipeline creates a Pipeline using the specified credentials and options.
|
||||
func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline { |
||||
// Closest to API goes first; closest to the wire goes last
|
||||
f := []pipeline.Factory{ |
||||
NewTelemetryPolicyFactory(o.Telemetry), |
||||
NewUniqueRequestIDPolicyFactory(), |
||||
NewRetryPolicyFactory(o.Retry), |
||||
} |
||||
|
||||
if _, ok := c.(*anonymousCredentialPolicyFactory); !ok { |
||||
// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
|
||||
// NOTE: The credential's policy factory must appear close to the wire so it can sign any
|
||||
// changes made by other factories (like UniqueRequestIDPolicyFactory)
|
||||
f = append(f, c) |
||||
} |
||||
f = append(f, |
||||
NewRequestLogPolicyFactory(o.RequestLog), |
||||
pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
|
||||
|
||||
|
||||
return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log}) |
||||
} |
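Constructing a pipeline is then a one-liner; the factory ordering above is handled internally. A sketch (u is a placeholder *url.URL for a public container):

// Anonymous pipeline for public blobs; the credential policy is elided per the optimization above.
p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
containerURL := azblob.NewContainerURL(*u, p)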
182
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
generated
vendored
@ -1,182 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"fmt" |
||||
"net/http" |
||||
"net/url" |
||||
"runtime" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
// RequestLogOptions configures the request logging policy's behavior.
|
||||
type RequestLogOptions struct { |
||||
// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
|
||||
// duration (-1=no logging; 0=default threshold).
|
||||
LogWarningIfTryOverThreshold time.Duration |
||||
} |
||||
|
||||
func (o RequestLogOptions) defaults() RequestLogOptions { |
||||
if o.LogWarningIfTryOverThreshold == 0 { |
||||
// It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
|
||||
// But this monitors the time to get the HTTP response; NOT the time to download the response body.
|
||||
o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
|
||||
} |
||||
return o |
||||
} |
||||
|
||||
// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
|
||||
func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory { |
||||
o = o.defaults() // Force defaults to be calculated
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { |
||||
// These variables are per-policy; shared by multiple calls to Do
|
||||
var try int32 |
||||
operationStart := time.Now() // If this is the 1st try, record the operation start time
|
||||
return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { |
||||
try++ // The first try is #1 (not #0)
|
||||
|
||||
// Log the outgoing request as informational
|
||||
if po.ShouldLog(pipeline.LogInfo) { |
||||
b := &bytes.Buffer{} |
||||
fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try) |
||||
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil) |
||||
po.Log(pipeline.LogInfo, b.String()) |
||||
} |
||||
|
||||
// Set the time for this particular retry operation and then Do the operation.
|
||||
tryStart := time.Now() |
||||
response, err = next.Do(ctx, request) // Make the request
|
||||
tryEnd := time.Now() |
||||
tryDuration := tryEnd.Sub(tryStart) |
||||
opDuration := tryEnd.Sub(operationStart) |
||||
|
||||
logLevel, forceLog := pipeline.LogInfo, false // Default logging information
|
||||
|
||||
// If the response took too long, we'll upgrade to warning.
|
||||
if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { |
||||
// Log a warning if the try duration exceeded the specified threshold
|
||||
logLevel, forceLog = pipeline.LogWarning, true |
||||
} |
||||
|
||||
if err == nil { // We got a response from the service
|
||||
sc := response.Response().StatusCode |
||||
if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) { |
||||
logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed above) or any 5xx
|
||||
} else { |
||||
// For other status codes, we leave the level as is.
|
||||
} |
||||
} else { // This error did not get an HTTP response from the service; upgrade the severity to Error
|
||||
logLevel, forceLog = pipeline.LogError, true |
||||
} |
||||
|
||||
if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog { |
||||
// We're going to log this; build the string to log
|
||||
b := &bytes.Buffer{} |
||||
slow := "" |
||||
if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { |
||||
slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold) |
||||
} |
||||
fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration) |
||||
if err != nil { // This HTTP request did not get a response from the service
|
||||
fmt.Fprint(b, "REQUEST ERROR\n") |
||||
} else { |
||||
if logLevel == pipeline.LogError { |
||||
fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n") |
||||
} else { |
||||
fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n") |
||||
} |
||||
} |
||||
|
||||
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err) |
||||
if logLevel <= pipeline.LogError { |
||||
b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation)
|
||||
} |
||||
msg := b.String() |
||||
|
||||
if forceLog { |
||||
pipeline.ForceLog(logLevel, msg) |
||||
} |
||||
if shouldLog { |
||||
po.Log(logLevel, msg) |
||||
} |
||||
} |
||||
return response, err |
||||
} |
||||
}) |
||||
} |
||||
|
||||
// RedactSigQueryParam redacts the 'sig' query parameter in a URL's raw query to protect the secret.
|
||||
func RedactSigQueryParam(rawQuery string) (bool, string) { |
||||
rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
|
||||
sigFound := strings.Contains(rawQuery, "?sig=") |
||||
if !sigFound { |
||||
sigFound = strings.Contains(rawQuery, "&sig=") |
||||
if !sigFound { |
||||
return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation)
|
||||
} |
||||
} |
||||
// [?|&]sig= found, redact its value
|
||||
values, _ := url.ParseQuery(rawQuery) |
||||
for name := range values { |
||||
if strings.EqualFold(name, "sig") { |
||||
values[name] = []string{"REDACTED"} |
||||
} |
||||
} |
||||
return sigFound, values.Encode() |
||||
} |
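A usage sketch (the query string is illustrative); note that url.Values.Encode re-sorts the keys and the whole query was lowercased first:

found, q := azblob.RedactSigQueryParam("sv=2019-02-02&sig=TOPSECRET")
// found == true; q == "sig=REDACTED&sv=2019-02-02"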
||||
|
||||
func prepareRequestForLogging(request pipeline.Request) *http.Request { |
||||
req := request |
||||
if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound { |
||||
// Make a copy so we don't destroy the query parameters we actually need to send in the request
|
||||
req = request.Copy() |
||||
req.Request.URL.RawQuery = rawQuery |
||||
} |
||||
|
||||
return prepareRequestForServiceLogging(req) |
||||
} |
||||
|
||||
func stack() []byte { |
||||
buf := make([]byte, 1024) |
||||
for { |
||||
n := runtime.Stack(buf, false) |
||||
if n < len(buf) { |
||||
return buf[:n] |
||||
} |
||||
buf = make([]byte, 2*len(buf)) |
||||
} |
||||
} |
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////
|
||||
// The redact phase is useful for the blob and file services only. For other services,
|
||||
// this method can directly return request.Request.
|
||||
///////////////////////////////////////////////////////////////////////////////////////
|
||||
func prepareRequestForServiceLogging(request pipeline.Request) *http.Request { |
||||
req := request |
||||
if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist { |
||||
req = request.Copy() |
||||
url, err := url.Parse(req.Header.Get(key)) |
||||
if err == nil { |
||||
if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound { |
||||
url.RawQuery = rawQuery |
||||
req.Header.Set(xMsCopySourceHeader, url.String()) |
||||
} |
||||
} |
||||
} |
||||
return req.Request |
||||
} |
||||
|
||||
const xMsCopySourceHeader = "x-ms-copy-source" |
||||
|
||||
func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) { |
||||
for keyInHeader := range header { |
||||
if strings.EqualFold(keyInHeader, key) { |
||||
return true, keyInHeader |
||||
} |
||||
} |
||||
return false, "" |
||||
} |
@ -1,412 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"io" |
||||
"io/ioutil" |
||||
"math/rand" |
||||
"net" |
||||
"net/http" |
||||
"strconv" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
|
||||
type RetryPolicy int32 |
||||
|
||||
const ( |
||||
// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
|
||||
RetryPolicyExponential RetryPolicy = 0 |
||||
|
||||
// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
|
||||
RetryPolicyFixed RetryPolicy = 1 |
||||
) |
||||
|
||||
// RetryOptions configures the retry policy's behavior.
|
||||
type RetryOptions struct { |
||||
// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
|
||||
// A value of zero means that you accept our default policy.
|
||||
Policy RetryPolicy |
||||
|
||||
// MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
|
||||
// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
|
||||
MaxTries int32 |
||||
|
||||
// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
|
||||
// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
|
||||
// of data, the default TryTimeout will probably not be sufficient. You should override this value
|
||||
// based on the bandwidth available to the host machine and proximity to the Storage service. A good
|
||||
// starting point may be something like (60 seconds per MB of anticipated-payload-size).
|
||||
TryTimeout time.Duration |
||||
|
||||
// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
|
||||
// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
|
||||
// with each retry up to a maximum specified by MaxRetryDelay.
|
||||
// If you specify 0, then you must also specify 0 for MaxRetryDelay.
|
||||
// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
|
||||
// equal to or greater than RetryDelay.
|
||||
RetryDelay time.Duration |
||||
|
||||
// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
|
||||
// If you specify 0, then you must also specify 0 for RetryDelay.
|
||||
MaxRetryDelay time.Duration |
||||
|
||||
// RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
|
||||
// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
|
||||
// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
|
||||
// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
|
||||
RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
|
||||
} |
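A configuration sketch for the options above (values illustrative, not recommendations; cred is a placeholder credential):

opts := azblob.RetryOptions{
	Policy:        azblob.RetryPolicyExponential,
	MaxTries:      5,
	TryTimeout:    2 * time.Minute, // raise for large transfers, per the comment above
	RetryDelay:    4 * time.Second,
	MaxRetryDelay: 60 * time.Second, // must be set together with RetryDelay
}
p := azblob.NewPipeline(cred, azblob.PipelineOptions{Retry: opts})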
||||
|
||||
func (o RetryOptions) retryReadsFromSecondaryHost() string { |
||||
return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
|
||||
//return "" // This is for non-blob SDKs
|
||||
} |
||||
|
||||
func (o RetryOptions) defaults() RetryOptions { |
||||
// We assume the following:
|
||||
// 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
|
||||
// 2. o.MaxTries >= 0
|
||||
// 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0
|
||||
// 4. o.RetryDelay <= o.MaxRetryDelay
|
||||
// 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0
|
||||
|
||||
IfDefault := func(current *time.Duration, desired time.Duration) { |
||||
if *current == time.Duration(0) { |
||||
*current = desired |
||||
} |
||||
} |
||||
|
||||
// Set defaults if unspecified
|
||||
if o.MaxTries == 0 { |
||||
o.MaxTries = 4 |
||||
} |
||||
switch o.Policy { |
||||
case RetryPolicyExponential: |
||||
IfDefault(&o.TryTimeout, 1*time.Minute) |
||||
IfDefault(&o.RetryDelay, 4*time.Second) |
||||
IfDefault(&o.MaxRetryDelay, 120*time.Second) |
||||
|
||||
case RetryPolicyFixed: |
||||
IfDefault(&o.TryTimeout, 1*time.Minute) |
||||
IfDefault(&o.RetryDelay, 30*time.Second) |
||||
IfDefault(&o.MaxRetryDelay, 120*time.Second) |
||||
} |
||||
return o |
||||
} |
||||
|
||||
func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
|
||||
pow := func(number int64, exponent int32) int64 { // pow is nested helper function
|
||||
var result int64 = 1 |
||||
for n := int32(0); n < exponent; n++ { |
||||
result *= number |
||||
} |
||||
return result |
||||
} |
||||
|
||||
delay := time.Duration(0) |
||||
switch o.Policy { |
||||
case RetryPolicyExponential: |
||||
delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay |
||||
|
||||
case RetryPolicyFixed: |
||||
if try > 1 { // Any try after the 1st uses the fixed delay
|
||||
delay = o.RetryDelay |
||||
} |
||||
} |
||||
|
||||
// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
|
||||
// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
|
||||
delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
|
||||
if delay > o.MaxRetryDelay { |
||||
delay = o.MaxRetryDelay |
||||
} |
||||
return delay |
||||
} |
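With the exponential defaults (RetryDelay = 4s) the pre-jitter delays for tries 1-4 work out to 0s, 4s, 12s, and 28s. A standalone sketch of just the exponential branch, minus jitter and the policy switch:

// expDelay mirrors calcDelay's exponential case: (2^(try-1) - 1) * RetryDelay, capped at MaxRetryDelay.
func expDelay(try int32, retryDelay, maxRetryDelay time.Duration) time.Duration {
	d := time.Duration((int64(1)<<uint(try-1))-1) * retryDelay
	if d > maxRetryDelay {
		d = maxRetryDelay
	}
	return d
}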
||||
|
||||
// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
|
||||
func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { |
||||
o = o.defaults() // Force defaults to be calculated
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { |
||||
return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { |
||||
// Before each try, we'll select either the primary or secondary URL.
|
||||
primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
|
||||
|
||||
// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
|
||||
considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != "" |
||||
|
||||
// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
|
||||
// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
|
||||
// If using a secondary:
|
||||
// Even tries go against primary; odd tries go against the secondary
|
||||
// For the primary, wait ((2 ^ primaryTries) - 1) * delay * random(0.8, 1.2)
|
||||
// If secondary gets a 404, don't fail, retry but future retries are only against the primary
|
||||
// When retrying against a secondary, ignore the retry count and wait (1 second * random(0.8, 1.2))
|
||||
for try := int32(1); try <= o.MaxTries; try++ { |
||||
logf("\n=====> Try=%d\n", try) |
||||
|
||||
// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd # attempt.
|
||||
tryingPrimary := !considerSecondary || (try%2 == 1) |
||||
// Select the correct host and delay
|
||||
if tryingPrimary { |
||||
primaryTry++ |
||||
delay := o.calcDelay(primaryTry) |
||||
logf("Primary try=%d, Delay=%v\n", primaryTry, delay) |
||||
time.Sleep(delay) // The 1st try returns 0 delay
|
||||
} else { |
||||
// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
|
||||
delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8)) |
||||
logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay) |
||||
time.Sleep(delay) // Delay with some jitter before trying secondary
|
||||
} |
||||
|
||||
// Clone the original request to ensure that each try starts with the original (unmutated) request.
|
||||
requestCopy := request.Copy() |
||||
|
||||
// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
|
||||
// the stream may not be at offset 0 when we first get it and we want the same behavior for the
|
||||
// 1st try as for additional tries.
|
||||
err = requestCopy.RewindBody() |
||||
if err != nil { |
||||
return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption") |
||||
} |
||||
|
||||
if !tryingPrimary { |
||||
requestCopy.URL.Host = o.retryReadsFromSecondaryHost() |
||||
requestCopy.Host = o.retryReadsFromSecondaryHost() |
||||
} |
||||
|
||||
// Set the server-side timeout query parameter "timeout=[seconds]"
|
||||
timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
|
||||
if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
|
||||
t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
|
||||
logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) |
||||
if t < timeout { |
||||
timeout = t |
||||
} |
||||
if timeout < 0 { |
||||
timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
|
||||
} |
||||
logf("TryTimeout adjusted to=%d sec\n", timeout) |
||||
} |
||||
q := requestCopy.Request.URL.Query() |
||||
q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
|
||||
requestCopy.Request.URL.RawQuery = q.Encode() |
||||
logf("Url=%s\n", requestCopy.Request.URL.String()) |
||||
|
||||
// Set the time for this particular retry operation and then Do the operation.
|
||||
tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout)) |
||||
//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
|
||||
response, err = next.Do(tryCtx, requestCopy) // Make the request
|
||||
/*err = improveDeadlineExceeded(err) |
||||
if err == nil { |
||||
response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body} |
||||
}*/ |
||||
logf("Err=%v, response=%v\n", err, response) |
||||
|
||||
action := "" // This MUST get changed within the switch code below
|
||||
switch { |
||||
case ctx.Err() != nil: |
||||
action = "NoRetry: Op timeout" |
||||
case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound: |
||||
// If attempt was against the secondary & it returned a StatusNotFound (404), then
|
||||
// the resource was not found. This may be due to replication delay. So, in this
|
||||
// case, we'll never try the secondary again for this operation.
|
||||
considerSecondary = false |
||||
action = "Retry: Secondary URL returned 404" |
||||
case err != nil: |
||||
// NOTE: the protocol Responder returns a non-nil error if the REST API returns an invalid status code for the invoked operation.
|
||||
// Use ServiceCode to verify whether the error originated on the storage service side;
|
||||
// ServiceCode is set only when a storage-service-related error occurred.
|
||||
if stErr, ok := err.(StorageError); ok { |
||||
if stErr.Temporary() { |
||||
action = "Retry: StorageError with error service code and Temporary()" |
||||
} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove it after the protocol layer fixes the issue that net.Error is wrapped as StorageError
|
||||
action = "Retry: StorageError with success status code" |
||||
} else { |
||||
action = "NoRetry: StorageError not Temporary() and without retriable status code" |
||||
} |
||||
} else if netErr, ok := err.(net.Error); ok { |
||||
// Use a list of non-retriable net.Errors rather than a list of retriable ones.
|
||||
// Some errors lack a Temporary() implementation
|
||||
// yet should still be retried, e.g. 'connection reset by peer' and 'transport connection broken'.
|
||||
// So the SDK retries in most cases, unless the error is known for certain to be non-retriable.
|
||||
if !isNotRetriable(netErr) { |
||||
action = "Retry: net.Error and not in the non-retriable list" |
||||
} else { |
||||
action = "NoRetry: net.Error and in the non-retriable list" |
||||
} |
||||
} else { |
||||
action = "NoRetry: unrecognized error" |
||||
} |
||||
default: |
||||
action = "NoRetry: successful HTTP request" // no error
|
||||
} |
||||
|
||||
logf("Action=%s\n", action) |
||||
// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
|
||||
if action[0] != 'R' { // Retry only if action starts with 'R'
|
||||
if err != nil { |
||||
tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
|
||||
} else { |
||||
// We wrap the response body in a wrapper holding the last per-try context and overwrite the Response's Body field with it.
|
||||
// So, when the user closes the Body, our per-try context gets closed too.
|
||||
// Another option is to have the last policy do this wrapping for a per-retry context (not for the user's context).
|
||||
if response == nil || response.Response() == nil { |
||||
// We return an error in the case that response or response.Response() is nil,
|
||||
// as, for the client, the response should not be nil if the request was sent and the operation executed successfully.
|
||||
// Another option is to execute the cancel function when response or response.Response() is nil,
|
||||
// as in this case the current per-try context has nothing left to do.
|
||||
return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully") |
||||
} |
||||
response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body} |
||||
} |
||||
break // Don't retry
|
||||
} |
||||
if response != nil && response.Response() != nil && response.Response().Body != nil { |
||||
// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
|
||||
body := response.Response().Body |
||||
io.Copy(ioutil.Discard, body) |
||||
body.Close() |
||||
} |
||||
// If retrying, cancel the current per-try timeout context
|
||||
tryCancel() |
||||
} |
||||
return response, err // Not retryable or too many retries; return the last response/error
|
||||
} |
||||
}) |
||||
} |
||||
|
||||
// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
|
||||
type contextCancelReadCloser struct { |
||||
cf context.CancelFunc |
||||
body io.ReadCloser |
||||
} |
||||
|
||||
func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { |
||||
return rc.body.Read(p) |
||||
} |
||||
|
||||
func (rc *contextCancelReadCloser) Close() error { |
||||
err := rc.body.Close() |
||||
if rc.cf != nil { |
||||
rc.cf() |
||||
} |
||||
return err |
||||
} |
||||
|
||||
// isNotRetriable checks if the provided net.Error isn't retriable.
|
||||
func isNotRetriable(errToParse net.Error) bool { |
||||
// No error, so this is NOT retriable.
|
||||
if errToParse == nil { |
||||
return true |
||||
} |
||||
|
||||
// The error is either temporary or a timeout, so it IS retriable (i.e., not "not retriable").
|
||||
if errToParse.Temporary() || errToParse.Timeout() { |
||||
return false |
||||
} |
||||
|
||||
genericErr := error(errToParse) |
||||
|
||||
// From here on, the errors are neither Temporary() nor Timeout().
|
||||
switch err := errToParse.(type) { |
||||
case *net.OpError: |
||||
// The net.Error is a net.OpError but its inner error is nil, so this is not retriable.
|
||||
if err.Err == nil { |
||||
return true |
||||
} |
||||
genericErr = err.Err |
||||
} |
||||
|
||||
switch genericErr.(type) { |
||||
case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError: |
||||
// If the error is one of the ones listed, then it is NOT retriable.
|
||||
return true |
||||
} |
||||
|
||||
// If it's an invalid header field name/value error thrown by the http module, then it is NOT retriable.
|
||||
// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
|
||||
if strings.Contains(genericErr.Error(), "invalid header field") { |
||||
return true |
||||
} |
||||
|
||||
// Assume the error is retriable.
|
||||
return false |
||||
} |
||||
|
||||
var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent} |
||||
|
||||
func isSuccessStatusCode(resp *http.Response) bool { |
||||
if resp == nil { |
||||
return false |
||||
} |
||||
for _, i := range successStatusCodes { |
||||
if i == resp.StatusCode { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
|
||||
var logf = func(format string, a ...interface{}) {} |
||||
|
||||
// Use this version to see the retry method's code path (import "fmt")
|
||||
//var logf = fmt.Printf
|
||||
|
||||
/* |
||||
type deadlineExceededReadCloser struct { |
||||
r io.ReadCloser |
||||
} |
||||
|
||||
func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) { |
||||
n, err := 0, io.EOF |
||||
if r.r != nil { |
||||
n, err = r.r.Read(p) |
||||
} |
||||
return n, improveDeadlineExceeded(err) |
||||
} |
||||
func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { |
||||
// For an HTTP request, the ReadCloser MUST also implement seek
|
||||
// For an HTTP response, Seek MUST not be called (or this will panic)
|
||||
o, err := r.r.(io.Seeker).Seek(offset, whence) |
||||
return o, improveDeadlineExceeded(err) |
||||
} |
||||
func (r *deadlineExceededReadCloser) Close() error { |
||||
if c, ok := r.r.(io.Closer); ok { |
||||
c.Close() |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// timeoutError is the internal struct that implements our richer timeout error.
|
||||
type deadlineExceeded struct { |
||||
responseError |
||||
} |
||||
|
||||
var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time
|
||||
|
||||
// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error.
|
||||
func improveDeadlineExceeded(cause error) error { |
||||
// If cause is not DeadlineExceeded, return the same error passed in.
|
||||
if cause != context.DeadlineExceeded { |
||||
return cause |
||||
} |
||||
// Else, convert DeadlineExceeded to our timeoutError which gives a richer string message
|
||||
return &deadlineExceeded{ |
||||
responseError: responseError{ |
||||
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), |
||||
}, |
||||
} |
||||
} |
||||
|
||||
// Error implements the error interface's Error method to return a string representation of the error.
|
||||
func (e *deadlineExceeded) Error() string { |
||||
return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field") |
||||
} |
||||
*/ |
@ -1,51 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"fmt" |
||||
"os" |
||||
"runtime" |
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
// TelemetryOptions configures the telemetry policy's behavior.
|
||||
type TelemetryOptions struct { |
||||
// Value is a string prepended to each request's User-Agent and sent to the service.
|
||||
// The service records the user-agent in logs for diagnostics and tracking of client requests.
|
||||
Value string |
||||
} |
||||
|
||||
// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
|
||||
// which add telemetry information to outgoing HTTP requests.
|
||||
func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory { |
||||
b := &bytes.Buffer{} |
||||
b.WriteString(o.Value) |
||||
if b.Len() > 0 { |
||||
b.WriteRune(' ') |
||||
} |
||||
fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo) |
||||
telemetryValue := b.String() |
||||
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { |
||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { |
||||
request.Header.Set("User-Agent", telemetryValue) |
||||
return next.Do(ctx, request) |
||||
} |
||||
}) |
||||
} |
||||
|
||||
// NOTE: the ONLY function that should write to this variable is this func
|
||||
var platformInfo = func() string { |
||||
// Format: Azure-Storage/version (runtime; os type and version)
|
||||
// Example: Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
|
||||
operatingSystem := runtime.GOOS // Default OS string
|
||||
switch operatingSystem { |
||||
case "windows": |
||||
operatingSystem = os.Getenv("OS") // Get more specific OS information
|
||||
case "linux": // accept default OS info
|
||||
case "freebsd": // accept default OS info
|
||||
} |
||||
return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) |
||||
}() |
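A usage sketch (the application identifier and cred are illustrative):

p := azblob.NewPipeline(cred, azblob.PipelineOptions{
	Telemetry: azblob.TelemetryOptions{Value: "myapp/1.2.3"},
})
// Resulting User-Agent is roughly: "myapp/1.2.3 Azure-Storage/0.7 (go1.13; linux)"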
24
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go
generated
vendored
@ -1,24 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"context" |
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
|
||||
// that sets the request's x-ms-client-request-id header if it doesn't already exist.
|
||||
func NewUniqueRequestIDPolicyFactory() pipeline.Factory { |
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { |
||||
// This is Policy's Do method:
|
||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { |
||||
id := request.Header.Get(xMsClientRequestID) |
||||
if id == "" { // Add a unique request ID if the caller didn't specify one already
|
||||
request.Header.Set(xMsClientRequestID, newUUID().String()) |
||||
} |
||||
return next.Do(ctx, request) |
||||
} |
||||
}) |
||||
} |
||||
|
||||
const xMsClientRequestID = "x-ms-client-request-id" |
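Since the policy only fills in a missing ID, a caller can pre-set its own correlation value on the outgoing request and it is preserved end to end; a one-line sketch (req and myCorrelationID are placeholders):

req.Header.Set("x-ms-client-request-id", myCorrelationID) // req is the outgoing pipeline.Request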
@ -1,178 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"context" |
||||
"io" |
||||
"net" |
||||
"net/http" |
||||
"strings" |
||||
"sync" |
||||
) |
||||
|
||||
const CountToEnd = 0 |
||||
|
||||
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
|
||||
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) |
||||
|
||||
// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
|
||||
// that should be used to make an HTTP GET request.
|
||||
type HTTPGetterInfo struct { |
||||
// Offset specifies the start offset that should be used when
|
||||
// creating the HTTP GET request's Range header
|
||||
Offset int64 |
||||
|
||||
// Count specifies the count of bytes that should be used to calculate
|
||||
// the end offset when creating the HTTP GET request's Range header
|
||||
Count int64 |
||||
|
||||
// ETag specifies the resource's etag that should be used when creating
|
||||
// the HTTP GET request's If-Match header
|
||||
ETag ETag |
||||
} |
||||
|
||||
// FailedReadNotifier is a function type that represents the notification function called when a read fails
|
||||
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool) |
||||
|
||||
// RetryReaderOptions contains properties which can help to decide when to do retry.
|
||||
type RetryReaderOptions struct { |
||||
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
|
||||
// while reading from a RetryReader. A value of zero means that no additional HTTP
|
||||
// GET requests will be made.
|
||||
MaxRetryRequests int |
||||
doInjectError bool |
||||
doInjectErrorRound int |
||||
|
||||
// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
|
||||
NotifyFailedRead FailedReadNotifier |
||||
|
||||
// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
|
||||
// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
|
||||
// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
|
||||
// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
|
||||
// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
|
||||
// treated as a fatal (non-retryable) error.
|
||||
// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
|
||||
// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
|
||||
// which will be retried.
|
||||
TreatEarlyCloseAsError bool |
||||
} |
||||
|
||||
// retryReader implements the io.ReadCloser methods.
|
||||
// retryReader tries to read from the response; if a retriable network error is
|
||||
// returned during reading, it retries according to the retry reader options by executing the
|
||||
// user-defined getter with the provided data to obtain a new response, and continues the overall reading process
|
||||
// by reading from the new response.
|
||||
type retryReader struct { |
||||
ctx context.Context |
||||
info HTTPGetterInfo |
||||
countWasBounded bool |
||||
o RetryReaderOptions |
||||
getter HTTPGetter |
||||
|
||||
// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
|
||||
responseMu *sync.Mutex |
||||
response *http.Response |
||||
} |
||||
|
||||
// NewRetryReader creates a retry reader.
|
||||
func NewRetryReader(ctx context.Context, initialResponse *http.Response, |
||||
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser { |
||||
return &retryReader{ |
||||
ctx: ctx, |
||||
getter: getter, |
||||
info: info, |
||||
countWasBounded: info.Count != CountToEnd, |
||||
response: initialResponse, |
||||
responseMu: &sync.Mutex{}, |
||||
o: o} |
||||
} |
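A usage sketch: wrap an initial GET response so reads survive transient network errors. Here getBlobRange is a placeholder that re-issues the ranged GET, and ctx/initialResp/etag come from the original request:

rr := azblob.NewRetryReader(ctx, initialResp,
	azblob.HTTPGetterInfo{Offset: 0, Count: azblob.CountToEnd, ETag: etag},
	azblob.RetryReaderOptions{MaxRetryRequests: 3},
	func(ctx context.Context, i azblob.HTTPGetterInfo) (*http.Response, error) {
		return getBlobRange(ctx, i.Offset, i.Count, i.ETag) // placeholder re-issuer
	})
defer rr.Close()
body, err := ioutil.ReadAll(rr) // each failed Read may trigger up to 3 new GETs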
||||
|
||||
func (s *retryReader) setResponse(r *http.Response) { |
||||
s.responseMu.Lock() |
||||
defer s.responseMu.Unlock() |
||||
s.response = r |
||||
} |
||||
|
||||
func (s *retryReader) Read(p []byte) (n int, err error) { |
||||
for try := 0; ; try++ { |
||||
// fmt.Println(try) // Uncomment for debugging.
||||
if s.countWasBounded && s.info.Count == CountToEnd { |
||||
// User specified an original count and the remaining bytes are 0, return 0, EOF
|
||||
return 0, io.EOF |
||||
} |
||||
|
||||
s.responseMu.Lock() |
||||
resp := s.response |
||||
s.responseMu.Unlock() |
||||
if resp == nil { // We don't have a response stream to read from, try to get one.
|
||||
newResponse, err := s.getter(s.ctx, s.info) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
// Successful GET; this is the network stream we'll read from.
|
||||
s.setResponse(newResponse) |
||||
resp = newResponse |
||||
} |
||||
n, err := resp.Body.Read(p) // Read from the stream (this will return a non-nil err if the body is Closed, from another goroutine, while it is running)
||||
|
||||
// Injection mechanism for testing.
|
||||
if s.o.doInjectError && try == s.o.doInjectErrorRound { |
||||
err = &net.DNSError{IsTemporary: true} |
||||
} |
||||
|
||||
// We successfully read data or reached EOF.
||||
if err == nil || err == io.EOF { |
||||
s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
|
||||
if s.info.Count != CountToEnd { |
||||
s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
|
||||
} |
||||
return n, err // Return the result to the caller
||||
} |
||||
s.Close() // Error, close stream
|
||||
s.setResponse(nil) // Our stream is no longer good
|
||||
|
||||
// Check the retry count and error code, and decide whether to retry.
|
||||
retriesExhausted := try >= s.o.MaxRetryRequests |
||||
_, isNetError := err.(net.Error) |
||||
willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted |
||||
|
||||
// Notify, for logging purposes, of any failures
|
||||
if s.o.NotifyFailedRead != nil { |
||||
failureCount := try + 1 // because try is zero-based
|
||||
s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry) |
||||
} |
||||
|
||||
if willRetry {
continue // Loop around and try to get and read from the new stream.
}
||||
return n, err // Not retryable, or retries exhausted, so just return
|
||||
} |
||||
} |
||||
|
||||
// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry.
// Is it safe to close early from another goroutine? Early close ultimately ends up calling
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors",
// which is exactly the behaviour we want.
// NOTE: if the caller has forced an early Close from a separate goroutine (separate from the Read),
// then there are two different types of error that may happen - either the one we check for here,
// or a net.Error (due to closure of the connection). Which one happens depends on timing. We only need this routine
// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
||||
func (s *retryReader) wasRetryableEarlyClose(err error) bool { |
||||
if s.o.TreatEarlyCloseAsError { |
||||
return false // user wants all early closes to be errors, and so not retryable
|
||||
} |
||||
// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
|
||||
return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) |
||||
} |
||||
|
||||
const ReadOnClosedBodyMessage = "read on closed response body" |
||||
|
||||
func (s *retryReader) Close() error { |
||||
s.responseMu.Lock() |
||||
defer s.responseMu.Unlock() |
||||
if s.response != nil && s.response.Body != nil { |
||||
return s.response.Body.Close() |
||||
} |
||||
return nil |
||||
} |
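The early-close behaviour documented above suggests a watchdog pattern: if a read stalls, another goroutine Closes the body, and (with TreatEarlyCloseAsError left false) the RetryReader treats the resulting "read on closed response body" as retryable and fetches a fresh stream within the same Read call. Below is a hedged sketch of how a caller might exploit that; the helper name and stall threshold are hypothetical, and rr can be any io.ReadCloser whose Close unblocks a pending Read.

package main

import (
	"io"
	"time"
)

// readWithWatchdog performs one Read under a stall timeout. If the Read
// blocks longer than stall, the watchdog Closes the reader, which a
// RetryReader treats as a retryable early close (unless
// TreatEarlyCloseAsError is set).
func readWithWatchdog(rr io.ReadCloser, buf []byte, stall time.Duration) (int, error) {
	done := make(chan struct{})
	go func() {
		select {
		case <-time.After(stall): // hypothetical stall threshold
			rr.Close() // forces the blocked Read to return an error
		case <-done:
		}
	}()
	n, err := rr.Read(buf)
	close(done)
	return n, err
}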
@ -1,219 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"bytes" |
||||
"errors" |
||||
"fmt" |
||||
"strings" |
||||
"time" |
||||
) |
||||
|
||||
// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
|
||||
type AccountSASSignatureValues struct { |
||||
Version string `param:"sv"` // If not specified, this defaults to SASVersion
|
||||
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
|
||||
StartTime time.Time `param:"st"` // Not specified if IsZero
|
||||
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
|
||||
Permissions string `param:"sp"` // Create by initializing an AccountSASPermissions and then call String()
||||
IPRange IPRange `param:"sip"` |
||||
Services string `param:"ss"` // Create by initializing AccountSASServices and then call String()
|
||||
ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
|
||||
} |
||||
|
||||
// NewSASQueryParameters uses an account's shared key credential to sign these signature values to produce
// the proper SAS query parameters.
||||
func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) { |
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
|
||||
if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { |
||||
return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Services, or ResourceTypes")
||||
} |
||||
if v.Version == "" { |
||||
v.Version = SASVersion |
||||
} |
||||
perms := &AccountSASPermissions{} |
||||
if err := perms.Parse(v.Permissions); err != nil { |
||||
return SASQueryParameters{}, err |
||||
} |
||||
v.Permissions = perms.String() |
||||
|
||||
startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{}) |
||||
|
||||
stringToSign := strings.Join([]string{ |
||||
sharedKeyCredential.AccountName(), |
||||
v.Permissions, |
||||
v.Services, |
||||
v.ResourceTypes, |
||||
startTime, |
||||
expiryTime, |
||||
v.IPRange.String(), |
||||
string(v.Protocol), |
||||
v.Version, |
||||
""}, // That right, the account SAS requires a terminating extra newline
|
||||
"\n") |
||||
|
||||
signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign) |
||||
p := SASQueryParameters{ |
||||
// Common SAS parameters
|
||||
version: v.Version, |
||||
protocol: v.Protocol, |
||||
startTime: v.StartTime, |
||||
expiryTime: v.ExpiryTime, |
||||
permissions: v.Permissions, |
||||
ipRange: v.IPRange, |
||||
|
||||
// Account-specific SAS parameters
|
||||
services: v.Services, |
||||
resourceTypes: v.ResourceTypes, |
||||
|
||||
// Calculated SAS signature
|
||||
signature: signature, |
||||
} |
||||
|
||||
return p, nil |
||||
} |
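Putting NewSASQueryParameters together with the permission/service/resource-type helpers defined below, here is a sketch of minting an account SAS; the account name, key, and host are placeholders, and the key must be valid base64.

package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder account credentials.
	credential, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=")
	if err != nil {
		panic(err)
	}
	sasValues := azblob.AccountSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
		Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(),
		Services:      azblob.AccountSASServices{Blob: true}.String(),
		ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
	}
	qp, err := sasValues.NewSASQueryParameters(credential)
	if err != nil {
		panic(err)
	}
	fmt.Printf("https://myaccount.blob.core.windows.net/?%s\n", qp.Encode())
}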
||||
|
||||
// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
|
||||
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
|
||||
type AccountSASPermissions struct { |
||||
Read, Write, Delete, List, Add, Create, Update, Process bool |
||||
} |
||||
|
||||
// String produces the SAS permissions string for an Azure Storage account.
|
||||
// Call this method to set AccountSASSignatureValues's Permissions field.
|
||||
func (p AccountSASPermissions) String() string { |
||||
var buffer bytes.Buffer |
||||
if p.Read { |
||||
buffer.WriteRune('r') |
||||
} |
||||
if p.Write { |
||||
buffer.WriteRune('w') |
||||
} |
||||
if p.Delete { |
||||
buffer.WriteRune('d') |
||||
} |
||||
if p.List { |
||||
buffer.WriteRune('l') |
||||
} |
||||
if p.Add { |
||||
buffer.WriteRune('a') |
||||
} |
||||
if p.Create { |
||||
buffer.WriteRune('c') |
||||
} |
||||
if p.Update { |
||||
buffer.WriteRune('u') |
||||
} |
||||
if p.Process { |
||||
buffer.WriteRune('p') |
||||
} |
||||
return buffer.String() |
||||
} |
||||
|
||||
// Parse initializes the AccountSASPermissions's fields from a string.
|
||||
func (p *AccountSASPermissions) Parse(s string) error { |
||||
*p = AccountSASPermissions{} // Clear out the flags
|
||||
for _, r := range s { |
||||
switch r { |
||||
case 'r': |
||||
p.Read = true |
||||
case 'w': |
||||
p.Write = true |
||||
case 'd': |
||||
p.Delete = true |
||||
case 'l': |
||||
p.List = true |
||||
case 'a': |
||||
p.Add = true |
||||
case 'c': |
||||
p.Create = true |
||||
case 'u': |
||||
p.Update = true |
||||
case 'p': |
||||
p.Process = true |
||||
default: |
||||
return fmt.Errorf("Invalid permission character: '%v'", r) |
||||
} |
||||
} |
||||
return nil |
||||
} |
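String and Parse round-trip through the flag struct, with String always emitting the canonical r-w-d-l-a-c-u-p order regardless of input order. A quick sketch:

package main

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	var p azblob.AccountSASPermissions
	if err := p.Parse("rwl"); err != nil {
		panic(err)
	}
	fmt.Println(p.Read, p.Write, p.List) // true true true
	fmt.Println(p.String())              // "rwl" (flags re-serialize in canonical order)
}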
||||
|
||||
// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
|
||||
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
|
||||
type AccountSASServices struct { |
||||
Blob, Queue, File bool |
||||
} |
||||
|
||||
// String produces the SAS services string for an Azure Storage account.
|
||||
// Call this method to set AccountSASSignatureValues's Services field.
|
||||
func (s AccountSASServices) String() string { |
||||
var buffer bytes.Buffer |
||||
if s.Blob { |
||||
buffer.WriteRune('b') |
||||
} |
||||
if s.Queue { |
||||
buffer.WriteRune('q') |
||||
} |
||||
if s.File { |
||||
buffer.WriteRune('f') |
||||
} |
||||
return buffer.String() |
||||
} |
||||
|
||||
// Parse initializes the AccountSASServices' fields from a string.
|
||||
func (a *AccountSASServices) Parse(s string) error { |
||||
*a = AccountSASServices{} // Clear out the flags
|
||||
for _, r := range s { |
||||
switch r { |
||||
case 'b': |
||||
a.Blob = true |
||||
case 'q': |
||||
a.Queue = true |
||||
case 'f': |
||||
a.File = true |
||||
default: |
||||
return fmt.Errorf("Invalid service character: '%v'", r) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
|
||||
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
|
||||
type AccountSASResourceTypes struct { |
||||
Service, Container, Object bool |
||||
} |
||||
|
||||
// String produces the SAS resource types string for an Azure Storage account.
|
||||
// Call this method to set AccountSASSignatureValues's ResourceTypes field.
|
||||
func (rt AccountSASResourceTypes) String() string { |
||||
var buffer bytes.Buffer |
||||
if rt.Service { |
||||
buffer.WriteRune('s') |
||||
} |
||||
if rt.Container { |
||||
buffer.WriteRune('c') |
||||
} |
||||
if rt.Object { |
||||
buffer.WriteRune('o') |
||||
} |
||||
return buffer.String() |
||||
} |
||||
|
||||
// Parse initializes the AccountSASResourceType's fields from a string.
|
||||
func (rt *AccountSASResourceTypes) Parse(s string) error { |
||||
*rt = AccountSASResourceTypes{} // Clear out the flags
|
||||
for _, r := range s { |
||||
switch r { |
||||
case 's': |
||||
rt.Service = true |
||||
case 'c': |
||||
rt.Container = true |
||||
case 'o': |
||||
rt.Object = true |
||||
default: |
||||
return fmt.Errorf("Invalid resource type: '%v'", r) |
||||
} |
||||
} |
||||
return nil |
||||
} |
@ -1,322 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"net" |
||||
"net/url" |
||||
"strings" |
||||
"time" |
||||
) |
||||
|
||||
// SASVersion indicates the SAS version.
|
||||
const SASVersion = ServiceVersion |
||||
|
||||
type SASProtocol string |
||||
|
||||
const ( |
||||
// SASProtocolHTTPS can be specified for a SAS protocol
|
||||
SASProtocolHTTPS SASProtocol = "https" |
||||
|
||||
// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
|
||||
SASProtocolHTTPSandHTTP SASProtocol = "https,http" |
||||
) |
||||
|
||||
// FormatTimesForSASSigning converts a time.Time to a SASTimeFormat string suitable for a
// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
||||
func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) { |
||||
ss := "" |
||||
if !startTime.IsZero() { |
||||
ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
||||
} |
||||
se := "" |
||||
if !expiryTime.IsZero() { |
||||
se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
||||
} |
||||
sh := "" |
||||
if !snapshotTime.IsZero() { |
||||
sh = snapshotTime.Format(SnapshotTimeFormat) |
||||
} |
||||
return ss, se, sh |
||||
} |
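An in-package sketch of the formatting contract (zero times are omitted, i.e. rendered as ""):

package azblob

import (
	"fmt"
	"time"
)

// ExampleFormatTimesForSASSigning shows that zero times disappear from the
// string-to-sign while non-zero times use SASTimeFormat.
func ExampleFormatTimesForSASSigning() {
	st, se, _ := FormatTimesForSASSigning(
		time.Time{}, time.Date(2019, 12, 1, 0, 0, 0, 0, time.UTC), time.Time{})
	fmt.Printf("%q %q\n", st, se)
	// Output: "" "2019-12-01T00:00:00Z"
}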
||||
|
||||
// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
|
||||
const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
|
||||
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||
|
||||
// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
|
||||
// You parse a map of query parameters into its fields by calling newSASQueryParameters(). You add the components
// to a query parameter map by calling addToValues().
||||
// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
|
||||
//
|
||||
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
|
||||
type SASQueryParameters struct { |
||||
// All members are immutable or values so copies of this struct are goroutine-safe.
|
||||
version string `param:"sv"` |
||||
services string `param:"ss"` |
||||
resourceTypes string `param:"srt"` |
||||
protocol SASProtocol `param:"spr"` |
||||
startTime time.Time `param:"st"` |
||||
expiryTime time.Time `param:"se"` |
||||
snapshotTime time.Time `param:"snapshot"` |
||||
ipRange IPRange `param:"sip"` |
||||
identifier string `param:"si"` |
||||
resource string `param:"sr"` |
||||
permissions string `param:"sp"` |
||||
signature string `param:"sig"` |
||||
cacheControl string `param:"rscc"` |
||||
contentDisposition string `param:"rscd"` |
||||
contentEncoding string `param:"rsce"` |
||||
contentLanguage string `param:"rscl"` |
||||
contentType string `param:"rsct"` |
||||
signedOid string `param:"skoid"` |
||||
signedTid string `param:"sktid"` |
||||
signedStart time.Time `param:"skt"` |
||||
signedExpiry time.Time `param:"ske"` |
||||
signedService string `param:"sks"` |
||||
signedVersion string `param:"skv"` |
||||
} |
||||
|
||||
func (p *SASQueryParameters) SignedOid() string { |
||||
return p.signedOid |
||||
} |
||||
|
||||
func (p *SASQueryParameters) SignedTid() string { |
||||
return p.signedTid |
||||
} |
||||
|
||||
func (p *SASQueryParameters) SignedStart() time.Time { |
||||
return p.signedStart |
||||
} |
||||
|
||||
func (p *SASQueryParameters) SignedExpiry() time.Time { |
||||
return p.signedExpiry |
||||
} |
||||
|
||||
func (p *SASQueryParameters) SignedService() string { |
||||
return p.signedService |
||||
} |
||||
|
||||
func (p *SASQueryParameters) SignedVersion() string { |
||||
return p.signedVersion |
||||
} |
||||
|
||||
func (p *SASQueryParameters) SnapshotTime() time.Time { |
||||
return p.snapshotTime |
||||
} |
||||
|
||||
func (p *SASQueryParameters) Version() string { |
||||
return p.version |
||||
} |
||||
|
||||
func (p *SASQueryParameters) Services() string { |
||||
return p.services |
||||
} |
||||
func (p *SASQueryParameters) ResourceTypes() string { |
||||
return p.resourceTypes |
||||
} |
||||
func (p *SASQueryParameters) Protocol() SASProtocol { |
||||
return p.protocol |
||||
} |
||||
func (p *SASQueryParameters) StartTime() time.Time { |
||||
return p.startTime |
||||
} |
||||
func (p *SASQueryParameters) ExpiryTime() time.Time { |
||||
return p.expiryTime |
||||
} |
||||
|
||||
func (p *SASQueryParameters) IPRange() IPRange { |
||||
return p.ipRange |
||||
} |
||||
|
||||
func (p *SASQueryParameters) Identifier() string { |
||||
return p.identifier |
||||
} |
||||
|
||||
func (p *SASQueryParameters) Resource() string { |
||||
return p.resource |
||||
} |
||||
func (p *SASQueryParameters) Permissions() string { |
||||
return p.permissions |
||||
} |
||||
|
||||
func (p *SASQueryParameters) Signature() string { |
||||
return p.signature |
||||
} |
||||
|
||||
func (p *SASQueryParameters) CacheControl() string { |
||||
return p.cacheControl |
||||
} |
||||
|
||||
func (p *SASQueryParameters) ContentDisposition() string { |
||||
return p.contentDisposition |
||||
} |
||||
|
||||
func (p *SASQueryParameters) ContentEncoding() string { |
||||
return p.contentEncoding |
||||
} |
||||
|
||||
func (p *SASQueryParameters) ContentLanguage() string { |
||||
return p.contentLanguage |
||||
} |
||||
|
||||
func (p *SASQueryParameters) ContentType() string { |
||||
return p.contentType |
||||
} |
||||
|
||||
// IPRange represents a SAS IP range's start IP and (optionally) end IP.
|
||||
type IPRange struct { |
||||
Start net.IP // Not specified if length = 0
|
||||
End net.IP // Not specified if length = 0
|
||||
} |
||||
|
||||
// String returns a string representation of an IPRange.
|
||||
func (ipr *IPRange) String() string { |
||||
if len(ipr.Start) == 0 { |
||||
return "" |
||||
} |
||||
start := ipr.Start.String() |
||||
if len(ipr.End) == 0 { |
||||
return start |
||||
} |
||||
return start + "-" + ipr.End.String() |
||||
} |
||||
|
||||
// newSASQueryParameters creates and initializes a SASQueryParameters object based on the
// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
// all SAS-related query parameters are removed from the passed-in map. If
// deleteSASParametersFromValues is false, the passed-in map is unaltered.
||||
func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters { |
||||
p := SASQueryParameters{} |
||||
for k, v := range values { |
||||
val := v[0] |
||||
isSASKey := true |
||||
switch strings.ToLower(k) { |
||||
case "sv": |
||||
p.version = val |
||||
case "ss": |
||||
p.services = val |
||||
case "srt": |
||||
p.resourceTypes = val |
||||
case "spr": |
||||
p.protocol = SASProtocol(val) |
||||
case "snapshot": |
||||
p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val) |
||||
case "st": |
||||
p.startTime, _ = time.Parse(SASTimeFormat, val) |
||||
case "se": |
||||
p.expiryTime, _ = time.Parse(SASTimeFormat, val) |
||||
case "sip": |
||||
dashIndex := strings.Index(val, "-") |
||||
if dashIndex == -1 { |
||||
p.ipRange.Start = net.ParseIP(val) |
||||
} else { |
||||
p.ipRange.Start = net.ParseIP(val[:dashIndex]) |
||||
p.ipRange.End = net.ParseIP(val[dashIndex+1:]) |
||||
} |
||||
case "si": |
||||
p.identifier = val |
||||
case "sr": |
||||
p.resource = val |
||||
case "sp": |
||||
p.permissions = val |
||||
case "sig": |
||||
p.signature = val |
||||
case "rscc": |
||||
p.cacheControl = val |
||||
case "rscd": |
||||
p.contentDisposition = val |
||||
case "rsce": |
||||
p.contentEncoding = val |
||||
case "rscl": |
||||
p.contentLanguage = val |
||||
case "rsct": |
||||
p.contentType = val |
||||
case "skoid": |
||||
p.signedOid = val |
||||
case "sktid": |
||||
p.signedTid = val |
||||
case "skt": |
||||
p.signedStart, _ = time.Parse(SASTimeFormat, val) |
||||
case "ske": |
||||
p.signedExpiry, _ = time.Parse(SASTimeFormat, val) |
||||
case "sks": |
||||
p.signedService = val |
||||
case "skv": |
||||
p.signedVersion = val |
||||
default: |
||||
isSASKey = false // We didn't recognize the query parameter
|
||||
} |
||||
if isSASKey && deleteSASParametersFromValues { |
||||
delete(values, k) |
||||
} |
||||
} |
||||
return p |
||||
} |
||||
|
||||
// addToValues adds the SAS components to the specified query parameters map.
||||
func (p *SASQueryParameters) addToValues(v url.Values) url.Values { |
||||
if p.version != "" { |
||||
v.Add("sv", p.version) |
||||
} |
||||
if p.services != "" { |
||||
v.Add("ss", p.services) |
||||
} |
||||
if p.resourceTypes != "" { |
||||
v.Add("srt", p.resourceTypes) |
||||
} |
||||
if p.protocol != "" { |
||||
v.Add("spr", string(p.protocol)) |
||||
} |
||||
if !p.startTime.IsZero() { |
||||
v.Add("st", p.startTime.Format(SASTimeFormat)) |
||||
} |
||||
if !p.expiryTime.IsZero() { |
||||
v.Add("se", p.expiryTime.Format(SASTimeFormat)) |
||||
} |
||||
if len(p.ipRange.Start) > 0 { |
||||
v.Add("sip", p.ipRange.String()) |
||||
} |
||||
if p.identifier != "" { |
||||
v.Add("si", p.identifier) |
||||
} |
||||
if p.resource != "" { |
||||
v.Add("sr", p.resource) |
||||
} |
||||
if p.permissions != "" { |
||||
v.Add("sp", p.permissions) |
||||
} |
||||
if p.signedOid != "" { |
||||
v.Add("skoid", p.signedOid) |
||||
v.Add("sktid", p.signedTid) |
||||
v.Add("skt", p.signedStart.Format(SASTimeFormat)) |
||||
v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) |
||||
v.Add("sks", p.signedService) |
||||
v.Add("skv", p.signedVersion) |
||||
} |
||||
if p.signature != "" { |
||||
v.Add("sig", p.signature) |
||||
} |
||||
if p.cacheControl != "" { |
||||
v.Add("rscc", p.cacheControl) |
||||
} |
||||
if p.contentDisposition != "" { |
||||
v.Add("rscd", p.contentDisposition) |
||||
} |
||||
if p.contentEncoding != "" { |
||||
v.Add("rsce", p.contentEncoding) |
||||
} |
||||
if p.contentLanguage != "" { |
||||
v.Add("rscl", p.contentLanguage) |
||||
} |
||||
if p.contentType != "" { |
||||
v.Add("rsct", p.contentType) |
||||
} |
||||
return v |
||||
} |
||||
|
||||
// Encode encodes the SAS query parameters into URL encoded form sorted by key.
|
||||
func (p *SASQueryParameters) Encode() string { |
||||
v := url.Values{} |
||||
p.addToValues(v) |
||||
return v.Encode() |
||||
} |
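newSASQueryParameters is unexported, but it is reachable through the package's exported URL-parts helper. A sketch, assuming the exported NewBlobURLParts from elsewhere in this package, with a placeholder signature value:

package main

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// A hypothetical SAS-bearing blob URL; sig is a placeholder, not a real signature.
	raw := "https://myaccount.blob.core.windows.net/mycontainer/blob.txt" +
		"?sv=2018-11-09&sp=r&se=2019-12-01T00:00:00Z&sig=abc123"
	u, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	// NewBlobURLParts routes the query through the parser above, splitting
	// the recognized SAS parameters out into parts.SAS.
	parts := azblob.NewBlobURLParts(*u)
	fmt.Println(parts.ContainerName, parts.BlobName)
	fmt.Println(parts.SAS.Version(), parts.SAS.Permissions(), parts.SAS.ExpiryTime())
}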
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go (131 lines, generated, vendored)
@ -1,131 +0,0 @@ |
||||
package azblob |
||||
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes
|
||||
|
||||
const ( |
||||
// ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code.
|
||||
ServiceCodeNone ServiceCodeType = "" |
||||
|
||||
// ServiceCodeAccountAlreadyExists means the specified account already exists.
|
||||
ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists" |
||||
|
||||
// ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403).
|
||||
ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated" |
||||
|
||||
// ServiceCodeAccountIsDisabled means the specified account is disabled (403).
|
||||
ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled" |
||||
|
||||
// ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403).
|
||||
ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed" |
||||
|
||||
// ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400).
|
||||
ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported" |
||||
|
||||
// ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412).
|
||||
ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet" |
||||
|
||||
// ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400).
|
||||
ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey" |
||||
|
||||
// ServiceCodeInsufficientAccountPermissions means read operations are currently disabled, or write operations are not allowed, or the account being accessed does not have sufficient permissions to execute this operation (403).
||||
ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions" |
||||
|
||||
// ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500).
|
||||
ServiceCodeInternalError ServiceCodeType = "InternalError" |
||||
|
||||
// ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400).
|
||||
ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo" |
||||
|
||||
// ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400).
|
||||
ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue" |
||||
|
||||
// ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400).
|
||||
ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb" |
||||
|
||||
// ServiceCodeInvalidInput means one of the request inputs is not valid (400).
|
||||
ServiceCodeInvalidInput ServiceCodeType = "InvalidInput" |
||||
|
||||
// ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400).
|
||||
ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5" |
||||
|
||||
// ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400).
|
||||
ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata" |
||||
|
||||
// ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400).
|
||||
ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue" |
||||
|
||||
// ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416).
|
||||
ServiceCodeInvalidRange ServiceCodeType = "InvalidRange" |
||||
|
||||
// ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400).
|
||||
ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName" |
||||
|
||||
// ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400).
|
||||
ServiceCodeInvalidURI ServiceCodeType = "InvalidUri" |
||||
|
||||
// ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400).
|
||||
ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument" |
||||
|
||||
// ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400).
|
||||
ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue" |
||||
|
||||
// ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400).
|
||||
ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch" |
||||
|
||||
// ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400).
|
||||
ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge" |
||||
|
||||
// ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411).
|
||||
ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader" |
||||
|
||||
// ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400).
|
||||
ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter" |
||||
|
||||
// ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400).
|
||||
ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader" |
||||
|
||||
// ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400).
|
||||
ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode" |
||||
|
||||
// ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400).
|
||||
ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported" |
||||
|
||||
// ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500).
|
||||
ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut" |
||||
|
||||
// ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400).
|
||||
ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput" |
||||
|
||||
// ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400).
|
||||
ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue" |
||||
|
||||
// ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413).
|
||||
ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge" |
||||
|
||||
// ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409).
|
||||
ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch" |
||||
|
||||
// ServiceCodeRequestURLFailedToParse means the URL in the request could not be parsed (400).
||||
ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse" |
||||
|
||||
// ServiceCodeResourceAlreadyExists means the specified resource already exists (409).
|
||||
ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists" |
||||
|
||||
// ServiceCodeResourceNotFound means the specified resource does not exist (404).
|
||||
ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound" |
||||
|
||||
// ServiceCodeServerBusy means the server is currently unable to receive requests; please retry your request. Ingress/egress may be over the account limit, or operations per second may be over the account limit (503).
||||
ServiceCodeServerBusy ServiceCodeType = "ServerBusy" |
||||
|
||||
// ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400).
|
||||
ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader" |
||||
|
||||
// ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400).
|
||||
ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode" |
||||
|
||||
// ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400).
|
||||
ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter" |
||||
|
||||
// ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405).
|
||||
ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb" |
||||
) |
@ -1,111 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/xml" |
||||
"fmt" |
||||
"net/http" |
||||
"sort" |
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
) |
||||
|
||||
func init() { |
||||
// wire up our custom error handling constructor
|
||||
responseErrorFactory = newStorageError |
||||
} |
||||
|
||||
// ServiceCodeType is a string identifying a storage service error.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
|
||||
type ServiceCodeType string |
||||
|
||||
// StorageError identifies a responder-generated network or response parsing error.
|
||||
type StorageError interface { |
||||
// ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response().
|
||||
ResponseError |
||||
|
||||
// ServiceCode returns a service error code. Your code can use this to make error recovery decisions.
|
||||
ServiceCode() ServiceCodeType |
||||
} |
||||
|
||||
// storageError is the internal struct that implements the public StorageError interface.
|
||||
type storageError struct { |
||||
responseError |
||||
serviceCode ServiceCodeType |
||||
details map[string]string |
||||
} |
||||
|
||||
// newStorageError creates an error object that implements the error interface.
|
||||
func newStorageError(cause error, response *http.Response, description string) error { |
||||
return &storageError{ |
||||
responseError: responseError{ |
||||
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), |
||||
response: response, |
||||
description: description, |
||||
}, |
||||
serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")), |
||||
} |
||||
} |
||||
|
||||
// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
|
||||
func (e *storageError) ServiceCode() ServiceCodeType { |
||||
return e.serviceCode |
||||
} |
||||
|
||||
// Error implements the error interface's Error method to return a string representation of the error.
|
||||
func (e *storageError) Error() string { |
||||
b := &bytes.Buffer{} |
||||
fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode) |
||||
fmt.Fprintf(b, "Description=%s, Details: ", e.description) |
||||
if len(e.details) == 0 { |
||||
b.WriteString("(none)\n") |
||||
} else { |
||||
b.WriteRune('\n') |
||||
keys := make([]string, 0, len(e.details)) |
||||
// Alphabetize the details
|
||||
for k := range e.details { |
||||
keys = append(keys, k) |
||||
} |
||||
sort.Strings(keys) |
||||
for _, k := range keys { |
||||
fmt.Fprintf(b, " %s: %+v\n", k, e.details[k]) |
||||
} |
||||
} |
||||
req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request
|
||||
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) |
||||
return e.ErrorNode.Error(b.String()) |
||||
} |
||||
|
||||
// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503).
|
||||
func (e *storageError) Temporary() bool { |
||||
if e.response != nil { |
||||
if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) { |
||||
return true |
||||
} |
||||
} |
||||
return e.ErrorNode.Temporary() |
||||
} |
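The ServiceCode accessor above is the hook for error recovery. Below is a sketch of the intended pattern, assuming err came from some XxxURL call and is non-nil; ServiceCodeContainerNotFound is defined elsewhere in this package, ServiceCodeServerBusy in the common codes file above.

package main

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// handleStorageError branches on the service code carried by a StorageError.
// err is assumed to be a non-nil error returned by an XxxURL method.
func handleStorageError(err error) {
	stgErr, ok := err.(azblob.StorageError)
	if !ok {
		fmt.Println("not a service error:", err) // e.g. a pure network failure
		return
	}
	switch stgErr.ServiceCode() {
	case azblob.ServiceCodeContainerNotFound:
		fmt.Println("container does not exist; create it first")
	case azblob.ServiceCodeServerBusy:
		fmt.Println("throttled; back off and retry")
	default:
		fmt.Println("service error:", stgErr.ServiceCode())
	}
}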
||||
|
||||
// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
|
||||
func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { |
||||
tokName := "" |
||||
var t xml.Token |
||||
for t, err = d.Token(); err == nil; t, err = d.Token() { |
||||
switch tt := t.(type) { |
||||
case xml.StartElement: |
||||
tokName = tt.Name.Local |
||||
break |
||||
case xml.CharData: |
||||
switch tokName { |
||||
case "Message": |
||||
e.description = string(tt) |
||||
default: |
||||
if e.details == nil { |
||||
e.details = map[string]string{} |
||||
} |
||||
e.details[tokName] = string(tt) |
||||
} |
||||
} |
||||
} |
||||
return nil |
||||
} |
@ -1,64 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"strconv" |
||||
) |
||||
|
||||
// httpRange defines a range of bytes within an HTTP resource, starting at offset and
// ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange
// which has an offset but a zero-value count indicates from the offset to the resource's end.
||||
type httpRange struct { |
||||
offset int64 |
||||
count int64 |
||||
} |
||||
|
||||
func (r httpRange) pointers() *string { |
||||
if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
|
||||
return nil // No specified range
|
||||
} |
||||
endOffset := "" // if count == CountToEnd (0)
|
||||
if r.count > 0 { |
||||
endOffset = strconv.FormatInt((r.offset+r.count)-1, 10) |
||||
} |
||||
dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset) |
||||
return &dataRange |
||||
} |
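An in-package test sketch (httpRange is unexported) of the Range header strings pointers produces; note the inclusive end offset, offset+count-1:

package azblob

import "testing"

// TestHTTPRangePointers pins down the three cases pointers distinguishes.
func TestHTTPRangePointers(t *testing.T) {
	if r := (httpRange{}).pointers(); r != nil { // zero value: whole resource
		t.Fatalf("want nil, got %q", *r)
	}
	if r := (httpRange{offset: 100, count: 50}).pointers(); r == nil || *r != "bytes=100-149" {
		t.Fatalf("got %v", r) // end offset is inclusive: offset+count-1
	}
	if r := (httpRange{offset: 100}).pointers(); r == nil || *r != "bytes=100-" {
		t.Fatalf("got %v", r) // count == CountToEnd (0) leaves the end open
	}
}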
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) { |
||||
if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
||||
return 0, nil |
||||
} |
||||
|
||||
err := validateSeekableStreamAt0(body) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
|
||||
count, err := body.Seek(0, io.SeekEnd) |
||||
if err != nil { |
||||
return 0, errors.New("body stream must be seekable") |
||||
} |
||||
|
||||
body.Seek(0, io.SeekStart) |
||||
return count, nil |
||||
} |
||||
|
||||
// validateSeekableStreamAt0 returns an error if body is not a valid seekable stream positioned at 0.
||||
func validateSeekableStreamAt0(body io.ReadSeeker) error { |
||||
if body == nil { // nil bodies are "logically" seekable to 0
||||
return nil |
||||
} |
||||
if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil { |
||||
// Help detect programmer error
|
||||
if err != nil { |
||||
return errors.New("body stream must be seekable") |
||||
} |
||||
return errors.New("body stream must be set to position 0") |
||||
} |
||||
return nil |
||||
} |
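An in-package test sketch of the two helpers' contract: the stream must start at position 0, and the count helper rewinds after measuring:

package azblob

import (
	"bytes"
	"io"
	"testing"
)

// TestValidateSeekableStream exercises both the happy path and the
// wrong-position error.
func TestValidateSeekableStream(t *testing.T) {
	body := bytes.NewReader([]byte("hello"))
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil || count != 5 {
		t.Fatalf("count=%d err=%v", count, err)
	}
	if pos, _ := body.Seek(0, io.SeekCurrent); pos != 0 {
		t.Fatalf("stream was not rewound, pos=%d", pos)
	}
	body.Seek(3, io.SeekStart)
	if err := validateSeekableStreamAt0(body); err == nil {
		t.Fatal("expected a position-0 error")
	}
}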
@ -1,77 +0,0 @@ |
||||
package azblob |
||||
|
||||
import ( |
||||
"crypto/rand" |
||||
"fmt" |
||||
"strconv" |
||||
) |
||||
|
||||
// The UUID reserved variants.
|
||||
const ( |
||||
reservedNCS byte = 0x80 |
||||
reservedRFC4122 byte = 0x40 |
||||
reservedMicrosoft byte = 0x20 |
||||
reservedFuture byte = 0x00 |
||||
) |
||||
|
||||
// A UUID representation compliant with specification in RFC 4122 document.
|
||||
type uuid [16]byte |
||||
|
||||
// newUUID returns a new uuid using the RFC 4122 version 4 algorithm.
||||
func newUUID() (u uuid) { |
||||
u = uuid{} |
||||
// Set all bits to randomly (or pseudo-randomly) chosen values.
|
||||
rand.Read(u[:]) |
||||
u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
|
||||
|
||||
var version byte = 4 |
||||
u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
|
||||
return |
||||
} |
||||
|
||||
// String returns an unparsed version of the generated UUID sequence.
|
||||
func (u uuid) String() string { |
||||
return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) |
||||
} |
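An in-package test sketch of what newUUID guarantees: 16 random bytes with the version-4 nibble forced, rendered as canonical 8-4-4-4-12 hex:

package azblob

import (
	"regexp"
	"testing"
)

// TestNewUUID checks the forced version nibble and the String rendering.
func TestNewUUID(t *testing.T) {
	u := newUUID()
	if u[6]>>4 != 4 {
		t.Fatalf("version nibble = %x, want 4", u[6]>>4)
	}
	ok, _ := regexp.MatchString(
		`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`, u.String())
	if !ok {
		t.Fatalf("unexpected format %q", u.String())
	}
}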
||||
|
||||
// parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
||||
func parseUUID(uuidStr string) uuid { |
||||
char := func(hexString string) byte { |
||||
i, _ := strconv.ParseUint(hexString, 16, 8) |
||||
return byte(i) |
||||
} |
||||
if uuidStr[0] == '{' { |
||||
uuidStr = uuidStr[1:] // Skip over the '{'
|
||||
} |
||||
// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
|
||||
// 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
|
||||
// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
|
||||
uuidVal := uuid{ |
||||
char(uuidStr[0:2]), |
||||
char(uuidStr[2:4]), |
||||
char(uuidStr[4:6]), |
||||
char(uuidStr[6:8]), |
||||
|
||||
char(uuidStr[9:11]), |
||||
char(uuidStr[11:13]), |
||||
|
||||
char(uuidStr[14:16]), |
||||
char(uuidStr[16:18]), |
||||
|
||||
char(uuidStr[19:21]), |
||||
char(uuidStr[21:23]), |
||||
|
||||
char(uuidStr[24:26]), |
||||
char(uuidStr[26:28]), |
||||
char(uuidStr[28:30]), |
||||
char(uuidStr[30:32]), |
||||
char(uuidStr[32:34]), |
||||
char(uuidStr[34:36]), |
||||
} |
||||
return uuidVal |
||||
} |
||||
|
||||
func (u uuid) bytes() []byte { |
||||
return u[:] |
||||
} |
@ -1,89 +0,0 @@ |
||||
// Copyright 2017 Microsoft Corporation. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/* |
||||
Package azblob allows you to manipulate Azure Storage containers and blob objects.
||||
|
||||
URL Types |
||||
|
||||
The most common types you'll work with are the XxxURL types. The methods of these types make requests |
||||
against the Azure Storage Service. |
||||
|
||||
- ServiceURL's methods perform operations on a storage account. |
||||
- ContainerURL's methods perform operations on an account's container. |
||||
- BlockBlobURL's methods perform operations on a container's block blob. |
||||
- AppendBlobURL's methods perform operations on a container's append blob. |
||||
- PageBlobURL's methods perform operations on a container's page blob. |
||||
- BlobURL's methods perform operations on a container's blob regardless of the blob's type. |
||||
|
||||
Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP |
||||
request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response are processed.
||||
The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more. |
||||
|
||||
Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass |
||||
an initial pipeline. When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own |
||||
URL but it shares the same pipeline as the parent ServiceURL object. |
||||
|
||||
To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob. |
||||
To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL |
||||
respectively. These three types are all identical except for the methods they expose; each type exposes the methods |
||||
relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL; |
||||
this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL, |
||||
the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You |
||||
can easily switch between blob types (method sets) by calling a ToXxxBlobURL method. |
||||
|
||||
If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL |
||||
object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object |
||||
with the same URL as the original but with the specified pipeline. |
||||
|
||||
Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that |
||||
XxxURL objects share a lot of system resources making them very efficient. |
||||
|
||||
All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
example of how to deal with errors.
||||
|
||||
URL and Shared Access Signature Manipulation |
||||
|
||||
The library includes a BlobURLParts type for deconstructing and reconstructing URLs, and you can use the following types
for generating and parsing Shared Access Signatures (SAS):
- Use the AccountSASSignatureValues type to create a SAS for a storage account.
- Use the BlobSASSignatureValues type to create a SAS for a container or blob.
- Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
||||
|
||||
To generate a SAS, you must use the SharedKeyCredential type. |
||||
|
||||
Credentials |
||||
|
||||
When creating a request pipeline, you must specify one of this package's credential types. |
||||
- Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS). |
||||
- Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this |
||||
to generate Shared Access Signatures. |
||||
|
||||
HTTP Request Policy Factories |
||||
|
||||
This package defines several request policy factories for use with the pipeline package. |
||||
Most applications will not use these factories directly; instead, the NewPipeline |
||||
function creates these factories, initializes them (via the PipelineOptions type) |
||||
and returns a pipeline object for use by the XxxURL objects. |
||||
|
||||
However, for advanced scenarios, developers can access these policy factories directly |
||||
and even create their own and then construct their own pipeline in order to affect HTTP |
||||
requests and responses performed by the XxxURL objects. For example, developers can |
||||
introduce their own logging, random failures, request recording & playback for fast |
||||
testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The |
||||
possibilities are endless! |
||||
|
||||
Below are the request pipeline policy factory functions that are provided with this |
||||
package: |
||||
- NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests. |
||||
- NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures. |
||||
- NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests. |
||||
- NewUniqueRequestIDPolicyFactory Adds an x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures.
||||
|
||||
Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline. |
||||
*/ |
||||
package azblob |
||||
|
||||
// TokenCredential Use this to access resources using Role-Based Access Control (RBAC).
|
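A getting-started sketch that follows the doc comment above: build a credential, a pipeline, and the XxxURL hierarchy. The account name, key, and container are placeholders; the key must be valid base64.

package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder account credentials.
	credential, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=")
	if err != nil {
		panic(err)
	}
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
	serviceURL := azblob.NewServiceURL(*u, p)
	containerURL := serviceURL.NewContainerURL("mycontainer") // shares serviceURL's pipeline
	blobURL := containerURL.NewBlockBlobURL("hello.txt")      // shares it too

	_, err = containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessNone)
	fmt.Println(blobURL.String(), err)
}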
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go (349 lines, generated, vendored)
@ -1,349 +0,0 @@ |
||||
package azblob |
||||
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/base64" |
||||
"github.com/Azure/azure-pipeline-go/pipeline" |
||||
"io" |
||||
"io/ioutil" |
||||
"net/http" |
||||
"net/url" |
||||
"strconv" |
||||
"time" |
||||
) |
||||
|
||||
// appendBlobClient is the client for the AppendBlob methods of the Azblob service.
|
||||
type appendBlobClient struct { |
||||
managementClient |
||||
} |
||||
|
||||
// newAppendBlobClient creates an instance of the appendBlobClient client.
|
||||
func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient { |
||||
return appendBlobClient{newManagementClient(url, p)} |
||||
} |
||||
|
||||
// AppendBlock: the Append Block operation commits a new block of data to the end of an existing append blob. The Append
// Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is
// supported only on version 2015-02-21 or later.
||||
//
|
||||
// body is the initial data; body will be closed upon successful return. Callers should ensure closure when receiving an
// error. contentLength is the length of the request. timeout is the timeout parameter expressed in seconds. For more
||||
// information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
|
||||
// be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active
|
||||
// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
|
||||
// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
|
||||
// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
|
||||
// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
|
||||
// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
|
||||
// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
|
||||
// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
|
||||
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
|
||||
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
|
||||
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) { |
||||
if err := validate([]validation{ |
||||
{targetValue: body, |
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, |
||||
{targetValue: timeout, |
||||
constraints: []constraint{{target: "timeout", name: null, rule: false, |
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { |
||||
return nil, err |
||||
} |
||||
req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, req) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return resp.(*AppendBlobAppendBlockResponse), err |
||||
} |
||||
|
||||
// appendBlockPreparer prepares the AppendBlock request.
|
||||
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { |
||||
req, err := pipeline.NewRequest("PUT", client.url, body) |
||||
if err != nil { |
||||
return req, pipeline.NewError(err, "failed to create request") |
||||
} |
||||
params := req.URL.Query() |
||||
if timeout != nil { |
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) |
||||
} |
||||
params.Set("comp", "appendblock") |
||||
req.URL.RawQuery = params.Encode() |
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) |
||||
if transactionalContentMD5 != nil { |
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) |
||||
} |
||||
if leaseID != nil { |
||||
req.Header.Set("x-ms-lease-id", *leaseID) |
||||
} |
||||
if maxSize != nil { |
||||
req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) |
||||
} |
||||
if appendPosition != nil { |
||||
req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) |
||||
} |
||||
if ifModifiedSince != nil { |
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) |
||||
} |
||||
if ifUnmodifiedSince != nil { |
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) |
||||
} |
||||
if ifMatch != nil { |
||||
req.Header.Set("If-Match", string(*ifMatch)) |
||||
} |
||||
if ifNoneMatch != nil { |
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch)) |
||||
} |
||||
req.Header.Set("x-ms-version", ServiceVersion) |
||||
if requestID != nil { |
||||
req.Header.Set("x-ms-client-request-id", *requestID) |
||||
} |
||||
return req, nil |
||||
} |
||||
|
||||
// appendBlockResponder handles the response to the AppendBlock request.
|
||||
func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) { |
||||
err := validateResponse(resp, http.StatusOK, http.StatusCreated) |
||||
if resp == nil { |
||||
return nil, err |
||||
} |
||||
io.Copy(ioutil.Discard, resp.Response().Body) |
||||
resp.Response().Body.Close() |
||||
return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err |
||||
} |
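Callers do not invoke this generated client directly; the exported AppendBlobURL wraps it. Below is a sketch assuming the v0.7 public wrappers (NewAppendBlobURL, Create, AppendBlock) with placeholder account details.

package main

import (
	"context"
	"net/url"
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	credential, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=") // placeholders
	if err != nil {
		panic(err)
	}
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
	u, _ := url.Parse("https://myaccount.blob.core.windows.net/logs/app.log")
	appendBlob := azblob.NewAppendBlobURL(*u, p)

	ctx := context.Background()
	if _, err := appendBlob.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}); err != nil {
		panic(err)
	}
	// Each call lands in appendBlockPreparer/appendBlockResponder above.
	if _, err := appendBlob.AppendBlock(ctx, strings.NewReader("log line\n"), azblob.AppendBlobAccessConditions{}, nil); err != nil {
		panic(err)
	}
}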
||||
|
||||
// AppendBlockFromURL: the Append Block operation commits a new block of data to the end of an existing append blob
// where the contents are read from a source URL. The Append Block operation is permitted only if the blob was created
// with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 or later.
||||
//
|
||||
// sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of
|
||||
// source data in the specified range. sourceContentMD5 is specify the md5 calculated for the range of bytes that must
|
||||
// be read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
|
||||
// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
|
||||
// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
|
||||
// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
|
||||
// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
|
||||
// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
|
||||
// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
|
||||
// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
|
||||
// only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to
|
||||
// operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||
// matching value. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified
|
||||
// since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it
|
||||
// has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs
|
||||
// with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockFromURLResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*AppendBlobAppendBlockFromURLResponse), err
}

// appendBlockFromURLPreparer prepares the AppendBlockFromURL request.
func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "appendblock")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("x-ms-copy-source", sourceURL)
	if sourceRange != nil {
		req.Header.Set("x-ms-source-range", *sourceRange)
	}
	if sourceContentMD5 != nil {
		req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
	}
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if maxSize != nil {
		req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10))
	}
	if appendPosition != nil {
		req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	if sourceIfModifiedSince != nil {
		req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if sourceIfUnmodifiedSince != nil {
		req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if sourceIfMatch != nil {
		req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
	}
	if sourceIfNoneMatch != nil {
		req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// appendBlockFromURLResponder handles the response to the AppendBlockFromURL request.
func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &AppendBlobAppendBlockFromURLResponse{rawResponse: resp.Response()}, err
}
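
Not part of the generated file: a minimal in-package sketch of how AppendBlockFromURL might be driven. The helper name and source URL are assumptions for illustration (the generated clients are unexported, so real callers go through the package's exported wrappers); it uses appendPosition as the optimistic-concurrency guard the doc comment above describes.

// appendFromSource is a hypothetical sketch, not generated code. It appends
// bytes 0-511 of sourceURL to the append blob, failing with
// AppendPositionConditionNotMet (HTTP 412) unless the blob is still empty.
func appendFromSource(ctx context.Context, client appendBlobClient, sourceURL string) error {
	srcRange := "bytes=0-511"
	appendPos := int64(0) // expected current size of the destination blob
	_, err := client.AppendBlockFromURL(ctx, sourceURL, 512, &srcRange,
		nil,                     // sourceContentMD5
		nil, nil, nil, &appendPos, // timeout, leaseID, maxSize, appendPosition
		nil, nil, nil, nil, // destination if-modified/unmodified/match/none-match
		nil, nil, nil, nil, // source if-modified/unmodified/match/none-match
		nil) // requestID
	return err
}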

// Create the Create Append Blob operation creates a new append blob.
//
// contentLength is the length of the request. timeout is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
// blobContentLanguage is optional. Sets the blob's content language. If specified, this property is stored with the
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID: if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince: specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince: specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch: specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch: specify an ETag value to operate only on blobs without a matching value. requestID provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*AppendBlobCreateResponse), err
}

// createPreparer prepares the Create request.
func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	req.URL.RawQuery = params.Encode()
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	if blobContentType != nil {
		req.Header.Set("x-ms-blob-content-type", *blobContentType)
	}
	if blobContentEncoding != nil {
		req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
	}
	if blobContentLanguage != nil {
		req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
	}
	if blobContentMD5 != nil {
		req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
	}
	if blobCacheControl != nil {
		req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
	}
	if metadata != nil {
		for k, v := range metadata {
			req.Header.Set("x-ms-meta-"+k, v)
		}
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if blobContentDisposition != nil {
		req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	req.Header.Set("x-ms-blob-type", "AppendBlob")
	return req, nil
}

// createResponder handles the response to the Create request.
func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err
}
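
A similarly hypothetical sketch for Create: make an empty append blob with a content type and one metadata pair. ETagAny (the "*" wildcard) is assumed to be defined in the package's models file; passing it as ifNoneMatch makes the create fail if the blob already exists.

// createAppendBlob is a hypothetical sketch, not generated code.
func createAppendBlob(ctx context.Context, client appendBlobClient) error {
	contentType := "text/plain"
	noneMatch := ETagAny // assumed "*" wildcard: reject if the blob exists
	_, err := client.Create(ctx, 0, nil, &contentType,
		nil, nil, nil, nil, // encoding, language, MD5, cache control
		map[string]string{"origin": "example"}, // becomes an x-ms-meta-origin header
		nil, nil,                // leaseID, disposition
		nil, nil, nil, &noneMatch, // if-modified/unmodified/match/none-match
		nil) // requestID
	return err
}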
File diff suppressed because it is too large.

vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go (generated, vendored, 510 lines)
@ -1,510 +0,0 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/xml"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"time"
)

// blockBlobClient is the client for the BlockBlob methods of the Azblob service.
type blockBlobClient struct {
	managementClient
}

// newBlockBlobClient creates an instance of the blockBlobClient client.
func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient {
	return blockBlobClient{newManagementClient(url, p)}
}

// CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the
// blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior
// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed,
// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from
// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the
// block, whichever list it may belong to.
//
// timeout is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobCacheControl is optional. Sets the blob's cache control. If specified,
// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's
// content type. If specified, this property is stored with the blob and returned with a read request.
// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the
// blob and returned with a read request. blobContentLanguage is optional. Sets the blob's content language. If
// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An
// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were
// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID: if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince: specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince: specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch: specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch: specify an ETag value to operate only on blobs without a matching value. requestID provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*BlockBlobCommitBlockListResponse), err
}

// commitBlockListPreparer prepares the CommitBlockList request.
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "blocklist")
	req.URL.RawQuery = params.Encode()
	if blobCacheControl != nil {
		req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
	}
	if blobContentType != nil {
		req.Header.Set("x-ms-blob-content-type", *blobContentType)
	}
	if blobContentEncoding != nil {
		req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
	}
	if blobContentLanguage != nil {
		req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
	}
	if blobContentMD5 != nil {
		req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
	}
	if metadata != nil {
		for k, v := range metadata {
			req.Header.Set("x-ms-meta-"+k, v)
		}
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if blobContentDisposition != nil {
		req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	b, err := xml.Marshal(blocks)
	if err != nil {
		return req, pipeline.NewError(err, "failed to marshal request body")
	}
	req.Header.Set("Content-Type", "application/xml")
	err = req.SetBody(bytes.NewReader(b))
	if err != nil {
		return req, pipeline.NewError(err, "failed to set request body")
	}
	return req, nil
}

// commitBlockListResponder handles the response to the CommitBlockList request.
func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err
}
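
A hypothetical end-to-end sketch tying StageBlock and CommitBlockList together. It assumes the (suppressed) models file defines BlockLookupList with a Latest []string field; the helper name, block-ID scheme, and the bytes/fmt/encoding/base64 imports are illustrative only.

// uploadInBlocks is a hypothetical sketch, not generated code.
func uploadInBlocks(ctx context.Context, client blockBlobClient, chunks [][]byte) error {
	ids := make([]string, 0, len(chunks))
	for i, chunk := range chunks {
		// Block IDs must be Base64 and equal-length within one blob.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%06d", i)))
		if _, err := client.StageBlock(ctx, id, int64(len(chunk)), bytes.NewReader(chunk), nil, nil, nil, nil); err != nil {
			return err
		}
		ids = append(ids, id)
	}
	// Latest commits the most recently uploaded version of each block,
	// per the CommitBlockList doc comment above.
	_, err := client.CommitBlockList(ctx, BlockLookupList{Latest: ids},
		nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
	return err
}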

// GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block
// blob.
//
// listType specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists
// together. snapshot is an opaque DateTime value that, when present, specifies the blob
// snapshot to retrieve. For more information on working with blob snapshots, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID: if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. requestID provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*BlockList), err
}

// getBlockListPreparer prepares the GetBlockList request.
func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("GET", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if snapshot != nil && len(*snapshot) > 0 {
		params.Set("snapshot", *snapshot)
	}
	params.Set("blocklisttype", string(listType))
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "blocklist")
	req.URL.RawQuery = params.Encode()
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// getBlockListResponder handles the response to the GetBlockList request.
func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK)
	if resp == nil {
		return nil, err
	}
	result := &BlockList{rawResponse: resp.Response()}
	if err != nil {
		return result, err
	}
	defer resp.Response().Body.Close()
	b, err := ioutil.ReadAll(resp.Response().Body)
	if err != nil {
		return result, err
	}
	if len(b) > 0 {
		b = removeBOM(b)
		err = xml.Unmarshal(b, result)
		if err != nil {
			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
		}
	}
	return result, nil
}
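
A hypothetical sketch of consuming the result. BlockListCommitted and the CommittedBlocks/Name/Size fields are assumed from the suppressed models file, so verify them against that diff before relying on this shape.

// listCommitted is a hypothetical sketch, not generated code.
func listCommitted(ctx context.Context, client blockBlobClient) error {
	bl, err := client.GetBlockList(ctx, BlockListCommitted, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	for _, b := range bl.CommittedBlocks { // assumed: each entry carries Name and Size
		fmt.Println(b.Name, b.Size)
	}
	return nil
}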

// StageBlock the Stage Block operation creates a new block to be committed as part of a blob.
//
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
// same size for each block. contentLength is the length of the request. body is the initial data; the body will be
// closed upon successful return. Callers should ensure closure when receiving an error. transactionalContentMD5
// specifies the transactional MD5 for the body, to be validated by the service. timeout is expressed in
// seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID: if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. requestID provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
	if err := validate([]validation{
		{targetValue: body,
			constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, timeout, leaseID, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*BlockBlobStageBlockResponse), err
}

// stageBlockPreparer prepares the StageBlock request.
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, body)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	params.Set("blockid", blockID)
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "block")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	if transactionalContentMD5 != nil {
		req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// stageBlockResponder handles the response to the StageBlock request.
func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err
}
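
A small hypothetical sketch of the transactional-MD5 path. The preparer above base64-encodes the digest into the Content-MD5 header, so the caller passes the raw 16-byte sum (crypto/md5 from the standard library).

// stageWithMD5 is a hypothetical sketch, not generated code.
func stageWithMD5(ctx context.Context, client blockBlobClient, id string, chunk []byte) error {
	sum := md5.Sum(chunk) // raw digest; the preparer handles base64 encoding
	_, err := client.StageBlock(ctx, id, int64(len(chunk)), bytes.NewReader(chunk), sum[:], nil, nil, nil)
	return err
}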

// StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents
// are read from a URL.
//
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
// same size for each block. contentLength is the length of the request. sourceURL specifies a URL to the copy source.
// sourceRange is the bytes of source data in the specified range. sourceContentMD5 specifies the MD5 calculated for the
// range of bytes that must be read from the copy source. timeout is expressed in seconds. For
// more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID: if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. sourceIfModifiedSince: specify this header value to operate only on a blob if
// it has been modified since the specified date/time. sourceIfUnmodifiedSince: specify this header value to operate
// only on a blob if it has not been modified since the specified date/time. sourceIfMatch: specify an ETag value to
// operate only on blobs with a matching value. sourceIfNoneMatch: specify an ETag value to operate only on blobs
// without a matching value. requestID provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*BlockBlobStageBlockFromURLResponse), err
}

// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	params.Set("blockid", blockID)
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "block")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	req.Header.Set("x-ms-copy-source", sourceURL)
	if sourceRange != nil {
		req.Header.Set("x-ms-source-range", *sourceRange)
	}
	if sourceContentMD5 != nil {
		req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if sourceIfModifiedSince != nil {
		req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if sourceIfUnmodifiedSince != nil {
		req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if sourceIfMatch != nil {
		req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
	}
	if sourceIfNoneMatch != nil {
		req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// stageBlockFromURLResponder handles the response to the StageBlockFromURL request.
func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err
}
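
A hypothetical sketch of staging a block directly from another blob's URL, so the bytes never pass through the client process; the range string and sizes are illustrative.

// stageFromURL is a hypothetical sketch, not generated code.
func stageFromURL(ctx context.Context, client blockBlobClient, id, srcURL string) error {
	srcRange := "bytes=0-1048575" // first 1 MiB of the source
	_, err := client.StageBlockFromURL(ctx, id, 1048576, srcURL, &srcRange,
		nil, nil, nil, // sourceContentMD5, timeout, leaseID
		nil, nil, nil, nil, // source if-modified/unmodified/match/none-match
		nil) // requestID
	return err
}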

// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block
// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of
// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a
// block blob, use the Put Block List operation.
//
// body is the initial data; the body will be closed upon successful return. Callers should ensure closure when
// receiving an error. contentLength is the length of the request. timeout is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
// blobContentLanguage is optional. Sets the blob's content language. If specified, this property is stored with the
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID: if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince: specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince: specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch: specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch: specify an ETag value to operate only on blobs without a matching value. requestID provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
	if err := validate([]validation{
		{targetValue: body,
			constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*BlockBlobUploadResponse), err
}

// uploadPreparer prepares the Upload request.
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, body)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	req.URL.RawQuery = params.Encode()
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	if blobContentType != nil {
		req.Header.Set("x-ms-blob-content-type", *blobContentType)
	}
	if blobContentEncoding != nil {
		req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
	}
	if blobContentLanguage != nil {
		req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
	}
	if blobContentMD5 != nil {
		req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
	}
	if blobCacheControl != nil {
		req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
	}
	if metadata != nil {
		for k, v := range metadata {
			req.Header.Set("x-ms-meta-"+k, v)
		}
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if blobContentDisposition != nil {
		req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	req.Header.Set("x-ms-blob-type", "BlockBlob")
	return req, nil
}

// uploadResponder handles the response to the Upload request.
func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err
}
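
A hypothetical single-shot sketch: for payloads small enough to send in one request, Upload replaces the whole blob in a single Put Blob call, in contrast to the stage-and-commit flow above.

// putWholeBlob is a hypothetical sketch, not generated code.
func putWholeBlob(ctx context.Context, client blockBlobClient, data []byte) error {
	contentType := "application/octet-stream"
	_, err := client.Upload(ctx, bytes.NewReader(data), int64(len(data)), nil, &contentType,
		nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) // remaining optional headers unset
	return err
}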
@ -1,38 +0,0 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"github.com/Azure/azure-pipeline-go/pipeline"
	"net/url"
)

const (
	// ServiceVersion specifies the version of the operations used in this package.
	ServiceVersion = "2018-11-09"
)

// managementClient is the base client for Azblob.
type managementClient struct {
	url url.URL
	p   pipeline.Pipeline
}

// newManagementClient creates an instance of the managementClient client.
func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient {
	return managementClient{
		url: url,
		p:   p,
	}
}

// URL returns a copy of the URL for this client.
func (mc managementClient) URL() url.URL {
	return mc.url
}

// Pipeline returns the pipeline for this client.
func (mc managementClient) Pipeline() pipeline.Pipeline {
	return mc.p
}
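
To make the composition concrete, a hypothetical sketch (the helper is not generated code): each concrete client embeds managementClient, so construction reduces to parsing an endpoint URL once and supplying a pipeline.

// newClients is a hypothetical sketch showing how the per-blob-type clients
// wrap the base managementClient.
func newClients(rawURL string, p pipeline.Pipeline) (blockBlobClient, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return blockBlobClient{}, err
	}
	return newBlockBlobClient(*u, p), nil
}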
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go (generated, vendored, 1037 lines)
File diff suppressed because it is too large.
File diff suppressed because it is too large.

vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go (generated, vendored, 896 lines)
@ -1,896 +0,0 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"context"
	"encoding/base64"
	"encoding/xml"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"time"
)

// pageBlobClient is the client for the PageBlob methods of the Azblob service.
type pageBlobClient struct {
	managementClient
}

// newPageBlobClient creates an instance of the pageBlobClient client.
func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient {
	return pageBlobClient{newManagementClient(url, p)}
}

// ClearPages the Clear Pages operation clears a set of pages from a page blob.
//
// contentLength is the length of the request. timeout is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter: return only the bytes of the blob in the specified
// range. leaseID: if specified, the operation only succeeds if the resource's lease is active and matches this ID.
// ifSequenceNumberLessThanOrEqualTo: specify this header value to operate only on a blob if it has a sequence number
// less than or equal to the specified value. ifSequenceNumberLessThan: specify this header value to operate only on a blob
// if it has a sequence number less than the specified value. ifSequenceNumberEqualTo: specify this header value to operate
// only on a blob if it has the specified sequence number. ifModifiedSince: specify this header value to operate only
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince: specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch: specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch: specify an ETag value to operate only on blobs
// without a matching value. requestID provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*PageBlobClearPagesResponse), err
}

// clearPagesPreparer prepares the ClearPages request.
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "page")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	if rangeParameter != nil {
		req.Header.Set("x-ms-range", *rangeParameter)
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if ifSequenceNumberLessThanOrEqualTo != nil {
		req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
	}
	if ifSequenceNumberLessThan != nil {
		req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
	}
	if ifSequenceNumberEqualTo != nil {
		req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	req.Header.Set("x-ms-page-write", "clear")
	return req, nil
}

// clearPagesResponder handles the response to the ClearPages request.
func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err
}
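
A hypothetical sketch of clearing the first page. Clear Pages sends no request body, so contentLength is 0, and page ranges must be 512-byte aligned per the page blob rules.

// clearFirstPage is a hypothetical sketch, not generated code.
func clearFirstPage(ctx context.Context, client pageBlobClient) error {
	pageRange := "bytes=0-511"
	_, err := client.ClearPages(ctx, 0, nil, &pageRange, nil,
		nil, nil, nil, // sequence-number conditions (le, lt, eq)
		nil, nil, nil, nil, // if-modified/unmodified/match/none-match
		nil) // requestID
	return err
}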

// CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob.
// The snapshot is copied such that only the differential changes from the previously copied snapshot are
// transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or
// copied from as usual. This API is supported since REST version 2016-05-31.
//
// copySource specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that
// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob
// must either be public or must be authenticated via a shared access signature. timeout is
// expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> ifModifiedSince: specify this header value to operate only on a blob if
// it has been modified since the specified date/time. ifUnmodifiedSince: specify this header value to operate only
// on a blob if it has not been modified since the specified date/time. ifMatch: specify an ETag value to operate
// only on blobs with a matching value. ifNoneMatch: specify an ETag value to operate only on blobs without a
// matching value. requestID provides a client-generated, opaque value with a 1 KB character limit that is recorded
// in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobCopyIncrementalResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.copyIncrementalPreparer(copySource, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*PageBlobCopyIncrementalResponse), err
}

// copyIncrementalPreparer prepares the CopyIncremental request.
func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "incrementalcopy")
	req.URL.RawQuery = params.Encode()
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-copy-source", copySource)
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// copyIncrementalResponder handles the response to the CopyIncremental request.
func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err
}
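
A hypothetical sketch of kicking off an incremental copy. The snapshot URL shape is an assumption based on the doc comment's description of copySource.

// incrementalCopy is a hypothetical sketch, not generated code. snapshotURL
// must identify a page blob snapshot (e.g. a blob URL carrying a snapshot=
// query parameter), URL-encoded as it would appear in a request URI.
func incrementalCopy(ctx context.Context, client pageBlobClient, snapshotURL string) error {
	_, err := client.CopyIncremental(ctx, snapshotURL, nil, nil, nil, nil, nil, nil)
	return err
}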

// Create the Create operation creates a new page blob.
//
// contentLength is the length of the request. blobContentLength specifies the maximum size for the page
// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is
// expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
// blobContentLanguage is optional. Sets the blob's content language. If specified, this property is stored with the
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID: if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince: specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince: specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch: specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch: specify an ETag value to operate only on blobs without a matching value. blobSequenceNumber is set
// for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
// the sequence number must be between 0 and 2^63 - 1. requestID provides a client-generated, opaque value with a 1
// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) { |
||||
if err := validate([]validation{ |
||||
{targetValue: timeout, |
||||
constraints: []constraint{{target: "timeout", name: null, rule: false, |
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { |
||||
return nil, err |
||||
} |
||||
req, err := client.createPreparer(contentLength, blobContentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return resp.(*PageBlobCreateResponse), err |
||||
} |
||||
|
||||
// createPreparer prepares the Create request.
|
||||
func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { |
||||
req, err := pipeline.NewRequest("PUT", client.url, nil) |
||||
if err != nil { |
||||
return req, pipeline.NewError(err, "failed to create request") |
||||
} |
||||
params := req.URL.Query() |
||||
if timeout != nil { |
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) |
||||
} |
||||
req.URL.RawQuery = params.Encode() |
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) |
||||
if blobContentType != nil { |
||||
req.Header.Set("x-ms-blob-content-type", *blobContentType) |
||||
} |
||||
if blobContentEncoding != nil { |
||||
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) |
||||
} |
||||
if blobContentLanguage != nil { |
||||
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) |
||||
} |
||||
if blobContentMD5 != nil { |
||||
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) |
||||
} |
||||
if blobCacheControl != nil { |
||||
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) |
||||
} |
||||
if metadata != nil { |
||||
for k, v := range metadata { |
||||
req.Header.Set("x-ms-meta-"+k, v) |
||||
} |
||||
} |
||||
if leaseID != nil { |
||||
req.Header.Set("x-ms-lease-id", *leaseID) |
||||
} |
||||
if blobContentDisposition != nil { |
||||
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) |
||||
} |
||||
if ifModifiedSince != nil { |
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) |
||||
} |
||||
if ifUnmodifiedSince != nil { |
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) |
||||
} |
||||
if ifMatch != nil { |
||||
req.Header.Set("If-Match", string(*ifMatch)) |
||||
} |
||||
if ifNoneMatch != nil { |
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch)) |
||||
} |
||||
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) |
||||
if blobSequenceNumber != nil { |
||||
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) |
||||
} |
||||
req.Header.Set("x-ms-version", ServiceVersion) |
||||
if requestID != nil { |
||||
req.Header.Set("x-ms-client-request-id", *requestID) |
||||
} |
||||
req.Header.Set("x-ms-blob-type", "PageBlob") |
||||
return req, nil |
||||
} |
||||
|
||||
// createResponder handles the response to the Create request.
|
||||
func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { |
||||
err := validateResponse(resp, http.StatusOK, http.StatusCreated) |
||||
if resp == nil { |
||||
return nil, err |
||||
} |
||||
io.Copy(ioutil.Discard, resp.Response().Body) |
||||
resp.Response().Body.Close() |
||||
return &PageBlobCreateResponse{rawResponse: resp.Response()}, err |
||||
} |
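
// Example (editor's sketch, not part of the generated file): creating an empty
// 1 MiB page blob with service defaults. pageBlobClient is unexported, so real
// callers typically reach Create through the package's exported wrappers; the
// helper below is hypothetical and assumes it runs inside package azblob.
func exampleCreatePageBlob(ctx context.Context, client pageBlobClient) error {
	size := int64(1024 * 1024) // page blob sizes must be 512-byte aligned
	// contentLength is 0 because Create sends no request body; every optional
	// parameter is nil to accept the service defaults.
	_, err := client.Create(ctx, 0, size, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
	return err
}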

// GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a
// page blob
//
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*PageList), err
}

// getPageRangesPreparer prepares the GetPageRanges request.
func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("GET", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if snapshot != nil && len(*snapshot) > 0 {
		params.Set("snapshot", *snapshot)
	}
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "pagelist")
	req.URL.RawQuery = params.Encode()
	if rangeParameter != nil {
		req.Header.Set("x-ms-range", *rangeParameter)
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// getPageRangesResponder handles the response to the GetPageRanges request.
func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK)
	if resp == nil {
		return nil, err
	}
	result := &PageList{rawResponse: resp.Response()}
	if err != nil {
		return result, err
	}
	defer resp.Response().Body.Close()
	b, err := ioutil.ReadAll(resp.Response().Body)
	if err != nil {
		return result, err
	}
	if len(b) > 0 {
		b = removeBOM(b)
		err = xml.Unmarshal(b, result)
		if err != nil {
			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
		}
	}
	return result, nil
}
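
// Example (editor's sketch, hypothetical helper): listing the valid page ranges
// in the first 4 MiB of a page blob. The "bytes=start-end" format follows the
// x-ms-range header convention used by the preparer above.
func exampleFirstPageRanges(ctx context.Context, client pageBlobClient) (*PageList, error) {
	rng := "bytes=0-4194303" // first 4 MiB, inclusive end offset
	// snapshot and timeout are nil: query the live blob with no explicit timeout.
	return client.GetPageRanges(ctx, nil, nil, &rng, nil, nil, nil, nil, nil, nil)
}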

// GetPageRangesDiff the Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were
// changed between target blob and previous snapshot.
//
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot
// parameter is a DateTime value that specifies that the response will contain only pages that were changed between
// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a
// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots
// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes
// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*PageList), err
}

// getPageRangesDiffPreparer prepares the GetPageRangesDiff request.
func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("GET", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if snapshot != nil && len(*snapshot) > 0 {
		params.Set("snapshot", *snapshot)
	}
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	if prevsnapshot != nil && len(*prevsnapshot) > 0 {
		params.Set("prevsnapshot", *prevsnapshot)
	}
	params.Set("comp", "pagelist")
	req.URL.RawQuery = params.Encode()
	if rangeParameter != nil {
		req.Header.Set("x-ms-range", *rangeParameter)
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// getPageRangesDiffResponder handles the response to the GetPageRangesDiff request.
func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK)
	if resp == nil {
		return nil, err
	}
	result := &PageList{rawResponse: resp.Response()}
	if err != nil {
		return result, err
	}
	defer resp.Response().Body.Close()
	b, err := ioutil.ReadAll(resp.Response().Body)
	if err != nil {
		return result, err
	}
	if len(b) > 0 {
		b = removeBOM(b)
		err = xml.Unmarshal(b, result)
		if err != nil {
			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
		}
	}
	return result, nil
}
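
// Example (editor's sketch, hypothetical helper): diffing the live blob against
// an earlier snapshot. prevSnapshot must name the older of the two snapshots,
// per the doc comment above; the diff covers both updated and cleared pages.
func examplePageRangesSince(ctx context.Context, client pageBlobClient, prevSnapshot string) (*PageList, error) {
	// nil snapshot targets the live blob; all conditional headers are omitted.
	return client.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot, nil, nil, nil, nil, nil, nil, nil)
}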

// Resize resizes the blob.
//
// blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must
// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information,
// see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*PageBlobResizeResponse), err
}

// resizePreparer prepares the Resize request.
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "properties")
	req.URL.RawQuery = params.Encode()
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// resizeResponder handles the response to the Resize request.
func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &PageBlobResizeResponse{rawResponse: resp.Response()}, err
}
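
// Example (editor's sketch, hypothetical helper): growing a page blob to 2 MiB.
// Resize only changes the blob's maximum size; it does not write any pages.
func exampleResize(ctx context.Context, client pageBlobClient) error {
	newSize := int64(2 * 1024 * 1024) // must stay 512-byte aligned
	_, err := client.Resize(ctx, newSize, nil, nil, nil, nil, nil, nil, nil)
	return err
}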

// UpdateSequenceNumber updates the sequence number of the blob.
//
// sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property
// applies to page blobs only. This property indicates how the service should modify the blob's sequence number.
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can use to
// track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*PageBlobUpdateSequenceNumberResponse), err
}

// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request.
func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "properties")
	req.URL.RawQuery = params.Encode()
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction))
	if blobSequenceNumber != nil {
		req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request.
func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err
}
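
// Example (editor's sketch; the "update" action string is an assumption based
// on the x-ms-sequence-number-action header above and may not match this
// package's typed constants): pinning a blob's sequence number to an explicit value.
func exampleSetSequenceNumber(ctx context.Context, client pageBlobClient, n int64) error {
	action := SequenceNumberActionType("update") // assumed service action value
	_, err := client.UpdateSequenceNumber(ctx, action, nil, nil, nil, nil, nil, nil, &n, nil)
	return err
}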

// UploadPages the Upload Pages operation writes a range of pages to a page blob
//
// body is initial data; body will be closed upon successful return. Callers should ensure closure when receiving an
// error. contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the
// body, to be validated by the service. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
	if err := validate([]validation{
		{targetValue: body,
			constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*PageBlobUploadPagesResponse), err
}

// uploadPagesPreparer prepares the UploadPages request.
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, body)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "page")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	if transactionalContentMD5 != nil {
		req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
	}
	if rangeParameter != nil {
		req.Header.Set("x-ms-range", *rangeParameter)
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if ifSequenceNumberLessThanOrEqualTo != nil {
		req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
	}
	if ifSequenceNumberLessThan != nil {
		req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
	}
	if ifSequenceNumberEqualTo != nil {
		req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	req.Header.Set("x-ms-page-write", "update")
	return req, nil
}

// uploadPagesResponder handles the response to the UploadPages request.
func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err
}
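
// Example (editor's sketch, assuming it runs inside package azblob with the
// standard library "bytes" package imported): writing one 512-byte page at
// offset 0. The destination range must be 512-aligned and its length must
// match the Content-Length of the body.
func exampleUploadFirstPage(ctx context.Context, client pageBlobClient, page [512]byte) error {
	body := bytes.NewReader(page[:]) // io.ReadSeeker over the page contents
	rng := "bytes=0-511"
	_, err := client.UploadPages(ctx, body, int64(len(page)), nil, nil, &rng, nil, nil, nil, nil, nil, nil, nil, nil, nil)
	return err
}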

// UploadPagesFromURL the Upload Pages operation writes a range of pages to a page blob where the contents are read
// from a URL
//
// sourceURL is specify a URL to the copy source. sourceRange is bytes of source data in the specified range. The
// length of this range should match the ContentLength header and x-ms-range/Range destination range header.
// contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be
// written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated
// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in
// seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only
// on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this
// header value to operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo
// is specify this header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is
// specify this header value to operate only on a blob if it has been modified since the specified date/time.
// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
// specify an ETag value to operate only on blobs without a matching value. sourceIfModifiedSince is specify this
// header value to operate only on a blob if it has been modified since the specified date/time.
// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value.
// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, timeout, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesFromURLResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*PageBlobUploadPagesFromURLResponse), err
}

// uploadPagesFromURLPreparer prepares the UploadPagesFromURL request.
func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "page")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("x-ms-copy-source", sourceURL)
	req.Header.Set("x-ms-source-range", sourceRange)
	if sourceContentMD5 != nil {
		req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
	}
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	req.Header.Set("x-ms-range", rangeParameter)
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if ifSequenceNumberLessThanOrEqualTo != nil {
		req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
	}
	if ifSequenceNumberLessThan != nil {
		req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
	}
	if ifSequenceNumberEqualTo != nil {
		req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	if sourceIfModifiedSince != nil {
		req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if sourceIfUnmodifiedSince != nil {
		req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if sourceIfMatch != nil {
		req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
	}
	if sourceIfNoneMatch != nil {
		req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	req.Header.Set("x-ms-page-write", "update")
	return req, nil
}

// uploadPagesFromURLResponder handles the response to the UploadPagesFromURL request.
func (client pageBlobClient) uploadPagesFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &PageBlobUploadPagesFromURLResponse{rawResponse: resp.Response()}, err
}
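
// Example (editor's sketch, hypothetical helper): copying one aligned 512-byte
// range from a source URL into the same range of this blob. The source URL must
// be readable by the service, and contentLength is 0 because the request itself
// carries no body; the data is fetched server-side.
func exampleUploadPageFromURL(ctx context.Context, client pageBlobClient, sourceURL string) error {
	rng := "bytes=0-511" // destination range; the source range must be the same length
	_, err := client.UploadPagesFromURL(ctx, sourceURL, rng, 0, rng, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
	return err
}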
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go (generated, vendored)
@ -1,74 +0,0 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"bytes"
	"context"
	"encoding/xml"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io/ioutil"
)

type responder func(resp pipeline.Response) (result pipeline.Response, err error)

// ResponderPolicyFactory is a Factory capable of creating a responder pipeline.
type responderPolicyFactory struct {
	responder responder
}

// New creates a responder policy factory.
func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
	return responderPolicy{next: next, responder: arpf.responder}
}

type responderPolicy struct {
	next      pipeline.Policy
	responder responder
}

// Do sends the request to the service and validates/deserializes the HTTP response.
func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
	resp, err := arp.next.Do(ctx, request)
	if err != nil {
		return resp, err
	}
	return arp.responder(resp)
}

// validateResponse checks an HTTP response's status code against a legal set of codes.
// If the response code is not legal, then validateResponse reads all of the response's body
// (containing error information) and returns a response error.
func validateResponse(resp pipeline.Response, successStatusCodes ...int) error {
	if resp == nil {
		return NewResponseError(nil, nil, "nil response")
	}
	responseCode := resp.Response().StatusCode
	for _, i := range successStatusCodes {
		if i == responseCode {
			return nil
		}
	}
	// only close the body in the failure case. in the
	// success case responders will close the body as required.
	defer resp.Response().Body.Close()
	b, err := ioutil.ReadAll(resp.Response().Body)
	if err != nil {
		return err
	}
	// the service code, description and details will be populated during unmarshalling
	responseError := NewResponseError(nil, resp.Response(), resp.Response().Status)
	if len(b) > 0 {
		if err = xml.Unmarshal(b, &responseError); err != nil {
			return NewResponseError(err, resp.Response(), "failed to unmarshal response body")
		}
	}
	return responseError
}

// removes any BOM from the byte slice
func removeBOM(b []byte) []byte {
	// UTF8
	return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
}
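
// Example (editor's sketch): removeBOM strips a UTF-8 byte-order mark so the
// XML decoder sees clean markup, and leaves BOM-free input untouched:
//
//	removeBOM([]byte("\xef\xbb\xbf<Error/>")) // -> []byte("<Error/>")
//	removeBOM([]byte("<Error/>"))             // -> []byte("<Error/>")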
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go (generated, vendored)
@ -1,95 +0,0 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"bytes"
	"fmt"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"net"
	"net/http"
)

// if you want to provide custom error handling set this variable to your constructor function
var responseErrorFactory func(cause error, response *http.Response, description string) error

// ResponseError identifies a responder-generated network or response parsing error.
type ResponseError interface {
	// Error exposes the Error(), Temporary() and Timeout() methods.
	net.Error // Includes the Go error interface
	// Response returns the HTTP response. You may examine this but you should not modify it.
	Response() *http.Response
}

// NewResponseError creates an error object that implements the error interface.
func NewResponseError(cause error, response *http.Response, description string) error {
	if responseErrorFactory != nil {
		return responseErrorFactory(cause, response, description)
	}
	return &responseError{
		ErrorNode:   pipeline.ErrorNode{}.Initialize(cause, 3),
		response:    response,
		description: description,
	}
}

// responseError is the internal struct that implements the public ResponseError interface.
type responseError struct {
	pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause
	response           *http.Response
	description        string
}

// Error implements the error interface's Error method to return a string representation of the error.
func (e *responseError) Error() string {
	b := &bytes.Buffer{}
	fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode)
	fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description)
	s := b.String()
	return e.ErrorNode.Error(s)
}

// Response implements the ResponseError interface's method to return the HTTP response.
func (e *responseError) Response() *http.Response {
	return e.response
}
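
// Example (editor's sketch, hypothetical helper): callers can type-assert a
// returned error to ResponseError to inspect the HTTP status behind a failed
// operation.
func exampleIsNotFound(err error) bool {
	if respErr, ok := err.(ResponseError); ok && respErr.Response() != nil {
		return respErr.Response().StatusCode == http.StatusNotFound
	}
	return false
}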

// RFC7807 PROBLEM ------------------------------------------------------------------------------------
// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members.
/*type RFC7807Problem struct {
	// Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation).
	typeURI string // Should default to "about:blank"
	// Optional: Short, human-readable summary (maybe localized).
	title string
	// Optional: HTTP status code generated by the origin server
	status int
	// Optional: Human-readable explanation for this problem occurrence.
	// Should help client correct the problem. Clients should NOT parse this string.
	detail string
	// Optional: A (relative) URI identifying this specific problem occurrence (it may or may not be dereferenced).
	instance string
}
// NewRFC7807Problem ...
func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error {
	return &RFC7807Problem{
		typeURI: typeURI,
		status:  status,
		title:   fmt.Sprintf(titleFormat, a...),
	}
}
// Error returns the error information as a string.
func (e *RFC7807Problem) Error() string {
	return e.title
}
// TypeURI ...
func (e *RFC7807Problem) TypeURI() string {
	if e.typeURI == "" {
		e.typeURI = "about:blank"
	}
	return e.typeURI
}
// Members ...
func (e *RFC7807Problem) Members() (status int, title, detail, instance string) {
	return e.status, e.title, e.detail, e.instance
}*/
@ -1,467 +0,0 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"bytes"
	"context"
	"encoding/xml"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
)

// serviceClient is the client for the Service methods of the Azblob service.
type serviceClient struct {
	managementClient
}

// newServiceClient creates an instance of the serviceClient client.
func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
	return serviceClient{newManagementClient(url, p)}
}

// GetAccountInfo returns the sku name and account kind
func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
	req, err := client.getAccountInfoPreparer()
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*ServiceGetAccountInfoResponse), err
}

// getAccountInfoPreparer prepares the GetAccountInfo request.
func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) {
	req, err := pipeline.NewRequest("GET", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	params.Set("restype", "account")
	params.Set("comp", "properties")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("x-ms-version", ServiceVersion)
	return req, nil
}

// getAccountInfoResponder handles the response to the GetAccountInfo request.
func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err
}

// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics
// and CORS (Cross-Origin Resource Sharing) rules.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.getPropertiesPreparer(timeout, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*StorageServiceProperties), err
}

// getPropertiesPreparer prepares the GetProperties request.
func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("GET", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("restype", "service")
	params.Set("comp", "properties")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// getPropertiesResponder handles the response to the GetProperties request.
func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK)
	if resp == nil {
		return nil, err
	}
	result := &StorageServiceProperties{rawResponse: resp.Response()}
	if err != nil {
		return result, err
	}
	defer resp.Response().Body.Close()
	b, err := ioutil.ReadAll(resp.Response().Body)
	if err != nil {
		return result, err
	}
	if len(b) > 0 {
		b = removeBOM(b)
		err = xml.Unmarshal(b, result)
		if err != nil {
			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
		}
	}
	return result, nil
}
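
// Example (editor's sketch, hypothetical helper): reading the service
// properties with an explicit 30-second server-side timeout.
func exampleServiceProperties(ctx context.Context, client serviceClient) (*StorageServiceProperties, error) {
	timeout := int32(30) // seconds, passed through as the "timeout" query parameter
	return client.GetProperties(ctx, &timeout, nil)
}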
||||
|
||||
// GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the
|
||||
// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account.
|
||||
//
|
||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
|
||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) { |
||||
if err := validate([]validation{ |
||||
{targetValue: timeout, |
||||
constraints: []constraint{{target: "timeout", name: null, rule: false, |
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { |
||||
return nil, err |
||||
} |
||||
req, err := client.getStatisticsPreparer(timeout, requestID) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return resp.(*StorageServiceStats), err |
||||
} |
||||
|
||||
// getStatisticsPreparer prepares the GetStatistics request.
|
||||
func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { |
||||
req, err := pipeline.NewRequest("GET", client.url, nil) |
||||
if err != nil { |
||||
return req, pipeline.NewError(err, "failed to create request") |
||||
} |
||||
params := req.URL.Query() |
||||
if timeout != nil { |
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) |
||||
} |
||||
params.Set("restype", "service") |
||||
params.Set("comp", "stats") |
||||
req.URL.RawQuery = params.Encode() |
||||
req.Header.Set("x-ms-version", ServiceVersion) |
||||
if requestID != nil { |
||||
req.Header.Set("x-ms-client-request-id", *requestID) |
||||
} |
||||
return req, nil |
||||
} |
||||
|
||||
// getStatisticsResponder handles the response to the GetStatistics request.
|
||||
func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) { |
||||
err := validateResponse(resp, http.StatusOK) |
||||
if resp == nil { |
||||
return nil, err |
||||
} |
||||
result := &StorageServiceStats{rawResponse: resp.Response()} |
||||
if err != nil { |
||||
return result, err |
||||
} |
||||
defer resp.Response().Body.Close() |
||||
b, err := ioutil.ReadAll(resp.Response().Body) |
||||
if err != nil { |
||||
return result, err |
||||
} |
||||
if len(b) > 0 { |
||||
b = removeBOM(b) |
||||
err = xml.Unmarshal(b, result) |
||||
if err != nil { |
||||
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") |
||||
} |
||||
} |
||||
return result, nil |
||||
} |

// GetUserDelegationKey retrieves a user delegation key for the Blob service. This is only a valid operation when using
// bearer token authentication.
//
// timeout is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, timeout *int32, requestID *string) (*UserDelegationKey, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.getUserDelegationKeyPreparer(keyInfo, timeout, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getUserDelegationKeyResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*UserDelegationKey), err
}

// getUserDelegationKeyPreparer prepares the GetUserDelegationKey request.
func (client serviceClient) getUserDelegationKeyPreparer(keyInfo KeyInfo, timeout *int32, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("POST", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("restype", "service")
	params.Set("comp", "userdelegationkey")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	b, err := xml.Marshal(keyInfo)
	if err != nil {
		return req, pipeline.NewError(err, "failed to marshal request body")
	}
	req.Header.Set("Content-Type", "application/xml")
	err = req.SetBody(bytes.NewReader(b))
	if err != nil {
		return req, pipeline.NewError(err, "failed to set request body")
	}
	return req, nil
}

// getUserDelegationKeyResponder handles the response to the GetUserDelegationKey request.
func (client serviceClient) getUserDelegationKeyResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK)
	if resp == nil {
		return nil, err
	}
	result := &UserDelegationKey{rawResponse: resp.Response()}
	if err != nil {
		return result, err
	}
	defer resp.Response().Body.Close()
	b, err := ioutil.ReadAll(resp.Response().Body)
	if err != nil {
		return result, err
	}
	if len(b) > 0 {
		b = removeBOM(b)
		err = xml.Unmarshal(b, result)
		if err != nil {
			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
		}
	}
	return result, nil
}
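getUserDelegationKeyPreparer marshals the KeyInfo argument into the XML request body. A minimal, self-contained sketch of the document it produces — the local KeyInfo type below restates the shape the generated models use, as an assumption for illustration:

	package main

	import (
		"encoding/xml"
		"fmt"
		"time"
	)

	// KeyInfo restates the struct marshalled by getUserDelegationKeyPreparer;
	// the field names and tags here are an assumption for illustration.
	type KeyInfo struct {
		XMLName xml.Name `xml:"KeyInfo"`
		Start   string   `xml:"Start"`
		Expiry  string   `xml:"Expiry"`
	}

	func main() {
		now := time.Now().UTC()
		ki := KeyInfo{
			Start:  now.Format(time.RFC3339),
			Expiry: now.Add(48 * time.Hour).Format(time.RFC3339),
		}
		b, err := xml.Marshal(ki)
		if err != nil {
			panic(err)
		}
		// This is the body POSTed with restype=service&comp=userdelegationkey
		// and Content-Type: application/xml.
		fmt.Println(string(b))
	}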

// ListContainersSegment returns a list of the containers under the specified account.
//
// prefix filters the results to return only containers whose name begins with the specified prefix. marker is a
// string value that identifies the portion of the list of containers to be returned with the next listing operation.
// The operation returns the NextMarker value within the response body if the listing operation did not return all
// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the
// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the
// client. maxresults specifies the maximum number of containers to return. If the request does not specify
// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the
// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the
// remainder of the results. For this reason, it is possible that the service will return fewer results than specified
// by maxresults, or than the default of 5000. include specifies that the container's metadata be returned as part of
// the response body. timeout is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) {
	if err := validate([]validation{
		{targetValue: maxresults,
			constraints: []constraint{{target: "maxresults", name: null, rule: false,
				chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*ListContainersSegmentResponse), err
}

// listContainersSegmentPreparer prepares the ListContainersSegment request.
func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("GET", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if prefix != nil && len(*prefix) > 0 {
		params.Set("prefix", *prefix)
	}
	if marker != nil && len(*marker) > 0 {
		params.Set("marker", *marker)
	}
	if maxresults != nil {
		params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
	}
	if include != ListContainersIncludeNone {
		params.Set("include", string(include))
	}
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "list")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// listContainersSegmentResponder handles the response to the ListContainersSegment request.
func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK)
	if resp == nil {
		return nil, err
	}
	result := &ListContainersSegmentResponse{rawResponse: resp.Response()}
	if err != nil {
		return result, err
	}
	defer resp.Response().Body.Close()
	b, err := ioutil.ReadAll(resp.Response().Body)
	if err != nil {
		return result, err
	}
	if len(b) > 0 {
		b = removeBOM(b)
		err = xml.Unmarshal(b, result)
		if err != nil {
			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
		}
	}
	return result, nil
}
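The marker/NextMarker contract described in the doc comment above is what drives pagination. A hedged sketch of consuming it through the public ServiceURL wrapper — the Marker type, its NotDone helper, and the ContainerItems field are how the public azblob surface typically exposes this operation, and should be treated as assumptions here:

	package main

	import (
		"context"
		"fmt"

		"github.com/Azure/azure-storage-blob-go/azblob"
	)

	// listAllContainers walks every page of the listing; service is assumed
	// to be an already-configured azblob.ServiceURL.
	func listAllContainers(ctx context.Context, service azblob.ServiceURL) error {
		for marker := (azblob.Marker{}); marker.NotDone(); {
			listing, err := service.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
			if err != nil {
				return err
			}
			for _, item := range listing.ContainerItems {
				fmt.Println(item.Name) // one page of up to 5000 containers
			}
			marker = listing.NextMarker // opaque continuation token from the response body
		}
		return nil
	}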

// SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage
// Analytics and CORS (Cross-Origin Resource Sharing) rules.
//
// storageServiceProperties is the StorageService properties. timeout is expressed in seconds. For more information,
// see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) {
	if err := validate([]validation{
		{targetValue: storageServiceProperties,
			constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false,
				chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true,
					chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false,
						chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
					}},
				}},
				{target: "storageServiceProperties.HourMetrics", name: null, rule: false,
					chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false,
						chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false,
							chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
						}},
					}},
				{target: "storageServiceProperties.MinuteMetrics", name: null, rule: false,
					chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false,
						chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false,
							chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
						}},
					}},
				{target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false,
					chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false,
						chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
					}}}},
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*ServiceSetPropertiesResponse), err
}

// setPropertiesPreparer prepares the SetProperties request.
func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("restype", "service")
	params.Set("comp", "properties")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	b, err := xml.Marshal(storageServiceProperties)
	if err != nil {
		return req, pipeline.NewError(err, "failed to marshal request body")
	}
	req.Header.Set("Content-Type", "application/xml")
	err = req.SetBody(bytes.NewReader(b))
	if err != nil {
		return req, pipeline.NewError(err, "failed to set request body")
	}
	return req, nil
}

// setPropertiesResponder handles the response to the SetProperties request.
func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err
}
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go (generated, vendored)
@ -1,367 +0,0 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"fmt"
	"github.com/Azure/azure-pipeline-go/pipeline"
	"reflect"
	"regexp"
	"strings"
)

// constraint stores the constraint name, the target field name,
// the rule, and any chained validations.
type constraint struct {
	// Target field name for validation.
	target string

	// Constraint name, e.g. minLength, MaxLength, Pattern, etc.
	name string

	// Rule for the constraint, e.g. greater than 10, less than 5, etc.
	rule interface{}

	// Chained validations for struct types.
	chain []constraint
}

// validation stores parameter-wise validation.
type validation struct {
	targetValue interface{}
	constraints []constraint
}

// Constraint list
const (
	empty            = "Empty"
	null             = "Null"
	readOnly         = "ReadOnly"
	pattern          = "Pattern"
	maxLength        = "MaxLength"
	minLength        = "MinLength"
	maxItems         = "MaxItems"
	minItems         = "MinItems"
	multipleOf       = "MultipleOf"
	uniqueItems      = "UniqueItems"
	inclusiveMaximum = "InclusiveMaximum"
	exclusiveMaximum = "ExclusiveMaximum"
	exclusiveMinimum = "ExclusiveMinimum"
	inclusiveMinimum = "InclusiveMinimum"
)

// validate checks the constraints on each parameter
// passed in the validation slice.
func validate(m []validation) error {
	for _, item := range m {
		v := reflect.ValueOf(item.targetValue)
		for _, constraint := range item.constraints {
			var err error
			switch v.Kind() {
			case reflect.Ptr:
				err = validatePtr(v, constraint)
			case reflect.String:
				err = validateString(v, constraint)
			case reflect.Struct:
				err = validateStruct(v, constraint)
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				err = validateInt(v, constraint)
			case reflect.Float32, reflect.Float64:
				err = validateFloat(v, constraint)
			case reflect.Array, reflect.Slice, reflect.Map:
				err = validateArrayMap(v, constraint)
			default:
				err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind()))
			}
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func validateStruct(x reflect.Value, v constraint, name ...string) error {
	// Get the field name from the target name, which is in the format a.b.c.
	s := strings.Split(v.target, ".")
	f := x.FieldByName(s[len(s)-1])
	if isZero(f) {
		return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target))
	}
	err := validate([]validation{
		{
			targetValue: getInterfaceValue(f),
			constraints: []constraint{v},
		},
	})
	return err
}

func validatePtr(x reflect.Value, v constraint) error {
	if v.name == readOnly {
		if !x.IsNil() {
			return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request")
		}
		return nil
	}
	if x.IsNil() {
		return checkNil(x, v)
	}
	if v.chain != nil {
		return validate([]validation{
			{
				targetValue: getInterfaceValue(x.Elem()),
				constraints: v.chain,
			},
		})
	}
	return nil
}

func validateInt(x reflect.Value, v constraint) error {
	i := x.Int()
	r, ok := v.rule.(int)
	if !ok {
		return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
	}
	switch v.name {
	case multipleOf:
		if i%int64(r) != 0 {
			return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r))
		}
	case exclusiveMinimum:
		if i <= int64(r) {
			return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
		}
	case exclusiveMaximum:
		if i >= int64(r) {
			return createError(x, v, fmt.Sprintf("value must be less than %v", r))
		}
	case inclusiveMinimum:
		if i < int64(r) {
			return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
		}
	case inclusiveMaximum:
		if i > int64(r) {
			return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
		}
	default:
		return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name))
	}
	return nil
}

func validateFloat(x reflect.Value, v constraint) error {
	f := x.Float()
	r, ok := v.rule.(float64)
	if !ok {
		return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule))
	}
	switch v.name {
	case exclusiveMinimum:
		if f <= r {
			return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
		}
	case exclusiveMaximum:
		if f >= r {
			return createError(x, v, fmt.Sprintf("value must be less than %v", r))
		}
	case inclusiveMinimum:
		if f < r {
			return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
		}
	case inclusiveMaximum:
		if f > r {
			return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
		}
	default:
		return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name))
	}
	return nil
}

func validateString(x reflect.Value, v constraint) error {
	s := x.String()
	switch v.name {
	case empty:
		if len(s) == 0 {
			return checkEmpty(x, v)
		}
	case pattern:
		reg, err := regexp.Compile(v.rule.(string))
		if err != nil {
			return createError(x, v, err.Error())
		}
		if !reg.MatchString(s) {
			return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule))
		}
	case maxLength:
		if _, ok := v.rule.(int); !ok {
			return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
		}
		if len(s) > v.rule.(int) {
			return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule))
		}
	case minLength:
		if _, ok := v.rule.(int); !ok {
			return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
		}
		if len(s) < v.rule.(int) {
			return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule))
		}
	case readOnly:
		if len(s) > 0 {
			return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request")
		}
	default:
		return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name))
	}
	if v.chain != nil {
		return validate([]validation{
			{
				targetValue: getInterfaceValue(x),
				constraints: v.chain,
			},
		})
	}
	return nil
}

func validateArrayMap(x reflect.Value, v constraint) error {
	switch v.name {
	case null:
		if x.IsNil() {
			return checkNil(x, v)
		}
	case empty:
		if x.IsNil() || x.Len() == 0 {
			return checkEmpty(x, v)
		}
	case maxItems:
		if _, ok := v.rule.(int); !ok {
			return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule))
		}
		if x.Len() > v.rule.(int) {
			return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len()))
		}
	case minItems:
		if _, ok := v.rule.(int); !ok {
			return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule))
		}
		if x.Len() < v.rule.(int) {
			return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len()))
		}
	case uniqueItems:
		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
			if !checkForUniqueInArray(x) {
				return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x))
			}
		} else if x.Kind() == reflect.Map {
			if !checkForUniqueInMap(x) {
				return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x))
			}
		} else {
			return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind()))
		}
	case readOnly:
		if x.Len() != 0 {
			return createError(x, v, "readonly parameter; must send as nil or empty in request")
		}
	case pattern:
		reg, err := regexp.Compile(v.rule.(string))
		if err != nil {
			return createError(x, v, err.Error())
		}
		keys := x.MapKeys()
		for _, k := range keys {
			if !reg.MatchString(k.String()) {
				return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule))
			}
		}
	default:
		return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name))
	}
	if v.chain != nil {
		return validate([]validation{
			{
				targetValue: getInterfaceValue(x),
				constraints: v.chain,
			},
		})
	}
	return nil
}

func checkNil(x reflect.Value, v constraint) error {
	if _, ok := v.rule.(bool); !ok {
		return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule))
	}
	if v.rule.(bool) {
		return createError(x, v, "value can not be null; required parameter")
	}
	return nil
}

func checkEmpty(x reflect.Value, v constraint) error {
	if _, ok := v.rule.(bool); !ok {
		return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule))
	}
	if v.rule.(bool) {
		return createError(x, v, "value can not be null or empty; required parameter")
	}
	return nil
}

func checkForUniqueInArray(x reflect.Value) bool {
	if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
		return false
	}
	arrOfInterface := make([]interface{}, x.Len())
	for i := 0; i < x.Len(); i++ {
		arrOfInterface[i] = x.Index(i).Interface()
	}
	m := make(map[interface{}]bool)
	for _, val := range arrOfInterface {
		if m[val] {
			return false
		}
		m[val] = true
	}
	return true
}

func checkForUniqueInMap(x reflect.Value) bool {
	if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
		return false
	}
	mapOfInterface := make(map[interface{}]interface{}, x.Len())
	keys := x.MapKeys()
	for _, k := range keys {
		mapOfInterface[k.Interface()] = x.MapIndex(k).Interface()
	}
	m := make(map[interface{}]bool)
	for _, val := range mapOfInterface {
		if m[val] {
			return false
		}
		m[val] = true
	}
	return true
}

func getInterfaceValue(x reflect.Value) interface{} {
	if x.Kind() == reflect.Invalid {
		return nil
	}
	return x.Interface()
}

func isZero(x interface{}) bool {
	return x == reflect.Zero(reflect.TypeOf(x)).Interface()
}

func createError(x reflect.Value, v constraint, message string) error {
	return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s",
		v.target, v.name, getInterfaceValue(x), message))
}
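Since constraint, validation, and validate are unexported, they are only reachable from inside package azblob; the generated operations earlier in this diff all call them the same way. A small sketch (it would have to live in this package, and exampleTimeoutValidation is a hypothetical helper) of the timeout pattern used above: Null with rule false permits a nil pointer, and the chained InclusiveMinimum fires only once the pointer is set:

	package azblob

	import "fmt"

	// exampleTimeoutValidation is a hypothetical helper illustrating the
	// constraint chain the generated operations build for *int32 timeouts.
	func exampleTimeoutValidation() {
		timeout := int32(-5)
		err := validate([]validation{
			{targetValue: &timeout,
				constraints: []constraint{{target: "timeout", name: null, rule: false,
					chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}})
		// err reports: validation failed: parameter=timeout
		// constraint=InclusiveMinimum ... value must be greater than or equal to 0
		fmt.Println(err)
	}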
@ -1,14 +0,0 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
	return "Azure-SDK-For-Go/0.0.0 azblob/2018-11-09"
}

// Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
	return "0.0.0"
}
@ -1,242 +0,0 @@
package azblob

import (
	"context"
	"io"
	"net/http"
	"time"
)

// BlobHTTPHeaders contains read/writeable blob properties.
type BlobHTTPHeaders struct {
	ContentType        string
	ContentMD5         []byte
	ContentEncoding    string
	ContentLanguage    string
	ContentDisposition string
	CacheControl       string
}

// NewHTTPHeaders returns the user-modifiable properties for this blob.
func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders {
	return BlobHTTPHeaders{
		ContentType:        bgpr.ContentType(),
		ContentEncoding:    bgpr.ContentEncoding(),
		ContentLanguage:    bgpr.ContentLanguage(),
		ContentDisposition: bgpr.ContentDisposition(),
		CacheControl:       bgpr.CacheControl(),
		ContentMD5:         bgpr.ContentMD5(),
	}
}

///////////////////////////////////////////////////////////////////////////////

// NewHTTPHeaders returns the user-modifiable properties for this blob.
func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
	return BlobHTTPHeaders{
		ContentType:        dr.ContentType(),
		ContentEncoding:    dr.ContentEncoding(),
		ContentLanguage:    dr.ContentLanguage(),
		ContentDisposition: dr.ContentDisposition(),
		CacheControl:       dr.CacheControl(),
		ContentMD5:         dr.ContentMD5(),
	}
}

///////////////////////////////////////////////////////////////////////////////

// DownloadResponse wraps the AutoRest-generated downloadResponse and helps to provide info for retry.
type DownloadResponse struct {
	r       *downloadResponse
	ctx     context.Context
	b       BlobURL
	getInfo HTTPGetterInfo
}

// Body constructs a new RetryReader stream for reading data. If a connection fails
// while reading, it will make additional requests to reestablish a connection and
// continue reading. Specifying RetryReaderOptions with MaxRetryRequests set to 0
// (the default) returns the original response body and no retries will be performed.
func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
	if o.MaxRetryRequests == 0 { // No additional retries
		return r.Response().Body
	}
	return NewRetryReader(r.ctx, r.Response(), r.getInfo, o,
		func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
			resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count,
				BlobAccessConditions{
					ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag},
				},
				false)
			if err != nil {
				return nil, err
			}
			return resp.Response(), err
		},
	)
}
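A hedged sketch of how Body is typically consumed after a download — blobURL here stands in for any already-configured azblob.BlobURL, and CountToEnd is the package's download-everything sentinel; treat the exact call shapes as assumptions:

	package main

	import (
		"context"
		"io/ioutil"

		"github.com/Azure/azure-storage-blob-go/azblob"
	)

	// readWholeBlob downloads a blob and reads it through the retrying body.
	func readWholeBlob(ctx context.Context, blobURL azblob.BlobURL) ([]byte, error) {
		resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
		if err != nil {
			return nil, err
		}
		// With MaxRetryRequests > 0, a dropped connection triggers ranged
		// re-downloads pinned to the original ETag; 0 returns the raw body.
		body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
		defer body.Close()
		return ioutil.ReadAll(body)
	}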

// Response returns the raw HTTP response object.
func (r DownloadResponse) Response() *http.Response {
	return r.r.Response()
}

// NewHTTPHeaders returns the user-modifiable properties for this blob.
func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
	return r.r.NewHTTPHeaders()
}

// BlobContentMD5 returns the value for header x-ms-blob-content-md5.
func (r DownloadResponse) BlobContentMD5() []byte {
	return r.r.BlobContentMD5()
}

// ContentMD5 returns the value for header Content-MD5.
func (r DownloadResponse) ContentMD5() []byte {
	return r.r.ContentMD5()
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r DownloadResponse) StatusCode() int {
	return r.r.StatusCode()
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r DownloadResponse) Status() string {
	return r.r.Status()
}

// AcceptRanges returns the value for header Accept-Ranges.
func (r DownloadResponse) AcceptRanges() string {
	return r.r.AcceptRanges()
}

// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
func (r DownloadResponse) BlobCommittedBlockCount() int32 {
	return r.r.BlobCommittedBlockCount()
}

// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
func (r DownloadResponse) BlobSequenceNumber() int64 {
	return r.r.BlobSequenceNumber()
}

// BlobType returns the value for header x-ms-blob-type.
func (r DownloadResponse) BlobType() BlobType {
	return r.r.BlobType()
}

// CacheControl returns the value for header Cache-Control.
func (r DownloadResponse) CacheControl() string {
	return r.r.CacheControl()
}

// ContentDisposition returns the value for header Content-Disposition.
func (r DownloadResponse) ContentDisposition() string {
	return r.r.ContentDisposition()
}

// ContentEncoding returns the value for header Content-Encoding.
func (r DownloadResponse) ContentEncoding() string {
	return r.r.ContentEncoding()
}

// ContentLanguage returns the value for header Content-Language.
func (r DownloadResponse) ContentLanguage() string {
	return r.r.ContentLanguage()
}

// ContentLength returns the value for header Content-Length.
func (r DownloadResponse) ContentLength() int64 {
	return r.r.ContentLength()
}

// ContentRange returns the value for header Content-Range.
func (r DownloadResponse) ContentRange() string {
	return r.r.ContentRange()
}

// ContentType returns the value for header Content-Type.
func (r DownloadResponse) ContentType() string {
	return r.r.ContentType()
}

// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
func (r DownloadResponse) CopyCompletionTime() time.Time {
	return r.r.CopyCompletionTime()
}

// CopyID returns the value for header x-ms-copy-id.
func (r DownloadResponse) CopyID() string {
	return r.r.CopyID()
}

// CopyProgress returns the value for header x-ms-copy-progress.
func (r DownloadResponse) CopyProgress() string {
	return r.r.CopyProgress()
}

// CopySource returns the value for header x-ms-copy-source.
func (r DownloadResponse) CopySource() string {
	return r.r.CopySource()
}

// CopyStatus returns the value for header x-ms-copy-status.
func (r DownloadResponse) CopyStatus() CopyStatusType {
	return r.r.CopyStatus()
}

// CopyStatusDescription returns the value for header x-ms-copy-status-description.
func (r DownloadResponse) CopyStatusDescription() string {
	return r.r.CopyStatusDescription()
}

// Date returns the value for header Date.
func (r DownloadResponse) Date() time.Time {
	return r.r.Date()
}

// ETag returns the value for header ETag.
func (r DownloadResponse) ETag() ETag {
	return r.r.ETag()
}

// IsServerEncrypted returns the value for header x-ms-server-encrypted.
func (r DownloadResponse) IsServerEncrypted() string {
	return r.r.IsServerEncrypted()
}

// LastModified returns the value for header Last-Modified.
func (r DownloadResponse) LastModified() time.Time {
	return r.r.LastModified()
}

// LeaseDuration returns the value for header x-ms-lease-duration.
func (r DownloadResponse) LeaseDuration() LeaseDurationType {
	return r.r.LeaseDuration()
}

// LeaseState returns the value for header x-ms-lease-state.
func (r DownloadResponse) LeaseState() LeaseStateType {
	return r.r.LeaseState()
}

// LeaseStatus returns the value for header x-ms-lease-status.
func (r DownloadResponse) LeaseStatus() LeaseStatusType {
	return r.r.LeaseStatus()
}

// RequestID returns the value for header x-ms-request-id.
func (r DownloadResponse) RequestID() string {
	return r.r.RequestID()
}

// Version returns the value for header x-ms-version.
func (r DownloadResponse) Version() string {
	return r.r.Version()
}

// NewMetadata returns user-defined key/value pairs.
func (r DownloadResponse) NewMetadata() Metadata {
	return r.r.NewMetadata()
}
@ -1,20 +0,0 @@
The MIT License (MIT)

Copyright (c) 2013 Stack Exchange

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -1,6 +0,0 @@
wmi
===

Package wmi provides a WQL interface to Windows WMI.

Note: It interfaces with WMI on the local machine, therefore it only runs on Windows.
@ -1,260 +0,0 @@
// +build windows

package wmi

import (
	"fmt"
	"reflect"
	"runtime"
	"sync"

	"github.com/go-ole/go-ole"
	"github.com/go-ole/go-ole/oleutil"
)

// SWbemServices is used to access WMI. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx
type SWbemServices struct {
	//TODO: track namespace. Not sure if we can reconnect to a different namespace using the same instance
	cWMIClient            *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method
	sWbemLocatorIUnknown  *ole.IUnknown
	sWbemLocatorIDispatch *ole.IDispatch
	queries               chan *queryRequest
	closeError            chan error
	lQueryorClose         sync.Mutex
}

type queryRequest struct {
	query    string
	dst      interface{}
	args     []interface{}
	finished chan error
}

// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI
func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) {
	//fmt.Println("InitializeSWbemServices: Starting")
	//TODO: implement connectServerArgs as optional argument for init with connectServer call
	s := new(SWbemServices)
	s.cWMIClient = c
	s.queries = make(chan *queryRequest)
	initError := make(chan error)
	go s.process(initError)

	err, ok := <-initError
	if ok {
		return nil, err //Send error to caller
	}
	//fmt.Println("InitializeSWbemServices: Finished")
	return s, nil
}

// Close will clear and release all of the SWbemServices resources
func (s *SWbemServices) Close() error {
	s.lQueryorClose.Lock()
	if s == nil || s.sWbemLocatorIDispatch == nil {
		s.lQueryorClose.Unlock()
		return fmt.Errorf("SWbemServices is not Initialized")
	}
	if s.queries == nil {
		s.lQueryorClose.Unlock()
		return fmt.Errorf("SWbemServices has been closed")
	}
	//fmt.Println("Close: sending close request")
	var result error
	ce := make(chan error)
	s.closeError = ce //Race condition if multiple callers to close. May need to lock here
	close(s.queries)  //Tell background to shut things down
	s.lQueryorClose.Unlock()
	err, ok := <-ce
	if ok {
		result = err
	}
	//fmt.Println("Close: finished")
	return result
}

func (s *SWbemServices) process(initError chan error) {
	//fmt.Println("process: starting background thread initialization")
	//All OLE/WMI calls must happen on the same initialized thread, so lock this goroutine
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
	if err != nil {
		oleCode := err.(*ole.OleError).Code()
		if oleCode != ole.S_OK && oleCode != S_FALSE {
			initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err)
			return
		}
	}
	defer ole.CoUninitialize()

	unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
	if err != nil {
		initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err)
		return
	} else if unknown == nil {
		initError <- ErrNilCreateObject
		return
	}
	defer unknown.Release()
	s.sWbemLocatorIUnknown = unknown

	dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch)
	if err != nil {
		initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err)
		return
	}
	defer dispatch.Release()
	s.sWbemLocatorIDispatch = dispatch

	// we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs
	//fmt.Println("process: initialized. closing initError")
	close(initError)
	//fmt.Println("process: waiting for queries")
	for q := range s.queries {
		//fmt.Printf("process: new query: len(query)=%d\n", len(q.query))
		errQuery := s.queryBackground(q)
		//fmt.Println("process: s.queryBackground finished")
		if errQuery != nil {
			q.finished <- errQuery
		}
		close(q.finished)
	}
	//fmt.Println("process: queries channel closed")
	s.queries = nil //set channel to nil so we know it is closed
	//TODO: I think the Release/Clear calls can panic if things are in a bad state.
	//TODO: May need to recover from panics and send error to method caller instead.
	close(s.closeError)
}

// Query runs the WQL query using a SWbemServices instance and appends the values to dst.
//
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
// the query must have the same name in dst. Supported types are all signed and
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
// Array types are not supported.
//
// By default, the local machine and default namespace are used. These can be
// changed using connectServerArgs. See
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
	s.lQueryorClose.Lock()
	if s == nil || s.sWbemLocatorIDispatch == nil {
		s.lQueryorClose.Unlock()
		return fmt.Errorf("SWbemServices is not Initialized")
	}
	if s.queries == nil {
		s.lQueryorClose.Unlock()
		return fmt.Errorf("SWbemServices has been closed")
	}

	//fmt.Println("Query: Sending query request")
	qr := queryRequest{
		query:    query,
		dst:      dst,
		args:     connectServerArgs,
		finished: make(chan error),
	}
	s.queries <- &qr
	s.lQueryorClose.Unlock()
	err, ok := <-qr.finished
	if ok {
		//fmt.Println("Query: Finished with error")
		return err //Send error to caller
	}
	//fmt.Println("Query: Finished")
	return nil
}

func (s *SWbemServices) queryBackground(q *queryRequest) error {
	if s == nil || s.sWbemLocatorIDispatch == nil {
		return fmt.Errorf("SWbemServices is not Initialized")
	}
	wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart
	//fmt.Println("queryBackground: Starting")

	dv := reflect.ValueOf(q.dst)
	if dv.Kind() != reflect.Ptr || dv.IsNil() {
		return ErrInvalidEntityType
	}
	dv = dv.Elem()
	mat, elemType := checkMultiArg(dv)
	if mat == multiArgTypeInvalid {
		return ErrInvalidEntityType
	}

	// service is a SWbemServices
	serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...)
	if err != nil {
		return err
	}
	service := serviceRaw.ToIDispatch()
	defer serviceRaw.Clear()

	// result is a SWBemObjectSet
	resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query)
	if err != nil {
		return err
	}
	result := resultRaw.ToIDispatch()
	defer resultRaw.Clear()

	count, err := oleInt64(result, "Count")
	if err != nil {
		return err
	}

	enumProperty, err := result.GetProperty("_NewEnum")
	if err != nil {
		return err
	}
	defer enumProperty.Clear()

	enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
	if err != nil {
		return err
	}
	if enum == nil {
		return fmt.Errorf("can't get IEnumVARIANT, enum is nil")
	}
	defer enum.Release()

	// Initialize a slice with Count capacity
	dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))

	var errFieldMismatch error
	for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) {
		if err != nil {
			return err
		}

		err := func() error {
			// item is a SWbemObject, but really a Win32_Process
			item := itemRaw.ToIDispatch()
			defer item.Release()

			ev := reflect.New(elemType)
			if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil {
				if _, ok := err.(*ErrFieldMismatch); ok {
					// We continue loading entities even in the face of field mismatch errors.
					// If we encounter any other error, that other error is returned. Otherwise,
					// an ErrFieldMismatch is returned.
					errFieldMismatch = err
				} else {
					return err
				}
			}
			if mat != multiArgTypeStructPtr {
				ev = ev.Elem()
			}
			dv.Set(reflect.Append(dv, ev))
			return nil
		}()
		if err != nil {
			return err
		}
	}
	//fmt.Println("queryBackground: Finished")
	return errFieldMismatch
}
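The point of SWbemServices is to pay the COM/locator initialization cost once and then route every query through the single locked background thread above. A short Windows-only sketch of that reuse pattern, built from the exported functions in this file (Win32_OperatingSystem is a standard WMI class used here for illustration):

	package main

	import (
		"fmt"
		"log"

		"github.com/StackExchange/wmi"
	)

	type Win32_OperatingSystem struct {
		Caption string
	}

	func main() {
		// Initialize one SWbemServices and reuse it across queries instead
		// of re-running COM initialization on every call.
		s, err := wmi.InitializeSWbemServices(wmi.DefaultClient)
		if err != nil {
			log.Fatal(err)
		}
		defer s.Close()

		var dst []Win32_OperatingSystem
		q := wmi.CreateQuery(&dst, "")
		if err := s.Query(q, &dst); err != nil {
			log.Fatal(err)
		}
		for _, os := range dst {
			fmt.Println(os.Caption)
		}
	}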
@ -1,486 +0,0 @@
// +build windows

/*
Package wmi provides a WQL interface for WMI on Windows.

Example code to print names of running processes:

	type Win32_Process struct {
		Name string
	}

	func main() {
		var dst []Win32_Process
		q := wmi.CreateQuery(&dst, "")
		err := wmi.Query(q, &dst)
		if err != nil {
			log.Fatal(err)
		}
		for i, v := range dst {
			println(i, v.Name)
		}
	}

*/
package wmi

import (
	"bytes"
	"errors"
	"fmt"
	"log"
	"os"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/go-ole/go-ole"
	"github.com/go-ole/go-ole/oleutil"
)

var l = log.New(os.Stdout, "", log.LstdFlags)

var (
	ErrInvalidEntityType = errors.New("wmi: invalid entity type")
	// ErrNilCreateObject is the error returned if CreateObject returns nil even
	// if the error was nil.
	ErrNilCreateObject = errors.New("wmi: create object returned nil")
	lock               sync.Mutex
)

// S_FALSE is returned by CoInitializeEx if it was already called on this thread.
const S_FALSE = 0x00000001

// QueryNamespace invokes Query with the given namespace on the local machine.
func QueryNamespace(query string, dst interface{}, namespace string) error {
	return Query(query, dst, nil, namespace)
}

// Query runs the WQL query and appends the values to dst.
//
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
// the query must have the same name in dst. Supported types are all signed and
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
// Array types are not supported.
//
// By default, the local machine and default namespace are used. These can be
// changed using connectServerArgs. See
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
//
// Query is a wrapper around DefaultClient.Query.
func Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
	if DefaultClient.SWbemServicesClient == nil {
		return DefaultClient.Query(query, dst, connectServerArgs...)
	}
	return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...)
}

// A Client is a WMI query client.
//
// Its zero value (DefaultClient) is a usable client.
type Client struct {
	// NonePtrZero specifies if nil values for fields which aren't pointers
	// should be returned as the field type's zero value.
	//
	// Setting this to true allows structs without pointer fields to be used
	// without the risk of failure should a nil value be returned from WMI.
	NonePtrZero bool

	// PtrNil specifies if nil values for pointer fields should be returned
	// as nil.
	//
	// Setting this to true will set pointer fields to nil where WMI
	// returned nil, otherwise the type's zero value will be returned.
	PtrNil bool

	// AllowMissingFields specifies that struct fields not present in the
	// query result should not result in an error.
	//
	// Setting this to true allows custom queries to be used with full
	// struct definitions instead of having to define multiple structs.
	AllowMissingFields bool

	// SWbemServicesClient is an optional SWbemServices object that can be
	// initialized and then reused across multiple queries. If it is nil
	// then the method will initialize a new temporary client each time.
	SWbemServicesClient *SWbemServices
}

// DefaultClient is the default Client and is used by Query and QueryNamespace.
var DefaultClient = &Client{}

// Query runs the WQL query and appends the values to dst.
//
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
// the query must have the same name in dst. Supported types are all signed and
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
// Array types are not supported.
//
// By default, the local machine and default namespace are used. These can be
// changed using connectServerArgs. See
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
	dv := reflect.ValueOf(dst)
	if dv.Kind() != reflect.Ptr || dv.IsNil() {
		return ErrInvalidEntityType
	}
	dv = dv.Elem()
	mat, elemType := checkMultiArg(dv)
	if mat == multiArgTypeInvalid {
		return ErrInvalidEntityType
	}

	lock.Lock()
	defer lock.Unlock()
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
	if err != nil {
		oleCode := err.(*ole.OleError).Code()
		if oleCode != ole.S_OK && oleCode != S_FALSE {
			return err
		}
	}
	defer ole.CoUninitialize()

	unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
	if err != nil {
		return err
	} else if unknown == nil {
		return ErrNilCreateObject
	}
	defer unknown.Release()

	wmi, err := unknown.QueryInterface(ole.IID_IDispatch)
	if err != nil {
		return err
	}
	defer wmi.Release()

	// service is a SWbemServices
	serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...)
	if err != nil {
		return err
	}
	service := serviceRaw.ToIDispatch()
	defer serviceRaw.Clear()

	// result is a SWBemObjectSet
	resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query)
	if err != nil {
		return err
	}
	result := resultRaw.ToIDispatch()
	defer resultRaw.Clear()

	count, err := oleInt64(result, "Count")
	if err != nil {
		return err
	}

	enumProperty, err := result.GetProperty("_NewEnum")
	if err != nil {
		return err
	}
	defer enumProperty.Clear()

	enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
	if err != nil {
		return err
	}
	if enum == nil {
		return fmt.Errorf("can't get IEnumVARIANT, enum is nil")
	}
	defer enum.Release()

	// Initialize a slice with Count capacity
	dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))

	var errFieldMismatch error
	for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) {
		if err != nil {
			return err
		}

		err := func() error {
			// item is a SWbemObject, but really a Win32_Process
			item := itemRaw.ToIDispatch()
			defer item.Release()

			ev := reflect.New(elemType)
			if err = c.loadEntity(ev.Interface(), item); err != nil {
				if _, ok := err.(*ErrFieldMismatch); ok {
					// We continue loading entities even in the face of field mismatch errors.
					// If we encounter any other error, that other error is returned. Otherwise,
					// an ErrFieldMismatch is returned.
					errFieldMismatch = err
				} else {
					return err
				}
			}
			if mat != multiArgTypeStructPtr {
				ev = ev.Elem()
			}
			dv.Set(reflect.Append(dv, ev))
			return nil
		}()
		if err != nil {
			return err
		}
	}
	return errFieldMismatch
}
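The exported Client fields above are the knobs callers tune. A short, Windows-only sketch of setting them on a dedicated Client rather than going through DefaultClient — the Win32_Process query mirrors the package example, and the WHERE clause is illustrative:

	package main

	import (
		"fmt"
		"log"

		"github.com/StackExchange/wmi"
	)

	type Win32_Process struct {
		Name string
	}

	func main() {
		// AllowMissingFields tolerates struct fields the query result lacks;
		// NonePtrZero maps WMI nulls onto zero values for non-pointer fields.
		client := &wmi.Client{AllowMissingFields: true, NonePtrZero: true}
		var dst []Win32_Process
		q := wmi.CreateQuery(&dst, "WHERE Name='explorer.exe'")
		if err := client.Query(q, &dst); err != nil {
			log.Fatal(err)
		}
		for _, p := range dst {
			fmt.Println(p.Name)
		}
	}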

// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument.
type ErrFieldMismatch struct {
	StructType reflect.Type
	FieldName  string
	Reason     string
}

func (e *ErrFieldMismatch) Error() string {
	return fmt.Sprintf("wmi: cannot load field %q into a %q: %s",
		e.FieldName, e.StructType, e.Reason)
}

var timeType = reflect.TypeOf(time.Time{})

// loadEntity loads a SWbemObject into a struct pointer.
func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) {
	v := reflect.ValueOf(dst).Elem()
	for i := 0; i < v.NumField(); i++ {
		f := v.Field(i)
		of := f
		isPtr := f.Kind() == reflect.Ptr
		if isPtr {
			ptr := reflect.New(f.Type().Elem())
			f.Set(ptr)
			f = f.Elem()
		}
		n := v.Type().Field(i).Name
		if !f.CanSet() {
			return &ErrFieldMismatch{
				StructType: of.Type(),
				FieldName:  n,
				Reason:     "CanSet() is false",
			}
		}
		prop, err := oleutil.GetProperty(src, n)
		if err != nil {
			if !c.AllowMissingFields {
				errFieldMismatch = &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     "no such struct field",
				}
			}
			continue
		}
		defer prop.Clear()

		switch val := prop.Value().(type) {
		case int8, int16, int32, int64, int:
			v := reflect.ValueOf(val).Int()
			switch f.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				f.SetInt(v)
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				f.SetUint(uint64(v))
			default:
				return &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     "not an integer class",
				}
			}
		case uint8, uint16, uint32, uint64:
			v := reflect.ValueOf(val).Uint()
			switch f.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				f.SetInt(int64(v))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				f.SetUint(v)
			default:
				return &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     "not an integer class",
				}
			}
		case string:
			switch f.Kind() {
			case reflect.String:
				f.SetString(val)
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				iv, err := strconv.ParseInt(val, 10, 64)
				if err != nil {
					return err
				}
				f.SetInt(iv)
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				uv, err := strconv.ParseUint(val, 10, 64)
				if err != nil {
					return err
				}
				f.SetUint(uv)
			case reflect.Struct:
				switch f.Type() {
				case timeType:
					if len(val) == 25 {
						mins, err := strconv.Atoi(val[22:])
						if err != nil {
							return err
						}
						val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60)
					}
					t, err := time.Parse("20060102150405.000000-0700", val)
					if err != nil {
						return err
					}
					f.Set(reflect.ValueOf(t))
				}
			}
		case bool:
			switch f.Kind() {
			case reflect.Bool:
				f.SetBool(val)
			default:
				return &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     "not a bool",
				}
			}
		case float32:
			switch f.Kind() {
			case reflect.Float32:
				f.SetFloat(float64(val))
			default:
				return &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     "not a Float32",
				}
			}
		default:
			if f.Kind() == reflect.Slice {
				switch f.Type().Elem().Kind() {
				case reflect.String:
					safeArray := prop.ToArray()
					if safeArray != nil {
						arr := safeArray.ToValueArray()
						fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
						for i, v := range arr {
							s := fArr.Index(i)
							s.SetString(v.(string))
						}
						f.Set(fArr)
					}
				case reflect.Uint8:
					safeArray := prop.ToArray()
					if safeArray != nil {
						arr := safeArray.ToValueArray()
						fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
						for i, v := range arr {
							s := fArr.Index(i)
							s.SetUint(reflect.ValueOf(v).Uint())
						}
						f.Set(fArr)
					}
				default:
					return &ErrFieldMismatch{
						StructType: of.Type(),
						FieldName:  n,
						Reason:     fmt.Sprintf("unsupported slice type (%T)", val),
					}
				}
			} else {
				typeof := reflect.TypeOf(val)
				if typeof == nil && (isPtr || c.NonePtrZero) {
					if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
						of.Set(reflect.Zero(of.Type()))
					}
					break
				}
				return &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     fmt.Sprintf("unsupported type (%T)", val),
				}
			}
		}
	}
	return errFieldMismatch
}

type multiArgType int

const (
	multiArgTypeInvalid multiArgType = iota
	multiArgTypeStruct
	multiArgTypeStructPtr
)

// checkMultiArg checks that v has type []S or []*S, for some struct type S.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
	if v.Kind() != reflect.Slice {
		return multiArgTypeInvalid, nil
	}
	elemType = v.Type().Elem()
	switch elemType.Kind() {
	case reflect.Struct:
		return multiArgTypeStruct, elemType
	case reflect.Ptr:
		elemType = elemType.Elem()
		if elemType.Kind() == reflect.Struct {
			return multiArgTypeStructPtr, elemType
		}
	}
	return multiArgTypeInvalid, nil
}

func oleInt64(item *ole.IDispatch, prop string) (int64, error) {
	v, err := oleutil.GetProperty(item, prop)
	if err != nil {
		return 0, err
	}
	defer v.Clear()

	i := int64(v.Val)
	return i, nil
}

// CreateQuery returns a WQL query string that queries all columns of src. where
// is an optional string that is appended to the query, to be used with WHERE
// clauses. In such a case, the "WHERE" string should appear at the beginning.
func CreateQuery(src interface{}, where string) string {
	var b bytes.Buffer
	b.WriteString("SELECT ")
	s := reflect.Indirect(reflect.ValueOf(src))
	t := s.Type()
	if s.Kind() == reflect.Slice {
||||
t = t.Elem() |
||||
} |
||||
if t.Kind() != reflect.Struct { |
||||
return "" |
||||
} |
||||
var fields []string |
||||
for i := 0; i < t.NumField(); i++ { |
||||
fields = append(fields, t.Field(i).Name) |
||||
} |
||||
b.WriteString(strings.Join(fields, ", ")) |
||||
b.WriteString(" FROM ") |
||||
b.WriteString(t.Name()) |
||||
b.WriteString(" " + where) |
||||
return b.String() |
||||
} |
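
For reference, a minimal usage sketch of the two entry points above; it is not part of the vendored file, the `Win32_Process` struct and its field set are illustrative assumptions, and `Query` is the package's exported query helper:

```go
// Hypothetical WMI class mapping; field names must match WMI property names,
// since loadEntity looks properties up by Go field name.
type Win32_Process struct {
	Name      string
	ProcessId uint32
}

func listExplorer() ([]Win32_Process, error) {
	var dst []Win32_Process
	// Produces: SELECT Name, ProcessId FROM Win32_Process WHERE Name = 'explorer.exe'
	q := CreateQuery(&dst, "WHERE Name = 'explorer.exe'")
	err := Query(q, &dst) // loads each matching object into dst via loadEntity
	return dst, err
}
```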
@ -1,201 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -1,150 +0,0 @@
# BigCache [![Build Status](https://travis-ci.org/allegro/bigcache.svg?branch=master)](https://travis-ci.org/allegro/bigcache) [![Coverage Status](https://coveralls.io/repos/github/allegro/bigcache/badge.svg?branch=master)](https://coveralls.io/github/allegro/bigcache?branch=master) [![GoDoc](https://godoc.org/github.com/allegro/bigcache?status.svg)](https://godoc.org/github.com/allegro/bigcache) [![Go Report Card](https://goreportcard.com/badge/github.com/allegro/bigcache)](https://goreportcard.com/report/github.com/allegro/bigcache)

Fast, concurrent, evicting in-memory cache written to keep a large number of entries without degrading performance.
BigCache keeps entries on the heap but omits GC for them. To achieve that, all operations take place on byte arrays,
so in most use cases entries will need to be (de)serialized in front of the cache.

## Usage

### Simple initialization

```go
import "github.com/allegro/bigcache"

cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))

cache.Set("my-unique-key", []byte("value"))

entry, _ := cache.Get("my-unique-key")
fmt.Println(string(entry))
```

### Custom initialization

When the cache load can be predicted in advance, custom initialization is preferable because it avoids additional memory allocations.

```go
import (
	"log"

	"github.com/allegro/bigcache"
)

config := bigcache.Config {
	// number of shards (must be a power of 2)
	Shards: 1024,
	// time after which entry can be evicted
	LifeWindow: 10 * time.Minute,
	// rps * lifeWindow, used only in initial memory allocation
	MaxEntriesInWindow: 1000 * 10 * 60,
	// max entry size in bytes, used only in initial memory allocation
	MaxEntrySize: 500,
	// prints information about additional memory allocation
	Verbose: true,
	// cache will not allocate more memory than this limit, value in MB
	// if value is reached then the oldest entries can be overridden for the new ones
	// 0 value means no size limit
	HardMaxCacheSize: 8192,
	// callback fired when the oldest entry is removed because of its expiration time or no space left
	// for the new entry, or because delete was called.
	// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
	OnRemove: nil,
	// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
	// for the new entry, or because delete was called. A constant representing the reason will be passed through.
	// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
	// Ignored if OnRemove is specified.
	OnRemoveWithReason: nil,
}

cache, initErr := bigcache.NewBigCache(config)
if initErr != nil {
	log.Fatal(initErr)
}

cache.Set("my-unique-key", []byte("value"))

if entry, err := cache.Get("my-unique-key"); err == nil {
	fmt.Println(string(entry))
}
```

## Benchmarks

Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map.
Benchmark tests were made using an i7-6700K with 32GB of RAM on Windows 10.

### Writes and reads

```bash
cd caches_bench; go test -bench=. -benchtime=10s ./... -timeout 30m

BenchmarkMapSet-8                        3000000       569 ns/op     202 B/op     3 allocs/op
BenchmarkConcurrentMapSet-8              1000000      1592 ns/op     347 B/op     8 allocs/op
BenchmarkFreeCacheSet-8                  3000000       775 ns/op     355 B/op     2 allocs/op
BenchmarkBigCacheSet-8                   3000000       640 ns/op     303 B/op     2 allocs/op
BenchmarkMapGet-8                        5000000       407 ns/op      24 B/op     1 allocs/op
BenchmarkConcurrentMapGet-8              3000000       558 ns/op      24 B/op     2 allocs/op
BenchmarkFreeCacheGet-8                  2000000       682 ns/op     136 B/op     2 allocs/op
BenchmarkBigCacheGet-8                   3000000       512 ns/op     152 B/op     4 allocs/op
BenchmarkBigCacheSetParallel-8          10000000       225 ns/op     313 B/op     3 allocs/op
BenchmarkFreeCacheSetParallel-8         10000000       218 ns/op     341 B/op     3 allocs/op
BenchmarkConcurrentMapSetParallel-8      5000000       318 ns/op     200 B/op     6 allocs/op
BenchmarkBigCacheGetParallel-8          20000000       178 ns/op     152 B/op     4 allocs/op
BenchmarkFreeCacheGetParallel-8         20000000       295 ns/op     136 B/op     3 allocs/op
BenchmarkConcurrentMapGetParallel-8     10000000       237 ns/op      24 B/op     2 allocs/op
```

Writes and reads in bigcache are faster than in freecache.
Writes to map are the slowest.

### GC pause time

```bash
cd caches_bench; go run caches_gc_overhead_comparison.go

Number of entries: 20000000
GC pause for bigcache: 5.8658ms
GC pause for freecache: 32.4341ms
GC pause for map: 52.9661ms
```

The test shows how long GC pauses are for caches filled with 20 million entries.
Bigcache and freecache have very similar GC pause times.
Both clearly reduce GC overhead compared to a plain map, whose GC pause is roughly an order of magnitude longer.

## How it works

BigCache relies on an optimization introduced in Go 1.5 ([issue-9477](https://github.com/golang/go/issues/9477)):
if a map's keys and values contain no pointers, the GC skips scanning its contents.
BigCache therefore uses a `map[uint64]uint32` in which keys are hashed and values are offsets of entries.

Entries are kept in a byte array, again to avoid GC scanning.
The byte array can grow to gigabytes without hurting performance,
because the GC only sees a single pointer to it.
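
A toy sketch of that idea (illustrative only, not the library's code): the map holds no pointers, so the GC never scans its millions of buckets, and the single byte slice is seen as one pointer.

```go
package main

import "fmt"

func main() {
	index := make(map[uint64]uint32) // hash -> offset; pointer-free, so GC skips its contents
	data := make([]byte, 0, 1024)    // one backing array; GC sees a single pointer

	var hash uint64 = 0xCAFEBABE // stand-in for a real 64-bit key hash
	index[hash] = uint32(len(data))
	data = append(data, "value"...)

	off := index[hash]
	fmt.Println(string(data[off:])) // "value"
}
```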

## Bigcache vs Freecache

Both caches provide the same core features, but they reduce GC overhead in different ways.
Bigcache relies on `map[uint64]uint32`; freecache implements its own mapping built on
slices to reduce the number of pointers.

Results from the benchmark tests are presented above.
One advantage of bigcache over freecache is that you don't need to know
the size of the cache in advance, because when bigcache is full,
it can allocate additional memory for new entries instead of
overwriting existing ones as freecache does currently.
However, a hard max size can also be set in bigcache; see [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config).

## HTTP Server

This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package.

## More

Bigcache genesis is described in the allegro.tech blog post: [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html)

## License

BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE))
@ -1,202 +0,0 @@
package bigcache

import (
	"fmt"
	"time"
)

const (
	minimumEntriesInShard = 10 // Minimum number of entries in single shard
)

// BigCache is a fast, concurrent, evicting cache created to keep a big number of entries without impact on performance.
// It keeps entries on the heap but omits GC for them. To achieve that, operations take place on byte arrays,
// therefore entries will need to be (de)serialized in front of the cache in most use cases.
type BigCache struct {
	shards       []*cacheShard
	lifeWindow   uint64
	clock        clock
	hash         Hasher
	config       Config
	shardMask    uint64
	maxShardSize uint32
	close        chan struct{}
}

// RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback.
type RemoveReason uint32

const (
	// Expired means the key is past its LifeWindow.
	Expired RemoveReason = iota
	// NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the
	// entry exceeded the maximum shard size.
	NoSpace
	// Deleted means Delete was called and this key was removed as a result.
	Deleted
)

// NewBigCache initializes a new instance of BigCache.
func NewBigCache(config Config) (*BigCache, error) {
	return newBigCache(config, &systemClock{})
}

func newBigCache(config Config, clock clock) (*BigCache, error) {
	if !isPowerOfTwo(config.Shards) {
		return nil, fmt.Errorf("Shards number must be power of two")
	}

	if config.Hasher == nil {
		config.Hasher = newDefaultHasher()
	}

	cache := &BigCache{
		shards:       make([]*cacheShard, config.Shards),
		lifeWindow:   uint64(config.LifeWindow.Seconds()),
		clock:        clock,
		hash:         config.Hasher,
		config:       config,
		shardMask:    uint64(config.Shards - 1),
		maxShardSize: uint32(config.maximumShardSize()),
		close:        make(chan struct{}),
	}

	var onRemove func(wrappedEntry []byte, reason RemoveReason)
	if config.OnRemove != nil {
		onRemove = cache.providedOnRemove
	} else if config.OnRemoveWithReason != nil {
		onRemove = cache.providedOnRemoveWithReason
	} else {
		onRemove = cache.notProvidedOnRemove
	}

	for i := 0; i < config.Shards; i++ {
		cache.shards[i] = initNewShard(config, onRemove, clock)
	}

	if config.CleanWindow > 0 {
		go func() {
			ticker := time.NewTicker(config.CleanWindow)
			defer ticker.Stop()
			for {
				select {
				case t := <-ticker.C:
					cache.cleanUp(uint64(t.Unix()))
				case <-cache.close:
					return
				}
			}
		}()
	}

	return cache, nil
}

// Close is used to signal a shutdown of the cache when you are done with it.
// This allows the cleaning goroutines to exit and ensures references are not
// kept to the cache preventing GC of the entire cache.
func (c *BigCache) Close() error {
	close(c.close)
	return nil
}

// Get reads entry for the key.
// It returns an ErrEntryNotFound when
// no entry exists for the given key.
func (c *BigCache) Get(key string) ([]byte, error) {
	hashedKey := c.hash.Sum64(key)
	shard := c.getShard(hashedKey)
	return shard.get(key, hashedKey)
}

// Set saves entry under the key.
func (c *BigCache) Set(key string, entry []byte) error {
	hashedKey := c.hash.Sum64(key)
	shard := c.getShard(hashedKey)
	return shard.set(key, hashedKey, entry)
}

// Delete removes the key.
func (c *BigCache) Delete(key string) error {
	hashedKey := c.hash.Sum64(key)
	shard := c.getShard(hashedKey)
	return shard.del(key, hashedKey)
}

// Reset empties all cache shards.
func (c *BigCache) Reset() error {
	for _, shard := range c.shards {
		shard.reset(c.config)
	}
	return nil
}

// Len returns the number of entries in the cache.
func (c *BigCache) Len() int {
	var len int
	for _, shard := range c.shards {
		len += shard.len()
	}
	return len
}

// Capacity returns the number of bytes stored in the cache.
func (c *BigCache) Capacity() int {
	var len int
	for _, shard := range c.shards {
		len += shard.capacity()
	}
	return len
}

// Stats returns the cache's statistics.
func (c *BigCache) Stats() Stats {
	var s Stats
	for _, shard := range c.shards {
		tmp := shard.getStats()
		s.Hits += tmp.Hits
		s.Misses += tmp.Misses
		s.DelHits += tmp.DelHits
		s.DelMisses += tmp.DelMisses
		s.Collisions += tmp.Collisions
	}
	return s
}

// Iterator returns an iterator over the EntryInfo entries of the whole cache.
func (c *BigCache) Iterator() *EntryInfoIterator {
	return newIterator(c)
}

func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
	oldestTimestamp := readTimestampFromEntry(oldestEntry)
	if currentTimestamp-oldestTimestamp > c.lifeWindow {
		evict(Expired)
		return true
	}
	return false
}

func (c *BigCache) cleanUp(currentTimestamp uint64) {
	for _, shard := range c.shards {
		shard.cleanUp(currentTimestamp)
	}
}

// getShard picks the shard for a hashed key. Because the shard count is a
// power of two, masking with shardMask is equivalent to a modulo.
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
	return c.shards[hashedKey&c.shardMask]
}

func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) {
	c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
}

func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) {
	if c.config.onRemoveFilter == 0 || (1<<uint(reason))&c.config.onRemoveFilter > 0 {
		c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason)
	}
}

func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) {
}
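
Since `newBigCache` validates that the shard count is a power of two, the `hashedKey & shardMask` in `getShard` behaves exactly like `hashedKey % Shards` while avoiding an integer division. A standalone sketch with assumed example values:

```go
package main

import "fmt"

func main() {
	const shards = 1024     // power of two, as newBigCache enforces
	const mask = shards - 1 // the cache's shardMask

	hash := uint64(0x9E3779B97F4A7C15) // arbitrary example hash
	// Masking and modulo agree for every hash when shards is a power of two.
	fmt.Println(hash&mask == hash%shards) // true
}
```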
@ -1,14 +0,0 @@
// +build !appengine

package bigcache

import (
	"reflect"
	"unsafe"
)

// bytesToString converts a byte slice to a string without copying, by pointing
// a string header at the slice's backing array. The caller must not mutate b
// after the conversion.
func bytesToString(b []byte) string {
	bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
	return *(*string)(unsafe.Pointer(&strHeader))
}
@ -1,7 +0,0 @@
// +build appengine

package bigcache

// bytesToString falls back to an allocating conversion on App Engine, where
// package unsafe is not permitted.
func bytesToString(b []byte) string {
	return string(b)
}
@ -1,14 +0,0 @@
package bigcache

import "time"

type clock interface {
	epoch() int64
}

type systemClock struct {
}

func (c systemClock) epoch() int64 {
	return time.Now().Unix()
}
@ -1,86 +0,0 @@
package bigcache

import "time"

// Config for BigCache
type Config struct {
	// Number of cache shards, value must be a power of two
	Shards int
	// Time after which entry can be evicted
	LifeWindow time.Duration
	// Interval between removing expired entries (clean up).
	// If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive; bigcache has a one second resolution.
	CleanWindow time.Duration
	// Max number of entries in life window. Used only to calculate initial size for cache shards.
	// When a proper value is set then additional memory allocation does not occur.
	MaxEntriesInWindow int
	// Max size of entry in bytes. Used only to calculate initial size for cache shards.
	MaxEntrySize int
	// Verbose mode prints information about new memory allocations
	Verbose bool
	// Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used.
	Hasher Hasher
	// HardMaxCacheSize is a limit for cache size in MB. Cache will not allocate more memory than this limit.
	// It can protect the application from consuming all available memory on the machine, and therefore from the OOM killer.
	// Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then
	// the oldest entries are overwritten by the new ones.
	HardMaxCacheSize int
	// OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left
	// for the new entry, or because delete was called.
	// Default value is nil which means no callback; this also prevents unwrapping the oldest entry.
	OnRemove func(key string, entry []byte)
	// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
	// for the new entry, or because delete was called. A constant representing the reason will be passed through.
	// Default value is nil which means no callback; this also prevents unwrapping the oldest entry.
	// Ignored if OnRemove is specified.
	OnRemoveWithReason func(key string, entry []byte, reason RemoveReason)

	onRemoveFilter int

	// Logger is a logging interface used in combination with `Verbose`.
	// Defaults to `DefaultLogger()`.
	Logger Logger
}

// DefaultConfig initializes config with default values.
// When load for BigCache can be predicted in advance then it is better to use a custom config.
func DefaultConfig(eviction time.Duration) Config {
	return Config{
		Shards:             1024,
		LifeWindow:         eviction,
		CleanWindow:        0,
		MaxEntriesInWindow: 1000 * 10 * 60,
		MaxEntrySize:       500,
		Verbose:            true,
		Hasher:             newDefaultHasher(),
		HardMaxCacheSize:   0,
		Logger:             DefaultLogger(),
	}
}

// initialShardSize computes the initial shard size.
func (c Config) initialShardSize() int {
	return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard)
}

// maximumShardSize computes the maximum shard size.
func (c Config) maximumShardSize() int {
	maxShardSize := 0

	if c.HardMaxCacheSize > 0 {
		maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards
	}

	return maxShardSize
}

// OnRemoveFilterSet sets which remove reasons will trigger a call to OnRemoveWithReason.
// Filtering out reasons prevents bigcache from unwrapping them, which saves CPU.
func (c Config) OnRemoveFilterSet(reasons ...RemoveReason) Config {
	c.onRemoveFilter = 0
	for i := range reasons {
		c.onRemoveFilter |= 1 << uint(reasons[i])
	}

	return c
}
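
A minimal usage sketch of the filter (not from the vendored source). Because `OnRemoveFilterSet` has a value receiver and returns the modified `Config`, its result must be reassigned:

```go
cfg := bigcache.DefaultConfig(10 * time.Minute)
cfg.OnRemoveWithReason = func(key string, entry []byte, reason bigcache.RemoveReason) {
	log.Printf("removed %q, reason %d", key, reason)
}
// Only expirations and space-pressure evictions reach the callback;
// explicit Deletes are filtered out, so their entries are never unwrapped.
cfg = cfg.OnRemoveFilterSet(bigcache.Expired, bigcache.NoSpace)
```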
@ -1,62 +0,0 @@
package bigcache

import (
	"encoding/binary"
)

const (
	timestampSizeInBytes = 8                                                       // Number of bytes used for timestamp
	hashSizeInBytes      = 8                                                       // Number of bytes used for hash
	keySizeInBytes       = 2                                                       // Number of bytes used for size of entry key
	headersSizeInBytes   = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
)

func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
	keyLength := len(key)
	blobLength := len(entry) + headersSizeInBytes + keyLength

	if blobLength > len(*buffer) {
		*buffer = make([]byte, blobLength)
	}
	blob := *buffer

	binary.LittleEndian.PutUint64(blob, timestamp)
	binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
	binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
	copy(blob[headersSizeInBytes:], key)
	copy(blob[headersSizeInBytes+keyLength:], entry)

	return blob[:blobLength]
}

func readEntry(data []byte) []byte {
	length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])

	// copy on read
	dst := make([]byte, len(data)-int(headersSizeInBytes+length))
	copy(dst, data[headersSizeInBytes+length:])

	return dst
}

func readTimestampFromEntry(data []byte) uint64 {
	return binary.LittleEndian.Uint64(data)
}

func readKeyFromEntry(data []byte) string {
	length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])

	// copy on read
	dst := make([]byte, length)
	copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])

	return bytesToString(dst)
}

func readHashFromEntry(data []byte) uint64 {
	return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
}

func resetKeyFromEntry(data []byte) {
	binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
}
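
The wrapped-entry layout above is `8-byte little-endian timestamp | 8-byte hash | 2-byte key length | key | value`. A self-contained sketch of a round trip through that layout, using only the standard library and illustrative values:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const headerSize = 8 + 8 + 2 // timestamp + hash + key length
	key, value := "k1", []byte("v1")

	blob := make([]byte, headerSize+len(key)+len(value))
	binary.LittleEndian.PutUint64(blob[0:], 1575000000) // timestamp (example)
	binary.LittleEndian.PutUint64(blob[8:], 0xDEADBEEF) // key hash (example)
	binary.LittleEndian.PutUint16(blob[16:], uint16(len(key)))
	copy(blob[headerSize:], key)
	copy(blob[headerSize+len(key):], value)

	kLen := int(binary.LittleEndian.Uint16(blob[16:]))
	fmt.Println(string(blob[headerSize : headerSize+kLen])) // k1
	fmt.Println(string(blob[headerSize+kLen:]))             // v1
}
```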
@ -1,6 +0,0 @@
package bigcache

import "errors"

// ErrEntryNotFound is returned when no entry exists for the provided key.
var ErrEntryNotFound = errors.New("Entry not found")
@ -1,28 +0,0 @@
package bigcache

// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations.
// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function
func newDefaultHasher() Hasher {
	return fnv64a{}
}

type fnv64a struct{}

const (
	// offset64 is the FNV-1a 64-bit offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
	offset64 = 14695981039346656037
	// prime64 is the FNV-1a 64-bit prime. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
	prime64 = 1099511628211
)

// Sum64 hashes the given string and returns its uint64 hash value.
func (f fnv64a) Sum64(key string) uint64 {
	var hash uint64 = offset64
	for i := 0; i < len(key); i++ {
		hash ^= uint64(key[i])
		hash *= prime64
	}

	return hash
}
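
Because the constants above are the standard FNV-1a 64-bit parameters, this hasher agrees with the standard library's `hash/fnv`. A self-contained cross-check sketch (not part of the vendored file):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// fnv1a64 mirrors the vendored Sum64 above: XOR the byte in, then multiply.
func fnv1a64(key string) uint64 {
	const offset64 = 14695981039346656037
	const prime64 = 1099511628211
	var h uint64 = offset64
	for i := 0; i < len(key); i++ {
		h ^= uint64(key[i])
		h *= prime64
	}
	return h
}

func main() {
	std := fnv.New64a()
	std.Write([]byte("my-unique-key"))
	fmt.Println(fnv1a64("my-unique-key") == std.Sum64()) // true: same FNV-1a parameters
}
```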
@ -1,8 +0,0 @@
package bigcache

// Hasher is responsible for generating an unsigned 64-bit hash of a provided string. A Hasher should minimize collisions
// (generating the same hash for different strings), and, since speed also matters, fast functions are preferable (e.g.
// the FarmHash family).
type Hasher interface {
	Sum64(string) uint64
}
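
A minimal sketch of plugging in a custom Hasher (not from the vendored source; the toy hasher below is deliberately bad and exists only to demonstrate the interface, since it collides for all keys of equal length):

```go
package main

import (
	"time"

	"github.com/allegro/bigcache"
)

// lenHasher satisfies bigcache.Hasher but is NOT suitable for production.
type lenHasher struct{}

func (lenHasher) Sum64(key string) uint64 { return uint64(len(key)) }

func main() {
	cfg := bigcache.DefaultConfig(10 * time.Minute)
	cfg.Hasher = lenHasher{} // replaces the default FNV-1a hasher
	cache, _ := bigcache.NewBigCache(cfg)
	_ = cache
}
```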
@ -1,122 +0,0 @@
package bigcache

import "sync"

type iteratorError string

func (e iteratorError) Error() string {
	return string(e)
}

// ErrInvalidIteratorState is reported when the iterator is in an invalid state.
const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position")

// ErrCannotRetrieveEntry is reported when an entry cannot be retrieved from the underlying cache.
const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache")

var emptyEntryInfo = EntryInfo{}

// EntryInfo holds information about an entry in the cache.
type EntryInfo struct {
	timestamp uint64
	hash      uint64
	key       string
	value     []byte
}

// Key returns the entry's underlying key.
func (e EntryInfo) Key() string {
	return e.key
}

// Hash returns the entry's hash value.
func (e EntryInfo) Hash() uint64 {
	return e.hash
}

// Timestamp returns the entry's timestamp (time of insertion).
func (e EntryInfo) Timestamp() uint64 {
	return e.timestamp
}

// Value returns the entry's underlying value.
func (e EntryInfo) Value() []byte {
	return e.value
}

// EntryInfoIterator allows iterating over entries in the cache.
type EntryInfoIterator struct {
	mutex         sync.Mutex
	cache         *BigCache
	currentShard  int
	currentIndex  int
	elements      []uint32
	elementsCount int
	valid         bool
}

// SetNext moves to the next element and returns true if it exists.
func (it *EntryInfoIterator) SetNext() bool {
	it.mutex.Lock()

	it.valid = false
	it.currentIndex++

	if it.elementsCount > it.currentIndex {
		it.valid = true
		it.mutex.Unlock()
		return true
	}

	for i := it.currentShard + 1; i < it.cache.config.Shards; i++ {
		it.elements, it.elementsCount = it.cache.shards[i].copyKeys()

		// Non empty shard - stick with it
		if it.elementsCount > 0 {
			it.currentIndex = 0
			it.currentShard = i
			it.valid = true
			it.mutex.Unlock()
			return true
		}
	}
	it.mutex.Unlock()
	return false
}

func newIterator(cache *BigCache) *EntryInfoIterator {
	elements, count := cache.shards[0].copyKeys()

	return &EntryInfoIterator{
		cache:         cache,
		currentShard:  0,
		currentIndex:  -1,
		elements:      elements,
		elementsCount: count,
	}
}

// Value returns the current value from the iterator.
func (it *EntryInfoIterator) Value() (EntryInfo, error) {
	it.mutex.Lock()

	if !it.valid {
		it.mutex.Unlock()
		return emptyEntryInfo, ErrInvalidIteratorState
	}

	entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex]))

	if err != nil {
		it.mutex.Unlock()
		return emptyEntryInfo, ErrCannotRetrieveEntry
	}
	it.mutex.Unlock()

	return EntryInfo{
		timestamp: readTimestampFromEntry(entry),
		hash:      readHashFromEntry(entry),
		key:       readKeyFromEntry(entry),
		value:     readEntry(entry),
	}, nil
}
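
A minimal sketch of driving the iterator above (not from the vendored source; `cache` is an assumed `*BigCache`):

```go
it := cache.Iterator()
for it.SetNext() {
	info, err := it.Value()
	if err != nil {
		// ErrCannotRetrieveEntry: the entry may have been evicted
		// between SetNext and Value.
		continue
	}
	fmt.Printf("%s -> %d bytes\n", info.Key(), len(info.Value()))
}
```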
@ -1,30 +0,0 @@
package bigcache

import (
	"log"
	"os"
)

// Logger is invoked when `Config.Verbose=true`.
type Logger interface {
	Printf(format string, v ...interface{})
}

// this is a safeguard, breaking at compile time in case
// `log.Logger` does not adhere to our `Logger` interface.
// see https://golang.org/doc/faq#guarantee_satisfies_interface
var _ Logger = &log.Logger{}

// DefaultLogger returns a `Logger` implementation
// backed by stdlib's log.
func DefaultLogger() *log.Logger {
	return log.New(os.Stdout, "", log.LstdFlags)
}

func newLogger(custom Logger) Logger {
	if custom != nil {
		return custom
	}

	return DefaultLogger()
}
@ -1,238 +0,0 @@
package queue

import (
	"encoding/binary"
	"log"
	"time"
)

const (
	// Number of bytes used to keep information about entry size
	headerEntrySize = 4
	// Bytes before the left margin are not used. A zero index means the element does not exist in the queue, which is useful while reading a slice from an index.
	leftMarginIndex = 1
	// Minimum empty blob size in bytes. An empty blob fills the space between tail and head when additional memory is allocated.
	// It keeps entry indexes unchanged.
	minimumEmptyBlobSize = 32 + headerEntrySize
)

var (
	errEmptyQueue       = &queueError{"Empty queue"}
	errInvalidIndex     = &queueError{"Index must be greater than zero. Invalid index."}
	errIndexOutOfBounds = &queueError{"Index out of range"}
)

// BytesQueue is a non-thread-safe FIFO queue based on a byte array.
// Every push operation returns the index of the entry, which can be used to read the entry later.
type BytesQueue struct {
	array           []byte
	capacity        int
	maxCapacity     int
	head            int
	tail            int
	count           int
	rightMargin     int
	headerBuffer    []byte
	verbose         bool
	initialCapacity int
}

type queueError struct {
	message string
}

// NewBytesQueue initializes a new bytes queue.
// The initial capacity is used in the byte array allocation.
// When the verbose flag is set, information about memory allocations is printed.
func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue {
	return &BytesQueue{
		array:           make([]byte, initialCapacity),
		capacity:        initialCapacity,
		maxCapacity:     maxCapacity,
		headerBuffer:    make([]byte, headerEntrySize),
		tail:            leftMarginIndex,
		head:            leftMarginIndex,
		rightMargin:     leftMarginIndex,
		verbose:         verbose,
		initialCapacity: initialCapacity,
	}
}

// Reset removes all entries from the queue.
func (q *BytesQueue) Reset() {
	// Just reset indexes
	q.tail = leftMarginIndex
	q.head = leftMarginIndex
	q.rightMargin = leftMarginIndex
	q.count = 0
}

// Push copies the entry at the end of the queue and moves the tail pointer. Allocates more space if needed.
// Returns the index for the pushed data or an error if the maximum queue size limit is reached.
func (q *BytesQueue) Push(data []byte) (int, error) {
	dataLen := len(data)

	if q.availableSpaceAfterTail() < dataLen+headerEntrySize {
		if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize {
			q.tail = leftMarginIndex
		} else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 {
			return -1, &queueError{"Full queue. Maximum size limit reached."}
		} else {
			q.allocateAdditionalMemory(dataLen + headerEntrySize)
		}
	}

	index := q.tail

	q.push(data, dataLen)

	return index, nil
}

func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
	start := time.Now()
	if q.capacity < minimum {
		q.capacity += minimum
	}
	q.capacity = q.capacity * 2
	if q.capacity > q.maxCapacity && q.maxCapacity > 0 {
		q.capacity = q.maxCapacity
	}

	oldArray := q.array
	q.array = make([]byte, q.capacity)

	if leftMarginIndex != q.rightMargin {
		copy(q.array, oldArray[:q.rightMargin])

		if q.tail < q.head {
			emptyBlobLen := q.head - q.tail - headerEntrySize
			q.push(make([]byte, emptyBlobLen), emptyBlobLen)
			q.head = leftMarginIndex
			q.tail = q.rightMargin
		}
	}

	if q.verbose {
		log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity)
	}
}

func (q *BytesQueue) push(data []byte, len int) {
	binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len))
	q.copy(q.headerBuffer, headerEntrySize)

	q.copy(data, len)

	if q.tail > q.head {
		q.rightMargin = q.tail
	}

	q.count++
}

func (q *BytesQueue) copy(data []byte, len int) {
	q.tail += copy(q.array[q.tail:], data[:len])
}

// Pop reads the oldest entry from the queue and moves the head pointer to the next one.
func (q *BytesQueue) Pop() ([]byte, error) {
	data, size, err := q.peek(q.head)
	if err != nil {
		return nil, err
	}

	q.head += headerEntrySize + size
	q.count--

	if q.head == q.rightMargin {
		q.head = leftMarginIndex
		if q.tail == q.rightMargin {
			q.tail = leftMarginIndex
		}
		q.rightMargin = q.tail
	}

	return data, nil
}

// Peek reads the oldest entry from the queue without moving the head pointer.
func (q *BytesQueue) Peek() ([]byte, error) {
	data, _, err := q.peek(q.head)
	return data, err
}

// Get reads the entry at the given index.
func (q *BytesQueue) Get(index int) ([]byte, error) {
	data, _, err := q.peek(index)
	return data, err
}

// CheckGet checks if an entry can be read at the given index.
func (q *BytesQueue) CheckGet(index int) error {
	return q.peekCheckErr(index)
}

// Capacity returns the number of bytes allocated for the queue.
func (q *BytesQueue) Capacity() int {
	return q.capacity
}

// Len returns the number of entries kept in the queue.
func (q *BytesQueue) Len() int {
	return q.count
}

// Error returns the error message.
func (e *queueError) Error() string {
	return e.message
}

// peekCheckErr is identical to peek, but does not actually return any data.
func (q *BytesQueue) peekCheckErr(index int) error {
	if q.count == 0 {
		return errEmptyQueue
	}

	if index <= 0 {
		return errInvalidIndex
	}

	if index+headerEntrySize >= len(q.array) {
		return errIndexOutOfBounds
	}
	return nil
}

func (q *BytesQueue) peek(index int) ([]byte, int, error) {
	if q.count == 0 {
		return nil, 0, errEmptyQueue
	}

	if index <= 0 {
		return nil, 0, errInvalidIndex
	}

	if index+headerEntrySize >= len(q.array) {
		return nil, 0, errIndexOutOfBounds
	}

	blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
	return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil
}

func (q *BytesQueue) availableSpaceAfterTail() int {
	if q.tail >= q.head {
		return q.capacity - q.tail
	}
	return q.head - q.tail - minimumEmptyBlobSize
}

func (q *BytesQueue) availableSpaceBeforeHead() int {
	if q.tail >= q.head {
		return q.head - leftMarginIndex - minimumEmptyBlobSize
	}
	return q.head - q.tail - minimumEmptyBlobSize
}
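
A minimal usage sketch of the queue above (not from the vendored source; it assumes the package is imported as `github.com/allegro/bigcache/queue`):

```go
q := queue.NewBytesQueue(1024, 0, false) // 1 KiB initial capacity, no hard limit, quiet
idx, _ := q.Push([]byte("first"))        // Push returns a stable index...
q.Push([]byte("second"))

entry, _ := q.Get(idx)     // ...usable for random reads until the entry is popped
fmt.Println(string(entry)) // first

oldest, _ := q.Pop()        // FIFO removal from the head
fmt.Println(string(oldest)) // first
```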
@ -1,259 +0,0 @@
package bigcache

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/allegro/bigcache/queue"
)

type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason)

type cacheShard struct {
	hashmap     map[uint64]uint32
	entries     queue.BytesQueue
	lock        sync.RWMutex
	entryBuffer []byte
	onRemove    onRemoveCallback

	isVerbose  bool
	logger     Logger
	clock      clock
	lifeWindow uint64

	stats Stats
}

func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
	s.lock.RLock()
	itemIndex := s.hashmap[hashedKey]

	if itemIndex == 0 {
		s.lock.RUnlock()
		s.miss()
		return nil, ErrEntryNotFound
	}

	wrappedEntry, err := s.entries.Get(int(itemIndex))
	if err != nil {
		s.lock.RUnlock()
		s.miss()
		return nil, err
	}
	if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
		if s.isVerbose {
			s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
		}
		s.lock.RUnlock()
		s.collision()
		return nil, ErrEntryNotFound
	}
	entry := readEntry(wrappedEntry)
	s.lock.RUnlock()
	s.hit()
	return entry, nil
}

func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
	currentTimestamp := uint64(s.clock.epoch())

	s.lock.Lock()

	if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
		if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
			resetKeyFromEntry(previousEntry)
		}
	}

	if oldestEntry, err := s.entries.Peek(); err == nil {
		s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
	}

	w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)

	for {
		if index, err := s.entries.Push(w); err == nil {
			s.hashmap[hashedKey] = uint32(index)
			s.lock.Unlock()
			return nil
		}
		if s.removeOldestEntry(NoSpace) != nil {
			s.lock.Unlock()
			return fmt.Errorf("entry is bigger than max shard size")
		}
	}
}

func (s *cacheShard) del(key string, hashedKey uint64) error {
	// Optimistic pre-check using only a read lock
	s.lock.RLock()
	itemIndex := s.hashmap[hashedKey]

	if itemIndex == 0 {
		s.lock.RUnlock()
		s.delmiss()
		return ErrEntryNotFound
	}

	if err := s.entries.CheckGet(int(itemIndex)); err != nil {
		s.lock.RUnlock()
		s.delmiss()
		return err
	}
	s.lock.RUnlock()

	s.lock.Lock()
	{
		// After obtaining the write lock, we need to read the same data again,
		// since what was read earlier may be stale by now
		itemIndex = s.hashmap[hashedKey]

		if itemIndex == 0 {
			s.lock.Unlock()
			s.delmiss()
			return ErrEntryNotFound
		}

		wrappedEntry, err := s.entries.Get(int(itemIndex))
		if err != nil {
			s.lock.Unlock()
			s.delmiss()
			return err
		}

		delete(s.hashmap, hashedKey)
		s.onRemove(wrappedEntry, Deleted)
		resetKeyFromEntry(wrappedEntry)
	}
	s.lock.Unlock()

	s.delhit()
	return nil
}

func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
	oldestTimestamp := readTimestampFromEntry(oldestEntry)
	if currentTimestamp-oldestTimestamp > s.lifeWindow {
		evict(Expired)
		return true
	}
	return false
}

func (s *cacheShard) cleanUp(currentTimestamp uint64) {
	s.lock.Lock()
	for {
		if oldestEntry, err := s.entries.Peek(); err != nil {
			break
		} else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
			break
		}
	}
	s.lock.Unlock()
}

func (s *cacheShard) getOldestEntry() ([]byte, error) {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.entries.Peek()
}

func (s *cacheShard) getEntry(index int) ([]byte, error) {
	s.lock.RLock()
	entry, err := s.entries.Get(index)
	s.lock.RUnlock()

	return entry, err
}

func (s *cacheShard) copyKeys() (keys []uint32, next int) {
	s.lock.RLock()
	keys = make([]uint32, len(s.hashmap))

	for _, index := range s.hashmap {
		keys[next] = index
		next++
	}

	s.lock.RUnlock()
	return keys, next
}

func (s *cacheShard) removeOldestEntry(reason RemoveReason) error {
	oldest, err := s.entries.Pop()
	if err == nil {
		hash := readHashFromEntry(oldest)
		delete(s.hashmap, hash)
		s.onRemove(oldest, reason)
		return nil
	}
	return err
}

func (s *cacheShard) reset(config Config) {
	s.lock.Lock()
	s.hashmap = make(map[uint64]uint32, config.initialShardSize())
	s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
	s.entries.Reset()
	s.lock.Unlock()
}

func (s *cacheShard) len() int {
	s.lock.RLock()
	res := len(s.hashmap)
	s.lock.RUnlock()
	return res
}

func (s *cacheShard) capacity() int {
	s.lock.RLock()
	res := s.entries.Capacity()
	s.lock.RUnlock()
	return res
}

func (s *cacheShard) getStats() Stats {
	var stats = Stats{
		Hits:       atomic.LoadInt64(&s.stats.Hits),
		Misses:     atomic.LoadInt64(&s.stats.Misses),
		DelHits:    atomic.LoadInt64(&s.stats.DelHits),
		DelMisses:  atomic.LoadInt64(&s.stats.DelMisses),
		Collisions: atomic.LoadInt64(&s.stats.Collisions),
	}
	return stats
}

func (s *cacheShard) hit() {
	atomic.AddInt64(&s.stats.Hits, 1)
}

func (s *cacheShard) miss() {
	atomic.AddInt64(&s.stats.Misses, 1)
}

func (s *cacheShard) delhit() {
	atomic.AddInt64(&s.stats.DelHits, 1)
}

func (s *cacheShard) delmiss() {
	atomic.AddInt64(&s.stats.DelMisses, 1)
}

func (s *cacheShard) collision() {
	atomic.AddInt64(&s.stats.Collisions, 1)
}

func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
	return &cacheShard{
		hashmap:     make(map[uint64]uint32, config.initialShardSize()),
		entries:     *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
		entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
		onRemove:    callback,

		isVerbose:  config.Verbose,
		logger:     newLogger(config.Logger),
		clock:      clock,
		lifeWindow: uint64(config.LifeWindow.Seconds()),
	}
}
@ -1,15 +0,0 @@
package bigcache

// Stats stores cache statistics.
type Stats struct {
	// Hits is the number of successfully found keys.
	Hits int64 `json:"hits"`
	// Misses is the number of keys that were not found.
	Misses int64 `json:"misses"`
	// DelHits is the number of successfully deleted keys.
	DelHits int64 `json:"delete_hits"`
	// DelMisses is the number of keys that could not be deleted.
	DelMisses int64 `json:"delete_misses"`
	// Collisions is the number of key collisions that occurred.
	Collisions int64 `json:"collisions"`
}
@ -1,16 +0,0 @@
package bigcache

func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func convertMBToBytes(value int) int {
	return value * 1024 * 1024
}

func isPowerOfTwo(number int) bool {
	return (number & (number - 1)) == 0
}
@ -1,16 +0,0 @@
language: go
go:
  - 1.7.3
  - tip
before_install:
  - go get -v github.com/golang/lint/golint
  - go get -v -t -d ./...
after_success:
  - make coverdata
  - bash <(curl -s https://codecov.io/bash)
script:
  - make -j4 check GOTEST_FLAGS=-v
notifications:
  slack:
    secure: MO/3LqbyALbi9vAY3pZetp/LfRuKEPAYEUya7XKmTWA3OFHYkTGqJWNosVkFJd6eSKwnc3HP4jlKADEBNVxADHzcA3uMPUQi1mIcNk/Ps1WWMNDv1liE2XOoOmHSHZ/8ksk6TNq83x+d17ZffYq8KAH6iKNKvllO1JzQPgJJdf+cNXQQlg6uPSe+ggMpjqVLkKcHqA4L3/BWo6fNcyvkqaN3uXcEzYPi7Nb2q9tl0ja6ToyZV4H6SinwitZmpedN3RkBcm4fKmGyw5ikzH93ycA5SvWrnXTh1dJvq6DU0FV7iwI6oqPTbAUc3FE5g7aEkK0qVR21s2j+KNaOLnuX10ZGQFwj2r3SW2REHq4j+qqFla/2EmSFZJt3GXYS+plmGCxqCgyjSw6tTi7LaGZ/mWBJEA9/EaXG1NkwlQYx5tdUMeGj77OczjXClynpb2hJ7MM2b32Rnp0JmNaXAh01SmClo+8nDWuksAsIdPtWsbF0/XHmEJiqpu8ojvVXOQIbPt43bjG7PS1t5jaRAU/N1n56SiCGgCSGd3Ui5eX5vmgWdpZMl8NG05G4LFsgmkdphRT5fru0C2PrhNZYRDGWs63XKapBxsvfqGzdHxTtYuaDjHjrI+9w0BC/8kEzSWoPmabQ5ci4wf4DeplcIay4tDMgMSo8pGAf52vrne4rmUo=
    on_success: change
@ -1,25 +0,0 @@
All contributors are required to sign a "Contributor License Agreement" at
<TBD>

The following organizations and people have contributed code to this library.
(Please keep both lists sorted alphabetically.)


Arista Networks, Inc.


Benoit Sigoure
Fabrice Rabaute



The list of individual contributors for code currently in HEAD can be obtained
at any time with the following script:

find . -type f \
  | while read i; do \
      git blame -t $i 2>/dev/null; \
    done \
  | sed 's/^[0-9a-f]\{8\} [^(]*(\([^)]*\) [-+0-9 ]\{14,\}).*/\1/;s/ *$//' \
  | awk '{a[$0]++; t++} END{for(n in a) print n}' \
  | sort
@ -1,177 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS
@ -1,14 +0,0 @@
# Copyright (C) 2016 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.

# TODO: move this to cmd/ockafka (https://github.com/docker/hub-feedback/issues/292)
FROM golang:1.7.3

RUN mkdir -p /go/src/github.com/aristanetworks/goarista/cmd
WORKDIR /go/src/github.com/aristanetworks/goarista
COPY ./ .
RUN go get -d ./cmd/ockafka/... \
 && go install ./cmd/ockafka

ENTRYPOINT ["/go/bin/ockafka"]
@ -1,58 +0,0 @@
# Copyright (C) 2015 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.

GO := go
TEST_TIMEOUT := 30s
GOTEST_FLAGS :=

DEFAULT_GOPATH := $${GOPATH%%:*}
GOPATH_BIN := $(DEFAULT_GOPATH)/bin
GOPATH_PKG := $(DEFAULT_GOPATH)/pkg
GOLINT := $(GOPATH_BIN)/golint
GOFOLDERS := find . -type d ! -path "./.git/*"

all: install

install:
	$(GO) install ./...

check: vet test fmtcheck lint

COVER_PKGS := key test
COVER_MODE := count
coverdata:
	echo 'mode: $(COVER_MODE)' >coverage.out
	for dir in $(COVER_PKGS); do \
	  $(GO) test -covermode=$(COVER_MODE) -coverprofile=cov.out-t ./$$dir || exit; \
	  tail -n +2 cov.out-t >> coverage.out && \
	  rm cov.out-t; \
	done;

coverage: coverdata
	$(GO) tool cover -html=coverage.out
	rm -f coverage.out

fmtcheck:
	errors=`gofmt -l .`; if test -n "$$errors"; then echo Check these files for style errors:; echo "$$errors"; exit 1; fi
	find . -name '*.go' ! -name '*.pb.go' -exec ./check_line_len.awk {} +

vet:
	$(GO) vet ./...

lint:
	lint=`$(GOFOLDERS) | xargs -L 1 $(GOLINT) | fgrep -v .pb.go`; if test -n "$$lint"; then echo "$$lint"; exit 1; fi
# The above is ugly, but unfortunately golint doesn't exit 1 when it finds
# lint. See https://github.com/golang/lint/issues/65

test:
	$(GO) test $(GOTEST_FLAGS) -timeout=$(TEST_TIMEOUT) ./...

docker:
	docker build -f cmd/occlient/Dockerfile .

clean:
	rm -rf $(GOPATH_PKG)/*/github.com/aristanetworks/goarista
	$(GO) clean ./...

.PHONY: all check coverage coverdata docker fmtcheck install lint test vet
@ -1,62 +0,0 @@
# Arista Go library [![Build Status](https://travis-ci.org/aristanetworks/goarista.svg?branch=master)](https://travis-ci.org/aristanetworks/goarista) [![codecov.io](http://codecov.io/github/aristanetworks/goarista/coverage.svg?branch=master)](http://codecov.io/github/aristanetworks/goarista?branch=master) [![GoDoc](https://godoc.org/github.com/aristanetworks/goarista?status.png)](https://godoc.org/github.com/aristanetworks/goarista) [![Go Report Card](https://goreportcard.com/badge/github.com/aristanetworks/goarista)](https://goreportcard.com/report/github.com/aristanetworks/goarista)

## areflect

Helper functions to work with the `reflect` package. Contains
`ForceExport()`, which bypasses the check in `reflect.Value` that
prevents accessing unexported attributes.
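
For illustration only (this example is not part of the original README), a
minimal sketch of how `ForceExport()` might be used; the signature is assumed
to be `func ForceExport(v reflect.Value) reflect.Value`:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/aristanetworks/goarista/areflect"
)

// widget is a hypothetical type with an unexported field.
type widget struct {
	hidden int
}

func main() {
	w := widget{hidden: 42}
	v := reflect.ValueOf(w).Field(0)
	// v.Interface() would panic here because the field is unexported;
	// ForceExport (assumed signature) clears the read-only flag first.
	fmt.Println(areflect.ForceExport(v).Interface()) // 42
}
```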

## monotime

Provides access to a fast monotonic clock source, filling a gap in the
[Go standard library, which lacks one](https://github.com/golang/go/issues/12914).
Don't use `time.Now()` in code that needs to time things or otherwise assumes
that time passes at a constant rate; use `monotime.Now()` instead.
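
A sketch of timing a critical section (not from the original README), assuming
`monotime.Now()` returns a `uint64` nanosecond count from an arbitrary epoch,
so only differences between readings are meaningful:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aristanetworks/goarista/monotime"
)

func main() {
	start := monotime.Now() // assumed: uint64 nanoseconds, arbitrary epoch
	time.Sleep(10 * time.Millisecond)
	elapsed := time.Duration(monotime.Now() - start)
	// Unlike time.Now(), this reading is immune to wall-clock steps (NTP etc.).
	fmt.Println("elapsed:", elapsed)
}
```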

## cmd

See the [cmd](cmd) directory.

## dscp

Provides `ListenTCPWithTOS()`, a replacement for `net.ListenTCP()` that
allows specifying the ToS (Type of Service), i.e. the DSCP / ECN /
class-of-service flags to use for incoming connections.
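
A hedged sketch (not part of the original README), assuming the signature
`ListenTCPWithTOS(*net.TCPAddr, byte) (net.Listener, error)`; the address and
ToS value below are illustrative:

```go
package main

import (
	"log"
	"net"

	"github.com/aristanetworks/goarista/dscp"
)

func main() {
	addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9090}
	// 0xB8 is DSCP EF (46) shifted into the upper six bits of the ToS byte.
	ln, err := dscp.ListenTCPWithTOS(addr, 0xB8)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	conn, err := ln.Accept() // accepted connections carry the ToS marking
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}
```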

## key

Provides a common type used across various Arista projects, named `key.Key`.
It works around the fact that Go doesn't allow a non-hashable type as a `map`
key, while we sometimes need to use a `map[string]interface{}` (or something
containing one) as a key. As a result, we frequently use
`map[key.Key]interface{}` instead of just `map[interface{}]interface{}` when
we need a generic key-value collection.
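
A sketch of the idea (not from the original README); `key.New` is assumed to
be the constructor that wraps an arbitrary value in a hashable `key.Key`, and
the field names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aristanetworks/goarista/key"
)

func main() {
	// A map[string]interface{} cannot be used directly as a Go map key,
	// so wrap it (key.New is an assumed constructor name).
	k := key.New(map[string]interface{}{"intf": "Ethernet1", "unit": 0})
	counters := map[key.Key]interface{}{k: 123}
	// Looking up by an equal composite key is the package's stated purpose,
	// per the description above.
	k2 := key.New(map[string]interface{}{"intf": "Ethernet1", "unit": 0})
	fmt.Println(counters[k2]) // expected: 123
}
```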

## lanz

A client for [LANZ](https://eos.arista.com/latency-analyzer-lanz-architectures-and-configuration/)
streaming servers. It connects to a LANZ streaming server, listens for
notifications, decodes them, and sends the LANZ protobuf messages on the
provided channel.

## monitor

A library to help expose monitoring metrics on top of the
[`expvar`](https://golang.org/pkg/expvar/) infrastructure.

## netns

`netns.Do(namespace, cb)` provides a handy mechanism to execute the given
callback `cb` in the given [network namespace](https://lwn.net/Articles/580893/).
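
A sketch (not part of the original README), assuming the signature
`netns.Do(nsName string, cb func() error) error`; the namespace name and
address are hypothetical, and switching namespaces is Linux-only and
typically needs CAP_SYS_ADMIN:

```go
package main

import (
	"log"
	"net"

	"github.com/aristanetworks/goarista/netns"
)

func main() {
	err := netns.Do("ns-mgmt", func() error {
		// Runs with the calling thread switched into the "ns-mgmt" namespace.
		conn, err := net.Dial("tcp", "192.0.2.1:22")
		if err != nil {
			return err
		}
		return conn.Close()
	})
	if err != nil {
		log.Fatal(err)
	}
}
```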

## pathmap

A data structure for mapping keys of type string slice to values. It
allows for some fuzzy matching.

## test

This is a [Go](http://golang.org/) library to help in writing unit tests.

## Examples

TBD
@ -1,25 +0,0 @@
#!/usr/bin/awk -f
# Copyright (C) 2015 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.

BEGIN {
	max = 100;
}

# Expand tabs to 4 spaces.
{
	gsub(/\t/, "    ");
}

length() > max {
	errors++;
	print FILENAME ":" FNR ": Line too long (" length() "/" max ")";
}

END {
	if (errors >= 125) {
		errors = 125;
	}
	exit errors;
}