mirror of https://github.com/ethereum/go-ethereum
cmd, dashboard, log: log collection and exploration (#17097)
* cmd, dashboard, internal, log, node: logging feature * cmd, dashboard, internal, log: requested changes * dashboard, vendor: gofmt, govendor, use vendored file watcher * dashboard, log: gofmt -s -w, goimports * dashboard, log: gosimplepull/16734/head
parent
2eedbe799f
commit
a9835c1816
File diff suppressed because one or more lines are too long
@ -0,0 +1,310 @@ |
||||
// @flow |
||||
|
||||
// Copyright 2018 The go-ethereum Authors |
||||
// This file is part of the go-ethereum library. |
||||
// |
||||
// The go-ethereum library is free software: you can redistribute it and/or modify |
||||
// it under the terms of the GNU Lesser General Public License as published by |
||||
// the Free Software Foundation, either version 3 of the License, or |
||||
// (at your option) any later version. |
||||
// |
||||
// The go-ethereum library is distributed in the hope that it will be useful, |
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of |
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
||||
// GNU Lesser General Public License for more details. |
||||
// |
||||
// You should have received a copy of the GNU Lesser General Public License |
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. |
||||
|
||||
import React, {Component} from 'react'; |
||||
|
||||
import List, {ListItem} from 'material-ui/List'; |
||||
import type {Record, Content, LogsMessage, Logs as LogsType} from '../types/content'; |
||||
|
||||
// requestBand says how wide is the top/bottom zone, eg. 0.1 means 10% of the container height. |
||||
const requestBand = 0.05; |
||||
|
||||
// fieldPadding is a global map with maximum field value lengths seen until now |
||||
// to allow padding log contexts in a bit smarter way. |
||||
const fieldPadding = new Map(); |
||||
|
||||
// createChunk creates an HTML formatted object, which displays the given array similarly to |
||||
// the server side terminal. |
||||
const createChunk = (records: Array<Record>) => { |
||||
let content = ''; |
||||
records.forEach((record) => { |
||||
const {t, ctx} = record; |
||||
let {lvl, msg} = record; |
||||
let color = '#ce3c23'; |
||||
switch (lvl) { |
||||
case 'trace': |
||||
case 'trce': |
||||
lvl = 'TRACE'; |
||||
color = '#3465a4'; |
||||
break; |
||||
case 'debug': |
||||
case 'dbug': |
||||
lvl = 'DEBUG'; |
||||
color = '#3d989b'; |
||||
break; |
||||
case 'info': |
||||
lvl = 'INFO '; |
||||
color = '#4c8f0f'; |
||||
break; |
||||
case 'warn': |
||||
lvl = 'WARN '; |
||||
color = '#b79a22'; |
||||
break; |
||||
case 'error': |
||||
case 'eror': |
||||
lvl = 'ERROR'; |
||||
color = '#754b70'; |
||||
break; |
||||
case 'crit': |
||||
lvl = 'CRIT '; |
||||
color = '#ce3c23'; |
||||
break; |
||||
default: |
||||
lvl = ''; |
||||
} |
||||
const time = new Date(t); |
||||
if (lvl === '' || !(time instanceof Date) || isNaN(time) || typeof msg !== 'string' || !Array.isArray(ctx)) { |
||||
content += '<span style="color:#ce3c23">Invalid log record</span><br />'; |
||||
return; |
||||
} |
||||
if (ctx.length > 0) { |
||||
msg += ' '.repeat(Math.max(40 - msg.length, 0)); |
||||
} |
||||
const month = `0${time.getMonth() + 1}`.slice(-2); |
||||
const date = `0${time.getDate()}`.slice(-2); |
||||
const hours = `0${time.getHours()}`.slice(-2); |
||||
const minutes = `0${time.getMinutes()}`.slice(-2); |
||||
const seconds = `0${time.getSeconds()}`.slice(-2); |
||||
content += `<span style="color:${color}">${lvl}</span>[${month}-${date}|${hours}:${minutes}:${seconds}] ${msg}`; |
||||
|
||||
for (let i = 0; i < ctx.length; i += 2) { |
||||
const key = ctx[i]; |
||||
const val = ctx[i + 1]; |
||||
let padding = fieldPadding.get(key); |
||||
if (typeof padding !== 'number' || padding < val.length) { |
||||
padding = val.length; |
||||
fieldPadding.set(key, padding); |
||||
} |
||||
let p = ''; |
||||
if (i < ctx.length - 2) { |
||||
p = ' '.repeat(padding - val.length); |
||||
} |
||||
content += ` <span style="color:${color}">${key}</span>=${val}${p}`; |
||||
} |
||||
content += '<br />'; |
||||
}); |
||||
return content; |
||||
}; |
||||
|
||||
// inserter is a state updater function for the main component, which inserts the new log chunk into the chunk array. |
||||
// limit is the maximum length of the chunk array, used in order to prevent the browser from OOM. |
||||
export const inserter = (limit: number) => (update: LogsMessage, prev: LogsType) => { |
||||
prev.topChanged = 0; |
||||
prev.bottomChanged = 0; |
||||
if (!Array.isArray(update.chunk) || update.chunk.length < 1) { |
||||
return prev; |
||||
} |
||||
if (!Array.isArray(prev.chunks)) { |
||||
prev.chunks = []; |
||||
} |
||||
const content = createChunk(update.chunk); |
||||
if (!update.source) { |
||||
// In case of stream chunk. |
||||
if (!prev.endBottom) { |
||||
return prev; |
||||
} |
||||
if (prev.chunks.length < 1) { |
||||
// This should never happen, because the first chunk is always a non-stream chunk. |
||||
return [{content, name: '00000000000000.log'}]; |
||||
} |
||||
prev.chunks[prev.chunks.length - 1].content += content; |
||||
prev.bottomChanged = 1; |
||||
return prev; |
||||
} |
||||
const chunk = { |
||||
content, |
||||
name: update.source.name, |
||||
}; |
||||
if (prev.chunks.length > 0 && update.source.name < prev.chunks[0].name) { |
||||
if (update.source.last) { |
||||
prev.endTop = true; |
||||
} |
||||
if (prev.chunks.length >= limit) { |
||||
prev.endBottom = false; |
||||
prev.chunks.splice(limit - 1, prev.chunks.length - limit + 1); |
||||
prev.bottomChanged = -1; |
||||
} |
||||
prev.chunks = [chunk, ...prev.chunks]; |
||||
prev.topChanged = 1; |
||||
return prev; |
||||
} |
||||
if (update.source.last) { |
||||
prev.endBottom = true; |
||||
} |
||||
if (prev.chunks.length >= limit) { |
||||
prev.endTop = false; |
||||
prev.chunks.splice(0, prev.chunks.length - limit + 1); |
||||
prev.topChanged = -1; |
||||
} |
||||
prev.chunks = [...prev.chunks, chunk]; |
||||
prev.bottomChanged = 1; |
||||
return prev; |
||||
}; |
||||
|
||||
// styles contains the constant styles of the component.
const styles = {
	// Remove the default list-item padding so chunks sit flush.
	logListItem: {padding: 0},
	// Render chunks terminal-like: monospace, no wrapping.
	logChunk: {
		color:      'white',
		width:      0,
		whiteSpace: 'nowrap',
		fontFamily: 'monospace',
	},
};
||||
|
||||
// Props are provided by the parent component.
export type Props = {
	container: Object, // Scrollable DOM container; scrollTop/scrollHeight are read and written.
	content: Content, // Shared dashboard content, including the log chunks.
	shouldUpdate: Object, // Carries a `logs` key when a log message triggered the update (see didUpdate).
	send: string => void, // Sends a JSON-encoded request to the server.
};

// State of the Logs component.
type State = {
	requestAllowed: boolean, // Suppresses further chunk requests while one is in flight.
};
||||
|
||||
// Logs renders the log page: log chunks are displayed as pre-rendered HTML,
// and scrolling near the container edges requests older/newer chunks.
class Logs extends Component<Props, State> {
	constructor(props: Props) {
		super(props);
		// NOTE(review): this.content is initialized as a createRef() here but is
		// overwritten with the raw DOM node by the ref callback in render();
		// the children[...] accesses below rely on the raw-node form — confirm.
		this.content = React.createRef();
		this.state = {
			requestAllowed: true,
		};
	}

	componentDidMount() {
		const {container} = this.props;
		// Start scrolled to the bottom, showing the most recent logs.
		container.scrollTop = container.scrollHeight - container.clientHeight;
	}

	// onScroll is triggered by the parent component's scroll event, and sends requests if the scroll position is
	// at the top or at the bottom.
	onScroll = () => {
		if (!this.state.requestAllowed || typeof this.content === 'undefined') {
			return;
		}
		const {logs} = this.props.content;
		if (logs.chunks.length < 1) {
			return;
		}
		if (this.atTop()) {
			// Request the chunk before the first one, unless the top is reached.
			if (!logs.endTop) {
				this.setState({requestAllowed: false});
				this.props.send(JSON.stringify({
					Logs: {
						Name: logs.chunks[0].name,
						Past: true,
					},
				}));
			}
		} else if (this.atBottom()) {
			// Request the chunk after the last one, unless the bottom is reached.
			if (!logs.endBottom) {
				this.setState({requestAllowed: false});
				this.props.send(JSON.stringify({
					Logs: {
						Name: logs.chunks[logs.chunks.length - 1].name,
						Past: false,
					},
				}));
			}
		}
	};

	// atTop checks if the scroll position is at the top of the container.
	atTop = () => this.props.container.scrollTop <= this.props.container.scrollHeight * requestBand;

	// atBottom checks if the scroll position is at the bottom of the container.
	atBottom = () => {
		const {container} = this.props;
		return container.scrollHeight - container.scrollTop <=
			container.clientHeight + container.scrollHeight * requestBand;
	};

	// beforeUpdate is called by the parent component, saves the previous scroll position
	// and the height of the first log chunk, which can be deleted during the insertion.
	beforeUpdate = () => {
		let firstHeight = 0;
		if (this.content && this.content.children[0] && this.content.children[0].children[0]) {
			firstHeight = this.content.children[0].children[0].clientHeight;
		}
		return {
			scrollTop: this.props.container.scrollTop,
			firstHeight,
		};
	};

	// didUpdate is called by the parent component, which provides the container. Sends the first request if the
	// visible part of the container isn't full, and resets the scroll position in order to avoid jumping when new
	// chunk is inserted.
	didUpdate = (prevProps, prevState, snapshot) => {
		if (typeof this.props.shouldUpdate.logs === 'undefined' || typeof this.content === 'undefined' || snapshot === null) {
			return;
		}
		const {logs} = this.props.content;
		const {container} = this.props;
		if (typeof container === 'undefined' || logs.chunks.length < 1) {
			return;
		}
		if (this.content.clientHeight < container.clientHeight) {
			// Only enters here at the beginning, when there isn't enough log to fill the container
			// and the scroll bar doesn't appear.
			if (!logs.endTop) {
				this.setState({requestAllowed: false});
				this.props.send(JSON.stringify({
					Logs: {
						Name: logs.chunks[0].name,
						Past: true,
					},
				}));
			}
			return;
		}
		const chunks = this.content.children[0].children;
		let {scrollTop} = snapshot;
		if (logs.topChanged > 0) {
			// A chunk was inserted at the top: shift down by its height to keep the view fixed.
			scrollTop += chunks[0].clientHeight;
		} else if (logs.bottomChanged > 0) {
			if (logs.topChanged < 0) {
				// The first chunk was removed: compensate with its saved height.
				scrollTop -= snapshot.firstHeight;
			} else if (logs.endBottom && this.atBottom()) {
				// Stick to the bottom while the stream is live.
				scrollTop = container.scrollHeight - container.clientHeight;
			}
		}
		container.scrollTop = scrollTop;
		this.setState({requestAllowed: true});
	};

	render() {
		return (
			<div ref={(ref) => { this.content = ref; }}>
				<List>
					{this.props.content.logs.chunks.map((c, index) => (
						<ListItem style={styles.logListItem} key={index}>
							<div style={styles.logChunk} dangerouslySetInnerHTML={{__html: c.content}} />
						</ListItem>
					))}
				</List>
			</div>
		);
	}
}

export default Logs;
@ -0,0 +1,288 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"io/ioutil" |
||||
"os" |
||||
"path/filepath" |
||||
"regexp" |
||||
"sort" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/mohae/deepcopy" |
||||
"github.com/rjeczalik/notify" |
||||
) |
||||
|
||||
// emptyChunk is the JSON representation of an empty log-record array.
var emptyChunk = json.RawMessage("[]")

// prepLogs creates a JSON array from the given log record buffer.
// Returns the prepared array and the position of the last '\n'
// character in the original buffer, or -1 if it doesn't contain any.
func prepLogs(buf []byte) (json.RawMessage, int) {
	// Prepend '[' and copy the raw newline-separated records after it.
	b := make(json.RawMessage, 0, len(buf)+1)
	b = append(b, '[')
	b = append(b, buf...)
	// Turn every record-terminating newline into a ',' separator,
	// remembering the position of the last one.
	last := -1
	for i, c := range b {
		if c == '\n' {
			b[i] = ','
			last = i
		}
	}
	if last < 0 {
		// No complete record in the buffer yet.
		return emptyChunk, -1
	}
	// The last separator becomes the closing bracket; anything after it
	// is an incomplete record and is dropped.
	b[last] = ']'
	return b[:last+1], last - 1
}
||||
|
||||
// handleLogRequest searches for the log file specified by the timestamp of the
// request, creates a JSON array out of it and sends it to the requesting client.
func (db *Dashboard) handleLogRequest(r *LogsRequest, c *client) {
	files, err := ioutil.ReadDir(db.logdir)
	if err != nil {
		log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
		return
	}
	// Collect the regular *.log files; their names start with a timestamp
	// (see streamLogs), so alphabetical order is also chronological order.
	re := regexp.MustCompile(`\.log$`)
	fileNames := make([]string, 0, len(files))
	for _, f := range files {
		if f.Mode().IsRegular() && re.MatchString(f.Name()) {
			fileNames = append(fileNames, f.Name())
		}
	}
	if len(fileNames) < 1 {
		log.Warn("No log files in logdir", "path", db.logdir)
		return
	}
	idx := sort.Search(len(fileNames), func(idx int) bool {
		// Returns the smallest index such as fileNames[idx] >= r.Name,
		// if there is no such index, returns n.
		return fileNames[idx] >= r.Name
	})

	switch {
	case idx < 0:
		// Defensive: sort.Search never returns a negative index.
		return
	case idx == 0 && r.Past:
		// There is no file before the requested one.
		return
	case idx >= len(fileNames):
		// There is no file at or after the requested name.
		return
	case r.Past:
		// Serve the file just before the match.
		idx--
	case idx == len(fileNames)-1 && fileNames[idx] == r.Name:
		// The requested file is the last one; nothing newer exists on disk.
		return
	case idx == len(fileNames)-1 || (idx == len(fileNames)-2 && fileNames[idx] == r.Name):
		// The last file is continuously updated, and its chunks are streamed,
		// so in order to avoid log record duplication on the client side, it is
		// handled differently. Its actual content is always saved in the history.
		db.lock.Lock()
		if db.history.Logs != nil {
			c.msg <- &Message{
				Logs: db.history.Logs,
			}
		}
		db.lock.Unlock()
		return
	case fileNames[idx] == r.Name:
		// Exact match while paging forward: step to the next (newer) file.
		idx++
	}

	// Read the chosen file and convert it into a JSON array chunk.
	path := filepath.Join(db.logdir, fileNames[idx])
	var buf []byte
	if buf, err = ioutil.ReadFile(path); err != nil {
		log.Warn("Failed to read file", "path", path, "err", err)
		return
	}
	chunk, end := prepLogs(buf)
	if end < 0 {
		log.Warn("The file doesn't contain valid logs", "path", path)
		return
	}
	c.msg <- &Message{
		Logs: &LogsMessage{
			Source: &LogFile{
				Name: fileNames[idx],
				// Last marks the oldest file, so the client stops paging backwards.
				Last: r.Past && idx == 0,
			},
			Chunk: chunk,
		},
	}
}
||||
|
||||
// streamLogs watches the file system, and when the logger writes
// the new log records into the files, picks them up, then makes
// JSON array out of them and sends them to the clients.
func (db *Dashboard) streamLogs() {
	defer db.wg.Done()
	var (
		err  error
		errc chan error
	)
	// Synchronize with shutdown: report the terminating error (or nil)
	// back on the quit channel.
	defer func() {
		if errc == nil {
			errc = <-db.quit
		}
		errc <- err
	}()

	files, err := ioutil.ReadDir(db.logdir)
	if err != nil {
		log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
		return
	}
	var (
		opened *os.File // File descriptor for the opened active log file.
		buf    []byte   // Contains the recently written log chunks, which are not sent to the clients yet.
	)

	// The log records are always written into the last file in alphabetical order, because of the timestamp.
	re := regexp.MustCompile(`\.log$`)
	i := len(files) - 1
	for i >= 0 && (!files[i].Mode().IsRegular() || !re.MatchString(files[i].Name())) {
		i--
	}
	if i < 0 {
		log.Warn("No log files in logdir", "path", db.logdir)
		return
	}
	if opened, err = os.OpenFile(filepath.Join(db.logdir, files[i].Name()), os.O_RDONLY, 0600); err != nil {
		log.Warn("Failed to open file", "name", files[i].Name(), "err", err)
		return
	}
	defer opened.Close() // Close the lastly opened file.
	fi, err := opened.Stat()
	if err != nil {
		log.Warn("Problem with file", "name", opened.Name(), "err", err)
		return
	}
	// Initialize the history entry for the active file with an empty chunk.
	db.lock.Lock()
	db.history.Logs = &LogsMessage{
		Source: &LogFile{
			Name: fi.Name(),
			Last: true,
		},
		Chunk: emptyChunk,
	}
	db.lock.Unlock()

	watcher := make(chan notify.EventInfo, 10)
	if err := notify.Watch(db.logdir, watcher, notify.Create); err != nil {
		log.Warn("Failed to create file system watcher", "err", err)
		return
	}
	defer notify.Stop(watcher)

	ticker := time.NewTicker(db.config.Refresh)
	defer ticker.Stop()

loop:
	for err == nil || errc == nil {
		select {
		case event := <-watcher:
			// Make sure that new log file was created.
			if !re.Match([]byte(event.Path())) {
				break
			}
			if opened == nil {
				log.Warn("The last log file is not opened")
				break loop
			}
			// The new log file's name is always greater,
			// because it is created using the actual log record's time.
			// NOTE(review): opened.Name() is the path passed to OpenFile while
			// event.Path() comes from the watcher; confirm both are comparable
			// forms of the same directory prefix.
			if opened.Name() >= event.Path() {
				break
			}
			// Read the rest of the previously opened file.
			// NOTE(review): this ':=' shadows the outer 'err', so failures in
			// this case never reach the deferred error report — confirm intended.
			chunk, err := ioutil.ReadAll(opened)
			if err != nil {
				log.Warn("Failed to read file", "name", opened.Name(), "err", err)
				break loop
			}
			buf = append(buf, chunk...)
			opened.Close()

			if chunk, last := prepLogs(buf); last >= 0 {
				// Send the rest of the previously opened file.
				db.sendToAll(&Message{
					Logs: &LogsMessage{
						Chunk: chunk,
					},
				})
			}
			// Switch to the newly created file.
			if opened, err = os.OpenFile(event.Path(), os.O_RDONLY, 0644); err != nil {
				log.Warn("Failed to open file", "name", event.Path(), "err", err)
				break loop
			}
			buf = buf[:0]

			// Change the last file in the history.
			fi, err := opened.Stat()
			if err != nil {
				log.Warn("Problem with file", "name", opened.Name(), "err", err)
				break loop
			}
			db.lock.Lock()
			db.history.Logs.Source.Name = fi.Name()
			db.history.Logs.Chunk = emptyChunk
			db.lock.Unlock()
		case <-ticker.C: // Send log updates to the client.
			if opened == nil {
				log.Warn("The last log file is not opened")
				break loop
			}
			// Read the new logs created since the last read.
			chunk, err := ioutil.ReadAll(opened)
			if err != nil {
				log.Warn("Failed to read file", "name", opened.Name(), "err", err)
				break loop
			}
			b := append(buf, chunk...)

			chunk, last := prepLogs(b)
			if last < 0 {
				break
			}
			// Only keep the invalid part of the buffer, which can be valid after the next read.
			buf = b[last+1:]

			var l *LogsMessage
			// Update the history.
			db.lock.Lock()
			if bytes.Equal(db.history.Logs.Chunk, emptyChunk) {
				// First records for this file: store them and send the full message.
				db.history.Logs.Chunk = chunk
				l = deepcopy.Copy(db.history.Logs).(*LogsMessage)
			} else {
				// Merge the two JSON arrays: replace the old closing bracket
				// with a comma and append the new chunk without its opening bracket.
				b = make([]byte, len(db.history.Logs.Chunk)+len(chunk)-1)
				copy(b, db.history.Logs.Chunk)
				b[len(db.history.Logs.Chunk)-1] = ','
				copy(b[len(db.history.Logs.Chunk):], chunk[1:])
				db.history.Logs.Chunk = b
				// Clients already have the history; stream only the new chunk.
				l = &LogsMessage{Chunk: chunk}
			}
			db.lock.Unlock()

			db.sendToAll(&Message{Logs: l})
		case errc = <-db.quit:
			break loop
		}
	}
}
@ -0,0 +1,21 @@ |
||||
The MIT License (MIT) |
||||
|
||||
Copyright (c) 2014 Joel |
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
of this software and associated documentation files (the "Software"), to deal |
||||
in the Software without restriction, including without limitation the rights |
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
copies of the Software, and to permit persons to whom the Software is |
||||
furnished to do so, subject to the following conditions: |
||||
|
||||
The above copyright notice and this permission notice shall be included in all |
||||
copies or substantial portions of the Software. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||||
SOFTWARE. |
@ -0,0 +1,8 @@ |
||||
deepCopy |
||||
======== |
||||
[![GoDoc](https://godoc.org/github.com/mohae/deepcopy?status.svg)](https://godoc.org/github.com/mohae/deepcopy)[![Build Status](https://travis-ci.org/mohae/deepcopy.png)](https://travis-ci.org/mohae/deepcopy) |
||||
|
||||
DeepCopy makes deep copies of things: unexported field values are not copied. |
||||
|
||||
## Usage |
||||
cpy := deepcopy.Copy(orig) |
@ -0,0 +1,125 @@ |
||||
// deepcopy makes deep copies of things. A standard copy will copy the
|
||||
// pointers: deep copy copies the values pointed to. Unexported field
|
||||
// values are not copied.
|
||||
//
|
||||
// Copyright (c)2014-2016, Joel Scoble (github.com/mohae), all rights reserved.
|
||||
// License: MIT, for more details check the included LICENSE file.
|
||||
package deepcopy |
||||
|
||||
import ( |
||||
"reflect" |
||||
"time" |
||||
) |
||||
|
||||
// Interface for delegating copy process to type.
// A type implementing it takes over the deep copying of itself.
type Interface interface {
	DeepCopy() interface{}
}

// Iface is an alias to Copy; this exists for backwards compatibility reasons.
func Iface(iface interface{}) interface{} {
	return Copy(iface)
}
||||
|
||||
// Copy creates a deep copy of whatever is passed to it and returns the copy
|
||||
// in an interface{}. The returned value will need to be asserted to the
|
||||
// correct type.
|
||||
func Copy(src interface{}) interface{} { |
||||
if src == nil { |
||||
return nil |
||||
} |
||||
|
||||
// Make the interface a reflect.Value
|
||||
original := reflect.ValueOf(src) |
||||
|
||||
// Make a copy of the same type as the original.
|
||||
cpy := reflect.New(original.Type()).Elem() |
||||
|
||||
// Recursively copy the original.
|
||||
copyRecursive(original, cpy) |
||||
|
||||
// Return the copy as an interface.
|
||||
return cpy.Interface() |
||||
} |
||||
|
||||
// copyRecursive does the actual copying of the interface. It currently has
// limited support for what it can handle. Add as needed.
func copyRecursive(original, cpy reflect.Value) {
	// check for implement deepcopy.Interface
	if original.CanInterface() {
		if copier, ok := original.Interface().(Interface); ok {
			// The type handles its own deep copying.
			cpy.Set(reflect.ValueOf(copier.DeepCopy()))
			return
		}
	}

	// handle according to original's Kind
	switch original.Kind() {
	case reflect.Ptr:
		// Get the actual value being pointed to.
		originalValue := original.Elem()

		// if it isn't valid, return.
		if !originalValue.IsValid() {
			return
		}
		// Allocate a new pointee and copy into it.
		cpy.Set(reflect.New(originalValue.Type()))
		copyRecursive(originalValue, cpy.Elem())

	case reflect.Interface:
		// If this is a nil, don't do anything
		if original.IsNil() {
			return
		}
		// Get the value for the interface, not the pointer.
		originalValue := original.Elem()

		// Get the value by calling Elem().
		copyValue := reflect.New(originalValue.Type()).Elem()
		copyRecursive(originalValue, copyValue)
		cpy.Set(copyValue)

	case reflect.Struct:
		// Special case: time.Time is copied by value, not field by field.
		t, ok := original.Interface().(time.Time)
		if ok {
			cpy.Set(reflect.ValueOf(t))
			return
		}
		// Go through each field of the struct and copy it.
		for i := 0; i < original.NumField(); i++ {
			// The Type's StructField for a given field is checked to see if StructField.PkgPath
			// is set to determine if the field is exported or not because CanSet() returns false
			// for settable fields. I'm not sure why. -mohae
			if original.Type().Field(i).PkgPath != "" {
				continue
			}
			copyRecursive(original.Field(i), cpy.Field(i))
		}

	case reflect.Slice:
		if original.IsNil() {
			return
		}
		// Make a new slice and copy each element.
		cpy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap()))
		for i := 0; i < original.Len(); i++ {
			copyRecursive(original.Index(i), cpy.Index(i))
		}

	case reflect.Map:
		if original.IsNil() {
			return
		}
		// Make a new map and deep copy both keys and values.
		cpy.Set(reflect.MakeMap(original.Type()))
		for _, key := range original.MapKeys() {
			originalValue := original.MapIndex(key)
			copyValue := reflect.New(originalValue.Type()).Elem()
			copyRecursive(originalValue, copyValue)
			copyKey := Copy(key.Interface())
			cpy.SetMapIndex(reflect.ValueOf(copyKey), copyValue)
		}

	default:
		// Everything else (numbers, strings, bools, …) is assigned directly.
		cpy.Set(original)
	}
}
Loading…
Reference in new issue