package mining

import (
	"bytes"
	"encoding/json"
	"sync"
)

// bufferPool recycles byte buffers used by MarshalJSON. Use getBuffer and
// putBuffer rather than touching the pool directly.
var bufferPool = sync.Pool{
	New: func() interface{} {
		return bytes.NewBuffer(make([]byte, 0, 1024))
	},
}

// getBuffer returns an empty buffer from the pool. Release it when done:
//
//	buf := getBuffer()
//	defer putBuffer(buf)
func getBuffer() *bytes.Buffer {
	buf := bufferPool.Get().(*bytes.Buffer)
	buf.Reset()
	return buf
}

// putBuffer returns buf to the pool for reuse. Buffers that grew beyond
// 64KB are discarded so the pool does not pin large allocations.
func putBuffer(buf *bytes.Buffer) {
	if buf.Cap() <= 65536 {
		bufferPool.Put(buf)
	}
}

// UnmarshalJSON decodes data into target; target must be a non-nil pointer.
//
//	err := UnmarshalJSON(data, &msg)
func UnmarshalJSON(data []byte, target interface{}) error {
	return json.Unmarshal(data, target)
}

// MarshalJSON encodes value as JSON using a pooled scratch buffer.
//
// Unlike json.Marshal, HTML characters (<, >, &) are NOT escaped. The
// returned slice is a fresh copy, so it remains valid (and safe to hold)
// after the pooled buffer is reused.
//
//	data, err := MarshalJSON(stats)
func MarshalJSON(value interface{}) ([]byte, error) {
	buf := getBuffer()
	defer putBuffer(buf)

	encoder := json.NewEncoder(buf)
	// Deliberately differs from json.Marshal, which escapes <, >, and &
	// by default; callers here want the raw characters.
	encoder.SetEscapeHTML(false)
	if err := encoder.Encode(value); err != nil {
		return nil, err
	}

	// json.Encoder.Encode appends a trailing newline; strip it so the
	// output matches json.Marshal's shape.
	data := buf.Bytes()
	if len(data) > 0 && data[len(data)-1] == '\n' {
		data = data[:len(data)-1]
	}

	// Copy out of the pooled buffer so the result survives putBuffer.
	result := make([]byte, len(data))
	copy(result, data)
	return result, nil
}