Fix usage of sync.Pool
The current usage of sync.Pool is leaky because it stores arbitrarily sized buffers in the pool, whereas sync.Pool assumes that all items in the pool are interchangeable in terms of memory cost. Because the buffers put back are unbounded in size, the pool can end up pinning arbitrarily large amounts of memory for as long as the oversized entries keep being reused. As a simple fix, we set a maximum capacity above which a buffer is not returned to the pool.
parent 972f27185c
commit e0f8de6c35
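For reference, here is a minimal, standalone sketch of the capped-put pattern this commit applies. The names (bufPool, getBuf, putBuf) are illustrative, not zerolog's API; the point is that oversized buffers are simply not returned to the pool, so one large outlier can no longer pin its memory through endless reuse. The diff below applies the same guard to zerolog's arrayPool, eventPool, and the diode writer's bufPool.

// Minimal sketch of the capped-put pattern (illustrative names, not zerolog's API).
package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, 512) },
}

func getBuf() []byte { return bufPool.Get().([]byte) }

// putBuf returns buf to the pool only if it has not grown past a hard cap.
// Without the cap, a single huge message grows a pooled buffer once and that
// memory stays pinned for as long as the buffer keeps being reused.
func putBuf(buf []byte) {
	const maxSize = 1 << 16 // 64KiB, the same limit this commit chooses
	if cap(buf) > maxSize {
		return // drop it; the GC reclaims the oversized buffer
	}
	bufPool.Put(buf[:0])
}

func main() {
	b := getBuf()
	b = append(b, make([]byte, 1<<20)...) // one 1MiB outlier
	putBuf(b)                             // dropped instead of pooled
	fmt.Println(cap(getBuf()) <= 1<<16)   // true: later buffers stay small
}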
array.go | 20
@@ -20,6 +20,20 @@ type Array struct {
 	buf []byte
 }
 
+func putArray(a *Array) {
+	// Proper usage of a sync.Pool requires each entry to have approximately
+	// the same memory cost. To obtain this property when the stored type
+	// contains a variably-sized buffer, we add a hard limit on the maximum buffer
+	// to place back in the pool.
+	//
+	// See https://golang.org/issue/23199
+	const maxSize = 1 << 16 // 64KiB
+	if cap(a.buf) > maxSize {
+		return
+	}
+	arrayPool.Put(a)
+}
+
 // Arr creates an array to be added to an Event or Context.
 func Arr() *Array {
 	a := arrayPool.Get().(*Array)
@@ -38,7 +52,7 @@ func (a *Array) write(dst []byte) []byte {
 		dst = append(append(dst, a.buf...))
 	}
 	dst = enc.AppendArrayEnd(dst)
-	arrayPool.Put(a)
+	putArray(a)
 	return dst
 }
 
@@ -49,7 +63,7 @@ func (a *Array) Object(obj LogObjectMarshaler) *Array {
 	obj.MarshalZerologObject(e)
 	e.buf = enc.AppendEndMarker(e.buf)
 	a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
-	eventPool.Put(e)
+	putEvent(e)
 	return a
 }
 
@@ -80,7 +94,7 @@ func (a *Array) Err(err error) *Array {
 		e.buf = e.buf[:0]
 		e.appendObject(m)
 		a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
-		eventPool.Put(e)
+		putEvent(e)
 	case error:
 		a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error())
 	case string:

context.go
@@ -26,7 +26,7 @@ func (c Context) Fields(fields map[string]interface{}) Context {
 func (c Context) Dict(key string, dict *Event) Context {
 	dict.buf = enc.AppendEndMarker(dict.buf)
 	c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...)
-	eventPool.Put(dict)
+	putEvent(dict)
 	return c
 }
 
@@ -55,7 +55,7 @@ func (c Context) Object(key string, obj LogObjectMarshaler) Context {
 	e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
 	e.Object(key, obj)
 	c.l.context = enc.AppendObjectData(c.l.context, e.buf)
-	eventPool.Put(e)
+	putEvent(e)
 	return c
 }
 
@@ -64,7 +64,7 @@ func (c Context) EmbedObject(obj LogObjectMarshaler) Context {
 	e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
 	e.EmbedObject(obj)
 	c.l.context = enc.AppendObjectData(c.l.context, e.buf)
-	eventPool.Put(e)
+	putEvent(e)
 	return c
 }
 

diode/diode.go
@@ -86,6 +86,16 @@ func (dw Writer) poll() {
 		}
 		p := *(*[]byte)(d)
 		dw.w.Write(p)
-		bufPool.Put(p[:0])
+
+		// Proper usage of a sync.Pool requires each entry to have approximately
+		// the same memory cost. To obtain this property when the stored type
+		// contains a variably-sized buffer, we add a hard limit on the maximum buffer
+		// to place back in the pool.
+		//
+		// See https://golang.org/issue/23199
+		const maxSize = 1 << 16 // 64KiB
+		if cap(p) <= maxSize {
+			bufPool.Put(p[:0])
+		}
 	}
 }

event.go | 18
@@ -33,6 +33,20 @@ type Event struct {
 	ch []Hook // hooks from context
 }
 
+func putEvent(e *Event) {
+	// Proper usage of a sync.Pool requires each entry to have approximately
+	// the same memory cost. To obtain this property when the stored type
+	// contains a variably-sized buffer, we add a hard limit on the maximum buffer
+	// to place back in the pool.
+	//
+	// See https://golang.org/issue/23199
+	const maxSize = 1 << 16 // 64KiB
+	if cap(e.buf) > maxSize {
+		return
+	}
+	eventPool.Put(e)
+}
+
 // LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface
 // to be implemented by types used with Event/Context's Object methods.
 type LogObjectMarshaler interface {
@@ -66,7 +80,7 @@ func (e *Event) write() (err error) {
 			_, err = e.w.WriteLevel(e.level, e.buf)
 		}
 	}
-	eventPool.Put(e)
+	putEvent(e)
 	return
 }
 
@@ -141,7 +155,7 @@ func (e *Event) Dict(key string, dict *Event) *Event {
 	}
 	dict.buf = enc.AppendEndMarker(dict.buf)
 	e.buf = append(enc.AppendKey(e.buf, key), dict.buf...)
-	eventPool.Put(dict)
+	putEvent(e)
 	return e
 }
 

fields.go
@@ -20,7 +20,7 @@ func appendFields(dst []byte, fields map[string]interface{}) []byte {
 			e.buf = e.buf[:0]
 			e.appendObject(val)
 			dst = append(dst, e.buf...)
-			eventPool.Put(e)
+			putEvent(e)
 			continue
 		}
 		switch val := val.(type) {
@@ -36,7 +36,7 @@ func appendFields(dst []byte, fields map[string]interface{}) []byte {
 			e.buf = e.buf[:0]
 			e.appendObject(m)
 			dst = append(dst, e.buf...)
-			eventPool.Put(e)
+			putEvent(e)
 		case error:
 			dst = enc.AppendString(dst, m.Error())
 		case string:
@@ -54,7 +54,7 @@ func appendFields(dst []byte, fields map[string]interface{}) []byte {
 			e.buf = e.buf[:0]
 			e.appendObject(m)
 			dst = append(dst, e.buf...)
-			eventPool.Put(e)
+			putEvent(e)
 		case error:
 			dst = enc.AppendString(dst, m.Error())
 		case string:
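
A note on the shape of the fix: putArray and putEvent drop the whole pooled object once its buffer has outgrown the cap and rely on the pool allocating a fresh one later, while the diode writer keeps the same idea with the condition inverted. A hypothetical alternative, sketched below with an illustrative wrapper type (not zerolog's), would be to keep the wrapper and replace only the oversized buffer; it pays a fresh allocation either way, so dropping the entry, as the commit does, is the simpler choice.

// Sketch of two ways to cap a pool of buffer-carrying structs; the type and
// pool here are illustrative stand-ins, not zerolog's.
package main

import "sync"

type entry struct{ buf []byte }

var entryPool = sync.Pool{
	New: func() interface{} { return &entry{buf: make([]byte, 0, 512)} },
}

const maxSize = 1 << 16 // 64KiB

// putDrop mirrors the commit's approach: discard the whole entry when its
// buffer has grown past the cap; New allocates a replacement on demand.
func putDrop(e *entry) {
	if cap(e.buf) > maxSize {
		return
	}
	entryPool.Put(e)
}

// putShrink is the hypothetical alternative: keep the wrapper but swap in a
// small buffer before pooling it. It still pays a new allocation.
func putShrink(e *entry) {
	if cap(e.buf) > maxSize {
		e.buf = make([]byte, 0, 512)
	}
	entryPool.Put(e)
}

func main() {
	e := entryPool.Get().(*entry)
	e.buf = append(e.buf, make([]byte, 1<<20)...) // grow past the cap
	putDrop(e)                                    // or putShrink(e)
}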