pool.go

// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
// reduce copying and to allow reuse of individual chunks.
package buffer

import (
	"io"
	"sync"
)

// PoolConfig contains configuration for the allocation and reuse strategy.
type PoolConfig struct {
	StartSize  int // Minimum chunk size that is allocated.
	PooledSize int // Minimum chunk size that is reused; reusing chunks that are too small would add overhead.
	MaxSize    int // Maximum chunk size that will be allocated.
}

var config = PoolConfig{
	StartSize:  128,
	PooledSize: 512,
	MaxSize:    32768,
}

// Reuse pool: chunk size -> pool.
var buffers = map[int]*sync.Pool{}

func initBuffers() {
	for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
		buffers[l] = new(sync.Pool)
	}
}

func init() {
	initBuffers()
}

// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
func Init(cfg PoolConfig) {
	config = cfg
	initBuffers()
}
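
// initSketch is a hypothetical usage sketch (the helper name and the numbers are
// illustrative, not part of the package): a program that mostly produces small
// messages could tune the pooling strategy once at startup, before any Buffer is used.
func initSketch() {
	Init(PoolConfig{
		StartSize:  64,    // first chunk of every buffer is 64 bytes
		PooledSize: 256,   // chunks of 256 bytes and larger are recycled via sync.Pool
		MaxSize:    16384, // chunk growth is capped at 16 KiB
	})
}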

// putBuf puts a chunk to reuse pool if it can be reused.
func putBuf(buf []byte) {
	size := cap(buf)
	if size < config.PooledSize {
		return
	}
	if c := buffers[size]; c != nil {
		c.Put(buf[:0])
	}
}

// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
func getBuf(size int) []byte {
	if size < config.PooledSize {
		return make([]byte, 0, size)
	}
	if c := buffers[size]; c != nil {
		v := c.Get()
		if v != nil {
			return v.([]byte)
		}
	}
	return make([]byte, 0, size)
}
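
// poolKeySketch is a hypothetical sketch (not a real helper of the package) illustrating
// the pooling rule above: only chunks whose capacity exactly matches one of the pool keys
// (PooledSize, 2*PooledSize, ... up to MaxSize) are recycled; everything else is left to
// the garbage collector. Sizes assume the default config.
func poolKeySketch() {
	small := getBuf(64)    // below PooledSize: plain allocation, never pooled
	pooled := getBuf(1024) // exact pool key: may be served from, and returned to, a pool
	putBuf(small)          // dropped, since cap(small) < config.PooledSize
	putBuf(pooled)         // returned to buffers[1024] for reuse
}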

// Buffer is a buffer optimized for serialization without extra copying.
type Buffer struct {
	// Buf is the current chunk that can be used for serialization.
	Buf []byte

	toPool []byte   // the current chunk as obtained from getBuf, kept so it can be returned to the pool
	bufs   [][]byte // earlier chunks that have already been filled
}

// EnsureSpace makes sure that the current chunk contains at least s free bytes,
// possibly creating a new chunk.
func (b *Buffer) EnsureSpace(s int) {
	if cap(b.Buf)-len(b.Buf) >= s {
		return
	}
	l := len(b.Buf)
	if l > 0 {
		if cap(b.toPool) != cap(b.Buf) {
			// Chunk was reallocated, toPool can be pooled.
			putBuf(b.toPool)
		}
		if cap(b.bufs) == 0 {
			b.bufs = make([][]byte, 0, 8)
		}
		b.bufs = append(b.bufs, b.Buf)
		// The next chunk is twice the size of the previous one (capped at MaxSize below).
		l = cap(b.toPool) * 2
	} else {
		l = config.StartSize
	}
	if l > config.MaxSize {
		l = config.MaxSize
	}
	b.Buf = getBuf(l)
	b.toPool = b.Buf
}
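
// ensureSpaceSketch is a hypothetical sketch (not a real helper of the package) showing the
// chunk rotation performed above: once the current chunk is full, EnsureSpace parks it in
// b.bufs and installs a fresh, larger chunk in b.Buf, so bytes already written are never copied.
func ensureSpaceSketch() {
	var b Buffer
	b.EnsureSpace(1)           // allocates the first chunk (config.StartSize bytes)
	b.Buf = b.Buf[:cap(b.Buf)] // pretend the whole chunk has been written
	b.EnsureSpace(1)           // parks the full chunk in b.bufs and doubles the chunk size
	_ = len(b.bufs)            // now 1: the filled chunk is kept as-is
}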

// AppendByte appends a single byte to buffer.
func (b *Buffer) AppendByte(data byte) {
	if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
		b.EnsureSpace(1)
	}
	b.Buf = append(b.Buf, data)
}

// AppendBytes appends a byte slice to buffer.
func (b *Buffer) AppendBytes(data []byte) {
	for len(data) > 0 {
		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
			b.EnsureSpace(1)
		}
		sz := cap(b.Buf) - len(b.Buf)
		if sz > len(data) {
			sz = len(data)
		}
		b.Buf = append(b.Buf, data[:sz]...)
		data = data[sz:]
	}
}

// AppendString appends a string to buffer.
func (b *Buffer) AppendString(data string) {
	for len(data) > 0 {
		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
			b.EnsureSpace(1)
		}
		sz := cap(b.Buf) - len(b.Buf)
		if sz > len(data) {
			sz = len(data)
		}
		b.Buf = append(b.Buf, data[:sz]...)
		data = data[sz:]
	}
}

// Size computes the size of a buffer by adding sizes of every chunk.
func (b *Buffer) Size() int {
	size := len(b.Buf)
	for _, buf := range b.bufs {
		size += len(buf)
	}
	return size
}
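
// sizeSketch is a hypothetical sketch (not a real helper of the package) showing that Size
// reports the total number of bytes appended so far, across all chunks.
func sizeSketch() {
	var b Buffer
	b.AppendString("hello, ")
	b.AppendString("world")
	_ = b.Size() // 12, regardless of how the bytes are split across chunks
}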

// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
	var n int
	for _, buf := range b.bufs {
		if err == nil {
			n, err = w.Write(buf)
			written += n
		}
		putBuf(buf)
	}
	if err == nil {
		n, err = w.Write(b.Buf)
		written += n
	}
	putBuf(b.toPool)

	b.bufs = nil
	b.Buf = nil
	b.toPool = nil

	return
}
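
// dumpToSketch is a hypothetical sketch (not a real helper of the package) showing the
// streaming path: DumpTo writes every chunk to w in order, recycles the chunks, and leaves
// the buffer empty and ready for reuse.
func dumpToSketch(w io.Writer) (int, error) {
	var b Buffer
	b.AppendString(`{"ok":true}`)
	return b.DumpTo(w) // streams the 11-byte payload to w; b is reset afterwards
}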

// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
// copied if it does not fit in a single chunk.
func (b *Buffer) BuildBytes() []byte {
	if len(b.bufs) == 0 {
		ret := b.Buf
		b.toPool = nil
		b.Buf = nil
		return ret
	}

	ret := make([]byte, 0, b.Size())
	for _, buf := range b.bufs {
		ret = append(ret, buf...)
		putBuf(buf)
	}
	ret = append(ret, b.Buf...)
	putBuf(b.toPool)

	b.bufs = nil
	b.toPool = nil
	b.Buf = nil

	return ret
}
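
// buildBytesSketch is a hypothetical sketch (not a real helper of the package) showing the
// flattening path: BuildBytes joins all chunks into one []byte, copying only if more than
// one chunk was used, and returns the spent chunks to the pool.
func buildBytesSketch() []byte {
	var b Buffer
	b.AppendString("serialized ")
	b.AppendBytes([]byte("payload"))
	return b.BuildBytes() // "serialized payload"; b is reset afterwards
}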