copystructure.go 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631
  1. package copystructure
  2. import (
  3. "errors"
  4. "reflect"
  5. "sync"
  6. "github.com/mitchellh/reflectwalk"
  7. )
// tagKey is the struct tag key inspected on struct fields to control
// per-field copy behavior ("ignore" or "shallow").
const tagKey = "copy"
  9. // Copy returns a deep copy of v.
  10. //
  11. // Copy is unable to copy unexported fields in a struct (lowercase field names).
  12. // Unexported fields can't be reflected by the Go runtime and therefore
  13. // copystructure can't perform any data copies.
  14. //
  15. // For structs, copy behavior can be controlled with struct tags. For example:
  16. //
  17. // struct {
  18. // Name string
  19. // Data *bytes.Buffer `copy:"shallow"`
  20. // }
  21. //
  22. // The available tag values are:
  23. //
  24. // * "ignore" - The field will be ignored, effectively resulting in it being
  25. // assigned the zero value in the copy.
  26. //
  27. // * "shallow" - The field will be be shallow copied. This means that references
  28. // values such as pointers, maps, slices, etc. will be directly assigned
  29. // versus deep copied.
  30. //
  31. func Copy(v interface{}) (interface{}, error) {
  32. return Config{}.Copy(v)
  33. }
// CopierFunc is a function that knows how to deep copy a specific type.
// Register these globally with the Copiers variable.
type CopierFunc func(interface{}) (interface{}, error)
// Copiers is a map of types that behave specially when they are copied.
// If a type is found in this map while deep copying, this function
// will be called to copy it instead of attempting to copy all fields.
//
// The key should be the type, obtained using: reflect.TypeOf(value with type).
//
// It is unsafe to write to this map after Copies have started. If you
// are writing to this map while also copying, wrap all modifications to
// this map as well as to Copy in a mutex.
var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
// ShallowCopiers is a map of pointer types that behave specially
// when they are copied. If a type is found in this map while deep
// copying, the pointer value will be shallow copied and not walked
// into.
//
// The key should be the type, obtained using: reflect.TypeOf(value
// with type).
//
// It is unsafe to write to this map after Copies have started. If you
// are writing to this map while also copying, wrap all modifications to
// this map as well as to Copy in a mutex.
var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{})
  59. // Must is a helper that wraps a call to a function returning
  60. // (interface{}, error) and panics if the error is non-nil. It is intended
  61. // for use in variable initializations and should only be used when a copy
  62. // error should be a crashing case.
  63. func Must(v interface{}, err error) interface{} {
  64. if err != nil {
  65. panic("copy error: " + err.Error())
  66. }
  67. return v
  68. }
// errPointerRequired is returned by Config.Copy when Lock is true but the
// value to copy is not a pointer.
var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
// Config customizes the behavior of a single Copy operation.
type Config struct {
	// Lock any types that are a sync.Locker and are not a mutex while copying.
	// If there is an RLocker method, use that to get the sync.Locker.
	Lock bool

	// Copiers is a map of types associated with a CopierFunc. Use the global
	// Copiers map if this is nil.
	Copiers map[reflect.Type]CopierFunc

	// ShallowCopiers is a map of pointer types that are shallow copied
	// no matter where they are encountered. Use the global ShallowCopiers
	// if this is nil.
	ShallowCopiers map[reflect.Type]struct{}
}
  82. func (c Config) Copy(v interface{}) (interface{}, error) {
  83. if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
  84. return nil, errPointerRequired
  85. }
  86. w := new(walker)
  87. if c.Lock {
  88. w.useLocks = true
  89. }
  90. if c.Copiers == nil {
  91. c.Copiers = Copiers
  92. }
  93. w.copiers = c.Copiers
  94. if c.ShallowCopiers == nil {
  95. c.ShallowCopiers = ShallowCopiers
  96. }
  97. w.shallowCopiers = c.ShallowCopiers
  98. err := reflectwalk.Walk(v, w)
  99. if err != nil {
  100. return nil, err
  101. }
  102. // Get the result. If the result is nil, then we want to turn it
  103. // into a typed nil if we can.
  104. result := w.Result
  105. if result == nil {
  106. val := reflect.ValueOf(v)
  107. result = reflect.Indirect(reflect.New(val.Type())).Interface()
  108. }
  109. return result, nil
  110. }
  111. // Return the key used to index interfaces types we've seen. Store the number
  112. // of pointers in the upper 32bits, and the depth in the lower 32bits. This is
  113. // easy to calculate, easy to match a key with our current depth, and we don't
  114. // need to deal with initializing and cleaning up nested maps or slices.
  115. func ifaceKey(pointers, depth int) uint64 {
  116. return uint64(pointers)<<32 | uint64(depth)
  117. }
// walker implements the reflectwalk callback interfaces to build a deep
// copy of the walked value. It maintains two parallel stacks: vals holds
// in-progress copied values, cs holds the containers (maps, slices,
// arrays, structs) currently being filled in.
type walker struct {
	// Result is the copy being built; it is set by the first valid value
	// pushed onto the value stack (the outermost one).
	Result interface{}

	// copiers and shallowCopiers are the type registries in effect for
	// this walk (from Config or the package-level globals).
	copiers        map[reflect.Type]CopierFunc
	shallowCopiers map[reflect.Type]struct{}

	// depth is the current walk depth. ignoreDepth, when non-zero, marks
	// the depth at or below which values are skipped (see ignoring).
	depth       int
	ignoreDepth int

	// vals is the value stack; cs is the container stack.
	vals []reflect.Value
	cs   []reflect.Value

	// This stores the number of pointers we've walked over, indexed by depth.
	ps []int

	// If an interface is indirected by a pointer, we need to know the type of
	// interface to create when creating the new value. Store the interface
	// types here, indexed by both the walk depth and the number of pointers
	// already seen at that depth. Use ifaceKey to calculate the proper uint64
	// value.
	ifaceTypes map[uint64]reflect.Type

	// any locks we've taken, indexed by depth
	locks []sync.Locker
	// take locks while walking the structure
	useLocks bool
}
  139. func (w *walker) Enter(l reflectwalk.Location) error {
  140. w.depth++
  141. // ensure we have enough elements to index via w.depth
  142. for w.depth >= len(w.locks) {
  143. w.locks = append(w.locks, nil)
  144. }
  145. for len(w.ps) < w.depth+1 {
  146. w.ps = append(w.ps, 0)
  147. }
  148. return nil
  149. }
// Exit unwinds one level of the walk. It releases any lock taken at this
// depth, clears the per-depth pointer and interface bookkeeping, and then
// assembles the copied value for the location being left: writing map
// entries, slice/array elements, and struct fields into their containers,
// or popping a finished container off the stack.
func (w *walker) Exit(l reflectwalk.Location) error {
	// Release (after this function returns) any lock taken at this depth.
	locker := w.locks[w.depth]
	w.locks[w.depth] = nil
	if locker != nil {
		defer locker.Unlock()
	}

	// Clear out pointers and interfaces as we exit the stack. The lower
	// 32 bits of an ifaceTypes key hold the depth (see ifaceKey), so mask
	// and compare against the depth being left.
	w.ps[w.depth] = 0
	for k := range w.ifaceTypes {
		mask := uint64(^uint32(0))
		if k&mask == uint64(w.depth) {
			delete(w.ifaceTypes, k)
		}
	}

	w.depth--
	// Once we rise back above the ignored subtree, stop ignoring.
	if w.ignoreDepth > w.depth {
		w.ignoreDepth = 0
	}

	if w.ignoring() {
		return nil
	}

	switch l {
	case reflectwalk.Array:
		fallthrough
	case reflectwalk.Map:
		fallthrough
	case reflectwalk.Slice:
		w.replacePointerMaybe()

		// Pop the container off our container stack.
		w.cs = w.cs[:len(w.cs)-1]
	case reflectwalk.MapValue:
		// Pop off the value and then the key.
		mv := w.valPop()
		mk := w.valPop()
		m := w.cs[len(w.cs)-1]

		// If mv is the zero value, SetMapIndex deletes the key from the map,
		// or in this case never adds it. We need to create a properly typed
		// zero value so that this key can be set.
		if !mv.IsValid() {
			mv = reflect.Zero(m.Elem().Type().Elem())
		}
		// The container stack stores *map, hence the Elem() call.
		m.Elem().SetMapIndex(mk, mv)
	case reflectwalk.ArrayElem:
		// Pop off the value and the index and set it on the array.
		v := w.valPop()
		i := w.valPop().Interface().(int)
		if v.IsValid() {
			a := w.cs[len(w.cs)-1]
			ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
			if ae.CanSet() {
				ae.Set(v)
			}
		}
	case reflectwalk.SliceElem:
		// Pop off the value and the index and set it on the slice.
		v := w.valPop()
		i := w.valPop().Interface().(int)
		if v.IsValid() {
			s := w.cs[len(w.cs)-1]
			se := s.Elem().Index(i)
			if se.CanSet() {
				se.Set(v)
			}
		}
	case reflectwalk.Struct:
		w.replacePointerMaybe()

		// Remove the struct from the container stack.
		w.cs = w.cs[:len(w.cs)-1]
	case reflectwalk.StructField:
		// Pop off the value and the field descriptor pushed by StructField.
		v := w.valPop()
		f := w.valPop().Interface().(reflect.StructField)
		if v.IsValid() {
			s := w.cs[len(w.cs)-1]
			sf := reflect.Indirect(s).FieldByName(f.Name)
			if sf.CanSet() {
				sf.Set(v)
			}
		}
	case reflectwalk.WalkLoc:
		// Whole walk finished: clear out the slices for GC.
		w.cs = nil
		w.vals = nil
	}

	return nil
}
  236. func (w *walker) Map(m reflect.Value) error {
  237. if w.ignoring() {
  238. return nil
  239. }
  240. w.lock(m)
  241. // Create the map. If the map itself is nil, then just make a nil map
  242. var newMap reflect.Value
  243. if m.IsNil() {
  244. newMap = reflect.New(m.Type())
  245. } else {
  246. newMap = wrapPtr(reflect.MakeMap(m.Type()))
  247. }
  248. w.cs = append(w.cs, newMap)
  249. w.valPush(newMap)
  250. return nil
  251. }
// MapElem is a no-op: map keys and values are assembled via the value
// stack when Exit handles reflectwalk.MapValue.
func (w *walker) MapElem(m, k, v reflect.Value) error {
	return nil
}
  255. func (w *walker) PointerEnter(v bool) error {
  256. if v {
  257. w.ps[w.depth]++
  258. }
  259. return nil
  260. }
  261. func (w *walker) PointerExit(v bool) error {
  262. if v {
  263. w.ps[w.depth]--
  264. }
  265. return nil
  266. }
  267. func (w *walker) Pointer(v reflect.Value) error {
  268. if _, ok := w.shallowCopiers[v.Type()]; ok {
  269. // Shallow copy this value. Use the same logic as primitive, then
  270. // return skip.
  271. if err := w.Primitive(v); err != nil {
  272. return err
  273. }
  274. return reflectwalk.SkipEntry
  275. }
  276. return nil
  277. }
  278. func (w *walker) Interface(v reflect.Value) error {
  279. if !v.IsValid() {
  280. return nil
  281. }
  282. if w.ifaceTypes == nil {
  283. w.ifaceTypes = make(map[uint64]reflect.Type)
  284. }
  285. w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
  286. return nil
  287. }
  288. func (w *walker) Primitive(v reflect.Value) error {
  289. if w.ignoring() {
  290. return nil
  291. }
  292. w.lock(v)
  293. // IsValid verifies the v is non-zero and CanInterface verifies
  294. // that we're allowed to read this value (unexported fields).
  295. var newV reflect.Value
  296. if v.IsValid() && v.CanInterface() {
  297. newV = reflect.New(v.Type())
  298. newV.Elem().Set(v)
  299. }
  300. w.valPush(newV)
  301. w.replacePointerMaybe()
  302. return nil
  303. }
  304. func (w *walker) Slice(s reflect.Value) error {
  305. if w.ignoring() {
  306. return nil
  307. }
  308. w.lock(s)
  309. var newS reflect.Value
  310. if s.IsNil() {
  311. newS = reflect.New(s.Type())
  312. } else {
  313. newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
  314. }
  315. w.cs = append(w.cs, newS)
  316. w.valPush(newS)
  317. return nil
  318. }
  319. func (w *walker) SliceElem(i int, elem reflect.Value) error {
  320. if w.ignoring() {
  321. return nil
  322. }
  323. // We don't write the slice here because elem might still be
  324. // arbitrarily complex. Just record the index and continue on.
  325. w.valPush(reflect.ValueOf(i))
  326. return nil
  327. }
  328. func (w *walker) Array(a reflect.Value) error {
  329. if w.ignoring() {
  330. return nil
  331. }
  332. w.lock(a)
  333. newA := reflect.New(a.Type())
  334. w.cs = append(w.cs, newA)
  335. w.valPush(newA)
  336. return nil
  337. }
  338. func (w *walker) ArrayElem(i int, elem reflect.Value) error {
  339. if w.ignoring() {
  340. return nil
  341. }
  342. // We don't write the array here because elem might still be
  343. // arbitrarily complex. Just record the index and continue on.
  344. w.valPush(reflect.ValueOf(i))
  345. return nil
  346. }
  347. func (w *walker) Struct(s reflect.Value) error {
  348. if w.ignoring() {
  349. return nil
  350. }
  351. w.lock(s)
  352. var v reflect.Value
  353. if c, ok := w.copiers[s.Type()]; ok {
  354. // We have a Copier for this struct, so we use that copier to
  355. // get the copy, and we ignore anything deeper than this.
  356. w.ignoreDepth = w.depth
  357. dup, err := c(s.Interface())
  358. if err != nil {
  359. return err
  360. }
  361. // We need to put a pointer to the value on the value stack,
  362. // so allocate a new pointer and set it.
  363. v = reflect.New(s.Type())
  364. reflect.Indirect(v).Set(reflect.ValueOf(dup))
  365. } else {
  366. // No copier, we copy ourselves and allow reflectwalk to guide
  367. // us deeper into the structure for copying.
  368. v = reflect.New(s.Type())
  369. }
  370. // Push the value onto the value stack for setting the struct field,
  371. // and add the struct itself to the containers stack in case we walk
  372. // deeper so that its own fields can be modified.
  373. w.valPush(v)
  374. w.cs = append(w.cs, v)
  375. return nil
  376. }
  377. func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
  378. if w.ignoring() {
  379. return nil
  380. }
  381. // If PkgPath is non-empty, this is a private (unexported) field.
  382. // We do not set this unexported since the Go runtime doesn't allow us.
  383. if f.PkgPath != "" {
  384. return reflectwalk.SkipEntry
  385. }
  386. switch f.Tag.Get(tagKey) {
  387. case "shallow":
  388. // If we're shallow copying then assign the value directly to the
  389. // struct and skip the entry.
  390. if v.IsValid() {
  391. s := w.cs[len(w.cs)-1]
  392. sf := reflect.Indirect(s).FieldByName(f.Name)
  393. if sf.CanSet() {
  394. sf.Set(v)
  395. }
  396. }
  397. return reflectwalk.SkipEntry
  398. case "ignore":
  399. // Do nothing
  400. return reflectwalk.SkipEntry
  401. }
  402. // Push the field onto the stack, we'll handle it when we exit
  403. // the struct field in Exit...
  404. w.valPush(reflect.ValueOf(f))
  405. return nil
  406. }
// ignore marks the current depth so that all values at or below it are
// skipped until the walk exits back above this depth (Exit resets
// ignoreDepth once w.depth rises above it).
func (w *walker) ignore() {
	w.ignoreDepth = w.depth
}
  411. func (w *walker) ignoring() bool {
  412. return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
  413. }
// pointerPeek reports whether at least one pointer indirection has been
// recorded at the current depth.
func (w *walker) pointerPeek() bool {
	return w.ps[w.depth] > 0
}
  417. func (w *walker) valPop() reflect.Value {
  418. result := w.vals[len(w.vals)-1]
  419. w.vals = w.vals[:len(w.vals)-1]
  420. // If we're out of values, that means we popped everything off. In
  421. // this case, we reset the result so the next pushed value becomes
  422. // the result.
  423. if len(w.vals) == 0 {
  424. w.Result = nil
  425. }
  426. return result
  427. }
  428. func (w *walker) valPush(v reflect.Value) {
  429. w.vals = append(w.vals, v)
  430. // If we haven't set the result yet, then this is the result since
  431. // it is the first (outermost) value we're seeing.
  432. if w.Result == nil && v.IsValid() {
  433. w.Result = v.Interface()
  434. }
  435. }
// replacePointerMaybe reconciles the top of the value stack with the
// pointer indirections recorded at the current depth. Stack entries are
// held as pointers internally; if no pointer was walked at this depth the
// value is dereferenced, otherwise it is re-wrapped in the right number of
// pointers, converting through any interface types recorded along the way.
func (w *walker) replacePointerMaybe() {
	// Determine the last pointer value. If it is NOT a pointer, then
	// we need to push that onto the stack.
	if !w.pointerPeek() {
		w.valPush(reflect.Indirect(w.valPop()))
		return
	}

	v := w.valPop()

	// If the expected type is a pointer to an interface of any depth,
	// such as *interface{}, **interface{}, etc., then we need to convert
	// the value "v" from *CONCRETE to *interface{} so types match for
	// Set.
	//
	// Example if v is type *Foo where Foo is a struct, v would become
	// *interface{} instead. This only happens if we have an interface
	// expectation at this depth.
	//
	// For more info, see GH-16
	if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
		y := reflect.New(iType)           // Create *interface{}
		y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
		v = y                             // v is now typed *interface{} (where *v = Foo)
	}

	// Re-wrap one pointer per remaining indirection seen at this depth,
	// converting to a recorded interface type at each level first so the
	// pointer chain matches what the original value looked like.
	for i := 1; i < w.ps[w.depth]; i++ {
		if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
			iface := reflect.New(iType).Elem()
			iface.Set(v)
			v = iface
		}
		p := reflect.New(v.Type())
		p.Elem().Set(v)
		v = p
	}

	w.valPush(v)
}
  471. // if this value is a Locker, lock it and add it to the locks slice
  472. func (w *walker) lock(v reflect.Value) {
  473. if !w.useLocks {
  474. return
  475. }
  476. if !v.IsValid() || !v.CanInterface() {
  477. return
  478. }
  479. type rlocker interface {
  480. RLocker() sync.Locker
  481. }
  482. var locker sync.Locker
  483. // We can't call Interface() on a value directly, since that requires
  484. // a copy. This is OK, since the pointer to a value which is a sync.Locker
  485. // is also a sync.Locker.
  486. if v.Kind() == reflect.Ptr {
  487. switch l := v.Interface().(type) {
  488. case rlocker:
  489. // don't lock a mutex directly
  490. if _, ok := l.(*sync.RWMutex); !ok {
  491. locker = l.RLocker()
  492. }
  493. case sync.Locker:
  494. locker = l
  495. }
  496. } else if v.CanAddr() {
  497. switch l := v.Addr().Interface().(type) {
  498. case rlocker:
  499. // don't lock a mutex directly
  500. if _, ok := l.(*sync.RWMutex); !ok {
  501. locker = l.RLocker()
  502. }
  503. case sync.Locker:
  504. locker = l
  505. }
  506. }
  507. // still no callable locker
  508. if locker == nil {
  509. return
  510. }
  511. // don't lock a mutex directly
  512. switch locker.(type) {
  513. case *sync.Mutex, *sync.RWMutex:
  514. return
  515. }
  516. locker.Lock()
  517. w.locks[w.depth] = locker
  518. }
  519. // wrapPtr is a helper that takes v and always make it *v. copystructure
  520. // stores things internally as pointers until the last moment before unwrapping
  521. func wrapPtr(v reflect.Value) reflect.Value {
  522. if !v.IsValid() {
  523. return v
  524. }
  525. vPtr := reflect.New(v.Type())
  526. vPtr.Elem().Set(v)
  527. return vPtr
  528. }