Crush Map:
# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable straw_calc_version 1

# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5
device 6 osd.6

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root

# buckets
root sata {
    id -22    # do not change unnecessarily
    # weight 0.000
    alg straw
    hash 0    # rjenkins1
    item osd.0 weight 0.270
    item osd.1 weight 0.270
    item osd.2 weight 0.270
}
root ssd {
    id -21    # do not change unnecessarily
    # weight 0.000
    alg straw
    hash 0    # rjenkins1
    item osd.5 weight 0.180
    item osd.6 weight 0.180
    item osd.4 weight 0.270
    item osd.3 weight 0.270
}

# rules
rule sata {
    ruleset 0
    type replicated
    min_size 1
    max_size 10
    step take sata
    step chooseleaf firstn 0 type osd
    step emit
}
rule ssd {
    ruleset 1
    type erasure
    min_size 1
    max_size 10
    step take ssd
    step chooseleaf firstn 0 type osd
    step emit
}
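The ssd rule can be sanity-checked offline with crushtool before creating the pool; a minimal sketch, assuming the map above was decompiled to crushmap.txt (the file names are placeholders):

# recompile the edited map and simulate ruleset 1 (ssd) for 3 chunks
crushtool -c crushmap.txt -o crushmap.bin
crushtool --test -i crushmap.bin --rule 1 --num-rep 3 --show-mappings
# any input that maps to fewer than 3 OSDs is reported here
crushtool --test -i crushmap.bin --rule 1 --num-rep 3 --show-bad-mappings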
ceph osd erasure-code-profile set myprofile k=2 m=1 ruleset-root=ssd plugin=jerasure ruleset-failure-domain=osd
ceph osd pool create ecpool 128 128 erasure myprofile
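For reference, the profile contents and the ruleset the pool actually ended up with can be confirmed like this (option names as of this 2015-era release; later releases renamed crush_ruleset to crush_rule):

ceph osd erasure-code-profile get myprofile
ceph osd pool get ecpool crush_ruleset
ceph osd pool get ecpool erasure_code_profile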
The PGs are getting stuck in the creating state:
9.e 0 0 0 0 0 0 0 0 creating 0.000000 0'0 0:0 [NONE,NONE,NONE] -1 [NONE,NONE,NONE] -1 0'0 2015-06-10 04:47:55.642688 0'0 2015-06-10 04:47:55.642688
9.f 0 0 0 0 0 0 0 0 creating 0.000000 0'0 0:0 [NONE,NONE,NONE] -1 [NONE,NONE,NONE] -1 0'0 2015-06-10 04:47:55.642688 0'0 2015-06-10 04:47:55.642688
9.c 0 0 0 0 0 0 0 0 creating 0.000000 0'0 0:0 [NONE,NONE,NONE] -1 [NONE,NONE,NONE] -1 0'0 2015-06-10 04:47:55.642686 0'0 2015-06-10 04:47:55.642686
9.d 0 0 0 0 0 0 0 0 creating 0.000000 0'0 0:0 [NONE,NONE,NONE] -1 [NONE,NONE,NONE] -1 0'0 2015-06-10 04:47:55.642687 0'0 2015-06-10 04:47:55.642687
9.a 0 0 0 0 0 0 0 0 creating 0.000000 0'0 0:0 [NONE,NONE,NONE] -1 [NONE,NONE,NONE] -1 0'0 2015-06-10 04:47:55.642685 0'0 2015-06-10 04:47:55.642685
9.b 0 0 0 0 0 0 0 0 creating 0.000000 0'0 0:0 [NONE,NONE,NONE] -1 [NONE,NONE,NONE] -1 0'0 2015-06-10 04:47:55.642686 0'0 2015-06-10 04:47:55.642686
9.8 0 0 0 0 0 0 0 0 creating 0.000000 0'0 0:0 [NONE,NONE,NONE] -1 [NONE,NONE,NONE] -1 0'0 2015-06-10 04:47:55.642683 0'0 2015-06-10 04:47:55.642683
9.9 0 0 0 0 0 0 0 0 creating 0.000000 0'0 0:0 [NONE,NONE,NONE] -1 [NONE,NONE,NONE] -1 0'0 2015-06-10 04:47:55.642684 0'0 2015-06-10 04:47:55.642684
pool 9 0 0 0 0 0 0 0 0
sum 0 0 0 0 0 0 0 0
osdstat kbused kbavail   kb        hb in       hb out
0       36300  287001188 287037488 []          []
1       36456  287001032 287037488 [0]         []
2       36628  287000860 287037488 [0,1]       []
3       36684  287000804 287037488 [0,1,2]     []
4       36748  287000740 287037488 [0,1,2,3]   []
5       36564  189399964 189436528 [0,1,2,3,4] []
6       36688  189399840 189436528 [0,1,2,3,4,5] []
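The usual next diagnostic steps are sketched below; 9.f is just one of the stuck PGs listed above, and pg query may block while a PG has no acting primary ([NONE,NONE,NONE]):

# list PGs stuck inactive (creating PGs are inactive)
ceph pg dump_stuck inactive
# hierarchy and weights as the cluster currently sees them
ceph osd tree
# per-PG peering state; may not return while no primary exists
ceph pg 9.f query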