---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead
###########
# GENERAL #
###########
#fetch_directory: fetch/
# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
####################
# OSD CRUSH LOCATION
####################
# /!\
#
# BE EXTREMELY CAREFUL WITH THIS OPTION
# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
#
# /!\
#
# It is probably best to keep this option set to 'false', as the default
# suggests. This option should only be used when building a complex
# CRUSH map. It allows you to force a specific location for a set of OSDs.
#
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
# osd crush location = "root=location"
#
# This works with your inventory file.
# To match the 'osd_crush_location' option below, the inventory must look like:
#
# [osds]
# osd0 ceph_crush_root=foo ceph_crush_rack=bar
#crush_location: false
#osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
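#
# For illustration only (host and OSD id are assumed, not defaults): with the
# inventory entry above, the template would render a ceph.conf section like:
#
# [osd.0]
# osd crush location = 'root=foo rack=bar host=osd0'
#
# assuming 'osd0' is the node's ansible_hostname and it runs osd.0.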
##############
# CEPH OPTIONS
##############
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#cephx: true
# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disks will be
# automatically configured during the next run.
#
# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration
devices:
- /dev/nvme1n1
- /dev/nvme2n1
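#
# Example (illustrative device name): a disk that is not installed yet can be
# listed up front; it is skipped until it shows up on the host.
#devices:
#  - /dev/nvme1n1
#  - /dev/nvme2n1
#  - /dev/nvme3n1  # not present yet, picked up on a later run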
# 'osd_auto_discovery' mode means you do not fill out the 'devices' variable above.
# You can use this option with the first, fourth, and fifth OSD scenarios.
# Device discovery is based on the Ansible fact 'ansible_devices',
# which reports all the devices on a system. If enabled, all the disks
# found will be passed to ceph-disk. You should not worry about using
# this option, since ceph-disk has a built-in check which looks for empty devices;
# devices with existing partition tables will not be used.
#
osd_auto_discovery: false
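#
# Example (illustrative, keep only one scenario enabled): letting Ansible
# discover empty disks instead of listing them under 'devices', combined
# with the collocated journal scenario below.
#osd_auto_discovery: true
#journal_collocation: true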
# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
#
# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
# List devices under the 'devices' variable above or enable 'osd_auto_discovery'
journal_collocation: true
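#
# Example (illustrative device names): scenario I with explicitly listed
# disks; each disk gets a journal partition created at its beginning.
#devices:
#  - /dev/sdb
#  - /dev/sdc
#journal_collocation: true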
# II. Second scenario: N journal devices for N OSDs
# Use 'true' for 'raw_multi_journal' to enable this scenario
# List devices under the 'devices' variable above and
# their journal devices under 'raw_journal_devices'
# In the following example:
# * sdb and sdc will get sdf as a journal
# * sdd and sde will get sdg as a journal
# When starting, you have two options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
#raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
#raw_journal_devices: []
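#
# Example (illustrative device names) matching the mapping described above,
# i.e. sdb and sdc journal on sdf, sdd and sde journal on sdg; remember that
# only one scenario may be enabled at a time.
#devices:
#  - /dev/sdb
#  - /dev/sdc
#  - /dev/sdd
#  - /dev/sde
#raw_multi_journal: true
#raw_journal_devices:
#  - /dev/sdf
#  - /dev/sdf
#  - /dev/sdg
#  - /dev/sdg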
# III. Use directory instead of disk for OSDs
# Use 'true' to enable this scenario
#osd_directory: false
#osd_directories:
# - /var/lib/ceph/osd/mydir1
# - /var/lib/ceph/osd/mydir2
#osd_directories: []
# IV. This will partition disks for BlueStore
# Use 'true' to enable this scenario
#bluestore: false
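#
# Example (illustrative): partitioning the disks listed under 'devices'
# for BlueStore; remember to turn scenario I above back off first.
#journal_collocation: false
#bluestore: true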
# V. Encrypt osd data and/or journal devices with dm-crypt.
# Keys are stored in the monitors' k/v store
# Use 'true' to enable this scenario
# Both journal and data are stored on the same dm-crypt encrypted device
#dmcrypt_journal_collocation: false
# VI. Encrypt osd data and/or journal devices with dm-crypt.
# Keys are stored in the monitors' k/v store
# Use 'true' to enable this scenario
# Journal and osd data are separated, each with their own dm-crypt device
# You must use raw_journal_devices and set your journal devices
#dmcrypt_dedicated_journal: false
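#
# Example (illustrative device names): dedicated dm-crypt journals, with the
# data disks under 'devices' and their journals under 'raw_journal_devices'.
#devices:
#  - /dev/sdb
#  - /dev/sdc
#raw_journal_devices:
#  - /dev/sdf
#  - /dev/sdf
#dmcrypt_dedicated_journal: true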
##########
# DOCKER #
##########
#osd_containerized_deployment: false
#osd_containerized_deployment_with_kv: false
#kv_type: etcd
#kv_endpoint: 127.0.0.1
#kv_port: 4001
#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
#ceph_osd_docker_username: ceph
#ceph_osd_docker_imagename: daemon
#ceph_osd_docker_image_tag: latest
#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
#ceph_osd_docker_devices: "{{ devices }}"
#ceph_docker_on_openstack: false
#ceph_config_keys: [] # DON'T TOUCH ME
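#
# Example (the etcd endpoint address is illustrative, not a default):
# a containerized OSD deployment backed by an etcd key/value store.
#osd_containerized_deployment: true
#osd_containerized_deployment_with_kv: true
#kv_type: etcd
#kv_endpoint: 192.168.0.10
#kv_port: 4001
#ceph_osd_docker_username: ceph
#ceph_osd_docker_imagename: daemon
#ceph_osd_docker_image_tag: latest
#ceph_osd_docker_devices: "{{ devices }}"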