diff --git a/ceilometer/api/controllers/v2/root.py b/ceilometer/api/controllers/v2/root.py index 624126dc..27e6ea2a 100644 --- a/ceilometer/api/controllers/v2/root.py +++ b/ceilometer/api/controllers/v2/root.py @@ -121,10 +121,6 @@ def gnocchi_is_enabled(self): if pecan.request.cfg.api.gnocchi_is_enabled is not None: self._gnocchi_is_enabled = ( pecan.request.cfg.api.gnocchi_is_enabled) - - elif ("gnocchi" not in pecan.request.cfg.meter_dispatchers - or "database" in pecan.request.cfg.meter_dispatchers): - self._gnocchi_is_enabled = False else: try: catalog = keystone_client.get_service_catalog( diff --git a/ceilometer/dispatcher/data/gnocchi_resources.yaml b/ceilometer/dispatcher/data/gnocchi_resources.yaml deleted file mode 100644 index a92ce7d5..00000000 --- a/ceilometer/dispatcher/data/gnocchi_resources.yaml +++ /dev/null @@ -1,298 +0,0 @@ ---- - -resources: - - resource_type: identity - metrics: - - 'identity.authenticate.success' - - 'identity.authenticate.pending' - - 'identity.authenticate.failure' - - 'identity.user.created' - - 'identity.user.deleted' - - 'identity.user.updated' - - 'identity.group.created' - - 'identity.group.deleted' - - 'identity.group.updated' - - 'identity.role.created' - - 'identity.role.deleted' - - 'identity.role.updated' - - 'identity.project.created' - - 'identity.project.deleted' - - 'identity.project.updated' - - 'identity.trust.created' - - 'identity.trust.deleted' - - 'identity.role_assignment.created' - - 'identity.role_assignment.deleted' - - - resource_type: ceph_account - metrics: - - 'radosgw.objects' - - 'radosgw.objects.size' - - 'radosgw.objects.containers' - - 'radosgw.api.request' - - 'radosgw.containers.objects' - - 'radosgw.containers.objects.size' - - - resource_type: instance - metrics: - - 'memory' - - 'memory.usage' - - 'memory.resident' - - 'memory.swap.in' - - 'memory.swap.out' - - 'memory.bandwidth.total' - - 'memory.bandwidth.local' - - 'vcpus' - - 'cpu' - - 'cpu.delta' - - 'cpu_util' - - 'cpu_l3_cache' - 
- 'disk.root.size' - - 'disk.ephemeral.size' - - 'disk.read.requests' - - 'disk.read.requests.rate' - - 'disk.write.requests' - - 'disk.write.requests.rate' - - 'disk.read.bytes' - - 'disk.read.bytes.rate' - - 'disk.write.bytes' - - 'disk.write.bytes.rate' - - 'disk.latency' - - 'disk.iops' - - 'disk.capacity' - - 'disk.allocation' - - 'disk.usage' - - 'compute.instance.booting.time' - - 'perf.cpu.cycles' - - 'perf.instructions' - - 'perf.cache.references' - - 'perf.cache.misses' - attributes: - host: resource_metadata.(instance_host|host) - image_ref: resource_metadata.image_ref - display_name: resource_metadata.display_name - flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id) - flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name) - server_group: resource_metadata.user_metadata.server_group - event_delete: compute.instance.delete.start - event_attributes: - id: instance_id - event_associated_resources: - instance_network_interface: '{"=": {"instance_id": "%s"}}' - instance_disk: '{"=": {"instance_id": "%s"}}' - - - resource_type: instance_network_interface - metrics: - - 'network.outgoing.packets.rate' - - 'network.incoming.packets.rate' - - 'network.outgoing.packets' - - 'network.incoming.packets' - - 'network.outgoing.packets.drop' - - 'network.incoming.packets.drop' - - 'network.outgoing.packets.error' - - 'network.incoming.packets.error' - - 'network.outgoing.bytes.rate' - - 'network.incoming.bytes.rate' - - 'network.outgoing.bytes' - - 'network.incoming.bytes' - attributes: - name: resource_metadata.vnic_name - instance_id: resource_metadata.instance_id - - - resource_type: instance_disk - metrics: - - 'disk.device.read.requests' - - 'disk.device.read.requests.rate' - - 'disk.device.write.requests' - - 'disk.device.write.requests.rate' - - 'disk.device.read.bytes' - - 'disk.device.read.bytes.rate' - - 'disk.device.write.bytes' - - 'disk.device.write.bytes.rate' - - 'disk.device.latency' - - 'disk.device.iops' - - 
'disk.device.capacity' - - 'disk.device.allocation' - - 'disk.device.usage' - attributes: - name: resource_metadata.disk_name - instance_id: resource_metadata.instance_id - - - resource_type: image - metrics: - - 'image.size' - - 'image.download' - - 'image.serve' - attributes: - name: resource_metadata.name - container_format: resource_metadata.container_format - disk_format: resource_metadata.disk_format - event_delete: image.delete - event_attributes: - id: resource_id - - - resource_type: ipmi - metrics: - - 'hardware.ipmi.node.power' - - 'hardware.ipmi.node.temperature' - - 'hardware.ipmi.node.inlet_temperature' - - 'hardware.ipmi.node.outlet_temperature' - - 'hardware.ipmi.node.fan' - - 'hardware.ipmi.node.current' - - 'hardware.ipmi.node.voltage' - - 'hardware.ipmi.node.airflow' - - 'hardware.ipmi.node.cups' - - 'hardware.ipmi.node.cpu_util' - - 'hardware.ipmi.node.mem_util' - - 'hardware.ipmi.node.io_util' - - - resource_type: network - metrics: - - 'bandwidth' - - 'ip.floating' - event_delete: floatingip.delete.end - event_attributes: - id: resource_id - - - resource_type: stack - metrics: - - 'stack.create' - - 'stack.update' - - 'stack.delete' - - 'stack.resume' - - 'stack.suspend' - - - resource_type: swift_account - metrics: - - 'storage.objects.incoming.bytes' - - 'storage.objects.outgoing.bytes' - - 'storage.api.request' - - 'storage.objects.size' - - 'storage.objects' - - 'storage.objects.containers' - - 'storage.containers.objects' - - 'storage.containers.objects.size' - - - resource_type: volume - metrics: - - 'volume' - - 'volume.size' - - 'snapshot.size' - - 'volume.snapshot.size' - - 'volume.backup.size' - attributes: - display_name: resource_metadata.(display_name|name) - volume_type: resource_metadata.volume_type - event_delete: volume.delete.start - event_attributes: - id: resource_id - - - resource_type: host - metrics: - - 'hardware.cpu.load.1min' - - 'hardware.cpu.load.5min' - - 'hardware.cpu.load.15min' - - 'hardware.cpu.util' - - 
'hardware.memory.total' - - 'hardware.memory.used' - - 'hardware.memory.swap.total' - - 'hardware.memory.swap.avail' - - 'hardware.memory.buffer' - - 'hardware.memory.cached' - - 'hardware.network.ip.outgoing.datagrams' - - 'hardware.network.ip.incoming.datagrams' - - 'hardware.system_stats.cpu.idle' - - 'hardware.system_stats.io.outgoing.blocks' - - 'hardware.system_stats.io.incoming.blocks' - attributes: - host_name: resource_metadata.resource_url - - - resource_type: host_disk - metrics: - - 'hardware.disk.size.total' - - 'hardware.disk.size.used' - attributes: - host_name: resource_metadata.resource_url - device_name: resource_metadata.device - - - resource_type: host_network_interface - metrics: - - 'hardware.network.incoming.bytes' - - 'hardware.network.outgoing.bytes' - - 'hardware.network.outgoing.errors' - attributes: - host_name: resource_metadata.resource_url - device_name: resource_metadata.name - - - resource_type: nova_compute - metrics: - - 'compute.node.cpu.frequency' - - 'compute.node.cpu.idle.percent' - - 'compute.node.cpu.idle.time' - - 'compute.node.cpu.iowait.percent' - - 'compute.node.cpu.iowait.time' - - 'compute.node.cpu.kernel.percent' - - 'compute.node.cpu.kernel.time' - - 'compute.node.cpu.percent' - - 'compute.node.cpu.user.percent' - - 'compute.node.cpu.user.time' - attributes: - host_name: resource_metadata.host - - - resource_type: manila_share - metrics: - - 'manila.share.size' - attributes: - name: resource_metadata.name - host: resource_metadata.host - status: resource_metadata.status - availability_zone: resource_metadata.availability_zone - protocol: resource_metadata.protocol - - - resource_type: switch - metrics: - - 'switch' - - 'switch.ports' - attributes: - controller: resource_metadata.controller - - - resource_type: switch_port - metrics: - - 'switch.port' - - 'switch.port.uptime' - - 'switch.port.receive.packets' - - 'switch.port.transmit.packets' - - 'switch.port.receive.bytes' - - 'switch.port.transmit.bytes' - - 
'switch.port.receive.drops' - - 'switch.port.transmit.drops' - - 'switch.port.receive.errors' - - 'switch.port.transmit.errors' - - 'switch.port.receive.frame_error' - - 'switch.port.receive.overrun_error' - - 'switch.port.receive.crc_error' - - 'switch.port.collision.count' - attributes: - switch: resource_metadata.switch - port_number_on_switch: resource_metadata.port_number_on_switch - neutron_port_id: resource_metadata.neutron_port_id - controller: resource_metadata.controller - - - resource_type: port - metrics: - - 'port' - - 'port.uptime' - - 'port.receive.packets' - - 'port.transmit.packets' - - 'port.receive.bytes' - - 'port.transmit.bytes' - - 'port.receive.drops' - - 'port.receive.errors' - attributes: - controller: resource_metadata.controller - - - resource_type: switch_table - metrics: - - 'switch.table.active.entries' - attributes: - controller: resource_metadata.controller - switch: resource_metadata.switch diff --git a/ceilometer/dispatcher/gnocchi_opts.py b/ceilometer/dispatcher/gnocchi_opts.py deleted file mode 100644 index 06be4466..00000000 --- a/ceilometer/dispatcher/gnocchi_opts.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg - - -dispatcher_opts = [ - cfg.BoolOpt('filter_service_activity', - default=True, - help='Filter out samples generated by Gnocchi ' - 'service activity'), - cfg.StrOpt('filter_project', - default='gnocchi', - help='Gnocchi project used to filter out samples ' - 'generated by Gnocchi service activity'), - cfg.StrOpt('archive_policy', - help='The archive policy to use when the dispatcher ' - 'create a new metric.'), - cfg.StrOpt('resources_definition_file', - default='gnocchi_resources.yaml', - help=('The Yaml file that defines mapping between samples ' - 'and gnocchi resources/metrics')), - cfg.FloatOpt('request_timeout', default=6.05, min=0.0, - help='Number of seconds before request to gnocchi times out'), -] diff --git a/ceilometer/gnocchi_client.py b/ceilometer/gnocchi_client.py index 69948f69..4f6e8062 100644 --- a/ceilometer/gnocchi_client.py +++ b/ceilometer/gnocchi_client.py @@ -21,12 +21,10 @@ LOG = log.getLogger(__name__) -def get_gnocchiclient(conf, timeout_override=False): - group = conf.dispatcher_gnocchi.auth_section - timeout = (None if (not conf.dispatcher_gnocchi.request_timeout or - timeout_override) - else conf.dispatcher_gnocchi.request_timeout) - session = keystone_client.get_session(conf, group=group, timeout=timeout) +def get_gnocchiclient(conf, request_timeout=None): + group = conf.gnocchi.auth_section + session = keystone_client.get_session(conf, group=group, + timeout=request_timeout) adapter = keystoneauth1.session.TCPKeepAliveAdapter( pool_maxsize=conf.max_parallel_requests) session.mount("http://", adapter) @@ -188,7 +186,7 @@ def get_gnocchiclient(conf, timeout_override=False): def upgrade_resource_types(conf): - gnocchi = get_gnocchiclient(conf, True) + gnocchi = get_gnocchiclient(conf) for name, attributes in resources_initial.items(): try: gnocchi.resource_type.get(name=name) diff --git a/ceilometer/keystone_client.py b/ceilometer/keystone_client.py index 7323575b..835bf3fc 100644 --- 
a/ceilometer/keystone_client.py +++ b/ceilometer/keystone_client.py @@ -23,7 +23,7 @@ # List of group that can set auth_section to use a different # credentials section -OVERRIDABLE_GROUPS = ['dispatcher_gnocchi', 'zaqar'] +OVERRIDABLE_GROUPS = ['gnocchi', 'zaqar'] def get_session(conf, requests_session=None, group=None, timeout=None): diff --git a/ceilometer/opts.py b/ceilometer/opts.py index e97025c4..60d00a24 100644 --- a/ceilometer/opts.py +++ b/ceilometer/opts.py @@ -28,7 +28,6 @@ import ceilometer.compute.virt.xenapi.inspector import ceilometer.dispatcher import ceilometer.dispatcher.file -import ceilometer.dispatcher.gnocchi_opts import ceilometer.dispatcher.http import ceilometer.event.converter import ceilometer.exchange_control @@ -122,8 +121,28 @@ def list_opts(): ('database', ceilometer.storage.OPTS), ('dispatcher_file', ceilometer.dispatcher.file.OPTS), ('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts), - ('dispatcher_gnocchi', - ceilometer.dispatcher.gnocchi_opts.dispatcher_opts), + ('dispatcher_gnocchi', ( + cfg.StrOpt( + 'filter_project', + deprecated_for_removal=True, + default='gnocchi', + help='Gnocchi project used to filter out samples ' + 'generated by Gnocchi service activity'), + cfg.StrOpt( + 'archive_policy', + deprecated_for_removal=True, + help='The archive policy to use when the dispatcher ' + 'create a new metric.'), + cfg.StrOpt( + 'resources_definition_file', + deprecated_for_removal=True, + default='gnocchi_resources.yaml', + help=('The Yaml file that defines mapping between samples ' + 'and gnocchi resources/metrics')), + cfg.FloatOpt( + 'request_timeout', default=6.05, min=0.0, + deprecated_for_removal=True, + help='Number of seconds before request to gnocchi times out'))), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, diff --git a/ceilometer/pipeline/data/event_pipeline.yaml b/ceilometer/pipeline/data/event_pipeline.yaml index d2311b15..6482f4f2 
100644 --- a/ceilometer/pipeline/data/event_pipeline.yaml +++ b/ceilometer/pipeline/data/event_pipeline.yaml @@ -9,4 +9,4 @@ sinks: - name: event_sink transformers: publishers: - - direct://?dispatcher=panko + - gnocchi:// diff --git a/ceilometer/pipeline/data/pipeline.yaml b/ceilometer/pipeline/data/pipeline.yaml index 173643f4..6e361df0 100644 --- a/ceilometer/pipeline/data/pipeline.yaml +++ b/ceilometer/pipeline/data/pipeline.yaml @@ -1,3 +1,4 @@ +--- sources: - name: meter_source meters: @@ -10,7 +11,6 @@ sources: sinks: - cpu_sink - cpu_delta_sink - - vcpu_sink - name: disk_source meters: - "disk.read.bytes" @@ -35,7 +35,7 @@ sinks: - name: meter_sink transformers: publishers: - - notifier:// + - gnocchi:// - name: cpu_sink transformers: - name: "rate_of_change" @@ -47,7 +47,7 @@ sinks: max: 100 scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" publishers: - - notifier:// + - gnocchi:// - name: cpu_delta_sink transformers: - name: "delta" @@ -56,19 +56,7 @@ sinks: name: "cpu.delta" growth_only: True publishers: - - notifier:// - - name: vcpu_sink - transformers: - - name: "rate_of_change" - parameters: - target: - name: "vcpu_util" - unit: "%" - type: "gauge" - max: 100 - scale: "100.0 / (10**9 * (resource_metadata.vcpu_number or 1))" - publishers: - - notifier:// + - gnocchi:// - name: disk_sink transformers: - name: "rate_of_change" @@ -83,7 +71,7 @@ sinks: unit: "\\1/s" type: "gauge" publishers: - - notifier:// + - gnocchi:// - name: network_sink transformers: - name: "rate_of_change" @@ -98,4 +86,4 @@ sinks: unit: "\\1/s" type: "gauge" publishers: - - notifier:// + - gnocchi:// diff --git a/ceilometer/publisher/data/gnocchi_resources.yaml b/ceilometer/publisher/data/gnocchi_resources.yaml new file mode 100644 index 00000000..567b50f2 --- /dev/null +++ b/ceilometer/publisher/data/gnocchi_resources.yaml @@ -0,0 +1,358 @@ +--- +archive_policy_default: ceilometer-low +archive_policies: + # NOTE(sileht): We keep "mean" for now to not break all 
gating that + # use the current tempest scenario. + - name: ceilometer-low + aggregation_methods: + - mean + back_window: 0 + definition: + - granularity: 5 minutes + timespan: 30 days + - name: ceilometer-low-rate + aggregation_methods: + - mean + - rate:mean + back_window: 0 + definition: + - granularity: 5 minutes + timespan: 30 days + +resources: + - resource_type: identity + metrics: + identity.authenticate.success: + identity.authenticate.pending: + identity.authenticate.failure: + identity.user.created: + identity.user.deleted: + identity.user.updated: + identity.group.created: + identity.group.deleted: + identity.group.updated: + identity.role.created: + identity.role.deleted: + identity.role.updated: + identity.project.created: + identity.project.deleted: + identity.project.updated: + identity.trust.created: + identity.trust.deleted: + identity.role_assignment.created: + identity.role_assignment.deleted: + + - resource_type: ceph_account + metrics: + radosgw.objects: + radosgw.objects.size: + radosgw.objects.containers: + radosgw.api.request: + radosgw.containers.objects: + radosgw.containers.objects.size: + + - resource_type: instance + metrics: + memory: + memory.usage: + memory.resident: + memory.swap.in: + memory.swap.out: + memory.bandwidth.total: + memory.bandwidth.local: + vcpus: + cpu: + archive_policy_name: ceilometer-low-rate + cpu.delta: + cpu_util: + cpu_l3_cache: + disk.root.size: + disk.ephemeral.size: + disk.read.requests: + archive_policy_name: ceilometer-low-rate + disk.read.requests.rate: + disk.write.requests: + archive_policy_name: ceilometer-low-rate + disk.write.requests.rate: + disk.read.bytes: + archive_policy_name: ceilometer-low-rate + disk.read.bytes.rate: + disk.write.bytes: + archive_policy_name: ceilometer-low-rate + disk.write.bytes.rate: + disk.latency: + disk.iops: + disk.capacity: + disk.allocation: + disk.usage: + compute.instance.booting.time: + perf.cpu.cycles: + perf.instructions: + perf.cache.references: + 
perf.cache.misses: + attributes: + host: resource_metadata.(instance_host|host) + image_ref: resource_metadata.image_ref + display_name: resource_metadata.display_name + flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id) + flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name) + server_group: resource_metadata.user_metadata.server_group + event_delete: compute.instance.delete.start + event_attributes: + id: instance_id + event_associated_resources: + instance_network_interface: '{"=": {"instance_id": "%s"}}' + instance_disk: '{"=": {"instance_id": "%s"}}' + + - resource_type: instance_network_interface + metrics: + network.outgoing.packets.rate: + network.incoming.packets.rate: + network.outgoing.packets: + archive_policy_name: ceilometer-low-rate + network.incoming.packets: + archive_policy_name: ceilometer-low-rate + network.outgoing.packets.drop: + archive_policy_name: ceilometer-low-rate + network.incoming.packets.drop: + archive_policy_name: ceilometer-low-rate + network.outgoing.packets.error: + archive_policy_name: ceilometer-low-rate + network.incoming.packets.error: + archive_policy_name: ceilometer-low-rate + network.outgoing.bytes.rate: + network.incoming.bytes.rate: + network.outgoing.bytes: + archive_policy_name: ceilometer-low-rate + network.incoming.bytes: + archive_policy_name: ceilometer-low-rate + attributes: + name: resource_metadata.vnic_name + instance_id: resource_metadata.instance_id + + - resource_type: instance_disk + metrics: + disk.device.read.requests: + archive_policy_name: ceilometer-low-rate + disk.device.read.requests.rate: + disk.device.write.requests: + archive_policy_name: ceilometer-low-rate + disk.device.write.requests.rate: + disk.device.read.bytes: + archive_policy_name: ceilometer-low-rate + disk.device.read.bytes.rate: + disk.device.write.bytes: + archive_policy_name: ceilometer-low-rate + disk.device.write.bytes.rate: + disk.device.latency: + disk.device.read.latency: + 
disk.device.write.latency: + disk.device.iops: + disk.device.capacity: + disk.device.allocation: + disk.device.usage: + attributes: + name: resource_metadata.disk_name + instance_id: resource_metadata.instance_id + + - resource_type: image + metrics: + image.size: + image.download: + image.serve: + attributes: + name: resource_metadata.name + container_format: resource_metadata.container_format + disk_format: resource_metadata.disk_format + event_delete: image.delete + event_attributes: + id: resource_id + + - resource_type: ipmi + metrics: + hardware.ipmi.node.power: + hardware.ipmi.node.temperature: + hardware.ipmi.node.inlet_temperature: + hardware.ipmi.node.outlet_temperature: + hardware.ipmi.node.fan: + hardware.ipmi.node.current: + hardware.ipmi.node.voltage: + hardware.ipmi.node.airflow: + hardware.ipmi.node.cups: + hardware.ipmi.node.cpu_util: + hardware.ipmi.node.mem_util: + hardware.ipmi.node.io_util: + + - resource_type: network + metrics: + bandwidth: + ip.floating: + event_delete: floatingip.delete.end + event_attributes: + id: resource_id + + - resource_type: stack + metrics: + stack.create: + stack.update: + stack.delete: + stack.resume: + stack.suspend: + + - resource_type: swift_account + metrics: + storage.objects.incoming.bytes: + storage.objects.outgoing.bytes: + storage.api.request: + storage.objects.size: + storage.objects: + storage.objects.containers: + storage.containers.objects: + storage.containers.objects.size: + + - resource_type: volume + metrics: + volume: + volume.size: + snapshot.size: + volume.snapshot.size: + volume.backup.size: + attributes: + display_name: resource_metadata.(display_name|name) + volume_type: resource_metadata.volume_type + event_delete: volume.delete.start + event_attributes: + id: resource_id + + - resource_type: volume_provider + metrics: + volume.provider.capacity.total: + volume.provider.capacity.free: + volume.provider.capacity.allocated: + volume.provider.capacity.provisioned: + 
volume.provider.capacity.virtual_free: + + - resource_type: volume_provider_pool + metrics: + volume.provider.pool.capacity.total: + volume.provider.pool.capacity.free: + volume.provider.pool.capacity.allocated: + volume.provider.pool.capacity.provisioned: + volume.provider.pool.capacity.virtual_free: + attributes: + provider: resource_metadata.provider + + - resource_type: host + metrics: + hardware.cpu.load.1min: + hardware.cpu.load.5min: + hardware.cpu.load.15min: + hardware.cpu.util: + hardware.memory.total: + hardware.memory.used: + hardware.memory.swap.total: + hardware.memory.swap.avail: + hardware.memory.buffer: + hardware.memory.cached: + hardware.network.ip.outgoing.datagrams: + hardware.network.ip.incoming.datagrams: + hardware.system_stats.cpu.idle: + hardware.system_stats.io.outgoing.blocks: + hardware.system_stats.io.incoming.blocks: + attributes: + host_name: resource_metadata.resource_url + + - resource_type: host_disk + metrics: + hardware.disk.size.total: + hardware.disk.size.used: + hardware.disk.read.bytes: + hardware.disk.write.bytes: + hardware.disk.read.requests: + hardware.disk.write.requests: + attributes: + host_name: resource_metadata.resource_url + device_name: resource_metadata.device + + - resource_type: host_network_interface + metrics: + hardware.network.incoming.bytes: + hardware.network.outgoing.bytes: + hardware.network.outgoing.errors: + attributes: + host_name: resource_metadata.resource_url + device_name: resource_metadata.name + + - resource_type: nova_compute + metrics: + compute.node.cpu.frequency: + compute.node.cpu.idle.percent: + compute.node.cpu.idle.time: + compute.node.cpu.iowait.percent: + compute.node.cpu.iowait.time: + compute.node.cpu.kernel.percent: + compute.node.cpu.kernel.time: + compute.node.cpu.percent: + compute.node.cpu.user.percent: + compute.node.cpu.user.time: + attributes: + host_name: resource_metadata.host + + - resource_type: manila_share + metrics: + manila.share.size: + attributes: + name: 
resource_metadata.name + host: resource_metadata.host + status: resource_metadata.status + availability_zone: resource_metadata.availability_zone + protocol: resource_metadata.protocol + + - resource_type: switch + metrics: + switch: + switch.ports: + attributes: + controller: resource_metadata.controller + + - resource_type: switch_port + metrics: + switch.port: + switch.port.uptime: + switch.port.receive.packets: + switch.port.transmit.packets: + switch.port.receive.bytes: + switch.port.transmit.bytes: + switch.port.receive.drops: + switch.port.transmit.drops: + switch.port.receive.errors: + switch.port.transmit.errors: + switch.port.receive.frame_error: + switch.port.receive.overrun_error: + switch.port.receive.crc_error: + switch.port.collision.count: + attributes: + switch: resource_metadata.switch + port_number_on_switch: resource_metadata.port_number_on_switch + neutron_port_id: resource_metadata.neutron_port_id + controller: resource_metadata.controller + + - resource_type: port + metrics: + port: + port.uptime: + port.receive.packets: + port.transmit.packets: + port.receive.bytes: + port.transmit.bytes: + port.receive.drops: + port.receive.errors: + attributes: + controller: resource_metadata.controller + + - resource_type: switch_table + metrics: + switch.table.active.entries: + attributes: + controller: resource_metadata.controller + switch: resource_metadata.switch diff --git a/ceilometer/publisher/direct.py b/ceilometer/publisher/direct.py index 96f287a1..657dac95 100644 --- a/ceilometer/publisher/direct.py +++ b/ceilometer/publisher/direct.py @@ -31,15 +31,14 @@ class DirectPublisher(publisher.ConfigPublisherBase): are required. By default, the database dispatcher is used to select another one we - can use direct://?dispatcher=gnocchi, direct://?dispatcher=http, - direct://?dispatcher=file, ... + can use direct://?dispatcher=name_of_dispatcher, ... 
""" def __init__(self, conf, parsed_url): super(DirectPublisher, self).__init__(conf, parsed_url) default_dispatcher = parsed_url.scheme if default_dispatcher == 'direct': LOG.warning('Direct publisher is deprecated for removal. Use ' - 'an explicit publisher instead, e.g. "gnocchi", ' + 'an explicit publisher instead, e.g. ' '"database", "file", ...') default_dispatcher = 'database' options = urlparse.parse_qs(parsed_url.query) diff --git a/ceilometer/dispatcher/gnocchi.py b/ceilometer/publisher/gnocchi.py similarity index 58% rename from ceilometer/dispatcher/gnocchi.py rename to ceilometer/publisher/gnocchi.py index c49a527c..e6653f5c 100644 --- a/ceilometer/dispatcher/gnocchi.py +++ b/ceilometer/publisher/gnocchi.py @@ -15,6 +15,7 @@ from collections import defaultdict import hashlib import itertools +import json import operator import pkg_resources import threading @@ -23,17 +24,17 @@ from gnocchiclient import exceptions as gnocchi_exc from keystoneauth1 import exceptions as ka_exceptions from oslo_log import log -from oslo_serialization import jsonutils from oslo_utils import fnmatch from oslo_utils import timeutils import six +import six.moves.urllib.parse as urlparse from stevedore import extension from ceilometer import declarative -from ceilometer import dispatcher from ceilometer import gnocchi_client from ceilometer.i18n import _ from ceilometer import keystone_client +from ceilometer import publisher NAME_ENCODED = __name__.encode('utf-8') CACHE_NAMESPACE = uuid.UUID(bytes=hashlib.md5(NAME_ENCODED).digest()) @@ -53,12 +54,12 @@ def cache_key_mangler(key): class ResourcesDefinition(object): MANDATORY_FIELDS = {'resource_type': six.string_types, - 'metrics': list} + 'metrics': (dict, list)} MANDATORY_EVENT_FIELDS = {'id': six.string_types} - def __init__(self, definition_cfg, default_archive_policy, plugin_manager): - self._default_archive_policy = default_archive_policy + def __init__(self, definition_cfg, archive_policy_default, + 
archive_policy_override, plugin_manager): self.cfg = definition_cfg self._check_required_and_types(self.MANDATORY_FIELDS, self.cfg) @@ -78,24 +79,44 @@ def __init__(self, definition_cfg, default_archive_policy, plugin_manager): name, attr_cfg, plugin_manager) self.metrics = {} - for t in self.cfg['metrics']: - archive_policy = self.cfg.get('archive_policy', - self._default_archive_policy) - if archive_policy is None: - self.metrics[t] = {} - else: - self.metrics[t] = dict(archive_policy_name=archive_policy) + + # NOTE(sileht): Convert old list to new dict format + if isinstance(self.cfg['metrics'], list): + values = [None] * len(self.cfg['metrics']) + self.cfg['metrics'] = dict(zip(self.cfg['metrics'], values)) + + for m, extra in self.cfg['metrics'].items(): + if not extra: + extra = {} + + if not extra.get("archive_policy_name"): + extra["archive_policy_name"] = archive_policy_default + + if archive_policy_override: + extra["archive_policy_name"] = archive_policy_override + + # NOTE(sileht): For backward compat, this is after the override to + # preserve the weird previous behavior. We don't really care as we + # deprecate it. 
+ if 'archive_policy' in self.cfg: + LOG.warning("archive_policy '%s' for a resource-type (%s) is " + "deprecated, set it for each metric instead.", + self.cfg["archive_policy"], + self.cfg["resource_type"]) + extra["archive_policy_name"] = self.cfg['archive_policy'] + + self.metrics[m] = extra @staticmethod def _check_required_and_types(expected, definition): - for field, field_type in expected.items(): + for field, field_types in expected.items(): if field not in definition: raise declarative.ResourceDefinitionException( _("Required field %s not specified") % field, definition) - if not isinstance(definition[field], field_type): + if not isinstance(definition[field], field_types): raise declarative.ResourceDefinitionException( _("Required field %(field)s should be a %(type)s") % - {'field': field, 'type': field_type}, definition) + {'field': field, 'type': field_types}, definition) @staticmethod def _ensure_list(value): @@ -103,12 +124,6 @@ def _ensure_list(value): return value return [value] - def metric_match(self, metric_name): - for t in self.cfg['metrics']: - if fnmatch.fnmatch(metric_name, t): - return True - return False - def support_events(self): for e in ["event_create", "event_delete", "event_update"]: if e in self.cfg: @@ -128,15 +143,16 @@ def event_match(self, event_type): def sample_attributes(self, sample): attrs = {} + sample_dict = sample.as_dict() for name, definition in self._attributes.items(): - value = definition.parse(sample) + value = definition.parse(sample_dict) if value is not None: attrs[name] = value return attrs def event_attributes(self, event): attrs = {'type': self.cfg['resource_type']} - traits = dict([(trait[0], trait[2]) for trait in event['traits']]) + traits = dict([(trait.name, trait.value) for trait in event.traits]) for attr, field in self.cfg.get('event_attributes', {}).items(): value = traits.get(field) if value is not None: @@ -168,44 +184,53 @@ def pop(self, key, *args): key_lock.release() -class 
GnocchiDispatcher(dispatcher.MeterDispatcherBase, - dispatcher.EventDispatcherBase): - """Dispatcher class for recording metering data into the Gnocchi service. +class GnocchiPublisher(publisher.ConfigPublisherBase): + """Publisher class for recording metering data into the Gnocchi service. - The dispatcher class records each meter into the gnocchi service - configured in ceilometer configuration file. An example configuration may + The publisher class records each meter into the gnocchi service + configured in Ceilometer pipeline file. An example target may look like the following: - [dispatcher_gnocchi] - archive_policy = low - - To enable this dispatcher, the following section needs to be present in - ceilometer.conf file - - [DEFAULT] - meter_dispatchers = gnocchi - event_dispatchers = gnocchi + gnocchi://?archive_policy=low&filter_project=gnocchi """ - def __init__(self, conf): - super(GnocchiDispatcher, self).__init__(conf) - self.conf = conf - self.filter_service_activity = ( - conf.dispatcher_gnocchi.filter_service_activity) + def __init__(self, conf, parsed_url): + super(GnocchiPublisher, self).__init__(conf, parsed_url) + # TODO(jd) allow to override Gnocchi endpoint via the host in the URL + options = urlparse.parse_qs(parsed_url.query) + + self.filter_project = options.get( + 'filter_project', + [conf.dispatcher_gnocchi.filter_project])[-1] + + resources_definition_file = options.get( + 'resources_definition_file', + [conf.dispatcher_gnocchi.resources_definition_file])[-1] + + archive_policy_override = options.get( + 'archive_policy', + [conf.dispatcher_gnocchi.archive_policy])[-1] + self.resources_definition, self.archive_policies_definition = ( + self._load_definitions(conf, archive_policy_override, + resources_definition_file)) + self.metric_map = dict((metric, rd) for rd in self.resources_definition + for metric in rd.metrics) + + timeout = options.get('timeout', + [conf.dispatcher_gnocchi.request_timeout])[-1] self._ks_client = 
keystone_client.get_client(conf) - self.resources_definition = self._load_resources_definitions(conf) self.cache = None try: import oslo_cache - oslo_cache.configure(self.conf) + oslo_cache.configure(conf) # NOTE(cdent): The default cache backend is a real but # noop backend. We don't want to use that here because # we want to avoid the cache pathways entirely if the # cache has not been configured explicitly. - if self.conf.cache.enabled: + if conf.cache.enabled: cache_region = oslo_cache.create_region() self.cache = oslo_cache.configure_cache_region( - self.conf, cache_region) + conf, cache_region) self.cache.key_mangler = cache_key_mangler except ImportError: pass @@ -216,28 +241,45 @@ def __init__(self, conf): self._gnocchi_project_id_lock = threading.Lock() self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock) - self._gnocchi = gnocchi_client.get_gnocchiclient(conf) + self._gnocchi = gnocchi_client.get_gnocchiclient( + conf, request_timeout=timeout) self._already_logged_event_types = set() self._already_logged_metric_names = set() - @classmethod - def _load_resources_definitions(cls, conf): + self._already_configured_archive_policies = False + + @staticmethod + def _load_definitions(conf, archive_policy_override, + resources_definition_file): plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait_plugin') data = declarative.load_definitions( - conf, {}, conf.dispatcher_gnocchi.resources_definition_file, + conf, {}, resources_definition_file, pkg_resources.resource_filename(__name__, "data/gnocchi_resources.yaml")) + + archive_policy_default = data.get("archive_policy_default", "low") resource_defs = [] for resource in data.get('resources', []): try: resource_defs.append(ResourcesDefinition( resource, - conf.dispatcher_gnocchi.archive_policy, plugin_manager)) + archive_policy_default, + archive_policy_override, + plugin_manager)) except Exception as exc: LOG.error("Failed to load resource due to error %s" % exc) - return 
resource_defs + return resource_defs, data.get("archive_policies", []) + + def ensures_archives_policies(self): + if not self._already_configured_archive_policies: + for ap in self.archive_policies_definition: + try: + self._gnocchi.archive_policy.get(ap["name"]) + except gnocchi_exc.ArchivePolicyNotFound: + self._gnocchi.archive_policy.create(ap) + self._already_configured_archive_policies = True @property def gnocchi_project_id(self): @@ -247,75 +289,61 @@ def gnocchi_project_id(self): if self._gnocchi_project_id is None: try: project = self._ks_client.projects.find( - name=self.conf.dispatcher_gnocchi.filter_project) + name=self.filter_project) except ka_exceptions.NotFound: - LOG.warning('gnocchi project not found in keystone,' - ' ignoring the filter_service_activity ' + LOG.warning('filtered project not found in keystone,' + ' ignoring the filter_project ' 'option') - self.filter_service_activity = False + self.filter_project = None return None except Exception: - LOG.exception('fail to retrieve user of Gnocchi ' - 'service') + LOG.exception('fail to retrieve filtered project ') raise self._gnocchi_project_id = project.id - LOG.debug("gnocchi project found: %s", self.gnocchi_project_id) + LOG.debug("filtered project found: %s", + self.gnocchi_project_id) return self._gnocchi_project_id def _is_swift_account_sample(self, sample): - return bool([rd for rd in self.resources_definition - if rd.cfg['resource_type'] == 'swift_account' - and rd.metric_match(sample['counter_name'])]) + try: + return (self.metric_map[sample.name].cfg['resource_type'] + == 'swift_account') + except KeyError: + return False def _is_gnocchi_activity(self, sample): - return (self.filter_service_activity and self.gnocchi_project_id and ( + return (self.filter_project and self.gnocchi_project_id and ( # avoid anything from the user used by gnocchi - sample['project_id'] == self.gnocchi_project_id or + sample.project_id == self.gnocchi_project_id or # avoid anything in the swift account used 
by gnocchi - (sample['resource_id'] == self.gnocchi_project_id and + (sample.resource_id == self.gnocchi_project_id and self._is_swift_account_sample(sample)) )) - def _get_resource_definition_from_metric(self, metric_name): - for rd in self.resources_definition: - if rd.metric_match(metric_name): - return rd - def _get_resource_definition_from_event(self, event_type): for rd in self.resources_definition: operation = rd.event_match(event_type) if operation: return rd, operation - def record_metering_data(self, data): - # We may have receive only one counter on the wire - if not isinstance(data, list): - data = [data] + def publish_samples(self, data): + self.ensures_archives_policies() + # NOTE(sileht): skip sample generated by gnocchi itself data = [s for s in data if not self._is_gnocchi_activity(s)] - - data.sort(key=lambda s: (s['resource_id'], s['counter_name'])) + data.sort(key=operator.attrgetter('resource_id')) resource_grouped_samples = itertools.groupby( - data, key=operator.itemgetter('resource_id')) + data, key=operator.attrgetter('resource_id')) gnocchi_data = {} measures = {} - stats = dict(measures=0, resources=0, metrics=0) for resource_id, samples_of_resource in resource_grouped_samples: # NOTE(sileht): / is forbidden by Gnocchi resource_id = resource_id.replace('/', '_') - stats['resources'] += 1 - metric_grouped_samples = itertools.groupby( - list(samples_of_resource), - key=operator.itemgetter('counter_name')) - - res_info = {} - for metric_name, samples in metric_grouped_samples: - stats['metrics'] += 1 - - samples = list(samples) - rd = self._get_resource_definition_from_metric(metric_name) + for sample in samples_of_resource: + metric_name = sample.name + rd = self.metric_map.get(metric_name) if rd is None: if metric_name not in self._already_logged_metric_names: LOG.warning("metric %s is not handled by Gnocchi" % @@ -325,34 +353,29 @@ def record_metering_data(self, data): if rd.cfg.get("ignore"): continue - res_info['resource_type'] = 
rd.cfg['resource_type'] - res_info.setdefault("resource", {}).update({ - "id": resource_id, - "user_id": samples[0]['user_id'], - "project_id": samples[0]['project_id'], - "metrics": rd.metrics, - }) - - for sample in samples: - res_info.setdefault("resource_extra", {}).update( - rd.sample_attributes(sample)) - m = measures.setdefault(resource_id, {}).setdefault( - metric_name, []) - m.append({'timestamp': sample['timestamp'], - 'value': sample['counter_volume']}) - unit = sample['counter_unit'] - metric = sample['counter_name'] - res_info['resource']['metrics'][metric]['unit'] = unit - - stats['measures'] += len(measures[resource_id][metric_name]) - res_info["resource"].update(res_info["resource_extra"]) - if res_info: - gnocchi_data[resource_id] = res_info + if resource_id not in gnocchi_data: + gnocchi_data[resource_id] = { + 'resource_type': rd.cfg['resource_type'], + 'resource': {"id": resource_id, + "user_id": sample.user_id, + "project_id": sample.project_id}} + + gnocchi_data[resource_id].setdefault( + "resource_extra", {}).update(rd.sample_attributes(sample)) + measures.setdefault(resource_id, {}).setdefault( + metric_name, + {"measures": [], + "archive_policy_name": + rd.metrics[metric_name]["archive_policy_name"], + "unit": sample.unit} + )["measures"].append( + {'timestamp': sample.timestamp, + 'value': sample.volume} + ) try: - self.batch_measures(measures, gnocchi_data, stats) - except (gnocchi_exc.ClientException, - ka_exceptions.ConnectFailure) as e: + self.batch_measures(measures, gnocchi_data) + except gnocchi_exc.ClientException as e: LOG.error(six.text_type(e)) except Exception as e: LOG.error(six.text_type(e), exc_info=True) @@ -364,8 +387,8 @@ def record_metering_data(self, data): if not resource_extra: continue try: - self._if_not_cached("update", resource_type, resource, - self._update_resource, resource_extra) + self._if_not_cached(resource_type, resource['id'], + resource_extra) except gnocchi_exc.ClientException as e: 
LOG.error(six.text_type(e)) except Exception as e: @@ -376,10 +399,11 @@ def _extract_resources_from_error(e, resource_infos): resource_ids = set([r['original_resource_id'] for r in e.message['detail']]) return [(resource_infos[rid]['resource_type'], - resource_infos[rid]['resource']) + resource_infos[rid]['resource'], + resource_infos[rid]['resource_extra']) for rid in resource_ids] - def batch_measures(self, measures, resource_infos, stats): + def batch_measures(self, measures, resource_infos): # NOTE(sileht): We don't care about error here, we want # resources metadata always been updated try: @@ -392,10 +416,10 @@ def batch_measures(self, measures, resource_infos, stats): raise resources = self._extract_resources_from_error(e, resource_infos) - for resource_type, resource in resources: + for resource_type, resource, resource_extra in resources: try: - self._if_not_cached("create", resource_type, resource, - self._create_resource) + resource.update(resource_extra) + self._create_resource(resource_type, resource) except gnocchi_exc.ResourceAlreadyExists: # NOTE(sileht): resource created in the meantime pass @@ -406,72 +430,67 @@ def batch_measures(self, measures, resource_infos, stats): # and we can't patch it later del measures[resource['id']] del resource_infos[resource['id']] + else: + if self.cache and resource_extra: + self.cache.set(resource['id'], + self._hash_resource(resource_extra)) # NOTE(sileht): we have created missing resources/metrics, # now retry to post measures self._gnocchi.metric.batch_resources_metrics_measures( measures, create_metrics=True) - # FIXME(sileht): take care of measures removed in stats - LOG.debug("%(measures)d measures posted against %(metrics)d " - "metrics through %(resources)d resources", stats) + LOG.debug( + "%d measures posted against %d metrics through %d resources", + sum(len(m["measures"]) + for rid in measures + for m in measures[rid].values()), + sum(len(m) for m in measures.values()), len(resource_infos)) def 
_create_resource(self, resource_type, resource): self._gnocchi.resource.create(resource_type, resource) LOG.debug('Resource %s created', resource["id"]) - def _update_resource(self, resource_type, resource, resource_extra): - self._gnocchi.resource.update(resource_type, - resource["id"], - resource_extra) - LOG.debug('Resource %s updated', resource["id"]) + def _update_resource(self, resource_type, res_id, resource_extra): + self._gnocchi.resource.update(resource_type, res_id, resource_extra) + LOG.debug('Resource %s updated', res_id) - def _if_not_cached(self, operation, resource_type, resource, method, - *args, **kwargs): + def _if_not_cached(self, resource_type, res_id, resource_extra): if self.cache: - cache_key = resource['id'] - attribute_hash = self._check_resource_cache(cache_key, resource) - hit = False - if attribute_hash: - with self._gnocchi_resource_lock[cache_key]: + attribute_hash = self._hash_resource(resource_extra) + if self._resource_cache_diff(res_id, attribute_hash): + with self._gnocchi_resource_lock[res_id]: # NOTE(luogangyi): there is a possibility that the # resource was already built in cache by another - # ceilometer-collector when we get the lock here. - attribute_hash = self._check_resource_cache(cache_key, - resource) - if attribute_hash: - method(resource_type, resource, *args, **kwargs) - self.cache.set(cache_key, attribute_hash) + # ceilometer-notification-agent when we get the lock here. 
+ if self._resource_cache_diff(res_id, attribute_hash): + self._update_resource(resource_type, res_id, + resource_extra) + self.cache.set(res_id, attribute_hash) else: - hit = True - LOG.debug('resource cache recheck hit for ' - '%s %s', operation, cache_key) - self._gnocchi_resource_lock.pop(cache_key, None) + LOG.debug('Resource cache hit for %s', res_id) + self._gnocchi_resource_lock.pop(res_id, None) else: - hit = True - LOG.debug('Resource cache hit for %s %s', operation, cache_key) - if hit and operation == "create": - raise gnocchi_exc.ResourceAlreadyExists() + LOG.debug('Resource cache hit for %s', res_id) else: - method(resource_type, resource, *args, **kwargs) + self._update_resource(resource_type, res_id, resource_extra) - def _check_resource_cache(self, key, resource_data): + @staticmethod + def _hash_resource(resource): + return hash(tuple(i for i in resource.items() if i[0] != 'metrics')) + + def _resource_cache_diff(self, key, attribute_hash): cached_hash = self.cache.get(key) - attribute_hash = hash(frozenset(filter(lambda x: x[0] != "metrics", - resource_data.items()))) - if not cached_hash or cached_hash != attribute_hash: - return attribute_hash - else: - return None + return not cached_hash or cached_hash != attribute_hash - def record_events(self, events): + def publish_events(self, events): for event in events: - rd = self._get_resource_definition_from_event(event['event_type']) + rd = self._get_resource_definition_from_event(event.event_type) if not rd: - if event['event_type'] not in self._already_logged_event_types: + if event.event_type not in self._already_logged_event_types: LOG.debug("No gnocchi definition for event type: %s", - event['event_type']) - self._already_logged_event_types.add(event['event_type']) + event.event_type) + self._already_logged_event_types.add(event.event_type) continue rd, operation = rd @@ -498,7 +517,7 @@ def _delete_event(self, rd, event): def _search_resource(self, resource_type, query): try: return 
self._gnocchi.resource.search( - resource_type, jsonutils.loads(query)) + resource_type, json.loads(query)) except Exception: LOG.error("Fail to search resource type %{resource_type}s " "with '%{query}s'", diff --git a/ceilometer/publisher/http.py b/ceilometer/publisher/http.py index 200f216c..e57370e3 100644 --- a/ceilometer/publisher/http.py +++ b/ceilometer/publisher/http.py @@ -13,8 +13,9 @@ # License for the specific language governing permissions and limitations # under the License. +import json + from oslo_log import log -from oslo_serialization import jsonutils from oslo_utils import strutils import requests from requests import adapters @@ -154,7 +155,7 @@ def _do_post(self, data): if not data: LOG.debug('Data set is empty!') return - data = jsonutils.dumps(data) + data = json.dumps(data) LOG.trace('Message: %s', data) try: res = self.session.post(self.target, data=data, diff --git a/ceilometer/tests/functional/api/v2/test_api_upgrade.py b/ceilometer/tests/functional/api/v2/test_api_upgrade.py index 11a008b3..6958eab9 100644 --- a/ceilometer/tests/functional/api/v2/test_api_upgrade.py +++ b/ceilometer/tests/functional/api/v2/test_api_upgrade.py @@ -64,7 +64,6 @@ def _url_for(service_type=None): return 'http://event-endpoint:8009/' def _do_test_gnocchi_enabled_without_database_backend(self): - self.CONF.set_override('meter_dispatchers', ['gnocchi']) for endpoint in ['meters', 'samples', 'resources']: response = self.app.get(self.PATH_PREFIX + '/' + endpoint, status=410) diff --git a/ceilometer/tests/unit/dispatcher/test_dispatcher.py b/ceilometer/tests/unit/dispatcher/test_dispatcher.py index c2e2e325..fb9b0414 100644 --- a/ceilometer/tests/unit/dispatcher/test_dispatcher.py +++ b/ceilometer/tests/unit/dispatcher/test_dispatcher.py @@ -30,16 +30,13 @@ def setUp(self): super(TestDispatchManager, self).setUp() conf = service.prepare_service([], []) self.conf = self.useFixture(fixture.Config(conf)) - self.conf.config(meter_dispatchers=['database', 'gnocchi'], 
+ self.conf.config(meter_dispatchers=['database'], event_dispatchers=['database']) self.CONF = self.conf.conf - self.useFixture(fixtures.MockPatch( - 'ceilometer.dispatcher.gnocchi.GnocchiDispatcher', - new=FakeMeterDispatcher)) self.useFixture(fixtures.MockPatch( 'ceilometer.dispatcher.database.MeterDatabaseDispatcher', new=FakeMeterDispatcher)) def test_load(self): sample_mg, event_mg = dispatcher.load_dispatcher_manager(self.CONF) - self.assertEqual(2, len(list(sample_mg))) + self.assertEqual(1, len(list(sample_mg))) diff --git a/ceilometer/tests/unit/dispatcher/test_gnocchi.py b/ceilometer/tests/unit/dispatcher/test_gnocchi.py deleted file mode 100644 index 6e7ec7bd..00000000 --- a/ceilometer/tests/unit/dispatcher/test_gnocchi.py +++ /dev/null @@ -1,795 +0,0 @@ -# -# Copyright 2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import uuid - -import fixtures -from gnocchiclient import exceptions as gnocchi_exc -from keystoneauth1 import exceptions as ka_exceptions -import mock -from oslo_config import fixture as config_fixture -from oslo_utils import fileutils -from oslo_utils import fixture as utils_fixture -from oslo_utils import timeutils -import requests -import six -from stevedore import extension -import testscenarios - -from ceilometer.dispatcher import gnocchi -from ceilometer.publisher import utils -from ceilometer import service as ceilometer_service -from ceilometer.tests import base - -load_tests = testscenarios.load_tests_apply_scenarios - -INSTANCE_DELETE_START = { - 'event_type': u'compute.instance.delete.start', - 'traits': [ - [ - 'state', - 1, - u'active' - ], - [ - 'user_id', - 1, - u'1e3ce043029547f1a61c1996d1a531a2' - ], - [ - 'service', - 1, - u'compute' - ], - [ - 'disk_gb', - 2, - 0 - ], - [ - 'instance_type', - 1, - u'm1.tiny' - ], - [ - 'tenant_id', - 1, - u'7c150a59fe714e6f9263774af9688f0e' - ], - [ - 'root_gb', - 2, - 0 - ], - [ - 'ephemeral_gb', - 2, - 0 - ], - [ - 'instance_type_id', - 2, - 2 - ], - [ - 'vcpus', - 2, - 1 - ], - [ - 'memory_mb', - 2, - 512 - ], - [ - 'instance_id', - 1, - u'9f9d01b9-4a58-4271-9e27-398b21ab20d1' - ], - [ - 'host', - 1, - u'vagrant-precise' - ], - [ - 'request_id', - 1, - u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39' - ], - [ - 'project_id', - 1, - u'7c150a59fe714e6f9263774af9688f0e' - ], - [ - 'launched_at', - 4, - '2012-05-08T20:23:47' - ] - ], - 'message_signature': - '831719d54059734f82e7d6498c6d7a8fd637568732e79c1fd375e128f142373a', - 'raw': {}, - 'generated': '2012-05-08T20:24:14.824743', - 'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4' -} - -IMAGE_DELETE_START = { - u'event_type': u'image.delete', - u'traits': [ - [ - u'status', - 1, - u'deleted' - ], - [ - u'deleted_at', - 1, - u'2016-11-04T04:25:56Z' - ], - [ - u'user_id', - 1, - u'e97ef33a20ed4843b520d223f3cc33d4' - ], - [ - u'name', - 1, - u'cirros' 
- ], - [ - u'service', - 1, - u'image.localhost' - ], - [ - u'resource_id', - 1, - u'dc337359-de70-4044-8e2c-80573ba6e577' - ], - [ - u'created_at', - 1, - u'2016-11-04T04:24:36Z' - ], - [ - u'project_id', - 1, - u'e97ef33a20ed4843b520d223f3cc33d4' - ], - [ - u'size', - 1, - u'13287936' - ] - ], - u'message_signature': - u'46fb4958e911f45007a3bb5934c5de7610892a6d742a4900695fd5929cd4c9b3', - u'raw': {}, - u'generated': u'2016-11-04T04:25:56.493820', - u'message_id': u'7f5280f7-1d10-46a5-ba58-4d5508e49f99' -} - - -VOLUME_DELETE_START = { - 'event_type': u'volume.delete.start', - 'traits': [ - [ - u'availability_zone', - 1, - u'nova' - ], - [ - u'created_at', - 1, - u'2016-11-28T13:19:53+00:00' - ], - [ - u'display_name', - 1, - u'vol-001' - ], - [ - u'host', - 1, - u'zhangguoqing-dev@lvmdriver-1#lvmdriver-1' - ], - [ - u'project_id', - 1, - u'd53fcc7dc53c4662ad77822c36a21f00' - ], - [ - u'replication_status', - 1, - u'disabled' - ], - [ - u'request_id', - 1, - u'req-f44df096-50d4-4211-95ea-64be6f5e4f60' - ], - [ - u'resource_id', - 1, - u'6cc6e7dd-d17d-460f-ae79-7e08a216ce96' - ], - [ - u'service', - 1, - u'volume.zhangguoqing-dev@lvmdriver-1' - ], - [ - u'size', - 1, - u'1' - ], - [ - u'status', - 1, - u'deleting' - ], - [ - u'tenant_id', - 1, - u'd53fcc7dc53c4662ad77822c36a21f00' - ], - [ - u'type', - 1, - u'af6271fa-13c4-44e6-9246-754ce9dc7df8' - ], - [ - u'user_id', - 1, - u'819bbd28f5374506b8502521c89430b5' - ] - ], - 'message_signature': - '831719d54059734f82e7d6498c6d7a8fd637568732e79c1fd375e128f142373a', - 'raw': {}, - 'generated': '2016-11-28T13:42:15.484674', - 'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4' -} - -FLOATINGIP_DELETE_END = { - 'event_type': u'floatingip.delete.end', - 'traits': [ - [ - u'project_id', - 1, - u'd53fcc7dc53c4662ad77822c36a21f00' - ], - [ - u'request_id', - 1, - 'req-443ddb77-31f7-41fe-abbf-921107dd9f00' - ], - [ - u'resource_id', - 1, - u'705e2c08-08e8-45cb-8673-5c5be955569b' - ], - [ - u'service', - 1, - 
u'network.zhangguoqing-dev' - ], - [ - u'tenant_id', - 1, - u'd53fcc7dc53c4662ad77822c36a21f00' - ], - [ - u'user_id', - 1, - u'819bbd28f5374506b8502521c89430b5' - ] - ], - 'message_signature': - '831719d54059734f82e7d6498c6d7a8fd637568732e79c1fd375e128f142373a', - 'raw': {}, - 'generated': '2016-11-29T09:25:55.474710', - 'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4' -} - - -class DispatcherTest(base.BaseTestCase): - - def setUp(self): - super(DispatcherTest, self).setUp() - conf = ceilometer_service.prepare_service(argv=[], config_files=[]) - self.conf = self.useFixture(config_fixture.Config(conf)) - self.conf.config( - resources_definition_file=self.path_get( - 'etc/ceilometer/gnocchi_resources.yaml'), - group="dispatcher_gnocchi" - ) - self.resource_id = str(uuid.uuid4()) - self.samples = [{ - 'counter_name': 'disk.root.size', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_id': self.resource_id, - 'resource_metadata': { - 'host': 'foo', - 'image_ref': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }, - { - 'counter_name': 'disk.root.size', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2014-05-08 20:23:48.028195', - 'resource_id': self.resource_id, - 'resource_metadata': { - 'host': 'foo', - 'image_ref': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }] - for sample in self.samples: - sample['message_signature'] = utils.compute_signature( - sample, self.conf.conf.publisher.telemetry_secret) - - ks_client = mock.Mock(auth_token='fake_token') - ks_client.projects.find.return_value = mock.Mock( - name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') - self.useFixture(fixtures.MockPatch( - 
'ceilometer.keystone_client.get_client', - return_value=ks_client)) - self.ks_client = ks_client - self.conf.conf.dispatcher_gnocchi.filter_service_activity = True - - def test_config_load(self): - self.conf.config(filter_service_activity=False, - group='dispatcher_gnocchi') - d = gnocchi.GnocchiDispatcher(self.conf.conf) - names = [rd.cfg['resource_type'] for rd in d.resources_definition] - self.assertIn('instance', names) - self.assertIn('volume', names) - - def test_match(self): - resource = { - 'metrics': - ['image', 'image.size', 'image.download', 'image.serve'], - 'attributes': - {'container_format': 'resource_metadata.container_format', - 'disk_format': 'resource_metadata.disk_format', - 'name': 'resource_metadata.name'}, - 'event_delete': 'image.delete', - 'event_attributes': {'id': 'resource_id'}, - 'resource_type': 'image'} - plugin_manager = extension.ExtensionManager( - namespace='ceilometer.event.trait.trait_plugin') - rd = gnocchi.ResourcesDefinition( - resource, self.conf.conf.dispatcher_gnocchi.archive_policy, - plugin_manager) - operation = rd.event_match("image.delete") - self.assertEqual('delete', operation) - self.assertEqual(True, rd.metric_match('image')) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - def test_broken_config_load(self, mylog): - contents = [("---\n" - "resources:\n" - " - resource_type: foobar\n"), - ("---\n" - "resources:\n" - " - resource_type: 0\n"), - ("---\n" - "resources:\n" - " - sample_types: ['foo', 'bar']\n"), - ("---\n" - "resources:\n" - " - sample_types: foobar\n" - " - resource_type: foobar\n"), - ] - - for content in contents: - if six.PY3: - content = content.encode('utf-8') - - temp = fileutils.write_to_tempfile(content=content, - prefix='gnocchi_resources', - suffix='.yaml') - self.addCleanup(os.remove, temp) - self.conf.config(filter_service_activity=False, - resources_definition_file=temp, - group='dispatcher_gnocchi') - d = gnocchi.GnocchiDispatcher(self.conf.conf) - 
self.assertTrue(mylog.error.called) - self.assertEqual(0, len(d.resources_definition)) - - @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' - '._if_not_cached') - @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' - '.batch_measures') - def _do_test_activity_filter(self, expected_measures, fake_batch, __): - - d = gnocchi.GnocchiDispatcher(self.conf.conf) - d.record_metering_data(self.samples) - fake_batch.assert_called_with( - mock.ANY, mock.ANY, - {'metrics': 1, 'resources': 1, 'measures': expected_measures}) - - def test_activity_filter_match_project_id(self): - self.samples[0]['project_id'] = ( - 'a2d42c23-d518-46b6-96ab-3fba2e146859') - self._do_test_activity_filter(1) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - def test_activity_gnocchi_project_not_found(self, logger): - self.ks_client.projects.find.side_effect = ka_exceptions.NotFound - self._do_test_activity_filter(2) - logger.warning.assert_called_with('gnocchi project not found in ' - 'keystone, ignoring the ' - 'filter_service_activity option') - - def test_activity_filter_match_swift_event(self): - self.samples[0]['counter_name'] = 'storage.api.request' - self.samples[0]['resource_id'] = 'a2d42c23-d518-46b6-96ab-3fba2e146859' - self._do_test_activity_filter(1) - - def test_activity_filter_nomatch(self): - self._do_test_activity_filter(2) - - @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' - '.batch_measures') - def test_unhandled_meter(self, fake_batch): - samples = [{ - 'counter_name': 'unknown.meter', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2014-05-08 20:23:48.028195', - 'resource_id': 'randomid', - 'resource_metadata': {} - }] - d = gnocchi.GnocchiDispatcher(self.conf.conf) - d.record_metering_data(samples) - self.assertEqual(0, len(fake_batch.call_args[0][1])) - - -class MockResponse(mock.NonCallableMock): - def 
__init__(self, code): - text = {500: 'Internal Server Error', - 404: 'Not Found', - 204: 'Created', - 409: 'Conflict', - }.get(code) - super(MockResponse, self).__init__(spec=requests.Response, - status_code=code, - text=text) - - -class DispatcherWorkflowTest(base.BaseTestCase, - testscenarios.TestWithScenarios): - - sample_scenarios = [ - ('disk.root.size', dict( - sample={ - 'counter_name': 'disk.root.size', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_metadata': { - 'host': 'foo', - 'image_ref': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }, - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': '2' - }], - postable_attributes={ - 'user_id': 'test_user', - 'project_id': 'test_project', - }, - patchable_attributes={ - 'host': 'foo', - 'image_ref': 'imageref!', - 'flavor_id': 1234, - 'display_name': 'myinstance', - }, - metric_names=[ - 'disk.root.size', 'disk.ephemeral.size', - 'memory', 'vcpus', 'memory.usage', 'memory.resident', - 'memory.swap.in', 'memory.swap.out', - 'memory.bandwidth.total', 'memory.bandwidth.local', - 'cpu', 'cpu.delta', 'cpu_util', 'vcpus', 'disk.read.requests', - 'cpu_l3_cache', 'perf.cpu.cycles', 'perf.instructions', - 'perf.cache.references', 'perf.cache.misses', - 'disk.read.requests.rate', 'disk.write.requests', - 'disk.write.requests.rate', 'disk.read.bytes', - 'disk.read.bytes.rate', 'disk.write.bytes', - 'disk.write.bytes.rate', 'disk.latency', 'disk.iops', - 'disk.capacity', 'disk.allocation', 'disk.usage', - 'compute.instance.booting.time'], - resource_type='instance')), - ('hardware.ipmi.node.power', dict( - sample={ - 'counter_name': 'hardware.ipmi.node.power', - 'counter_unit': 'W', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 
'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_metadata': { - 'useless': 'not_used', - } - }, - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': '2' - }], - postable_attributes={ - 'user_id': 'test_user', - 'project_id': 'test_project', - }, - patchable_attributes={ - }, - metric_names=[ - 'hardware.ipmi.node.power', 'hardware.ipmi.node.temperature', - 'hardware.ipmi.node.inlet_temperature', - 'hardware.ipmi.node.outlet_temperature', - 'hardware.ipmi.node.fan', 'hardware.ipmi.node.current', - 'hardware.ipmi.node.voltage', 'hardware.ipmi.node.airflow', - 'hardware.ipmi.node.cups', 'hardware.ipmi.node.cpu_util', - 'hardware.ipmi.node.mem_util', 'hardware.ipmi.node.io_util' - ], - resource_type='ipmi')), - ] - - default_workflow = dict(resource_exists=True, - post_measure_fail=False, - create_resource_fail=False, - create_resource_race=False, - update_resource_fail=False, - retry_post_measures_fail=False) - workflow_scenarios = [ - ('normal_workflow', {}), - ('new_resource', dict(resource_exists=False)), - ('new_resource_compat', dict(resource_exists=False)), - ('new_resource_fail', dict(resource_exists=False, - create_resource_fail=True)), - ('new_resource_race', dict(resource_exists=False, - create_resource_race=True)), - ('resource_update_fail', dict(update_resource_fail=True)), - ('retry_fail', dict(resource_exists=False, - retry_post_measures_fail=True)), - ('measure_fail', dict(post_measure_fail=True)), - ] - - @classmethod - def generate_scenarios(cls): - workflow_scenarios = [] - for name, wf_change in cls.workflow_scenarios: - wf = cls.default_workflow.copy() - wf.update(wf_change) - workflow_scenarios.append((name, wf)) - cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, - workflow_scenarios) - - def setUp(self): - super(DispatcherWorkflowTest, self).setUp() - conf = ceilometer_service.prepare_service(argv=[], config_files=[]) - self.conf = 
self.useFixture(config_fixture.Config(conf)) - ks_client = mock.Mock() - ks_client.projects.find.return_value = mock.Mock( - name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') - self.useFixture(fixtures.MockPatch( - 'ceilometer.keystone_client.get_client', - return_value=ks_client)) - self.ks_client = ks_client - - self.conf.config( - resources_definition_file=self.path_get( - 'etc/ceilometer/gnocchi_resources.yaml'), - group="dispatcher_gnocchi" - ) - - self.sample['resource_id'] = str(uuid.uuid4()) + "_foobar" - self.sample['message_signature'] = utils.compute_signature( - self.sample, self.conf.conf.publisher.telemetry_secret) - - @mock.patch('gnocchiclient.v1.client.Client') - def test_event_workflow(self, fakeclient_cls): - self.dispatcher = gnocchi.GnocchiDispatcher(self.conf.conf) - - fakeclient = fakeclient_cls.return_value - - fakeclient.resource.search.side_effect = [ - [{"id": "b26268d6-8bb5-11e6-baff-00224d8226cd", - "type": "instance_disk", - "instance_id": "9f9d01b9-4a58-4271-9e27-398b21ab20d1"}], - [{"id": "b1c7544a-8bb5-11e6-850e-00224d8226cd", - "type": "instance_network_interface", - "instance_id": "9f9d01b9-4a58-4271-9e27-398b21ab20d1"}], - ] - - search_params = { - '=': {'instance_id': '9f9d01b9-4a58-4271-9e27-398b21ab20d1'} - } - - now = timeutils.utcnow() - self.useFixture(utils_fixture.TimeFixture(now)) - - expected_calls = [ - mock.call.resource.search('instance_disk', search_params), - mock.call.resource.search('instance_network_interface', - search_params), - mock.call.resource.update( - 'instance', '9f9d01b9-4a58-4271-9e27-398b21ab20d1', - {'ended_at': now.isoformat()}), - mock.call.resource.update( - 'instance_disk', - 'b26268d6-8bb5-11e6-baff-00224d8226cd', - {'ended_at': now.isoformat()}), - mock.call.resource.update( - 'instance_network_interface', - 'b1c7544a-8bb5-11e6-850e-00224d8226cd', - {'ended_at': now.isoformat()}), - mock.call.resource.update( - 'image', 'dc337359-de70-4044-8e2c-80573ba6e577', - {'ended_at': 
now.isoformat()}), - mock.call.resource.update( - 'volume', '6cc6e7dd-d17d-460f-ae79-7e08a216ce96', - {'ended_at': now.isoformat()}), - mock.call.resource.update( - 'network', '705e2c08-08e8-45cb-8673-5c5be955569b', - {'ended_at': now.isoformat()}) - ] - - self.dispatcher.record_events([INSTANCE_DELETE_START, - IMAGE_DELETE_START, - VOLUME_DELETE_START, - FLOATINGIP_DELETE_END]) - self.assertEqual(8, len(fakeclient.mock_calls)) - for call in expected_calls: - self.assertIn(call, fakeclient.mock_calls) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - @mock.patch('gnocchiclient.v1.client.Client') - def test_workflow(self, fakeclient_cls, logger): - self.dispatcher = gnocchi.GnocchiDispatcher(self.conf.conf) - - fakeclient = fakeclient_cls.return_value - - resource_id = self.sample['resource_id'].replace("/", "_") - metric_name = self.sample['counter_name'] - gnocchi_id = uuid.uuid4() - - expected_calls = [ - mock.call.metric.batch_resources_metrics_measures( - {resource_id: {metric_name: self.measures_attributes}}, - create_metrics=True) - ] - expected_debug = [ - mock.call('gnocchi project found: %s', - 'a2d42c23-d518-46b6-96ab-3fba2e146859'), - ] - - measures_posted = False - batch_side_effect = [] - if self.post_measure_fail: - batch_side_effect += [Exception('boom!')] - elif not self.resource_exists: - batch_side_effect += [ - gnocchi_exc.BadRequest( - 400, {"cause": "Unknown resources", - 'detail': [{ - 'resource_id': gnocchi_id, - 'original_resource_id': resource_id}]})] - - attributes = self.postable_attributes.copy() - attributes.update(self.patchable_attributes) - attributes['id'] = self.sample['resource_id'] - attributes['metrics'] = dict((metric_name, {}) - for metric_name in self.metric_names) - for k, v in six.iteritems(attributes['metrics']): - if k == 'disk.root.size': - v['unit'] = 'GB' - continue - if k == 'hardware.ipmi.node.power': - v['unit'] = 'W' - continue - expected_calls.append(mock.call.resource.create( - self.resource_type, 
attributes)) - - if self.create_resource_fail: - fakeclient.resource.create.side_effect = [Exception('boom!')] - elif self.create_resource_race: - fakeclient.resource.create.side_effect = [ - gnocchi_exc.ResourceAlreadyExists(409)] - else: # not resource_exists - expected_debug.append(mock.call( - 'Resource %s created', self.sample['resource_id'])) - - if not self.create_resource_fail: - expected_calls.append( - mock.call.metric.batch_resources_metrics_measures( - {resource_id: {metric_name: self.measures_attributes}}, - create_metrics=True) - ) - - if self.retry_post_measures_fail: - batch_side_effect += [Exception('boom!')] - else: - measures_posted = True - - else: - measures_posted = True - - if measures_posted: - batch_side_effect += [None] - expected_debug.append( - mock.call("%(measures)d measures posted against %(metrics)d " - "metrics through %(resources)d resources", dict( - measures=len(self.measures_attributes), - metrics=1, resources=1)) - ) - - if self.patchable_attributes: - expected_calls.append(mock.call.resource.update( - self.resource_type, resource_id, - self.patchable_attributes)) - if self.update_resource_fail: - fakeclient.resource.update.side_effect = [Exception('boom!')] - else: - expected_debug.append(mock.call( - 'Resource %s updated', self.sample['resource_id'])) - - batch = fakeclient.metric.batch_resources_metrics_measures - batch.side_effect = batch_side_effect - - self.dispatcher.record_metering_data([self.sample]) - - # Check that the last log message is the expected one - if (self.post_measure_fail - or self.create_resource_fail - or self.retry_post_measures_fail - or (self.update_resource_fail and self.patchable_attributes)): - logger.error.assert_called_with('boom!', exc_info=True) - else: - self.assertEqual(0, logger.error.call_count) - self.assertEqual(expected_calls, fakeclient.mock_calls) - self.assertEqual(expected_debug, logger.debug.mock_calls) - -DispatcherWorkflowTest.generate_scenarios() diff --git 
a/ceilometer/tests/unit/publisher/test_gnocchi.py b/ceilometer/tests/unit/publisher/test_gnocchi.py new file mode 100644 index 00000000..ff94245d --- /dev/null +++ b/ceilometer/tests/unit/publisher/test_gnocchi.py @@ -0,0 +1,625 @@ +# +# Copyright 2014 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import uuid + +import fixtures +from gnocchiclient import exceptions as gnocchi_exc +from keystoneauth1 import exceptions as ka_exceptions +import mock +from oslo_config import fixture as config_fixture +from oslo_utils import fileutils +from oslo_utils import fixture as utils_fixture +from oslo_utils import netutils +from oslo_utils import timeutils +import requests +import six +from stevedore import extension +import testscenarios + +from ceilometer.event.storage import models +from ceilometer.publisher import gnocchi +from ceilometer import sample +from ceilometer import service as ceilometer_service +from ceilometer.tests import base + +load_tests = testscenarios.load_tests_apply_scenarios + +INSTANCE_DELETE_START = models.Event( + event_type=u'compute.instance.delete.start', + traits=[models.Trait('state', 1, u'active'), + models.Trait( + 'user_id', 1, u'1e3ce043029547f1a61c1996d1a531a2'), + models.Trait('service', 1, u'compute'), + models.Trait('disk_gb', 2, 0), + models.Trait('instance_type', 1, u'm1.tiny'), + models.Trait('tenant_id', 1, u'7c150a59fe714e6f9263774af9688f0e'), + models.Trait('root_gb', 2, 0), + models.Trait('ephemeral_gb', 2, 
0), + models.Trait('instance_type_id', 2, 2), + models.Trait('vcpus', 2, 1), + models.Trait('memory_mb', 2, 512), + models.Trait( + 'instance_id', 1, u'9f9d01b9-4a58-4271-9e27-398b21ab20d1'), + models.Trait('host', 1, u'vagrant-precise'), + models.Trait( + 'request_id', 1, u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39'), + models.Trait('project_id', 1, u'7c150a59fe714e6f9263774af9688f0e'), + models.Trait('launched_at', 4, '2012-05-08T20:23:47')], + raw={}, + generated='2012-05-08T20:24:14.824743', + message_id=u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', +) + +IMAGE_DELETE_START = models.Event( + event_type=u'image.delete', + traits=[models.Trait(u'status', 1, u'deleted'), + models.Trait(u'deleted_at', 1, u'2016-11-04T04:25:56Z'), + models.Trait(u'user_id', 1, u'e97ef33a20ed4843b520d223f3cc33d4'), + models.Trait(u'name', 1, u'cirros'), + models.Trait(u'service', 1, u'image.localhost'), + models.Trait( + u'resource_id', 1, u'dc337359-de70-4044-8e2c-80573ba6e577'), + models.Trait(u'created_at', 1, u'2016-11-04T04:24:36Z'), + models.Trait( + u'project_id', 1, u'e97ef33a20ed4843b520d223f3cc33d4'), + models.Trait(u'size', 1, u'13287936')], + raw={}, + generated=u'2016-11-04T04:25:56.493820', + message_id=u'7f5280f7-1d10-46a5-ba58-4d5508e49f99' +) + + +VOLUME_DELETE_START = models.Event( + event_type=u'volume.delete.start', + traits=[models.Trait(u'availability_zone', 1, u'nova'), + models.Trait(u'created_at', 1, u'2016-11-28T13:19:53+00:00'), + models.Trait(u'display_name', 1, u'vol-001'), + models.Trait( + u'host', 1, u'zhangguoqing-dev@lvmdriver-1#lvmdriver-1'), + models.Trait( + u'project_id', 1, u'd53fcc7dc53c4662ad77822c36a21f00'), + models.Trait(u'replication_status', 1, u'disabled'), + models.Trait( + u'request_id', 1, u'req-f44df096-50d4-4211-95ea-64be6f5e4f60'), + models.Trait( + u'resource_id', 1, u'6cc6e7dd-d17d-460f-ae79-7e08a216ce96'), + models.Trait( + u'service', 1, u'volume.zhangguoqing-dev@lvmdriver-1'), + models.Trait(u'size', 1, u'1'), + 
models.Trait(u'status', 1, u'deleting'), + models.Trait(u'tenant_id', 1, u'd53fcc7dc53c4662ad77822c36a21f00'), + models.Trait(u'type', 1, u'af6271fa-13c4-44e6-9246-754ce9dc7df8'), + models.Trait(u'user_id', 1, u'819bbd28f5374506b8502521c89430b5')], + raw={}, + generated='2016-11-28T13:42:15.484674', + message_id=u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', +) + +FLOATINGIP_DELETE_END = models.Event( + event_type=u'floatingip.delete.end', + traits=[models.Trait(u'service', 1, u'network.zhangguoqing-dev'), + models.Trait( + u'project_id', 1, u'd53fcc7dc53c4662ad77822c36a21f00'), + models.Trait( + u'request_id', 1, 'req-443ddb77-31f7-41fe-abbf-921107dd9f00'), + models.Trait( + u'resource_id', 1, u'705e2c08-08e8-45cb-8673-5c5be955569b'), + models.Trait(u'tenant_id', 1, u'd53fcc7dc53c4662ad77822c36a21f00'), + models.Trait(u'user_id', 1, u'819bbd28f5374506b8502521c89430b5')], + raw={}, + generated='2016-11-29T09:25:55.474710', + message_id=u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4' +) + + +class PublisherTest(base.BaseTestCase): + + def setUp(self): + super(PublisherTest, self).setUp() + conf = ceilometer_service.prepare_service(argv=[], config_files=[]) + self.conf = self.useFixture(config_fixture.Config(conf)) + self.resource_id = str(uuid.uuid4()) + self.samples = [sample.Sample( + name='disk.root.size', + unit='GB', + type=sample.TYPE_GAUGE, + volume=2, + user_id='test_user', + project_id='test_project', + source='openstack', + timestamp='2012-05-08 20:23:48.028195', + resource_id=self.resource_id, + resource_metadata={ + 'host': 'foo', + 'image_ref': 'imageref!', + 'instance_flavor_id': 1234, + 'display_name': 'myinstance', + } + ), + sample.Sample( + name='disk.root.size', + unit='GB', + type=sample.TYPE_GAUGE, + volume=2, + user_id='test_user', + project_id='test_project', + source='openstack', + timestamp='2014-05-08 20:23:48.028195', + resource_id=self.resource_id, + resource_metadata={ + 'host': 'foo', + 'image_ref': 'imageref!', + 'instance_flavor_id': 1234, + 
'display_name': 'myinstance', + }, + ), + ] + + ks_client = mock.Mock(auth_token='fake_token') + ks_client.projects.find.return_value = mock.Mock( + name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') + self.useFixture(fixtures.MockPatch( + 'ceilometer.keystone_client.get_client', + return_value=ks_client)) + self.useFixture(fixtures.MockPatch( + 'gnocchiclient.v1.client.Client', + return_value=mock.Mock())) + self.ks_client = ks_client + + def test_config_load(self): + url = netutils.urlsplit("gnocchi://") + d = gnocchi.GnocchiPublisher(self.conf.conf, url) + names = [rd.cfg['resource_type'] for rd in d.resources_definition] + self.assertIn('instance', names) + self.assertIn('volume', names) + + def test_match(self): + resource = { + 'metrics': + ['image', 'image.size', 'image.download', 'image.serve'], + 'attributes': + {'container_format': 'resource_metadata.container_format', + 'disk_format': 'resource_metadata.disk_format', + 'name': 'resource_metadata.name'}, + 'event_delete': 'image.delete', + 'event_attributes': {'id': 'resource_id'}, + 'resource_type': 'image'} + plugin_manager = extension.ExtensionManager( + namespace='ceilometer.event.trait.trait_plugin') + rd = gnocchi.ResourcesDefinition( + resource, "high", "low", plugin_manager) + operation = rd.event_match("image.delete") + self.assertEqual('delete', operation) + + def test_metric_match(self): + pub = gnocchi.GnocchiPublisher(self.conf.conf, + netutils.urlsplit("gnocchi://")) + self.assertIn('image.size', pub.metric_map['image.size'].metrics) + + @mock.patch('ceilometer.publisher.gnocchi.LOG') + def test_broken_config_load(self, mylog): + contents = [("---\n" + "resources:\n" + " - resource_type: foobar\n"), + ("---\n" + "resources:\n" + " - resource_type: 0\n"), + ("---\n" + "resources:\n" + " - sample_types: ['foo', 'bar']\n"), + ("---\n" + "resources:\n" + " - sample_types: foobar\n" + " - resource_type: foobar\n"), + ] + + for content in contents: + if six.PY3: + content = 
content.encode('utf-8') + + temp = fileutils.write_to_tempfile(content=content, + prefix='gnocchi_resources', + suffix='.yaml') + self.addCleanup(os.remove, temp) + url = netutils.urlsplit( + "gnocchi://?resources_definition_file=" + temp) + d = gnocchi.GnocchiPublisher(self.conf.conf, url) + self.assertTrue(mylog.error.called) + self.assertEqual(0, len(d.resources_definition)) + + @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' + '._if_not_cached', mock.Mock()) + @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' + '.batch_measures') + def _do_test_activity_filter(self, expected_measures, fake_batch): + url = netutils.urlsplit("gnocchi://") + d = gnocchi.GnocchiPublisher(self.conf.conf, url) + d._already_checked_archive_policies = True + d.publish_samples(self.samples) + self.assertEqual(1, len(fake_batch.mock_calls)) + measures = fake_batch.mock_calls[0][1][0] + self.assertEqual( + expected_measures, + sum(len(m["measures"]) for rid in measures + for m in measures[rid].values())) + + def test_activity_filter_match_project_id(self): + self.samples[0].project_id = ( + 'a2d42c23-d518-46b6-96ab-3fba2e146859') + self._do_test_activity_filter(1) + + @mock.patch('ceilometer.publisher.gnocchi.LOG') + def test_activity_gnocchi_project_not_found(self, logger): + self.ks_client.projects.find.side_effect = ka_exceptions.NotFound + self._do_test_activity_filter(2) + logger.warning.assert_called_with('filtered project not found in ' + 'keystone, ignoring the ' + 'filter_project option') + + def test_activity_filter_match_swift_event(self): + self.samples[0].name = 'storage.api.request' + self.samples[0].resource_id = 'a2d42c23-d518-46b6-96ab-3fba2e146859' + self._do_test_activity_filter(1) + + def test_activity_filter_nomatch(self): + self._do_test_activity_filter(2) + + @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' + '.batch_measures') + def test_unhandled_meter(self, fake_batch): + samples = [sample.Sample( + name='unknown.meter', + 
unit='GB', + type=sample.TYPE_GAUGE, + volume=2, + user_id='test_user', + project_id='test_project', + source='openstack', + timestamp='2014-05-08 20:23:48.028195', + resource_id='randomid', + resource_metadata={} + )] + url = netutils.urlsplit("gnocchi://") + d = gnocchi.GnocchiPublisher(self.conf.conf, url) + d._already_checked_archive_policies = True + d.publish_samples(samples) + self.assertEqual(0, len(fake_batch.call_args[0][1])) + + +class MockResponse(mock.NonCallableMock): + def __init__(self, code): + text = {500: 'Internal Server Error', + 404: 'Not Found', + 204: 'Created', + 409: 'Conflict', + }.get(code) + super(MockResponse, self).__init__(spec=requests.Response, + status_code=code, + text=text) + + +class PublisherWorkflowTest(base.BaseTestCase, + testscenarios.TestWithScenarios): + + sample_scenarios = [ + ('cpu', dict( + sample=sample.Sample( + resource_id=str(uuid.uuid4()) + "_foobar", + name='cpu', + unit='ns', + type=sample.TYPE_CUMULATIVE, + volume=500, + user_id='test_user', + project_id='test_project', + source='openstack', + timestamp='2012-05-08 20:23:48.028195', + resource_metadata={ + 'host': 'foo', + 'image_ref': 'imageref!', + 'instance_flavor_id': 1234, + 'display_name': 'myinstance', + }, + ), + metric_attributes={ + "archive_policy_name": "ceilometer-low-rate", + "unit": "ns", + "measures": [{ + 'timestamp': '2012-05-08 20:23:48.028195', + 'value': 500 + }] + }, + postable_attributes={ + 'user_id': 'test_user', + 'project_id': 'test_project', + }, + patchable_attributes={ + 'host': 'foo', + 'image_ref': 'imageref!', + 'flavor_id': 1234, + 'display_name': 'myinstance', + }, + resource_type='instance')), + ('disk.root.size', dict( + sample=sample.Sample( + resource_id=str(uuid.uuid4()) + "_foobar", + name='disk.root.size', + unit='GB', + type=sample.TYPE_GAUGE, + volume=2, + user_id='test_user', + project_id='test_project', + source='openstack', + timestamp='2012-05-08 20:23:48.028195', + resource_metadata={ + 'host': 'foo', + 
'image_ref': 'imageref!', + 'instance_flavor_id': 1234, + 'display_name': 'myinstance', + }, + ), + metric_attributes={ + "archive_policy_name": "ceilometer-low", + "unit": "GB", + "measures": [{ + 'timestamp': '2012-05-08 20:23:48.028195', + 'value': 2 + }] + }, + postable_attributes={ + 'user_id': 'test_user', + 'project_id': 'test_project', + }, + patchable_attributes={ + 'host': 'foo', + 'image_ref': 'imageref!', + 'flavor_id': 1234, + 'display_name': 'myinstance', + }, + resource_type='instance')), + ('hardware.ipmi.node.power', dict( + sample=sample.Sample( + resource_id=str(uuid.uuid4()) + "_foobar", + name='hardware.ipmi.node.power', + unit='W', + type=sample.TYPE_GAUGE, + volume=2, + user_id='test_user', + project_id='test_project', + source='openstack', + timestamp='2012-05-08 20:23:48.028195', + resource_metadata={ + 'useless': 'not_used', + }, + ), + metric_attributes={ + "archive_policy_name": "ceilometer-low", + "unit": "W", + "measures": [{ + 'timestamp': '2012-05-08 20:23:48.028195', + 'value': 2 + }] + }, + postable_attributes={ + 'user_id': 'test_user', + 'project_id': 'test_project', + }, + patchable_attributes={ + }, + resource_type='ipmi')), + ] + + default_workflow = dict(resource_exists=True, + post_measure_fail=False, + create_resource_fail=False, + create_resource_race=False, + update_resource_fail=False, + retry_post_measures_fail=False) + workflow_scenarios = [ + ('normal_workflow', {}), + ('new_resource', dict(resource_exists=False)), + ('new_resource_compat', dict(resource_exists=False)), + ('new_resource_fail', dict(resource_exists=False, + create_resource_fail=True)), + ('new_resource_race', dict(resource_exists=False, + create_resource_race=True)), + ('resource_update_fail', dict(update_resource_fail=True)), + ('retry_fail', dict(resource_exists=False, + retry_post_measures_fail=True)), + ('measure_fail', dict(post_measure_fail=True)), + ] + + @classmethod + def generate_scenarios(cls): + workflow_scenarios = [] + for name, wf_change 
in cls.workflow_scenarios: + wf = cls.default_workflow.copy() + wf.update(wf_change) + workflow_scenarios.append((name, wf)) + cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, + workflow_scenarios) + + def setUp(self): + super(PublisherWorkflowTest, self).setUp() + conf = ceilometer_service.prepare_service(argv=[], config_files=[]) + self.conf = self.useFixture(config_fixture.Config(conf)) + ks_client = mock.Mock() + ks_client.projects.find.return_value = mock.Mock( + name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') + self.useFixture(fixtures.MockPatch( + 'ceilometer.keystone_client.get_client', + return_value=ks_client)) + self.ks_client = ks_client + + @mock.patch('gnocchiclient.v1.client.Client') + def test_event_workflow(self, fakeclient_cls): + url = netutils.urlsplit("gnocchi://") + self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) + + fakeclient = fakeclient_cls.return_value + + fakeclient.resource.search.side_effect = [ + [{"id": "b26268d6-8bb5-11e6-baff-00224d8226cd", + "type": "instance_disk", + "instance_id": "9f9d01b9-4a58-4271-9e27-398b21ab20d1"}], + [{"id": "b1c7544a-8bb5-11e6-850e-00224d8226cd", + "type": "instance_network_interface", + "instance_id": "9f9d01b9-4a58-4271-9e27-398b21ab20d1"}], + ] + + search_params = { + '=': {'instance_id': '9f9d01b9-4a58-4271-9e27-398b21ab20d1'} + } + + now = timeutils.utcnow() + self.useFixture(utils_fixture.TimeFixture(now)) + + expected_calls = [ + mock.call.resource.search('instance_network_interface', + search_params), + mock.call.resource.search('instance_disk', search_params), + mock.call.resource.update( + 'instance', '9f9d01b9-4a58-4271-9e27-398b21ab20d1', + {'ended_at': now.isoformat()}), + mock.call.resource.update( + 'instance_disk', + 'b26268d6-8bb5-11e6-baff-00224d8226cd', + {'ended_at': now.isoformat()}), + mock.call.resource.update( + 'instance_network_interface', + 'b1c7544a-8bb5-11e6-850e-00224d8226cd', + {'ended_at': now.isoformat()}), + 
mock.call.resource.update( + 'image', 'dc337359-de70-4044-8e2c-80573ba6e577', + {'ended_at': now.isoformat()}), + mock.call.resource.update( + 'volume', '6cc6e7dd-d17d-460f-ae79-7e08a216ce96', + {'ended_at': now.isoformat()}), + mock.call.resource.update( + 'network', '705e2c08-08e8-45cb-8673-5c5be955569b', + {'ended_at': now.isoformat()}) + ] + + self.publisher.publish_events([INSTANCE_DELETE_START, + IMAGE_DELETE_START, + VOLUME_DELETE_START, + FLOATINGIP_DELETE_END]) + self.assertEqual(8, len(fakeclient.mock_calls)) + for call in expected_calls: + self.assertIn(call, fakeclient.mock_calls) + + @mock.patch('ceilometer.publisher.gnocchi.LOG') + @mock.patch('gnocchiclient.v1.client.Client') + def test_workflow(self, fakeclient_cls, logger): + + fakeclient = fakeclient_cls.return_value + + resource_id = self.sample.resource_id.replace("/", "_") + metric_name = self.sample.name + gnocchi_id = uuid.uuid4() + + expected_calls = [ + mock.call.archive_policy.get("ceilometer-low"), + mock.call.archive_policy.get("ceilometer-low-rate"), + mock.call.metric.batch_resources_metrics_measures( + {resource_id: {metric_name: self.metric_attributes}}, + create_metrics=True) + ] + expected_debug = [ + mock.call('filtered project found: %s', + 'a2d42c23-d518-46b6-96ab-3fba2e146859'), + ] + + measures_posted = False + batch_side_effect = [] + if self.post_measure_fail: + batch_side_effect += [Exception('boom!')] + elif not self.resource_exists: + batch_side_effect += [ + gnocchi_exc.BadRequest( + 400, {"cause": "Unknown resources", + 'detail': [{ + 'resource_id': gnocchi_id, + 'original_resource_id': resource_id}]})] + + attributes = self.postable_attributes.copy() + attributes.update(self.patchable_attributes) + attributes['id'] = self.sample.resource_id + expected_calls.append(mock.call.resource.create( + self.resource_type, attributes)) + + if self.create_resource_fail: + fakeclient.resource.create.side_effect = [Exception('boom!')] + elif self.create_resource_race: + 
fakeclient.resource.create.side_effect = [ + gnocchi_exc.ResourceAlreadyExists(409)] + else: # not resource_exists + expected_debug.append(mock.call( + 'Resource %s created', self.sample.resource_id)) + + if not self.create_resource_fail: + expected_calls.append( + mock.call.metric.batch_resources_metrics_measures( + {resource_id: {metric_name: self.metric_attributes}}, + create_metrics=True) + ) + + if self.retry_post_measures_fail: + batch_side_effect += [Exception('boom!')] + else: + measures_posted = True + + else: + measures_posted = True + + if measures_posted: + batch_side_effect += [None] + expected_debug.append( + mock.call("%d measures posted against %d metrics through %d " + "resources", len(self.metric_attributes["measures"]), + 1, 1) + ) + + if self.patchable_attributes: + expected_calls.append(mock.call.resource.update( + self.resource_type, resource_id, + self.patchable_attributes)) + if self.update_resource_fail: + fakeclient.resource.update.side_effect = [Exception('boom!')] + else: + expected_debug.append(mock.call( + 'Resource %s updated', self.sample.resource_id)) + + batch = fakeclient.metric.batch_resources_metrics_measures + batch.side_effect = batch_side_effect + + url = netutils.urlsplit("gnocchi://") + publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) + publisher.publish_samples([self.sample]) + + # Check that the last log message is the expected one + if (self.post_measure_fail + or self.create_resource_fail + or self.retry_post_measures_fail + or (self.update_resource_fail and self.patchable_attributes)): + logger.error.assert_called_with('boom!', exc_info=True) + else: + self.assertEqual(0, logger.error.call_count) + self.assertEqual(expected_calls, fakeclient.mock_calls) + self.assertEqual(expected_debug, logger.debug.mock_calls) + + +PublisherWorkflowTest.generate_scenarios() diff --git a/ceilometer/tests/unit/publisher/test_utils.py b/ceilometer/tests/unit/publisher/test_utils.py index 767dd541..a882b964 100644 --- 
a/ceilometer/tests/unit/publisher/test_utils.py +++ b/ceilometer/tests/unit/publisher/test_utils.py @@ -14,7 +14,8 @@ # under the License. """Tests for ceilometer/publisher/utils.py """ -from oslo_serialization import jsonutils +import json + from oslotest import base from ceilometer.publisher import utils @@ -104,7 +105,7 @@ def test_verify_signature_nested_json(self): data['message_signature'] = utils.compute_signature( data, 'not-so-secret') - jsondata = jsonutils.loads(jsonutils.dumps(data)) + jsondata = json.loads(json.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_verify_unicode_symbols(self): @@ -114,7 +115,7 @@ def test_verify_unicode_symbols(self): data['message_signature'] = utils.compute_signature( data, 'not-so-secret') - jsondata = jsonutils.loads(jsonutils.dumps(data)) + jsondata = json.loads(json.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_verify_no_secret(self): diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 5a152f81..cecce6a9 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -301,13 +301,7 @@ function _ceilometer_configure_storage_backend { # NOTE(gordc): set batching to better handle recording on a slow machine iniset $CEILOMETER_CONF collector batch_size 50 iniset $CEILOMETER_CONF collector batch_timeout 5 - iniset $CEILOMETER_CONF dispatcher_gnocchi archive_policy ${GNOCCHI_ARCHIVE_POLICY} - if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "True" - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_project "gnocchi_swift" - else - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "False" - fi + sed -i "s/gnocchi:\/\//gnocchi:\/\/?archive_policy=${GNOCCHI_ARCHIVE_POLICY}\&filter_project=gnocchi_swift/" $CEILOMETER_CONF_DIR/event_pipeline.yaml $CEILOMETER_CONF_DIR/pipeline.yaml else die $LINENO "Unable to configure unknown 
CEILOMETER_BACKEND $CEILOMETER_BACKEND" fi diff --git a/doc/source/admin/telemetry-measurements.rst b/doc/source/admin/telemetry-measurements.rst index 16bc922b..1fafade3 100644 --- a/doc/source/admin/telemetry-measurements.rst +++ b/doc/source/admin/telemetry-measurements.rst @@ -389,17 +389,13 @@ above table is the following: .. note:: - To enable the libvirt ``memory.usage`` support, you need to install - libvirt version 1.1.1+, QEMU version 1.5+, and you also need to - prepare suitable balloon driver in the image. It is applicable - particularly for Windows guests, most modern Linux distributions - already have it built in. Telemetry is not able to fetch the - ``memory.usage`` samples without the image balloon driver. + If storing data in Gnocchi, derived rate_of_change metrics are also + computed using Gnocchi in addition to Ceilometer transformers. It avoids + missing data when Ceilometer services restart. + To minimize Ceilometer memory requirements transformers can be disabled. + These ``rate_of_change`` meters are deprecated and will be removed in + default Ceilometer configuration in future release. -.. note:: - - To enable libvirt ``disk.*`` support when running on RBD-backed shared - storage, you need to install libvirt version 1.2.16+. OpenStack Compute is capable of collecting ``CPU`` related meters from the compute host machines. In order to use that you need to set the diff --git a/doc/source/contributor/install/custom.rst b/doc/source/contributor/install/custom.rst new file mode 100644 index 00000000..1dfcc994 --- /dev/null +++ b/doc/source/contributor/install/custom.rst @@ -0,0 +1,149 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +.. _customizing_deployment: + +=================================== + Customizing Ceilometer Deployment +=================================== + +Notifications queues +==================== + +.. index:: + double: customizing deployment; notifications queues; multiple topics + +By default, Ceilometer consumes notifications on the messaging bus sent to +**topics** by using a queue/pool name that is identical to the +topic name. You shouldn't have different applications consuming messages from +this queue. If you want to also consume the topic notifications with a system +other than Ceilometer, you should configure a separate queue that listens for +the same messages. + +Ceilometer allows multiple topics to be configured so that the polling agent can +send the same messages of notifications to other queues. Notification agents +also use **topics** to configure which queue to listen for. If +you use multiple topics, you should configure notification agent and polling +agent separately, otherwise Ceilometer collects duplicate samples. + +By default, the ceilometer.conf file is as follows:: + + [oslo_messaging_notifications] + topics = notifications + +To use multiple topics, you should give ceilometer-agent-notification and +ceilometer-polling services different ceilometer.conf files. The Ceilometer +configuration file ceilometer.conf is normally locate in the /etc/ceilometer +directory. 
Make changes according to your requirements which may look like +the following:: + +For notification agent using ceilometer-notification.conf, settings like:: + + [oslo_messaging_notifications] + topics = notifications,xxx + +For polling agent using ceilometer-polling.conf, settings like:: + + [oslo_messaging_notifications] + topics = notifications,foo + +.. note:: + + notification_topics in ceilometer-notification.conf should only have one same + topic in ceilometer-polling.conf + +Doing this, it's easy to listen/receive data from multiple internal and external services. + +.. _publisher-configuration: + +Using multiple publishers +========================= + +.. index:: + double: customizing deployment; multiple publishers + +Ceilometer allows multiple publishers to be configured in pipeline so that +data can be easily sent to multiple internal and external systems. Ceilometer +allows to set two types of pipelines. One is ``pipeline.yaml`` which is for +meters, another is ``event_pipeline.yaml`` which is for events. + +By default, Ceilometer only saves event and meter data into Gnocchi_. If you +want Ceilometer to send data to other systems, instead of or in addition to +the default storage services, multiple publishers can be enabled by modifying +the Ceilometer pipeline. + +Ceilometer ships multiple publishers currently. They are ``database``, +``notifier``, ``file``, ``http`` and ``gnocchi`` publishers. + +.. _Gnocchi: http://gnocchi.xyz + +To configure one or multiple publishers for Ceilometer, find the Ceilometer +configuration file ``pipeline.yaml`` and/or ``event_pipeline.yaml`` which is +normally located at /etc/ceilometer directory and make changes accordingly. +Your configuration file can be in a different directory. + +For the Gnocchi publisher, the archive policy can be defined as a configuration +settings. The value specified for ``archive_policy`` should correspond to the +name of an ``archive_policy`` configured within Gnocchi. 
+ +To use multiple publishers, add multiple publisher lines in ``pipeline.yaml`` and/or +``event_pipeline.yaml`` file like the following:: + + --- + sources: + - name: source_name + events: + - "*" + sinks: + - sink_name + sinks: + - name: sink_name + transformers: + publishers: + - database:// + - gnocchi://?archive_policy=low + - file:// + + +For the Gnocchi publisher backed by Swift storage, the following additional +configuration settings should be added:: + + [dispatcher_gnocchi] + filter_project = gnocchi_swift + filter_service_activity = True + +Custom pipeline +=============== + +The paths of all pipeline files including ``pipeline.yaml`` and ``event_pipeline.yaml`` +are located to ceilometer/pipeline/data by default. And it's possible to set the +path through ``pipeline_cfg_file`` being assigned to another one in ``ceilometer.conf``. + +Ceilometer allow users to customize pipeline files. Before that, copy the following +yaml files:: + + $ cp ceilometer/pipeline/data/*.yaml /etc/ceilometer + +Then you can add configurations according to the former section. + +Efficient polling +================= + +- There is an optional config called ``shuffle_time_before_polling_task`` + in ceilometer.conf. Enable this by setting an integer greater than zero to + shuffle polling time for agents. This will add some random jitter to the time + of sending requests to Nova or other components to avoid large number of + requests in a short time period. +- There is an option to stream samples to minimise latency (at the + expense of load) by setting ``batch_polled_samples`` to ``False`` in + ``ceilometer.conf``. diff --git a/doc/source/contributor/install/manual.rst b/doc/source/contributor/install/manual.rst new file mode 100644 index 00000000..e76bc9aa --- /dev/null +++ b/doc/source/contributor/install/manual.rst @@ -0,0 +1,255 @@ +.. 
+ Copyright 2012 Nicolas Barcet for Canonical + 2013 New Dream Network, LLC (DreamHost) + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +.. _installing_manually: + +===================== + Installing Manually +===================== + +.. note:: + + The Ceilometer collector service is deprecated. Configure dispatchers under publisher + in pipeline to push data instead. For more details about how to configure + publishers, see :ref:`publisher-configuration`. + +Storage Backend Installation +============================ + + +Gnocchi +------- + +#. Follow `Gnocchi installation`_ instructions + +#. Edit `/etc/ceilometer/ceilometer.conf` for the collector service: + + * With Keystone authentication enabled:: + + [service_credentials] + auth_url = <auth_url>:5000 + region_name = RegionOne + password = password + username = ceilometer + project_name = service + project_domain_id = default + user_domain_id = default + auth_type = password + + * In some cases, it is possible to disable keystone authentication for + Gnocchi to remove the overhead of token creation/verification when request + authentication doesn't matter. This will increase the performance of + Gnocchi:: + + [gnocchi] + auth_section=service_credentials_gnocchi + + [service_credentials_gnocchi] + auth_type=gnocchi-noauth + roles = admin + user_id = <ceilometer_user_id> + project_id = <ceilometer_project_id> + endpoint = <gnocchi_endpoint> + +#. Copy gnocchi_resources.yaml to the config directory (e.g. /etc/ceilometer) + +#. 
Initialize Gnocchi database by creating ceilometer resources:: + + ceilometer-upgrade --skip-metering-database + +#. To minimize data requests, caching and batch processing should be enabled: + + 1. Enable resource caching (oslo.cache_ should be installed):: + + [cache] + backend_argument = redis_expiration_time:600 + backend_argument = db:0 + backend_argument = distributed_lock:True + backend_argument = url:redis://localhost:6379 + backend = dogpile.cache.redis + + 2. Enable batch processing:: + + [notification] + batch_size = 100 + batch_timeout = 5 + +#. Start notification service + +.. _oslo.cache: https://docs.openstack.org/oslo.cache/latest/configuration/index.html +.. _`Gnocchi installation`: http://gnocchi.xyz/install.html + + +Installing the notification agent +================================= + +.. index:: + double: installing; agent-notification + +1. Clone the ceilometer git repository to the management server:: + + $ cd /opt/stack + $ git clone https://git.openstack.org/openstack/ceilometer.git + +2. As a user with ``root`` permissions or ``sudo`` privileges, run the + ceilometer installer:: + + $ cd ceilometer + $ sudo python setup.py install + +3. Generate configuration file:: + + $ tox -egenconfig + +4. Copy the sample configuration files from the source tree + to their final location:: + + $ mkdir -p /etc/ceilometer + $ cp etc/ceilometer/ceilometer.conf /etc/ceilometer + $ cp ceilometer/pipeline/data/*.yaml /etc/ceilometer + +5. Edit ``/etc/ceilometer/ceilometer.conf`` + + 1. Configure messaging:: + + [oslo_messaging_notifications] + topics = notifications + + [oslo_messaging_rabbit] + rabbit_userid = stackrabbit + rabbit_password = openstack1 + rabbit_hosts = 10.0.2.15 + + 2. Set the ``telemetry_secret`` value. + + Set the ``telemetry_secret`` value to a large, random, value. Use + the same value in all ceilometer configuration files, on all + nodes, so that messages passing between the nodes can be + validated. 
This value can be left empty to disable message signing. + + .. note:: + + Disabling signing will improve message handling performance. + + Refer to :doc:`/configuration` for details about any other options + you might want to modify before starting the service. + +6. Edit ``/etc/ceilometer/ceilometer.conf``: + + Change publisher endpoints to expected targets. By default, it pushes to a + `metering.sample` topic on the oslo.messaging queue. Available publishers + are listed in the :ref:`pipeline-publishers` section. + +7. Start the notification daemon:: + + $ ceilometer-agent-notification + + .. note:: + + The default development configuration of the notification logs to + stderr, so you may want to run this step using a screen session + or other tool for maintaining a long-running program in the + background. + + +Installing the Polling Agent +============================ + +.. index:: + double: installing; agent + +.. note:: + + The polling agent needs to be able to talk to Keystone and any of + the services being polled for updates. It also needs to run on your compute + nodes to poll instances. + +1. Clone the ceilometer git repository to the server:: + + $ cd /opt/stack + $ git clone https://git.openstack.org/openstack/ceilometer.git + +2. As a user with ``root`` permissions or ``sudo`` privileges, run the + ceilometer installer:: + + $ cd ceilometer + $ sudo python setup.py install + +3. Generate configuration file:: + + $ tox -egenconfig + +4. Copy the sample configuration files from the source tree + to their final location:: + + $ mkdir -p /etc/ceilometer + $ cp etc/ceilometer/ceilometer.conf /etc/ceilometer/ceilometer.conf + $ cp ceilometer/pipeline/data/*.yaml /etc/ceilometer + +5. Configure messaging by editing ``/etc/ceilometer/ceilometer.conf``:: + + [oslo_messaging_rabbit] + rabbit_userid = stackrabbit + rabbit_password = openstack1 + rabbit_hosts = 10.0.2.15 + +6. 
In order to retrieve object store statistics, ceilometer needs + access to swift with ``ResellerAdmin`` role. You should give this + role to your ``os_username`` user for tenant ``os_tenant_name``:: + + $ openstack role create ResellerAdmin + +-----------+----------------------------------+ + | Field | Value | + +-----------+----------------------------------+ + | domain_id | None | + | id | f5153dae801244e8bb4948f0a6fb73b7 | + | name | ResellerAdmin | + +-----------+----------------------------------+ + + $ openstack role add f5153dae801244e8bb4948f0a6fb73b7 \ + --project $SERVICE_TENANT \ + --user $CEILOMETER_USER + +7. Start the agent:: + + $ ceilometer-polling + +8. By default, the polling agent polls the `compute` and `central` namespaces. + You can specify which namespace to poll in the `ceilometer.conf` + configuration file or on the command line:: + + $ ceilometer-polling --polling-namespaces central,ipmi + + +Installing the API Server +========================= + +.. index:: + double: installing; API + +.. note:: + + The Ceilometer's API service is no longer supported. Data storage should be + handled by a separate service such as Gnocchi. + + +Enabling Service Notifications +============================== + +See the `install guide`_ for instructions on how to enable meters for specific +OpenStack services. + +.. _`install guide`: https://docs.openstack.org/project-install-guide/telemetry/draft/install-controller.html diff --git a/doc/source/install/install-base-config-common.inc b/doc/source/install/install-base-config-common.inc index 849921cb..a6512003 100644 --- a/doc/source/install/install-base-config-common.inc +++ b/doc/source/install/install-base-config-common.inc @@ -1,15 +1,15 @@ -2. Edit the ``/etc/ceilometer/ceilometer.conf`` file and complete +2. Edit the ``/etc/ceilometer/pipeline.yaml`` file and complete the following actions: * Configure Gnocchi connection: - .. code-block:: ini + .. 
code-block:: yaml - [dispatcher_gnocchi] - # filter out Gnocchi-related activity meters (Swift driver) - filter_service_activity = False - # default metric storage archival policy - archive_policy = low + publishers: + # set address of Gnocchi + # + filter out Gnocchi-related activity meters (Swift driver) + # + set default archive policy + - gnocchi://?filter_project=service&archive_policy=low * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: diff --git a/etc/ceilometer/controller.yaml b/etc/ceilometer/controller.yaml deleted file mode 100644 index 3ba5c76d..00000000 --- a/etc/ceilometer/controller.yaml +++ /dev/null @@ -1,108 +0,0 @@ -sources: - - name: meter_source - meters: - - "*" - sinks: - - meter_sink - - csv_sink - - name: cpu_source - meters: - - "cpu" - sinks: - - cpu_sink - - cpu_delta_sink - - vcpu_sink - - csv_sink - - name: disk_source - meters: - - "disk.read.bytes" - - "disk.read.requests" - - "disk.write.bytes" - - "disk.write.requests" - - "disk.device.read.bytes" - - "disk.device.read.requests" - - "disk.device.write.bytes" - - "disk.device.write.requests" - sinks: - - disk_sink - - csv_sink - - name: network_source - meters: - - "network.incoming.bytes" - - "network.incoming.packets" - - "network.outgoing.bytes" - - "network.outgoing.packets" - sinks: - - network_sink - - csv_sink -sinks: - - name: meter_sink - transformers: - publishers: - - notifier:// - - name: cpu_sink - transformers: - - name: "rate_of_change" - parameters: - target: - name: "cpu_util" - unit: "%" - type: "gauge" - max: 100 - scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" - publishers: - - notifier:// - - name: cpu_delta_sink - transformers: - - name: "delta" - parameters: - target: - name: "cpu.delta" - growth_only: True - publishers: - - notifier:// - - name: vcpu_sink - transformers: - - name: "rate_of_change" - parameters: - target: - name: "vcpu_util" - unit: "%" - type: "gauge" - max: 100 - scale: "100.0 / (10**9 * 
(resource_metadata.vcpu_number or 1))" - publishers: - - notifier:// - - name: disk_sink - transformers: - - name: "rate_of_change" - parameters: - source: - map_from: - name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)" - unit: "(B|request)" - target: - map_to: - name: "\\1.\\2.\\3.rate" - unit: "\\1/s" - type: "gauge" - publishers: - - notifier:// - - name: network_sink - transformers: - - name: "rate_of_change" - parameters: - source: - map_from: - name: "network\\.(incoming|outgoing)\\.(bytes|packets)" - unit: "(B|packet)" - target: - map_to: - name: "network.\\1.\\2.rate" - unit: "\\1/s" - type: "gauge" - publishers: - - notifier:// - - name: csv_sink - publishers: - - csvfile:///opt/cgcs/ceilometer/csv/pm.csv?max_bytes=10000000&backup_count=5&compress=True&enabled=True diff --git a/etc/ceilometer/polling.yaml b/etc/ceilometer/polling.yaml index 6e8fffb8..0175c485 100644 --- a/etc/ceilometer/polling.yaml +++ b/etc/ceilometer/polling.yaml @@ -1,68 +1,26 @@ --- sources: - - name: instance_pollster - interval: 600 + - name: some_pollsters + interval: 300 meters: + - cpu + - memory.usage + - network.incoming.bytes + - network.incoming.packets + - network.outgoing.bytes + - network.outgoing.packets - disk.read.bytes - - disk.read.bytes.rate - disk.read.requests - - disk.read.requests.rate - disk.write.bytes - - disk.write.bytes.rate - disk.write.requests - - disk.write.requests.rate - - disk.capacity - - disk.allocation - - disk.usage - - name: instance_cpu_pollster - interval: 30 - meters: - - cpu - - name: instance_disk_pollster - interval: 600 - meters: - - disk.device.read.requests - - disk.device.read.requests.rate - - disk.device.write.requests - - disk.device.write.requests.rate - - disk.device.read.bytes - - disk.device.read.bytes.rate - - disk.device.write.bytes - - disk.device.write.bytes.rate - - disk.device.capacity - - disk.device.allocation - - disk.device.usage - - name: ipmi_pollster - interval: 600 - meters: - - hardware.ipmi.node.power 
- - hardware.ipmi.node.temperature - - hardware.ipmi.node.outlet_temperature - - hardware.ipmi.node.airflow - - hardware.ipmi.node.cups - - hardware.ipmi.node.cpu_util - - hardware.ipmi.node.mem_util - - hardware.ipmi.node.io_util - - hardware.ipmi.temperature - - hardware.ipmi.voltage - - hardware.ipmi.current - - hardware.ipmi.fan - - name: ceph_pollster - interval: 600 - meters: - - radosgw.objects - - radosgw.objects.size - - radosgw.objects.containers - - radosgw.api.request - - radosgw.containers.objects - - radosgw.containers.objects.size - - name: image_pollster - interval: 600 - meters: - - image.size - - name: volume_pollster - interval: 600 - meters: - - volume.size - - volume.snapshot.size - - volume.backup.size + - hardware.cpu.util + - hardware.memory.used + - hardware.memory.total + - hardware.memory.buffer + - hardware.memory.cached + - hardware.memory.swap.avail + - hardware.memory.swap.total + - hardware.system_stats.io.outgoing.blocks + - hardware.system_stats.io.incoming.blocks + - hardware.network.ip.incoming.datagrams + - hardware.network.ip.outgoing.datagrams diff --git a/releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml b/releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml new file mode 100644 index 00000000..2178f217 --- /dev/null +++ b/releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml @@ -0,0 +1,6 @@ +--- +issues: + - | + Ceilometer created metrics that could never get measures depending on the + polling configuration. Metrics are now created only if Ceilometer gets at + least a measure for them. 
diff --git a/releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml b/releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml new file mode 100644 index 00000000..5f1003cb --- /dev/null +++ b/releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + The Gnocchi dispatcher has been removed and replaced by a native Gnocchi + publisher. The configuration options from the `[dispatcher_gnocchi]` section have + been removed and should be passed via the URL in `pipeline.yaml`. The + service authentication override can be done by adding specific credentials + to a `[gnocchi]` section instead. diff --git a/releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml b/releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml new file mode 100644 index 00000000..b29c6799 --- /dev/null +++ b/releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml @@ -0,0 +1,18 @@ +--- +features: + - | + Archive policies can now be configured per metric in gnocchi_resources.yaml. + A default list of archive policies is now created by Ceilometer. + They are called "ceilometer-low-rate" for all IOs metrics and "ceilometer-low" + for others. +upgrade: + - | + Ceilometer now creates its own archive policies in Gnocchi and uses them to + create metrics in Gnocchi. Old metrics keep their current archive policies + and will not be updated with ceilometer-upgrade. Only newly created metrics + will be impacted. Archive policy can still be overridden with the publisher URL + (e.g.: gnocchi://?archive_policy=high). +deprecations: + - | + cpu_util and \*.rate meters are deprecated and will be removed in a future + release in favor of the Gnocchi rate calculation equivalent. 
diff --git a/requirements.txt b/requirements.txt index b352b9f0..04ab6bcc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,7 +27,6 @@ pbr>=1.6 # Apache-2.0 pecan>=1.0.0 # BSD oslo.messaging>=5.12.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 -oslo.serialization>=1.10.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 pysnmp<5.0.0,>=4.2.3 # BSD python-glanceclient>=2.0.0 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg index 44c5ca94..64aec164 100644 --- a/setup.cfg +++ b/setup.cfg @@ -247,7 +247,7 @@ ceilometer.sample.publisher = kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher - gnocchi = ceilometer.publisher.direct:DirectPublisher + gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher database = ceilometer.publisher.direct:DirectPublisher file_alt = ceilometer.publisher.direct:DirectPublisher http_alt = ceilometer.publisher.direct:DirectPublisher @@ -261,7 +261,7 @@ ceilometer.event.publisher = kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher - gnocchi = ceilometer.publisher.direct:DirectPublisher + gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher database = ceilometer.publisher.direct:DirectPublisher file_alt = ceilometer.publisher.direct:DirectPublisher http_alt = ceilometer.publisher.direct:DirectPublisher @@ -290,12 +290,10 @@ ceilometer.dispatcher.meter = database = ceilometer.dispatcher.database:MeterDatabaseDispatcher file = ceilometer.dispatcher.file:FileDispatcher http = ceilometer.dispatcher.http:HttpDispatcher - gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher ceilometer.dispatcher.event = file = ceilometer.dispatcher.file:FileDispatcher http = ceilometer.dispatcher.http:HttpDispatcher - gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher network.statistics.drivers = opendaylight = 
ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver diff --git a/tools/migrate_data_to_gnocchi.py b/tools/migrate_data_to_gnocchi.py index 5e87765f..27c87d97 100755 --- a/tools/migrate_data_to_gnocchi.py +++ b/tools/migrate_data_to_gnocchi.py @@ -37,7 +37,7 @@ from oslo_db import options as db_options from oslo_log import log -from ceilometer.dispatcher import gnocchi +from ceilometer.publisher import gnocchi from ceilometer import service from ceilometer import storage from ceilometer.storage import impl_mongodb @@ -60,7 +60,7 @@ def get_parser(): parser.add_argument( '--ceilometer-config-file', help="The config file of ceilometer, it is main used for gnocchi " - "dispatcher to init gnocchiclient with the service credentials " + "publisher to init gnocchiclient with the service credentials " "defined in the ceilometer config file. Default as " "/etc/ceilometer/ceilometer.conf", ) @@ -144,7 +144,7 @@ def main(): if args.end_timestamp: time_filters.append({"<": {'timestamp': args.end_timestamp}}) - gnocchi_dispatcher = gnocchi.GnocchiDispatcher(gnocchi_conf) + gnocchi_publisher = gnocchi.GnocchiPublisher(gnocchi_conf, "gnocchi://") batch_size = args.batch_migration_size if total_amount == 'Unknown': @@ -181,7 +181,7 @@ def main(): sample.counter_name, sample.resource_id)) samples_dict = [sample.as_dict() for sample in samples] - gnocchi_dispatcher.record_metering_data(samples_dict) + gnocchi_publisher.publish_samples(samples_dict) length = len(samples) migrated_amount += length if pbar: