Commit

Merge pull request ManageIQ#16017 from Ladas/add_miq_postgres_legacy_adapter_and_proper_specs_

Add miq postgres legacy adapter and proper specs

agrare committed Oct 5, 2017
2 parents cd2b42d + 47f4b25, commit 56745db
Showing 2 changed files with 253 additions and 51 deletions.
@@ -0,0 +1,87 @@
require 'active_metrics/connection_adapters/abstract_adapter'

module ActiveMetrics
  module ConnectionAdapters
    class MiqPostgresLegacyAdapter < AbstractAdapter
      include Vmdb::Logging

      # TODO: Use the actual configuration from the initializer or whatever
      def self.create_connection(_config)
        ActiveRecord::Base.connection
      end

      def write_multiple(*metrics)
        metrics.flatten!

        flatten_metrics(metrics).each do |interval_name, by_resource|
          by_resource.each do |resource, by_timestamp|
            start_time = by_timestamp.keys.min
            end_time = by_timestamp.keys.max
            data = by_timestamp.values
            write_rows(interval_name, resource, start_time, end_time, data)
          end
        end

        metrics
      end

      private

      def flatten_metrics(metrics)
        {}.tap do |index|
          metrics.each do |m|
            interval_name = m.fetch_path(:tags, :capture_interval_name)
            resource = m[:resource] || m.fetch_path(:tags, :resource_type).safe_constantize.find(m.fetch_path(:tags, :resource_id))
            fields = index.fetch_path(interval_name, resource, m[:timestamp]) ||
                     m[:tags].symbolize_keys.except(:resource_type, :resource_id).merge(:timestamp => m[:timestamp])
            fields[m[:metric_name].to_sym] = m[:value]
            index.store_path(interval_name, resource, m[:timestamp], fields)
          end
        end
      end
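
      # Illustrative shape of the index built by flatten_metrics, inferred from
      # the fetch_path/store_path calls above (the vm resource, ts timestamp,
      # and values are examples, not part of the commit):
      #
      #   { "realtime" => { vm => { ts => { :capture_interval_name => "realtime",
      #                                     :timestamp => ts,
      #                                     :cpu_usage_rate_average => 0.7 } } } }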

      def write_rows(interval_name, resource, start_time, end_time, data)
        log_header = "[#{interval_name}]"

        # Read all the existing perfs for this time range to speed up lookups
        obj_perfs, = Benchmark.realtime_block(:db_find_prev_perfs) do
          Metric::Finders.hash_by_capture_interval_name_and_timestamp(resource, start_time, end_time, interval_name)
        end

        _klass, meth = Metric::Helper.class_and_association_for_interval_name(interval_name)

        # Create or update the performance rows from the hashes
        _log.info("#{log_header} Processing #{data.length} performance rows...")
        a = u = 0
        data.each do |v|
          ts = v[:timestamp]
          perf = nil

          Benchmark.realtime_block(:process_perfs) do
            perf = obj_perfs.fetch_path(interval_name, ts)
            perf ||= obj_perfs.store_path(interval_name, ts, resource.send(meth).build(:resource_name => resource.name))
            perf.new_record? ? a += 1 : u += 1

            v.reverse_merge!(perf.attributes.symbolize_keys)
            v.delete("id") # Remove protected attributes
            v.merge!(Metric::Processing.process_derived_columns(resource, v, interval_name == 'realtime' ? Metric::Helper.nearest_hourly_timestamp(ts) : nil))
          end

          # TODO: Should we change this into a single metrics.push like we do in ems_refresh?
          Benchmark.realtime_block(:process_perfs_db) { perf.update_attributes(v) }

          if interval_name == 'hourly'
            Benchmark.realtime_block(:process_perfs_tag) { VimPerformanceTagValue.build_from_performance_record(perf) }
          end
        end

        if interval_name == 'hourly'
          _log.info("#{log_header} Adding missing timestamp intervals...")
          Benchmark.realtime_block(:add_missing_intervals) { Metric::Processing.add_missing_intervals(resource, "hourly", start_time, end_time) }
          _log.info("#{log_header} Adding missing timestamp intervals...Complete")
        end

        _log.info("#{log_header} Processing #{data.length} performance rows...Complete - Added #{a} / Updated #{u}")
      end
    end
  end
end
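
For context, a sketch of how this adapter could be exercised directly; it is not part of the commit. The metric-hash keys come from flatten_metrics above, and the config hash shape comes from the mock_adapter helper in the specs below; the vm resource and all values are illustrative assumptions.

    # A minimal sketch, assuming an existing vm resource; values are illustrative.
    config     = {:adapter => "miq_postgres_legacy", :database => "manageiq_metrics"}
    adapter    = ActiveMetrics::ConnectionAdapters::MiqPostgresLegacyAdapter
    connection = adapter.new(adapter.create_connection(config))

    # One metric hash; write_multiple also accepts an array of these.
    metric = {
      :timestamp   => Time.now.utc,
      :metric_name => "cpu_usage_rate_average",
      :value       => 42.0,
      :resource    => vm, # alternatively pass :resource_type and :resource_id inside :tags
      :tags        => {:capture_interval_name => "realtime"}
    }

    connection.write_multiple(metric)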
spec/models/metric/ci_mixin/capture_spec.rb (217 changes: 166 additions & 51 deletions)
@@ -23,10 +23,10 @@
     $log = @orig_log
   end

-  describe "#perf_capture_realtime" do
+  describe "#perf_capture_realtime integration tests" do
     let(:expected_stats_period_start) { parse_datetime('2013-08-28T11:01:20Z') }
     let(:expected_stats_period_end) { parse_datetime('2013-08-28T12:41:40Z') }
     let(:expected_timestamps) { (expected_stats_period_start + 20.seconds..expected_stats_period_end).step_value(20.seconds) }

     def capture_data(second_collection_period_start, collection_overlap_period)
       # 1st collection period, save all metrics
@@ -55,7 +55,7 @@ def capture_data(second_collection_period_start, collection_overlap_period)
       vm.perf_capture_realtime(Time.parse(second_collection_period_start).utc, Time.parse('2013-08-28T14:02:00Z').utc)

       @metrics_by_ts = {}
-      vm.metrics.each do |x|
+      vm.metrics.reload.each do |x|
         @metrics_by_ts[x.timestamp.iso8601] = x
       end

@@ -69,7 +69,7 @@ def capture_data(second_collection_period_start, collection_overlap_period)
     end

     def filter_statistics(stats, op, date, subtract_by = nil)
       filter_date = parse_datetime(date)
       filter_date -= subtract_by if subtract_by
       stats.select { |x| x['period_end'].send(op, filter_date) }
     end
@@ -84,72 +84,187 @@ def parse_datetime(datetime)
      Time.parse(datetime).utc
    end

    def mock_adapter(postgre_adapter)
      connection_config = {:adapter => postgre_adapter, :database => "manageiq_metrics"}
      adapter = "#{postgre_adapter}_adapter"
      # require "active_metrics/connection_adapters/#{adapter}"
      adapter_class = ActiveMetrics::ConnectionAdapters.const_get(adapter.classify)
      raw_connection = adapter_class.create_connection(connection_config)

      allow(ActiveMetrics::Base).to receive(:connection).and_return(adapter_class.new(raw_connection))
    end

    ["miq_postgres", "miq_postgres_legacy"].each do |postgre_adapter|
      context "with adapter #{postgre_adapter}" do
        before :each do
          mock_adapter(postgre_adapter)
        end

        context "2 collection periods total, end of 1. period has complete stats" do
          it "checks that saved metrics are correct" do
            capture_data('2013-08-28T12:06:00Z', 20.minutes)

            stats_period_start = [api_time_as_utc(@read_bytes.first), api_time_as_utc(@write_bytes.first)].min
            stats_period_end = [api_time_as_utc(@read_bytes.last), api_time_as_utc(@write_bytes.last)].min

            # check start date and end date
            expect(stats_period_start).to eq expected_stats_period_start
            expect(stats_period_end).to eq expected_stats_period_end

            # check that the 20s block is not interrupted between start and end time for net_usage_rate_average
            expected_timestamps.each do |timestamp|
              expect(@metrics_by_ts[timestamp.iso8601].try(:net_usage_rate_average)).not_to eq nil
            end

            # check that the 20s block is not interrupted between start and end time for disk_usage_rate_average
            expected_timestamps.each do |timestamp|
              expect(@metrics_by_ts[timestamp.iso8601].try(:disk_usage_rate_average)).not_to eq nil
            end

            # check that the 20s block is not interrupted between start and end time for cpu_usage_rate_average
            expected_timestamps.each do |timestamp|
              expect(@metrics_by_ts[timestamp.iso8601].try(:cpu_usage_rate_average)).not_to eq nil
            end
          end
        end

        context "2 collection periods total, there is data hole between periods" do
          it "verifies that hole in the data is logged, corrupted data is logged and no other warnings are logged" do
            # Hole in the data is logged
            expect($log).to receive(:warn).with(/expected to get data as of/).exactly(:once)
            # Corrupted data is logged. NOTE: This is emitted from the provider
            expect($log).to receive(:warn).with(/Distance of the multiple streams of data is invalid/).exactly(:once)
            # No other warnings should be logged
            expect($log).not_to receive(:warn)

            # sending no collection period overlap will cause a hole in the data
            capture_data('2013-08-28T11:56:00Z', nil)
          end
        end

        context "2 collection periods total, end of 1. period has incomplete stat" do
          it "checks that saved metrics are correct" do
            capture_data('2013-08-28T12:02:00Z', 20.minutes)

            stats_period_start = [api_time_as_utc(@read_bytes.first), api_time_as_utc(@write_bytes.first)].min
            stats_period_end = [api_time_as_utc(@read_bytes.last), api_time_as_utc(@write_bytes.last)].min

            # check start date and end date
            expect(stats_period_start).to eq expected_stats_period_start
            expect(stats_period_end).to eq expected_stats_period_end

            # check that the 20s block is not interrupted between start and end time for net_usage_rate_average
            expected_timestamps.each do |timestamp|
              expect(@metrics_by_ts[timestamp.iso8601].try(:net_usage_rate_average)).not_to eq nil
            end

            # check that the 20s block is not interrupted between start and end time for disk_usage_rate_average
            expected_timestamps.each do |timestamp|
              expect(@metrics_by_ts[timestamp.iso8601].try(:disk_usage_rate_average)).not_to eq nil
            end

            # check that the 20s block is not interrupted between start and end time for cpu_usage_rate_average
            expected_timestamps.each do |timestamp|
              expect(@metrics_by_ts[timestamp.iso8601].try(:cpu_usage_rate_average)).not_to eq nil
            end
          end
        end
      end
    end

    context "comparing miq_postgres and miq_postgres_legacy adapters" do
      it "creates the same metrics in the DB, starting with empty db" do
        # Capture data with the legacy adapter
        mock_adapter("miq_postgres_legacy")
        expect(ActiveMetrics::Base.connection.class).to eq(ActiveMetrics::ConnectionAdapters::MiqPostgresLegacyAdapter)
        capture_data('2013-08-28T12:02:00Z', 20.minutes)

        legacy_adapter_stored_data = @metrics_by_ts

        # Delete the metrics so we can fetch them again with the new adapter
        Metric.delete_all

        # Capture data with the new adapter
        mock_adapter("miq_postgres")
        expect(ActiveMetrics::Base.connection.class).to eq(ActiveMetrics::ConnectionAdapters::MiqPostgresAdapter)
        capture_data('2013-08-28T12:02:00Z', 20.minutes)

        new_adapter_stored_data = @metrics_by_ts

        # Assert the number of saved metrics is the same
        expect(new_adapter_stored_data.keys.count).to be > 0
        expect(new_adapter_stored_data.keys.count).to eq(legacy_adapter_stored_data.keys.count)

        # Assert that the metrics are exactly the same
        legacy_adapter_stored_data.each do |timestamp, data|
          legacy_adapter_attributes = data.attributes.except("id", "created_on")
          new_adapter_attributes = new_adapter_stored_data[timestamp].attributes.except("id", "created_on")
          # Is there a hash exact-match matcher? Include from both sides does an exact match, but it's a bit clunky
          expect(legacy_adapter_attributes).to include(new_adapter_attributes)
          expect(new_adapter_attributes).to include(legacy_adapter_attributes)
        end

        # Assert the created_on of the new metrics is bigger, to make sure we did change the data
        legacy_adapter_stored_data.each do |timestamp, data|
          expect(data.created_on).to be < new_adapter_stored_data[timestamp].created_on
        end
      end

      it "doesn't change the data when changing from legacy_driver to new_driver" do
        # Capture data with the new adapter
        mock_adapter("miq_postgres")
        expect(ActiveMetrics::Base.connection.class).to eq(ActiveMetrics::ConnectionAdapters::MiqPostgresAdapter)
        capture_data('2013-08-28T12:02:00Z', 20.minutes)

        new_adapter_stored_data = @metrics_by_ts

        # Capture data with the legacy adapter
        mock_adapter("miq_postgres_legacy")
        expect(ActiveMetrics::Base.connection.class).to eq(ActiveMetrics::ConnectionAdapters::MiqPostgresLegacyAdapter)
        capture_data('2013-08-28T12:02:00Z', 20.minutes)

        legacy_adapter_stored_data = @metrics_by_ts

        # Assert the number of saved metrics is the same
        expect(new_adapter_stored_data.keys.count).to be > 0
        expect(new_adapter_stored_data.keys.count).to eq(legacy_adapter_stored_data.keys.count)

        # Assert that the metrics are exactly the same
        legacy_adapter_stored_data.each do |timestamp, data|
          legacy_adapter_attributes = data.attributes
          new_adapter_attributes = new_adapter_stored_data[timestamp].attributes
          # Is there a hash exact-match matcher? Include from both sides does an exact match, but it's a bit clunky
          expect(legacy_adapter_attributes).to include(new_adapter_attributes)
          expect(new_adapter_attributes).to include(legacy_adapter_attributes)
        end
      end

      it "doesn't change the data when changing from new_driver to legacy_driver" do
        # Capture data with the legacy adapter
        mock_adapter("miq_postgres_legacy")
        expect(ActiveMetrics::Base.connection.class).to eq(ActiveMetrics::ConnectionAdapters::MiqPostgresLegacyAdapter)
        capture_data('2013-08-28T12:02:00Z', 20.minutes)

        legacy_adapter_stored_data = @metrics_by_ts

        # Capture data with the new adapter
        mock_adapter("miq_postgres")
        expect(ActiveMetrics::Base.connection.class).to eq(ActiveMetrics::ConnectionAdapters::MiqPostgresAdapter)
        capture_data('2013-08-28T12:02:00Z', 20.minutes)

        new_adapter_stored_data = @metrics_by_ts

        # Assert the number of saved metrics is the same
        expect(new_adapter_stored_data.keys.count).to be > 0
        expect(new_adapter_stored_data.keys.count).to eq(legacy_adapter_stored_data.keys.count)

        # Assert that the metrics are exactly the same
        legacy_adapter_stored_data.each do |timestamp, data|
          legacy_adapter_attributes = data.attributes
          new_adapter_attributes = new_adapter_stored_data[timestamp].attributes
          # Is there a hash exact-match matcher? Include from both sides does an exact match, but it's a bit clunky
          expect(legacy_adapter_attributes).to include(new_adapter_attributes)
          expect(new_adapter_attributes).to include(legacy_adapter_attributes)
        end
      end
    end
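
As an aside on the question raised in the spec comments above: for plain attribute hashes, RSpec's eq matcher already performs an exact comparison via Hash#==, so the two-way include could be collapsed into a single expectation. A minimal sketch, not part of the commit:

    expect(legacy_adapter_attributes).to eq(new_adapter_attributes)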
