diff --git a/app/channels/notification_channel.rb b/app/channels/notification_channel.rb index 0c76e76d2be..49b9671d11f 100644 --- a/app/channels/notification_channel.rb +++ b/app/channels/notification_channel.rb @@ -1,6 +1,6 @@ class NotificationChannel < ApplicationCable::Channel def subscribed - stream_from "notifications_#{current_user.id}" if current_user + stream_from("notifications_#{current_user.id}") if current_user end def unsubscribed diff --git a/app/models/asset_tag_import.rb b/app/models/asset_tag_import.rb index 62fbe414f42..9f384cceed3 100644 --- a/app/models/asset_tag_import.rb +++ b/app/models/asset_tag_import.rb @@ -47,7 +47,7 @@ def verify objs = MiqBulkImport.find_entry_by_keys(@klass, keys) if objs.empty? bad += 1 - _log.warn "#{@keys[0].titleize} #{line[@keys[0]]}: Unable to find a #{@klass.name}" + _log.warn("#{@keys[0].titleize} #{line[@keys[0]]}: Unable to find a #{@klass.name}") err = "#{@klass.name.downcase}notfound".to_sym @errors.add(err, "#{@keys[0].titleize}: #{line[@keys[0]]}: Unable to find a #{@klass.name}") next @@ -55,7 +55,7 @@ def verify if objs.length > 1 bad += 1 err = "serveral#{@klass.name.downcase}sfound4keys".to_sym - _log.warn "#{@keys[0].titleize}: #{line[@keys[0]]}: Could not resolve a #{@klass.name}, an entry will be skipped" + _log.warn("#{@keys[0].titleize}: #{line[@keys[0]]}: Could not resolve a #{@klass.name}, an entry will be skipped") @errors.add(err, "#{@keys[0].titleize}: #{line[@keys[0]]}: Could not resolve a #{@klass.name}, an entry will be skipped") else @verified_data[objs[0].id] ||= [] @@ -73,14 +73,14 @@ def verify obj = @klass.find_by(:id => id) while data.length > 1 data.shift - _log.warn "#{@klass.name} #{obj.name}, Multiple lines for the same object, the last line is applied" + _log.warn("#{@klass.name} #{obj.name}, Multiple lines for the same object, the last line is applied") @errors.add(:singlevaluedassettag, "#{@klass.name}: #{obj.name}, Multiple lines for the same object, the last line is 
applied") end end end @stats = {:good => good, :bad => bad} - _log.info "Number of valid entries #{@stats[:good]}, number of invalid entries #{@stats[:bad]}" + _log.info("Number of valid entries #{@stats[:good]}, number of invalid entries #{@stats[:bad]}") @stats end @@ -95,17 +95,17 @@ def apply attr = attrs.detect { |ca| ca.name == key } if attr.nil? if value.blank? - _log.info "#{@klass.name}: #{obj.name}, Skipping tag <#{key}> due to blank value" + _log.info("#{@klass.name}: #{obj.name}, Skipping tag <#{key}> due to blank value") else - _log.info "#{@klass.name}: #{obj.name}, Adding tag <#{key}>, value <#{value}>" + _log.info("#{@klass.name}: #{obj.name}, Adding tag <#{key}>, value <#{value}>") new_attrs << {:name => key, :value => value, :source => 'EVM'} end else if value.blank? - _log.info "#{@klass.name}: #{obj.name}, Deleting tag <#{key}> due to blank value" + _log.info("#{@klass.name}: #{obj.name}, Deleting tag <#{key}> due to blank value") attr.delete else - _log.info "#{@klass.name}: #{obj.name}, Updating tag <#{key}>, value <#{value}>" + _log.info("#{@klass.name}: #{obj.name}, Updating tag <#{key}>, value <#{value}>") attr.update_attribute(:value, value) end end diff --git a/app/models/auth_token.rb b/app/models/auth_token.rb index 5249d996cdc..697360f79b5 100644 --- a/app/models/auth_token.rb +++ b/app/models/auth_token.rb @@ -1,6 +1,6 @@ class AuthToken < Authentication def auth_key=(val) @auth_changed = true if val != auth_key - super val + super(val) end end diff --git a/app/models/auth_userid_password.rb b/app/models/auth_userid_password.rb index 6a595460de8..a01eb9a62da 100644 --- a/app/models/auth_userid_password.rb +++ b/app/models/auth_userid_password.rb @@ -1,11 +1,11 @@ class AuthUseridPassword < Authentication def password=(val) @auth_changed = true if val != password - super val + super(val) end def userid=(val) @auth_changed = true if val != userid - super val + super(val) end end diff --git a/app/models/authentication_ldap.rb 
b/app/models/authentication_ldap.rb index d8cf3f413c5..104f1dc2027 100644 --- a/app/models/authentication_ldap.rb +++ b/app/models/authentication_ldap.rb @@ -4,6 +4,6 @@ def assign_values(options) options.each do |key, val| hash["ldap_" + key.to_s] = val end - super hash + super(hash) end end diff --git a/app/models/authenticator/base.rb b/app/models/authenticator/base.rb index d7c125e3c74..6012738edc0 100644 --- a/app/models/authenticator/base.rb +++ b/app/models/authenticator/base.rb @@ -77,7 +77,7 @@ def authenticate(username, password, request = nil, options = {}) end rescue MiqException::MiqEVMLoginError => err - _log.warn err.message + _log.warn(err.message) raise rescue Exception => err _log.log_backtrace(err) diff --git a/app/models/authenticator/httpd.rb b/app/models/authenticator/httpd.rb index d529eaa4721..e8ad7250023 100644 --- a/app/models/authenticator/httpd.rb +++ b/app/models/authenticator/httpd.rb @@ -115,7 +115,7 @@ def user_attrs_from_external_directory(username) sysbus = DBus.system_bus ifp_service = sysbus["org.freedesktop.sssd.infopipe"] - ifp_object = ifp_service.object "/org/freedesktop/sssd/infopipe" + ifp_object = ifp_service.object("/org/freedesktop/sssd/infopipe") ifp_object.introspect ifp_interface = ifp_object["org.freedesktop.sssd.infopipe"] begin diff --git a/app/models/automation_request.rb b/app/models/automation_request.rb index 68a102e2483..a6659423846 100644 --- a/app/models/automation_request.rb +++ b/app/models/automation_request.rb @@ -12,7 +12,7 @@ class AutomationRequest < MiqRequest # parameters: var1=vvvvv|var2=wwww|var3=xxxxx ############################################## def self.create_from_ws(version, user, uri_parts, parameters, requester) - _log.info "Starting with interface version=<#{version}> for user=<#{user.userid}> with uri_parts=<#{uri_parts.inspect}>, parameters=<#{parameters.inspect}> and requester=<#{requester.inspect}>" + _log.info("Starting with interface version=<#{version}> for user=<#{user.userid}> 
with uri_parts=<#{uri_parts.inspect}>, parameters=<#{parameters.inspect}> and requester=<#{requester.inspect}>") options = {} requester_options = MiqRequestWorkflow.parse_ws_string(requester) @@ -22,7 +22,7 @@ def self.create_from_ws(version, user, uri_parts, parameters, requester) uri_options = MiqRequestWorkflow.parse_ws_string(uri_parts) [:namespace, :class, :instance, :message].each { |key| options[key] = uri_options.delete(key) if uri_options.key?(key) } - uri_options.keys.each { |key| _log.warn "invalid keyword <#{key}> specified in uri_parts" } + uri_options.keys.each { |key| _log.warn("invalid keyword <#{key}> specified in uri_parts") } options[:namespace] = (options.delete(:namespace) || DEFAULT_NAMESPACE).strip.gsub(/(^\/|\/$)/, "") # Strip blanks and slashes from beginning and end of string options[:class_name] = (options.delete(:class) || DEFAULT_CLASS).strip.gsub(/(^\/|\/$)/, "") options[:instance_name] = (options.delete(:instance) || DEFAULT_INSTANCE).strip diff --git a/app/models/bottleneck_event.rb b/app/models/bottleneck_event.rb index 9d1647d1bbc..a118cb0cf94 100644 --- a/app/models/bottleneck_event.rb +++ b/app/models/bottleneck_event.rb @@ -72,7 +72,7 @@ def format(value, method, options = {}) end def substitute(str) - eval "result = \"#{str}\"" + eval("result = \"#{str}\"") end # Future event calculation methods diff --git a/app/models/classification.rb b/app/models/classification.rb index d39b7e527d6..a5ef6fd2ebd 100644 --- a/app/models/classification.rb +++ b/app/models/classification.rb @@ -28,9 +28,9 @@ class Classification < ApplicationRecord validates :syntax, :inclusion => {:in => %w( string integer boolean ), :message => "should be one of 'string', 'integer' or 'boolean'"} - scope :visible, -> { where :show => true } - scope :read_only, -> { where :read_only => true } - scope :writeable, -> { where :read_only => false } + scope :visible, -> { where(:show => true) } + scope :read_only, -> { where(:read_only => true) } + scope :writeable, 
-> { where(:read_only => false) } DEFAULT_NAMESPACE = "/managed" @@ -56,7 +56,7 @@ def self.hash_all_by_type_and_name(conditions = {}) end def self.parent_ids(parent_ids) - where :parent_id => parent_ids + where(:parent_id => parent_ids) end def self.tags_arel @@ -376,7 +376,7 @@ def enforce_policy(obj, event) def self.export_to_array categories.inject([]) do |a, c| - a.concat c.export_to_array + a.concat(c.export_to_array) end end diff --git a/app/models/classification_import.rb b/app/models/classification_import.rb index a272c8f83da..a260eb7703d 100644 --- a/app/models/classification_import.rb +++ b/app/models/classification_import.rb @@ -33,19 +33,19 @@ def verify vms = MiqBulkImport.find_entry_by_keys(VmOrTemplate, keys) if vms.empty? bad += 1 - _log.warn "#{@keys[0].titleize}: #{line[@keys[0]]}: Unable to find VM" + _log.warn("#{@keys[0].titleize}: #{line[@keys[0]]}: Unable to find VM") @errors.add(:vmnotfound, "#{@keys[0].titleize}: #{line[@keys[0]]}: Unable to find VM") next end if vms.length > 1 bad += 1 - _log.warn "#{@keys[0].titleize}: #{line[@keys[0]]}: Could not resolve a vm, an entry will be skipped" + _log.warn("#{@keys[0].titleize}: #{line[@keys[0]]}: Could not resolve a vm, an entry will be skipped") @errors.add(:severalvmsfound4keys, "#{@keys[0].titleize}: #{line[@keys[0]]}: Could not resolve a vm, an entry will be skipped") else cat = Classification.find_by(:description => line["category"]) if cat.nil? bad += 1 - _log.warn "#{@keys[0].titleize}: #{line[@keys[0]]}: Unable to find category #{line["category"]}" + _log.warn("#{@keys[0].titleize}: #{line[@keys[0]]}: Unable to find category #{line["category"]}") @errors.add(:categorynotfound, "#{@keys[0].titleize}: #{line[@keys[0]]}: Unable to find category #{line["category"]}") next end @@ -62,7 +62,7 @@ def verify end if entry.nil? 
bad += 1 - _log.warn "#{@keys[0].titleize}: #{line[@keys[0]]}, category: #{line["category"]}: Unable to find entry #{line["entry"]})" + _log.warn("#{@keys[0].titleize}: #{line[@keys[0]]}, category: #{line["category"]}: Unable to find entry #{line["entry"]})") @errors.add(:entrynotfound, "#{@keys[0].titleize}: #{line[@keys[0]]}, category: #{line["category"]}: Unable to find entry #{line["entry"]}") next end @@ -77,14 +77,14 @@ def verify vm = VmOrTemplate.find_by(:id => id) while entries.length > 1 e = entries.shift - _log.warn "Vm: #{vm.name}, Location: #{vm.location}, Category: #{category}: Multiple values given for single-valued category, value #{e} will be ignored" + _log.warn("Vm: #{vm.name}, Location: #{vm.location}, Category: #{category}: Multiple values given for single-valued category, value #{e} will be ignored") @errors.add(:singlevaluedcategory, "Vm #{vm.name}, Location: #{vm.location}, Category: #{category}: Multiple values given for single-valued category, value #{e} will be ignored") end end end end @stats = {:good => good, :bad => bad} - _log.info "Number of valid entries: #{@stats[:good]}, number of invalid entries: #{@stats[:bad]}" + _log.info("Number of valid entries: #{@stats[:good]}, number of invalid entries: #{@stats[:bad]}") @stats end @@ -98,7 +98,7 @@ def apply entries.each do|ent| cat.entries.each do|e| if e.description == ent - _log.info "Vm: #{vm.name}, Location: #{vm.location}, Category: #{cat.description}: Applying entry #{ent}" + _log.info("Vm: #{vm.name}, Location: #{vm.location}, Category: #{cat.description}: Applying entry #{ent}") e.assign_entry_to(vm) break end diff --git a/app/models/cloud_subnet/operations.rb b/app/models/cloud_subnet/operations.rb index d40696d9073..6cbf0fc040c 100644 --- a/app/models/cloud_subnet/operations.rb +++ b/app/models/cloud_subnet/operations.rb @@ -1,6 +1,6 @@ module CloudSubnet::Operations def self.included(base) - base.send :include, InstanceMethods + base.send(:include, InstanceMethods) 
base.extend ClassMethods end diff --git a/app/models/cloud_volume/operations.rb b/app/models/cloud_volume/operations.rb index 482aeaf7140..0e5a26d5130 100644 --- a/app/models/cloud_volume/operations.rb +++ b/app/models/cloud_volume/operations.rb @@ -1,6 +1,6 @@ module CloudVolume::Operations def self.included(base) - base.send :include, InstanceMethods + base.send(:include, InstanceMethods) base.extend ClassMethods end diff --git a/app/models/container.rb b/app/models/container.rb index e74b143559a..09e2250cb91 100644 --- a/app/models/container.rb +++ b/app/models/container.rb @@ -46,7 +46,7 @@ def perf_rollup_parents(_interval_name = nil) def disconnect_inv return if archived? - _log.info "Disconnecting Container [#{name}] id [#{id}] from EMS " + _log.info("Disconnecting Container [#{name}] id [#{id}] from EMS") self.deleted_on = Time.now.utc save end diff --git a/app/models/container_deployment.rb b/app/models/container_deployment.rb index 4e9a4cf78ed..c6f847dc6be 100644 --- a/app/models/container_deployment.rb +++ b/app/models/container_deployment.rb @@ -35,8 +35,8 @@ def identity_provider_auth end def roles_addresses(role) - if role.include? 
"deployment_master" - extract_public_ip_or_hostname container_nodes_by_role(role).first + if role.include?("deployment_master") + extract_public_ip_or_hostname(container_nodes_by_role(role).first) else addresses_array(container_nodes_by_role(role)) end @@ -118,9 +118,9 @@ def create_deployment(params, user) self.method_type = params["method_type"] create_needed_tags if method_type.include?("existing_managed") - self.deployed_on_ems = ExtManagementSystem.find params["underline_provider_id"] + self.deployed_on_ems = ExtManagementSystem.find(params["underline_provider_id"]) elsif method_type.include?("provision") - self.deployed_on_ems = ExtManagementSystem.find params["underline_provider_id"] + self.deployed_on_ems = ExtManagementSystem.find(params["underline_provider_id"]) keys = generate_ssh_keys public_key = keys[:public_key] private_key = keys[:private_key] @@ -128,7 +128,7 @@ def create_deployment(params, user) params["ssh_authentication"]["public_key"] = public_key params["ssh_authentication"]["userid"] = "root" end - add_deployment_master_role params["nodes"] + add_deployment_master_role(params["nodes"]) create_deployment_nodes(params["nodes"]) create_deployment_authentication(params["identity_authentication"]) create_deployment_authentication(params["ssh_authentication"].merge("type" => "AuthPrivateKey")) @@ -141,7 +141,7 @@ def create_deployment(params, user) def create_deployment_authentication(authentication) auth = authentication["type"].safe_constantize.new auth.authtype = AUTHENTICATIONS_NAMES[authentication["type"]] - auth.assign_values authentication + auth.assign_values(authentication) authentications << auth save! end @@ -154,7 +154,7 @@ def extract_public_ip_or_hostname(deployment_node) end def create_deployment_nodes(nodes) - work_by_vm_id = method_type.include? 
"existing_managed" + work_by_vm_id = method_type.include?("existing_managed") nodes.each do |node| container_deployment_node = ContainerDeploymentNode.new container_deployment_node.address = node["name"] unless work_by_vm_id @@ -162,7 +162,7 @@ def create_deployment_nodes(nodes) container_deployment_nodes << container_deployment_node node["roles"].each do |key, value| if value - container_deployment_node.tag_add key + container_deployment_node.tag_add(key) end end container_deployment_node.save! @@ -230,7 +230,7 @@ def identity_ansible_config_format "requestHeaderEmailHeaders" => authentication.request_header_email_headers } end - {'name' => "example_name", 'login' => "true", 'challenge' => "true", 'kind' => authentication.authtype}.merge! options + {'name' => "example_name", 'login' => "true", 'challenge' => "true", 'kind' => authentication.authtype}.merge!(options) end private @@ -250,15 +250,15 @@ def generate_roles result = {} roles = container_deployment_nodes.collect(&:roles).flatten.uniq result["master"] = {"osm_use_cockpit" => "false", - "openshift_master_identity_providers" => [identity_ansible_config_format]} if roles.include? "master" + "openshift_master_identity_providers" => [identity_ansible_config_format]} if roles.include?("master") unless identity_provider_auth.first.htpassd_users.empty? result["master"]["openshift_master_htpasswd_users"] = htpasswd_hash end - result["node"] = {} if roles.include? "node" - result["storage"] = {} if roles.include? "storage" - result["etcd"] = {} if roles.include? "etcd" - result["master_lb"] = {} if roles.include? "master_lb" - result["dns"] = {} if roles.include? 
"dns" + result["node"] = {} if roles.include?("node") + result["storage"] = {} if roles.include?("storage") + result["etcd"] = {} if roles.include?("etcd") + result["master_lb"] = {} if roles.include?("master_lb") + result["dns"] = {} if roles.include?("dns") result end @@ -282,7 +282,7 @@ def generate_automation_params(params) :containerized => containerized, :rhsub_sku => rhsm_auth.rhsm_sku } - if method_type.include? "provision" + if method_type.include?("provision") node_template = VmOrTemplate.find(params["nodes_creation_template_id"]) master_template = VmOrTemplate.find(params["masters_creation_template_id"]) parameters_provision = { @@ -336,7 +336,7 @@ def vm_fields(type, params) options["vm_name"] = params["#{type.singularize}_base_name"] options["vm_memory"] = params[type + "_vm_memory"] if params[type + "_vm_memory"] options["cpu"] = params[type + "_cpu"] if params[type + "_cpu"] - if deployed_on_ems.kind_of? ManageIQ::Providers::Amazon::CloudManager + if deployed_on_ems.kind_of?(ManageIQ::Providers::Amazon::CloudManager) options["instance_type"] = 1 || params[type + "_instance_type"] options["placement_auto"] = true end diff --git a/app/models/container_group.rb b/app/models/container_group.rb index 14383af4ce3..ab9f6aa97ef 100644 --- a/app/models/container_group.rb +++ b/app/models/container_group.rb @@ -82,8 +82,7 @@ def perf_rollup_parents(interval_name = nil) def disconnect_inv return if archived? 
- _log.info "Disconnecting Pod [#{name}] id [#{id}] from EMS [#{ext_management_system.name}]" \ - "id [#{ext_management_system.id}] " + _log.info("Disconnecting Pod [#{name}] id [#{id}] from EMS [#{ext_management_system.name}] id [#{ext_management_system.id}]") self.containers.each(&:disconnect_inv) self.container_node_id = nil self.container_services = [] diff --git a/app/models/container_group_performance.rb b/app/models/container_group_performance.rb index b2bebf17e0a..9e5e448d282 100644 --- a/app/models/container_group_performance.rb +++ b/app/models/container_group_performance.rb @@ -1,5 +1,5 @@ class ContainerGroupPerformance < MetricRollup - default_scope { where "resource_type = 'ContainerGroup' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'ContainerGroup' and resource_id IS NOT NULL") } belongs_to :container_group, :foreign_key => :resource_id, :class_name => ContainerGroup.name end diff --git a/app/models/container_image.rb b/app/models/container_image.rb index 8632d44eadf..9b4e9510a55 100644 --- a/app/models/container_image.rb +++ b/app/models/container_image.rb @@ -97,8 +97,7 @@ def openscap_failed_rules_summary def disconnect_inv return if archived? 
- _log.info "Disconnecting Image [#{name}] id [#{id}] from EMS [#{ext_management_system.name}]" \ - "id [#{ext_management_system.id}] " + _log.info("Disconnecting Image [#{name}] id [#{id}] from EMS [#{ext_management_system.name}] id [#{ext_management_system.id}]") self.container_image_registry = nil self.deleted_on = Time.now.utc save diff --git a/app/models/container_label_tag_mapping.rb b/app/models/container_label_tag_mapping.rb index 655b64a875c..448552e3d42 100644 --- a/app/models/container_label_tag_mapping.rb +++ b/app/models/container_label_tag_mapping.rb @@ -64,7 +64,7 @@ def self.find_or_create_tag(tag_hash) category = Tag.find(tag_hash[:category_tag_id]).classification entry = category.find_entry_by_name(tag_hash[:entry_name]) unless entry - category.lock :exclusive do + category.lock(:exclusive) do begin entry = category.add_entry(:name => tag_hash[:entry_name], :description => tag_hash[:entry_description]) diff --git a/app/models/container_node_performance.rb b/app/models/container_node_performance.rb index b429025c195..4acd0a09f09 100644 --- a/app/models/container_node_performance.rb +++ b/app/models/container_node_performance.rb @@ -1,5 +1,5 @@ class ContainerNodePerformance < MetricRollup - default_scope { where "resource_type = 'ContainerNode' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'ContainerNode' and resource_id IS NOT NULL") } belongs_to :container_node, :foreign_key => :resource_id, :class_name => ContainerNode.name end diff --git a/app/models/container_performance.rb b/app/models/container_performance.rb index 1871e34a094..71a413af173 100644 --- a/app/models/container_performance.rb +++ b/app/models/container_performance.rb @@ -1,5 +1,5 @@ class ContainerPerformance < MetricRollup - default_scope { where "resource_type = 'Container' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'Container' and resource_id IS NOT NULL") } belongs_to :container_node, :foreign_key => :resource_id, 
:class_name => Container.name end diff --git a/app/models/container_project.rb b/app/models/container_project.rb index c6f826296fb..dd05ce35927 100644 --- a/app/models/container_project.rb +++ b/app/models/container_project.rb @@ -62,8 +62,7 @@ def perf_rollup_parents(interval_name = nil) def disconnect_inv return if archived? - _log.info "Disconnecting Container Project [#{name}] id [#{id}] from EMS [#{ext_management_system.name}]" \ - "id [#{ext_management_system.id}] " + _log.info("Disconnecting Container Project [#{name}] id [#{id}] from EMS [#{ext_management_system.name}] id [#{ext_management_system.id}]") self.deleted_on = Time.now.utc save end diff --git a/app/models/container_project_performance.rb b/app/models/container_project_performance.rb index 27a4e94287c..6cfc84c9e36 100644 --- a/app/models/container_project_performance.rb +++ b/app/models/container_project_performance.rb @@ -1,5 +1,5 @@ class ContainerProjectPerformance < MetricRollup - default_scope { where "resource_type = 'ContainerProject' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'ContainerProject' and resource_id IS NOT NULL") } belongs_to :container_node, :foreign_key => :resource_id, :class_name => ContainerProject.name end diff --git a/app/models/dialog.rb b/app/models/dialog.rb index c02d8105910..828d63bac35 100644 --- a/app/models/dialog.rb +++ b/app/models/dialog.rb @@ -4,7 +4,7 @@ class Dialog < ApplicationRecord # The following gets around a glob symbolic link issue YAML_FILES_PATTERN = "{,*/**/}*.{yaml,yml}".freeze - has_many :dialog_tabs, -> { order :position }, :dependent => :destroy + has_many :dialog_tabs, -> { order(:position) }, :dependent => :destroy validate :validate_children include DialogMixin diff --git a/app/models/dynamic_dialog_field_value_processor.rb b/app/models/dynamic_dialog_field_value_processor.rb index 44b4b5a43df..37c22d7b650 100644 --- a/app/models/dynamic_dialog_field_value_processor.rb +++ 
b/app/models/dynamic_dialog_field_value_processor.rb @@ -13,8 +13,8 @@ def values_from_automate(dialog_field) dialog_field.normalize_automate_values(workspace.root.attributes) rescue => e - $log.error "DynamicDialogFieldValueProcessor: error getting value from automate #{e.message}" - $log.error e.backtrace.join("\n") + $log.error("DynamicDialogFieldValueProcessor: error getting value from automate #{e.message}") + $log.error(e.backtrace.join("\n")) dialog_field.script_error_values end diff --git a/app/models/ems_cluster.rb b/app/models/ems_cluster.rb index 6ddf2174388..ee780fec09b 100644 --- a/app/models/ems_cluster.rb +++ b/app/models/ems_cluster.rb @@ -17,7 +17,7 @@ class EmsCluster < ApplicationRecord has_many :metric_rollups, :as => :resource # Destroy will be handled by purger has_many :vim_performance_states, :as => :resource # Destroy will be handled by purger - has_many :policy_events, -> { order "timestamp" } + has_many :policy_events, -> { order("timestamp") } has_many :miq_events, :as => :target, :dependent => :destroy has_many :miq_alert_statuses, :as => :resource, :dependent => :destroy @@ -291,7 +291,7 @@ def register_host(host) with_provider_object do |vim_cluster| begin - _log.info "Invoking addHost with options: address => #{network_address}, #{userid}" + _log.info("Invoking addHost with options: address => #{network_address}, #{userid}") host_mor = vim_cluster.addHost(network_address, userid, password) rescue VimFault => verr fault = verr.vimFaultInfo.fault @@ -299,7 +299,7 @@ def register_host(host) raise unless fault.xsiType == "SSLVerifyFault" ssl_thumbprint = fault.thumbprint - _log.info "Invoking addHost with options: address => #{network_address}, userid => #{userid}, sslThumbprint => #{ssl_thumbprint}" + _log.info("Invoking addHost with options: address => #{network_address}, userid => #{userid}, sslThumbprint => #{ssl_thumbprint}") host_mor = vim_cluster.addHost(network_address, userid, password, :sslThumbprint => ssl_thumbprint) end diff 
--git a/app/models/ems_cluster_performance.rb b/app/models/ems_cluster_performance.rb index 7d4f58932a1..c2e9a4e78f0 100644 --- a/app/models/ems_cluster_performance.rb +++ b/app/models/ems_cluster_performance.rb @@ -1,5 +1,5 @@ class EmsClusterPerformance < MetricRollup - default_scope { where "resource_type = 'EmsCluster' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'EmsCluster' and resource_id IS NOT NULL") } belongs_to :ems_cluster, :foreign_key => :resource_id end diff --git a/app/models/ems_event.rb b/app/models/ems_event.rb index b04b3f40f89..5037bf1201c 100644 --- a/app/models/ems_event.rb +++ b/app/models/ems_event.rb @@ -150,7 +150,7 @@ def self.process_availability_zone_in_event!(event, options = {}) process_object_in_event!(AvailabilityZone, event, options) if event[:availability_zone_id].nil? && event[:vm_or_template_id] vm = VmOrTemplate.find(event[:vm_or_template_id]) - if vm.respond_to? :availability_zone + if vm.respond_to?(:availability_zone) availability_zone = vm.availability_zone unless availability_zone.nil? event[:availability_zone_id] = availability_zone.id diff --git a/app/models/ems_event/automate.rb b/app/models/ems_event/automate.rb index a814ac04016..31cbb04e4e5 100644 --- a/app/models/ems_event/automate.rb +++ b/app/models/ems_event/automate.rb @@ -120,7 +120,7 @@ def parse_policy_parameters(target_str, policy_event, param) def parse_policy_source(target, param) param.blank? ? ext_management_system : target.send(param) rescue => err - _log.warn "Error: #{err.message}, getting policy source, skipping policy evaluation" + _log.warn("Error: #{err.message}, getting policy source, skipping policy evaluation") end def call(target_str, method, options = {}) @@ -128,7 +128,7 @@ def call(target_str, method, options = {}) target = target_original = get_target(target_str) if target.nil? - _log.info "Unable to find target [#{target_str}]. Performing refresh." + _log.info("Unable to find target [#{target_str}]. 
Performing refresh.") return refresh(target_str) end diff --git a/app/models/ems_folder.rb b/app/models/ems_folder.rb index d20f9246261..b0f7bcd53fc 100644 --- a/app/models/ems_folder.rb +++ b/app/models/ems_folder.rb @@ -128,7 +128,7 @@ def register_host(host) with_provider_connection do |vim| handle = provider_object(vim) begin - _log.info "Invoking addStandaloneHost with options: address => #{network_address}, #{userid}" + _log.info("Invoking addStandaloneHost with options: address => #{network_address}, #{userid}") cr_mor = handle.addStandaloneHost(network_address, userid, password) rescue VimFault => verr fault = verr.vimFaultInfo.fault @@ -136,7 +136,7 @@ def register_host(host) raise unless fault.xsiType == "SSLVerifyFault" ssl_thumbprint = fault.thumbprint - _log.info "Invoking addStandaloneHost with options: address => #{network_address}, userid => #{userid}, sslThumbprint => #{ssl_thumbprint}" + _log.info("Invoking addStandaloneHost with options: address => #{network_address}, userid => #{userid}, sslThumbprint => #{ssl_thumbprint}") cr_mor = handle.addStandaloneHost(network_address, userid, password, :sslThumbprint => ssl_thumbprint) end diff --git a/app/models/ems_refresh.rb b/app/models/ems_refresh.rb index 434a79850dc..9efe58aa0b5 100644 --- a/app/models/ems_refresh.rb +++ b/app/models/ems_refresh.rb @@ -100,13 +100,13 @@ def self.refresh(target, id = nil) def self.refresh_new_target(ems_id, target_hash, target_class, target_find) ems = ExtManagementSystem.find(ems_id) - target_class = target_class.constantize if target_class.kind_of? String + target_class = target_class.constantize if target_class.kind_of?(String) save_ems_inventory_no_disconnect(ems, target_hash) target = target_class.find_by(target_find) if target.nil? - _log.warn "Unknown target for event data: #{target_hash}." 
+ _log.warn("Unknown target for event data: #{target_hash}.") return end @@ -128,7 +128,7 @@ def self.get_target_objects(target, single_id = nil) if ManagerRefresh::Inventory.persister_class_for(target_class).blank? && [VmOrTemplate, Host, ExtManagementSystem, ManagerRefresh::Target].none? { |k| target_class <= k } - _log.warn "Unknown target type: [#{target_class}]." + _log.warn("Unknown target type: [#{target_class}].") next end @@ -149,7 +149,7 @@ def self.get_target_objects(target, single_id = nil) if recs.length != ids.length missing = ids - recs.collect(&:id) - _log.warn "Unable to find a record for [#{target_class}] ids: #{missing.inspect}." + _log.warn("Unable to find a record for [#{target_class}] ids: #{missing.inspect}.") end target_objects.concat(recs) @@ -225,9 +225,9 @@ def self.log_inv_debug_trace(inv, log_header, depth = 1) inv.each do |k, v| if depth == 1 - $log.debug "#{log_header} #{k.inspect}=>#{v.inspect}" + $log.debug("#{log_header} #{k.inspect}=>#{v.inspect}") else - $log.debug "#{log_header} #{k.inspect}=>" + $log.debug("#{log_header} #{k.inspect}=>") log_inv_debug_trace(v, "#{log_header} ", depth - 1) end end diff --git a/app/models/ems_refresh/link_inventory.rb b/app/models/ems_refresh/link_inventory.rb index 2ca292a8ad9..0049ca92012 100644 --- a/app/models/ems_refresh/link_inventory.rb +++ b/app/models/ems_refresh/link_inventory.rb @@ -2,17 +2,17 @@ module EmsRefresh::LinkInventory # Link EMS inventory through the relationships table def link_ems_inventory(ems, target, prev_relats, new_relats) log_header = "EMS: [#{ems.name}], id: [#{ems.id}]" - _log.info "#{log_header} Linking EMS Inventory..." 
- _log.debug "#{log_header} prev_relats: #{prev_relats.inspect}" - _log.debug "#{log_header} new_relats: #{new_relats.inspect}" + _log.info("#{log_header} Linking EMS Inventory...") + _log.debug("#{log_header} prev_relats: #{prev_relats.inspect}") + _log.debug("#{log_header} new_relats: #{new_relats.inspect}") if prev_relats == new_relats - _log.info "#{log_header} Linking EMS Inventory...Complete" + _log.info("#{log_header} Linking EMS Inventory...Complete") return end # Hook up a relationship from the EMS to the root folder - _log.info "#{log_header} Updating EMS root folder relationship." + _log.info("#{log_header} Updating EMS root folder relationship.") root_id = new_relats[:ext_management_systems_to_folders][ems.id][0] if root_id.nil? ems.remove_all_children @@ -113,7 +113,7 @@ def link_ems_inventory(ems, target, prev_relats, new_relats) proc { |vs| rp.add_vm(instances_with_ids(VmOrTemplate, vs)) }] # Bulk connect proc end - _log.info "#{log_header} Linking EMS Inventory...Complete" + _log.info("#{log_header} Linking EMS Inventory...Complete") end def instance_with_id(klass, id) @@ -143,7 +143,7 @@ def link_habtm(object, hashes, accessor, model, do_disconnect = true) # def update_relats(type, prev_relats, new_relats) - _log.info "Updating #{type.to_s.titleize} relationships." 
+ _log.info("Updating #{type.to_s.titleize} relationships.") if new_relats[type].kind_of?(Array) || prev_relats[type].kind_of?(Array) # Case where we have a single set of ids @@ -170,7 +170,7 @@ def update_relats_by_ids(prev_ids, new_ids, disconnect_proc, connect_proc, bulk_ begin disconnect_proc.call(p) rescue => err - _log.error "An error occurred while disconnecting id [#{p}]: #{err}" + _log.error("An error occurred while disconnecting id [#{p}]: #{err}") _log.log_backtrace(err) end end @@ -181,7 +181,7 @@ def update_relats_by_ids(prev_ids, new_ids, disconnect_proc, connect_proc, bulk_ begin bulk_connect.call(new_ids) rescue => err - _log.error "EMS: [#{@ems.name}], id: [#{@ems.id}] An error occurred while connecting ids [#{new_ids.join(',')}]: #{err}" + _log.error("EMS: [#{@ems.name}], id: [#{@ems.id}] An error occurred while connecting ids [#{new_ids.join(',')}]: #{err}") _log.log_backtrace(err) end elsif connect_proc @@ -189,7 +189,7 @@ def update_relats_by_ids(prev_ids, new_ids, disconnect_proc, connect_proc, bulk_ begin connect_proc.call(n) rescue => err - _log.error "EMS: [#{@ems.name}], id: [#{@ems.id}] An error occurred while connecting id [#{n}]: #{err}" + _log.error("EMS: [#{@ems.name}], id: [#{@ems.id}] An error occurred while connecting id [#{n}]: #{err}") _log.log_backtrace(err) end end diff --git a/app/models/ems_refresh/metadata_relats.rb b/app/models/ems_refresh/metadata_relats.rb index 1002b345b9f..b0c65c9d385 100644 --- a/app/models/ems_refresh/metadata_relats.rb +++ b/app/models/ems_refresh/metadata_relats.rb @@ -8,7 +8,7 @@ def vmdb_relats(target, relats = nil) relats ||= default_relats_hash return relats if target.nil? - _log.info "Getting VMDB relationships for #{target.class} [#{target.name}] id: [#{target.id}]..." 
+ _log.info("Getting VMDB relationships for #{target.class} [#{target.name}] id: [#{target.id}]...") if target.kind_of?(ExtManagementSystem) vmdb_relats_ems(target, relats) else @@ -16,7 +16,7 @@ def vmdb_relats(target, relats = nil) vmdb_relats_descendants(target, relats) end - _log.info "Getting VMDB relationships for #{target.class} [#{target.name}] id: [#{target.id}]...Complete" + _log.info("Getting VMDB relationships for #{target.class} [#{target.name}] id: [#{target.id}]...Complete") relats end diff --git a/app/models/ems_refresh/refreshers/ems_refresher_mixin.rb b/app/models/ems_refresh/refreshers/ems_refresher_mixin.rb index c6449ff6620..5bb4a84ce73 100644 --- a/app/models/ems_refresh/refreshers/ems_refresher_mixin.rb +++ b/app/models/ems_refresh/refreshers/ems_refresher_mixin.rb @@ -17,12 +17,12 @@ def refresh ems_refresh_start_time = Time.now begin - _log.info "Refreshing all targets..." + _log.info("Refreshing all targets...") log_ems_target = format_ems_for_logging(ems) - _log.info "#{log_ems_target} Refreshing targets for EMS..." - targets.each { |t| _log.info "#{log_ems_target} #{t.class} [#{t.name}] id [#{t.id}]" } + _log.info("#{log_ems_target} Refreshing targets for EMS...") + targets.each { |t| _log.info("#{log_ems_target} #{t.class} [#{t.name}] id [#{t.id}]") } _, timings = Benchmark.realtime_block(:ems_refresh) { refresh_targets_for_ems(ems, targets) } - _log.info "#{log_ems_target} Refreshing targets for EMS...Complete - Timings #{timings.inspect}" + _log.info("#{log_ems_target} Refreshing targets for EMS...Complete - Timings #{timings.inspect}") rescue => e raise if EmsRefresh.debug_failures @@ -46,7 +46,7 @@ def refresh post_refresh(ems, ems_refresh_start_time) end - _log.info "Refreshing all targets...Complete" + _log.info("Refreshing all targets...Complete") raise PartialRefreshError, partial_refresh_errors.join(', ') if partial_refresh_errors.any? end @@ -59,10 +59,10 @@ def preprocess_targets ems_in_list = targets.any? 
{ |t| t.kind_of?(ExtManagementSystem) } if ems_in_list - _log.info "Defaulting to full refresh for EMS: [#{ems.name}], id: [#{ems.id}]." if targets.length > 1 + _log.info("Defaulting to full refresh for EMS: [#{ems.name}], id: [#{ems.id}].") if targets.length > 1 targets.clear << ems elsif targets.length >= @full_refresh_threshold - _log.info "Escalating to full refresh for EMS: [#{ems.name}], id: [#{ems.id}]." + _log.info("Escalating to full refresh for EMS: [#{ems.name}], id: [#{ems.id}].") targets.clear << ems end end @@ -83,7 +83,7 @@ def refresh_targets_for_ems(ems, targets) until targets_with_inventory.empty? target, inventory = targets_with_inventory.shift - _log.info "#{log_header} Refreshing target #{target.class} [#{target.name}] id [#{target.id}]..." + _log.info("#{log_header} Refreshing target #{target.class} [#{target.name}] id [#{target.id}]...") parsed, _ = Benchmark.realtime_block(:parse_targeted_inventory) do parse_targeted_inventory(ems, target, inventory) end @@ -93,10 +93,10 @@ def refresh_targets_for_ems(ems, targets) _log.info "#{log_header} Refreshing target #{target.class} [#{target.name}] id [#{target.id}]...Complete" if parsed.kind_of?(Array) - _log.info "#{log_header} ManagerRefresh Post Processing #{target.class} [#{target.name}] id [#{target.id}]..." 
+ _log.info("#{log_header} ManagerRefresh Post Processing #{target.class} [#{target.name}] id [#{target.id}]...") # We have array of InventoryCollection, we want to use that data for post refresh Benchmark.realtime_block(:manager_refresh_post_processing) { manager_refresh_post_processing(ems, target, parsed) } - _log.info "#{log_header} ManagerRefresh Post Processing #{target.class} [#{target.name}] id [#{target.id}]...Complete" + _log.info("#{log_header} ManagerRefresh Post Processing #{target.class} [#{target.name}] id [#{target.id}]...Complete") end end end @@ -126,9 +126,9 @@ def collect_inventory_for_targets(ems, targets) log_header = format_ems_for_logging(ems) targets_to_collectors.map do |target, collector_class| log_msg = "#{log_header} Inventory Collector for #{target.class} [#{target.try(:name)}] id: [#{target.id}]" - _log.info "#{log_msg}..." + _log.info("#{log_msg}...") collector = collector_class.new(ems, target) - _log.info "#{log_msg}...Complete" + _log.info("#{log_msg}...Complete") [target, collector] end else @@ -142,9 +142,9 @@ def parse_targeted_inventory(ems, target, collector) # new refreshers should override this method to parse inventory # TODO: remove this call after all refreshers support retrieving # inventory separate from parsing - if collector.kind_of? ManagerRefresh::Inventory::Collector + if collector.kind_of?(ManagerRefresh::Inventory::Collector) log_header = format_ems_for_logging(ems) - _log.debug "#{log_header} Parsing inventory..." 
+ _log.debug("#{log_header} Parsing inventory...") inventory_collections, = Benchmark.realtime_block(:parse_inventory) do persister = ManagerRefresh::Inventory.persister_class_for(target.class).new(ems, target) parser = ManagerRefresh::Inventory.parser_class_for(target.class).new @@ -152,7 +152,7 @@ def parse_targeted_inventory(ems, target, collector) i = ManagerRefresh::Inventory.new(persister, collector, parser) i.inventory_collections end - _log.debug "#{log_header} Parsing inventory...Complete" + _log.debug("#{log_header} Parsing inventory...Complete") inventory_collections else parsed, _ = Benchmark.realtime_block(:parse_legacy_inventory) { parse_legacy_inventory(ems) } @@ -177,10 +177,10 @@ def post_refresh(ems, ems_refresh_start_time) log_ems_target = "EMS: [#{ems.name}], id: [#{ems.id}]" # Do any post-operations for this EMS post_process_refresh_classes.each do |klass| - next unless klass.respond_to? :post_refresh_ems - _log.info "#{log_ems_target} Performing post-refresh operations for #{klass} instances..." 
+ next unless klass.respond_to?(:post_refresh_ems) + _log.info("#{log_ems_target} Performing post-refresh operations for #{klass} instances...") klass.post_refresh_ems(ems.id, ems_refresh_start_time) - _log.info "#{log_ems_target} Performing post-refresh operations for #{klass} instances...Complete" + _log.info("#{log_ems_target} Performing post-refresh operations for #{klass} instances...Complete") end end end diff --git a/app/models/ems_refresh/save_inventory.rb b/app/models/ems_refresh/save_inventory.rb index f3b0edb1aa7..8809effbfcb 100644 --- a/app/models/ems_refresh/save_inventory.rb +++ b/app/models/ems_refresh/save_inventory.rb @@ -66,7 +66,7 @@ def save_vms_inventory(ems, hashes, target = nil, disconnect = true) disconnects_index = disconnects.index_by { |vm| vm } vms_by_uid_ems = vms.group_by(&:uid_ems) dup_vms_uids = (vms_uids.duplicates + vms.collect(&:uid_ems).duplicates).uniq.sort - _log.info "#{log_header} Duplicate unique values found: #{dup_vms_uids.inspect}" unless dup_vms_uids.empty? + _log.info("#{log_header} Duplicate unique values found: #{dup_vms_uids.inspect}") unless dup_vms_uids.empty? invalids_found = false # Clear vms, so GC can clean them diff --git a/app/models/ems_refresh/save_inventory_block_storage.rb b/app/models/ems_refresh/save_inventory_block_storage.rb index ffdb8e4484c..c4b96111678 100644 --- a/app/models/ems_refresh/save_inventory_block_storage.rb +++ b/app/models/ems_refresh/save_inventory_block_storage.rb @@ -21,7 +21,7 @@ def save_ems_block_storage_inventory(ems, hashes, target = nil) _log.info("#{log_header} Saving EMS Inventory...") if debug_trace require 'yaml' - _log.debug "#{log_header} hashes:\n#{YAML.dump(hashes)}" + _log.debug("#{log_header} hashes:\n#{YAML.dump(hashes)}") end child_keys = [ @@ -111,7 +111,7 @@ def save_block_storage_backing_links_inventory(_ems, hashes, _target) # Existing disk, update attributes. 
if dh.key?(:id) unless (disk = Disk.where(:id => dh[:id]).first) - _log.warn "Expected disk not found, id = #{dh[:id]}" + _log.warn("Expected disk not found, id = #{dh[:id]}") next end disk.update(dh.except(:id, :backing_volume)) diff --git a/app/models/ems_refresh/save_inventory_cloud.rb b/app/models/ems_refresh/save_inventory_cloud.rb index cf6552532d3..6bcd9a01460 100644 --- a/app/models/ems_refresh/save_inventory_cloud.rb +++ b/app/models/ems_refresh/save_inventory_cloud.rb @@ -42,7 +42,7 @@ def save_ems_cloud_inventory(ems, hashes, target = nil, disconnect = true) _log.info("#{log_header} Saving EMS Inventory...") if debug_trace require 'yaml' - _log.debug "#{log_header} hashes:\n#{YAML.dump(hashes)}" + _log.debug("#{log_header} hashes:\n#{YAML.dump(hashes)}") end child_keys = [ diff --git a/app/models/ems_refresh/save_inventory_infra.rb b/app/models/ems_refresh/save_inventory_infra.rb index 8a82e3c1518..843b5987bcd 100644 --- a/app/models/ems_refresh/save_inventory_infra.rb +++ b/app/models/ems_refresh/save_inventory_infra.rb @@ -49,7 +49,7 @@ def save_ems_infra_inventory(ems, hashes, target = nil, disconnect = true) _log.info("#{log_header} Saving EMS Inventory...") if debug_trace require 'yaml' - _log.debug "#{log_header} hashes:\n#{YAML.dump(hashes)}" + _log.debug("#{log_header} hashes:\n#{YAML.dump(hashes)}") end child_keys = [ @@ -357,18 +357,18 @@ def save_storage_files_inventory(storage, hashes) def find_host(h, ems_id) found = nil if h[:ems_ref] - _log.debug "EMS ID: #{ems_id} Host database lookup - ems_ref: [#{h[:ems_ref]}] ems_id: [#{ems_id}]" + _log.debug("EMS ID: #{ems_id} Host database lookup - ems_ref: [#{h[:ems_ref]}] ems_id: [#{ems_id}]") found = Host.find_by(:ems_ref => h[:ems_ref], :ems_id => ems_id) end if found.nil? if h[:hostname].nil? && h[:ipaddress].nil? 
- _log.debug "EMS ID: #{ems_id} Host database lookup - name [#{h[:name]}]" + _log.debug("EMS ID: #{ems_id} Host database lookup - name [#{h[:name]}]") found = Host.where(:ems_id => ems_id).detect { |e| e.name.downcase == h[:name].downcase } elsif ["localhost", "localhost.localdomain", "127.0.0.1"].include_none?(h[:hostname], h[:ipaddress]) # host = Host.find_by_hostname(hostname) has a risk of creating duplicate hosts # allow a deleted EMS to be re-added an pick up old orphaned hosts - _log.debug "EMS ID: #{ems_id} Host database lookup - hostname: [#{h[:hostname]}] IP: [#{h[:ipaddress]}] ems_ref: [#{h[:ems_ref]}]" + _log.debug("EMS ID: #{ems_id} Host database lookup - hostname: [#{h[:hostname]}] IP: [#{h[:ipaddress]}] ems_ref: [#{h[:ems_ref]}]") found = look_up_host(h[:hostname], h[:ipaddress], :ems_ref => h[:ems_ref]) end end diff --git a/app/models/ems_refresh/save_inventory_network.rb b/app/models/ems_refresh/save_inventory_network.rb index fe97a312a16..7995436d446 100644 --- a/app/models/ems_refresh/save_inventory_network.rb +++ b/app/models/ems_refresh/save_inventory_network.rb @@ -32,7 +32,7 @@ def save_ems_network_inventory(ems, hashes, target = nil) _log.info("#{log_header} Saving EMS Network Inventory...") if debug_trace require 'yaml' - _log.debug "#{log_header} hashes:\n#{YAML.dump(hashes)}" + _log.debug("#{log_header} hashes:\n#{YAML.dump(hashes)}") end child_keys = [ diff --git a/app/models/ems_refresh/save_inventory_object_storage.rb b/app/models/ems_refresh/save_inventory_object_storage.rb index fb50c821321..a4413e7b102 100644 --- a/app/models/ems_refresh/save_inventory_object_storage.rb +++ b/app/models/ems_refresh/save_inventory_object_storage.rb @@ -19,7 +19,7 @@ def save_ems_object_storage_inventory(ems, hashes, target = nil) _log.info("#{log_header} Saving EMS Object Storage Inventory...") if debug_trace require 'yaml' - _log.debug "#{log_header} hashes:\n#{YAML.dump(hashes)}" + _log.debug("#{log_header} hashes:\n#{YAML.dump(hashes)}") end 
child_keys = [ diff --git a/app/models/ems_refresh/save_inventory_physical_infra.rb b/app/models/ems_refresh/save_inventory_physical_infra.rb index d8d5df4fd6e..ca678d3ac1d 100644 --- a/app/models/ems_refresh/save_inventory_physical_infra.rb +++ b/app/models/ems_refresh/save_inventory_physical_infra.rb @@ -18,7 +18,7 @@ def save_ems_physical_infra_inventory(ems, hashes, target = nil) _log.info("#{log_header} Saving EMS Inventory...") if debug_trace require 'yaml' - _log.debug "#{log_header} hashes:\n#{YAML.dump(hashes)}" + _log.debug("#{log_header} hashes:\n#{YAML.dump(hashes)}") end child_keys = [ diff --git a/app/models/ext_management_system.rb b/app/models/ext_management_system.rb index 89728a0c34c..643f3282fb7 100644 --- a/app/models/ext_management_system.rb +++ b/app/models/ext_management_system.rb @@ -45,11 +45,11 @@ def self.supported_types_and_descriptions_hash has_many :disks, :through => :hardwares has_many :storages, -> { distinct }, :through => :hosts - has_many :ems_events, -> { order "timestamp" }, :class_name => "EmsEvent", :foreign_key => "ems_id", + has_many :ems_events, -> { order("timestamp") }, :class_name => "EmsEvent", :foreign_key => "ems_id", :inverse_of => :ext_management_system - has_many :generated_events, -> { order "timestamp" }, :class_name => "EmsEvent", :foreign_key => "generating_ems_id", + has_many :generated_events, -> { order("timestamp") }, :class_name => "EmsEvent", :foreign_key => "generating_ems_id", :inverse_of => :generating_ems - has_many :policy_events, -> { order "timestamp" }, :class_name => "PolicyEvent", :foreign_key => "ems_id" + has_many :policy_events, -> { order("timestamp") }, :class_name => "PolicyEvent", :foreign_key => "ems_id" has_many :blacklisted_events, :foreign_key => "ems_id", :dependent => :destroy, :inverse_of => :ext_management_system has_many :miq_alert_statuses, :foreign_key => "ems_id" @@ -95,7 +95,7 @@ def hostname_uniqueness_valid? 
include ComplianceMixin include CustomAttributeMixin - after_destroy { |record| $log.info "MIQ(ExtManagementSystem.after_destroy) Removed EMS [#{record.name}] id [#{record.id}]" } + after_destroy { |record| $log.info("MIQ(ExtManagementSystem.after_destroy) Removed EMS [#{record.name}] id [#{record.id}]") } acts_as_miq_taggable @@ -196,7 +196,7 @@ def self.create_discovered_ems(ost) :zone_id => MiqServer.my_server.zone.id ) - _log.info "#{ui_lookup(:table => "ext_management_systems")} #{ems.name} created" + _log.info("#{ui_lookup(:table => "ext_management_systems")} #{ems.name} created") AuditEvent.success( :event => "ems_created", :target_id => ems.id, @@ -423,12 +423,12 @@ def self.ems_physical_infra_discovery_types end def disable! - _log.info "Disabling EMS [#{name}] id [#{id}]." + _log.info("Disabling EMS [#{name}] id [#{id}].") update!(:enabled => false) end def enable! - _log.info "Enabling EMS [#{name}] id [#{id}]." + _log.info("Enabling EMS [#{name}] id [#{id}].") update!(:enabled => true) end @@ -572,7 +572,7 @@ def memory_reserve def vm_log_user_event(_vm, user_event) $log.info(user_event) - $log.warn "User event logging is not available on [#{self.class.name}] Name:[#{name}]" + $log.warn("User event logging is not available on [#{self.class.name}] Name:[#{name}]") end # @@ -617,7 +617,7 @@ def start_event_monitor def stop_event_monitor return if event_monitor_class.nil? - _log.info "EMS [#{name}] id [#{id}]: Stopping event monitor." 
+ _log.info("EMS [#{name}] id [#{id}]: Stopping event monitor.") event_monitor_class.stop_worker_for_ems(self) end diff --git a/app/models/ext_management_system_performance.rb b/app/models/ext_management_system_performance.rb index 1ab49e4fda7..d1116a8b093 100644 --- a/app/models/ext_management_system_performance.rb +++ b/app/models/ext_management_system_performance.rb @@ -1,5 +1,5 @@ class ExtManagementSystemPerformance < MetricRollup - default_scope { where "resource_type = 'ExtManagementSystem' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'ExtManagementSystem' and resource_id IS NOT NULL") } belongs_to :ext_management_system, :foreign_key => :resource_id end diff --git a/app/models/hardware.rb b/app/models/hardware.rb index d6b0dc0c049..f0e07f09396 100644 --- a/app/models/hardware.rb +++ b/app/models/hardware.rb @@ -8,7 +8,7 @@ class Hardware < ApplicationRecord has_many :networks, :dependent => :destroy has_many :firmwares, :as => :resource, :dependent => :destroy - has_many :disks, -> { order :location }, :dependent => :destroy + has_many :disks, -> { order(:location) }, :dependent => :destroy has_many :hard_disks, -> { where("device_type != 'floppy' AND device_type NOT LIKE '%cdrom%'").order(:location) }, :class_name => "Disk", :foreign_key => :hardware_id has_many :floppies, -> { where("device_type = 'floppy'").order(:location) }, :class_name => "Disk", :foreign_key => :hardware_id has_many :cdroms, -> { where("device_type LIKE '%cdrom%'").order(:location) }, :class_name => "Disk", :foreign_key => :hardware_id @@ -17,10 +17,10 @@ class Hardware < ApplicationRecord has_many :volumes, :dependent => :destroy has_many :guest_devices, :dependent => :destroy - has_many :storage_adapters, -> { where "device_type = 'storage'" }, :class_name => "GuestDevice", :foreign_key => :hardware_id - has_many :nics, -> { where "device_type = 'ethernet'" }, :class_name => "GuestDevice", :foreign_key => :hardware_id - has_many :ports, -> { where 
"device_type != 'storage'" }, :class_name => "GuestDevice", :foreign_key => :hardware_id - has_many :physical_ports, -> { where "device_type = 'physical_port'" }, :class_name => "GuestDevice", :foreign_key => :hardware_id + has_many :storage_adapters, -> { where("device_type = 'storage'") }, :class_name => "GuestDevice", :foreign_key => :hardware_id + has_many :nics, -> { where("device_type = 'ethernet'") }, :class_name => "GuestDevice", :foreign_key => :hardware_id + has_many :ports, -> { where("device_type != 'storage'") }, :class_name => "GuestDevice", :foreign_key => :hardware_id + has_many :physical_ports, -> { where("device_type = 'physical_port'") }, :class_name => "GuestDevice", :foreign_key => :hardware_id virtual_column :ipaddresses, :type => :string_set, :uses => :networks virtual_column :hostnames, :type => :string_set, :uses => :networks @@ -80,7 +80,7 @@ def self.add_elements(parent, xmlNode) begin parent.hardware.send("m_#{e.name}", parent, e, deletes) if parent.hardware.respond_to?("m_#{e.name}") rescue => err - _log.warn err.to_s + _log.warn(err.to_s) end end diff --git a/app/models/host.rb b/app/models/host.rb index 1520f77fa37..e1eb6e10647 100644 --- a/app/models/host.rb +++ b/app/models/host.rb @@ -219,7 +219,7 @@ def process_events def raise_cluster_event(ems_cluster, event) # accept ids or objects - ems_cluster = EmsCluster.find(ems_cluster) unless ems_cluster.kind_of? 
EmsCluster + ems_cluster = EmsCluster.find(ems_cluster) unless ems_cluster.kind_of?(EmsCluster) inputs = {:ems_cluster => ems_cluster, :host => self} begin MiqEvent.raise_evm_event(self, event, inputs) @@ -347,15 +347,15 @@ def check_policy_prevent(event, *cb_method) end def ipmi_power_on - run_ipmi_command :power_on + run_ipmi_command(:power_on) end def ipmi_power_off - run_ipmi_command :power_off + run_ipmi_command(:power_off) end def ipmi_power_reset - run_ipmi_command :power_reset + run_ipmi_command(:power_reset) end def reset @@ -529,7 +529,7 @@ def arch return "unknown" unless hardware && !hardware.cpu_type.nil? cpu = hardware.cpu_type.to_s.downcase return cpu if cpu.include?('x86') - return "x86" if cpu.starts_with? "intel" + return "x86" if cpu.starts_with?("intel") "unknown" end @@ -639,7 +639,7 @@ def disconnect_inv def connect_ems(e) return if ext_management_system == e - _log.debug "Connecting Host [#{name}] id [#{id}] to EMS [#{e.name}] id [#{e.id}]" + _log.debug("Connecting Host [#{name}] id [#{id}] to EMS [#{e.name}] id [#{e.id}]") self.ext_management_system = e save end @@ -647,7 +647,7 @@ def connect_ems(e) def disconnect_ems(e = nil) if e.nil? || ext_management_system == e log_text = " from EMS [#{ext_management_system.name}] id [#{ext_management_system.id}]" unless ext_management_system.nil? 
- _log.info "Disconnecting Host [#{name}] id [#{id}]#{log_text}" + _log.info("Disconnecting Host [#{name}] id [#{id}]#{log_text}") self.ext_management_system = nil self.ems_cluster = nil @@ -658,14 +658,14 @@ def disconnect_ems(e = nil) def connect_storage(s) unless storages.include?(s) - _log.debug "Connecting Host [#{name}] id [#{id}] to Storage [#{s.name}] id [#{s.id}]" + _log.debug("Connecting Host [#{name}] id [#{id}] to Storage [#{s.name}] id [#{s.id}]") storages << s save end end def disconnect_storage(s) - _log.info "Disconnecting Host [#{name}] id [#{id}] from Storage [#{s.name}] id [#{s.id}]" + _log.info("Disconnecting Host [#{name}] id [#{id}] from Storage [#{s.name}] id [#{s.id}]") storages.delete(s) save end @@ -718,14 +718,14 @@ def parent_datacenter alias_method :owning_datacenter, :parent_datacenter def self.save_metadata(id, dataArray) - _log.info "for host [#{id}]" + _log.info("for host [#{id}]") host = Host.find_by(:id => id) data, data_type = dataArray data.replace(MIQEncode.decode(data)) if data_type.include?('b64,zlib') doc = data_type.include?('yaml') ? YAML.load(data) : MiqXml.load(data) host.add_elements(doc) host.save! - _log.info "for host [#{id}] host saved" + _log.info("for host [#{id}] host saved") rescue => err _log.log_backtrace(err) return false @@ -779,7 +779,7 @@ def verify_credentials_with_ssh(auth_type = nil, options = {}) begin # connect_ssh logs address and user name(s) being used to make connection - _log.info "Verifying Host SSH credentials for [#{name}]" + _log.info("Verifying Host SSH credentials for [#{name}]") connect_ssh(options) { |ssu| ssu.exec("uname -a") } rescue Net::SSH::AuthenticationFailed raise MiqException::MiqInvalidCredentialsError, _("Login failed due to a bad username or password.") @@ -833,7 +833,7 @@ def self.discoverByIpRange(starting, ending, options = {:ping => true}) ipaddr = network_id + "." + h.to_s unless Host.find_by(:ipaddress => ipaddr).nil? 
# skip discover for existing hosts - _log.info "ipaddress '#{ipaddr}' exists, skipping discovery" + _log.info("ipaddress '#{ipaddr}' exists, skipping discovery") next end @@ -910,14 +910,14 @@ def detect_discovered_hypervisor(ost, ipaddr) if has_credentials?(:ws) begin with_provider_connection(:ip => ipaddr) do |vim| - _log.info "VIM Information for ESX Host with IP Address: [#{ipaddr}], Information: #{vim.about.inspect}" + _log.info("VIM Information for ESX Host with IP Address: [#{ipaddr}], Information: #{vim.about.inspect}") self.vmm_product = vim.about['name'].dup.split(' ').last self.vmm_version = vim.about['version'] self.vmm_buildnumber = vim.about['build'] self.name = "#{vim.about['name']} (#{ipaddr})" end rescue => err - _log.warn "Cannot connect to ESX Host with IP Address: [#{ipaddr}], Username: [#{authentication_userid(:ws)}] because #{err.message}" + _log.warn("Cannot connect to ESX Host with IP Address: [#{ipaddr}], Username: [#{authentication_userid(:ws)}] because #{err.message}") end end self.type = %w(esx esxi).include?(vmm_product.to_s.downcase) ? "ManageIQ::Providers::Vmware::InfraManager::HostEsx" : "ManageIQ::Providers::Vmware::InfraManager::Host" @@ -947,10 +947,10 @@ def self.ost_inspect(ost) def rediscover(ipaddr, discover_types = [:esx]) require 'manageiq-network_discovery' ost = OpenStruct.new(:usePing => true, :discover_types => discover_types, :ipaddr => ipaddr) - _log.info "Rediscovering Host: #{ipaddr} with types: #{discover_types.inspect}" + _log.info("Rediscovering Host: #{ipaddr} with types: #{discover_types.inspect}") begin ManageIQ::NetworkDiscovery.scanHost(ost) - _log.info "Rediscovering Host: #{ipaddr} raw results: #{self.class.ost_inspect(ost)}" + _log.info("Rediscovering Host: #{ipaddr} raw results: #{self.class.ost_inspect(ost)}") unless ost.hypervisor.empty? 
detect_discovered_hypervisor(ost, ipaddr) @@ -967,14 +967,14 @@ def rediscover(ipaddr, discover_types = [:esx]) def self.discoverHost(options) require 'manageiq-network_discovery' ost = OpenStruct.new(Marshal.load(options)) - _log.info "Discovering Host: #{ost_inspect(ost)}" + _log.info("Discovering Host: #{ost_inspect(ost)}") begin ManageIQ::NetworkDiscovery.scanHost(ost) if ost.hypervisor.empty? - _log.info "NOT Discovered: #{ost_inspect(ost)}" + _log.info("NOT Discovered: #{ost_inspect(ost)}") else - _log.info "Discovered: #{ost_inspect(ost)}" + _log.info("Discovered: #{ost_inspect(ost)}") if [:virtualcenter, :scvmm, :rhevm].any? { |ems_type| ost.hypervisor.include?(ems_type) } ExtManagementSystem.create_discovered_ems(ost) @@ -1000,11 +1000,11 @@ def self.discoverHost(options) unless cred.nil? || cred[:userid].blank? ipmi = MiqIPMI.new(host.ipmi_address, cred[:userid], cred[:password]) if ipmi.connected? - _log.warn "IPMI connected to Host:<#{host.ipmi_address}> with User:<#{cred[:userid]}>" + _log.warn("IPMI connected to Host:<#{host.ipmi_address}> with User:<#{cred[:userid]}>") host.update_authentication(:ipmi => cred) host.scan else - _log.warn "IPMI did not connect to Host:<#{host.ipmi_address}> with User:<#{cred[:userid]}>" + _log.warn("IPMI did not connect to Host:<#{host.ipmi_address}> with User:<#{cred[:userid]}>") end end else @@ -1018,7 +1018,7 @@ def self.discoverHost(options) host.save! 
end - _log.info "#{host.name} created" + _log.info("#{host.name} created") AuditEvent.success(:event => "host_created", :target_id => host.id, :target_class => "Host", :message => "#{host.name} created") end end @@ -1029,15 +1029,15 @@ def self.discoverHost(options) end def self.get_hostname(ipAddress) - _log.info "Resolving hostname: [#{ipAddress}]" + _log.info("Resolving hostname: [#{ipAddress}]") begin ret = Socket.gethostbyname(ipAddress) name = ret.first rescue => err - _log.error "ERROR: #{err}" + _log.error("ERROR: #{err}") return nil end - _log.info "Resolved hostname: [#{name}] to [#{ipAddress}]" + _log.info("Resolved hostname: [#{name}] to [#{ipAddress}]") name end @@ -1066,15 +1066,15 @@ def connect_ssh(options = {}) logged_options = options.dup logged_options[:key_data] = "[FILTERED]" if logged_options[:key_data] - _log.info "Initiating SSH connection to Host:[#{name}] using [#{hostname}] for user:[#{users}]. Options:[#{logged_options.inspect}]" + _log.info("Initiating SSH connection to Host:[#{name}] using [#{hostname}] for user:[#{users}]. Options:[#{logged_options.inspect}]") begin MiqSshUtil.shell_with_su(hostname, rl_user, rl_password, su_user, su_password, options) do |ssu, _shell| - _log.info "SSH connection established to [#{hostname}]" + _log.info("SSH connection established to [#{hostname}]") yield(ssu) end - _log.info "SSH connection completed to [#{hostname}]" + _log.info("SSH connection completed to [#{hostname}]") rescue Exception => err - _log.error "SSH connection failed for [#{hostname}] with [#{err.class}: #{err}]" + _log.error("SSH connection failed for [#{hostname}] with [#{err.class}: #{err}]") raise err end end @@ -1363,7 +1363,7 @@ def scan_from_queue(taskid = nil) # Skip SSH for ESXi hosts unless is_vmware_esxi? if hostname.blank? 
- _log.warn "No hostname defined for #{log_target}" + _log.warn("No hostname defined for #{log_target}") task.update_status("Finished", "Warn", "Scanning incomplete due to missing hostname") if task return end @@ -1371,7 +1371,7 @@ def scan_from_queue(taskid = nil) update_ssh_auth_status! if respond_to?(:update_ssh_auth_status!) if missing_credentials? - _log.warn "No credentials defined for #{log_target}" + _log.warn("No credentials defined for #{log_target}") task.update_status("Finished", "Warn", "Scanning incomplete due to Credential Issue") if task return end @@ -1683,7 +1683,7 @@ def get_performance_metric(capture_interval, metric, range, function = nil) time_range[0], time_range[1] ] - ).order "timestamp" + ).order("timestamp") if capture_interval.to_sym == :realtime && metric.to_s.starts_with?("v_pct_cpu_") vm_vals_by_ts = get_pct_cpu_metric_from_child_vm_performances(metric, capture_interval, time_range) diff --git a/app/models/host_metric.rb b/app/models/host_metric.rb index 6eac8c16f77..5142ffab51c 100644 --- a/app/models/host_metric.rb +++ b/app/models/host_metric.rb @@ -1,5 +1,5 @@ class HostMetric < Metric - default_scope { where "resource_type = 'Host' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'Host' and resource_id IS NOT NULL") } belongs_to :host, :foreign_key => :resource_id belongs_to :ems_cluster, :foreign_key => :parent_ems_cluster_id diff --git a/app/models/host_performance.rb b/app/models/host_performance.rb index 2ffd196970e..83a643f1ffc 100644 --- a/app/models/host_performance.rb +++ b/app/models/host_performance.rb @@ -1,5 +1,5 @@ class HostPerformance < MetricRollup - default_scope { where "resource_type = 'Host' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'Host' and resource_id IS NOT NULL") } belongs_to :ems_cluster, :foreign_key => :parent_ems_cluster_id belongs_to :host, :foreign_key => :resource_id diff --git a/app/models/job.rb b/app/models/job.rb index 
ed6c12d2107..0bf00ebaec9 100644 --- a/app/models/job.rb +++ b/app/models/job.rb @@ -30,7 +30,7 @@ def self.create_job(process_type, options = {}) job.initialize_attributes job.save job.create_miq_task(job.attributes_for_task) - $log.info "Job created: #{job.attributes_log}" + $log.info("Job created: #{job.attributes_log}") job.signal(:initializing) job end @@ -56,11 +56,11 @@ def initialize_attributes def check_active_on_destroy if self.is_active? - _log.warn "Job is active, delete not allowed - #{attributes_log}" + _log.warn("Job is active, delete not allowed - #{attributes_log}") throw :abort end - _log.info "Job deleted: #{attributes_log}" + _log.info("Job deleted: #{attributes_log}") true end @@ -69,7 +69,7 @@ def self.update_message(job_guid, message) if job job.update_message(message) else - _log.warn "jobs.guid: [#{jobid}] not found" + _log.warn("jobs.guid: [#{jobid}] not found") end end @@ -92,7 +92,7 @@ def set_status(message, status = "ok") end def dispatch_start - _log.info "Dispatch Status is 'pending'" + _log.info("Dispatch Status is 'pending'") self.dispatch_status = "pending" save @storage_dispatcher_process_finish_flag = false @@ -100,7 +100,7 @@ def dispatch_start def dispatch_finish return if @storage_dispatcher_process_finish_flag - _log.info "Dispatch Status is 'finished'" + _log.info("Dispatch Status is 'finished'") self.dispatch_status = "finished" save @storage_dispatcher_process_finish_flag = true @@ -110,26 +110,26 @@ def process_cancel(*args) options = args.first || {} options[:message] ||= options[:userid] ? 
"Job canceled by user [#{options[:useid]}] on #{Time.now}" : "Job canceled on #{Time.now}" options[:status] ||= "ok" - _log.info "job canceling, #{options[:message]}" + _log.info("job canceling, #{options[:message]}") signal(:finish, options[:message], options[:status]) end def process_error(*args) message, status = args - _log.error message.to_s + _log.error(message.to_s) set_status(message, status) end def process_abort(*args) message, status = args - _log.error "job aborting, #{message}" + _log.error("job aborting, #{message}") set_status(message, status) signal(:finish, message, status) end def process_finished(*args) message, status = args - _log.info "job finished, #{message}" + _log.info("job finished, #{message}") set_status(message, status) dispatch_finish end @@ -152,7 +152,7 @@ def target_entity end def self.check_jobs_for_timeout - $log.debug "Checking for timed out jobs" + $log.debug("Checking for timed out jobs") begin in_my_region .where("state != 'finished' and (state != 'waiting_to_start' or dispatch_status = 'active')") diff --git a/app/models/job_proxy_dispatcher.rb b/app/models/job_proxy_dispatcher.rb index 2d92e0d567b..ac0e4a8aa50 100644 --- a/app/models/job_proxy_dispatcher.rb +++ b/app/models/job_proxy_dispatcher.rb @@ -39,8 +39,8 @@ def dispatch jobs_to_dispatch.each do |job| if concurrent_vm_scans_limit > 0 && active_vm_scans_by_zone[@zone] >= concurrent_vm_scans_limit - _log.warn "SKIPPING remaining %s jobs in dispatch since there are [%d] active scans in the zone [%s]" % - [ui_lookup(:table => VmOrTemplate.name), active_vm_scans_by_zone[@zone], @zone] + _log.warn("SKIPPING remaining %s jobs in dispatch since there are [%d] active scans in the zone [%s]" % + [ui_lookup(:table => VmOrTemplate.name), active_vm_scans_by_zone[@zone], @zone]) break end @vm = @vms_for_dispatch_jobs.detect { |v| v.id == job.target_id } @@ -81,7 +81,7 @@ def dispatch if proxy # Skip this embedded scan if the host/vc we'd need has already exceeded the limit next if 
embedded_resource_limit_exceeded?(job) - _log.info "STARTING job: [#{job.guid}] on proxy: [#{proxy.name}]" + _log.info("STARTING job: [#{job.guid}] on proxy: [#{proxy.name}]") Benchmark.current_realtime[:start_job_on_proxy_count] += 1 Benchmark.realtime_block(:start_job_on_proxy) { start_job_on_proxy(job, proxy) } elsif @vm.host_id && @vm.storage_id && !@vm.template? @@ -91,7 +91,7 @@ def dispatch end end end - _log.info "Complete - Timings: #{t.inspect}" + _log.info("Complete - Timings: #{t.inspect}") end def container_image_scan_class @@ -127,7 +127,7 @@ def dispatch_to_ems(ems_id, jobs, concurrent_ems_limit) end def do_dispatch(job, ems_id) - _log.info "Signaling start for container image scan job [#{job.id}]" + _log.info("Signaling start for container image scan job [#{job.id}]") job.update(:dispatch_status => "active", :started_on => Time.now.utc) @active_container_scans_by_zone_and_ems[@zone][ems_id] += 1 MiqQueue.put_unless_exists( @@ -170,7 +170,7 @@ def queue_signal(job, options) def start_job_on_proxy(job, proxy) assign_proxy_to_job(proxy, job) - _log.info "Job #{job.attributes_log}" + _log.info("Job #{job.attributes_log}") job_options = {:args => ["start"], :zone => MiqServer.my_zone, :server_guid => proxy.guid, :role => "smartproxy"} @active_vm_scans_by_zone[MiqServer.my_zone] += 1 queue_signal(job, job_options) diff --git a/app/models/ldap_region.rb b/app/models/ldap_region.rb index 6f90841b91b..6b5d4035b35 100644 --- a/app/models/ldap_region.rb +++ b/app/models/ldap_region.rb @@ -26,7 +26,7 @@ def user_search(options) users = domain.user_search(options) results.merge!(users) rescue => err - _log.error "Error during user search on domain <#{domain.id}:#{domain.name}>. Msg:<#{err}>" + _log.error("Error during user search on domain <#{domain.id}:#{domain.name}>. 
Msg:<#{err}>") end end results diff --git a/app/models/ldap_server.rb b/app/models/ldap_server.rb index 3449738fc54..2b43285ec2c 100644 --- a/app/models/ldap_server.rb +++ b/app/models/ldap_server.rb @@ -27,6 +27,6 @@ def verify_credentials def self.sync_data_from_timer(timestamp = Time.now) # Stub for now - _log.info "time: #{timestamp}" + _log.info("time: #{timestamp}") end end diff --git a/app/models/ldap_user.rb b/app/models/ldap_user.rb index 01eb4b14a83..bb947047272 100644 --- a/app/models/ldap_user.rb +++ b/app/models/ldap_user.rb @@ -42,7 +42,7 @@ def self.sync_users(ldap_server) db_users = {} ldap_server.ldap_users.select([:id, :dn]).find_each { |u| db_users[u[:dn]] = u[:id] } - _log.info "#{log_header} Initial DB User count: #{db_users.length}" + _log.info("#{log_header} Initial DB User count: #{db_users.length}") user_count = creates = updates = 0 @@ -64,7 +64,7 @@ def self.sync_users(ldap_server) user_count += 1 if user_count.remainder(1000).zero? ldap_server.save - _log.info "#{log_header} Processed <#{user_count}> LDAP Users. New User count: <#{creates}> Updates: <#{updates}>" + _log.info("#{log_header} Processed <#{user_count}> LDAP Users. New User count: <#{creates}> Updates: <#{updates}>") end ldap_server.save if creates.remainder(1000).zero? @@ -74,9 +74,9 @@ def self.sync_users(ldap_server) # Remaining Users are deletes deletes = db_users - _log.info "#{log_header} Creates: #{creates}" - _log.info "#{log_header} Updates: #{updates}" - _log.info "#{log_header} Deletes: #{deletes.length}" + _log.info("#{log_header} Creates: #{creates}") + _log.info("#{log_header} Updates: #{updates}") + _log.info("#{log_header} Deletes: #{deletes.length}") delete_ids = deletes.collect { |_dn, id| id } LdapUser.destroy(delete_ids) @@ -85,17 +85,17 @@ def self.sync_users(ldap_server) # or only ones that have changed since the last time. last_sync = ldap_server.last_user_sync if last_sync.nil? - _log.info "#{log_header} Initiating full LDAP user sync. 
Last User Sync: #{last_sync.inspect}" + _log.info("#{log_header} Initiating full LDAP user sync. Last User Sync: #{last_sync.inspect}") LdapUser.full_sync(ldap_server) else - _log.info "#{log_header} Syncing LDAP User data from: #{last_sync.inspect}" + _log.info("#{log_header} Syncing LDAP User data from: #{last_sync.inspect}") LdapUser.update_records_since(ldap_server, last_sync) end end def self.full_sync(ldap_server) log_header = "LDAP Server #{ldap_server.id} : <#{ldap_server.name}>" - _log.info "#{log_header} Starting full LDAP User sync" + _log.info("#{log_header} Starting full LDAP User sync") opts = {:return_result => false, :scope => :sub, :filter => Net::LDAP::Filter.eq("objectCategory", "Person")} rec_count = 0 @@ -106,7 +106,7 @@ def self.full_sync(ldap_server) ldap_server.search(options) { |entry| rec.update_record(entry) } end - _log.info "#{log_header} Completed LDAP User sync for <#{rec_count}> records" + _log.info("#{log_header} Completed LDAP User sync for <#{rec_count}> records") rec_count end @@ -117,7 +117,7 @@ def self.update_records_since(ldap_server, updates_since) # LDAP whenchanged format example: "20121214170416.0Z" when_changed = updates_since.utc.iso8601(1).gsub(/[-:T]/, '') - _log.info "#{log_header} Checking for updated records since <#{when_changed}>" + _log.info("#{log_header} Checking for updated records since <#{when_changed}>") opts[:filter] = opts[:filter] & Net::LDAP::Filter.ge("whenchanged", when_changed) rec_count = 0 @@ -127,7 +127,7 @@ def self.update_records_since(ldap_server, updates_since) rec = find_by_dn(dn) end - _log.info "#{log_header} Completed LDAP User sync for <#{rec_count}> records updated since <#{when_changed}>" + _log.info("#{log_header} Completed LDAP User sync for <#{rec_count}> records updated since <#{when_changed}>") rec_count end diff --git a/app/models/live_metric.rb b/app/models/live_metric.rb index fadf0815de1..3e1d9befd57 100644 --- a/app/models/live_metric.rb +++ b/app/models/live_metric.rb @@ 
-74,10 +74,10 @@ def self.validate_conditions(processed) def self.process_timestamps(processed, condition) ts = Time.parse("#{condition[:value]} UTC").utc - if %w(>= > =).include? condition[:op] + if %w(>= > =).include?(condition[:op]) processed[:start_time] = ts end - if %w(<= < =).include? condition[:op] + if %w(<= < =).include?(condition[:op]) processed[:end_time] = ts end end @@ -112,7 +112,7 @@ def self.fetch_live_metrics(resource, metrics, start_time, end_time, interval_na processed_metric end rescue => err - _log.error "An error occurred while connecting to #{resource}: #{err}" + _log.error("An error occurred while connecting to #{resource}: #{err}") end end end diff --git a/app/models/manageiq/providers/base_manager/event_catcher/runner.rb b/app/models/manageiq/providers/base_manager/event_catcher/runner.rb index 9c6ebeeaa4b..168363f1a1a 100644 --- a/app/models/manageiq/providers/base_manager/event_catcher/runner.rb +++ b/app/models/manageiq/providers/base_manager/event_catcher/runner.rb @@ -18,7 +18,7 @@ def after_initialize do_exit("EMS ID [#{@cfg[:ems_id]}] failed authentication check.", 1) unless @ems.authentication_check.first @filtered_events = @ems.blacklisted_event_names - _log.info "#{log_prefix} Event Catcher skipping the following events:" + _log.info("#{log_prefix} Event Catcher skipping the following events:") $log.log_hashes(@filtered_events) configure_event_flooding_prevention if worker_settings.try(:[], :flooding_monitor_enabled) diff --git a/app/models/manageiq/providers/base_manager/refresher.rb b/app/models/manageiq/providers/base_manager/refresher.rb index abd123437e8..28cb2f00ff8 100644 --- a/app/models/manageiq/providers/base_manager/refresher.rb +++ b/app/models/manageiq/providers/base_manager/refresher.rb @@ -42,7 +42,7 @@ def group_targets_by_ems(targets) else t end if ems.nil? - _log.warn "Unable to perform refresh for #{t.class} [#{t.name}] id [#{t.id}], since it is not on an EMS." 
+ _log.warn("Unable to perform refresh for #{t.class} [#{t.name}] id [#{t.id}], since it is not on an EMS.") next end diff --git a/app/models/manageiq/providers/cloud_manager/auth_key_pair/operations.rb b/app/models/manageiq/providers/cloud_manager/auth_key_pair/operations.rb index 1bad5995d20..6d5e56bb695 100644 --- a/app/models/manageiq/providers/cloud_manager/auth_key_pair/operations.rb +++ b/app/models/manageiq/providers/cloud_manager/auth_key_pair/operations.rb @@ -1,6 +1,6 @@ module ManageIQ::Providers::CloudManager::AuthKeyPair::Operations def self.included(base) - base.send :include, InstanceMethods + base.send(:include, InstanceMethods) base.extend ClassMethods end diff --git a/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_helper_methods.rb b/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_helper_methods.rb index 1f27fd888c5..59f494e0032 100644 --- a/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_helper_methods.rb +++ b/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_helper_methods.rb @@ -23,13 +23,13 @@ def safe_call yield rescue Excon::Errors::Forbidden => err # It can happen user doesn't have rights to read some tenant, in that case log warning but continue refresh - _log.warn "Forbidden response code returned in provider: #{@os_handle.address}. Message=#{err.message}" - _log.warn err.backtrace.join("\n") + _log.warn("Forbidden response code returned in provider: #{@os_handle.address}. Message=#{err.message}") + _log.warn(err.backtrace.join("\n")) nil rescue Excon::Errors::NotFound => err # It can happen that some data do not exist anymore,, in that case log warning but continue refresh - _log.warn "Not Found response code returned in provider: #{@os_handle.address}. Message=#{err.message}" - _log.warn err.backtrace.join("\n") + _log.warn("Not Found response code returned in provider: #{@os_handle.address}. 
Message=#{err.message}") + _log.warn(err.backtrace.join("\n")) nil end diff --git a/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser.rb b/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser.rb index be5ed47d04c..cee9628be94 100644 --- a/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser.rb +++ b/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser.rb @@ -58,8 +58,8 @@ def get_backups end def parse_backup(backup) - _log.debug "backup['size'] = #{backup['size']}" - _log.debug "backup['size'].to_i.gigabytes = #{backup['size'].to_i.gigabytes}" + _log.debug("backup['size'] = #{backup['size']}") + _log.debug("backup['size'].to_i.gigabytes = #{backup['size'].to_i.gigabytes}") uid = backup['id'] new_result = { :ems_ref => uid, diff --git a/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser/cross_linkers.rb b/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser/cross_linkers.rb index 5150978c188..9949d32738e 100644 --- a/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser/cross_linkers.rb +++ b/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser/cross_linkers.rb @@ -2,16 +2,16 @@ module ManageIQ::Providers::StorageManager::CinderManager::RefreshParser::CrossL def self.cross_link(ems, data) parent_manager = ems.parent_manager unless parent_manager - _log.warn "Manager does not have a parent." + _log.warn("Manager does not have a parent.") return end unless data - _log.warn "Manager does not have volumes, snapshots, or volume backups." 
+ _log.warn("Manager does not have volumes, snapshots, or volume backups.") return end parent_type = parent_manager.class.ems_type - _log.debug "Parent type: #{parent_type}" + _log.debug("Parent type: #{parent_type}") require_nested parent_type.camelize const_get(parent_type.camelize.to_sym).new(parent_manager, data).cross_link diff --git a/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser/cross_linkers/openstack.rb b/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser/cross_linkers/openstack.rb index f01f0b35a38..04a105184c0 100644 --- a/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser/cross_linkers/openstack.rb +++ b/app/models/manageiq/providers/storage_manager/cinder_manager/refresh_parser/cross_linkers/openstack.rb @@ -36,10 +36,10 @@ def cross_link def link_volume_to_tenant(volume_hash, api_obj) tenant = @parent_ems.cloud_tenants.detect { |t| t.ems_ref == api_obj.tenant_id } unless tenant - _log.info "EMS: #{@parent_ems.name}, tenant not found: #{api_obj.tenant_id}" + _log.info("EMS: #{@parent_ems.name}, tenant not found: #{api_obj.tenant_id}") return end - _log.debug "Found tenant: #{api_obj.tenant_id}, id = #{tenant.id}" + _log.debug("Found tenant: #{api_obj.tenant_id}, id = #{tenant.id}") volume_hash[:cloud_tenant_id] = tenant.id end @@ -48,10 +48,10 @@ def link_volume_to_availability_zone(volume_hash, api_obj) az_ref = api_obj.availability_zone ? 
api_obj.availability_zone : "null_az" availability_zone = @parent_ems.availability_zones.detect { |az| az.ems_ref == az_ref } unless availability_zone - _log.info "EMS: #{@parent_ems.name}, availability zone not found: #{az_ref}" + _log.info("EMS: #{@parent_ems.name}, availability zone not found: #{az_ref}") return end - _log.debug "Found availability zone: #{az_ref}, id = #{availability_zone.id}" + _log.debug("Found availability zone: #{az_ref}, id = #{availability_zone.id}") volume_hash[:availability_zone_id] = availability_zone.id end @@ -60,10 +60,10 @@ def link_snapshot_to_tenant(snapshot_hash, api_obj) tenant_ref = api_obj['os-extended-snapshot-attributes:project_id'] tenant = @parent_ems.cloud_tenants.detect { |t| t.ems_ref == tenant_ref } unless tenant - _log.info "EMS: #{@parent_ems.name}, tenant not found: #{tenant_ref}" + _log.info("EMS: #{@parent_ems.name}, tenant not found: #{tenant_ref}") return end - _log.debug "Found tenant: #{tenant_ref}, id = #{tenant.id}" + _log.debug("Found tenant: #{tenant_ref}, id = #{tenant.id}") snapshot_hash[:cloud_tenant_id] = tenant.id end @@ -72,10 +72,10 @@ def link_backup_to_availability_zone(backup_hash, api_obj) az_ref = api_obj['availability_zone'] ? api_obj['availability_zone'] : "null_az" availability_zone = @parent_ems.availability_zones.detect { |az| az.ems_ref == az_ref } unless availability_zone - _log.info "EMS: #{@parent_ems.name}, availability zone not found: #{az_ref}" + _log.info("EMS: #{@parent_ems.name}, availability zone not found: #{az_ref}") return end - _log.debug "Found availability zone: #{az_ref}, id = #{availability_zone.id}" + _log.debug("Found availability zone: #{az_ref}, id = #{availability_zone.id}") backup_hash[:availability_zone_id] = availability_zone.id end @@ -85,8 +85,8 @@ def link_volume_to_disk(volume_hash, api_obj) api_obj.attachments.each do |a| if a['device'].blank? 
- _log.warn "#{log_header}: Volume: #{uid}, is missing a mountpoint, skipping the volume processing" - _log.warn "#{log_header}: EMS: #{@ems.name}, Instance: #{a['server_id']}" + _log.warn("#{log_header}: Volume: #{uid}, is missing a mountpoint, skipping the volume processing") + _log.warn("#{log_header}: EMS: #{@ems.name}, Instance: #{a['server_id']}") next end @@ -94,7 +94,7 @@ def link_volume_to_disk(volume_hash, api_obj) vm = @parent_ems.vms.detect { |v| v.ems_ref == a['server_id'] } unless vm - _log.warn "VM referenced by backing volume not found." + _log.warn("VM referenced by backing volume not found.") next end @@ -102,8 +102,8 @@ def link_volume_to_disk(volume_hash, api_obj) disks = hardware.disks unless disks - _log.warn "#{log_header}: Volume: #{uid}, attached to instance not visible in the scope of this EMS" - _log.warn "#{log_header}: EMS: #{@ems.name}, Instance: #{a['server_id']}" + _log.warn("#{log_header}: Volume: #{uid}, attached to instance not visible in the scope of this EMS") + _log.warn("#{log_header}: EMS: #{@ems.name}, Instance: #{a['server_id']}") next end diff --git a/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser.rb b/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser.rb index d4fb6b622af..b9531a7a7ba 100644 --- a/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser.rb +++ b/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser.rb @@ -106,13 +106,13 @@ def safe_call yield rescue Excon::Errors::Forbidden => err # It can happen user doesn't have rights to read some tenant, in that case log warning but continue refresh - _log.warn "Forbidden response code returned in provider: #{@ems.address}. Message=#{err.message}" - _log.warn err.backtrace.join("\n") + _log.warn("Forbidden response code returned in provider: #{@ems.address}. 
Message=#{err.message}") + _log.warn(err.backtrace.join("\n")) nil rescue Excon::Errors::NotFound => err # It can happen that some data do not exist anymore,, in that case log warning but continue refresh - _log.warn "Not Found response code returned in provider: #{@ems.address}. Message=#{err.message}" - _log.warn err.backtrace.join("\n") + _log.warn("Not Found response code returned in provider: #{@ems.address}. Message=#{err.message}") + _log.warn(err.backtrace.join("\n")) nil end diff --git a/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser/cross_linkers.rb b/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser/cross_linkers.rb index 103d4362bc6..aa9eaf72097 100644 --- a/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser/cross_linkers.rb +++ b/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser/cross_linkers.rb @@ -2,16 +2,16 @@ module ManageIQ::Providers::StorageManager::SwiftManager::RefreshParser::CrossLi def self.cross_link(ems, data) parent_manager = ems.parent_manager unless parent_manager - _log.warn "Manager does not have a parent." + _log.warn("Manager does not have a parent.") return end unless data - _log.warn "Manager does not have any object storage." 
+ _log.warn("Manager does not have any object storage.") return end parent_type = parent_manager.class.ems_type - _log.debug "Parent type: #{parent_type}" + _log.debug("Parent type: #{parent_type}") require_nested parent_type.camelize const_get(parent_type.camelize.to_sym).new(parent_manager, data).cross_link diff --git a/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser/cross_linkers/openstack.rb b/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser/cross_linkers/openstack.rb index db3ad436d3f..e86ce3c64f4 100644 --- a/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser/cross_linkers/openstack.rb +++ b/app/models/manageiq/providers/storage_manager/swift_manager/refresh_parser/cross_linkers/openstack.rb @@ -24,10 +24,10 @@ def link_to_tenant(hash) tenant_id = hash[:tenant_id] tenant = @parent_ems.cloud_tenants.detect { |t| t.ems_ref == tenant_id } unless tenant - _log.info "EMS: #{@parent_ems.name}, tenant not found: #{tenant_id}" + _log.info("EMS: #{@parent_ems.name}, tenant not found: #{tenant_id}") return end - _log.debug "Found tenant: #{tenant_id}, id = #{tenant.id}" + _log.debug("Found tenant: #{tenant_id}, id = #{tenant.id}") hash[:cloud_tenant_id] = tenant.id end diff --git a/app/models/manager_refresh/inventory/persister.rb b/app/models/manager_refresh/inventory/persister.rb index c5f2f2e0708..2d359972b25 100644 --- a/app/models/manager_refresh/inventory/persister.rb +++ b/app/models/manager_refresh/inventory/persister.rb @@ -56,7 +56,7 @@ def self.has_inventory(options) if collection_options[:builder_params] collection_options[:builder_params] = collection_options[:builder_params].transform_values do |value| - if value.respond_to? :call + if value.respond_to?(:call) value.call(self) else value @@ -118,7 +118,7 @@ def add_inventory_collection(options) if options[:builder_params] options[:builder_params] = options[:builder_params].transform_values do |value| - if value.respond_to? 
:call + if value.respond_to?(:call) value.call(self) else value diff --git a/app/models/manager_refresh/inventory_collection.rb b/app/models/manager_refresh/inventory_collection.rb index d8ee4e8117a..a5a6b9abd63 100644 --- a/app/models/manager_refresh/inventory_collection.rb +++ b/app/models/manager_refresh/inventory_collection.rb @@ -777,7 +777,7 @@ def filtered_dependency_attributes def fixed_attributes if model_class - presence_validators = model_class.validators.detect { |x| x.kind_of? ActiveRecord::Validations::PresenceValidator } + presence_validators = model_class.validators.detect { |x| x.kind_of?(ActiveRecord::Validations::PresenceValidator) } end # Attributes that has to be always on the entity, so attributes making unique index of the record + attributes # that have presence validation @@ -845,7 +845,7 @@ def clone end def belongs_to_associations - model_class.reflect_on_all_associations.select { |x| x.kind_of? ActiveRecord::Reflection::BelongsToReflection } + model_class.reflect_on_all_associations.select { |x| x.kind_of?(ActiveRecord::Reflection::BelongsToReflection) } end def association_to_foreign_key_mapping diff --git a/app/models/manager_refresh/inventory_collection/graph.rb b/app/models/manager_refresh/inventory_collection/graph.rb index 97f5419d73b..3ff95f8cd7b 100644 --- a/app/models/manager_refresh/inventory_collection/graph.rb +++ b/app/models/manager_refresh/inventory_collection/graph.rb @@ -65,7 +65,7 @@ def sort_nodes(nodes) def assert_inventory_collections(inventory_collections) inventory_collections.each do |inventory_collection| - unless inventory_collection.kind_of? 
::ManagerRefresh::InventoryCollection + unless inventory_collection.kind_of?(::ManagerRefresh::InventoryCollection) raise "A ManagerRefresh::SaveInventory needs a InventoryCollection object, it got: #{inventory_collection.inspect}" end end diff --git a/app/models/manager_refresh/save_collection/recursive.rb b/app/models/manager_refresh/save_collection/recursive.rb index a273353a1e9..e1d5b2e7ba8 100644 --- a/app/models/manager_refresh/save_collection/recursive.rb +++ b/app/models/manager_refresh/save_collection/recursive.rb @@ -13,7 +13,7 @@ def save_collections(ems, inventory_collections) private def save_collection(ems, inventory_collection, traversed_collections) - unless inventory_collection.kind_of? ::ManagerRefresh::InventoryCollection + unless inventory_collection.kind_of?(::ManagerRefresh::InventoryCollection) raise "A ManagerRefresh::SaveInventory needs a InventoryCollection object, it got: #{inventory_collection.inspect}" end @@ -24,7 +24,7 @@ def save_collection(ems, inventory_collection, traversed_collections) unless inventory_collection.saveable? inventory_collection.dependencies.each do |dependency| next if dependency.saved? - if traversed_collections.include? dependency + if traversed_collections.include?(dependency) raise "Edge from #{inventory_collection} to #{dependency} creates a cycle" end save_collection(ems, dependency, traversed_collections) diff --git a/app/models/metric/capture.rb b/app/models/metric/capture.rb index 24da45460b9..780023cde44 100644 --- a/app/models/metric/capture.rb +++ b/app/models/metric/capture.rb @@ -39,7 +39,7 @@ def self.alert_capture_threshold(target) end def self.perf_capture_timer(zone = nil) - _log.info "Queueing performance capture..." 
+ _log.info("Queueing performance capture...") zone ||= MiqServer.my_server.zone perf_capture_health_check(zone) @@ -53,17 +53,17 @@ def self.perf_capture_timer(zone = nil) # Purge tasks older than 4 hours MiqTask.delete_older(4.hours.ago.utc, "name LIKE 'Performance rollup for %'") - _log.info "Queueing performance capture...Complete" + _log.info("Queueing performance capture...Complete") end def self.perf_capture_gap(start_time, end_time, zone_id = nil) - _log.info "Queueing performance capture for range: [#{start_time} - #{end_time}]..." + _log.info("Queueing performance capture for range: [#{start_time} - #{end_time}]...") zone = Zone.find(zone_id) if zone_id targets = Metric::Targets.capture_targets(zone, :exclude_storages => true) targets.each { |target| target.perf_capture_queue('historical', :start_time => start_time, :end_time => end_time, :zone => zone) } - _log.info "Queueing performance capture for range: [#{start_time} - #{end_time}]...Complete" + _log.info("Queueing performance capture for range: [#{start_time} - #{end_time}]...Complete") end def self.perf_capture_gap_queue(start_time, end_time, zone = nil) @@ -180,7 +180,7 @@ def self.calc_target_options(zone, targets_by_rollup_parent) :interval => "realtime" } ) - _log.info "Created task id: [#{task.id}] for: [#{pkey}] with targets: #{targets_by_rollup_parent[pkey].inspect} for time range: [#{task_start_time} - #{task_end_time}]" + _log.info("Created task id: [#{task.id}] for: [#{pkey}] with targets: #{targets_by_rollup_parent[pkey].inspect} for time range: [#{task_start_time} - #{task_end_time}]") targets.each do |target| h[target] = { :task_id => task.id, diff --git a/app/models/metric/ci_mixin.rb b/app/models/metric/ci_mixin.rb index c86fbdc59e4..ca81f4ca60c 100644 --- a/app/models/metric/ci_mixin.rb +++ b/app/models/metric/ci_mixin.rb @@ -114,7 +114,7 @@ def performances_maintains_value_for_duration?(options) scope = send(meth) if Metric.column_names.include?(column.to_s) - scope = scope.select 
"capture_interval_name, capture_interval, timestamp, #{column}" + scope = scope.select("capture_interval_name, capture_interval, timestamp, #{column}") end total_records = scope diff --git a/app/models/metric/ci_mixin/capture.rb b/app/models/metric/ci_mixin/capture.rb index be2cb3521cb..a01a89fc595 100644 --- a/app/models/metric/ci_mixin/capture.rb +++ b/app/models/metric/ci_mixin/capture.rb @@ -105,7 +105,7 @@ def perf_capture_queue(interval_name, options = {}) qi else interval = qi[:method_name].sub("perf_capture_", "") - _log.debug "Skipping capture of #{log_target} - Performance capture for interval #{interval} is still running" + _log.debug("Skipping capture of #{log_target} - Performance capture for interval #{interval} is still running") # NOTE: do not update the message queue nil end @@ -171,7 +171,7 @@ def perf_capture(interval_name, start_time = nil, end_time = nil) expected_start_range = expected_start_range.iso8601 end - _log.info "#{log_header} Capture for #{log_target}#{log_time}..." + _log.info("#{log_header} Capture for #{log_target}#{log_time}...") start_range = end_range = counters = counter_values = nil _, t = Benchmark.realtime_block(:total_time) do @@ -187,15 +187,15 @@ def perf_capture(interval_name, start_time = nil, end_time = nil) end_range = ts.last end - _log.info "#{log_header} Capture for #{log_target}#{log_time}...Complete - Timings: #{t.inspect}" + _log.info("#{log_header} Capture for #{log_target}#{log_time}...Complete - Timings: #{t.inspect}") if start_range.nil? - _log.info "#{log_header} Skipping processing for #{log_target}#{log_time} as no metrics were captured." 
+ _log.info("#{log_header} Skipping processing for #{log_target}#{log_time} as no metrics were captured.") # Set the last capture on to end_time to prevent forever queueing up the same collection range update_attributes(:last_perf_capture_on => end_time || Time.now.utc) if interval_name == 'realtime' else if expected_start_range && start_range > expected_start_range - _log.warn "#{log_header} For #{log_target}#{log_time}, expected to get data as of [#{expected_start_range}], but got data as of [#{start_range}]." + _log.warn("#{log_header} For #{log_target}#{log_time}, expected to get data as of [#{expected_start_range}], but got data as of [#{start_range}].") # Raise ems_performance_gap_detected alert event to enable notification. MiqEvent.raise_evm_alert_event_queue(ext_management_system, "ems_performance_gap_detected", @@ -225,9 +225,9 @@ def perf_capture_callback(task_ids, _status, _message, _result) pclass, _, pid = task.context_data[:parent].rpartition(":") parent = pclass.constantize.find(pid) msg = "Queueing [#{task.context_data[:interval]}] rollup to #{parent.class.name} id: [#{parent.id}] for time range: [#{task.context_data[:start]} - #{task.context_data[:end]}]" - _log.info "#{msg}..." 
+ _log.info("#{msg}...") parent.perf_rollup_range_queue(task.context_data[:start], task.context_data[:end], task.context_data[:interval]) - _log.info "#{msg}...Complete" + _log.info("#{msg}...Complete") else task.state, task.status, task.message = [MiqTask::STATE_ACTIVE, MiqTask::STATUS_OK, task.message = "Performance collection active, #{task.context_data[:complete].length} out of #{task.context_data[:targets].length} collections completed"] end @@ -243,7 +243,7 @@ def perf_capture_state def perf_capture_realtime_now # For UI to enable refresh of realtime charts on demand - _log.info "Realtime capture requested for #{log_target}" + _log.info("Realtime capture requested for #{log_target}") perf_capture_queue('realtime', :priority => MiqQueue::HIGH_PRIORITY) end diff --git a/app/models/metric/ci_mixin/rollup.rb b/app/models/metric/ci_mixin/rollup.rb index f7febc6b6e0..9f8bda60bfb 100644 --- a/app/models/metric/ci_mixin/rollup.rb +++ b/app/models/metric/ci_mixin/rollup.rb @@ -19,16 +19,16 @@ def perf_rollup_to_parents(interval_name, start_time, end_time = nil) times = Metric::Helper.hours_from_range(start_time, end_time) log_header = "Queueing [#{new_interval}] rollup to #{parent.class.name} id: [#{parent.id}] for times: #{times.inspect}" - _log.info "#{log_header}..." + _log.info("#{log_header}...") times.each { |t| parent.perf_rollup_queue(t, new_interval) } - _log.info "#{log_header}...Complete" + _log.info("#{log_header}...Complete") when 'daily' then times_by_tp = Metric::Helper.days_from_range_by_time_profile(start_time, end_time) times_by_tp.each do |tp, times| log_header = "Queueing [#{new_interval}] rollup to #{parent.class.name} id: [#{parent.id}] in time profile: [#{tp.description}] for times: #{times.inspect}" - _log.info "#{log_header}..." 
+ _log.info("#{log_header}...") times.each { |t| parent.perf_rollup_queue(t, new_interval, tp) } - _log.info "#{log_header}...Complete" + _log.info("#{log_header}...Complete") end end end @@ -65,7 +65,7 @@ def perf_rollup_queue(time, interval_name, time_profile = nil) :deliver_on => deliver_on, :priority => Metric::Capture.const_get("#{interval_name.upcase}_PRIORITY") ) do |msg| - _log.debug "Skipping queueing [#{interval_name}] rollup of #{self.class.name} name: [#{name}], id: [#{id}] for time: [#{time}], since it is already queued" unless msg.nil? + _log.debug("Skipping queueing [#{interval_name}] rollup of #{self.class.name} name: [#{name}], id: [#{id}] for time: [#{time}], since it is already queued") unless msg.nil? end end diff --git a/app/models/metric/rollup.rb b/app/models/metric/rollup.rb index 82b16d00f4a..9dc29337dab 100644 --- a/app/models/metric/rollup.rb +++ b/app/models/metric/rollup.rb @@ -216,7 +216,7 @@ class << self def self.rollup_daily(obj, day, interval_name, time_profile, new_perf, orig_perf) tp = TimeProfile.extract_objects(time_profile) if tp.nil? - _log.info "Skipping [#{interval_name}] Rollup for #{obj.class.name} name: [#{obj.name}], id: [#{obj.id}] for time: [#{day}] since the time profile no longer exists." + _log.info("Skipping [#{interval_name}] Rollup for #{obj.class.name} name: [#{obj.name}], id: [#{obj.id}] for time: [#{day}] since the time profile no longer exists.") return end @@ -362,9 +362,9 @@ def self.perf_rollup_gap(start_time, end_time, interval_name, time_profile_id = targets = find_distinct_resources return if targets.empty? - _log.info "Queueing #{interval_name} rollups for range: [#{start_time} - #{end_time}]..." 
+ _log.info("Queueing #{interval_name} rollups for range: [#{start_time} - #{end_time}]...") targets.each { |t| t.perf_rollup_range_queue(start_time, end_time, interval_name, time_profile_id, MiqQueue::LOW_PRIORITY) } - _log.info "Queueing #{interval_name} rollups for range: [#{start_time} - #{end_time}]...Complete" + _log.info("Queueing #{interval_name} rollups for range: [#{start_time} - #{end_time}]...Complete") end def self.perf_rollup_gap_queue(start_time, end_time, interval_name, time_profile_id = nil) diff --git a/app/models/middleware_performance.rb b/app/models/middleware_performance.rb index d3d80d21f94..596a3257c10 100644 --- a/app/models/middleware_performance.rb +++ b/app/models/middleware_performance.rb @@ -26,7 +26,7 @@ def self.build_results_for_report_middleware(options) next end if raw_stats.values[0] - parse_raw_stats_columns raw_stats + parse_raw_stats_columns(raw_stats) raw_stats.each { |timestamp, stats| results.push(parse_row(ms, timestamp, interval, stats)) } end end diff --git a/app/models/miq_action.rb b/app/models/miq_action.rb index 234fabe7f00..a1435df6086 100644 --- a/app/models/miq_action.rb +++ b/app/models/miq_action.rb @@ -162,7 +162,7 @@ def self.invoke_actions(apply_policies_to, inputs, succeeded, failed) MiqPolicy.logger.error("MIQ(action-invoke) Aborting action invocation [#{err.message}]") raise rescue MiqException::PolicyPreventAction => err - MiqPolicy.logger.info "MIQ(action-invoke) [#{err}]" + MiqPolicy.logger.info("MIQ(action-invoke) [#{err}]") raise end diff --git a/app/models/miq_ae_method.rb b/app/models/miq_ae_method.rb index 23d0200e97a..83babca7aad 100644 --- a/app/models/miq_ae_method.rb +++ b/app/models/miq_ae_method.rb @@ -6,7 +6,7 @@ class MiqAeMethod < ApplicationRecord default_value_for :embedded_methods, [] belongs_to :ae_class, :class_name => "MiqAeClass", :foreign_key => :class_id - has_many :inputs, -> { order :priority }, :class_name => "MiqAeField", :foreign_key => :method_id, + has_many :inputs, -> { 
order(:priority) }, :class_name => "MiqAeField", :foreign_key => :method_id, :dependent => :destroy, :autosave => true validates_presence_of :name, :scope diff --git a/app/models/miq_alert.rb b/app/models/miq_alert.rb index f957d65fbea..b56960f7ffc 100644 --- a/app/models/miq_alert.rb +++ b/app/models/miq_alert.rb @@ -271,7 +271,7 @@ def invoke_actions(target, inputs = {}) _log.error("Aborting action invocation [#{err.message}]") raise rescue MiqException::PolicyPreventAction => err - _log.info "[#{err}]" + _log.info("[#{err}]") raise end @@ -559,7 +559,7 @@ def responds_to_events_from_expression end def substitute(str) - eval "result = \"#{str}\"" + eval("result = \"#{str}\"") end def evaluate_in_automate(target, inputs = {}) diff --git a/app/models/miq_alert_status.rb b/app/models/miq_alert_status.rb index 428d5214ea1..e4660a8ffc3 100644 --- a/app/models/miq_alert_status.rb +++ b/app/models/miq_alert_status.rb @@ -5,7 +5,7 @@ class MiqAlertStatus < ApplicationRecord belongs_to :resource, :polymorphic => true belongs_to :ext_management_system belongs_to :assignee, :class_name => 'User' - has_many :miq_alert_status_actions, -> { order "created_at" }, :dependent => :destroy + has_many :miq_alert_status_actions, -> { order("created_at") }, :dependent => :destroy virtual_column :assignee, :type => :string virtual_column :hidden, :type => :boolean diff --git a/app/models/miq_bulk_import.rb b/app/models/miq_bulk_import.rb index d19abac7660..b5c853e2329 100644 --- a/app/models/miq_bulk_import.rb +++ b/app/models/miq_bulk_import.rb @@ -2,14 +2,14 @@ module MiqBulkImport def self.upload(fd, tags, keys) - _log.info "Uploading CSV file" + _log.info("Uploading CSV file") data = fd.read raise _("File is empty") if data.empty? 
data.gsub!(/\r/, "\n") begin reader = CSV.parse(data) rescue CSV::IllegalFormatError - _log.error "CSV file is invalid" + _log.error("CSV file is invalid") raise "CSV file is invalid" end header = reader.shift @@ -20,7 +20,7 @@ def self.upload(fd, tags, keys) if verified_tags.empty? raise "No valid columns were found in the csv file. One of the following fields is required: (#{tags.join(" ")})." else - _log.info "The following columns are verified in the csv file: #{verified_tags.join(" and ")}" + _log.info("The following columns are verified in the csv file: #{verified_tags.join(" and ")}") end matched_keys = [] @@ -32,7 +32,7 @@ def self.upload(fd, tags, keys) end if matched_keys.empty? - _log.error "The following required columns used for matching are missing: #{keys.join(" or ")}" + _log.error("The following required columns used for matching are missing: #{keys.join(" or ")}") raise "The following required columns used for matching are missing: #{keys.join(" or ")}" end diff --git a/app/models/miq_cockpit_ws_worker/authenticator.rb b/app/models/miq_cockpit_ws_worker/authenticator.rb index 41f567c42d6..22e8898867d 100755 --- a/app/models/miq_cockpit_ws_worker/authenticator.rb +++ b/app/models/miq_cockpit_ws_worker/authenticator.rb @@ -25,7 +25,7 @@ def self.ssh_command def find_container_node_creds(user_obj, host_or_ip) raise "Looking up container nodes requires a valid user" unless user_obj cdn_table = ContainerDeploymentNode.arel_table - cond = cdn_table[:name].eq(host_or_ip).or(cdn_table[:address].eq host_or_ip) + cond = cdn_table[:name].eq(host_or_ip).or(cdn_table[:address].eq(host_or_ip)) deployment = ContainerDeployment.joins(:container_deployment_nodes).find_by(cond) if deployment diff --git a/app/models/miq_cockpit_ws_worker/runner.rb b/app/models/miq_cockpit_ws_worker/runner.rb index 4ce0f5f1858..4252d29d2c5 100644 --- a/app/models/miq_cockpit_ws_worker/runner.rb +++ b/app/models/miq_cockpit_ws_worker/runner.rb @@ -32,8 +32,8 @@ def check_cockpit_ws 
rescue EOFError _log.info("#{log_prefix} got EOF process exiting") end - outbuf.split("\n").each { |msg| $log.info "cockpit-ws: #{msg.rstrip}" } if outbuf - errbuf.split("\n").each { |msg| $log.error "cockpit-ws: #{msg.rstrip}" } if errbuf + outbuf.split("\n").each { |msg| $log.info("cockpit-ws: #{msg.rstrip}") } if outbuf + errbuf.split("\n").each { |msg| $log.error("cockpit-ws: #{msg.rstrip}") } if errbuf else _log.info("#{log_prefix} Cockpit-ws Process gone. Restarting...") start_cockpit_ws diff --git a/app/models/miq_compare.rb b/app/models/miq_compare.rb index e1536f913cc..25d1bf9e5f1 100644 --- a/app/models/miq_compare.rb +++ b/app/models/miq_compare.rb @@ -351,10 +351,10 @@ def fetch_record_section(id, section, sub_sections, columns) else key = r.send(key_name) if key.nil? - _log.warn "No value was found for the key [#{key_name}] in section [#{section}] for record [#{id}]" + _log.warn("No value was found for the key [#{key_name}] in section [#{section}] for record [#{id}]") next elsif result_section.key?(key) - _log.warn "A duplicate key value [#{key}] for the key [#{key_name}] was found in section [#{section}] for record [#{id}]" + _log.warn("A duplicate key value [#{key}] for the key [#{key_name}] was found in section [#{section}] for record [#{id}]") next end end @@ -382,7 +382,7 @@ def eval_section(rec, section, id) section.to_s.split('.').each do |part| rec = rec.send(part) if rec.nil? - _log.warn "Unable to evaluate section [#{section}] for record [#{id}], since [.#{part}] returns nil" + _log.warn("Unable to evaluate section [#{section}] for record [#{id}], since [.#{part}] returns nil") return nil end end @@ -396,7 +396,7 @@ def eval_column(rec, column, id) parts.each_with_index do |part, i| rec = rec.send(part) if rec.nil? 
&& i != (parts.length - 1) - _log.warn "Unable to evaluate column [#{column}] for record [#{id}], since [.#{part}] returns nil" + _log.warn("Unable to evaluate column [#{column}] for record [#{id}], since [.#{part}] returns nil") return nil end end @@ -545,14 +545,14 @@ def get_compare_records new_rec end - _log.error "No record was found for compare object #{@model}, ids: [#{error_recs.join(", ")}]" unless error_recs.blank? + _log.error("No record was found for compare object #{@model}, ids: [#{error_recs.join(", ")}]") unless error_recs.blank? end # Retrieve the record from the source (compare mode) def get_compare_record(id) return unless @mode == :compare new_rec = @model.find_by(:id => id) - _log.error "No record was found for compare object #{@model}, id: [#{id}]" if new_rec.nil? + _log.error("No record was found for compare object #{@model}, id: [#{id}]") if new_rec.nil? new_rec end @@ -568,7 +568,7 @@ def get_drift_records def get_drift_record(ts) return unless @mode == :drift new_rec = drift_model_record.drift_states.find_by(:timestamp => ts).data_obj - _log.error "No data was found for drift object #{@model} [#{@model_record_id}] at [#{ts}]" if new_rec.nil? + _log.error("No data was found for drift object #{@model} [#{@model_record_id}] at [#{ts}]") if new_rec.nil? 
new_rec end diff --git a/app/models/miq_database.rb b/app/models/miq_database.rb index 2d5d51041e5..5588cf6faaa 100644 --- a/app/models/miq_database.rb +++ b/app/models/miq_database.rb @@ -67,7 +67,7 @@ def name end def size - ActiveRecord::Base.connection.database_size name + ActiveRecord::Base.connection.database_size(name) end def self.adapter diff --git a/app/models/miq_group.rb b/app/models/miq_group.rb index b145e100d98..a2f9836df2b 100644 --- a/app/models/miq_group.rb +++ b/app/models/miq_group.rb @@ -124,7 +124,7 @@ def self.get_httpd_groups_by_user(user) sysbus = DBus.system_bus ifp_service = sysbus["org.freedesktop.sssd.infopipe"] - ifp_object = ifp_service.object "/org/freedesktop/sssd/infopipe" + ifp_object = ifp_service.object("/org/freedesktop/sssd/infopipe") ifp_object.introspect ifp_interface = ifp_object["org.freedesktop.sssd.infopipe"] begin diff --git a/app/models/miq_host_provision/configuration.rb b/app/models/miq_host_provision/configuration.rb index fa913ccda38..5993f112d31 100644 --- a/app/models/miq_host_provision/configuration.rb +++ b/app/models/miq_host_provision/configuration.rb @@ -13,11 +13,11 @@ def set_network_information def set_maintenance_mode_vmware destination.with_provider_object(:connection_source => host) do |vim_host| if vim_host.inMaintenanceMode? - _log.info "Host is already Maintenance Mode" + _log.info("Host is already Maintenance Mode") else - _log.info "Putting host into Maintenance Mode..." + _log.info("Putting host into Maintenance Mode...") vim_host.enterMaintenanceMode - _log.info "Putting host into Maintenance Mode...complete" + _log.info("Putting host into Maintenance Mode...complete") end end end @@ -27,14 +27,14 @@ def set_maintenance_mode if destination.is_vmware? 
set_maintenance_mode_vmware else - _log.warn "VMM Vendor [#{destination.vmm_vendor_display}] is not supported" + _log.warn("VMM Vendor [#{destination.vmm_vendor_display}] is not supported") end end # TODO: Subclass def add_storage_vmware if destination.ext_management_system.nil? - _log.error "Host has no External Management System" + _log.error("Host has no External Management System") return end @@ -43,11 +43,11 @@ def add_storage_vmware storages_to_attach.each do |storage| case storage.store_type when 'NFS' - _log.info "Adding datastore: [#{storage.name}]" + _log.info("Adding datastore: [#{storage.name}]") vim_dss.addNasDatastoreByName(storage.name) - _log.info "Adding datastore: [#{storage.name}]...Complete" + _log.info("Adding datastore: [#{storage.name}]...Complete") else - _log.warn "Storage Type [#{storage.store_type}] is not supported" + _log.warn("Storage Type [#{storage.store_type}] is not supported") end end end @@ -58,7 +58,7 @@ def add_storage if destination.is_vmware? add_storage_vmware else - _log.warn "VMM Vendor [#{destination.vmm_vendor_display}] is not supported" + _log.warn("VMM Vendor [#{destination.vmm_vendor_display}] is not supported") end end end diff --git a/app/models/miq_host_provision/placement.rb b/app/models/miq_host_provision/placement.rb index bba19e0b8a0..bd84726cc08 100644 --- a/app/models/miq_host_provision/placement.rb +++ b/app/models/miq_host_provision/placement.rb @@ -31,7 +31,7 @@ def place_in_ems if host.is_vmware? 
place_in_ems_vmware else - _log.warn "VMM Vendor [#{host.vmm_vendor_display}] is not supported" + _log.warn("VMM Vendor [#{host.vmm_vendor_display}] is not supported") end end end diff --git a/app/models/miq_host_provision/pxe.rb b/app/models/miq_host_provision/pxe.rb index 1fdfb6154c1..0dedeef120c 100644 --- a/app/models/miq_host_provision/pxe.rb +++ b/app/models/miq_host_provision/pxe.rb @@ -43,7 +43,7 @@ def cidr require 'ipaddr' Integer(32 - Math.log2((IPAddr.new(subnet_mask.to_s, Socket::AF_INET).to_i ^ 0xffffffff) + 1)) rescue ArgumentError => err - _log.warn "Cannot convert subnet #{subnet_mask.inspect} to CIDR because #{err.message}" + _log.warn("Cannot convert subnet #{subnet_mask.inspect} to CIDR because #{err.message}") return nil end diff --git a/app/models/miq_host_provision/rediscovery.rb b/app/models/miq_host_provision/rediscovery.rb index 6748458d5ea..efcba0acd9b 100644 --- a/app/models/miq_host_provision/rediscovery.rb +++ b/app/models/miq_host_provision/rediscovery.rb @@ -15,7 +15,7 @@ def host_rediscovered? def rediscover_host # TODO: why is this check here?? unless state == 'active' - _log.info "provision task check has already been processed - state: [#{state}]" + _log.info("provision task check has already been processed - state: [#{state}]") return end diff --git a/app/models/miq_host_provision_workflow.rb b/app/models/miq_host_provision_workflow.rb index ee928115b5c..706439d42a5 100644 --- a/app/models/miq_host_provision_workflow.rb +++ b/app/models/miq_host_provision_workflow.rb @@ -108,7 +108,7 @@ def update_selected_storage_names(values) def ws_template_fields(_values, fields) data = parse_ws_string(fields) - _log.info "data:<#{data.inspect}>" + _log.info("data:<#{data.inspect}>") name = data[:name].blank? ? nil : data[:name].downcase mac_address = data[:mac_address].blank? ? nil : data[:mac_address].downcase @@ -118,9 +118,9 @@ def ws_template_fields(_values, fields) raise _("No host search criteria values were passed. 
input data:<%{data}>") % {:data => data.inspect} end - _log.info "Host Passed : <#{name}> <#{mac_address}> <#{ipmi_address}>" + _log.info("Host Passed : <#{name}> <#{mac_address}> <#{ipmi_address}>") srcs = allowed_ws_hosts(:include_datacenter => true).find_all do |v| - _log.info "Host Detected: <#{v.name.downcase}> <#{v.mac_address}> <#{v.ipmi_address}>" + _log.info("Host Detected: <#{v.name.downcase}> <#{v.mac_address}> <#{v.ipmi_address}>") (name.nil? || name == v.name.downcase) && (mac_address.nil? || mac_address == v.mac_address.to_s.downcase) && (ipmi_address.nil? || ipmi_address == v.ipmi_address.to_s) end if srcs.length > 1 @@ -129,21 +129,21 @@ def ws_template_fields(_values, fields) src = srcs.first raise _("No target host was found from input data:<%{data}>") % {:data => data.inspect} if src.nil? - _log.info "Host Found: <#{src.name}> MAC:<#{src.mac_address}> IPMI:<#{src.ipmi_address}>" + _log.info("Host Found: <#{src.name}> MAC:<#{src.mac_address}> IPMI:<#{src.ipmi_address}>") src end def ws_host_fields(values, fields) data = parse_ws_string(fields) - _log.info "data:<#{data.inspect}>" + _log.info("data:<#{data.inspect}>") ws_service_fields(values, fields, data) ws_environment_fields(values, fields, data) refresh_field_values(values) ws_customize_fields(values, fields, data) ws_schedule_fields(values, fields, data) - data.each { |k, v| _log.warn "Unprocessed key <#{k}> with value <#{v.inspect}>" } + data.each { |k, v| _log.warn("Unprocessed key <#{k}> with value <#{v.inspect}>") } end def ws_service_fields(values, _fields, data) @@ -216,13 +216,13 @@ def self.from_ws(*args) def self.from_ws_ver_1_x(version, user, template_fields, vm_fields, requester, tags, options) options = MiqHashStruct.new if options.nil? 
- _log.warn "Web-service host provisioning starting with interface version <#{version}> by requester <#{user.userid}>" + _log.warn("Web-service host provisioning starting with interface version <#{version}> by requester <#{user.userid}>") init_options = {:use_pre_dialog => false, :request_type => request_type(parse_ws_string(template_fields)[:request_type])} data = parse_ws_string(requester) unless data[:user_name].blank? user = User.find_by_userid!(data[:user_name]) - _log.warn "Web-service requester changed to <#{user.userid}>" + _log.warn("Web-service requester changed to <#{user.userid}>") end p = new(values = {}, user, init_options) @@ -245,7 +245,7 @@ def self.from_ws_ver_1_x(version, user, template_fields, vm_fields, requester, t p.raise_validate_errors if request == false end rescue => err - _log.error "<#{err}>" + _log.error("<#{err}>") raise err end end # class MiqHostProvisionWorkflow diff --git a/app/models/miq_provision/automate.rb b/app/models/miq_provision/automate.rb index ff760ed81d6..c877fb1f48f 100644 --- a/app/models/miq_provision/automate.rb +++ b/app/models/miq_provision/automate.rb @@ -54,34 +54,34 @@ def get_network_details related_vm_description = (related_vm == vm) ? "VM" : "Template" if related_vm.nil? - _log.error "No VM or Template Found for Provision Object" + _log.error("No VM or Template Found for Provision Object") return nil end if related_vm.ext_management_system.nil? 
- _log.error "No EMS Found for #{related_vm_description} of Provision Object" + _log.error("No EMS Found for #{related_vm_description} of Provision Object") return nil end vc_id = related_vm.ext_management_system.id unless vc_id.kind_of?(Fixnum) - _log.error "Invalid EMS ID <#{vc_id.inspect}> for #{related_vm_description} of Provision Object" + _log.error("Invalid EMS ID <#{vc_id.inspect}> for #{related_vm_description} of Provision Object") return nil end vlan_id, vlan_name = options[:vlan] unless vlan_name.kind_of?(String) - _log.error "VLAN Name <#{vlan_name.inspect}> is missing or invalid" + _log.error("VLAN Name <#{vlan_name.inspect}> is missing or invalid") return nil end - _log.info "<< vlan_name=<#{vlan_name}> vlan_id=#{vlan_id} vc_id=<#{vc_id}> user=<#{get_user}>" + _log.info("<< vlan_name=<#{vlan_name}> vlan_id=#{vlan_id} vc_id=<#{vc_id}> user=<#{get_user}>") attrs = automate_attributes('get_networks') ws = MiqAeEngine.resolve_automation_object("REQUEST", get_user, attrs) if ws.root.nil? - _log.warn "- Automate Failed (workspace empty)" + _log.warn("- Automate Failed (workspace empty)") return nil end @@ -101,7 +101,7 @@ def get_network_details return network end if networks.kind_of?(Array) - _log.warn "- No Network matched in Automate Results: #{ws.to_expanded_xml}" + _log.warn("- No Network matched in Automate Results: #{ws.to_expanded_xml}") nil end diff --git a/app/models/miq_provision/custom_attributes.rb b/app/models/miq_provision/custom_attributes.rb index d9776147575..d6c61696477 100644 --- a/app/models/miq_provision/custom_attributes.rb +++ b/app/models/miq_provision/custom_attributes.rb @@ -10,7 +10,7 @@ def set_miq_custom_attributes(vm, custom_attrs) end vm.custom_attributes.create(attrs) rescue => err - _log.warn "Failed to set EVM Custom Attributes #{custom_attrs.inspect}. Reason:<#{err}>" + _log.warn("Failed to set EVM Custom Attributes #{custom_attrs.inspect}. 
Reason:<#{err}>") end end @@ -22,7 +22,7 @@ def set_ems_custom_attributes(vm, custom_attrs) _log.info("Setting EMS Custom Attribute key=#{k.to_s.inspect}, value=#{v.to_s.inspect}") vm.set_custom_field(k.to_s, v.to_s) rescue => err - _log.warn "Failed to set EMS Custom Attribute <#{k}> to <#{v}>. Reason:<#{err}>" + _log.warn("Failed to set EMS Custom Attribute <#{k}> to <#{v}>. Reason:<#{err}>") end end end diff --git a/app/models/miq_provision/description.rb b/app/models/miq_provision/description.rb index 0d8327ce8f6..cae0fedfff1 100644 --- a/app/models/miq_provision/description.rb +++ b/app/models/miq_provision/description.rb @@ -1,6 +1,6 @@ module MiqProvision::Description def set_description(vm, description) - _log.info "Setting #{vm.class.base_model.name} description to #{description.inspect}" + _log.info("Setting #{vm.class.base_model.name} description to #{description.inspect}") vm.description = description end end diff --git a/app/models/miq_provision/genealogy.rb b/app/models/miq_provision/genealogy.rb index c1e1b61e783..1be7ce8fb34 100644 --- a/app/models/miq_provision/genealogy.rb +++ b/app/models/miq_provision/genealogy.rb @@ -1,6 +1,6 @@ module MiqProvision::Genealogy def set_genealogy(child, parent) - _log.info "Setting Genealogy Parent to #{parent.class.base_model.name} Name=#{parent.name}, ID=#{parent.id}" + _log.info("Setting Genealogy Parent to #{parent.class.base_model.name} Name=#{parent.name}, ID=#{parent.id}") parent.add_genealogy_child(child) end end diff --git a/app/models/miq_provision/naming.rb b/app/models/miq_provision/naming.rb index 96bd1894425..e27d792b0ab 100644 --- a/app/models/miq_provision/naming.rb +++ b/app/models/miq_provision/naming.rb @@ -30,7 +30,7 @@ def get_next_vm_name(prov_obj, determine_index = true) # Check if we need to force a unique target name if prov_obj.get_option(:miq_force_unique_name) == true && unresolved_vm_name !~ NAME_SEQUENCE_REGEX unresolved_vm_name += '$n{4}' - _log.info "Forced unique provision name to 
#{unresolved_vm_name} for #{prov_obj.class}:#{prov_obj.id}" + _log.info("Forced unique provision name to #{unresolved_vm_name} for #{prov_obj.class}:#{prov_obj.id}") end vm_name = get_vm_full_name(unresolved_vm_name, prov_obj, determine_index) diff --git a/app/models/miq_provision/ownership.rb b/app/models/miq_provision/ownership.rb index 9071d6bb6d2..e5d9774b6d8 100644 --- a/app/models/miq_provision/ownership.rb +++ b/app/models/miq_provision/ownership.rb @@ -1,9 +1,9 @@ module MiqProvision::Ownership def set_ownership(vm, user) - _log.info "Setting Owning User to Name=#{user.name}, ID=#{user.id}" + _log.info("Setting Owning User to Name=#{user.name}, ID=#{user.id}") vm.evm_owner = user - _log.info "Setting Owning Group to Name=#{user.current_group.name}, ID=#{user.current_group.id}" + _log.info("Setting Owning Group to Name=#{user.current_group.name}, ID=#{user.current_group.id}") vm.miq_group = user.current_group end end diff --git a/app/models/miq_provision/pxe.rb b/app/models/miq_provision/pxe.rb index 16faa0759df..327c3baed26 100644 --- a/app/models/miq_provision/pxe.rb +++ b/app/models/miq_provision/pxe.rb @@ -39,7 +39,7 @@ def cidr require 'ipaddr' Integer(32 - Math.log2((IPAddr.new(subnet_mask.to_s, Socket::AF_INET).to_i ^ 0xffffffff) + 1)) rescue ArgumentError => err - _log.warn "Cannot convert subnet #{subnet_mask.inspect} to CIDR because #{err.message}" + _log.warn("Cannot convert subnet #{subnet_mask.inspect} to CIDR because #{err.message}") return nil end diff --git a/app/models/miq_provision/service.rb b/app/models/miq_provision/service.rb index 3d5267f0b1d..e7dd7f98d62 100644 --- a/app/models/miq_provision/service.rb +++ b/app/models/miq_provision/service.rb @@ -1,7 +1,7 @@ module MiqProvision::Service def connect_to_service(vm, service, service_resource) unless service.nil? || service_resource.nil? 
- _log.info "Connecting VM #{vm.id}:#{vm.name} to service #{service.id}:#{service.name}" + _log.info("Connecting VM #{vm.id}:#{vm.name} to service #{service.id}:#{service.name}") service.add_resource!(vm, service_resource) end end diff --git a/app/models/miq_provision/state_machine.rb b/app/models/miq_provision/state_machine.rb index 99ab0a96775..816bb8345f0 100644 --- a/app/models/miq_provision/state_machine.rb +++ b/app/models/miq_provision/state_machine.rb @@ -82,7 +82,7 @@ def autostart_destination end def post_create_destination - _log.info "Destination #{destination.class.base_model.name} ID=#{destination.id}, Name=#{destination.name}" + _log.info("Destination #{destination.class.base_model.name} ID=#{destination.id}, Name=#{destination.name}") set_description(destination, get_option(:vm_description)) set_ownership(destination, get_owner || get_user) diff --git a/app/models/miq_provision_virt_workflow.rb b/app/models/miq_provision_virt_workflow.rb index a0a52de7867..4089cbf4f7d 100644 --- a/app/models/miq_provision_virt_workflow.rb +++ b/app/models/miq_provision_virt_workflow.rb @@ -89,10 +89,10 @@ def refresh_field_values(values) update_field_visibility @last_vm_id = get_value(@values[:src_vm_id]) - _log.info "provision refresh completed in [#{Time.now - st}] seconds" + _log.info("provision refresh completed in [#{Time.now - st}] seconds") rescue => err - _log.error "[#{err}]" - $log.error err.backtrace.join("\n") + _log.error("[#{err}]") + $log.error(err.backtrace.join("\n")) raise err ensure @allowed_vlan_cache = nil @@ -198,7 +198,7 @@ def filter_hosts_by_vlan_name(all_hosts) vlan_name = get_value(@values[:vlan]) return all_hosts unless vlan_name - _log.info "Filtering hosts with the following network: <#{vlan_name}>" + _log.info("Filtering hosts with the following network: <#{vlan_name}>") all_hosts.reject { |h| !h.lans.pluck(:name).include?(vlan_name) } end @@ -332,7 +332,7 @@ def allowed_templates(options = {}) end unless tag_conditions.blank? 
- _log.info "Filtering VM templates with the following tag_filters: <#{tag_conditions.inspect}>" + _log.info("Filtering VM templates with the following tag_filters: <#{tag_conditions.inspect}>") vms = MiqTemplate.in_my_region.where(condition).find_tags_by_grouping(tag_conditions, :ns => "/managed") end end @@ -342,11 +342,11 @@ def allowed_templates(options = {}) @allowed_templates_tag_filters = @values[:vm_tags] rails_logger('allowed_templates', 1) if allowed_templates_list.blank? - _log.warn "Allowed Templates is returning an empty list" + _log.warn("Allowed Templates is returning an empty list") else - _log.warn "Allowed Templates is returning <#{allowed_templates_list.length}> template(s)" + _log.warn("Allowed Templates is returning <#{allowed_templates_list.length}> template(s)") allowed_templates_list.each do |vm| - _log.debug "Allowed Template <#{vm.id}:#{vm.name}> GUID: <#{vm.guid}> UID_EMS: <#{vm.uid_ems}>" + _log.debug("Allowed Template <#{vm.id}:#{vm.name}> GUID: <#{vm.guid}> UID_EMS: <#{vm.uid_ems}>") end end @@ -585,7 +585,7 @@ def update_custom_spec return if @current_spec == selected_spec && @custom_spec_override == current_spec_override - _log.info "Custom spec changed from [#{@current_spec}] to [#{selected_spec}]. Customize option:[#{@customize_option}]" + _log.info("Custom spec changed from [#{@current_spec}] to [#{selected_spec}]. 
Customize option:[#{@customize_option}]") if selected_spec src = get_source_and_targets @@ -737,7 +737,7 @@ def self.from_ws(*args) end def self.from_ws_ver_1_0(version, user, src_name, target_name, auto_approve, tags, additional_values) - _log.info "Web-service provisioning starting with interface version <#{version}> for user <#{user.userid}>" + _log.info("Web-service provisioning starting with interface version <#{version}> for user <#{user.userid}>") values = {} p = new(values, user, :use_pre_dialog => false) src_name_down = src_name.downcase @@ -774,22 +774,22 @@ def ws_template_fields(values, fields, ws_values) placement_cluster_name = ws_values[:cluster] unless placement_cluster_name.blank? data[:placement_cluster_name] = placement_cluster_name.to_s.downcase - _log.info "placement_cluster_name:<#{data[:placement_cluster_name].inspect}>" + _log.info("placement_cluster_name:<#{data[:placement_cluster_name].inspect}>") data[:data_centers] = EmsCluster.where("lower(name) = ?", data[:placement_cluster_name]).collect(&:v_parent_datacenter) end - _log.info "data:<#{data.inspect}>" + _log.info("data:<#{data.inspect}>") src_name = data[:name].blank? ? nil : data[:name].downcase src_guid = data[:guid].blank? ? nil : data[:guid].downcase ems_guid = data[:ems_guid].blank? ? 
nil : data[:ems_guid].downcase data_centers = data[:data_centers] - _log.info "VM Passed: <#{src_name}> <#{src_guid}> <#{ems_guid}> Datacenters:<#{data_centers.inspect}>" + _log.info("VM Passed: <#{src_name}> <#{src_guid}> <#{ems_guid}> Datacenters:<#{data_centers.inspect}>") if [:clone_to_vm, :clone_to_template].include?(request_type) src = ws_find_template_or_vm(values, src_name, src_guid, ems_guid) else srcs = allowed_templates(:include_datacenter => true).find_all do |v| - _log.info "VM Detected: <#{v.name.downcase}> <#{v.guid}> <#{v.uid_ems}> Datacenter:<#{v.datacenter_name}>" + _log.info("VM Detected: <#{v.name.downcase}> <#{v.guid}> <#{v.uid_ems}> Datacenter:<#{v.datacenter_name}>") (src_name.nil? || src_name == v.name.downcase) && (src_guid.nil? || src_guid == v.guid) && (ems_guid.nil? || ems_guid == v.uid_ems) && (data_centers.nil? || data_centers.include?(v.datacenter_name)) end if srcs.length > 1 @@ -800,7 +800,7 @@ def ws_template_fields(values, fields, ws_values) if src.nil? 
raise _("No source template was found from input data:<%{data}>") % {:data => data.inspect} end - _log.info "VM Found: <#{src.name}> <#{src.guid}> <#{src.uid_ems}> Datacenter:<#{src.datacenter_name}>" + _log.info("VM Found: <#{src.name}> <#{src.guid}> <#{src.uid_ems}> Datacenter:<#{src.datacenter_name}>") src end @@ -816,7 +816,7 @@ def ws_find_template_or_vm(_values, src_name, src_guid, ems_guid) def ws_vm_fields(values, fields) data = parse_ws_string(fields) - _log.info "data:<#{data.inspect}>" + _log.info("data:<#{data.inspect}>") ws_service_fields(values, fields, data) ws_hardware_fields(values, fields, data) ws_network_fields(values, fields, data) @@ -824,7 +824,7 @@ def ws_vm_fields(values, fields) ws_schedule_fields(values, fields, data) ws_environment_fields(values, data) - data.each { |k, v| _log.warn "Unprocessed key <#{k}> with value <#{v.inspect}>" } + data.each { |k, v| _log.warn("Unprocessed key <#{k}> with value <#{v.inspect}>") } end def ws_environment_fields(values, data) @@ -870,7 +870,7 @@ def ws_hardware_network_fields(values, data) # Check and remove invalid networks specifications values[:networks].delete_if do |d| result = d[:network].blank? - _log.warn "Skipping network due to blank name: <#{d.inspect}>" if result == true + _log.warn("Skipping network due to blank name: <#{d.inspect}>") if result == true result end unless values[:networks].blank? end @@ -892,7 +892,7 @@ def ws_hardware_disk_fields(values, data) # Check and remove invalid disk specifications values[:disk_scsi].delete_if do |d| result = d[:sizeInMB].to_i == 0 - _log.warn "Skipping disk due to invalid size: <#{d.inspect}>" if result == true + _log.warn("Skipping disk due to invalid size: <#{d.inspect}>") if result == true result end unless values[:disk_scsi].blank? 
end @@ -903,7 +903,7 @@ def parse_ws_hardware_fields(hw_key, regex_filter, values, data) next unless key_name =~ regex_filter item_id = Regexp.last_match(1).to_i v = data.delete(k) - _log.info "processing key with value <#{v.inspect}>" + _log.info("processing key with value <#{v.inspect}>") values[hw_key] ||= [] item = values[hw_key][item_id] ||= {} @@ -940,7 +940,7 @@ def ws_customize_fields(values, _fields, data) def self.from_ws_ver_1_x(version, user, template_fields, vm_fields, requester, tags, options) options = MiqHashStruct.new if options.nil? - _log.warn "Web-service provisioning starting with interface version <#{version}> by requester <#{user.userid}>" + _log.warn("Web-service provisioning starting with interface version <#{version}> by requester <#{user.userid}>") init_options = {:use_pre_dialog => false, :request_type => request_type(parse_ws_string(template_fields)[:request_type]), :initial_pass => true} data = parse_ws_string(requester) @@ -971,7 +971,7 @@ def self.from_ws_ver_1_x(version, user, template_fields, vm_fields, requester, t p.raise_validate_errors if request == false end rescue => err - _log.error "<#{err}>" + _log.error("<#{err}>") raise err end diff --git a/app/models/miq_queue.rb b/app/models/miq_queue.rb index 5d64f4d20d6..338a198b91e 100644 --- a/app/models/miq_queue.rb +++ b/app/models/miq_queue.rb @@ -308,7 +308,7 @@ def self.put_or_update(find_options) # for proper comparison. NOTE: hashes may not compare correctly due to # it's unordered nature. 
where_scope = if conds.key?(:args) - args = YAML.dump conds.delete(:args) + args = YAML.dump(conds.delete(:args)) MiqQueue.where(conds).where(['args = ?', args]) else MiqQueue.where(conds) @@ -389,10 +389,10 @@ def deliver(requester = nil) obj = obj.find(instance_id) end rescue ActiveRecord::RecordNotFound => err - _log.warn "#{MiqQueue.format_short_log_msg(self)} will not be delivered because #{err.message}" + _log.warn("#{MiqQueue.format_short_log_msg(self)} will not be delivered because #{err.message}") return STATUS_WARN, nil, nil rescue => err - _log.error "#{MiqQueue.format_short_log_msg(self)} will not be delivered because #{err.message}" + _log.error("#{MiqQueue.format_short_log_msg(self)} will not be delivered because #{err.message}") return STATUS_ERROR, err.message, nil end end @@ -476,7 +476,7 @@ def m_callback(msg, result) _log.error("backtrace: #{err.backtrace.join("\n")}") end else - _log.warn "#{MiqQueue.format_short_log_msg(self)}, Callback is not well-defined, skipping" + _log.warn("#{MiqQueue.format_short_log_msg(self)}, Callback is not well-defined, skipping") end end diff --git a/app/models/miq_region.rb b/app/models/miq_region.rb index f5c6b3f783e..f9fe70ed671 100644 --- a/app/models/miq_region.rb +++ b/app/models/miq_region.rb @@ -128,7 +128,7 @@ def self.destroy_region(conn, region, tables = nil) end rows = conn.delete("DELETE FROM #{t} WHERE #{conditions}") - _log.info "Cleared [#{rows}] rows from table [#{t}]" + _log.info("Cleared [#{rows}] rows from table [#{t}]") end end @@ -166,31 +166,31 @@ def self.replication_type=(desired_type) end def ems_clouds - ext_management_systems.select { |e| e.kind_of? EmsCloud } + ext_management_systems.select { |e| e.kind_of?(EmsCloud) } end def ems_infras - ext_management_systems.select { |e| e.kind_of? EmsInfra } + ext_management_systems.select { |e| e.kind_of?(EmsInfra) } end def ems_containers - ext_management_systems.select { |e| e.kind_of? 
ManageIQ::Providers::ContainerManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::ContainerManager) } end def ems_middlewares - ext_management_systems.select { |e| e.kind_of? ManageIQ::Providers::MiddlewareManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::MiddlewareManager) } end def ems_datawarehouses - ext_management_systems.select { |e| e.kind_of? ManageIQ::Providers::DatawarehouseManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::DatawarehouseManager) } end def ems_monitors - ext_management_systems.select { |e| e.kind_of? ManageIQ::Providers::MonitoringManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::MonitoringManager) } end def ems_configproviders - ext_management_systems.select { |e| e.kind_of? ManageIQ::Providers::ConfigurationManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::ConfigurationManager) } end def assigned_roles diff --git a/app/models/miq_report.rb b/app/models/miq_report.rb index 5301c5bf62a..4586068d618 100644 --- a/app/models/miq_report.rb +++ b/app/models/miq_report.rb @@ -138,7 +138,7 @@ def self.get_col_info(path) def list_schedules exp = MiqExpression.new("=" => {"field" => "MiqReport-id", "value" => id}) - MiqSchedule.filter_matches_with exp + MiqSchedule.filter_matches_with(exp) end def add_schedule(data) @@ -151,7 +151,7 @@ def add_schedule(data) params['towhat'] = "MiqReport" params['prod_default'] = "system" - MiqSchedule.create! 
params + MiqSchedule.create!(params) end def db_class diff --git a/app/models/miq_report/seeding.rb b/app/models/miq_report/seeding.rb index bdc7748cc26..d443fe79cb6 100644 --- a/app/models/miq_report/seeding.rb +++ b/app/models/miq_report/seeding.rb @@ -47,7 +47,7 @@ def sync_from_file(filename, dir, typ) yml = YAML.load_file(filename) rpt = {} column_names.each { |c| rpt[c.to_sym] = yml[c] } - rpt.delete :id + rpt.delete(:id) rpt[:name] = yml["menu_name"].strip rpt[:rpt_group] = File.basename(File.dirname(filename)).split("_").last rpt[:rpt_type] = "Default" diff --git a/app/models/miq_report_result.rb b/app/models/miq_report_result.rb index 4d68d0131a1..40ca1127245 100644 --- a/app/models/miq_report_result.rb +++ b/app/models/miq_report_result.rb @@ -6,7 +6,7 @@ class MiqReportResult < ApplicationRecord belongs_to :miq_task has_one :binary_blob, :as => :resource, :dependent => :destroy has_many :miq_report_result_details, :dependent => :delete_all - has_many :html_details, -> { where "data_type = 'html'" }, :class_name => "MiqReportResultDetail", :foreign_key => "miq_report_result_id" + has_many :html_details, -> { where("data_type = 'html'") }, :class_name => "MiqReportResultDetail", :foreign_key => "miq_report_result_id" serialize :report diff --git a/app/models/miq_request_task.rb b/app/models/miq_request_task.rb index 98cad3a38f6..8095101702a 100644 --- a/app/models/miq_request_task.rb +++ b/app/models/miq_request_task.rb @@ -38,7 +38,7 @@ def after_request_task_create def update_and_notify_parent(upd_attr) upd_attr[:message] = upd_attr[:message][0, 255] if upd_attr.key?(:message) - update_attributes! 
upd_attr + update_attributes!(upd_attr) # If this request has a miq_request_task parent use that, otherwise the parent is the miq_request parent = miq_request_task || miq_request diff --git a/app/models/miq_request_task/state_machine.rb b/app/models/miq_request_task/state_machine.rb index 1b6c8601b8c..7728c3c36df 100644 --- a/app/models/miq_request_task/state_machine.rb +++ b/app/models/miq_request_task/state_machine.rb @@ -14,7 +14,7 @@ def signal(phase) return signal(:finish) if ![:finish, :provision_error].include?(phase.to_sym) && prematurely_finished? self.phase = phase.to_s - $log.info "Starting Phase <#{self.phase}>" + $log.info("Starting Phase <#{self.phase}>") save begin diff --git a/app/models/miq_request_workflow.rb b/app/models/miq_request_workflow.rb index 08bd894fba2..adb2a1f9ad5 100644 --- a/app/models/miq_request_workflow.rb +++ b/app/models/miq_request_workflow.rb @@ -39,11 +39,11 @@ def self.update_requester_from_parameters(data, user) new_user = User.lookup_by_identity(data[:user_name]) unless new_user - _log.error "requested not changed to <#{data[:user_name]}> due to a lookup failure" + _log.error("requested not changed to <#{data[:user_name]}> due to a lookup failure") raise ActiveRecord::RecordNotFound end - _log.warn "requested changed to <#{new_user.userid}>" + _log.warn("requested changed to <#{new_user.userid}>") new_user end @@ -439,17 +439,17 @@ def set_value_from_list(fn, f, value, values = nil, partial_key = false) unless value.nil? @values[fn] = values.to_a.detect do |v| if partial_key - _log.warn "comparing [#{v[0]}] to [#{value}]" + _log.warn("comparing [#{v[0]}] to [#{value}]") v[0].to_s.downcase.include?(value.to_s.downcase) else v.include?(value) end end if @values[fn].nil? 
- _log.info "set_value_from_list did not matched an item" if partial_key + _log.info("set_value_from_list did not matched an item") if partial_key @values[fn] = [nil, nil] else - _log.info "set_value_from_list matched item value:[#{value}] to item:[#{@values[fn][0]}]" if partial_key + _log.info("set_value_from_list matched item value:[#{value}] to item:[#{@values[fn][0]}]") if partial_key end end end @@ -606,7 +606,7 @@ def allowed_tags(options = {}) end rails_logger('allowed_tags', 1) - _log.info "allowed_tags returned [#{@tags.length}] objects in [#{Time.now - st}] seconds" + _log.info("allowed_tags returned [#{@tags.length}] objects in [#{Time.now - st}] seconds") @tags end @@ -643,7 +643,7 @@ def build_ci_hash_struct(ci, props) def get_dialogs @values[:miq_request_dialog_name] ||= @values[:provision_dialog_name] || dialog_name_from_automate || self.class.default_dialog_file dp = @values[:miq_request_dialog_name] = File.basename(@values[:miq_request_dialog_name], ".rb") - _log.info "Loading dialogs <#{dp}> for user <#{@requester.userid}>" + _log.info("Loading dialogs <#{dp}> for user <#{@requester.userid}>") d = MiqDialog.find_by("lower(name) = ? and dialog_type = ?", dp.downcase, self.class.base_model.name) if d.nil? raise MiqException::Error, @@ -660,7 +660,7 @@ def get_pre_dialogs pre_dialog_name = File.basename(pre_dialog_name, ".rb") d = MiqDialog.find_by(:name => pre_dialog_name, :dialog_type => self.class.base_model.name) unless d.nil? - _log.info "Loading pre-dialogs <#{pre_dialog_name}> for user <#{@requester.userid}>" + _log.info("Loading pre-dialogs <#{pre_dialog_name}> for user <#{@requester.userid}>") pre_dialogs = d.content end end @@ -671,17 +671,17 @@ def get_pre_dialogs def dialog_name_from_automate(message = 'get_dialog_name', input_fields = [:request_type], extra_attrs = {}) return nil if self.class.automate_dialog_request.nil? 
- _log.info "Querying Automate Profile for dialog name" + _log.info("Querying Automate Profile for dialog name") attrs = {'request' => self.class.automate_dialog_request, 'message' => message} extra_attrs.each { |k, v| attrs[k] = v } @values.each_key do |k| key = "dialog_input_#{k.to_s.downcase}" if attrs.key?(key) - _log.info "Skipping key=<#{key}> because already set to <#{attrs[key]}>" + _log.info("Skipping key=<#{key}> because already set to <#{attrs[key]}>") else value = (k == :vm_tags) ? get_tags : get_value(@values[k]).to_s - _log.info "Setting attrs[#{key}]=<#{value}>" + _log.info("Setting attrs[#{key}]=<#{value}>") attrs[key] = value end end @@ -697,7 +697,7 @@ def dialog_name_from_automate(message = 'get_dialog_name', input_fields = [:requ next unless key.downcase.starts_with?(dialog_option_prefix) next unless key.length > dialog_option_prefix_length key = key[dialog_option_prefix_length..-1].downcase - _log.info "Setting @values[#{key}]=<#{value}>" + _log.info("Setting @values[#{key}]=<#{value}>") @values[key.to_sym] = value end @@ -779,10 +779,10 @@ def refresh_field_values(values) # Update the display flag for fields based on current settings update_field_visibility - _log.info "refresh completed in [#{Time.now - st}] seconds" + _log.info("refresh completed in [#{Time.now - st}] seconds") rescue => err - _log.error "[#{err}]" - $log.error err.backtrace.join("\n") + _log.error("[#{err}]") + $log.error(err.backtrace.join("\n")) raise err end @@ -839,7 +839,7 @@ def load_ems_node(item, log_header) @ems_xml_nodes ||= {} klass_name = item.kind_of?(MiqHashStruct) ? item.evm_object_class : item.class.base_class.name node = @ems_xml_nodes["#{klass_name}_#{item.id}"] - $log.error "#{log_header} Resource <#{klass_name}_#{item.id} - #{item.name}> not found in cached resource tree." if node.nil? + $log.error("#{log_header} Resource <#{klass_name}_#{item.id} - #{item.name}> not found in cached resource tree.") if node.nil? 
node end @@ -938,7 +938,7 @@ def get_ems_metadata_tree(src) @ems_xml_nodes = {} xml = MiqXml.newDoc(:xmlhash) convert_to_xml(xml, result) - _log.info "EMS metadata collection completed in [#{Time.zone.now - st}] seconds" + _log.info("EMS metadata collection completed in [#{Time.zone.now - st}] seconds") xml end end @@ -1033,7 +1033,7 @@ def allowed_hosts_obj(options = {}) # Remove any hosts that are no longer in the list all_hosts = load_ar_obj(src[:ems]).hosts.find_all { |h| hosts_ids.include?(h.id) } allowed_hosts_obj_cache = process_filter(:host_filter, Host, all_hosts) - _log.info "allowed_hosts_obj returned [#{allowed_hosts_obj_cache.length}] objects in [#{Time.now - st}] seconds" + _log.info("allowed_hosts_obj returned [#{allowed_hosts_obj_cache.length}] objects in [#{Time.now - st}] seconds") rails_logger('allowed_hosts_obj', 1) allowed_hosts_obj_cache end @@ -1058,7 +1058,7 @@ def allowed_storages(_options = {}) ci_to_hash_struct(s) end - _log.info "allowed_storages returned [#{allowed_storages_cache.length}] objects in [#{Time.now - st}] seconds" + _log.info("allowed_storages returned [#{allowed_storages_cache.length}] objects in [#{Time.now - st}] seconds") rails_logger('allowed_storages', 1) allowed_storages_cache end @@ -1226,7 +1226,7 @@ def set_ws_field_value(values, key, data, dialog_name, dlg_fields) get_source_and_targets(true) get_field(key, dialog_name) field_values = dlg_field[:values] - _log.info "processing key <#{dialog_name}:#{key}(#{data_type})> with values <#{field_values.inspect}>" + _log.info("processing key <#{dialog_name}:#{key}(#{data_type})> with values <#{field_values.inspect}>") if field_values.present? result = if field_values.first.kind_of?(MiqHashStruct) found = field_values.detect { |v| v.id == set_value } @@ -1241,8 +1241,8 @@ def set_ws_field_value(values, key, data, dialog_name, dlg_fields) end end - _log.warn "Unable to find value for key <#{dialog_name}:#{key}(#{data_type})> with input value <#{set_value.inspect}>. 
No matching item found." if result.nil? - _log.info "setting key <#{dialog_name}:#{key}(#{data_type})> to value <#{set_value.inspect}>" + _log.warn("Unable to find value for key <#{dialog_name}:#{key}(#{data_type})> with input value <#{set_value.inspect}>. No matching item found.") if result.nil? + _log.info("setting key <#{dialog_name}:#{key}(#{data_type})> to value <#{set_value.inspect}>") values[key] = set_value end @@ -1267,7 +1267,7 @@ def set_ws_field_value_by_display_name(values, key, data, dialog_name, dlg_field if dlg_field.key?(:values) field_values = dlg_field[:values] - _log.info "processing key <#{dialog_name}:#{key}(#{data_type})> with values <#{field_values.inspect}>" + _log.info("processing key <#{dialog_name}:#{key}(#{data_type})> with values <#{field_values.inspect}>") if field_values.present? result = if field_values.first.kind_of?(MiqHashStruct) found = field_values.detect { |v| v.send(obj_key).to_s.downcase == find_value } @@ -1277,10 +1277,10 @@ def set_ws_field_value_by_display_name(values, key, data, dialog_name, dlg_field end if result.nil? - _log.warn "Unable to set key <#{dialog_name}:#{key}(#{data_type})> to value <#{find_value.inspect}>. No matching item found." + _log.warn("Unable to set key <#{dialog_name}:#{key}(#{data_type})> to value <#{find_value.inspect}>. No matching item found.") else set_value = [result.first, result.last] - _log.info "setting key <#{dialog_name}:#{key}(#{data_type})> to value <#{set_value.inspect}>" + _log.info("setting key <#{dialog_name}:#{key}(#{data_type})> to value <#{set_value.inspect}>") values[key] = set_value end end @@ -1305,7 +1305,7 @@ def set_ws_field_value_by_id_or_name(values, dlg_field, data, dialog_name, dlg_f def get_ws_dialog_fields(dialog_name) dlg_fields = @dialogs.fetch_path(:dialogs, dialog_name, :fields) - _log.info "<#{dialog_name}> dialog not found in dialogs. Field updates will be skipped." if dlg_fields.nil? + _log.info("<#{dialog_name}> dialog not found in dialogs. 
Field updates will be skipped.") if dlg_fields.nil? dlg_fields end @@ -1397,12 +1397,12 @@ def ws_requester_fields(values, fields) dialog_name = :requester dlg_fields = @dialogs.fetch_path(:dialogs, :requester, :fields) if dlg_fields.nil? - _log.info "<#{dialog_name}> dialog not found in dialogs. Field updates be skipped." + _log.info("<#{dialog_name}> dialog not found in dialogs. Field updates be skipped.") return end data = parse_ws_string(fields) - _log.info "data:<#{data.inspect}>" + _log.info("data:<#{data.inspect}>") values[:auto_approve] = data.delete(:auto_approve) == 'true' data.delete(:user_name) @@ -1419,10 +1419,10 @@ def ws_requester_fields(values, fields) dlg_keys = dlg_fields.keys data.keys.each do |key| if dlg_keys.include?(key) - _log.info "processing key <#{dialog_name}:#{key}> with value <#{data[key].inspect}>" + _log.info("processing key <#{dialog_name}:#{key}> with value <#{data[key].inspect}>") values[key] = data[key] else - _log.warn "Skipping key <#{dialog_name}:#{key}>. Key name not found in dialog" + _log.warn("Skipping key <#{dialog_name}:#{key}>. Key name not found in dialog") end end end @@ -1436,7 +1436,7 @@ def ws_schedule_fields(values, _fields, data) data_type = :time time_value = data.delete(key) set_value = time_value.blank? ? nil : Time.parse(time_value) - _log.info "setting key <#{dialog_name}:#{key}(#{data_type})> to value <#{set_value.inspect}>" + _log.info("setting key <#{dialog_name}:#{key}(#{data_type})> to value <#{set_value.inspect}>") values[key] = set_value end end @@ -1449,7 +1449,7 @@ def raise_validate_errors errors = [] fields { |_fn, f, _dn, _d| errors << f[:error] unless f[:error].nil? 
} err_text = "Provision failed for the following reasons:\n#{errors.join("\n")}" - _log.error "<#{err_text}>" + _log.error("<#{err_text}>") raise _("Provision failed for the following reasons:\n%{errors}") % {:errors => errors.join("\n")} end diff --git a/app/models/miq_schedule_worker/runner.rb b/app/models/miq_schedule_worker/runner.rb index d2bebce6e36..3ac71b7e5d2 100644 --- a/app/models/miq_schedule_worker/runner.rb +++ b/app/models/miq_schedule_worker/runner.rb @@ -2,8 +2,8 @@ class MiqScheduleWorker::Runner < MiqWorker::Runner include ActiveSupport::Callbacks - define_callbacks :dst_change - set_callback :dst_change, :after, :load_user_schedules + define_callbacks(:dst_change) + set_callback(:dst_change, :after, :load_user_schedules) OPTIONS_PARSER_SETTINGS = MiqWorker::Runner::OPTIONS_PARSER_SETTINGS + [ [:emsid, 'EMS Instance ID', String], @@ -45,7 +45,7 @@ def queue_length end def enqueue(object) - @queue.enq object + @queue.enq(object) end def load_system_schedules @@ -79,13 +79,13 @@ def schedules_for_all_roles every = worker_settings[:log_active_configuration_interval] scheduler.schedule_every(every, :tags => [:vmdb_appliance_log_config, schedule_category]) do - enqueue :vmdb_appliance_log_config + enqueue(:vmdb_appliance_log_config) end # Schedule - Log current database statistics and bloat every = worker_settings[:log_database_statistics_interval] scheduler.schedule_every(every, :tags => [:log_all_database_statistics, schedule_category]) do - enqueue :vmdb_database_log_all_database_statistics + enqueue(:vmdb_database_log_all_database_statistics) end # Schedule - Update Server Statistics @@ -94,7 +94,7 @@ def schedules_for_all_roles every, :first_in => every, :tags => [:status_update, schedule_category] - ) { enqueue :miq_server_status_update } + ) { enqueue(:miq_server_status_update) } # Schedule - Log Server and Worker Statistics every = worker_settings[:server_log_stats_interval] @@ -102,7 +102,7 @@ def schedules_for_all_roles every, :first_in => 
every, :tags => [:log_status, schedule_category] - ) { enqueue :miq_server_worker_log_status } + ) { enqueue(:miq_server_worker_log_status) } # Schedule - Periodic logging of database statistics interval = worker_settings[:db_diagnostics_interval] @@ -110,7 +110,7 @@ def schedules_for_all_roles interval, :first_in => 1.minute, :tags => [:log_statistics, schedule_category] - ) { enqueue :vmdb_database_connection_log_statistics } + ) { enqueue(:vmdb_database_connection_log_statistics) } # Schedule - Periodic check for updates on appliances only if MiqEnvironment::Command.is_appliance? @@ -119,7 +119,7 @@ def schedules_for_all_roles interval, :first_in => 1.minute, :tags => [:server_updates, schedule_category] - ) { enqueue :miq_server_queue_update_registration_status } + ) { enqueue(:miq_server_queue_update_registration_status) } end @schedules[:all] @@ -132,34 +132,34 @@ def schedules_for_scheduler_role # Schedule - Check for timed out jobs every = worker_settings[:job_timeout_interval] scheduler.schedule_every(every, :first_in => every) do - enqueue :job_check_jobs_for_timeout + enqueue(:job_check_jobs_for_timeout) end # Schedule - Check for retired items and start retirement # TODO: remove redundant settings in follow-up pr every = [worker_settings[:service_retired_interval], worker_settings[:vm_retired_interval], worker_settings[:orchestration_stack_retired_interval], worker_settings[:load_balancer_retired_interval]].min scheduler.schedule_every(every, :first_in => every) do - enqueue :retirement_check + enqueue(:retirement_check) end # Schedule - Periodic validation of authentications every = worker_settings[:authentication_check_interval] scheduler.schedule_every(every, :first_in => every) do # Queue authentication checks for CIs with credentials - enqueue :host_authentication_check_schedule - enqueue :ems_authentication_check_schedule + enqueue(:host_authentication_check_schedule) + enqueue(:ems_authentication_check_schedule) end every = 
worker_settings[:drift_state_purge_interval] scheduler.schedule_every(every, :first_in => every) do - enqueue :drift_state_purge_timer + enqueue(:drift_state_purge_timer) end if Session.enabled? # Schedule - Check for session timeouts scheduler.schedule_every(worker_settings[:session_timeout_interval]) do # Session is global to the region, therefore, run it only once on the scheduler's server - enqueue :session_check_session_timeout + enqueue(:session_check_session_timeout) end end @@ -167,36 +167,36 @@ def schedules_for_scheduler_role every = worker_settings[:evm_snapshot_interval] job_not_found_delay = worker_settings[:evm_snapshot_delete_delay_for_job_not_found] scheduler.schedule_every(every, :first_in => every) do - enqueue [:job_check_for_evm_snapshots, job_not_found_delay] + enqueue([:job_check_for_evm_snapshots, job_not_found_delay]) end # Queue a JobProxyDispatcher dispatch task at high priority unless there's already one on the queue # This dispatch method goes through all pending jobs to see if there's a free proxy available to work on one of them # It is very expensive to constantly do this, hence the need to ensure only one is on the queue at one time scheduler.schedule_every(worker_settings[:job_proxy_dispatcher_interval]) do - enqueue :job_proxy_dispatcher_dispatch + enqueue(:job_proxy_dispatcher_dispatch) end stale_interval = worker_settings[:job_proxy_dispatcher_stale_message_check_interval] threshold_seconds = worker_settings[:job_proxy_dispatcher_stale_message_timeout] scheduler.schedule_every(stale_interval) do - enqueue [:check_for_stuck_dispatch, threshold_seconds] + enqueue([:check_for_stuck_dispatch, threshold_seconds]) end # Schedule - Hourly Alert Evaluation Timer scheduler.schedule_every(1.hour, :first_in => 5.minutes) do - enqueue :miq_alert_evaluate_hourly_timer + enqueue(:miq_alert_evaluate_hourly_timer) end # Schedule - Prune old reports Timer every = worker_settings[:report_result_purge_interval] scheduler.schedule_every(every, 
:first_in => every) do - enqueue :miq_report_result_purge_timer + enqueue(:miq_report_result_purge_timer) end every = worker_settings[:container_entities_purge_interval] scheduler.schedule_every(every, :first_in => every) do - enqueue :archived_entities_purge_timer + enqueue(:archived_entities_purge_timer) end # Schedule every 24 hours @@ -209,11 +209,11 @@ def schedules_for_scheduler_role scheduler.schedule_every( worker_settings[:storage_file_collection_interval], :first_at => time_at - ) { enqueue :storage_scan_timer } + ) { enqueue(:storage_scan_timer) } schedule_settings_for_ems_refresh.each do |klass, every| scheduler.schedule_every(every, :first_in => every) do - enqueue [:ems_refresh_timer, klass] + enqueue([:ems_refresh_timer, klass]) end end @@ -229,7 +229,7 @@ def schedule_check_for_task_timeout every = worker_settings[:task_timeout_check_frequency] scheduler = scheduler_for(:scheduler) scheduler.schedule_every(every, :first_at => Time.current + 1.minute) do - enqueue :check_for_timed_out_active_tasks + enqueue(:check_for_timed_out_active_tasks) end end @@ -240,7 +240,7 @@ def schedule_chargeback_report_for_service_daily time_at += 1.day if time_at < Time.current + 1.hour scheduler = scheduler_for(:scheduler) scheduler.schedule_every(every, :first_at => time_at) do - enqueue [:generate_chargeback_for_service, :report_source => "Daily scheduler"] + enqueue([:generate_chargeback_for_service, :report_source => "Daily scheduler"]) end end @@ -254,21 +254,21 @@ def schedules_for_database_operations_role scheduler.schedule_cron( sched, :tags => [:database_operations, :database_metrics_collection_schedule], - ) { enqueue :vmdb_database_capture_metrics_timer } + ) { enqueue(:vmdb_database_capture_metrics_timer) } sched = ::Settings.database.metrics_collection.daily_rollup_schedule _log.info("database_metrics_daily_rollup_schedule: #{sched}") scheduler.schedule_cron( sched, :tags => [:database_operations, :database_metrics_daily_rollup_schedule], - ) { enqueue 
:vmdb_database_rollup_metrics_timer } + ) { enqueue(:vmdb_database_rollup_metrics_timer) } sched = ::Settings.database.metrics_history.purge_schedule _log.info("database_metrics_purge_schedule: #{sched}") scheduler.schedule_cron( sched, :tags => [:database_operations, :database_metrics_purge_schedule], - ) { enqueue :metric_purge_all_timer } + ) { enqueue(:metric_purge_all_timer) } @schedules[:database_operations] end @@ -284,7 +284,7 @@ def schedules_for_ldap_synchronization_role scheduler.schedule_cron( sched, :tags => [:ldap_synchronization, :ldap_synchronization_schedule], - ) { enqueue :ldap_server_sync_data_from_timer } + ) { enqueue(:ldap_server_sync_data_from_timer) } @schedules[:ldap_synchronization] end @@ -300,7 +300,7 @@ def schedules_for_ems_metrics_coordinator_role every, :first_in => first_in, :tags => [:ems_metrics_coordinator, :perf_capture_timer] - ) { enqueue :metric_capture_perf_capture_timer } + ) { enqueue(:metric_capture_perf_capture_timer) } every = worker_settings[:performance_realtime_purging_interval] first_in = worker_settings[:performance_realtime_purging_start_delay] @@ -308,7 +308,7 @@ def schedules_for_ems_metrics_coordinator_role every, :first_in => first_in, :tags => [:ems_metrics_coordinator, :purge_realtime_timer] - ) { enqueue :metric_purging_purge_realtime_timer } + ) { enqueue(:metric_purging_purge_realtime_timer) } every = worker_settings[:performance_rollup_purging_interval] first_in = worker_settings[:performance_rollup_purging_start_delay] @@ -316,7 +316,7 @@ def schedules_for_ems_metrics_coordinator_role every, :first_in => first_in, :tags => [:ems_metrics_coordinator, :purge_rollup_timer] - ) { enqueue :metric_purging_purge_rollup_timer } + ) { enqueue(:metric_purging_purge_rollup_timer) } @schedules[:ems_metrics_coordinator] end @@ -331,7 +331,7 @@ def schedules_for_event_role interval, :first_in => "300s", :tags => [:event_stream, :purge_schedule] - ) { enqueue :event_stream_purge_timer } + ) { 
enqueue(:event_stream_purge_timer) } # Schedule - Policy Event Purging interval = worker_settings[:policy_events_purge_interval] @@ -339,7 +339,7 @@ def schedules_for_event_role interval, :first_in => "300s", :tags => [:policy_event, :purge_schedule] - ) { enqueue :policy_event_purge_timer } + ) { enqueue(:policy_event_purge_timer) } @schedules[:event] end @@ -393,7 +393,7 @@ def rufus_add_normal_schedule(options) options[:job] = true @schedules[:scheduler] << @user_scheduler.send(method, interval, options) do |rufus_job| - enqueue [:miq_schedule_queue_scheduled_work, schedule_id, rufus_job] + enqueue([:miq_schedule_queue_scheduled_work, schedule_id, rufus_job]) end end @@ -407,7 +407,7 @@ def rufus_add_monthly_schedule(options) sch = MiqSchedule.find(schedule_id) next_run = sch.next_interval_time @schedules[:scheduler] << @user_scheduler.send(method, next_run, options.dup) do |rufus_job| - enqueue [:miq_schedule_queue_scheduled_work, schedule_id, rufus_job] + enqueue([:miq_schedule_queue_scheduled_work, schedule_id, rufus_job]) end # Schedule every X months for up to 5 years in the future @@ -415,7 +415,7 @@ def rufus_add_monthly_schedule(options) remaining_months.times do next_run += months.months @schedules[:scheduler] << @user_scheduler.send(method, next_run, options.dup) do |rufus_job| - enqueue [:miq_schedule_queue_scheduled_work, schedule_id, rufus_job] + enqueue([:miq_schedule_queue_scheduled_work, schedule_id, rufus_job]) end end @schedules[:scheduler] diff --git a/app/models/miq_server.rb b/app/models/miq_server.rb index e048623137f..2d4998dfc9a 100644 --- a/app/models/miq_server.rb +++ b/app/models/miq_server.rb @@ -63,7 +63,7 @@ def starting_server_record def self.setup_data_directory # create root data directory data_dir = File.join(File.expand_path(Rails.root), "data") - Dir.mkdir data_dir unless File.exist?(data_dir) + Dir.mkdir(data_dir) unless File.exist?(data_dir) end def self.pidfile @@ -178,7 +178,7 @@ def self.start if server.vm_id.nil? 
vms = Vm.find_all_by_mac_address_and_hostname_and_ipaddress(mac_address, hostname, ipaddr) if vms.length > 1 - _log.warn "Found multiple Vms that may represent this MiqServer: #{vms.collect(&:id).sort.inspect}" + _log.warn("Found multiple Vms that may represent this MiqServer: #{vms.collect(&:id).sort.inspect}") elsif vms.length == 1 server_hash[:vm_id] = vms.first.id end @@ -198,12 +198,12 @@ def self.start _log.info("Server IP Address: #{server.ipaddress}") unless server.ipaddress.blank? _log.info("Server Hostname: #{server.hostname}") unless server.hostname.blank? _log.info("Server MAC Address: #{server.mac_address}") unless server.mac_address.blank? - _log.info "Server GUID: #{my_guid}" - _log.info "Server Zone: #{my_zone}" - _log.info "Server Role: #{my_role}" + _log.info("Server GUID: #{my_guid}") + _log.info("Server Zone: #{my_zone}") + _log.info("Server Role: #{my_role}") region = MiqRegion.my_region - _log.info "Server Region number: #{region.region}, name: #{region.name}" unless region.nil? - _log.info "Database Latency: #{EvmDatabase.ping(connection)} ms" + _log.info("Server Region number: #{region.region}, name: #{region.name}") unless region.nil? + _log.info("Database Latency: #{EvmDatabase.ping(connection)} ms") Vmdb::Appliance.log_config_on_startup @@ -249,7 +249,7 @@ def log_active_servers MiqRegion.my_region.active_miq_servers.sort_by { |s| [s.my_zone, s.name] }.each do |s| local = s.is_local? ? 'Y' : 'N' master = s.is_master? ? 
'Y' : 'N' - $log.info "MiqServer: local=#{local}, master=#{master}, status=#{'%08s' % s.status}, id=#{'%05d' % s.id}, pid=#{'%05d' % s.pid}, guid=#{s.guid}, name=#{s.name}, zone=#{s.my_zone}, hostname=#{s.hostname}, ipaddress=#{s.ipaddress}, version=#{s.version}, build=#{s.build}, active roles=#{s.active_role_names.join(':')}" + $log.info("MiqServer: local=#{local}, master=#{master}, status=#{'%08s' % s.status}, id=#{'%05d' % s.id}, pid=#{'%05d' % s.pid}, guid=#{s.guid}, name=#{s.name}, zone=#{s.my_zone}, hostname=#{s.hostname}, ipaddress=#{s.ipaddress}, version=#{s.version}, build=#{s.build}, active roles=#{s.active_role_names.join(':')}") end end @@ -334,7 +334,7 @@ def monitor def monitor_loop loop do _dummy, timings = Benchmark.realtime_block(:total_time) { monitor } - _log.info "Server Monitoring Complete - Timings: #{timings.inspect}" unless timings[:total_time] < server_log_timings_threshold + _log.info("Server Monitoring Complete - Timings: #{timings.inspect}") unless timings[:total_time] < server_log_timings_threshold sleep monitor_poll end rescue Interrupt => e diff --git a/app/models/miq_server/environment_management.rb b/app/models/miq_server/environment_management.rb index eab7d3d7377..973f1967d34 100644 --- a/app/models/miq_server/environment_management.rb +++ b/app/models/miq_server/environment_management.rb @@ -66,8 +66,8 @@ def validate_database ActiveRecord::Base.connection.reconnect! 
# Log the Versions - _log.info "Database Adapter: [#{ActiveRecord::Base.connection.adapter_name}], version: [#{ActiveRecord::Base.connection.database_version}]" if ActiveRecord::Base.connection.respond_to?(:database_version) - _log.info "Database Adapter: [#{ActiveRecord::Base.connection.adapter_name}], detailed version: [#{ActiveRecord::Base.connection.detailed_database_version}]" if ActiveRecord::Base.connection.respond_to?(:detailed_database_version) + _log.info("Database Adapter: [#{ActiveRecord::Base.connection.adapter_name}], version: [#{ActiveRecord::Base.connection.database_version}]") if ActiveRecord::Base.connection.respond_to?(:database_version) + _log.info("Database Adapter: [#{ActiveRecord::Base.connection.adapter_name}], detailed version: [#{ActiveRecord::Base.connection.detailed_database_version}]") if ActiveRecord::Base.connection.respond_to?(:detailed_database_version) end def start_memcached diff --git a/app/models/miq_server/queue_management.rb b/app/models/miq_server/queue_management.rb index ca36b03bd9c..26f08555421 100644 --- a/app/models/miq_server/queue_management.rb +++ b/app/models/miq_server/queue_management.rb @@ -50,7 +50,7 @@ def shutdown_and_exit_queue def restart_queue log_message = "Server restart requested" log_message += ", remote server: [#{name}], GUID: [#{guid}], initiated from: [#{MiqServer.my_server.name}], GUID: [#{MiqServer.my_server.guid}]" if self.is_remote? - _log.info log_message + _log.info(log_message) enqueue_for_server('restart') end diff --git a/app/models/miq_server/server_monitor.rb b/app/models/miq_server/server_monitor.rb index 6af246125b8..075969e3fc7 100644 --- a/app/models/miq_server/server_monitor.rb +++ b/app/models/miq_server/server_monitor.rb @@ -16,20 +16,20 @@ def mark_as_not_responding(seconds = miq_server_time_threshold) end def make_master_server(last_master) - _log.info "Master server has #{last_master.nil? ? "not been set" : "died, #{last_master.name}"}. 
Attempting takeover as new master server, #{name}." + _log.info("Master server has #{last_master.nil? ? "not been set" : "died, #{last_master.name}"}. Attempting takeover as new master server, #{name}.") parent = MiqRegion.my_region(true) parent.lock do # See if an ACTIVE server has already taken over active_servers = parent.active_miq_servers - _log.debug "Double checking that nothing has changed" + _log.debug("Double checking that nothing has changed") master = active_servers.detect(&:is_master?) if (last_master.nil? && !master.nil?) || (!last_master.nil? && !master.nil? && last_master.id != master.id) - _log.info "Aborting master server takeover as another server, #{master.name}, has taken control first." + _log.info("Aborting master server takeover as another server, #{master.name}, has taken control first.") return nil end - _log.debug "Setting this server, #{name}, as master server" + _log.debug("Setting this server, #{name}, as master server") # Set is_master on self, reset every other server in the region, including # inactive ones. @@ -38,7 +38,7 @@ def make_master_server(last_master) s.save! end end - _log.info "This server #{name} is now set as the master server, last_master: #{last_master.try(:name)}" + _log.info("This server #{name} is now set as the master server, last_master: #{last_master.try(:name)}") self end diff --git a/app/models/miq_server/server_smart_proxy.rb b/app/models/miq_server/server_smart_proxy.rb index 9775bc3d67a..051e22e8002 100644 --- a/app/models/miq_server/server_smart_proxy.rb +++ b/app/models/miq_server/server_smart_proxy.rb @@ -87,7 +87,7 @@ def queue_call(ost) timeout_adj = 8 end end - $log.debug "#{log_prefix}: queuing call to #{self.class.name}##{ost.method_name}" + $log.debug("#{log_prefix}: queuing call to #{self.class.name}##{ost.method_name}") # Queue call to scan_metadata or sync_metadata. 
MiqQueue.submit_job( :service => "smartproxy", @@ -99,7 +99,7 @@ def queue_call(ost) :msg_timeout => worker_setting[:queue_timeout] * timeout_adj ) else - _log.error "Unsupported method [#{ost.method_name}]" + _log.error("Unsupported method [#{ost.method_name}]") end end @@ -108,7 +108,7 @@ def scan_metadata(ost) klass = ost.target_type.constantize target = klass.find(ost.target_id) job = Job.find_by(:guid => ost.taskid) - _log.debug "#{target.name} (#{target.class.name})" + _log.debug("#{target.name} (#{target.class.name})") begin ost.args[1] = YAML.load(ost.args[1]) # TODO: YAML.dump'd in call_scan - need it be? ost.scanData = ost.args[1].kind_of?(Hash) ? ost.args[1] : {} @@ -121,8 +121,8 @@ def scan_metadata(ost) target.perform_metadata_scan(ost) rescue Exception => err - _log.error err.to_s - _log.debug err.backtrace.join("\n") + _log.error(err.to_s) + _log.debug(err.backtrace.join("\n")) job.signal(:abort_retry, err.to_s, "error", true) return end @@ -133,12 +133,12 @@ def sync_metadata(ost) klass = ost.target_type.constantize target = klass.find(ost.target_id) job = Job.find_by(:guid => ost.taskid) - _log.debug "#{log_prefix}: #{target.name} (#{target.class.name})" + _log.debug("#{log_prefix}: #{target.name} (#{target.class.name})") begin target.perform_metadata_sync(ost) rescue Exception => err - _log.error err.to_s - _log.debug err.backtrace.join("\n") + _log.error(err.to_s) + _log.debug(err.backtrace.join("\n")) job.signal(:abort_retry, err.to_s, "error", true) return end @@ -151,7 +151,7 @@ def print_backtrace(errStr) errArray = errArray[0, 2] if $log.level > 1 # Print the stack trace to debug logging level - errArray.each { |e| $log.error "Error Trace: [#{e}]" } + errArray.each { |e| $log.error("Error Trace: [#{e}]") } end def forceVmScan diff --git a/app/models/miq_snmp.rb b/app/models/miq_snmp.rb index 0ac922b8379..6fb59078eb6 100644 --- a/app/models/miq_snmp.rb +++ b/app/models/miq_snmp.rb @@ -18,7 +18,7 @@ class MiqSnmp } def self.trap_v1(inputs) - 
_log.info ">> inputs=#{inputs.inspect}" + _log.info(">> inputs=#{inputs.inspect}") host = inputs[:host] || inputs['host'] port = inputs[:port] || inputs['port'] || 162 @@ -54,7 +54,7 @@ def self.trap_v1(inputs) hosts = host.kind_of?(Array) ? host : [host] hosts.each do |host| - _log.info "Sending SNMP Trap (v1) to host=[#{host}], port=[#{port}], enterprise_id=[#{enterprise}], generic_trap=[#{generic_trap}], specific_trap=[#{specific_trap}], uptime=[#{uptime}], agent=[#{agent_address}], vars=#{vars.inspect}" + _log.info("Sending SNMP Trap (v1) to host=[#{host}], port=[#{port}], enterprise_id=[#{enterprise}], generic_trap=[#{generic_trap}], specific_trap=[#{specific_trap}], uptime=[#{uptime}], agent=[#{agent_address}], vars=#{vars.inspect}") SNMP::Manager.open(:Host => host, :TrapPort => port) do |manager| manager.trap_v1(enterprise, agent_address, generic_trap, specific_trap, uptime, vars) end @@ -62,7 +62,7 @@ def self.trap_v1(inputs) end def self.trap_v2(inputs) - _log.info ">> inputs=#{inputs.inspect}" + _log.info(">> inputs=#{inputs.inspect}") host = inputs[:host] || inputs['host'] port = inputs[:port] || inputs['port'] || 162 @@ -80,7 +80,7 @@ def self.trap_v2(inputs) hosts = host.kind_of?(Array) ? host : [host] hosts.each do |host| - _log.info "Sending SNMP Trap (v2) to host=[#{host}], port=[#{port}], trap_oid=[#{trap_oid}], vars=#{vars.inspect}" + _log.info("Sending SNMP Trap (v2) to host=[#{host}], port=[#{port}], trap_oid=[#{trap_oid}], vars=#{vars.inspect}") SNMP::Manager.open(:Host => host, :TrapPort => port) do |manager| manager.trap_v2(uptime, trap_oid, vars) end diff --git a/app/models/miq_task.rb b/app/models/miq_task.rb index f08c9e7ff24..b97bc1230da 100644 --- a/app/models/miq_task.rb +++ b/app/models/miq_task.rb @@ -50,10 +50,10 @@ def active? def check_active if active? 
- _log.warn "Task is active, delete not allowed; id: [#{id}]" + _log.warn("Task is active, delete not allowed; id: [#{id}]") throw :abort end - _log.info "Task deleted; id: [#{id}]" + _log.info("Task deleted; id: [#{id}]") true end @@ -76,7 +76,7 @@ def self.update_status(taskid, state, status, message) def check_associations if job && job.is_active? - _log.warn "Delete not allowed: Task [#{id}] has active job - id: [#{job.id}], guid: [#{job.guid}]," + _log.warn("Delete not allowed: Task [#{id}] has active job - id: [#{job.id}], guid: [#{job.guid}],") throw :abort end true diff --git a/app/models/miq_vim_broker_worker.rb b/app/models/miq_vim_broker_worker.rb index 5393b696a26..afba214fdea 100644 --- a/app/models/miq_vim_broker_worker.rb +++ b/app/models/miq_vim_broker_worker.rb @@ -72,7 +72,7 @@ def self.broker_unavailable(err_class, message) def self.queue_reconnect_ems(ems) deliver_on = Time.now.utc + (worker_settings[:reconnect_retry_interval] || 5.minutes) - _log.info "Queueing reconnect for EMS name: [#{ems.name}], id: [#{ems.id}] at [#{deliver_on}]" + _log.info("Queueing reconnect for EMS name: [#{ems.name}], id: [#{ems.id}] at [#{deliver_on}]") MiqQueue.put( :class_name => name, :method_name => "reconnect_ems", diff --git a/app/models/miq_vim_broker_worker/runner.rb b/app/models/miq_vim_broker_worker/runner.rb index ade0f6471ec..31dbc5900cd 100644 --- a/app/models/miq_vim_broker_worker/runner.rb +++ b/app/models/miq_vim_broker_worker/runner.rb @@ -72,7 +72,7 @@ def enable_broker_update_notification # Set notify method at the class level for new connections, and at the # instance level for existing connections. - MiqVimBroker.notifyMethod = @vim_broker_server.notifyMethod = ->(h) { @queue.enq h } + MiqVimBroker.notifyMethod = @vim_broker_server.notifyMethod = ->(h) { @queue.enq(h) } @notification_enabled = true end @@ -208,12 +208,12 @@ def on_miq_vim_removed_event(ems_id, event) ems = ManageIQ::Providers::Vmware::InfraManager.find(ems_id) if ems.nil? 
- _log.error "#{log_prefix} Unable to find EMS with address: [#{event[:server]}]" + _log.error("#{log_prefix} Unable to find EMS with address: [#{event[:server]}]") return end unless self.class.emses_and_hosts_to_monitor.include?(ems) - _log.info "#{log_prefix} Not reconnecting inactive connection to #{event[:server]}" + _log.info("#{log_prefix} Not reconnecting inactive connection to #{event[:server]}") return end @@ -325,7 +325,7 @@ def message_reconnect_ems(*args) ems = ManageIQ::Providers::Vmware::InfraManager.find_by(:id => ems_id) if ems.nil? - _log.error "#{log_prefix} Unable to find EMS with id: [#{ems_id}]" + _log.error("#{log_prefix} Unable to find EMS with id: [#{ems_id}]") return end diff --git a/app/models/miq_widget.rb b/app/models/miq_widget.rb index f11b38f6590..22058f755b3 100644 --- a/app/models/miq_widget.rb +++ b/app/models/miq_widget.rb @@ -76,7 +76,7 @@ def create_task(num_targets, userid = User.current_userid) :context_data => context_data ) - _log.info "Created MiqTask ID: [#{miq_task.id}], Name: [#{miq_task.name}] for: [#{num_targets}] groups" + _log.info("Created MiqTask ID: [#{miq_task.id}], Name: [#{miq_task.name}] for: [#{num_targets}] groups") self.miq_task_id = miq_task.id self.save! @@ -114,7 +114,7 @@ def queue_generate_content_for_users_or_group(*args) end def generate_content_complete_callback(status, _message, _result) - _log.info "Widget ID: [#{id}], MiqTask ID: [#{miq_task_id}], Status: [#{status}]" + _log.info("Widget ID: [#{id}], MiqTask ID: [#{miq_task_id}], Status: [#{status}]") miq_task.lock(:exclusive) do |locked_miq_task| if MiqTask.status_error?(status) @@ -263,7 +263,7 @@ def generate_one_content_for_user(group, userid) timezone = user.get_timezone if timezone.nil? - _log.warn "#{log_prefix} No timezone provided for #{userid}! UTC will be used." + _log.warn("#{log_prefix} No timezone provided for #{userid}! 
UTC will be used.") timezone = "UTC" end @@ -459,7 +459,7 @@ def self.sync_from_file(filename) end def self.sync_from_hash(attrs) - attrs.delete "id" + attrs.delete("id") filename = attrs.delete("filename") rname = attrs.delete("resource_name") if rname && attrs["resource_type"] @@ -532,8 +532,8 @@ def sync_schedule(schedule_info) self.miq_schedule = sched self.save! - _log.info "Created schedule for Widget: [#{title}]" - _log.debug "Widget: [#{title}] created schedule: [#{sched.inspect}]" + _log.info("Created schedule for Widget: [#{title}]") + _log.debug("Widget: [#{title}] created schedule: [#{sched.inspect}]") sched end diff --git a/app/models/miq_widget/chart_content.rb b/app/models/miq_widget/chart_content.rb index 3507f4f5b8f..437cb117603 100644 --- a/app/models/miq_widget/chart_content.rb +++ b/app/models/miq_widget/chart_content.rb @@ -6,6 +6,6 @@ def generate(user_or_group) theme ||= "MIQ" report.to_chart(theme, false, MiqReport.graph_options) - Charting.serialized report.chart + Charting.serialized(report.chart) end end diff --git a/app/models/miq_worker.rb b/app/models/miq_worker.rb index 29fb83a7251..1a05dedf8a4 100644 --- a/app/models/miq_worker.rb +++ b/app/models/miq_worker.rb @@ -7,9 +7,9 @@ class MiqWorker < ApplicationRecord belongs_to :miq_server has_many :messages, :as => :handler, :class_name => 'MiqQueue' - has_many :active_messages, -> { where ["state = ?", "dequeue"] }, :as => :handler, :class_name => 'MiqQueue' - has_many :ready_messages, -> { where ["state = ?", "ready"] }, :as => :handler, :class_name => 'MiqQueue' - has_many :processed_messages, -> { where ["state != ?", "ready"] }, :as => :handler, :class_name => 'MiqQueue', :dependent => :destroy + has_many :active_messages, -> { where(["state = ?", "dequeue"]) }, :as => :handler, :class_name => 'MiqQueue' + has_many :ready_messages, -> { where(["state = ?", "ready"]) }, :as => :handler, :class_name => 'MiqQueue' + has_many :processed_messages, -> { where(["state != ?", "ready"]) }, 
:as => :handler, :class_name => 'MiqQueue', :dependent => :destroy virtual_column :friendly_name, :type => :string virtual_column :uri_or_queue_name, :type => :string @@ -321,7 +321,7 @@ def self.close_pg_sockets_inherited_from_parent owner_to_pool[Process.ppid].values.compact.each do |pool| pool.connections.each do |conn| socket = conn.raw_connection.socket - _log.info "Closing socket: #{socket}" + _log.info("Closing socket: #{socket}") IO.for_fd(socket).close end end diff --git a/app/models/mixins/cinder_manager_mixin.rb b/app/models/mixins/cinder_manager_mixin.rb index 96dd1c47f73..b699e2d4e91 100644 --- a/app/models/mixins/cinder_manager_mixin.rb +++ b/app/models/mixins/cinder_manager_mixin.rb @@ -29,7 +29,7 @@ def ensure_cinder_managers begin cinder_manager.save cinder_manager.reload - _log.debug "cinder_manager.id = #{cinder_manager.id}" + _log.debug("cinder_manager.id = #{cinder_manager.id}") CloudVolume.where(:ems_id => id).update(:ems_id => cinder_manager.id) CloudVolumeBackup.where(:ems_id => id).update(:ems_id => cinder_manager.id) diff --git a/app/models/mixins/miq_provision_mixin.rb b/app/models/mixins/miq_provision_mixin.rb index 78a567c9f22..e52cbab9fe2 100644 --- a/app/models/mixins/miq_provision_mixin.rb +++ b/app/models/mixins/miq_provision_mixin.rb @@ -220,7 +220,7 @@ def set_customization_spec(custom_spec_name, override = false) self.options.keys.each do |key| v_old = self.options[key] v_new = options[key] - _log.info "option <#{key}> was changed from <#{v_old.inspect}> to <#{v_new.inspect}>" unless v_old == v_new + _log.info("option <#{key}> was changed from <#{v_old.inspect}> to <#{v_new.inspect}>") unless v_old == v_new end update_attribute(:options, options) diff --git a/app/models/mixins/relationship_mixin.rb b/app/models/mixins/relationship_mixin.rb index c03c998f1d2..50357a03e02 100644 --- a/app/models/mixins/relationship_mixin.rb +++ b/app/models/mixins/relationship_mixin.rb @@ -81,7 +81,7 @@ def with_relationship_type(rel) 
self.relationship_type = rel unless rel.nil? begin - return yield self + yield(self) ensure if rel_changed relationship_types.pop diff --git a/app/models/mixins/retirement_mixin.rb b/app/models/mixins/retirement_mixin.rb index 23a4b3e71c0..8da92586394 100644 --- a/app/models/mixins/retirement_mixin.rb +++ b/app/models/mixins/retirement_mixin.rb @@ -55,9 +55,9 @@ def retires_on=(timestamp) def extend_retires_on(days, date = Time.zone.now) raise _("Invalid Date specified: %{date}") % {:date => date} unless date.kind_of?(ActiveSupport::TimeWithZone) - _log.info "Extending Retirement Date on #{self.class.name} id:<#{self.id}>, name:<#{self.name}> " + _log.info("Extending Retirement Date on #{self.class.name} id:<#{self.id}>, name:<#{self.name}> ") new_retires_date = date.in_time_zone + days.to_i.days - _log.info "Original Date: #{date} Extend days: #{days} New Retirement Date: #{new_retires_date}" + _log.info("Original Date: #{date} Extend days: #{days} New Retirement Date: #{new_retires_date}") self.retires_on = new_retires_date save end diff --git a/app/models/mixins/scanning_mixin.rb b/app/models/mixins/scanning_mixin.rb index 05ad29858e4..41a89ab04f2 100644 --- a/app/models/mixins/scanning_mixin.rb +++ b/app/models/mixins/scanning_mixin.rb @@ -59,7 +59,7 @@ def save_metadata(target_id, data_array) # Reset the root of the xml document to match the expected starting point doc.root = doc.root.elements[1].elements[1] rescue => err - _log.error "Invalid xml error [#{err}] for xml:[#{doc}]" + _log.error("Invalid xml error [#{err}] for xml:[#{doc}]") end target.add_elements(doc) target.save! 
@@ -117,7 +117,7 @@ def scan_queue(userid = "system", options = {}) # Do the SyncMetadata operation through the server smart proxy def sync_metadata(category, options = {}) - _log.debug "category=[#{category}] [#{category.class}]" + _log.debug("category=[#{category}] [#{category.class}]") options = { "category" => category.join(","), "from_time" => nil, # TODO: is this still needed?: last_drift_state_timestamp.try(:to_i), @@ -139,7 +139,7 @@ def sync_metadata(category, options = {}) # Do the ScanMetadata operation through the server smart proxy def scan_metadata(category, options = {}) - _log.info "category=[#{category}] [#{category.class}]" + _log.info("category=[#{category}] [#{category.class}]") options = { "category" => category.join(","), "taskid" => nil, @@ -207,7 +207,7 @@ def scan_via_miq_vm(miqVm, ost) categories_processed = 0 ost.xml_class = XmlHash::Document - _log.debug "Scanning - Initializing scan" + _log.debug("Scanning - Initializing scan") update_job_message(ost, "Initializing scan") bb, last_err = nil xml_summary = ost.xml_class.createDoc(:summary) @@ -216,7 +216,7 @@ def scan_via_miq_vm(miqVm, ost) xml_summary.root.add_attributes("taskid" => ost.taskid) data_dir = File.join(File.expand_path(Rails.root), "data/metadata") - _log.debug "creating #{data_dir}" + _log.debug("creating #{data_dir}") begin Dir.mkdir(data_dir) rescue Errno::EEXIST @@ -230,33 +230,33 @@ def scan_via_miq_vm(miqVm, ost) begin require 'metadata/MIQExtract/MIQExtract' - _log.debug "instantiating MIQExtract" + _log.debug("instantiating MIQExtract") extractor = MIQExtract.new(miqVm, ost) - _log.debug "instantiated MIQExtract" + _log.debug("instantiated MIQExtract") require 'blackbox/VmBlackBox' - _log.debug "instantiating BlackBox" + _log.debug("instantiating BlackBox") bb = Manageiq::BlackBox.new(guid, ost) # TODO: target must have GUID - _log.debug "instantiated BlackBox" + _log.debug("instantiated BlackBox") - _log.debug "Checking for file systems..." 
+ _log.debug("Checking for file systems...") raise extractor.systemFsMsg unless extractor.systemFs categories = extractor.categories - _log.debug "categories = [ #{categories.join(', ')} ]" + _log.debug("categories = [ #{categories.join(', ')} ]") categories.each do |c| update_job_message(ost, "Scanning #{c}") - _log.info "Scanning [#{c}] information. TaskId:[#{ost.taskid}] VM:[#{name}]" + _log.info("Scanning [#{c}] information. TaskId:[#{ost.taskid}] VM:[#{name}]") st = Time.now xml = extractor.extract(c) { |scan_data| update_job_message(ost, scan_data[:msg]) } categories_processed += 1 - _log.info "Scanning [#{c}] information ran for [#{Time.now - st}] seconds. TaskId:[#{ost.taskid}] VM:[#{name}]" + _log.info("Scanning [#{c}] information ran for [#{Time.now - st}] seconds. TaskId:[#{ost.taskid}] VM:[#{name}]") if xml xml.root.add_attributes("created_on" => ost.scanTime.to_i, "display_time" => ost.scanTime.iso8601) - _log.debug "Writing scanned data to XML for [#{c}] to blackbox." + _log.debug("Writing scanned data to XML for [#{c}] to blackbox.") bb.saveXmlData(xml, c) - _log.debug "writing xml complete." + _log.debug("writing xml complete.") category_node = xml_summary.class.load(xml.root.shallow_copy.to_xml.to_s).root category_node.add_attributes("start_time" => st.utc.iso8601, "end_time" => Time.now.utc.iso8601) @@ -265,14 +265,14 @@ def scan_via_miq_vm(miqVm, ost) # Handle categories that we do not expect to return data. # Otherwise, log an error if we do not get data back. 
unless c == "vmevents" - _log.error "Error: No XML returned for category [#{c}] TaskId:[#{ost.taskid}] VM:[#{name}]" + _log.error("Error: No XML returned for category [#{c}] TaskId:[#{ost.taskid}] VM:[#{name}]") end end end rescue NoMethodError => scanErr last_err = scanErr - _log.error "Scanmetadata Error - [#{scanErr}]" - _log.error "Scanmetadata Error - [#{scanErr.backtrace.join("\n")}]" + _log.error("Scanmetadata Error - [#{scanErr}]") + _log.error("Scanmetadata Error - [#{scanErr.backtrace.join("\n")}]") rescue Timeout::Error, StandardError => scanErr last_err = scanErr ensure @@ -280,13 +280,13 @@ def scan_via_miq_vm(miqVm, ost) update_job_message(ost, "Scanning completed.") # If we are sent a TaskId transfer a end of job summary xml. - _log.info "Starting: Sending scan summary to server. TaskId:[#{ost.taskid}] VM:[#{name}]" + _log.info("Starting: Sending scan summary to server. TaskId:[#{ost.taskid}] VM:[#{name}]") if last_err status = "Error" status_code = 8 status_code = 16 if categories_processed.zero? scan_message = last_err.to_s - _log.error "ScanMetadata error status:[#{status_code}]: message:[#{last_err}]" + _log.error("ScanMetadata error status:[#{status_code}]: message:[#{last_err}]") _log.debug { last_err.backtrace.join("\n") } end @@ -297,12 +297,12 @@ def scan_via_miq_vm(miqVm, ost) "message" => scan_message ) save_metadata_op(MIQEncode.encode(xml_summary.to_xml.to_s), "b64,zlib,xml", ost.taskid) - _log.info "Completed: Sending scan summary to server. TaskId:[#{ost.taskid}] target:[#{name}]" + _log.info("Completed: Sending scan summary to server. TaskId:[#{ost.taskid}] target:[#{name}]") end end def sync_stashed_metadata(ost) - _log.info "from #{self.class.name}" + _log.info("from #{self.class.name}") xml_summary = nil begin raise _("No synchronize category specified") if ost.category.nil? 
@@ -316,7 +316,7 @@ def sync_stashed_metadata(ost) bb = nil xml_summary = ost.xml_class.createDoc("") - _log.debug "xml_summary1 = #{xml_summary.class.name}" + _log.debug("xml_summary1 = #{xml_summary.class.name}") xml_node = xml_summary.root.add_element("syncmetadata") xml_summary.root.add_attributes("scan_time" => ost.scanTime, "taskid" => ost.taskid) ost.skipConfig = true @@ -343,20 +343,20 @@ def sync_stashed_metadata(ost) # Verify that we have data to send if !items_selected.zero? - _log.info "Starting: Sending target data for [#{c}] to server. Size:[#{data.length}] TaskId:[#{ost.taskid}] target:[#{name}]" + _log.info("Starting: Sending target data for [#{c}] to server. Size:[#{data.length}] TaskId:[#{ost.taskid}] target:[#{name}]") save_metadata_op(data, "b64,zlib,xml", ost.taskid) - _log.info "Completed: Sending target data for [#{c}] to server. Size:[#{data.length}] TaskId:[#{ost.taskid}] target:[#{name}]" + _log.info("Completed: Sending target data for [#{c}] to server. Size:[#{data.length}] TaskId:[#{ost.taskid}] target:[#{name}]") else # Do not send empty XMLs. Warn if there is not data at all, or just not items selected. if items_total.zero? - _log.warn "Synchronize: No data found for [#{c}]. Items:Total[#{items_total}] Selected[#{items_selected}] TaskId:[#{ost.taskid}] VM:[#{name}]" + _log.warn("Synchronize: No data found for [#{c}]. Items:Total[#{items_total}] Selected[#{items_selected}] TaskId:[#{ost.taskid}] VM:[#{name}]") else - _log.warn "Synchronize: No data selected for [#{c}]. Items:Total[#{items_total}] Selected[#{items_selected}] TaskId:[#{ost.taskid}] VM:[#{name}]" + _log.warn("Synchronize: No data selected for [#{c}]. 
Items:Total[#{items_total}] Selected[#{items_selected}] TaskId:[#{ost.taskid}] VM:[#{name}]") end end end rescue => syncErr - _log.error syncErr.to_s + _log.error(syncErr.to_s) _log.debug { syncErr.backtrace.join("\n") } ensure if bb @@ -364,10 +364,10 @@ def sync_stashed_metadata(ost) bb.close end - _log.info "Starting: Sending target summary to server. TaskId:[#{ost.taskid}] target:[#{name}]" - _log.debug "xml_summary2 = #{xml_summary.class.name}" + _log.info("Starting: Sending target summary to server. TaskId:[#{ost.taskid}] target:[#{name}]") + _log.debug("xml_summary2 = #{xml_summary.class.name}") save_metadata_op(MIQEncode.encode(xml_summary.to_s), "b64,zlib,xml", ost.taskid) - _log.info "Completed: Sending target summary to server. TaskId:[#{ost.taskid}] target:[#{name}]" + _log.info("Completed: Sending target summary to server. TaskId:[#{ost.taskid}] target:[#{name}]") update_job_message(ost, "Synchronization complete") diff --git a/app/models/mixins/scanning_operations_mixin.rb b/app/models/mixins/scanning_operations_mixin.rb index f90a3d07c81..07ec1b68493 100644 --- a/app/models/mixins/scanning_operations_mixin.rb +++ b/app/models/mixins/scanning_operations_mixin.rb @@ -7,8 +7,8 @@ module ScanningOperationsMixin def save_metadata_op(xmlFile, type, jobid = nil) begin Timeout.timeout(WS_TIMEOUT) do # TODO: do we need this timeout? 
- _log.info "target [#{guid}], job [#{jobid}] enter" - _log.info "target [#{guid}] found target object id [#{id}], job [#{jobid}]" + _log.info("target [#{guid}], job [#{jobid}] enter") + _log.info("target [#{guid}] found target object id [#{id}], job [#{jobid}]") MiqQueue.submit_job( :service => "smartstate", :affinity => ext_management_system, @@ -18,7 +18,7 @@ def save_metadata_op(xmlFile, type, jobid = nil) :data => Marshal.dump([xmlFile, type]), :task_id => jobid, ) - _log.info "target [#{guid}] data put on queue, job [#{jobid}]" + _log.info("target [#{guid}] data put on queue, job [#{jobid}]") end rescue Exception => err _log.log_backtrace(err) @@ -29,14 +29,14 @@ def save_metadata_op(xmlFile, type, jobid = nil) end def task_update_op(task_id, state, status, message) - _log.info "task_id: [#{task_id}] starting" + _log.info("task_id: [#{task_id}] starting") begin Timeout.timeout(WS_TIMEOUT) do task = MiqTask.find_by(:id => task_id) if !task.nil? task.update_status(state, status, message) else - _log.warn "task_id: [#{task_id}] not found" + _log.warn("task_id: [#{task_id}] not found") end return true end diff --git a/app/models/mixins/service_mixin.rb b/app/models/mixins/service_mixin.rb index deed8a520ef..b094c5ebdd2 100644 --- a/app/models/mixins/service_mixin.rb +++ b/app/models/mixins/service_mixin.rb @@ -3,7 +3,7 @@ module ServiceMixin included do # These relationships are used for resources that are processed as part of the service - has_many :service_resources, -> { order "group_idx ASC" }, :dependent => :destroy + has_many :service_resources, -> { order("group_idx ASC") }, :dependent => :destroy has_many :resource_actions, :as => :resource, :dependent => :destroy serialize :options, Hash diff --git a/app/models/mixins/swift_manager_mixin.rb b/app/models/mixins/swift_manager_mixin.rb index 8bfc7df101c..7456a4b4c46 100644 --- a/app/models/mixins/swift_manager_mixin.rb +++ b/app/models/mixins/swift_manager_mixin.rb @@ -26,7 +26,7 @@ def 
ensure_swift_managers begin swift_manager.save swift_manager.reload - _log.debug "swift_manager.id = #{swift_manager.id}" + _log.debug("swift_manager.id = #{swift_manager.id}") CloudObjectStoreContainer.where(:ems_id => id).update(:ems_id => swift_manager.id) CloudObjectStoreObject.where(:ems_id => id).update(:ems_id => swift_manager.id) diff --git a/app/models/notification_type.rb b/app/models/notification_type.rb index d7b69c9cfd8..97e13966707 100644 --- a/app/models/notification_type.rb +++ b/app/models/notification_type.rb @@ -20,9 +20,9 @@ def subscriber_ids(subject, initiator) when AUDIENCE_GROUP subject.try(:requester).try(:current_group).try(:user_ids) when AUDIENCE_TENANT - if subject.respond_to? :tenant + if subject.respond_to?(:tenant) subject.tenant - elsif initiator.kind_of? User + elsif initiator.kind_of?(User) initiator.current_tenant end.try(:user_ids) when AUDIENCE_SUPERADMIN diff --git a/app/models/orchestration_template_azure.rb b/app/models/orchestration_template_azure.rb index 8b1037e5c38..ad43e24714b 100644 --- a/app/models/orchestration_template_azure.rb +++ b/app/models/orchestration_template_azure.rb @@ -97,7 +97,7 @@ def add_allowed_values(parameter, vals) end def add_pattern(parameter, val) - return unless val.key? 'AllowedPattern' + return unless val.key?('AllowedPattern') constraint = OrchestrationTemplate::OrchestrationParameterPattern.new(:pattern => val['AllowedPattern']) parameter.constraints << constraint diff --git a/app/models/orchestration_template_cfn.rb b/app/models/orchestration_template_cfn.rb index 5545a21d3db..24d3cda45a9 100644 --- a/app/models/orchestration_template_cfn.rb +++ b/app/models/orchestration_template_cfn.rb @@ -157,14 +157,14 @@ def policy_opt end def add_allowed_values(parameter, val) - return unless val.key? 
'AllowedValues' + return unless val.key?('AllowedValues') constraint = OrchestrationTemplate::OrchestrationParameterAllowed.new(:allowed_values => val['AllowedValues']) parameter.constraints << constraint end def add_pattern(parameter, val) - return unless val.key? 'AllowedPattern' + return unless val.key?('AllowedPattern') constraint = OrchestrationTemplate::OrchestrationParameterPattern.new(:pattern => val['AllowedPattern']) parameter.constraints << constraint diff --git a/app/models/orchestration_template_hot.rb b/app/models/orchestration_template_hot.rb index 188e407d5b4..c29aa08acd3 100644 --- a/app/models/orchestration_template_hot.rb +++ b/app/models/orchestration_template_hot.rb @@ -76,15 +76,15 @@ def validate_format def parse_constraints(raw_constraints) raw_constraints.collect do |raw_constraint| - if raw_constraint.key? 'allowed_values' + if raw_constraint.key?('allowed_values') parse_allowed_values(raw_constraint) - elsif raw_constraint.key? 'allowed_pattern' + elsif raw_constraint.key?('allowed_pattern') parse_pattern(raw_constraint) - elsif raw_constraint.key? 'length' + elsif raw_constraint.key?('length') parse_length_constraint(raw_constraint) - elsif raw_constraint.key? 'range' + elsif raw_constraint.key?('range') parse_value_constraint(raw_constraint) - elsif raw_constraint.key? 'custom_constraint' + elsif raw_constraint.key?('custom_constraint') parse_custom_constraint(raw_constraint) else raise MiqException::MiqParsingError, _("Unknown constraint %{constraint}") % {:constraint => raw_constraint} diff --git a/app/models/pxe_image_ipxe.rb b/app/models/pxe_image_ipxe.rb index ad4fd2880f3..2edf024b4ee 100644 --- a/app/models/pxe_image_ipxe.rb +++ b/app/models/pxe_image_ipxe.rb @@ -31,7 +31,7 @@ def self.parse_contents(contents, label) end if current_item[:kernel].blank? 
- _log.warn "Image #{current_item[:label]} missing kernel - Skipping" + _log.warn("Image #{current_item[:label]} missing kernel - Skipping") return [] end diff --git a/app/models/pxe_menu_ipxe.rb b/app/models/pxe_menu_ipxe.rb index dac8a4a249b..785bdec1465 100644 --- a/app/models/pxe_menu_ipxe.rb +++ b/app/models/pxe_menu_ipxe.rb @@ -43,7 +43,7 @@ def self.parse_labels(contents, labels) items << current_item bad, good = items.compact.partition { |i| i[:kernel].blank? } - bad.each { |i| _log.warn "Image #{i[:label]} missing kernel - Skipping" } + bad.each { |i| _log.warn("Image #{i[:label]} missing kernel - Skipping") } good end diff --git a/app/models/pxe_menu_pxelinux.rb b/app/models/pxe_menu_pxelinux.rb index da1acd15225..c1ea7d1f0e0 100644 --- a/app/models/pxe_menu_pxelinux.rb +++ b/app/models/pxe_menu_pxelinux.rb @@ -34,7 +34,7 @@ def self.parse_contents(contents) end bad, good = items.partition { |i| i[:kernel].blank? } - bad.each { |i| _log.warn "Image #{i[:label]} missing kernel - Skipping" } + bad.each { |i| _log.warn("Image #{i[:label]} missing kernel - Skipping") } good end diff --git a/app/models/resource_action_workflow.rb b/app/models/resource_action_workflow.rb index bfac662dd7b..a9b763d0abe 100644 --- a/app/models/resource_action_workflow.rb +++ b/app/models/resource_action_workflow.rb @@ -21,8 +21,8 @@ def initialize(values, requester, resource_action, options = {}) def dialogs msg = "[DEPRECATION] ResourceActionWorkflow#dialogs should not be used. Please use ResourceActionWorkflow#dialog instead. 
At #{caller[0]}" - $log.warn msg - Kernel.warn msg + $log.warn(msg) + Kernel.warn(msg) dialog end diff --git a/app/models/scan_item.rb b/app/models/scan_item.rb index 022b42408ff..41f835eb2e7 100644 --- a/app/models/scan_item.rb +++ b/app/models/scan_item.rb @@ -4,7 +4,7 @@ class ScanItem < ApplicationRecord include UuidMixin YAML_DIR = File.expand_path(File.join(Rails.root, "product/scan_items")) - Dir.mkdir YAML_DIR unless File.exist?(YAML_DIR) + Dir.mkdir(YAML_DIR) unless File.exist?(YAML_DIR) SAMPLE_VM_PROFILE = {:name => "sample", :description => "VM Sample", :mode => 'Vm', :read_only => true}.freeze SAMPLE_HOST_PROFILE = {:name => "host sample", :description => "Host Sample", :mode => 'Host', :read_only => true}.freeze @@ -104,7 +104,7 @@ def self.add_elements(vm, xmlNode) guid = profile.attributes['guid'] sis = ScanItemSet.find_by(:guid => guid) if sis.nil? - _log.warn "Unable to find ScanItemSet [guid: #{guid}] in the database." + _log.warn("Unable to find ScanItemSet [guid: #{guid}] in the database.") next end @@ -114,7 +114,7 @@ def self.add_elements(vm, xmlNode) si = ScanItem.find_by(:guid => guid) if si.nil? - _log.warn "Unable to find ScanItem [guid: #{guid} type: #{item_type}] in the database." + _log.warn("Unable to find ScanItem [guid: #{guid} type: #{item_type}] in the database.") next end @@ -124,11 +124,11 @@ def self.add_elements(vm, xmlNode) when 'registry' RegistryItem.add_elements(sis, si, vm, e) when 'category' - _log.debug "Skipping ScanItem [guid: #{guid} type: #{item_type}] as it is not expected in the data." 
+ _log.debug("Skipping ScanItem [guid: #{guid} type: #{item_type}] as it is not expected in the data.") when 'nteventlog' EventLog.add_elements(vm, e) else - _log.debug "Unknown ScanItem type [#{item_type}]" + _log.debug("Unknown ScanItem type [#{item_type}]") end end end diff --git a/app/models/service.rb b/app/models/service.rb index e95b58f86fd..17b9506d390 100644 --- a/app/models/service.rb +++ b/app/models/service.rb @@ -258,15 +258,15 @@ def process_group_action(action, group_idx, direction) rsc_action = service_action(action, svc_rsc) rsc_name = "#{rsc.class.name}:#{rsc.id}" + (rsc.respond_to?(:name) ? ":#{rsc.name}" : "") if rsc_action.nil? - _log.info "Not Processing action for Service:<#{name}:#{id}>, RSC:<#{rsc_name}}> in Group Idx:<#{group_idx}>" + _log.info("Not Processing action for Service:<#{name}:#{id}>, RSC:<#{rsc_name}}> in Group Idx:<#{group_idx}>") elsif rsc.respond_to?(rsc_action) - _log.info "Processing action <#{rsc_action}> for Service:<#{name}:#{id}>, RSC:<#{rsc_name}}> in Group Idx:<#{group_idx}>" + _log.info("Processing action <#{rsc_action}> for Service:<#{name}:#{id}>, RSC:<#{rsc_name}}> in Group Idx:<#{group_idx}>") rsc.send(rsc_action) else - _log.info "Skipping action <#{rsc_action}> for Service:<#{name}:#{id}>, RSC:<#{rsc.class.name}:#{rsc.id}> in Group Idx:<#{group_idx}>" + _log.info("Skipping action <#{rsc_action}> for Service:<#{name}:#{id}>, RSC:<#{rsc.class.name}:#{rsc.id}> in Group Idx:<#{group_idx}>") end rescue => err - _log.error "Error while processing Service:<#{name}> Group Idx:<#{group_idx}> Resource<#{rsc_name}>. Message:<#{err}>" + _log.error("Error while processing Service:<#{name}> Group Idx:<#{group_idx}> Resource<#{rsc_name}>. Message:<#{err}>") end end @@ -372,12 +372,12 @@ def chargeback_report_name end def generate_chargeback_report(options = {}) - _log.info "Generation of chargeback report for service #{name} started..." 
+ _log.info("Generation of chargeback report for service #{name} started...") MiqReportResult.where(:name => chargeback_report_name).destroy_all report = MiqReport.new(chargeback_yaml) options[:report_sync] = true report.queue_generate_table(options) - _log.info "Report #{chargeback_report_name} generated" + _log.info("Report #{chargeback_report_name} generated") end def chargeback_yaml @@ -395,7 +395,7 @@ def queue_chargeback_report_generation(options = {}) :method_name => "generate_chargeback_report", :args => options ) - _log.info "Added to queue: generate_chargeback_report for service #{name}" + _log.info("Added to queue: generate_chargeback_report for service #{name}") end # diff --git a/app/models/service_order.rb b/app/models/service_order.rb index d9ecb020acc..f9247979f27 100644 --- a/app/models/service_order.rb +++ b/app/models/service_order.rb @@ -103,7 +103,7 @@ def deep_copy(new_attributes = {}) request.class.send(:create, request.attributes.except(*REQUEST_ATTRIBUTES)) end new_attributes.each do |attr, value| - new_service_order.send("#{attr}=", value) if self.class.attribute_names.include? attr.to_s + new_service_order.send("#{attr}=", value) if self.class.attribute_names.include?(attr.to_s) end new_service_order.save! end diff --git a/app/models/service_template.rb b/app/models/service_template.rb index 9ff8e3b0334..cd93fb1b7a5 100644 --- a/app/models/service_template.rb +++ b/app/models/service_template.rb @@ -284,10 +284,10 @@ def set_ownership(service, user) return if user.nil? 
service.evm_owner = user if user.current_group - $log.info "Setting Service Owning User to Name=#{user.name}, ID=#{user.id}, Group to Name=#{user.current_group.name}, ID=#{user.current_group.id}" + $log.info("Setting Service Owning User to Name=#{user.name}, ID=#{user.id}, Group to Name=#{user.current_group.name}, ID=#{user.current_group.id}") service.miq_group = user.current_group else - $log.info "Setting Service Owning User to Name=#{user.name}, ID=#{user.id}" + $log.info("Setting Service Owning User to Name=#{user.name}, ID=#{user.id}") end service.save end diff --git a/app/models/service_template_provision_task.rb b/app/models/service_template_provision_task.rb index 72af388e932..c2961cfe38d 100644 --- a/app/models/service_template_provision_task.rb +++ b/app/models/service_template_provision_task.rb @@ -71,11 +71,11 @@ def after_request_task_create def create_child_tasks parent_svc = Service.find_by(:id => options[:parent_service_id]) parent_name = parent_svc.nil? ? 'none' : "#{parent_svc.class.name}:#{parent_svc.id}" - _log.info "- creating service tasks for service <#{self.class.name}:#{id}> with parent service <#{parent_name}>" + _log.info("- creating service tasks for service <#{self.class.name}:#{id}> with parent service <#{parent_name}>") tasks = source.create_tasks_for_service(self, parent_svc) tasks.each { |t| miq_request_tasks << t } - _log.info "- created <#{tasks.length}> service tasks for service <#{self.class.name}:#{id}> with parent service <#{parent_name}>" + _log.info("- created <#{tasks.length}> service tasks for service <#{self.class.name}:#{id}> with parent service <#{parent_name}>") end def do_request @@ -173,7 +173,7 @@ def mark_pending_items_as_finished def before_ae_starts(_options) reload - if state.to_s.downcase.in? 
%w(pending queued) + if state.to_s.downcase.in?(%w(pending queued)) _log.info("Executing #{request_class::TASK_DESCRIPTION} request: [#{description}]") update_and_notify_parent(:state => "active", :status => "Ok", :message => "In Process") end diff --git a/app/models/session.rb b/app/models/session.rb index 7862ec7c340..40c67d9c9cb 100644 --- a/app/models/session.rb +++ b/app/models/session.rb @@ -8,7 +8,7 @@ def self.enabled? end def self.check_session_timeout - $log.debug "Checking session data" + $log.debug("Checking session data") purge(::Settings.session.timeout) end diff --git a/app/models/snapshot.rb b/app/models/snapshot.rb index 9b4449ae5e1..f2acdac574e 100644 --- a/app/models/snapshot.rb +++ b/app/models/snapshot.rb @@ -75,11 +75,11 @@ def self.parse_evm_snapshot_description(description) end def self.remove_unused_evm_snapshots(delay) - _log.debug "Called" + _log.debug("Called") find_all_evm_snapshots.each do |sn| job_guid, timestamp = parse_evm_snapshot_description(sn.description) unless Job.guid_active?(job_guid, timestamp, delay) - _log.info "Removing #{sn.description.inspect} under Vm [#{sn.vm_or_template.name}]" + _log.info("Removing #{sn.description.inspect} under Vm [#{sn.vm_or_template.name}]") sn.vm_or_template.remove_evm_snapshot_queue(sn.id) end end diff --git a/app/models/storage.rb b/app/models/storage.rb index 7ae1519a59c..95aa083af2d 100644 --- a/app/models/storage.rb +++ b/app/models/storage.rb @@ -16,8 +16,8 @@ class Storage < ApplicationRecord has_many :vim_performance_states, :as => :resource # Destroy will be handled by purger has_many :storage_files, :dependent => :destroy - has_many :storage_files_files, -> { where "rsc_type = 'file'" }, :class_name => "StorageFile", :foreign_key => "storage_id" - has_many :files, -> { where "rsc_type = 'file'" }, :class_name => "StorageFile", :foreign_key => "storage_id" + has_many :storage_files_files, -> { where("rsc_type = 'file'") }, :class_name => "StorageFile", :foreign_key => "storage_id" + 
has_many :files, -> { where("rsc_type = 'file'") }, :class_name => "StorageFile", :foreign_key => "storage_id" has_many :host_storages has_many :miq_events, :as => :target, :dependent => :destroy @@ -136,7 +136,7 @@ def scan_starting(miq_task_id, host) end def scan_complete_callback(miq_task_id, status, _message, result) - _log.info "Storage ID: [#{id}], MiqTask ID: [#{miq_task_id}], Status: [#{status}]" + _log.info("Storage ID: [#{id}], MiqTask ID: [#{miq_task_id}], Status: [#{status}]") miq_task = MiqTask.find_by(:id => miq_task_id) if miq_task.nil? @@ -180,7 +180,7 @@ def scan_complete_callback(miq_task_id, status, _message, result) def scan_queue_item(miq_task_id) MiqEvent.raise_evm_job_event(self, :type => "scan", :prefix => "request") - _log.info "Queueing SmartState Analysis for Storage ID: [#{id}], MiqTask ID: [#{miq_task_id}]" + _log.info("Queueing SmartState Analysis for Storage ID: [#{id}], MiqTask ID: [#{miq_task_id}]") cb = {:class_name => self.class.name, :instance_id => id, :method_name => :scan_complete_callback, :args => [miq_task_id]} MiqQueue.submit_job( :service => "ems_operations", @@ -271,7 +271,7 @@ def self.scan_watchdog(miq_task_id) end if scan_complete?(miq_task) - _log.info scan_complete_message(miq_task).to_s + _log.info(scan_complete_message(miq_task).to_s) return end @@ -279,7 +279,7 @@ def self.scan_watchdog(miq_task_id) locked_miq_task.context_data[:pending].each do |storage_id, qitem_id| qitem = MiqQueue.find_by(:id => qitem_id) if qitem.nil? - _log.warn "Pending Scan for Storage ID: [#{storage_id}] is missing MiqQueue ID: [#{qitem_id}] - will requeue" + _log.warn("Pending Scan for Storage ID: [#{storage_id}] is missing MiqQueue ID: [#{qitem_id}] - will requeue") locked_miq_task.context_data[:pending].delete(storage_id) locked_miq_task.save! scan_queue(locked_miq_task) @@ -303,23 +303,23 @@ def self.max_parallel_storage_scans_per_host def self.scan_eligible_storages(zone_name = nil) zone_caption = zone_name ? 
" for zone [#{zone_name}]" : "" - _log.info "Computing#{zone_caption} Started" + _log.info("Computing#{zone_caption} Started") storages = [] where(:store_type => SUPPORTED_STORAGE_TYPES).each do |storage| unless storage.perf_capture_enabled? - _log.info "Skipping scan of Storage: [#{storage.name}], performance capture is not enabled" + _log.info("Skipping scan of Storage: [#{storage.name}], performance capture is not enabled") next end if zone_name && storage.ext_management_systems_in_zone(zone_name).empty? - _log.info "Skipping scan of Storage: [#{storage.name}], storage under EMS in a different zone from [#{zone_name}]" + _log.info("Skipping scan of Storage: [#{storage.name}], storage under EMS in a different zone from [#{zone_name}]") next end storages << storage end - _log.info "Computing#{zone_caption} Complete -- Storage IDs: #{storages.collect(&:id).sort.inspect}" + _log.info("Computing#{zone_caption} Complete -- Storage IDs: #{storages.collect(&:id).sort.inspect}") storages end @@ -335,7 +335,7 @@ def self.create_scan_task(task_name, userid, storages) :context_data => context_data ) - _log.info "Created MiqTask ID: [#{miq_task.id}], Name: [#{task_name}]" + _log.info("Created MiqTask ID: [#{miq_task.id}], Name: [#{task_name}]") max_qitems = max_qitems_per_scan_request max_qitems = storages.length unless max_qitems.kind_of?(Numeric) && (max_qitems > 0) # Queue them all (unlimited) unless greater than 0 @@ -348,7 +348,7 @@ def self.scan_timer(zone_name = nil) storages = scan_eligible_storages(zone_name) if storages.empty? - _log.info "No Eligible Storages" + _log.info("No Eligible Storages") return nil end @@ -528,7 +528,7 @@ def smartstate_analysis(miq_task_id = nil) hosts = active_hosts_with_authentication_status_ok_in_zone(MiqServer.my_zone) if hosts.empty? message = "There are no active Hosts with valid credentials connected to Storage: [#{name}] in Zone: [#{MiqServer.my_zone}]." 
- _log.warn message + _log.warn(message) raise MiqException::MiqUnreachableStorage, _("There are no active Hosts with valid credentials connected to Storage: [%{name}] in Zone: [%{zone}].") % {:name => name, :zone => MiqServer.my_zone} @@ -551,15 +551,15 @@ def smartstate_analysis(miq_task_id = nil) st = Time.now message = "Storage [#{name}] via Host [#{host.name}]" - _log.info "#{message}...Starting" + _log.info("#{message}...Starting") scan_starting(miq_task_id, host) if host.respond_to?(:refresh_files_on_datastore) host.refresh_files_on_datastore(self) else - _log.warn "#{message}...Not Supported for #{host.class.name}" + _log.warn("#{message}...Not Supported for #{host.class.name}") end update_attribute(:last_scan_on, Time.now.utc) - _log.info "#{message}...Completed in [#{Time.now - st}] seconds" + _log.info("#{message}...Completed in [#{Time.now - st}] seconds") begin MiqEvent.raise_evm_job_event(self, :type => "scan", :suffix => "complete") @@ -695,7 +695,7 @@ def perf_capture(interval_name, *_args) log_header = "[#{interval_name}]" - _log.info "#{log_header} Capture for #{log_target}..." 
+ _log.info("#{log_header} Capture for #{log_target}...") klass, meth = Metric::Helper.class_and_association_for_interval_name(interval_name) @@ -821,7 +821,7 @@ def perf_capture(interval_name, *_args) perf_rollup_to_parents(interval_name, hour) end - _log.info "#{log_header} Capture for #{log_target}...Complete - Timings: #{t.inspect}" + _log.info("#{log_header} Capture for #{log_target}...Complete - Timings: #{t.inspect}") end def update_vm_perf(vm, vm_perf, vm_attrs) diff --git a/app/models/storage_performance.rb b/app/models/storage_performance.rb index a52c06ae498..eb8f6ded898 100644 --- a/app/models/storage_performance.rb +++ b/app/models/storage_performance.rb @@ -1,5 +1,5 @@ class StoragePerformance < MetricRollup - default_scope { where "resource_type = 'Storage' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'Storage' and resource_id IS NOT NULL") } belongs_to :storage, :foreign_key => :resource_id end diff --git a/app/models/system_console.rb b/app/models/system_console.rb index 2af98d8d625..0c71da7227b 100644 --- a/app/models/system_console.rb +++ b/app/models/system_console.rb @@ -90,7 +90,7 @@ def self.is_local?(originating_server) end def self.launch_proxy_if_not_local(console_args, originating_server, host_address, host_port) - _log.info "Originating server: #{originating_server}, local server: #{MiqServer.my_server.id}" + _log.info("Originating server: #{originating_server}, local server: #{MiqServer.my_server.id}") if ::Settings.server.console_proxy_disabled || SystemConsole.is_local?(originating_server) console_args.update( @@ -102,8 +102,8 @@ def self.launch_proxy_if_not_local(console_args, originating_server, host_addres proxy_address, proxy_port, proxy_pid = SystemConsole.launch_proxy(host_address, host_port) return nil if proxy_address.nil? 
- _log.info "Proxy server started: #{proxy_address}:#{proxy_port} <--> #{host_address}:#{host_port}" - _log.info "Proxy process PID: #{proxy_pid}" + _log.info("Proxy server started: #{proxy_address}:#{proxy_port} <--> #{host_address}:#{host_port}") + _log.info("Proxy process PID: #{proxy_pid}") console_args.update( :host_name => proxy_address, diff --git a/app/models/tag.rb b/app/models/tag.rb index ae2480e9d33..916b4bfdf0d 100644 --- a/app/models/tag.rb +++ b/app/models/tag.rb @@ -24,7 +24,7 @@ def self.list(object, options = {}) end begin - predicate.inject(object) { |target, method| target.public_send method } + predicate.inject(object) { |target, method| target.public_send(method) } rescue NoMethodError "" end @@ -52,7 +52,7 @@ def self.tags(options = {}) end def self.parse(list) - if list.kind_of? Array + if list.kind_of?(Array) tag_names = list.collect { |tag| tag.nil? ? nil : tag.to_s } return tag_names.compact else @@ -68,7 +68,7 @@ def self.parse(list) list.tr!(',', " ") # then, get whatever's left - tag_names.concat list.split(/\s/) + tag_names.concat(list.split(/\s/)) # strip whitespace from the names tag_names = tag_names.map(&:strip) diff --git a/app/models/time_profile.rb b/app/models/time_profile.rb index c63a9c70ed6..a37bd7a6d4b 100644 --- a/app/models/time_profile.rb +++ b/app/models/time_profile.rb @@ -10,7 +10,7 @@ class TimeProfile < ApplicationRecord has_many :miq_reports has_many :metric_rollups - scope :rollup_daily_metrics, -> { where :rollup_daily_metrics => true } + scope :rollup_daily_metrics, -> { where(:rollup_daily_metrics => true) } after_create :rebuild_daily_metrics_on_create after_save :rebuild_daily_metrics_on_save diff --git a/app/models/vim_performance_analysis.rb b/app/models/vim_performance_analysis.rb index e07a948237b..94ef5b1c98e 100644 --- a/app/models/vim_performance_analysis.rb +++ b/app/models/vim_performance_analysis.rb @@ -535,7 +535,7 @@ def self.group_perf_by_timestamp(obj, perfs, cols = nil) mm[k] = val unless 
val.nil? mm end - h.reject! { |k, _v| perf_klass.virtual_attribute? k } + h.reject! { |k, _v| perf_klass.virtual_attribute?(k) } end result.inject([]) do |recs, k| diff --git a/app/models/vm.rb b/app/models/vm.rb index d4895c33df1..6547e3e5482 100644 --- a/app/models/vm.rb +++ b/app/models/vm.rb @@ -73,7 +73,7 @@ def running_processes pl = {} check = validate_collect_running_processes unless check[:message].nil? - _log.warn check[:message].to_s + _log.warn(check[:message].to_s) return pl end @@ -82,14 +82,14 @@ def running_processes cred = my_zone_obj.auth_user_pwd(:windows_domain) ipaddresses.each do |ipaddr| break unless pl.blank? - _log.info "Running processes for VM:[#{id}:#{name}] IP:[#{ipaddr}] Logon:[#{cred[0]}]" + _log.info("Running processes for VM:[#{id}:#{name}] IP:[#{ipaddr}] Logon:[#{cred[0]}]") begin wmi = WMIHelper.connectServer(ipaddr, *cred) pl = MiqProcess.process_list_all(wmi) unless wmi.nil? rescue => wmi_err - _log.warn wmi_err.to_s + _log.warn(wmi_err.to_s) end - _log.info "Running processes for VM:[#{id}:#{name}] Count:[#{pl.length}]" + _log.info("Running processes for VM:[#{id}:#{name}] Count:[#{pl.length}]") end rescue => err _log.log_backtrace(err) diff --git a/app/models/vm/operations.rb b/app/models/vm/operations.rb index 0599c16b717..ed293f51d08 100644 --- a/app/models/vm/operations.rb +++ b/app/models/vm/operations.rb @@ -10,7 +10,7 @@ module Vm::Operations included do supports :launch_cockpit do if ipaddresses.blank? - unsupported_reason_add :launch_cockpit, 'Launching of Cockpit requires an IP address for the VM.' 
+ unsupported_reason_add(:launch_cockpit, 'Launching of Cockpit requires an IP address for the VM.') end end end diff --git a/app/models/vm_metric.rb b/app/models/vm_metric.rb index 4f246fa6371..b3b6ecdc074 100644 --- a/app/models/vm_metric.rb +++ b/app/models/vm_metric.rb @@ -1,5 +1,5 @@ class VmMetric < Metric - default_scope { where "resource_type = 'VmOrTemplate' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'VmOrTemplate' and resource_id IS NOT NULL") } belongs_to :host, :foreign_key => :parent_host_id belongs_to :ems_cluster, :foreign_key => :parent_ems_cluster_id diff --git a/app/models/vm_migrate_task.rb b/app/models/vm_migrate_task.rb index 3841177298a..e202a2ef5dd 100644 --- a/app/models/vm_migrate_task.rb +++ b/app/models/vm_migrate_task.rb @@ -61,7 +61,7 @@ def do_request :migrate end - _log.warn "Calling VM #{vc_method} for #{vm.id}:#{vm.name}" + _log.warn("Calling VM #{vc_method} for #{vm.id}:#{vm.name}") if vc_method == :migrate vm.migrate(host, respool) else diff --git a/app/models/vm_or_template.rb b/app/models/vm_or_template.rb index 670cf1b6f12..6c1066796b8 100644 --- a/app/models/vm_or_template.rb +++ b/app/models/vm_or_template.rb @@ -89,14 +89,14 @@ class VmOrTemplate < ApplicationRecord # System Services - Win32_Services, Kernel drivers, Filesystem drivers has_many :system_services, :dependent => :destroy - has_many :win32_services, -> { where "typename = 'win32_service'" }, :class_name => "SystemService" - has_many :kernel_drivers, -> { where "typename = 'kernel' OR typename = 'misc'" }, :class_name => "SystemService" - has_many :filesystem_drivers, -> { where "typename = 'filesystem'" }, :class_name => "SystemService" - has_many :linux_initprocesses, -> { where "typename = 'linux_initprocess' OR typename = 'linux_systemd'" }, :class_name => "SystemService" + has_many :win32_services, -> { where("typename = 'win32_service'") }, :class_name => "SystemService" + has_many :kernel_drivers, -> { where("typename = 
'kernel' OR typename = 'misc'") }, :class_name => "SystemService" + has_many :filesystem_drivers, -> { where("typename = 'filesystem'") }, :class_name => "SystemService" + has_many :linux_initprocesses, -> { where("typename = 'linux_initprocess' OR typename = 'linux_systemd'") }, :class_name => "SystemService" has_many :filesystems, :as => :resource, :dependent => :destroy - has_many :directories, -> { where "rsc_type = 'dir'" }, :as => :resource, :class_name => "Filesystem" - has_many :files, -> { where "rsc_type = 'file'" }, :as => :resource, :class_name => "Filesystem" + has_many :directories, -> { where("rsc_type = 'dir'") }, :as => :resource, :class_name => "Filesystem" + has_many :files, -> { where("rsc_type = 'file'") }, :as => :resource, :class_name => "Filesystem" has_many :scan_histories, :dependent => :destroy has_many :lifecycle_events, :class_name => "LifecycleEvent" @@ -110,7 +110,7 @@ class VmOrTemplate < ApplicationRecord has_many :vim_performance_states, :as => :resource # Destroy will be handled by purger has_many :storage_files, :dependent => :destroy - has_many :storage_files_files, -> { where "rsc_type = 'file'" }, :class_name => "StorageFile" + has_many :storage_files_files, -> { where("rsc_type = 'file'") }, :class_name => "StorageFile" # EMS Events has_many :ems_events, ->(vmt) { where(["vm_or_template_id = ? OR dest_vm_or_template_id = ?", vmt.id, vmt.id]).order(:timestamp) }, @@ -608,13 +608,13 @@ def self.repository_parse_path(path) #it's empty string for local type storage_name = "" # NAS - relative_path = if path.starts_with? "//" + relative_path = if path.starts_with?("//") raise _("path, '%{path}', is malformed") % {:path => path} unless path =~ %r{^//[^/].*/.+$} # path is a UNC storage_name = path.split("/")[0..3].join("/") path.split("/")[4..path.length].join("/") if path.length > 4 #VMFS - elsif path.starts_with? 
"[" + elsif path.starts_with?("[") raise _("path, '%{path}', is malformed") % {:path => path} unless path =~ /^\[[^\]].+\].*$/ # path is a VMWare storage name /^\[(.*)\](.*)$/ =~ path @@ -652,7 +652,7 @@ def disconnect_stack(stack = nil) return if stack && stack != orchestration_stack log_text = " from stack [#{orchestration_stack.name}] id [#{orchestration_stack.id}]" - _log.info "Disconnecting Vm [#{name}] id [#{id}]#{log_text}" + _log.info("Disconnecting Vm [#{name}] id [#{id}]#{log_text}") self.orchestration_stack = nil save @@ -660,7 +660,7 @@ def disconnect_stack(stack = nil) def connect_ems(e) unless ext_management_system == e - _log.debug "Connecting Vm [#{name}] id [#{id}] to EMS [#{e.name}] id [#{e.id}]" + _log.debug("Connecting Vm [#{name}] id [#{id}] to EMS [#{e.name}] id [#{e.id}]") self.ext_management_system = e save end @@ -669,7 +669,7 @@ def connect_ems(e) def disconnect_ems(e = nil) if e.nil? || ext_management_system == e log_text = " from EMS [#{ext_management_system.name}] id [#{ext_management_system.id}]" unless ext_management_system.nil? - _log.info "Disconnecting Vm [#{name}] id [#{id}]#{log_text}" + _log.info("Disconnecting Vm [#{name}] id [#{id}]#{log_text}") self.ext_management_system = nil self.ems_cluster = nil @@ -680,7 +680,7 @@ def disconnect_ems(e = nil) def connect_host(h) unless host == h - _log.debug "Connecting Vm [#{name}] id [#{id}] to Host [#{h.name}] id [#{h.id}]" + _log.debug("Connecting Vm [#{name}] id [#{id}] to Host [#{h.name}] id [#{h.id}]") self.host = h save @@ -692,7 +692,7 @@ def connect_host(h) def disconnect_host(h = nil) if h.nil? || host == h log_text = " from Host [#{host.name}] id [#{host.id}]" unless host.nil? 
- _log.info "Disconnecting Vm [#{name}] id [#{id}]#{log_text}" + _log.info("Disconnecting Vm [#{name}] id [#{id}]#{log_text}") self.host = nil save @@ -704,7 +704,7 @@ def disconnect_host(h = nil) def connect_storage(s) unless storage == s - _log.debug "Connecting Vm [#{name}] id [#{id}] to #{ui_lookup(:table => "storages")} [#{s.name}] id [#{s.id}]" + _log.debug("Connecting Vm [#{name}] id [#{id}] to #{ui_lookup(:table => "storages")} [#{s.name}] id [#{s.id}]") self.storage = s save end @@ -714,7 +714,7 @@ def disconnect_storage(s = nil) if s.nil? || storage == s || storages.include?(s) stores = s.nil? ? ([storage] + storages).compact.uniq : [s] log_text = stores.collect { |x| "#{ui_lookup(:table => "storages")} [#{x.name}] id [#{x.id}]" }.join(", ") - _log.info "Disconnecting Vm [#{name}] id [#{id}] from #{log_text}" + _log.info("Disconnecting Vm [#{name}] id [#{id}] from #{log_text}") if s.nil? self.storage = nil @@ -883,11 +883,11 @@ def my_zone_obj # TODO: Come back to this def proxies4job(_job = nil) - _log.debug "Enter" + _log.debug("Enter") all_proxy_list = storage2proxies proxies = storage2active_proxies(all_proxy_list) - _log.debug "# proxies = #{proxies.length}" + _log.debug("# proxies = #{proxies.length}") msg = if all_proxy_list.empty? "No active SmartProxies found to analyze this VM" @@ -947,15 +947,15 @@ def storage2proxies def storage2active_proxies(all_proxy_list = nil) all_proxy_list ||= storage2proxies - _log.debug "all_proxy_list.length = #{all_proxy_list.length}" + _log.debug("all_proxy_list.length = #{all_proxy_list.length}") proxies = all_proxy_list.select(&:is_proxy_active?) - _log.debug "proxies1.length = #{proxies.length}" + _log.debug("proxies1.length = #{proxies.length}") # MiqServer coresident proxy needs to contact the host and provide credentials. # Remove any MiqServer instances if we do not have credentials rsc = self.scan_via_ems? ? 
ext_management_system : host proxies.delete_if { |p| MiqServer === p } if rsc && !rsc.authentication_status_ok? - _log.debug "proxies2.length = #{proxies.length}" + _log.debug("proxies2.length = #{proxies.length}") proxies end @@ -981,34 +981,34 @@ def miq_server_proxies when 'microsoft' return [] if storage_id.blank? else - _log.debug "else" + _log.debug("else") return [] end host_server_ids = host ? host.vm_scan_affinity.collect(&:id) : [] - _log.debug "host_server_ids.length = #{host_server_ids.length}" + _log.debug("host_server_ids.length = #{host_server_ids.length}") storage_server_ids = storages.collect { |s| s.vm_scan_affinity.collect(&:id) }.reject(&:blank?) - _log.debug "storage_server_ids.length = #{storage_server_ids.length}" + _log.debug("storage_server_ids.length = #{storage_server_ids.length}") all_storage_server_ids = storage_server_ids.inject(:&) || [] - _log.debug "all_storage_server_ids.length = #{all_storage_server_ids.length}" + _log.debug("all_storage_server_ids.length = #{all_storage_server_ids.length}") srs = self.class.miq_servers_for_scan - _log.debug "srs.length = #{srs.length}" + _log.debug("srs.length = #{srs.length}") miq_servers = srs.select do |svr| (svr.vm_scan_host_affinity? ? host_server_ids.detect { |id| id == svr.id } : host_server_ids.empty?) && (svr.vm_scan_storage_affinity? ? all_storage_server_ids.detect { |id| id == svr.id } : storage_server_ids.empty?) end - _log.debug "miq_servers1.length = #{miq_servers.length}" + _log.debug("miq_servers1.length = #{miq_servers.length}") miq_servers.select! do |svr| result = svr.status == "started" && svr.has_zone?(my_zone) result &&= svr.is_vix_disk? 
if vendor == 'vmware' result end - _log.debug "miq_servers2.length = #{miq_servers.length}" + _log.debug("miq_servers2.length = #{miq_servers.length}") miq_servers end @@ -1244,7 +1244,7 @@ def self.find_by_path(path) begin storage_id, location = parse_path(path) rescue - _log.warn "Invalid path specified [#{path}]" + _log.warn("Invalid path specified [#{path}]") return nil end VmOrTemplate.find_by(:storage_id => storage_id, :location => location) @@ -1618,12 +1618,12 @@ def self.vms_by_ipaddress(ipaddress) end def self.scan_by_property(property, value, _options = {}) - _log.info "scan_vm_by_property called with property:[#{property}] value:[#{value}]" + _log.info("scan_vm_by_property called with property:[#{property}] value:[#{value}]") case property when "ipaddress" vms_by_ipaddress(value) do |vm| if vm.state == "on" - _log.info "Initiating VM scan for [#{vm.id}:#{vm.name}]" + _log.info("Initiating VM scan for [#{vm.id}:#{vm.name}]") vm.scan end end @@ -1633,7 +1633,7 @@ def self.scan_by_property(property, value, _options = {}) end def self.event_by_property(property, value, event_type, event_message, event_time = nil, _options = {}) - _log.info "event_vm_by_property called with property:[#{property}] value:[#{value}] type:[#{event_type}] message:[#{event_message}] event_time:[#{event_time}]" + _log.info("event_vm_by_property called with property:[#{property}] value:[#{value}] type:[#{event_type}] message:[#{event_message}] event_time:[#{event_time}]") event_timestamp = event_time.blank? ? Time.now.utc : event_time.to_time(:utc) case property diff --git a/app/models/vm_or_template/scanning.rb b/app/models/vm_or_template/scanning.rb index 3c5d0c084d7..081f5a73a0e 100644 --- a/app/models/vm_or_template/scanning.rb +++ b/app/models/vm_or_template/scanning.rb @@ -8,7 +8,7 @@ def scan(userid = "system", options = {}) .where(:sync_key => guid) .pluck(:id) unless j.blank? - _log.info "VM scan job will not be added due to existing scan job waiting to be processed. 
VM ID:[#{id}] Name:[#{name}] Guid:[#{guid}] Existing Job IDs [#{j.join(", ")}]" + _log.info("VM scan job will not be added due to existing scan job waiting to be processed. VM ID:[#{id}] Name:[#{name}] Guid:[#{guid}] Existing Job IDs [#{j.join(", ")}]") return nil end @@ -25,7 +25,7 @@ def raw_scan(userid = "system", options = {}) }.merge(options) options[:zone] = ext_management_system.my_zone unless ext_management_system.nil? - _log.info "NAME [#{options[:name]}] SCAN [#{options[:categories].inspect}] [#{options[:categories].class}]" + _log.info("NAME [#{options[:name]}] SCAN [#{options[:categories].inspect}] [#{options[:categories].class}]") self.last_scan_attempt_on = Time.now.utc save diff --git a/app/models/vm_performance.rb b/app/models/vm_performance.rb index c35b6dadc25..ad0e8820d27 100644 --- a/app/models/vm_performance.rb +++ b/app/models/vm_performance.rb @@ -1,5 +1,5 @@ class VmPerformance < MetricRollup - default_scope { where "resource_type = 'VmOrTemplate' and resource_id IS NOT NULL" } + default_scope { where("resource_type = 'VmOrTemplate' and resource_id IS NOT NULL") } belongs_to :host, :foreign_key => :parent_host_id belongs_to :ems_cluster, :foreign_key => :parent_ems_cluster_id diff --git a/app/models/vm_scan.rb b/app/models/vm_scan.rb index 547c80a46af..0f802b11bac 100644 --- a/app/models/vm_scan.rb +++ b/app/models/vm_scan.rb @@ -38,7 +38,7 @@ def load_transitions end def call_check_policy - _log.info "Enter" + _log.info("Enter") begin vm = VmOrTemplate.find(target_id) @@ -88,7 +88,7 @@ def check_policy_complete(from_zone, status, message, result) end def call_snapshot_create - _log.info "Enter" + _log.info("Enter") begin vm = VmOrTemplate.find(target_id) @@ -143,12 +143,12 @@ def call_snapshot_create end def wait_for_vim_broker - _log.info "Enter" + _log.info("Enter") i = 0 loop do set_status("Waiting for VimBroker to become available (#{i += 1})") sleep(60) - _log.info "Checking VimBroker connection status. 
Count=[#{i}]" + _log.info("Checking VimBroker connection status. Count=[#{i}]") break if MiqVimBrokerWorker.available? end @@ -156,7 +156,7 @@ def wait_for_vim_broker end def call_scan - _log.info "Enter" + _log.info("Enter") begin host = MiqServer.find(miq_server_id) @@ -176,8 +176,8 @@ def call_scan end end if ems_list[scan_ci_type] - _log.info "[#{host.name}] communicates with [#{scan_ci_type}:#{ems_list[scan_ci_type][:hostname]}"\ - "(#{ems_list[scan_ci_type][:address]})] to scan vm [#{vm.name}]" + _log.info("[#{host.name}] communicates with [#{scan_ci_type}:#{ems_list[scan_ci_type][:hostname]}"\ + "(#{ems_list[scan_ci_type][:address]})] to scan vm [#{vm.name}]") end vm.scan_metadata(options[:categories], "taskid" => jobid, "host" => host, "args" => [YAML.dump(scan_args)]) rescue Timeout::Error @@ -223,7 +223,7 @@ def create_scan_args(vm) end def call_snapshot_delete - _log.info "Enter" + _log.info("Enter") # TODO: remove snapshot here if Vm was running vm = VmOrTemplate.find(target_id) @@ -275,7 +275,7 @@ def call_snapshot_delete end def call_synchronize - _log.info "Enter" + _log.info("Enter") begin host = MiqServer.find(miq_server_id) @@ -300,22 +300,22 @@ def call_synchronize end def synchronizing - _log.info "." + _log.info(".") end def scanning - _log.info "." if context[:scan_attempted] + _log.info(".") if context[:scan_attempted] context[:scan_attempted] = true end def process_data(*args) - _log.info "starting..." 
+ _log.info("starting...") data = args.first set_status("Processing VM data") doc = MiqXml.load(data) - _log.info "Document=#{doc.root.name.downcase}" + _log.info("Document=#{doc.root.name.downcase}") if doc.root.name.downcase == "summary" doc.root.each_element do |s| @@ -401,7 +401,7 @@ def delete_snapshot(mor, vm = nil) end rescue => err _log.error(err.message) - _log.debug err.backtrace.join("\n") + _log.debug(err.backtrace.join("\n")) end else end_user_event_message(vm) @@ -463,7 +463,7 @@ def snapshotDescription(type = nil) def process_cancel(*args) options = args.first || {} - _log.info "job canceling, #{options[:message]}" + _log.info("job canceling, #{options[:message]}") begin delete_snapshot(context[:snapshot_mor]) @@ -594,7 +594,7 @@ def log_user_event(user_event, vm) begin vm.ext_management_system.vm_log_user_event(vm, user_event) rescue => err - _log.warn "Failed to log user event with EMS. Error: [#{err.class.name}]: #{err} Event message [#{user_event}]" + _log.warn("Failed to log user event with EMS. 
Error: [#{err.class.name}]: #{err} Event message [#{user_event}]") end end end diff --git a/app/models/vmdb_database.rb b/app/models/vmdb_database.rb index c3fdabffce3..32793ffc52b 100644 --- a/app/models/vmdb_database.rb +++ b/app/models/vmdb_database.rb @@ -2,7 +2,7 @@ class VmdbDatabase < ApplicationRecord has_many :vmdb_tables, :dependent => :destroy has_many :evm_tables, :class_name => 'VmdbTableEvm' has_many :vmdb_database_metrics, :dependent => :destroy - has_one :latest_hourly_metric, -> { where(:capture_interval_name => 'hourly').order "timestamp DESC" }, :class_name => 'VmdbDatabaseMetric' + has_one :latest_hourly_metric, -> { where(:capture_interval_name => 'hourly').order("timestamp DESC") }, :class_name => 'VmdbDatabaseMetric' virtual_has_many :vmdb_database_settings virtual_has_many :vmdb_database_connections @@ -30,7 +30,7 @@ def my_metrics end def size - ActiveRecord::Base.connection.database_size name + ActiveRecord::Base.connection.database_size(name) end def top_tables_by(sorted_by, limit = nil) diff --git a/app/models/vmdb_database_connection.rb b/app/models/vmdb_database_connection.rb index f653afe3b8d..2b3518528e5 100644 --- a/app/models/vmdb_database_connection.rb +++ b/app/models/vmdb_database_connection.rb @@ -66,7 +66,7 @@ def command end def spid - read_attribute 'pid' + read_attribute('pid') end def task_state diff --git a/app/models/zone.rb b/app/models/zone.rb index ba400382a5f..5ede8ab9605 100644 --- a/app/models/zone.rb +++ b/app/models/zone.rb @@ -127,15 +127,15 @@ def self.clusters_without_a_zone end def ems_infras - ext_management_systems.select { |e| e.kind_of? EmsInfra } + ext_management_systems.select { |e| e.kind_of?(EmsInfra) } end def ems_containers - ext_management_systems.select { |e| e.kind_of? ManageIQ::Providers::ContainerManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::ContainerManager) } end def ems_middlewares - ext_management_systems.select { |e| e.kind_of? 
ManageIQ::Providers::MiddlewareManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::MiddlewareManager) } end def middleware_servers @@ -143,23 +143,23 @@ def middleware_servers end def ems_datawarehouses - ext_management_systems.select { |e| e.kind_of? ManageIQ::Providers::DatawarehouseManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::DatawarehouseManager) } end def ems_monitors - ext_management_systems.select { |e| e.kind_of? ManageIQ::Providers::MonitoringManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::MonitoringManager) } end def ems_configproviders - ext_management_systems.select { |e| e.kind_of? ManageIQ::Providers::ConfigurationManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::ConfigurationManager) } end def ems_clouds - ext_management_systems.select { |e| e.kind_of? EmsCloud } + ext_management_systems.select { |e| e.kind_of?(EmsCloud) } end def ems_networks - ext_management_systems.select { |e| e.kind_of? 
ManageIQ::Providers::NetworkManager } + ext_management_systems.select { |e| e.kind_of?(ManageIQ::Providers::NetworkManager) } end def availability_zones diff --git a/bin/setup b/bin/setup index 0dd29fe6286..637156aae2c 100755 --- a/bin/setup +++ b/bin/setup @@ -14,7 +14,7 @@ EOS exit 1 end -Dir.chdir ManageIQ::Environment::APP_ROOT do +Dir.chdir(ManageIQ::Environment::APP_ROOT) do ManageIQ::Environment.ensure_config_files puts '== Installing dependencies ==' diff --git a/bin/update b/bin/update index 1bb99910fae..0f064ec2364 100755 --- a/bin/update +++ b/bin/update @@ -16,7 +16,7 @@ end ENV["SKIP_TEST_RESET"] = 'true' if ENV['RAILS_ENV'] == 'production' -Dir.chdir ManageIQ::Environment::APP_ROOT do +Dir.chdir(ManageIQ::Environment::APP_ROOT) do ManageIQ::Environment.ensure_config_files puts '== Installing dependencies ==' diff --git a/config.ru b/config.ru index 9220ea2f271..44c9cbf33c2 100644 --- a/config.ru +++ b/config.ru @@ -1,4 +1,4 @@ # This file is used by Rack-based servers to start the application. 
require ::File.expand_path('../config/environment', __FILE__) -run Vmdb::Application +run(Vmdb::Application) diff --git a/config/boot.rb b/config/boot.rb index 7e692f99222..b08da34aa61 100644 --- a/config/boot.rb +++ b/config/boot.rb @@ -5,4 +5,4 @@ require 'bundler/setup' if File.exist?(ENV['BUNDLE_GEMFILE']) # add the lib dir of the engine if we are running as a dummy app for an engine -$LOAD_PATH.unshift File.expand_path('../../../lib', __dir__) if defined?(ENGINE_ROOT) +$LOAD_PATH.unshift(File.expand_path('../../../lib', __dir__)) if defined?(ENGINE_ROOT) diff --git a/config/environments/test.rb b/config/environments/test.rb index 6b6b10a2eb4..ca08c8a39a0 100644 --- a/config/environments/test.rb +++ b/config/environments/test.rb @@ -48,8 +48,8 @@ # Any exception that gets past our ApplicationController's rescue_from # should just be raised intact - config.middleware.delete ::ActionDispatch::ShowExceptions - config.middleware.delete ::ActionDispatch::DebugExceptions + config.middleware.delete(::ActionDispatch::ShowExceptions) + config.middleware.delete(::ActionDispatch::DebugExceptions) # Customize any additional options below... diff --git a/config/initializers/as_to_time.rb b/config/initializers/as_to_time.rb index 424dd873745..f75513d4abf 100644 --- a/config/initializers/as_to_time.rb +++ b/config/initializers/as_to_time.rb @@ -8,7 +8,7 @@ class String def to_time(form = OBJ) if form == OBJ - ActiveSupport::Deprecation.warn "Rails 4 changes the default of String#to_time to local. Please pass the type of conversion you want, like to_time(:utc) or to_time(:local)", caller.drop(1) + ActiveSupport::Deprecation.warn("Rails 4 changes the default of String#to_time to local. 
Please pass the type of conversion you want, like to_time(:utc) or to_time(:local)", caller.drop(1)) old_to_time(:utc) else old_to_time(form) diff --git a/config/initializers/permissions_repository.rb b/config/initializers/permissions_repository.rb index c8521de732d..b3589332df3 100644 --- a/config/initializers/permissions_repository.rb +++ b/config/initializers/permissions_repository.rb @@ -1,7 +1,7 @@ require 'vmdb/permission_stores' Vmdb::PermissionStores.configure do |config| - yaml_filename = Rails.root.join 'config', 'permissions.yml' + yaml_filename = Rails.root.join('config', 'permissions.yml') if File.exist?(yaml_filename) config.backend = 'yaml' config.options[:filename] = yaml_filename diff --git a/config/initializers/session_store.rb b/config/initializers/session_store.rb index 5f6f26de34a..d992c109c10 100644 --- a/config/initializers/session_store.rb +++ b/config/initializers/session_store.rb @@ -1,5 +1,5 @@ if ENV['RAILS_USE_MEMORY_STORE'] || (!Rails.env.development? && !Rails.env.production?) - Vmdb::Application.config.session_store :memory_store + Vmdb::Application.config.session_store(:memory_store) else session_store = case Settings.server.session_store @@ -40,7 +40,7 @@ def with_lock(*args) } end - Vmdb::Application.config.session_store session_store, session_options + Vmdb::Application.config.session_store(session_store, session_options) msg = "Using session_store: #{Vmdb::Application.config.session_store}" $log.info("MIQ(SessionStore) #{msg}") puts "** #{msg}" unless Rails.env.production? diff --git a/config/initializers/wrap_parameters.rb b/config/initializers/wrap_parameters.rb index da4fb076f39..c948e06341f 100644 --- a/config/initializers/wrap_parameters.rb +++ b/config/initializers/wrap_parameters.rb @@ -5,7 +5,7 @@ # Enable parameter wrapping for JSON. You can disable this by setting :format to an empty array. 
ActiveSupport.on_load(:action_controller) do - wrap_parameters :format => [:json] + wrap_parameters(:format => [:json]) end # Disable root element in JSON by default. diff --git a/config/puma.rb b/config/puma.rb index 0fb12f3f6e6..678c05b32ce 100644 --- a/config/puma.rb +++ b/config/puma.rb @@ -17,5 +17,5 @@ # thread count or we risk a ActiveRecord::ConnectionTimeoutError waiting on a # connection from the connection pool. # -threads 5, 5 -tag "MIQ: Web Server Worker" +threads(5, 5) +tag("MIQ: Web Server Worker") diff --git a/lib/ems_event_helper.rb b/lib/ems_event_helper.rb index 4c849e318b1..ebac715ae4e 100644 --- a/lib/ems_event_helper.rb +++ b/lib/ems_event_helper.rb @@ -16,11 +16,11 @@ def handle end def before_handle - _log.info "Processing EMS event [#{@event.event_type}] chain_id [#{@event.chain_id}] on EMS [#{@event.ems_id}]..." + _log.info("Processing EMS event [#{@event.event_type}] chain_id [#{@event.chain_id}] on EMS [#{@event.ems_id}]...") end def after_handle - _log.info "Processing EMS event [#{@event.event_type}] chain_id [#{@event.chain_id}] on EMS [#{@event.ems_id}]...Complete" + _log.info("Processing EMS event [#{@event.event_type}] chain_id [#{@event.chain_id}] on EMS [#{@event.ems_id}]...Complete") end def handle_automation_event diff --git a/lib/extensions/ar_adapter/ar_kill.rb b/lib/extensions/ar_adapter/ar_kill.rb index 5b3a5b4b66d..b7a79ee69e4 100644 --- a/lib/extensions/ar_adapter/ar_kill.rb +++ b/lib/extensions/ar_adapter/ar_kill.rb @@ -17,9 +17,9 @@ def kill(pid) item = data.first if item.nil? 
- _log.info "SPID=[#{pid_numeric}] not found" + _log.info("SPID=[#{pid_numeric}] not found") else - _log.info "Sending CANCEL Request for SPID=[#{pid_numeric}], age=[#{item['age']}], query=[#{item['query']}]" + _log.info("Sending CANCEL Request for SPID=[#{pid_numeric}], age=[#{item['age']}], query=[#{item['query']}]") result = select(<<-SQL, "Cancel SPID") SELECT pg_cancel_backend(#{pid_numeric}) FROM pg_stat_activity diff --git a/lib/extensions/ar_lock.rb b/lib/extensions/ar_lock.rb index e101c10af95..e2e3b0d970e 100644 --- a/lib/extensions/ar_lock.rb +++ b/lib/extensions/ar_lock.rb @@ -22,14 +22,14 @@ def lock(mode = :exclusive, timeout = 60.seconds) end transaction do - _log.debug "Acquiring lock on #{self.class.name}::#{id}..." + _log.debug("Acquiring lock on #{self.class.name}::#{id}...") lock!(lock) - _log.debug "Acquired lock" + _log.debug("Acquired lock") begin Timeout.timeout(timeout) { yield self } ensure - _log.debug "Releasing lock" + _log.debug("Releasing lock") end end end diff --git a/lib/extensions/ar_table_lock.rb b/lib/extensions/ar_table_lock.rb index 476947f32dd..92cb1567604 100644 --- a/lib/extensions/ar_table_lock.rb +++ b/lib/extensions/ar_table_lock.rb @@ -13,14 +13,14 @@ def with_lock(timeout = 60.seconds) lock = "SHARE ROW EXCLUSIVE" transaction do - _log.debug "Acquiring lock on #{name} (table: #{table_name}..." + _log.debug("Acquiring lock on #{name} (table: #{table_name}...") connection.execute("LOCK TABLE #{table_name} in #{lock} MODE") - _log.debug "Acquired lock on #{name} (table: #{table_name}..." + _log.debug("Acquired lock on #{name} (table: #{table_name}...") begin Timeout.timeout(timeout) { yield } ensure - _log.debug "Releasing lock on #{name} (table: #{table_name}..." 
+ _log.debug("Releasing lock on #{name} (table: #{table_name}...") end end end diff --git a/lib/extensions/ar_taggable.rb b/lib/extensions/ar_taggable.rb index 4984f8f07e1..465f94a9816 100644 --- a/lib/extensions/ar_taggable.rb +++ b/lib/extensions/ar_taggable.rb @@ -86,7 +86,7 @@ def tag_with(list, options = {}) .where(:taggable_id => id) .where(:taggable_type => self.class.base_class.name) .where(tagging[:tag_id].eq(tag[:id])) - .where(tag[:name].matches "#{ns}/%") + .where(tag[:name].matches("#{ns}/%")) .destroy_all # Apply new tags @@ -114,10 +114,10 @@ def tag_add(list, options = {}) def tagged_with(options = {}) tagging = Tagging.arel_table query = Tag.includes(:taggings).references(:taggings) - query = query.where(tagging[:taggable_type].eq self.class.base_class.name) - query = query.where(tagging[:taggable_id].eq id) + query = query.where(tagging[:taggable_type].eq(self.class.base_class.name)) + query = query.where(tagging[:taggable_id].eq(id)) ns = Tag.get_namespace(options) - query = query.where(Tag.arel_table[:name].matches "#{ns}%") if ns + query = query.where(Tag.arel_table[:name].matches("#{ns}%")) if ns query end diff --git a/lib/extensions/ar_types.rb b/lib/extensions/ar_types.rb index 1b9184bd364..4468c2870b0 100644 --- a/lib/extensions/ar_types.rb +++ b/lib/extensions/ar_types.rb @@ -3,7 +3,7 @@ prepend Module.new { def initialize_type_map(m) super - m.alias_type 'xid', 'varchar' + m.alias_type('xid', 'varchar') end } end diff --git a/lib/extensions/ar_virtual.rb b/lib/extensions/ar_virtual.rb index 6e5b1a0f476..80c25a4669a 100644 --- a/lib/extensions/ar_virtual.rb +++ b/lib/extensions/ar_virtual.rb @@ -125,7 +125,7 @@ def define_virtual_delegate(method_name, col, options) type = to_ref.klass.type_for_attribute(col) raise "unknown attribute #{to}##{col} referenced in #{name}" unless type arel = virtual_delegate_arel(col, to_ref) - define_virtual_attribute method_name, type, :uses => (options[:uses] || to), :arel => arel + 
define_virtual_attribute(method_name, type, :uses => (options[:uses] || to), :arel => arel) end # see activesupport module/delegation.rb @@ -171,7 +171,7 @@ def #{method_name}(#{definition}) end METHOD end - method_def = method_def.split("\n").map(&:strip).join ';' + method_def = method_def.split("\n").map(&:strip).join(';') module_eval(method_def, file, line) end @@ -413,8 +413,8 @@ def load_schema! def define_virtual_attribute(name, cast_type, uses: nil, arel: nil) attribute_types[name] = cast_type - define_virtual_include name, uses if uses - define_virtual_arel name, arel if arel + define_virtual_include(name, uses) if uses + define_virtual_arel(name, arel) if arel end end end @@ -430,7 +430,7 @@ module ClassMethods # def virtual_has_one(name, options = {}) - uses = options.delete :uses + uses = options.delete(:uses) reflection = ActiveRecord::Associations::Builder::HasOne.build(self, name, nil, options) add_virtual_reflection(reflection, name, uses, options) end @@ -440,13 +440,13 @@ def virtual_has_many(name, options = {}) records = send(name) records.respond_to?(:ids) ? records.ids : records.collect(&:id) end - uses = options.delete :uses + uses = options.delete(:uses) reflection = ActiveRecord::Associations::Builder::HasMany.build(self, name, nil, options) add_virtual_reflection(reflection, name, uses, options) end def virtual_belongs_to(name, options = {}) - uses = options.delete :uses + uses = options.delete(:uses) reflection = ActiveRecord::Associations::Builder::BelongsTo.build(self, name, nil, options) add_virtual_reflection(reflection, name, uses, options) end @@ -464,7 +464,7 @@ def virtual_reflection(name) # def virtual_reflections - (virtual_fields_base? ? {} : superclass.virtual_reflections).merge _virtual_reflections + (virtual_fields_base? ? 
{} : superclass.virtual_reflections).merge(_virtual_reflections) end def reflections_with_virtual @@ -585,12 +585,12 @@ def preloaders_for_one(association, records, scope) loaders = klass_map.keys.group_by { |klass| klass.virtual_includes(association) }.flat_map do |virtuals, klasses| subset = klasses.flat_map { |klass| klass_map[klass] } - preload subset, virtuals + preload(subset, virtuals) end records_with_association = klass_map.select { |k, rs| k.reflect_on_association(association) }.flat_map { |k, rs| rs } if records_with_association.any? - loaders.concat super(association, records_with_association, scope) + loaders.concat(super(association, records_with_association, scope)) end loaders @@ -618,7 +618,7 @@ def instantiate(result_set, aliases) model_cache = Hash.new { |h,klass| h[klass] = {} } parents = model_cache[join_root] - column_aliases = aliases.column_aliases join_root + column_aliases = aliases.column_aliases(join_root) # New Code # diff --git a/lib/extensions/descendant_loader.rb b/lib/extensions/descendant_loader.rb index 9853fb0a4c3..9b2c1cb711b 100644 --- a/lib/extensions/descendant_loader.rb +++ b/lib/extensions/descendant_loader.rb @@ -148,7 +148,7 @@ def name_combinations(names) end combos.each do |combo| if (i = combo.rindex { |s| s =~ /^::/ }) - combo.slice! 
0, i + combo.slice!(0, i) combo[0] = combo[0].sub(/^::/, '') end end @@ -211,7 +211,7 @@ def class_inheritance_relationships possible_superklasses = scoped_name(sklass, search_scopes) possible_superklasses.each do |possible_superklass| - children[possible_superklass].concat possible_names + children[possible_superklass].concat(possible_names) end end end @@ -232,7 +232,7 @@ def load_subclasses(parent) names_to_load = class_inheritance_relationships[parent.to_s].dup while (name = names_to_load.shift) if (_klass = name.safe_constantize) # this triggers the load - names_to_load.concat class_inheritance_relationships[name] + names_to_load.concat(class_inheritance_relationships[name]) end end end diff --git a/lib/manageiq.rb b/lib/manageiq.rb index bc9ff60967e..b5071edc02c 100644 --- a/lib/manageiq.rb +++ b/lib/manageiq.rb @@ -19,7 +19,7 @@ def self.root if defined?(Rails) Rails.root else - Pathname.new File.expand_path("../..", __FILE__) + Pathname.new(File.expand_path("../..", __FILE__)) end end end diff --git a/lib/miq_apache/control.rb b/lib/miq_apache/control.rb index 00601dbe311..c310006611b 100644 --- a/lib/miq_apache/control.rb +++ b/lib/miq_apache/control.rb @@ -45,7 +45,7 @@ def self.start if ENV["CONTAINER"] system("/usr/sbin/httpd -DFOREGROUND &") else - run_apache_cmd 'start' + run_apache_cmd('start') end end @@ -54,7 +54,7 @@ def self.stop pid = `pgrep -P 1 httpd`.chomp.to_i system("kill -WINCH #{pid}") if pid > 0 else - run_apache_cmd 'stop' + run_apache_cmd('stop') end end diff --git a/lib/miq_expression.rb b/lib/miq_expression.rb index e724870c7f4..762a8eb8a7d 100644 --- a/lib/miq_expression.rb +++ b/lib/miq_expression.rb @@ -401,7 +401,7 @@ def self.expand_conditional_clause(klass, cond) cond = klass.predicate_builder.resolve_column_aliases(cond) cond = klass.send(:expand_hash_conditions_for_aggregates, cond) - klass.predicate_builder.build_from_hash(cond).map { |b| klass.connection.visitor.compile b }.join(' AND ') + 
klass.predicate_builder.build_from_hash(cond).map { |b| klass.connection.visitor.compile(b) }.join(' AND ') end def self.merge_where_clauses(*list) @@ -1210,7 +1210,7 @@ def self.integer?(n) n = n.to_s n2 = n.delete(',') # strip out commas begin - Integer n2 + Integer(n2) return true rescue return false unless n.number_with_method? @@ -1227,7 +1227,7 @@ def self.numeric?(n) n = n.to_s n2 = n.delete(',') # strip out commas begin - Float n2 + Float(n2) return true rescue return false unless n.number_with_method? @@ -1426,7 +1426,7 @@ def to_arel(exp, tz) end def extract_where_values(klass, scope) - relation = ActiveRecord::Relation.new klass, klass.arel_table, klass.predicate_builder + relation = ActiveRecord::Relation.new(klass, klass.arel_table, klass.predicate_builder) relation = relation.instance_eval(&scope) begin @@ -1434,7 +1434,7 @@ def extract_where_values(klass, scope) # custom visitor instance connection = klass.connection - visitor = WhereExtractionVisitor.new connection + visitor = WhereExtractionVisitor.new(connection) arel = relation.arel binds = relation.bound_attributes diff --git a/lib/miq_ldap.rb b/lib/miq_ldap.rb index 0357ce6a0ee..ffe25db5a33 100644 --- a/lib/miq_ldap.rb +++ b/lib/miq_ldap.rb @@ -51,7 +51,7 @@ def initialize(options = {}) # Make sure we do NOT log the clear-text password log_options = Vmdb::Settings.mask_passwords!(options.deep_clone) - $log.info "options: #{log_options.inspect}" + $log.info("options: #{log_options.inspect}") @ldap = Net::LDAP.new(options) end @@ -70,7 +70,7 @@ def resolve_host(hosts, port) canonical, aliases, type, *addresses = TCPSocket.gethostbyname(host) # Resolve hostname to IP Address $log.info("MiqLdap.connection: Resolved host [#{host}] has these IP Address: #{addresses.inspect}") if $log rescue => err - $log.debug "Warning: '#{err.message}', resolving host: [host]" + $log.debug("Warning: '#{err.message}', resolving host: [host]") next end end @@ -82,7 +82,7 @@ def resolve_host(hosts, port) selected_host 
= address break rescue => err - $log.debug "Warning: '#{err.message}', connecting to IP Address [#{address}]" + $log.debug("Warning: '#{err.message}', connecting to IP Address [#{address}]") end end @@ -367,12 +367,12 @@ def get_user_info(username, user_type = 'mail') def get_memberships(obj, max_depth = 0, attr = :memberof, followed = [], current_depth = 0) current_depth += 1 - _log.debug "Enter get_memberships: #{obj.inspect}" - _log.debug "Enter get_memberships: #{obj.dn}, max_depth: #{max_depth}, current_depth: #{current_depth}, attr: #{attr}" + _log.debug("Enter get_memberships: #{obj.inspect}") + _log.debug("Enter get_memberships: #{obj.dn}, max_depth: #{max_depth}, current_depth: #{current_depth}, attr: #{attr}") result = [] # puts "obj #{obj.inspect}" groups = MiqLdap.get_attr(obj, attr).to_miq_a - _log.debug "Groups: #{groups.inspect}" + _log.debug("Groups: #{groups.inspect}") return result unless groups groups.each do|group| @@ -381,7 +381,7 @@ def get_memberships(obj, max_depth = 0, attr = :memberof, followed = [], current dn = nil cn = nil if gobj.nil? - _log.debug "Group: DN: #{group} returned a nil object, CN will be extracted from DN, memberships will not be followed" + _log.debug("Group: DN: #{group} returned a nil object, CN will be extracted from DN, memberships will not be followed") normalize(group) =~ /^cn[ ]*=[ ]*([^,]+),/ cn = $1 else @@ -391,9 +391,9 @@ def get_memberships(obj, max_depth = 0, attr = :memberof, followed = [], current if cn.nil? suffix = gobj.nil? ? 
"unable to extract CN from DN" : "has no CN" - _log.debug "Group: #{group} #{suffix}, skipping" + _log.debug("Group: #{group} #{suffix}, skipping") else - _log.debug "Group: DN: #{group}, extracted CN: #{cn}" + _log.debug("Group: DN: #{group}, extracted CN: #{cn}") result.push(cn.strip) end @@ -402,7 +402,7 @@ def get_memberships(obj, max_depth = 0, attr = :memberof, followed = [], current result.concat(get_memberships(gobj, max_depth, attr, followed, current_depth)) unless max_depth > 0 && current_depth >= max_depth end end - _log.debug "Exit get_memberships: #{obj.dn}, result: #{result.uniq.inspect}" + _log.debug("Exit get_memberships: #{obj.dn}, result: #{result.uniq.inspect}") result.uniq end diff --git a/lib/miq_memcached.rb b/lib/miq_memcached.rb index 5b9478b5ec5..cca95db8fe9 100644 --- a/lib/miq_memcached.rb +++ b/lib/miq_memcached.rb @@ -21,7 +21,7 @@ def initialize(opts = {}) end def save(fname) - File.open(fname, "w") { |f| f.write @config } + File.open(fname, "w") { |f| f.write(@config) } end def update(opts = {}) diff --git a/lib/patches/memcache_patch.rb b/lib/patches/memcache_patch.rb index 315e37dc872..de97191e90c 100644 --- a/lib/patches/memcache_patch.rb +++ b/lib/patches/memcache_patch.rb @@ -10,12 +10,12 @@ class MemCache # unmarshalled. def get(key, raw = false) - server, cache_key = request_setup key + server, cache_key = request_setup(key) value = if @multithread - threadsafe_cache_get server, cache_key + threadsafe_cache_get(server, cache_key) else - cache_get server, cache_key + cache_get(server, cache_key) end return nil if value.nil? 
@@ -25,7 +25,7 @@ def get(key, raw = false) return value rescue TypeError, SocketError, SystemCallError, IOError => err - handle_error server, err + handle_error(server, err) end ## @@ -44,10 +44,10 @@ def get(key, raw = false) def set(key, value, expiry = 0, raw = false) raise MemCacheError, "Update of readonly cache" if @readonly - server, cache_key = request_setup key + server, cache_key = request_setup(key) socket = server.socket - value = Marshal.dump value unless raw + value = Marshal.dump(value) unless raw if value.length > LARGE_VALUE_SIZE cache_set_large(key, value, expiry) @@ -56,7 +56,7 @@ def set(key, value, expiry = 0, raw = false) begin @mutex.lock if @multithread - socket.write command + socket.write(command) result = socket.gets raise MemCacheError, $1.strip if result =~ /^SERVER_ERROR (.*)/ rescue SocketError, SystemCallError, IOError => err @@ -80,10 +80,10 @@ def set(key, value, expiry = 0, raw = false) def add(key, value, expiry = 0, raw = false) raise MemCacheError, "Update of readonly cache" if @readonly - server, cache_key = request_setup key + server, cache_key = request_setup(key) socket = server.socket - value = Marshal.dump value unless raw + value = Marshal.dump(value) unless raw if value.length > LARGE_VALUE_SIZE cache_set_large(key, value, expiry) @@ -92,7 +92,7 @@ def add(key, value, expiry = 0, raw = false) begin @mutex.lock if @multithread - socket.write command + socket.write(command) socket.gets rescue SocketError, SystemCallError, IOError => err server.close @@ -115,9 +115,9 @@ def cache_get_large(server, cache_key, large_value_key, raw) chunks = (0...large_value_key[LARGE_VALUE_KEY.length..-1].to_i).collect { |c| "#{cache_key}:chunk_#{c}" } chunks_keys = chunks.join(' ') values = if @multithread - threadsafe_cache_get_multi server, chunks_keys + threadsafe_cache_get_multi(server, chunks_keys) else - cache_get_multi server, chunks_keys + cache_get_multi(server, chunks_keys) end values = chunks.collect { |c| values[c] } diff --git 
a/lib/rbac/filterer.rb b/lib/rbac/filterer.rb index 94b3a73af0a..7678ef02e16 100644 --- a/lib/rbac/filterer.rb +++ b/lib/rbac/filterer.rb @@ -541,7 +541,7 @@ def matches_via_descendants(klass, descendant_klass, options) def lookup_method_for_descendant_class(klass, descendant_klass) key = "#{descendant_klass.base_class}::#{klass.base_class}" MATCH_VIA_DESCENDANT_RELATIONSHIPS[key].tap do |method_name| - _log.warn "could not find method name for #{key}" if method_name.nil? + _log.warn("could not find method name for #{key}") if method_name.nil? end end diff --git a/lib/services/resource_sharer.rb b/lib/services/resource_sharer.rb index 996fe27fd0e..3ddd343c4e1 100644 --- a/lib/services/resource_sharer.rb +++ b/lib/services/resource_sharer.rb @@ -8,7 +8,7 @@ class ResourceSharer attr_accessor :user, :resource, :tenants, :features, :allow_tenant_inheritance - with_options :presence => true do + with_options(:presence => true) do validates :user validates :resource validates :tenants diff --git a/lib/vmdb/appliance.rb b/lib/vmdb/appliance.rb index ef3402c0595..7cf4efb0d38 100644 --- a/lib/vmdb/appliance.rb +++ b/lib/vmdb/appliance.rb @@ -25,24 +25,24 @@ def self.log_config(*args) fh = options[:logger] || $log init_msg = options[:startup] == true ? "* [VMDB] started on [#{Time.now}] *" : "* [VMDB] configuration *" border = "*" * init_msg.length - fh.info border - fh.info init_msg - fh.info border + fh.info(border) + fh.info(init_msg) + fh.info(border) - fh.info "Version: #{self.VERSION}" - fh.info "Build: #{self.BUILD}" - fh.info "RUBY Environment: #{Object.const_defined?(:RUBY_DESCRIPTION) ? RUBY_DESCRIPTION : "ruby #{RUBY_VERSION} (#{RUBY_RELEASE_DATE} patchlevel #{RUBY_PATCHLEVEL}) [#{RUBY_PLATFORM}]"}" - fh.info "RAILS Environment: #{Rails.env} version #{Rails.version}" + fh.info("Version: #{self.VERSION}") + fh.info("Build: #{self.BUILD}") + fh.info("RUBY Environment: #{Object.const_defined?(:RUBY_DESCRIPTION) ? 
RUBY_DESCRIPTION : "ruby #{RUBY_VERSION} (#{RUBY_RELEASE_DATE} patchlevel #{RUBY_PATCHLEVEL}) [#{RUBY_PLATFORM}]"}") + fh.info("RAILS Environment: #{Rails.env} version #{Rails.version}") - fh.info "VMDB settings:" + fh.info("VMDB settings:") VMDBLogger.log_hashes(fh, ::Settings, :filter => Vmdb::Settings::PASSWORD_FIELDS) - fh.info "VMDB settings END" - fh.info "---" + fh.info("VMDB settings END") + fh.info("---") - fh.info "DATABASE settings:" + fh.info("DATABASE settings:") VMDBLogger.log_hashes(fh, Rails.configuration.database_configuration[Rails.env]) - fh.info "DATABASE settings END" - fh.info "---" + fh.info("DATABASE settings END") + fh.info("---") end def self.log_server_identity @@ -56,33 +56,33 @@ def self.log_server_identity begin startup = VMDBLogger.new(startup_fname) log_config(:logger => startup, :startup => true) - startup.info "Server GUID: #{MiqServer.my_guid}" - startup.info "Server Zone: #{MiqServer.my_zone}" - startup.info "Server Role: #{MiqServer.my_role}" + startup.info("Server GUID: #{MiqServer.my_guid}") + startup.info("Server Zone: #{MiqServer.my_zone}") + startup.info("Server Role: #{MiqServer.my_role}") s = MiqServer.my_server region = MiqRegion.my_region - startup.info "Server Region number: #{region.region}, name: #{region.name}" if region - startup.info "Server EVM id and name: #{s.id} #{s.name}" + startup.info("Server Region number: #{region.region}, name: #{region.name}") if region + startup.info("Server EVM id and name: #{s.id} #{s.name}") - startup.info "Currently assigned server roles:" - s.assigned_server_roles(:include => :server_role).each { |r| startup.info "Role: #{r.server_role.name}, Priority: #{r.priority}" } + startup.info("Currently assigned server roles:") + s.assigned_server_roles(:include => :server_role).each { |r| startup.info("Role: #{r.server_role.name}, Priority: #{r.priority}") } issue = `cat /etc/issue 2> /dev/null` rescue nil - startup.info "OS: #{issue.chomp}" unless issue.blank? 
+ startup.info("OS: #{issue.chomp}") unless issue.blank? network = get_network unless network.empty? - startup.info "Network Information:" - network.each { |k, v| startup.info "#{k}: #{v}" } + startup.info("Network Information:") + network.each { |k, v| startup.info("#{k}: #{v}") } end mem = `cat /proc/meminfo 2> /dev/null` rescue nil - startup.info "System Memory Information:\n#{mem}" unless mem.blank? + startup.info("System Memory Information:\n#{mem}") unless mem.blank? cpu = `cat /proc/cpuinfo 2> /dev/null` rescue nil - startup.info "CPU Information:\n#{cpu}" unless cpu.blank? + startup.info("CPU Information:\n#{cpu}") unless cpu.blank? fstab = `cat /etc/fstab 2> /dev/null` rescue nil - startup.info "fstab information:\n#{fstab}" unless fstab.blank? + startup.info("fstab information:\n#{fstab}") unless fstab.blank? ensure startup.close rescue nil end diff --git a/lib/vmdb/config/activator.rb b/lib/vmdb/config/activator.rb index abf49752e16..19dac1b8a70 100644 --- a/lib/vmdb/config/activator.rb +++ b/lib/vmdb/config/activator.rb @@ -33,8 +33,8 @@ def log(data) end def session(data) - Session.timeout data.timeout - Session.interval data.interval + Session.timeout(data.timeout) + Session.interval(data.interval) end def server(data) diff --git a/lib/vmdb/deprecation.rb b/lib/vmdb/deprecation.rb index db0f815de65..e68490c10c2 100644 --- a/lib/vmdb/deprecation.rb +++ b/lib/vmdb/deprecation.rb @@ -31,8 +31,8 @@ def self.default_log def self.proc_for_default_log return unless default_log proc do |message, callstack| - default_log.warn message - default_log.debug callstack.join("\n ") if default_log.debug? + default_log.warn(message) + default_log.debug(callstack.join("\n ")) if default_log.debug? 
end end private_class_method :proc_for_default_log diff --git a/lib/vmdb/global_methods.rb b/lib/vmdb/global_methods.rb index 675347c95a9..cbac6d1945a 100644 --- a/lib/vmdb/global_methods.rb +++ b/lib/vmdb/global_methods.rb @@ -1,7 +1,7 @@ module Vmdb module GlobalMethods def is_numeric?(n) - Float n + Float(n) rescue false else @@ -10,7 +10,7 @@ def is_numeric?(n) # Check to see if a field contains a valid integer def is_integer?(n) - Integer n + Integer(n) rescue false else @@ -47,14 +47,14 @@ def format_timezone(time, timezone = Time.zone.name, ftype = "view") when "export_filename" # for export/log filename new_time = new_time.strftime("%Y%m%d_%H%M%S") when "tl" - new_time = I18n.l new_time + new_time = I18n.l(new_time) when "raw" # return without formatting when "compare_hdr" # for drift/compare headers new_time = I18n.l(new_time, :format => :long) + new_time.strftime(" %Z") when "widget_footer" # for widget footers new_time = I18n.l(new_time, :format => :long) else # for summary screens - new_time = I18n.l new_time + new_time = I18n.l(new_time) end else # if time is nil new_time = "" diff --git a/lib/vmdb/inflections.rb b/lib/vmdb/inflections.rb index 2668b2db3ce..5a364a615fa 100644 --- a/lib/vmdb/inflections.rb +++ b/lib/vmdb/inflections.rb @@ -34,7 +34,7 @@ def self.load_inflections inflect.plural(/quota$/, "quotas") inflect.irregular("container_quota", "container_quotas") - inflect.acronym 'ManageIQ' + inflect.acronym('ManageIQ') end end end diff --git a/lib/vmdb/initializer.rb b/lib/vmdb/initializer.rb index f2779a0511b..5ae503761a2 100644 --- a/lib/vmdb/initializer.rb +++ b/lib/vmdb/initializer.rb @@ -1,7 +1,7 @@ module Vmdb module Initializer def self.init - _log.info "- Program Name: #{$PROGRAM_NAME}, PID: #{Process.pid}, ENV['MIQ_GUID']: #{ENV['MIQ_GUID']}, ENV['EVMSERVER']: #{ENV['EVMSERVER']}" + _log.info("- Program Name: #{$PROGRAM_NAME}, PID: #{Process.pid}, ENV['MIQ_GUID']: #{ENV['MIQ_GUID']}, ENV['EVMSERVER']: #{ENV['EVMSERVER']}") # UiWorker 
called in Development Mode # * command line(rails server) diff --git a/lib/vmdb/logging.rb b/lib/vmdb/logging.rb index 483f4074333..ffa78f75d88 100644 --- a/lib/vmdb/logging.rb +++ b/lib/vmdb/logging.rb @@ -19,10 +19,10 @@ class LogProxy < Struct.new(:klass, :separator) location = caller_locations(1, 1) if blk logger.send(level) do - "#{prefix location} #{blk.call}" + "#{prefix(location)} #{blk.call}" end else - logger.send(level, "#{prefix location} #{msg}") + logger.send(level, "#{prefix(location)} #{msg}") end end end @@ -57,5 +57,5 @@ def _log end end - ::Module.send :include, ClassLogging + ::Module.send(:include, ClassLogging) end diff --git a/lib/vmdb/permission_stores.rb b/lib/vmdb/permission_stores.rb index 163cd0bb5f4..2acbf77c826 100644 --- a/lib/vmdb/permission_stores.rb +++ b/lib/vmdb/permission_stores.rb @@ -9,7 +9,7 @@ def initialize end def create - PermissionStores.create self + PermissionStores.create(self) end def load diff --git a/lib/vmdb/permission_stores/yaml.rb b/lib/vmdb/permission_stores/yaml.rb index cc42d94b98b..63e136cb54c 100644 --- a/lib/vmdb/permission_stores/yaml.rb +++ b/lib/vmdb/permission_stores/yaml.rb @@ -3,20 +3,20 @@ module Vmdb module PermissionStores def self.create(config) - YAML.new config.options[:filename] + YAML.new(config.options[:filename]) end class YAML def initialize(file) - @permissions = Psych.load_file file + @permissions = Psych.load_file(file) end def can?(permission) - @permissions.include? permission + @permissions.include?(permission) end def supported_ems_type?(type) - can? 
"ems-type:#{type}" + can?("ems-type:#{type}") end end end diff --git a/lib/vmdb/util.rb b/lib/vmdb/util.rb index 48458bcfa7a..e0123ca5365 100644 --- a/lib/vmdb/util.rb +++ b/lib/vmdb/util.rb @@ -68,7 +68,7 @@ def self.log_duration_gz(filename) require 'zlib' begin - _log.info "Opening filename: [#{filename}], size: [#{File.size(filename)}]" + _log.info("Opening filename: [#{filename}], size: [#{File.size(filename)}]") Zlib::GzipReader.open(filename) do |gz| line_count = 0 start_time_str = nil @@ -84,14 +84,14 @@ def self.log_duration_gz(filename) start_time = log_timestamp(start_time_str) end_time = log_timestamp(end_time_str) - _log.info "Lines in file: [#{line_count}]" - _log.info "Start Time: [#{start_time.inspect}]" - _log.info "End Time: [#{end_time.inspect}]" + _log.info("Lines in file: [#{line_count}]") + _log.info("Start Time: [#{start_time.inspect}]") + _log.info("End Time: [#{end_time.inspect}]") return start_time, end_time end rescue Exception => e - _log.error e.to_s + _log.error(e.to_s) return [] end end @@ -108,7 +108,7 @@ def self.zip_logs(zip_filename, dirs, userid = "system") zfile = zfile.to_s - _log.info "Creating: [#{zfile_display}]" + _log.info("Creating: [#{zfile_display}]") Zip::File.open(zfile, Zip::File::CREATE) do |zip| dirs.each do |dir| dir = Rails.root.join(dir) unless Pathname.new(dir).absolute? @@ -116,13 +116,13 @@ def self.zip_logs(zip_filename, dirs, userid = "system") begin entry, _mtime = add_zip_entry(zip, file, zfile) rescue => e - _log.error "Failed to add file: [#{entry}]. Error information: #{e.message}" + _log.error("Failed to add file: [#{entry}]. 
Error information: #{e.message}") end end end zip.close end - _log.info "Created: [#{zfile_display}], Size: [#{File.size(zfile)}]" + _log.info("Created: [#{zfile_display}], Size: [#{File.size(zfile)}]") zfile end @@ -140,7 +140,7 @@ def self.add_zip_entry(zip, file_path, zfile) else zip_entry = Zip::Entry.new(zfile, entry, nil, nil, nil, nil, nil, nil, ztime) zip.add(zip_entry, file_path) - _log.info "Adding file: [#{entry}], size: [#{File.size(file_path)}], mtime: [#{mtime}]" + _log.info("Adding file: [#{entry}], size: [#{File.size(file_path)}], mtime: [#{mtime}]") end return entry, mtime end diff --git a/lib/workers/bin/run_single_worker.rb b/lib/workers/bin/run_single_worker.rb index fa0e72e3201..6fc0c9e1540 100644 --- a/lib/workers/bin/run_single_worker.rb +++ b/lib/workers/bin/run_single_worker.rb @@ -9,7 +9,7 @@ options = {} opt_parser = OptionParser.new do |opts| - opts.banner = "usage: #{File.basename $PROGRAM_NAME, '.rb'} MIQ_WORKER_CLASS_NAME" + opts.banner = "usage: #{File.basename($PROGRAM_NAME, '.rb')} MIQ_WORKER_CLASS_NAME" opts.on("-l", "--[no-]list", "Toggle viewing available worker class names") do |val| options[:list] = val diff --git a/mime-types-redirector/lib/mime-types.rb b/mime-types-redirector/lib/mime-types.rb index af26a998368..72150ae40a5 100644 --- a/mime-types-redirector/lib/mime-types.rb +++ b/mime-types-redirector/lib/mime-types.rb @@ -4,11 +4,11 @@ module MIME class Types class << self def [](type) - Array.wrap MiniMime.lookup_by_content_type(type) + Array.wrap(MiniMime.lookup_by_content_type(type)) end def type_for(filename) - Array.wrap MiniMime.lookup_by_filename(filename) + Array.wrap(MiniMime.lookup_by_filename(filename)) end alias_method :of, :type_for end diff --git a/mime-types-redirector/mime-types.gemspec b/mime-types-redirector/mime-types.gemspec index d244d9555af..37f96c486c6 100644 --- a/mime-types-redirector/mime-types.gemspec +++ b/mime-types-redirector/mime-types.gemspec @@ -5,7 +5,7 @@ Gem::Specification.new do |s| 
s.name = "mime-types" s.version = "2.6.1" - s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to?(:required_rubygems_version=) s.require_paths = ["lib"] s.authors = ["Nick LaMuro"] s.date = "2017-03-27" diff --git a/product/script/reports_to_multi_yamls.rb b/product/script/reports_to_multi_yamls.rb index 05d22e31006..abaf73ad7b3 100644 --- a/product/script/reports_to_multi_yamls.rb +++ b/product/script/reports_to_multi_yamls.rb @@ -17,7 +17,7 @@ reports.each do |r| ctr += 1 name = File.join(Dir.pwd, "#{ctr}_#{r["name"]}.yaml") - File.open(name, "w") { |f| f.write(YAML.dump r) } + File.open(name, "w") { |f| f.write(YAML.dump(r)) } puts "Created file '#{name}'" end diff --git a/tools/cockpit/cockpit-auth-miq b/tools/cockpit/cockpit-auth-miq index 74ce05a5589..908d6980b22 100755 --- a/tools/cockpit/cockpit-auth-miq +++ b/tools/cockpit/cockpit-auth-miq @@ -28,7 +28,7 @@ def send_auth_command(challenge, response, data) text = JSON.dump(cmd) size = text.length + 1 - $stdout.write "#{size}\n\n#{text}" + $stdout.write("#{size}\n\n#{text}") $stdout.flush end @@ -48,7 +48,7 @@ def send_problem_init(problem, message, auth_result) text = JSON.dump(cmd) size = text.length + 1 - $stdout.write "#{size}\n\n#{text}" + $stdout.write("#{size}\n\n#{text}") $stdout.flush end @@ -68,7 +68,7 @@ def read_size(io) end if t.to_i.zero? 
&& t != "0" - raise ArgumentError "Invalid frame: invalid size" + raise ArgumentError, "Invalid frame: invalid size" end size = (size * 10) + t.to_i @@ -76,7 +76,7 @@ def read_size(io) end if seen == 8 - raise ArgumentError "Invalid frame: size too long" + raise ArgumentError, "Invalid frame: size too long" end size @@ -99,7 +99,7 @@ def read_auth_reply data = read_frame(1) cmd = JSON.parse(data) if cmd["command"] != "authorize" || !cmd["cookie"] || !cmd["response"] - raise ArgumentError "Did not receive a valid authorize command" + raise ArgumentError, "Did not receive a valid authorize command" end cmd["response"] diff --git a/tools/column_ordering/column_ordering.rb b/tools/column_ordering/column_ordering.rb index 6709b96df83..7b2e6ada765 100644 --- a/tools/column_ordering/column_ordering.rb +++ b/tools/column_ordering/column_ordering.rb @@ -98,7 +98,7 @@ def table_dump pg_dump_result = AwesomeSpawn.run("pg_dump", :params => params) - raise ColumnOrderingError <<-ERROR.gsub!(/^ +/, "") if pg_dump_result.failure? + raise ColumnOrderingError, <<-ERROR.gsub!(/^ +/, "") if pg_dump_result.failure? '#{pg_dump_result.command_line}' failed with #{pg_dump_result.exit_status}: stdout: #{pg_dump_result.output} @@ -211,7 +211,7 @@ def reordered_create_table_statement(current_create_table) def assert_column_list_sizes_match! return if current_columns.length == expected_columns.length - raise ColumnOrderingError <<-ERROR.gsub!(/^ +/, "") + raise ColumnOrderingError, <<-ERROR.gsub!(/^ +/, "") Current and expected column arrays are of different size for table #{table} expected: #{expected_columns.inspect} @@ -221,7 +221,7 @@ def assert_column_list_contents_match! 
return if current_columns.sort == expected_columns.sort - raise ColumnOrderingError <<-ERROR.gsub!(/^ +/, "") + raise ColumnOrderingError, <<-ERROR.gsub!(/^ +/, "") Current and expected column arrays have different contents for #{table} expected: #{expected_columns.inspect} diff --git a/tools/env_probe_event_catcher.rb b/tools/env_probe_event_catcher.rb index 2c4dec34a2e..b653b2ee51c 100755 --- a/tools/env_probe_event_catcher.rb +++ b/tools/env_probe_event_catcher.rb @@ -14,15 +14,15 @@ def log(level, msg) end def at_exit(msg) - log :info, msg - log :info, "Total Events Caught: #{$event_cnt}, Process stats: #{MiqProcess.processInfo.inspect}" + log(:info, msg) + log(:info, "Total Events Caught: #{$event_cnt}, Process stats: #{MiqProcess.processInfo.inspect}") exit 0 end ["INT", "KILL", "TERM"].each { |s| trap(s) { at_exit("Interrupt signal (#{s}) received.") } if Signal.list.keys.include?(s) } $event_cnt = 0 -log :info, "Starting Event Catcher on #{host}..." +log(:info, "Starting Event Catcher on #{host}...") tid = Thread.new do begin $vim_em = MiqVimEventMonitor.new(host, user, pass, nil, 100) @@ -44,28 +44,28 @@ def at_exit(msg) display_name = event_type end - log :info, "Caught event [#{display_name}] chainId [#{e1['chainId']}]" + log(:info, "Caught event [#{display_name}] chainId [#{e1['chainId']}]") $event_cnt += 1 end end rescue => err - log "error", err.message - log "error", err.backtrace.join("\n") + log("error", err.message) + log("error", err.backtrace.join("\n")) exit 1 end end -log :info, "Starting Event Catcher on #{host}... Complete" +log(:info, "Starting Event Catcher on #{host}... 
Complete") puts "\nHit ^C to quit" puts -log :info, "Total Events Caught: #{$event_cnt}, Process stats: #{MiqProcess.processInfo.inspect}" +log(:info, "Total Events Caught: #{$event_cnt}, Process stats: #{MiqProcess.processInfo.inspect}") iterations = 5 loop do sleep 60 - log :info, "Total Events Caught: #{$event_cnt}, Process stats: #{MiqProcess.processInfo.inspect}" + log(:info, "Total Events Caught: #{$event_cnt}, Process stats: #{MiqProcess.processInfo.inspect}") iterations -= 1 break if iterations == 0 end diff --git a/tools/env_probe_vc_inv.rb b/tools/env_probe_vc_inv.rb index 37d700672ba..ec29132d28e 100755 --- a/tools/env_probe_vc_inv.rb +++ b/tools/env_probe_vc_inv.rb @@ -31,11 +31,11 @@ def log(level, msg) end def vim_vc_connect - log :info, "Connecting to EMS: [#{VC_IP}], as [#{VC_USER}]..." + log(:info, "Connecting to EMS: [#{VC_IP}], as [#{VC_USER}]...") @vc_data = {} @vi = nil @vi = MiqVim.new(VC_IP, VC_USER, VC_PASS) - log :info, "Connecting to EMS: [#{VC_IP}], as [#{VC_USER}]... Complete" + log(:info, "Connecting to EMS: [#{VC_IP}], as [#{VC_USER}]... Complete") end def vim_vc_inv_hash @@ -46,40 +46,40 @@ def vim_vc_inv_hash # verify we are in the vmdb directory unless File.exist?('app') - log :error, "Please run this script using 'script/runner perf_environment.rb' from vmdb directory" + log(:error, "Please run this script using 'script/runner perf_environment.rb' from vmdb directory") exit 1 end -log :info, "Running EMS Inventory tests..." +log(:info, "Running EMS Inventory tests...") -log :info, "EMS Host: #{VC_IP}" -log :info, "EMS User: #{VC_USER}" +log(:info, "EMS Host: #{VC_IP}") +log(:info, "EMS User: #{VC_USER}") -log :info, "Process stats: #{MiqProcess.processInfo.inspect}" +log(:info, "Process stats: #{MiqProcess.processInfo.inspect}") begin t0 = Time.now vc_data = {} con = vim_vc_connect inv = vim_vc_inv_hash - log :info, "Requesting inventory accessors..." 
+ log(:info, "Requesting inventory accessors...") VC_ACCESSORS.each do |acc, type| inv_hash = @vi.send(acc) vc_data[type] = inv_hash end rescue => err - log :error, err + log(:error, err) exit 1 end -log :info, "Running EMS Inventory tests... Complete, Elapsed time: [#{Time.now.to_i - t0.to_i} seconds]" -log :info, "EMS Inventory summary: " + vc_data.collect { |k, v| k.to_s << "=>" << v.length.to_s }.inspect -log :info, "Process stats: #{MiqProcess.processInfo.inspect}" +log(:info, "Running EMS Inventory tests... Complete, Elapsed time: [#{Time.now.to_i - t0.to_i} seconds]") +log(:info, "EMS Inventory summary: " + vc_data.collect { |k, v| k.to_s << "=>" << v.length.to_s }.inspect) +log(:info, "Process stats: #{MiqProcess.processInfo.inspect}") -log :info, "Writing inventory to #{inv_yml}..." +log(:info, "Writing inventory to #{inv_yml}...") $yml_fd.write(YAML.dump(vc_data)) $yml_fd.close -log :info, "Done" +log(:info, "Done") exit 0 diff --git a/tools/evm_dump.rb b/tools/evm_dump.rb index 162379cf10f..8319df329c5 100644 --- a/tools/evm_dump.rb +++ b/tools/evm_dump.rb @@ -24,7 +24,7 @@ def yml_dump(yml_fname, items) # verify we are in the vmdb directory unless File.exist?('app') - log :error, "Please run this script using 'script/runner miq_queue_dump.rb' from vmdb directory" + log(:error, "Please run this script using 'script/runner miq_queue_dump.rb' from vmdb directory") exit 1 end @@ -45,15 +45,15 @@ def yml_dump(yml_fname, items) MODELS += ARGV.collect { |model| Object.const_get(model) } MODELS.each do |klass| - log :info, "Getting #{klass} objects" + log(:info, "Getting #{klass} objects") items = klass.all.to_a if items.length > 0 fname = yml_fname(klass) yml_fnames << fname - log :info, "Writing #{items.length} #{klass} objects to #{fname}" + log(:info, "Writing #{items.length} #{klass} objects to #{fname}") yml_dump(fname, items) else - log :info, "Found #{items.length} #{klass} objects" + log(:info, "Found #{items.length} #{klass} objects") end end @@ 
-61,11 +61,11 @@ def yml_dump(yml_fname, items) zip_fname = File.join(LOG_DIR, "evm_dump.zip") File.delete(zip_fname) if File.exist?(zip_fname) cmdline = "zip #{zip_fname} #{logfile} #{yml_fnames.join(' ')}" - log :info, "Zipping dump into #{zip_fname}" + log(:info, "Zipping dump into #{zip_fname}") system(cmdline) yml_fnames.each { |fname| File.delete(fname) } end -log :info, "Done" +log(:info, "Done") exit 0 diff --git a/tools/export_tags.rb b/tools/export_tags.rb index 848d56600a9..d1ec6eab17c 100644 --- a/tools/export_tags.rb +++ b/tools/export_tags.rb @@ -2,5 +2,5 @@ raise "No output file provided" if output.nil? puts "Exporting classification tags..." -File.open(output, "w") { |f| f.write Classification.export_to_yaml } +File.open(output, "w") { |f| f.write(Classification.export_to_yaml) } puts "Exporting classification tags... Complete" diff --git a/tools/fix_disk_sizes.rb b/tools/fix_disk_sizes.rb index 1e8aad45e39..563fc223293 100644 --- a/tools/fix_disk_sizes.rb +++ b/tools/fix_disk_sizes.rb @@ -16,19 +16,19 @@ def getDinfo(vim) end log_header = "MIQ(#{__FILE__})" -$log.info "#{log_header} Correcting Disk Sizes..." +$log.info("#{log_header} Correcting Disk Sizes...") disks_by_filename = Disk.all.inject({}) { |h, d| h[d.filename] = d; h } changed_disks = {} ExtManagementSystem.all.each do |e| - $log.info "#{log_header} Correcting Disk Sizes for disks under ExtManagementSystem name: [#{e.name}], id: [#{e.id}]..." 
+ $log.info("#{log_header} Correcting Disk Sizes for disks under ExtManagementSystem name: [#{e.name}], id: [#{e.id}]...") begin vim = e.connect dinfo = getDinfo(vim) rescue => err - $log.error "#{log_header} Error during Correcting Disk Sizes for disks under ExtManagementSystem name: [#{e.name}], id: [#{e.id}]...Skipping" + $log.error("#{log_header} Error during Correcting Disk Sizes for disks under ExtManagementSystem name: [#{e.name}], id: [#{e.id}]...Skipping") $log.log_backtrace(err) next ensure @@ -52,8 +52,8 @@ def getDinfo(vim) end end - $log.info "#{log_header} Collecting Disk Sizes for disks under ExtManagementSystem name: [#{e.name}], id: [#{e.id}]...Complete" + $log.info("#{log_header} Collecting Disk Sizes for disks under ExtManagementSystem name: [#{e.name}], id: [#{e.id}]...Complete") end -$log.info "#{log_header} Changed disks: #{changed_disks.inspect}" -$log.info "#{log_header} Correcting Disk Sizes...Complete" +$log.info("#{log_header} Changed disks: #{changed_disks.inspect}") +$log.info("#{log_header} Correcting Disk Sizes...Complete") diff --git a/tools/metrics_capture_gap.rb b/tools/metrics_capture_gap.rb index b351728a33b..162fcd8798f 100644 --- a/tools/metrics_capture_gap.rb +++ b/tools/metrics_capture_gap.rb @@ -7,10 +7,10 @@ start_date, end_date = *ARGV def log(msg) - $log.info "MIQ(#{__FILE__}) #{msg}" + $log.info("MIQ(#{__FILE__}) #{msg}") puts msg end -log "Queueing metrics capture for [#{start_date}..#{end_date}]..." 
+log("Queueing metrics capture for [#{start_date}..#{end_date}]...") Metric::Capture.perf_capture_gap(Time.parse(start_date), Time.parse(end_date)) -log "Queueing metrics capture for [#{start_date}..#{end_date}]...Complete" +log("Queueing metrics capture for [#{start_date}..#{end_date}]...Complete") diff --git a/tools/metrics_destroy_for_time_profile.rb b/tools/metrics_destroy_for_time_profile.rb index 73fa45a37b8..7795ef94402 100644 --- a/tools/metrics_destroy_for_time_profile.rb +++ b/tools/metrics_destroy_for_time_profile.rb @@ -5,10 +5,10 @@ tp_id = MiqRegion.uncompress_id(ARGV.first) def log(msg) - $log.info "MIQ(#{__FILE__}) #{msg}" + $log.info("MIQ(#{__FILE__}) #{msg}") puts msg end -log "Removing performance records for time profile #{tp_id}..." +log("Removing performance records for time profile #{tp_id}...") TimeProfile.find(tp_id).destroy_metric_rollups -log "Removing performance records for time profile #{tp_id}...Complete" +log("Removing performance records for time profile #{tp_id}...Complete") diff --git a/tools/purge_archived_vms.rb b/tools/purge_archived_vms.rb index e5d39277b96..bc91ab15157 100644 --- a/tools/purge_archived_vms.rb +++ b/tools/purge_archived_vms.rb @@ -10,12 +10,12 @@ query = Vm.where("updated_on < ? or updated_on IS NULL", ARCHIVE_CUTOFF) archived = 0 -$log.info "Searching for archived VMs older than #{ARCHIVE_CUTOFF} UTC." -$log.info "Expecting to prune #{query.all_archived.count} of the #{query.count} older vms" +$log.info("Searching for archived VMs older than #{ARCHIVE_CUTOFF} UTC.") +$log.info("Expecting to prune #{query.all_archived.count} of the #{query.count} older vms") if REPORT_ONLY - $log.info "Reporting only; no rows will be deleted." + $log.info("Reporting only; no rows will be deleted.") else - $log.warn "Will delete any matching records." 
+ $log.warn("Will delete any matching records.") end query.all_archived.find_in_batches do |vms| @@ -23,7 +23,7 @@ begin archived += 1 unless REPORT_ONLY - $log.info "Deleting archived VM '#{vm.name}' (id #{vm.id})" + $log.info("Deleting archived VM '#{vm.name}' (id #{vm.id})") vm.destroy end rescue => err @@ -32,7 +32,7 @@ end end -$log.info "Completed purging archived VMs. #{REPORT_ONLY ? 'Found' : 'Purged'} #{archived} archived VMs." +$log.info("Completed purging archived VMs. #{REPORT_ONLY ? 'Found' : 'Purged'} #{archived} archived VMs.") $log.close $log = old_logger diff --git a/tools/purge_metrics.rb b/tools/purge_metrics.rb index 957bc492eed..27788d4c925 100644 --- a/tools/purge_metrics.rb +++ b/tools/purge_metrics.rb @@ -19,25 +19,25 @@ Trollop.die :window, "must be a number greater than 0" if opts[:window] <= 0 def log(msg) - $log.info "MIQ(#{__FILE__}) #{msg}" + $log.info("MIQ(#{__FILE__}) #{msg}") puts msg end formatter = Class.new.extend(ActionView::Helpers::NumberHelper) -log "Purge Counts" +log("Purge Counts") dates = {} counts = {} %w(realtime hourly daily).each do |interval| dates[interval] = opts[interval.to_sym].to_i_with_method.seconds.ago.utc counts[interval] = Metric::Purging.purge_count(dates[interval], interval) - log " #{"#{interval.titleize}:".ljust(9)} #{formatter.number_with_delimiter(counts[interval])}" + log(" #{"#{interval.titleize}:".ljust(9)} #{formatter.number_with_delimiter(counts[interval])}") end puts exit if opts[:mode] != "purge" -log "Purging..." 
+log("Purging...") require 'ruby-progressbar' %w(realtime hourly daily).each do |interval| pbar = ProgressBar.create(:title => interval.titleize, :total => counts[interval], :autofinish => false) @@ -48,4 +48,4 @@ def log(msg) end pbar.finish end -log "Purging...Complete" +log("Purging...Complete") diff --git a/tools/purge_miq_report_results.rb b/tools/purge_miq_report_results.rb index 1312cbe0fe1..0c4b6392981 100644 --- a/tools/purge_miq_report_results.rb +++ b/tools/purge_miq_report_results.rb @@ -26,7 +26,7 @@ end def log(msg) - $log.info "MIQ(#{__FILE__}) #{msg}" + $log.info("MIQ(#{__FILE__}) #{msg}") puts msg end @@ -36,15 +36,15 @@ def log(msg) when :remaining then "last #{purge_value} results" when :date then "[#{purge_value.iso8601}]" end -log "Executing in #{opts[:mode]} mode for report results older than #{msg}" +log("Executing in #{opts[:mode]} mode for report results older than #{msg}") count = MiqReportResult.purge_count(purge_mode, purge_value) -log "Purge Count: #{formatter.number_with_delimiter(count)}" +log("Purge Count: #{formatter.number_with_delimiter(count)}") puts exit if opts[:mode] != "purge" -log "Purging..." +log("Purging...") require 'ruby-progressbar' pbar = ProgressBar.create(:title => "Purging", :total => count, :autofinish => false) @@ -55,4 +55,4 @@ def log(msg) end pbar.finish -log "Purging...Complete" +log("Purging...Complete") diff --git a/tools/purge_orphaned_tag_values.rb b/tools/purge_orphaned_tag_values.rb index 164cb35b8c9..6a5aa981434 100644 --- a/tools/purge_orphaned_tag_values.rb +++ b/tools/purge_orphaned_tag_values.rb @@ -11,28 +11,28 @@ Trollop.die :delete_window, "must be a number greater than 0" if opts[:delete_window] <= 0 def log(msg) - $log.info "MIQ(#{__FILE__}) #{msg}" + $log.info("MIQ(#{__FILE__}) #{msg}") puts msg end formatter = Class.new.extend(ActionView::Helpers::NumberHelper) require 'ruby-progressbar' -log "Purging orphaned tag values..." 
+log("Purging orphaned tag values...") # Determine all of the known metric ids in the tag values table -log "Finding known metric ids..." +log("Finding known metric ids...") perf_ids = Hash.new { |h, k| h[k] = [] } # TODO: there is probably a way to do this without bringing the ids back t = Benchmark.realtime do VimPerformanceTagValue.select("metric_type, metric_id").distinct.order(nil).each { |v| perf_ids[v.metric_type] << v.metric_id } end perf_ids_count = perf_ids.inject(0) { |sum, (_type, ids)| sum + ids.length } -log "Finding known metric ids...Complete - #{formatter.number_with_delimiter(perf_ids_count)} records (#{t}s)" +log("Finding known metric ids...Complete - #{formatter.number_with_delimiter(perf_ids_count)} records (#{t}s)") if perf_ids_count > 0 # Determine the orphaned tag values by finding deleted metric ids - log "Finding deleted metric ids..." + log("Finding deleted metric ids...") deleted_ids = Hash.new { |h, k| h[k] = [] } pbar = ProgressBar.create(:title => "Searching", :total => perf_ids_count, :autofinish => false) perf_ids.each do |type, ids| @@ -45,13 +45,13 @@ def log(msg) end pbar.finish deleted_ids_count = deleted_ids.inject(0) { |sum, (_type, ids)| sum + ids.length } - log "Finding deleted metric ids...Complete - #{formatter.number_with_delimiter(deleted_ids_count)} records" + log("Finding deleted metric ids...Complete - #{formatter.number_with_delimiter(deleted_ids_count)} records") perf_ids = nil # Allow GC to collect the huge array if deleted_ids_count > 0 # Delete the orphaned tag values by the known deleted metric ids - log "Deleting orphaned tag values..." 
+ log("Deleting orphaned tag values...") pbar = ProgressBar.create(:title => "Deleting", :total => deleted_ids_count, :autofinish => false) deleted_ids.each do |type, ids| ids.each_slice(opts[:delete_window]) do |ids_window| @@ -60,8 +60,8 @@ def log(msg) end end pbar.finish - log "Deleting orphaned tag values...Complete" + log("Deleting orphaned tag values...Complete") end end -log "Purging orphaned tag values...Complete" +log("Purging orphaned tag values...Complete") diff --git a/tools/rebuild_provision_request.rb b/tools/rebuild_provision_request.rb index a7fd71a8e1e..4503ca61197 100644 --- a/tools/rebuild_provision_request.rb +++ b/tools/rebuild_provision_request.rb @@ -254,7 +254,7 @@ def perform def run_it print "\nRE Running the Rest API POST to request_id: #{@options[:request_id]}\n\n" unless @quiet - result = RestClient.post build_url, output.to_json + result = RestClient.post(build_url, output.to_json) print "#{result}\n" end diff --git a/tools/vm_retirement.rb b/tools/vm_retirement.rb index 711d775f69e..c993019c834 100644 --- a/tools/vm_retirement.rb +++ b/tools/vm_retirement.rb @@ -100,9 +100,9 @@ def parse_command_line when "list" list when "list_invalid" - list_invalid parameters[:valid_warnings] + list_invalid(parameters[:valid_warnings]) when "reset_invalid" - reset_invalid parameters[:valid_warnings], parameters[:default_warning] + reset_invalid(parameters[:valid_warnings], parameters[:default_warning]) else puts "Invalid Verb on Command Line: <#{parameters[:verb]}>" end