Skip to content

Commit

Permalink
Passing important skeletal precreate specs
Browse files Browse the repository at this point in the history
Passing important skeletal precreate specs
  • Loading branch information
Ladas committed Jan 26, 2018
1 parent ee76d4c commit 2856a9d
Show file tree
Hide file tree
Showing 2 changed files with 19 additions and 43 deletions.
24 changes: 18 additions & 6 deletions spec/models/manager_refresh/persister/skeletal_precreate_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@

let(:persister) { create_containers_persister }

# TODO(lsmola) we have to make this pass for :complete => true but also for :targeted => true, targeted needs
# conditions here https://github.com/Ladas/manageiq/blob/0b7ceab23388a19c5da8672b2e8baaef8baf8a80/app/models/manager_refresh/inventory_collection.rb#L531

it "tests container relations are pre-created and updated by other refresh" do
persister.containers.build(
container_data(
Expand Down Expand Up @@ -154,10 +157,9 @@

# The batch saving must not save full record and skeletal record together, otherwise that would
# lead to nullifying of all attributes of the existing record, that skeletal record points to.
# TODO(lsmola) make sure we do smart batching, so this doesn't happen
expect(ContainerProject.find_by(:ems_ref => "container_project_ems_ref_1")).to(
have_attributes(
:name => nil, # This has to be "container_project_name_1",
          :name => "container_project_name_1", # must not be nullified by the skeletal record
:ems_id => @ems.id,
:ems_ref => "container_project_ems_ref_1",
)
Expand All @@ -171,6 +173,16 @@
)
end

  it "tests skeletal precreate sets a base STI type and entity full refresh updates it, then skeletal leaves it be" do
# TODO(lsmola) we need to allow STI type to be updatable

end

  it "tests updating 2 partial records has correct smart batches" do
# TODO(lsmola) smart batching should be tested elsewhere
end


it "tests smart batching batches only records with the same fields" do
FactoryGirl.create(:container_project, container_project_data(1).merge(:ems_id => @ems.id))
FactoryGirl.create(:container_project, container_project_data(2).merge(:ems_id => @ems.id))
Expand Down Expand Up @@ -244,7 +256,9 @@

it "we reconnect existing container group" do
# TODO(lsmola) this belongs to targeted refresh spec
    # TODO(lsmola) this currently needs custom_reconnect_block; delete it and make this pass without it
    # TODO(lsmola) to reconnect correctly, we need :deleted_on => nil in :builder_params — is that viable? We probably
    # do not want to solve this in general; if so, we would have to allow this to be settable in the parser. E.g.
# for OpenShift pods watch targeted refresh, we can refresh already disconnected entity
FactoryGirl.create(:container_group, container_group_data(1).merge(
:ems_id => @ems.id,
:deleted_on => Time.now.utc)
Expand Down Expand Up @@ -449,11 +463,9 @@
)

container_group = ContainerGroup.first
# TODO(lsmola) Here should be container_project_name_1, but since this came from skeletal pre-create, the :name
# attribute was not set.
expect(container_group).to(
have_attributes(
:name => nil,
:name => "container_project_name_1",
:ems_ref => "container_group_ems_ref_1"
)
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,6 @@ def initialize_inventory_collections
# TODO: should match on digest when available
:manager_ref => [:image_ref],
:delete_method => :disconnect_inv,
:custom_reconnect_block => custom_reconnect_block
)
)
# images have custom_attributes but that's done conditionally in openshift parser
Expand All @@ -140,12 +139,11 @@ def initialize_inventory_collections
shared_options.merge(
:model_class => ContainerGroup,
:parent => manager,
:builder_params => {:ems_id => manager.id},
:builder_params => {:ems_id => manager.id, :deleted_on => nil},
:association => :container_groups,
:secondary_refs => {:by_container_project_and_name => [:container_project, :name]},
:attributes_blacklist => [:namespace],
:delete_method => :disconnect_inv,
:custom_reconnect_block => custom_reconnect_block
)
)
initialize_container_conditions_collection(manager, :container_groups)
Expand All @@ -170,7 +168,6 @@ def initialize_inventory_collections
:association => :containers,
# parser sets :ems_ref => "#{pod_id}_#{container.name}_#{container.image}"
:delete_method => :disconnect_inv,
:custom_reconnect_block => custom_reconnect_block
)
)
@collections[:container_port_configs] =
Expand Down Expand Up @@ -391,37 +388,4 @@ def initialize_taggings_collection(parent_collection)
def add_collection(collection)
@collections[collection.name] = collection
end

def custom_reconnect_block
# TODO(lsmola) once we have DB unique indexes, we can stop using manual reconnect, since it adds processing time
lambda do |inventory_collection, inventory_objects_index, attributes_index|
relation = inventory_collection.model_class.where(:ems_id => inventory_collection.parent.id).archived

# Skip reconnect if there are no archived entities
return if relation.archived.count <= 0
raise "Allowed only manager_ref size of 1, got #{inventory_collection.manager_ref}" if inventory_collection.manager_ref.count > 1

inventory_objects_index.each_slice(1000) do |batch|
relation.where(inventory_collection.manager_ref.first => batch.map(&:first)).each do |record|
index = inventory_collection.object_index_with_keys(inventory_collection.manager_ref_to_cols, record)

# We need to delete the record from the inventory_objects_index and attributes_index, otherwise it
# would be sent for create.
inventory_object = inventory_objects_index.delete(index)
hash = attributes_index.delete(index)

# Make the entity active again, otherwise we would be duplicating nested entities
hash[:deleted_on] = nil

record.assign_attributes(hash.except(:id, :type))
if !inventory_collection.check_changed? || record.changed?
record.save!
inventory_collection.store_updated_records(record)
end

inventory_object.id = record.id
end
end
end
end
end

0 comments on commit 2856a9d

Please sign in to comment.