Hello community,
here is the log from the commit of package velum for openSUSE:Factory checked in at 2018-04-05 15:33:23
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/velum (Old)
and /work/SRC/openSUSE:Factory/.velum.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "velum"
Thu Apr 5 15:33:23 2018 rev:18 rq:593478 version:3.0.0+dev+git_r703_df2128664b7874c6a1ace8bd622484e95db32af9
Changes:
--------
--- /work/SRC/openSUSE:Factory/velum/velum.changes 2018-03-28 10:32:39.990272520 +0200
+++ /work/SRC/openSUSE:Factory/.velum.new/velum.changes 2018-04-05 15:33:25.740278286 +0200
@@ -1,0 +2,51 @@
+Wed Apr 4 09:33:04 UTC 2018 - containers-bugowner@suse.de
+
+- Commit 3320a77 by Rafael Fernández López <ereslibre@ereslibre.es>
+ Properly create and update orchestrations that have been executed outside
+ of Velum
+
+ It's more correct for the removal orchestration to read the parameters that
+ were passed to the orchestration rather than to rely on the current status of
+ the minions in the database.
+
+
+-------------------------------------------------------------------
+Tue Apr 3 10:35:05 UTC 2018 - containers-bugowner@suse.de
+
+- Commit 2fd6eee by Maximilian Meister <mmeister@suse.de>
+ update rails-html-sanitizer (bsc#1086598)
+
+ CVE-2018-3741
+
+ Signed-off-by: Maximilian Meister <mmeister@suse.de>
+
+
+-------------------------------------------------------------------
+Mon Apr 2 13:25:30 UTC 2018 - containers-bugowner@suse.de
+
+- Commit 27645e8 by Vítor Avelino <contact@vitoravelino.me>
+ Added alert in overview page for unsupported config
+
+ Whenever the cluster enters an unsupported configuration state, an alert is
+ shown with the reason, keeping the user aware of it.
+
+ enhancement#unsupported_config_msg
+
+ Signed-off-by: Vítor Avelino <contact@vitoravelino.me>
+
+
+-------------------------------------------------------------------
+Mon Apr 2 13:21:38 UTC 2018 - containers-bugowner@suse.de
+
+- Commit f1277f2 by Vítor Avelino <contact@vitoravelino.me>
+ Added minimum-of-three-nodes requirement to warning
+
+ This is a follow-up to the previous node removal feature patch; it shows the
+ requirement of a minimum of three nodes when a user removes a node.
+
+ enhancement#follow_up_node_removal
+
+ Signed-off-by: Vítor Avelino <contact@vitoravelino.me>
+
+
+-------------------------------------------------------------------
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ velum.spec ++++++
--- /var/tmp/diff_new_pack.MdSNNb/_old 2018-04-05 15:33:27.328220895 +0200
+++ /var/tmp/diff_new_pack.MdSNNb/_new 2018-04-05 15:33:27.328220895 +0200
@@ -23,7 +23,7 @@
# Version: 1.0.0
# %%define branch 1.0.0
-Version: 3.0.0+dev+git_r695_2c7f4d4eda39b1fcff1ccea959177a200da9c718
+Version: 3.0.0+dev+git_r703_df2128664b7874c6a1ace8bd622484e95db32af9
Release: 0
%define branch master
Summary: Dashboard for CaasP
@@ -96,7 +96,7 @@
%description
velum is the dashboard for CaasP to manage and deploy kubernetes clusters on top of MicroOS
-This package has been built with commit 2c7f4d4eda39b1fcff1ccea959177a200da9c718 from branch master on date Mon, 26 Mar 2018 10:48:06 +0000
+This package has been built with commit df2128664b7874c6a1ace8bd622484e95db32af9 from branch master on date Wed, 04 Apr 2018 09:32:21 +0000
%prep
%setup -q -n velum-%{branch}
++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velum-master/Gemfile.lock new/velum-master/Gemfile.lock
--- old/velum-master/Gemfile.lock 2018-03-26 12:47:04.000000000 +0200
+++ new/velum-master/Gemfile.lock 2018-04-04 11:32:28.000000000 +0200
@@ -109,7 +109,7 @@
url_safe_base64
json-schema (2.8.0)
addressable (>= 2.4)
- loofah (2.2.1)
+ loofah (2.2.2)
crass (~> 1.0.2)
nokogiri (>= 1.5.9)
mail (2.7.0)
@@ -180,8 +180,8 @@
activesupport (>= 4.2.0, < 5.0)
nokogiri (~> 1.6)
rails-deprecated_sanitizer (>= 1.0.1)
- rails-html-sanitizer (1.0.3)
- loofah (~> 2.0)
+ rails-html-sanitizer (1.0.4)
+ loofah (~> 2.2, >= 2.2.2)
rails_stdout_logging (0.0.5)
railties (4.2.10)
actionpack (= 4.2.10)
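
The Gemfile.lock hunk above is the dependency side of the CVE-2018-3741 update: rails-html-sanitizer 1.0.4 tightens its loofah requirement to "~> 2.2, >= 2.2.2", pulling in the patched sanitizer. As a minimal illustration only (velum receives these versions through Bundler's lock file, not explicit pins), an application pinning the patched stack in its Gemfile would look like:

    # Illustrative Gemfile sketch, not part of this package.
    source "https://rubygems.org"

    gem "rails-html-sanitizer", ">= 1.0.4" # patched for CVE-2018-3741
    gem "loofah", ">= 2.2.2"               # transitive dependency of the sanitizer
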
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velum-master/app/assets/javascripts/dashboard/dashboard.js new/velum-master/app/assets/javascripts/dashboard/dashboard.js
--- old/velum-master/app/assets/javascripts/dashboard/dashboard.js 2018-03-26 12:47:04.000000000 +0200
+++ new/velum-master/app/assets/javascripts/dashboard/dashboard.js 2018-04-04 11:32:28.000000000 +0200
@@ -186,6 +186,7 @@
MinionPoller.handleRetryableOrchestrations(data);
handleBootstrapErrors();
+ handleUnsupportedClusterConfiguration();
// show/hide "update all nodes" link
var hasAdminNodeUpdate = data.admin.update_status === 1 || data.admin.update_status === 2;
@@ -887,6 +888,11 @@
errors.push("You need at least one worker");
}
+ // We need at least three nodes
+ if (masters.length + workers.length < 3) {
+ errors.push("Minimum of three nodes");
+ }
+
// We need an odd number of masters
if (masters.length % 2 !== 1) {
errors.push('The number of masters has to be an odd number');
@@ -926,3 +932,21 @@
$('.pending-accept-link').addClass('hidden');
$('.admin-outdated-notification').addClass('hidden');
}
+
+function handleUnsupportedClusterConfiguration() {
+ var masters = State.minions.filter(function (m) { return m.role === 'master' });
+ var workers = State.minions.filter(function (m) { return m.role === 'worker' });
+ var $alert = $('.unsupported-alert');
+
+ // We need at least three nodes
+ if (masters.length + workers.length < 3) {
+ $alert.find('.reason').text('a minimum of three nodes');
+ $alert.fadeIn(100);
+ } else if (masters.length % 2 === 0) {
+ // We need an odd number of masters
+ $alert.find('.reason').text('an odd number of master nodes');
+ $alert.fadeIn(100);
+ } else {
+ $alert.fadeOut(500);
+ }
+}
\ No newline at end of file
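
The handleUnsupportedClusterConfiguration function added above encodes two topology constraints: a minimum of three nodes overall, and an odd number of masters. A minimal Ruby sketch of the same rule (unsupported_reason is a hypothetical helper, not part of Velum) makes the decision order explicit; the node-count check wins when both constraints are violated:

    # Hypothetical helper mirroring the JavaScript check above; returns the
    # reason string for the alert, or nil when the topology is supported.
    def unsupported_reason(masters, workers)
      return "a minimum of three nodes" if masters.size + workers.size < 3
      return "an odd number of master nodes" if masters.size.even?
      nil
    end

    unsupported_reason(%w[m0], %w[w0])     # => "a minimum of three nodes"
    unsupported_reason(%w[m0 m1], %w[w0])  # => "an odd number of master nodes"
    unsupported_reason(%w[m0], %w[w0 w1])  # => nil (1 master, 3 nodes in total)
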
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velum-master/app/models/salt_handler/orchestration_result.rb new/velum-master/app/models/salt_handler/orchestration_result.rb
--- old/velum-master/app/models/salt_handler/orchestration_result.rb 2018-03-26 12:47:04.000000000 +0200
+++ new/velum-master/app/models/salt_handler/orchestration_result.rb 2018-04-04 11:32:28.000000000 +0200
@@ -19,11 +19,15 @@
when "orch.kubernetes", "orch.update"
update_minions orchestration_succeeded: orchestration_succeeded
when "orch.removal"
+ # Since this orchestration is parametrized, it is more correct to only update
+ # the minions that are specified in the orchestration parameters.
if orchestration_succeeded
- Minion.pending_removal.destroy_all
+ Minion.pending_removal.where(minion_id: (orchestration.params || {})["target"]).destroy_all
else
# rubocop:disable SkipsModelValidations
- Minion.pending_removal.update_all highstate: Minion.highstates[:removal_failed]
+ Minion.pending_removal.where(minion_id: (orchestration.params || {})["target"]).update_all(
+ highstate: Minion.highstates[:removal_failed]
+ )
# rubocop:enable SkipsModelValidations
end
end
@@ -33,9 +37,13 @@
private
- def update_orchestration(orchestration_succeeded:, event_data:)
+ def orchestration
jid, = @salt_event.tag.match(self.class.tag_matcher).captures
- ::Orchestration.find_by(jid: jid).tap do |orchestration|
+ ::Orchestration.find_by jid: jid
+ end
+
+ def update_orchestration(orchestration_succeeded:, event_data:)
+ orchestration.tap do |orchestration|
orchestration.status = if orchestration_succeeded
::Orchestration.statuses[:succeeded]
else
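
The orchestration_result.rb hunk above is the heart of commit 3320a77: instead of destroying (or flagging) every minion in pending_removal state, the handler now restricts the query to the minion ids recorded in the orchestration's own params, so a removal orchestration executed outside of Velum cannot affect unrelated minions. A condensed sketch of the resulting logic, assuming an ActiveRecord-style Minion model (illustration only, not the exact Velum code path):

    # Scope the outcome to the minions this specific orchestration targeted.
    def handle_removal_result(orchestration, succeeded:)
      targets = (orchestration.params || {})["target"]
      scoped  = Minion.pending_removal.where(minion_id: targets)

      if succeeded
        scoped.destroy_all
      else
        scoped.update_all(highstate: Minion.highstates[:removal_failed])
      end
    end
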
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velum-master/app/models/salt_handler/orchestration_trigger.rb new/velum-master/app/models/salt_handler/orchestration_trigger.rb
--- old/velum-master/app/models/salt_handler/orchestration_trigger.rb 2018-03-26 12:47:04.000000000 +0200
+++ new/velum-master/app/models/salt_handler/orchestration_trigger.rb 2018-04-04 11:32:28.000000000 +0200
@@ -19,10 +19,12 @@
when "orch.removal"
::Orchestration.kinds[:removal]
end
+ orch.params = (
+ event_data["fun_args"].find { |k| k.respond_to?(:key?) && k.key?("pillar") } || {}
+ )["pillar"]
end
orchestration.started_at = Time.zone.parse event_data["_stamp"]
orchestration.save
-
true
end
end
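
For the result handler to read those parameters back, the trigger handler above has to persist them first. Salt's "fun_args" mixes the orchestration name (a String) with an options Hash, hence the respond_to?(:key?) guard before probing for "pillar". A self-contained sketch of that extraction (pillar_from is a hypothetical name; the event shape follows the specs further below):

    # Extract the pillar Hash from a Salt run event's "fun_args", if present.
    def pillar_from(event_data)
      args = event_data["fun_args"] || []
      with_pillar = args.find { |a| a.respond_to?(:key?) && a.key?("pillar") } || {}
      with_pillar["pillar"]
    end

    pillar_from("fun_args" => ["orch.removal", { "pillar" => { "target" => ["m0"] } }])
    # => { "target" => ["m0"] }
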
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velum-master/app/views/dashboard/_warn_node_removal_modal.html.slim new/velum-master/app/views/dashboard/_warn_node_removal_modal.html.slim
--- old/velum-master/app/views/dashboard/_warn_node_removal_modal.html.slim 2018-03-26 12:47:04.000000000 +0200
+++ new/velum-master/app/views/dashboard/_warn_node_removal_modal.html.slim 2018-04-04 11:32:28.000000000 +0200
@@ -6,7 +6,7 @@
span aria-hidden="true"
| ×
h4#modal-label.modal-title
- | Invalid cluster topology
+ | Unsupported cluster topology
.modal-body
p By removing the respective node you'll be breaking the following constraints that is needed to have a stable and supported topology:
ul.node-removal-constraints-list
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velum-master/app/views/dashboard/index.html.slim new/velum-master/app/views/dashboard/index.html.slim
--- old/velum-master/app/views/dashboard/index.html.slim 2018-03-26 12:47:04.000000000 +0200
+++ new/velum-master/app/views/dashboard/index.html.slim 2018-04-04 11:32:28.000000000 +0200
@@ -1,3 +1,10 @@
+.alert.alert-warning.unsupported-alert role="alert" hidden="true"
+ i.fa.fa-4x.pull-left aria-hidden="true"
+ span
+ | A supported deployment of SUSE CaaS Platform requires
+ span class="reason"
+ | .
+
h1 Cluster Status
.nodes-summary
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velum-master/spec/features/dashboard_feature_spec.rb new/velum-master/spec/features/dashboard_feature_spec.rb
--- old/velum-master/spec/features/dashboard_feature_spec.rb 2018-03-26 12:47:04.000000000 +0200
+++ new/velum-master/spec/features/dashboard_feature_spec.rb 2018-04-04 11:32:28.000000000 +0200
@@ -131,6 +131,28 @@
expect(page).not_to have_content("(new)")
end
+
+ context "when unsupported configuration" do
+ before do
+ Minion.destroy_all
+ end
+ it "shows alert if nodes is less than 3", js: true do
+ Minion.create! [{ minion_id: SecureRandom.hex, fqdn: "minion0.k8s.local", role: "master" },
+ { minion_id: SecureRandom.hex, fqdn: "minion1.k8s.local", role: "worker" }]
+
+ visit authenticated_root_path
+ expect(page).to have_content("requires a minimum of three nodes")
+ end
+
+ it "shows alert if masters number is even", js: true do
+ Minion.create! [{ minion_id: SecureRandom.hex, fqdn: "minion0.k8s.local", role: "master" },
+ { minion_id: SecureRandom.hex, fqdn: "minion1.k8s.local", role: "master" },
+ { minion_id: SecureRandom.hex, fqdn: "minion2.k8s.local", role: "worker" }]
+
+ visit authenticated_root_path
+ expect(page).to have_content("requires an odd number of masters")
+ end
+ end
end
end
# rubocop:enable RSpec/ExampleLength
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velum-master/spec/features/node_removal_feature_spec.rb new/velum-master/spec/features/node_removal_feature_spec.rb
--- old/velum-master/spec/features/node_removal_feature_spec.rb 2018-03-26 12:47:04.000000000 +0200
+++ new/velum-master/spec/features/node_removal_feature_spec.rb 2018-04-04 11:32:28.000000000 +0200
@@ -55,7 +55,7 @@
master_selector = ".remove-node-link[data-id='#{minions[0].minion_id}']"
find(master_selector).click
- expect(page).to have_content("Invalid cluster topology")
+ expect(page).to have_content("Unsupported cluster topology")
end
it "proceeds with removal even after warning" do
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velum-master/spec/models/salt_handler/orchestration_result_spec.rb new/velum-master/spec/models/salt_handler/orchestration_result_spec.rb
--- old/velum-master/spec/models/salt_handler/orchestration_result_spec.rb 2018-03-26 12:47:04.000000000 +0200
+++ new/velum-master/spec/models/salt_handler/orchestration_result_spec.rb 2018-04-04 11:32:28.000000000 +0200
@@ -1,10 +1,21 @@
require "rails_helper"
describe SaltHandler::OrchestrationResult do
+ let(:orchestration) do
+ FactoryGirl.create(:orchestration,
+ jid: "20170706104527757673")
+ end
+
+ let(:removal_orchestration) do
+ FactoryGirl.create(:removal_orchestration,
+ jid: "20170706104527757673",
+ params: { target: pending_removal_minion.minion_id })
+ end
+
let(:successful_orchestration_result) do
event_data = {
- "fun_args" => ["orch.kubernetes", { "orchestration_jid" => "20170706104527757673" }],
- "jid" => "20170706104527757673",
+ "fun_args" => ["orch.kubernetes", { "orchestration_jid" => orchestration.jid }],
+ "jid" => orchestration.jid,
"return" => { "retcode" => 0 },
"success" => true,
"_stamp" => "2017-07-06T10:45:54.734096",
@@ -13,14 +24,19 @@
}.to_json
FactoryGirl.create(:salt_event,
- tag: "salt/run/20170706104527757673/ret",
+ tag: "salt/run/#{orchestration.jid}/ret",
data: event_data)
end
+ let(:removal_params) do
+ { "pillar" => { "target" => removal_orchestration.params["target"] },
+ "orchestration_jid" => removal_orchestration.jid }
+ end
+
let(:successful_removal_orchestration_result) do
event_data = {
- "fun_args" => ["orch.removal", { "orchestration_jid" => "20170706104527757673" }],
- "jid" => "20170706104527757673",
+ "fun_args" => ["orch.removal", removal_params],
+ "jid" => removal_orchestration.jid,
"return" => { "retcode" => 0 },
"success" => true,
"_stamp" => "2017-07-06T10:45:54.734096",
@@ -29,14 +45,14 @@
}.to_json
FactoryGirl.create(:salt_event,
- tag: "salt/run/20170706104527757673/ret",
+ tag: "salt/run/#{removal_orchestration.jid}/ret",
data: event_data)
end
let(:mid_successful_orchestration_result) do
event_data = {
- "fun_args" => ["orch.kubernetes", { "orchestration_jid" => "20170706104527757673" }],
- "jid" => "20170706104527757673",
+ "fun_args" => ["orch.kubernetes", { "orchestration_jid" => orchestration.jid }],
+ "jid" => orchestration.jid,
"return" => { "retcode" => 1 },
"success" => true,
"_stamp" => "2017-07-06T10:45:54.734096",
@@ -45,14 +61,14 @@
}.to_json
FactoryGirl.create(:salt_event,
- tag: "salt/run/20170706104527757673/ret",
+ tag: "salt/run/#{orchestration.jid}/ret",
data: event_data)
end
let(:failed_orchestration_result) do
event_data = {
- "fun_args" => ["orch.kubernetes", { "orchestration_jid" => "20170706104527757673" }],
- "jid" => "20170706104527757673",
+ "fun_args" => ["orch.kubernetes", { "orchestration_jid" => orchestration.jid }],
+ "jid" => orchestration.jid,
"return" => { "retcode" => 1 },
"success" => false,
"_stamp" => "2017-07-06T10:45:54.734096",
@@ -61,14 +77,14 @@
}.to_json
FactoryGirl.create(:salt_event,
- tag: "salt/run/20170706104527757673/ret",
+ tag: "salt/run/#{orchestration.jid}/ret",
data: event_data)
end
let(:failed_removal_orchestration_result) do
event_data = {
- "fun_args" => ["orch.removal", { "orchestration_jid" => "20170706104527757673" }],
- "jid" => "20170706104527757673",
+ "fun_args" => ["orch.removal", removal_params],
+ "jid" => removal_orchestration.jid,
"return" => { "retcode" => 1 },
"success" => false,
"_stamp" => "2017-07-06T10:45:54.734096",
@@ -77,7 +93,7 @@
}.to_json
FactoryGirl.create(:salt_event,
- tag: "salt/run/20170706104527757673/ret",
+ tag: "salt/run/#{removal_orchestration.jid}/ret",
data: event_data)
end
@@ -106,8 +122,6 @@
before do
pending_minion
applied_minion
- FactoryGirl.create(:orchestration,
- jid: "20170706104527757673")
end
describe "with a successful orchestration result" do
@@ -149,7 +163,9 @@
let(:handler) { described_class.new(successful_removal_orchestration_result) }
before do
- pending_removal_minion
+ FactoryGirl.create(:removal_orchestration,
+ jid: "20170706104527757673",
+ params: { target: pending_removal_minion.minion_id })
end
it "destroys the minion with pending_removal state" do
@@ -161,7 +177,9 @@
let(:handler) { described_class.new(failed_removal_orchestration_result) }
before do
- pending_removal_minion
+ FactoryGirl.create(:removal_orchestration,
+ jid: "20170706104527757673",
+ params: { target: pending_removal_minion.minion_id })
end
it "marks the minion with removal_failed state if it failed" do